author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit     2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree       848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/net/wireless/intel/iwlwifi/pcie
parent     Initial commit. (diff)
Adding upstream version 6.1.76. (tag: upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/pcie')
-rw-r--r-- | drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c |  345
-rw-r--r-- | drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c      |  263
-rw-r--r-- | drivers/net/wireless/intel/iwlwifi/pcie/drv.c            | 1789
-rw-r--r-- | drivers/net/wireless/intel/iwlwifi/pcie/internal.h       |  843
-rw-r--r-- | drivers/net/wireless/intel/iwlwifi/pcie/rx.c             | 2388
-rw-r--r-- | drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c     |  465
-rw-r--r-- | drivers/net/wireless/intel/iwlwifi/pcie/trans.c          | 3735
-rw-r--r-- | drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c        |  259
-rw-r--r-- | drivers/net/wireless/intel/iwlwifi/pcie/tx.c             | 1630
9 files changed, 11717 insertions(+), 0 deletions(-)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c new file mode 100644 index 000000000..75fd386b0 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c @@ -0,0 +1,345 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2018-2022 Intel Corporation + */ +#include "iwl-trans.h" +#include "iwl-fh.h" +#include "iwl-context-info-gen3.h" +#include "internal.h" +#include "iwl-prph.h" + +static void +iwl_pcie_ctxt_info_dbg_enable(struct iwl_trans *trans, + struct iwl_prph_scratch_hwm_cfg *dbg_cfg, + u32 *control_flags) +{ + enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1; + struct iwl_fw_ini_allocation_tlv *fw_mon_cfg; + u32 dbg_flags = 0; + + if (!iwl_trans_dbg_ini_valid(trans)) { + struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon; + + iwl_pcie_alloc_fw_monitor(trans, 0); + + if (fw_mon->size) { + dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM; + + IWL_DEBUG_FW(trans, + "WRT: Applying DRAM buffer destination\n"); + + dbg_cfg->hwm_base_addr = cpu_to_le64(fw_mon->physical); + dbg_cfg->hwm_size = cpu_to_le32(fw_mon->size); + } + + goto out; + } + + fw_mon_cfg = &trans->dbg.fw_mon_cfg[alloc_id]; + + switch (le32_to_cpu(fw_mon_cfg->buf_location)) { + case IWL_FW_INI_LOCATION_SRAM_PATH: + dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL; + IWL_DEBUG_FW(trans, + "WRT: Applying SMEM buffer destination\n"); + break; + + case IWL_FW_INI_LOCATION_NPK_PATH: + dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF; + IWL_DEBUG_FW(trans, + "WRT: Applying NPK buffer destination\n"); + break; + + case IWL_FW_INI_LOCATION_DRAM_PATH: + if (trans->dbg.fw_mon_ini[alloc_id].num_frags) { + struct iwl_dram_data *frag = + &trans->dbg.fw_mon_ini[alloc_id].frags[0]; + dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM; + dbg_cfg->hwm_base_addr = cpu_to_le64(frag->physical); + dbg_cfg->hwm_size = cpu_to_le32(frag->size); + dbg_cfg->debug_token_config = cpu_to_le32(trans->dbg.ucode_preset); + IWL_DEBUG_FW(trans, + "WRT: Applying DRAM destination (debug_token_config=%u)\n", + dbg_cfg->debug_token_config); + IWL_DEBUG_FW(trans, + "WRT: Applying DRAM destination (alloc_id=%u, num_frags=%u)\n", + alloc_id, + trans->dbg.fw_mon_ini[alloc_id].num_frags); + } + break; + default: + IWL_ERR(trans, "WRT: Invalid buffer destination\n"); + } +out: + if (dbg_flags) + *control_flags |= IWL_PRPH_SCRATCH_EARLY_DEBUG_EN | dbg_flags; +} + +int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, + const struct fw_img *fw) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_context_info_gen3 *ctxt_info_gen3; + struct iwl_prph_scratch *prph_scratch; + struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl; + struct iwl_prph_info *prph_info; + u32 control_flags = 0; + int ret; + int cmdq_size = max_t(u32, IWL_CMD_QUEUE_SIZE, + trans->cfg->min_txq_size); + + switch (trans_pcie->rx_buf_size) { + case IWL_AMSDU_DEF: + return -EINVAL; + case IWL_AMSDU_2K: + break; + case IWL_AMSDU_4K: + control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K; + break; + case IWL_AMSDU_8K: + control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K; + /* if firmware supports the ext size, tell it */ + control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K; + break; + case IWL_AMSDU_12K: + control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K; + /* if firmware supports the ext size, tell it */ + control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K; + break; + } + + /* Allocate prph scratch */ + prph_scratch = dma_alloc_coherent(trans->dev, 
sizeof(*prph_scratch), + &trans_pcie->prph_scratch_dma_addr, + GFP_KERNEL); + if (!prph_scratch) + return -ENOMEM; + + prph_sc_ctrl = &prph_scratch->ctrl_cfg; + + prph_sc_ctrl->version.version = 0; + prph_sc_ctrl->version.mac_id = + cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV)); + prph_sc_ctrl->version.size = cpu_to_le16(sizeof(*prph_scratch) / 4); + + control_flags |= IWL_PRPH_SCRATCH_MTR_MODE; + control_flags |= IWL_PRPH_MTR_FORMAT_256B & IWL_PRPH_SCRATCH_MTR_FORMAT; + + if (trans->trans_cfg->imr_enabled) + control_flags |= IWL_PRPH_SCRATCH_IMR_DEBUG_EN; + + /* initialize RX default queue */ + prph_sc_ctrl->rbd_cfg.free_rbd_addr = + cpu_to_le64(trans_pcie->rxq->bd_dma); + + iwl_pcie_ctxt_info_dbg_enable(trans, &prph_sc_ctrl->hwm_cfg, + &control_flags); + prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags); + + /* allocate ucode sections in dram and set addresses */ + ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram); + if (ret) + goto err_free_prph_scratch; + + + /* Allocate prph information + * currently we don't assign to the prph info anything, but it would get + * assigned later + * + * We also use the second half of this page to give the device some + * dummy TR/CR tail pointers - which shouldn't be necessary as we don't + * use this, but the hardware still reads/writes there and we can't let + * it go do that with a NULL pointer. + */ + BUILD_BUG_ON(sizeof(*prph_info) > PAGE_SIZE / 2); + prph_info = dma_alloc_coherent(trans->dev, PAGE_SIZE, + &trans_pcie->prph_info_dma_addr, + GFP_KERNEL); + if (!prph_info) { + ret = -ENOMEM; + goto err_free_prph_scratch; + } + + /* Allocate context info */ + ctxt_info_gen3 = dma_alloc_coherent(trans->dev, + sizeof(*ctxt_info_gen3), + &trans_pcie->ctxt_info_dma_addr, + GFP_KERNEL); + if (!ctxt_info_gen3) { + ret = -ENOMEM; + goto err_free_prph_info; + } + + ctxt_info_gen3->prph_info_base_addr = + cpu_to_le64(trans_pcie->prph_info_dma_addr); + ctxt_info_gen3->prph_scratch_base_addr = + cpu_to_le64(trans_pcie->prph_scratch_dma_addr); + ctxt_info_gen3->prph_scratch_size = + cpu_to_le32(sizeof(*prph_scratch)); + ctxt_info_gen3->cr_head_idx_arr_base_addr = + cpu_to_le64(trans_pcie->rxq->rb_stts_dma); + ctxt_info_gen3->tr_tail_idx_arr_base_addr = + cpu_to_le64(trans_pcie->prph_info_dma_addr + PAGE_SIZE / 2); + ctxt_info_gen3->cr_tail_idx_arr_base_addr = + cpu_to_le64(trans_pcie->prph_info_dma_addr + 3 * PAGE_SIZE / 4); + ctxt_info_gen3->mtr_base_addr = + cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr); + ctxt_info_gen3->mcr_base_addr = + cpu_to_le64(trans_pcie->rxq->used_bd_dma); + ctxt_info_gen3->mtr_size = + cpu_to_le16(TFD_QUEUE_CB_SIZE(cmdq_size)); + ctxt_info_gen3->mcr_size = + cpu_to_le16(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds)); + + trans_pcie->ctxt_info_gen3 = ctxt_info_gen3; + trans_pcie->prph_info = prph_info; + trans_pcie->prph_scratch = prph_scratch; + + /* Allocate IML */ + trans_pcie->iml = dma_alloc_coherent(trans->dev, trans->iml_len, + &trans_pcie->iml_dma_addr, + GFP_KERNEL); + if (!trans_pcie->iml) { + ret = -ENOMEM; + goto err_free_ctxt_info; + } + + memcpy(trans_pcie->iml, trans->iml, trans->iml_len); + + iwl_enable_fw_load_int_ctx_info(trans); + + /* kick FW self load */ + iwl_write64(trans, CSR_CTXT_INFO_ADDR, + trans_pcie->ctxt_info_dma_addr); + iwl_write64(trans, CSR_IML_DATA_ADDR, + trans_pcie->iml_dma_addr); + iwl_write32(trans, CSR_IML_SIZE_ADDR, trans->iml_len); + + iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL, + CSR_AUTO_FUNC_BOOT_ENA); + + return 0; + +err_free_ctxt_info: + 
dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3), + trans_pcie->ctxt_info_gen3, + trans_pcie->ctxt_info_dma_addr); + trans_pcie->ctxt_info_gen3 = NULL; +err_free_prph_info: + dma_free_coherent(trans->dev, PAGE_SIZE, prph_info, + trans_pcie->prph_info_dma_addr); + +err_free_prph_scratch: + dma_free_coherent(trans->dev, + sizeof(*prph_scratch), + prph_scratch, + trans_pcie->prph_scratch_dma_addr); + return ret; + +} + +void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + if (trans_pcie->iml) { + dma_free_coherent(trans->dev, trans->iml_len, trans_pcie->iml, + trans_pcie->iml_dma_addr); + trans_pcie->iml_dma_addr = 0; + trans_pcie->iml = NULL; + } + + iwl_pcie_ctxt_info_free_fw_img(trans); + + if (alive) + return; + + if (!trans_pcie->ctxt_info_gen3) + return; + + /* ctxt_info_gen3 and prph_scratch are still needed for PNVM load */ + dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3), + trans_pcie->ctxt_info_gen3, + trans_pcie->ctxt_info_dma_addr); + trans_pcie->ctxt_info_dma_addr = 0; + trans_pcie->ctxt_info_gen3 = NULL; + + dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_scratch), + trans_pcie->prph_scratch, + trans_pcie->prph_scratch_dma_addr); + trans_pcie->prph_scratch_dma_addr = 0; + trans_pcie->prph_scratch = NULL; + + /* this is needed for the entire lifetime */ + dma_free_coherent(trans->dev, PAGE_SIZE, trans_pcie->prph_info, + trans_pcie->prph_info_dma_addr); + trans_pcie->prph_info_dma_addr = 0; + trans_pcie->prph_info = NULL; +} + +int iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans, + const void *data, u32 len) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl = + &trans_pcie->prph_scratch->ctrl_cfg; + int ret; + + if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) + return 0; + + /* only allocate the DRAM if not allocated yet */ + if (!trans->pnvm_loaded) { + if (WARN_ON(prph_sc_ctrl->pnvm_cfg.pnvm_size)) + return -EBUSY; + + ret = iwl_pcie_ctxt_info_alloc_dma(trans, data, len, + &trans_pcie->pnvm_dram); + if (ret < 0) { + IWL_DEBUG_FW(trans, "Failed to allocate PNVM DMA %d.\n", + ret); + return ret; + } + } + + prph_sc_ctrl->pnvm_cfg.pnvm_base_addr = + cpu_to_le64(trans_pcie->pnvm_dram.physical); + prph_sc_ctrl->pnvm_cfg.pnvm_size = + cpu_to_le32(trans_pcie->pnvm_dram.size); + + return 0; +} + +int iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans, + const void *data, u32 len) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl = + &trans_pcie->prph_scratch->ctrl_cfg; + int ret; + + if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) + return 0; + + /* only allocate the DRAM if not allocated yet */ + if (!trans->reduce_power_loaded) { + if (WARN_ON(prph_sc_ctrl->reduce_power_cfg.size)) + return -EBUSY; + + ret = iwl_pcie_ctxt_info_alloc_dma(trans, data, len, + &trans_pcie->reduce_power_dram); + if (ret < 0) { + IWL_DEBUG_FW(trans, + "Failed to allocate reduce power DMA %d.\n", + ret); + return ret; + } + } + + prph_sc_ctrl->reduce_power_cfg.base_addr = + cpu_to_le64(trans_pcie->reduce_power_dram.physical); + prph_sc_ctrl->reduce_power_cfg.size = + cpu_to_le32(trans_pcie->reduce_power_dram.size); + + return 0; +} diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c new file mode 
100644 index 000000000..74ce31fdf --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c @@ -0,0 +1,263 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2017 Intel Deutschland GmbH + * Copyright (C) 2018-2021 Intel Corporation + */ +#include "iwl-trans.h" +#include "iwl-fh.h" +#include "iwl-context-info.h" +#include "internal.h" +#include "iwl-prph.h" + +static void *_iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans, + size_t size, + dma_addr_t *phys, + int depth) +{ + void *result; + + if (WARN(depth > 2, + "failed to allocate DMA memory not crossing 2^32 boundary")) + return NULL; + + result = dma_alloc_coherent(trans->dev, size, phys, GFP_KERNEL); + + if (!result) + return NULL; + + if (unlikely(iwl_txq_crosses_4g_boundary(*phys, size))) { + void *old = result; + dma_addr_t oldphys = *phys; + + result = _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size, + phys, + depth + 1); + dma_free_coherent(trans->dev, size, old, oldphys); + } + + return result; +} + +static void *iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans, + size_t size, + dma_addr_t *phys) +{ + return _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size, phys, 0); +} + +int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans, + const void *data, u32 len, + struct iwl_dram_data *dram) +{ + dram->block = iwl_pcie_ctxt_info_dma_alloc_coherent(trans, len, + &dram->physical); + if (!dram->block) + return -ENOMEM; + + dram->size = len; + memcpy(dram->block, data, len); + + return 0; +} + +void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans) +{ + struct iwl_self_init_dram *dram = &trans->init_dram; + int i; + + if (!dram->paging) { + WARN_ON(dram->paging_cnt); + return; + } + + /* free paging*/ + for (i = 0; i < dram->paging_cnt; i++) + dma_free_coherent(trans->dev, dram->paging[i].size, + dram->paging[i].block, + dram->paging[i].physical); + + kfree(dram->paging); + dram->paging_cnt = 0; + dram->paging = NULL; +} + +int iwl_pcie_init_fw_sec(struct iwl_trans *trans, + const struct fw_img *fw, + struct iwl_context_info_dram *ctxt_dram) +{ + struct iwl_self_init_dram *dram = &trans->init_dram; + int i, ret, lmac_cnt, umac_cnt, paging_cnt; + + if (WARN(dram->paging, + "paging shouldn't already be initialized (%d pages)\n", + dram->paging_cnt)) + iwl_pcie_ctxt_info_free_paging(trans); + + lmac_cnt = iwl_pcie_get_num_sections(fw, 0); + /* add 1 due to separator */ + umac_cnt = iwl_pcie_get_num_sections(fw, lmac_cnt + 1); + /* add 2 due to separators */ + paging_cnt = iwl_pcie_get_num_sections(fw, lmac_cnt + umac_cnt + 2); + + dram->fw = kcalloc(umac_cnt + lmac_cnt, sizeof(*dram->fw), GFP_KERNEL); + if (!dram->fw) + return -ENOMEM; + dram->paging = kcalloc(paging_cnt, sizeof(*dram->paging), GFP_KERNEL); + if (!dram->paging) + return -ENOMEM; + + /* initialize lmac sections */ + for (i = 0; i < lmac_cnt; i++) { + ret = iwl_pcie_ctxt_info_alloc_dma(trans, fw->sec[i].data, + fw->sec[i].len, + &dram->fw[dram->fw_cnt]); + if (ret) + return ret; + ctxt_dram->lmac_img[i] = + cpu_to_le64(dram->fw[dram->fw_cnt].physical); + dram->fw_cnt++; + } + + /* initialize umac sections */ + for (i = 0; i < umac_cnt; i++) { + /* access FW with +1 to make up for lmac separator */ + ret = iwl_pcie_ctxt_info_alloc_dma(trans, + fw->sec[dram->fw_cnt + 1].data, + fw->sec[dram->fw_cnt + 1].len, + &dram->fw[dram->fw_cnt]); + if (ret) + return ret; + ctxt_dram->umac_img[i] = + cpu_to_le64(dram->fw[dram->fw_cnt].physical); + dram->fw_cnt++; + } + + /* + * Initialize paging. 
+ * Paging memory isn't stored in dram->fw as the umac and lmac - it is + * stored separately. + * This is since the timing of its release is different - + * while fw memory can be released on alive, the paging memory can be + * freed only when the device goes down. + * Given that, the logic here in accessing the fw image is a bit + * different - fw_cnt isn't changing so loop counter is added to it. + */ + for (i = 0; i < paging_cnt; i++) { + /* access FW with +2 to make up for lmac & umac separators */ + int fw_idx = dram->fw_cnt + i + 2; + + ret = iwl_pcie_ctxt_info_alloc_dma(trans, fw->sec[fw_idx].data, + fw->sec[fw_idx].len, + &dram->paging[i]); + if (ret) + return ret; + + ctxt_dram->virtual_img[i] = + cpu_to_le64(dram->paging[i].physical); + dram->paging_cnt++; + } + + return 0; +} + +int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, + const struct fw_img *fw) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_context_info *ctxt_info; + struct iwl_context_info_rbd_cfg *rx_cfg; + u32 control_flags = 0, rb_size; + dma_addr_t phys; + int ret; + + ctxt_info = iwl_pcie_ctxt_info_dma_alloc_coherent(trans, + sizeof(*ctxt_info), + &phys); + if (!ctxt_info) + return -ENOMEM; + + trans_pcie->ctxt_info_dma_addr = phys; + + ctxt_info->version.version = 0; + ctxt_info->version.mac_id = + cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV)); + /* size is in DWs */ + ctxt_info->version.size = cpu_to_le16(sizeof(*ctxt_info) / 4); + + switch (trans_pcie->rx_buf_size) { + case IWL_AMSDU_2K: + rb_size = IWL_CTXT_INFO_RB_SIZE_2K; + break; + case IWL_AMSDU_4K: + rb_size = IWL_CTXT_INFO_RB_SIZE_4K; + break; + case IWL_AMSDU_8K: + rb_size = IWL_CTXT_INFO_RB_SIZE_8K; + break; + case IWL_AMSDU_12K: + rb_size = IWL_CTXT_INFO_RB_SIZE_16K; + break; + default: + WARN_ON(1); + rb_size = IWL_CTXT_INFO_RB_SIZE_4K; + } + + WARN_ON(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds) > 12); + control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG; + control_flags |= + u32_encode_bits(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds), + IWL_CTXT_INFO_RB_CB_SIZE); + control_flags |= u32_encode_bits(rb_size, IWL_CTXT_INFO_RB_SIZE); + ctxt_info->control.control_flags = cpu_to_le32(control_flags); + + /* initialize RX default queue */ + rx_cfg = &ctxt_info->rbd_cfg; + rx_cfg->free_rbd_addr = cpu_to_le64(trans_pcie->rxq->bd_dma); + rx_cfg->used_rbd_addr = cpu_to_le64(trans_pcie->rxq->used_bd_dma); + rx_cfg->status_wr_ptr = cpu_to_le64(trans_pcie->rxq->rb_stts_dma); + + /* initialize TX command queue */ + ctxt_info->hcmd_cfg.cmd_queue_addr = + cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr); + ctxt_info->hcmd_cfg.cmd_queue_size = + TFD_QUEUE_CB_SIZE(IWL_CMD_QUEUE_SIZE); + + /* allocate ucode sections in dram and set addresses */ + ret = iwl_pcie_init_fw_sec(trans, fw, &ctxt_info->dram); + if (ret) { + dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info), + ctxt_info, trans_pcie->ctxt_info_dma_addr); + return ret; + } + + trans_pcie->ctxt_info = ctxt_info; + + iwl_enable_fw_load_int_ctx_info(trans); + + /* Configure debug, if exists */ + if (iwl_pcie_dbg_on(trans)) + iwl_pcie_apply_destination(trans); + + /* kick FW self load */ + iwl_write64(trans, CSR_CTXT_INFO_BA, trans_pcie->ctxt_info_dma_addr); + + /* Context info will be released upon alive or failure to get one */ + + return 0; +} + +void iwl_pcie_ctxt_info_free(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + if (!trans_pcie->ctxt_info) + return; + + dma_free_coherent(trans->dev, 
sizeof(*trans_pcie->ctxt_info), + trans_pcie->ctxt_info, + trans_pcie->ctxt_info_dma_addr); + trans_pcie->ctxt_info_dma_addr = 0; + trans_pcie->ctxt_info = NULL; + + iwl_pcie_ctxt_info_free_fw_img(trans); +} diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c new file mode 100644 index 000000000..4d4db5f68 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -0,0 +1,1789 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2005-2014, 2018-2021 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/acpi.h> + +#include "fw/acpi.h" + +#include "iwl-trans.h" +#include "iwl-drv.h" +#include "iwl-prph.h" +#include "internal.h" + +#define TRANS_CFG_MARKER BIT(0) +#define _IS_A(cfg, _struct) __builtin_types_compatible_p(typeof(cfg), \ + struct _struct) +extern int _invalid_type; +#define _TRANS_CFG_MARKER(cfg) \ + (__builtin_choose_expr(_IS_A(cfg, iwl_cfg_trans_params), \ + TRANS_CFG_MARKER, \ + __builtin_choose_expr(_IS_A(cfg, iwl_cfg), 0, _invalid_type))) +#define _ASSIGN_CFG(cfg) (_TRANS_CFG_MARKER(cfg) + (kernel_ulong_t)&(cfg)) + +#define IWL_PCI_DEVICE(dev, subdev, cfg) \ + .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \ + .subvendor = PCI_ANY_ID, .subdevice = (subdev), \ + .driver_data = _ASSIGN_CFG(cfg) + +/* Hardware specific file defines the PCI IDs table for that hardware module */ +static const struct pci_device_id iwl_hw_card_ids[] = { +#if IS_ENABLED(CONFIG_IWLDVM) + {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1304, iwl5100_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1205, iwl5100_bgn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1305, iwl5100_bgn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1206, iwl5100_abg_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1306, iwl5100_abg_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1221, iwl5100_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1321, iwl5100_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1224, iwl5100_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1324, iwl5100_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1225, iwl5100_bgn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1325, iwl5100_bgn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1226, iwl5100_abg_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1326, iwl5100_abg_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1211, iwl5100_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1311, iwl5100_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1214, iwl5100_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1314, iwl5100_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1215, iwl5100_bgn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1315, iwl5100_bgn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1216, iwl5100_abg_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1316, iwl5100_abg_cfg)}, /* Half Mini Card */ + +/* 5300 Series WiFi */ + {IWL_PCI_DEVICE(0x4235, 0x1021, iwl5300_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1121, iwl5300_agn_cfg)}, /* 
Half Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1024, iwl5300_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1124, iwl5300_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1001, iwl5300_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1101, iwl5300_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1004, iwl5300_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1104, iwl5300_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4236, 0x1011, iwl5300_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4236, 0x1111, iwl5300_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4236, 0x1014, iwl5300_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4236, 0x1114, iwl5300_agn_cfg)}, /* Half Mini Card */ + +/* 5350 Series WiFi/WiMax */ + {IWL_PCI_DEVICE(0x423A, 0x1001, iwl5350_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423A, 0x1021, iwl5350_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423B, 0x1011, iwl5350_agn_cfg)}, /* Mini Card */ + +/* 5150 Series Wifi/WiMax */ + {IWL_PCI_DEVICE(0x423C, 0x1201, iwl5150_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423C, 0x1301, iwl5150_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x423C, 0x1206, iwl5150_abg_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x423C, 0x1326, iwl5150_abg_cfg)}, /* Half Mini Card */ + + {IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x423D, 0x1216, iwl5150_abg_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423D, 0x1316, iwl5150_abg_cfg)}, /* Half Mini Card */ + +/* 6x00 Series */ + {IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)}, + {IWL_PCI_DEVICE(0x422B, 0x1108, iwl6000_3agn_cfg)}, + {IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)}, + {IWL_PCI_DEVICE(0x422B, 0x1128, iwl6000_3agn_cfg)}, + {IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)}, + {IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)}, + {IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)}, + {IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)}, + {IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)}, + {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)}, + {IWL_PCI_DEVICE(0x4238, 0x1118, iwl6000_3agn_cfg)}, + {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)}, + {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)}, + +/* 6x05 Series */ + {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)}, + {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)}, + {IWL_PCI_DEVICE(0x0082, 0x1308, iwl6005_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)}, + {IWL_PCI_DEVICE(0x0082, 0x1328, iwl6005_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0085, 0x1318, iwl6005_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)}, + {IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)}, + {IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)}, + {IWL_PCI_DEVICE(0x0085, 0xC228, iwl6005_2agn_sff_cfg)}, + {IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_2agn_d_cfg)}, + {IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_mow1_cfg)},/* low 5GHz active */ + {IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_mow2_cfg)},/* high 5GHz active */ + +/* 6x30 Series */ + {IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)}, + 
{IWL_PCI_DEVICE(0x008A, 0x5307, iwl1030_bg_cfg)}, + {IWL_PCI_DEVICE(0x008A, 0x5325, iwl1030_bgn_cfg)}, + {IWL_PCI_DEVICE(0x008A, 0x5327, iwl1030_bg_cfg)}, + {IWL_PCI_DEVICE(0x008B, 0x5315, iwl1030_bgn_cfg)}, + {IWL_PCI_DEVICE(0x008B, 0x5317, iwl1030_bg_cfg)}, + {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6030_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0090, 0x5215, iwl6030_2bgn_cfg)}, + {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6030_2abg_cfg)}, + {IWL_PCI_DEVICE(0x0091, 0x5201, iwl6030_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0091, 0x5205, iwl6030_2bgn_cfg)}, + {IWL_PCI_DEVICE(0x0091, 0x5206, iwl6030_2abg_cfg)}, + {IWL_PCI_DEVICE(0x0091, 0x5207, iwl6030_2bg_cfg)}, + {IWL_PCI_DEVICE(0x0091, 0x5221, iwl6030_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0091, 0x5225, iwl6030_2bgn_cfg)}, + {IWL_PCI_DEVICE(0x0091, 0x5226, iwl6030_2abg_cfg)}, + +/* 6x50 WiFi/WiMax Series */ + {IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0087, 0x1306, iwl6050_2abg_cfg)}, + {IWL_PCI_DEVICE(0x0087, 0x1321, iwl6050_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0087, 0x1326, iwl6050_2abg_cfg)}, + {IWL_PCI_DEVICE(0x0089, 0x1311, iwl6050_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0089, 0x1316, iwl6050_2abg_cfg)}, + +/* 6150 WiFi/WiMax Series */ + {IWL_PCI_DEVICE(0x0885, 0x1305, iwl6150_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0885, 0x1307, iwl6150_bg_cfg)}, + {IWL_PCI_DEVICE(0x0885, 0x1325, iwl6150_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0885, 0x1327, iwl6150_bg_cfg)}, + {IWL_PCI_DEVICE(0x0886, 0x1315, iwl6150_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0886, 0x1317, iwl6150_bg_cfg)}, + +/* 1000 Series WiFi */ + {IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1305, iwl1000_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1225, iwl1000_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1325, iwl1000_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0084, 0x1215, iwl1000_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0084, 0x1315, iwl1000_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1206, iwl1000_bg_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1306, iwl1000_bg_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1226, iwl1000_bg_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1326, iwl1000_bg_cfg)}, + {IWL_PCI_DEVICE(0x0084, 0x1216, iwl1000_bg_cfg)}, + {IWL_PCI_DEVICE(0x0084, 0x1316, iwl1000_bg_cfg)}, + +/* 100 Series WiFi */ + {IWL_PCI_DEVICE(0x08AE, 0x1005, iwl100_bgn_cfg)}, + {IWL_PCI_DEVICE(0x08AE, 0x1007, iwl100_bg_cfg)}, + {IWL_PCI_DEVICE(0x08AF, 0x1015, iwl100_bgn_cfg)}, + {IWL_PCI_DEVICE(0x08AF, 0x1017, iwl100_bg_cfg)}, + {IWL_PCI_DEVICE(0x08AE, 0x1025, iwl100_bgn_cfg)}, + {IWL_PCI_DEVICE(0x08AE, 0x1027, iwl100_bg_cfg)}, + +/* 130 Series WiFi */ + {IWL_PCI_DEVICE(0x0896, 0x5005, iwl130_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0896, 0x5007, iwl130_bg_cfg)}, + {IWL_PCI_DEVICE(0x0897, 0x5015, iwl130_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0897, 0x5017, iwl130_bg_cfg)}, + {IWL_PCI_DEVICE(0x0896, 0x5025, iwl130_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0896, 0x5027, iwl130_bg_cfg)}, + +/* 2x00 Series */ + {IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)}, + {IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)}, + {IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)}, + {IWL_PCI_DEVICE(0x0890, 0x4822, iwl2000_2bgn_d_cfg)}, + +/* 2x30 Series */ + {IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)}, + {IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)}, + {IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)}, + +/* 6x35 Series */ + {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)}, + {IWL_PCI_DEVICE(0x088E, 0x406A, iwl6035_2agn_sff_cfg)}, + {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)}, + {IWL_PCI_DEVICE(0x088F, 0x426A, iwl6035_2agn_sff_cfg)}, + {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)}, + 
{IWL_PCI_DEVICE(0x088E, 0x446A, iwl6035_2agn_sff_cfg)}, + {IWL_PCI_DEVICE(0x088E, 0x4860, iwl6035_2agn_cfg)}, + {IWL_PCI_DEVICE(0x088F, 0x5260, iwl6035_2agn_cfg)}, + +/* 105 Series */ + {IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0895, 0x0222, iwl105_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0894, 0x0422, iwl105_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0894, 0x0822, iwl105_bgn_d_cfg)}, + +/* 135 Series */ + {IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)}, +#endif /* CONFIG_IWLDVM */ + +#if IS_ENABLED(CONFIG_IWLMVM) +/* 7260 Series */ + {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4C60, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4C70, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x406A, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4162, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0x4270, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0x4272, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0x4260, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0x426A, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0x4262, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4470, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4472, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4460, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x446A, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg_high_temp)}, + {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg_high_temp)}, + {IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg_high_temp)}, + {IWL_PCI_DEVICE(0x08B1, 0x4570, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4560, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0x4370, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0x4360, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x5070, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x5072, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x5170, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x5770, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x402A, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC072, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC170, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC060, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC06A, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC160, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC062, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC162, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC770, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC760, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xCC70, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xCC60, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0xC272, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0xC26A, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0xC262, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC470, 
iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC472, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC460, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC462, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC570, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC560, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0xC370, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC360, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC020, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC02A, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0xC220, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC420, iwl7260_2n_cfg)}, + +/* 3160 Series */ + {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x0072, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x0170, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x0172, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x0060, iwl3160_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x0062, iwl3160_n_cfg)}, + {IWL_PCI_DEVICE(0x08B4, 0x0270, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B4, 0x0272, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x0470, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x0472, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B4, 0x0370, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x8072, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x8170, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x8172, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)}, + {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B4, 0x8370, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B4, 0x8272, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x1070, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x1170, iwl3160_2ac_cfg)}, + +/* 3165 Series */ + {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x3166, 0x4212, iwl3165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)}, + +/* 3168 Series */ + {IWL_PCI_DEVICE(0x24FB, 0x2010, iwl3168_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FB, 0x2110, iwl3168_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FB, 0x2050, iwl3168_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FB, 0x2150, iwl3168_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FB, 0x0000, iwl3168_2ac_cfg)}, + +/* 7265 Series */ + {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5100, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5C10, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5510, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2n_cfg)}, + {IWL_PCI_DEVICE(0x095B, 
0x5200, iwl7265_2n_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5102, iwl7265_n_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x900A, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x9210, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x9310, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5420, iwl7265_2n_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5090, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5190, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5F10, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5212, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x520A, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9000, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9400, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9E10, iwl7265_2ac_cfg)}, + +/* 8000 Series */ + {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x10B0, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0130, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x1130, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0132, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x1132, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0110, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x01F0, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0012, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x1012, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x1110, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0050, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0250, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x1050, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0150, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x1150, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0xD0B0, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0xB0B0, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x8110, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x9110, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x8130, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x9130, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x8132, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x9132, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x8050, iwl8260_2ac_cfg)}, + 
{IWL_PCI_DEVICE(0x24F3, 0x8150, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x9050, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x9150, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0044, iwl8260_2n_cfg)}, + {IWL_PCI_DEVICE(0x24F5, 0x0010, iwl4165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F6, 0x0030, iwl4165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0810, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0910, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0850, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x4010, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x0110, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x1110, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x1130, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x0130, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x1010, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x10D0, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x0050, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x0150, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x9010, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x8110, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x8050, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x8010, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x9110, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x8130, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x0910, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x0930, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x0950, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x0850, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x1014, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x3E02, iwl8275_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x3E01, iwl8275_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x1012, iwl8275_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x0012, iwl8275_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x0014, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)}, + +/* 9000 Series */ + {IWL_PCI_DEVICE(0x2526, PCI_ANY_ID, iwl9000_trans_cfg)}, + {IWL_PCI_DEVICE(0x271B, PCI_ANY_ID, iwl9000_trans_cfg)}, + {IWL_PCI_DEVICE(0x271C, PCI_ANY_ID, iwl9000_trans_cfg)}, + {IWL_PCI_DEVICE(0x30DC, PCI_ANY_ID, iwl9560_long_latency_trans_cfg)}, + {IWL_PCI_DEVICE(0x31DC, PCI_ANY_ID, iwl9560_shared_clk_trans_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, PCI_ANY_ID, iwl9560_trans_cfg)}, + {IWL_PCI_DEVICE(0xA370, PCI_ANY_ID, iwl9560_trans_cfg)}, + +/* Qu devices */ + {IWL_PCI_DEVICE(0x02F0, PCI_ANY_ID, iwl_qu_trans_cfg)}, + {IWL_PCI_DEVICE(0x06F0, PCI_ANY_ID, iwl_qu_trans_cfg)}, + + {IWL_PCI_DEVICE(0x34F0, PCI_ANY_ID, iwl_qu_medium_latency_trans_cfg)}, + {IWL_PCI_DEVICE(0x3DF0, PCI_ANY_ID, iwl_qu_medium_latency_trans_cfg)}, + {IWL_PCI_DEVICE(0x4DF0, PCI_ANY_ID, iwl_qu_medium_latency_trans_cfg)}, + + {IWL_PCI_DEVICE(0x43F0, PCI_ANY_ID, iwl_qu_long_latency_trans_cfg)}, + {IWL_PCI_DEVICE(0xA0F0, PCI_ANY_ID, iwl_qu_long_latency_trans_cfg)}, + + {IWL_PCI_DEVICE(0x2720, PCI_ANY_ID, iwl_qnj_trans_cfg)}, + + {IWL_PCI_DEVICE(0x2723, PCI_ANY_ID, iwl_ax200_trans_cfg)}, + +/* So devices */ + {IWL_PCI_DEVICE(0x2725, PCI_ANY_ID, iwl_so_trans_cfg)}, + {IWL_PCI_DEVICE(0x2726, PCI_ANY_ID, iwl_snj_trans_cfg)}, + {IWL_PCI_DEVICE(0x7A70, PCI_ANY_ID, iwl_so_long_latency_imr_trans_cfg)}, + {IWL_PCI_DEVICE(0x7AF0, PCI_ANY_ID, iwl_so_trans_cfg)}, + {IWL_PCI_DEVICE(0x51F0, 
PCI_ANY_ID, iwl_so_long_latency_trans_cfg)}, + {IWL_PCI_DEVICE(0x51F1, PCI_ANY_ID, iwl_so_long_latency_imr_trans_cfg)}, + {IWL_PCI_DEVICE(0x51F1, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)}, + {IWL_PCI_DEVICE(0x54F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)}, + {IWL_PCI_DEVICE(0x7F70, PCI_ANY_ID, iwl_so_trans_cfg)}, + +/* Ma devices */ + {IWL_PCI_DEVICE(0x2729, PCI_ANY_ID, iwl_ma_trans_cfg)}, + {IWL_PCI_DEVICE(0x7E40, PCI_ANY_ID, iwl_ma_trans_cfg)}, + +/* Bz devices */ + {IWL_PCI_DEVICE(0x2727, PCI_ANY_ID, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, PCI_ANY_ID, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0x7740, PCI_ANY_ID, iwl_bz_trans_cfg)}, +#endif /* CONFIG_IWLMVM */ + + {0} +}; +MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); + +#define _IWL_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \ + _rf_id, _no_160, _cores, _cdb, _jacket, _cfg, _name) \ + { .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \ + .name = _name, .mac_type = _mac_type, .rf_type = _rf_type, \ + .no_160 = _no_160, .cores = _cores, .rf_id = _rf_id, \ + .mac_step = _mac_step, .cdb = _cdb, .jacket = _jacket } + +#define IWL_DEV_INFO(_device, _subdevice, _cfg, _name) \ + _IWL_DEV_INFO(_device, _subdevice, IWL_CFG_ANY, IWL_CFG_ANY, \ + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, \ + IWL_CFG_ANY, IWL_CFG_ANY, _cfg, _name) + +static const struct iwl_dev_info iwl_dev_info_table[] = { +#if IS_ENABLED(CONFIG_IWLMVM) +/* 9000 */ + IWL_DEV_INFO(0x2526, 0x1550, iwl9260_2ac_cfg, iwl9260_killer_1550_name), + IWL_DEV_INFO(0x2526, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name), + IWL_DEV_INFO(0x2526, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name), + IWL_DEV_INFO(0x30DC, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name), + IWL_DEV_INFO(0x30DC, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name), + IWL_DEV_INFO(0x31DC, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name), + IWL_DEV_INFO(0x31DC, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name), + IWL_DEV_INFO(0xA370, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name), + IWL_DEV_INFO(0xA370, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name), + IWL_DEV_INFO(0x54F0, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name), + IWL_DEV_INFO(0x54F0, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name), + IWL_DEV_INFO(0x51F0, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name), + IWL_DEV_INFO(0x51F0, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_160_name), + IWL_DEV_INFO(0x51F0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), + IWL_DEV_INFO(0x51F0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), + IWL_DEV_INFO(0x51F1, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), + IWL_DEV_INFO(0x54F0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), + IWL_DEV_INFO(0x54F0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), + IWL_DEV_INFO(0x7A70, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), + IWL_DEV_INFO(0x7A70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), + IWL_DEV_INFO(0x7AF0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), + IWL_DEV_INFO(0x7AF0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), + + IWL_DEV_INFO(0x271C, 0x0214, iwl9260_2ac_cfg, iwl9260_1_name), + IWL_DEV_INFO(0x7E40, 0x1691, iwl_cfg_ma_a0_gf4_a0, iwl_ax411_killer_1690s_name), + IWL_DEV_INFO(0x7E40, 0x1692, iwl_cfg_ma_a0_gf4_a0, 
iwl_ax411_killer_1690i_name), + +/* AX200 */ + IWL_DEV_INFO(0x2723, IWL_CFG_ANY, iwl_ax200_cfg_cc, iwl_ax200_name), + IWL_DEV_INFO(0x2723, 0x1653, iwl_ax200_cfg_cc, iwl_ax200_killer_1650w_name), + IWL_DEV_INFO(0x2723, 0x1654, iwl_ax200_cfg_cc, iwl_ax200_killer_1650x_name), + + /* Qu with Hr */ + IWL_DEV_INFO(0x43F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL), + IWL_DEV_INFO(0x43F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL), + IWL_DEV_INFO(0x43F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL), + IWL_DEV_INFO(0x43F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL), + IWL_DEV_INFO(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650s_name), + IWL_DEV_INFO(0x43F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650i_name), + IWL_DEV_INFO(0x43F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL), + IWL_DEV_INFO(0x43F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL), + IWL_DEV_INFO(0xA0F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL), + IWL_DEV_INFO(0xA0F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL), + IWL_DEV_INFO(0xA0F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL), + IWL_DEV_INFO(0xA0F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL), + IWL_DEV_INFO(0xA0F0, 0x0A10, iwl_ax201_cfg_qu_hr, NULL), + IWL_DEV_INFO(0xA0F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL), + IWL_DEV_INFO(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL), + IWL_DEV_INFO(0xA0F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL), + IWL_DEV_INFO(0xA0F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL), + IWL_DEV_INFO(0xA0F0, 0x6074, iwl_ax201_cfg_qu_hr, NULL), + IWL_DEV_INFO(0x02F0, 0x0070, iwl_ax201_cfg_quz_hr, NULL), + IWL_DEV_INFO(0x02F0, 0x0074, iwl_ax201_cfg_quz_hr, NULL), + IWL_DEV_INFO(0x02F0, 0x6074, iwl_ax201_cfg_quz_hr, NULL), + IWL_DEV_INFO(0x02F0, 0x0078, iwl_ax201_cfg_quz_hr, NULL), + IWL_DEV_INFO(0x02F0, 0x007C, iwl_ax201_cfg_quz_hr, NULL), + IWL_DEV_INFO(0x02F0, 0x0310, iwl_ax201_cfg_quz_hr, NULL), + IWL_DEV_INFO(0x02F0, 0x1651, iwl_ax1650s_cfg_quz_hr, NULL), + IWL_DEV_INFO(0x02F0, 0x1652, iwl_ax1650i_cfg_quz_hr, NULL), + IWL_DEV_INFO(0x02F0, 0x2074, iwl_ax201_cfg_quz_hr, NULL), + IWL_DEV_INFO(0x02F0, 0x4070, iwl_ax201_cfg_quz_hr, NULL), + IWL_DEV_INFO(0x06F0, 0x0070, iwl_ax201_cfg_quz_hr, NULL), + IWL_DEV_INFO(0x06F0, 0x0074, iwl_ax201_cfg_quz_hr, NULL), + IWL_DEV_INFO(0x06F0, 0x0078, iwl_ax201_cfg_quz_hr, NULL), + IWL_DEV_INFO(0x06F0, 0x007C, iwl_ax201_cfg_quz_hr, NULL), + IWL_DEV_INFO(0x06F0, 0x0310, iwl_ax201_cfg_quz_hr, NULL), + IWL_DEV_INFO(0x06F0, 0x1651, iwl_ax1650s_cfg_quz_hr, NULL), + IWL_DEV_INFO(0x06F0, 0x1652, iwl_ax1650i_cfg_quz_hr, NULL), + IWL_DEV_INFO(0x06F0, 0x2074, iwl_ax201_cfg_quz_hr, NULL), + IWL_DEV_INFO(0x06F0, 0x4070, iwl_ax201_cfg_quz_hr, NULL), + IWL_DEV_INFO(0x34F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL), + IWL_DEV_INFO(0x34F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL), + IWL_DEV_INFO(0x34F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL), + IWL_DEV_INFO(0x34F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL), + IWL_DEV_INFO(0x34F0, 0x0310, iwl_ax201_cfg_qu_hr, NULL), + IWL_DEV_INFO(0x34F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL), + IWL_DEV_INFO(0x34F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL), + IWL_DEV_INFO(0x34F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL), + IWL_DEV_INFO(0x34F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL), + + IWL_DEV_INFO(0x3DF0, 0x0070, iwl_ax201_cfg_qu_hr, NULL), + IWL_DEV_INFO(0x3DF0, 0x0074, iwl_ax201_cfg_qu_hr, NULL), + IWL_DEV_INFO(0x3DF0, 0x0078, iwl_ax201_cfg_qu_hr, NULL), + IWL_DEV_INFO(0x3DF0, 0x007C, iwl_ax201_cfg_qu_hr, NULL), + IWL_DEV_INFO(0x3DF0, 0x0310, iwl_ax201_cfg_qu_hr, NULL), + IWL_DEV_INFO(0x3DF0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL), + IWL_DEV_INFO(0x3DF0, 0x1652, 
killer1650i_2ax_cfg_qu_b0_hr_b0, NULL), + IWL_DEV_INFO(0x3DF0, 0x2074, iwl_ax201_cfg_qu_hr, NULL), + IWL_DEV_INFO(0x3DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL), + + IWL_DEV_INFO(0x4DF0, 0x0070, iwl_ax201_cfg_qu_hr, NULL), + IWL_DEV_INFO(0x4DF0, 0x0074, iwl_ax201_cfg_qu_hr, NULL), + IWL_DEV_INFO(0x4DF0, 0x0078, iwl_ax201_cfg_qu_hr, NULL), + IWL_DEV_INFO(0x4DF0, 0x007C, iwl_ax201_cfg_qu_hr, NULL), + IWL_DEV_INFO(0x4DF0, 0x0310, iwl_ax201_cfg_qu_hr, NULL), + IWL_DEV_INFO(0x4DF0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL), + IWL_DEV_INFO(0x4DF0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL), + IWL_DEV_INFO(0x4DF0, 0x2074, iwl_ax201_cfg_qu_hr, NULL), + IWL_DEV_INFO(0x4DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL), + IWL_DEV_INFO(0x4DF0, 0x6074, iwl_ax201_cfg_qu_hr, NULL), + + /* So with HR */ + IWL_DEV_INFO(0x2725, 0x0090, iwlax211_2ax_cfg_so_gf_a0, NULL), + IWL_DEV_INFO(0x2725, 0x0020, iwlax210_2ax_cfg_ty_gf_a0, NULL), + IWL_DEV_INFO(0x2725, 0x2020, iwlax210_2ax_cfg_ty_gf_a0, NULL), + IWL_DEV_INFO(0x2725, 0x0024, iwlax210_2ax_cfg_ty_gf_a0, NULL), + IWL_DEV_INFO(0x2725, 0x0310, iwlax210_2ax_cfg_ty_gf_a0, NULL), + IWL_DEV_INFO(0x2725, 0x0510, iwlax210_2ax_cfg_ty_gf_a0, NULL), + IWL_DEV_INFO(0x2725, 0x0A10, iwlax210_2ax_cfg_ty_gf_a0, NULL), + IWL_DEV_INFO(0x2725, 0xE020, iwlax210_2ax_cfg_ty_gf_a0, NULL), + IWL_DEV_INFO(0x2725, 0xE024, iwlax210_2ax_cfg_ty_gf_a0, NULL), + IWL_DEV_INFO(0x2725, 0x4020, iwlax210_2ax_cfg_ty_gf_a0, NULL), + IWL_DEV_INFO(0x2725, 0x6020, iwlax210_2ax_cfg_ty_gf_a0, NULL), + IWL_DEV_INFO(0x2725, 0x6024, iwlax210_2ax_cfg_ty_gf_a0, NULL), + IWL_DEV_INFO(0x2725, 0x1673, iwlax210_2ax_cfg_ty_gf_a0, iwl_ax210_killer_1675w_name), + IWL_DEV_INFO(0x2725, 0x1674, iwlax210_2ax_cfg_ty_gf_a0, iwl_ax210_killer_1675x_name), + IWL_DEV_INFO(0x7A70, 0x0090, iwlax211_2ax_cfg_so_gf_a0_long, NULL), + IWL_DEV_INFO(0x7A70, 0x0098, iwlax211_2ax_cfg_so_gf_a0_long, NULL), + IWL_DEV_INFO(0x7A70, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0_long, NULL), + IWL_DEV_INFO(0x7A70, 0x0310, iwlax211_2ax_cfg_so_gf_a0_long, NULL), + IWL_DEV_INFO(0x7A70, 0x0510, iwlax211_2ax_cfg_so_gf_a0_long, NULL), + IWL_DEV_INFO(0x7A70, 0x0A10, iwlax211_2ax_cfg_so_gf_a0_long, NULL), + IWL_DEV_INFO(0x7AF0, 0x0090, iwlax211_2ax_cfg_so_gf_a0, NULL), + IWL_DEV_INFO(0x7AF0, 0x0098, iwlax211_2ax_cfg_so_gf_a0, NULL), + IWL_DEV_INFO(0x7AF0, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0, NULL), + IWL_DEV_INFO(0x7AF0, 0x0310, iwlax211_2ax_cfg_so_gf_a0, NULL), + IWL_DEV_INFO(0x7AF0, 0x0510, iwlax211_2ax_cfg_so_gf_a0, NULL), + IWL_DEV_INFO(0x7AF0, 0x0A10, iwlax211_2ax_cfg_so_gf_a0, NULL), + + /* So with JF */ + IWL_DEV_INFO(0x7A70, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name), + IWL_DEV_INFO(0x7A70, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_160_name), + IWL_DEV_INFO(0x7AF0, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name), + IWL_DEV_INFO(0x7AF0, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_160_name), + + /* SnJ with HR */ + IWL_DEV_INFO(0x2725, 0x00B0, iwlax411_2ax_cfg_sosnj_gf4_a0, NULL), + IWL_DEV_INFO(0x2726, 0x0090, iwlax211_cfg_snj_gf_a0, NULL), + IWL_DEV_INFO(0x2726, 0x0098, iwlax211_cfg_snj_gf_a0, NULL), + IWL_DEV_INFO(0x2726, 0x00B0, iwlax411_2ax_cfg_sosnj_gf4_a0, NULL), + IWL_DEV_INFO(0x2726, 0x00B4, iwlax411_2ax_cfg_sosnj_gf4_a0, NULL), + IWL_DEV_INFO(0x2726, 0x0510, iwlax211_cfg_snj_gf_a0, NULL), + IWL_DEV_INFO(0x2726, 0x1651, iwl_cfg_snj_hr_b0, iwl_ax201_killer_1650s_name), + IWL_DEV_INFO(0x2726, 0x1652, iwl_cfg_snj_hr_b0, iwl_ax201_killer_1650i_name), + IWL_DEV_INFO(0x2726, 0x1691, 
iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690s_name), + IWL_DEV_INFO(0x2726, 0x1692, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690i_name), + IWL_DEV_INFO(0x7F70, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), + IWL_DEV_INFO(0x7F70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), + + /* SO with GF2 */ + IWL_DEV_INFO(0x2726, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), + IWL_DEV_INFO(0x2726, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), + IWL_DEV_INFO(0x51F0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), + IWL_DEV_INFO(0x51F0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), + IWL_DEV_INFO(0x51F1, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), + IWL_DEV_INFO(0x51F1, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), + IWL_DEV_INFO(0x54F0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), + IWL_DEV_INFO(0x54F0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), + IWL_DEV_INFO(0x7A70, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), + IWL_DEV_INFO(0x7A70, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), + IWL_DEV_INFO(0x7AF0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), + IWL_DEV_INFO(0x7AF0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), + IWL_DEV_INFO(0x7F70, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), + IWL_DEV_INFO(0x7F70, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), + + /* MA with GF2 */ + IWL_DEV_INFO(0x7E40, 0x1671, iwl_cfg_ma_a0_gf_a0, iwl_ax211_killer_1675s_name), + IWL_DEV_INFO(0x7E40, 0x1672, iwl_cfg_ma_a0_gf_a0, iwl_ax211_killer_1675i_name), + + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9560_2ac_cfg_soc, iwl9461_160_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9560_2ac_cfg_soc, iwl9461_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9560_2ac_cfg_soc, iwl9462_160_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9560_2ac_cfg_soc, iwl9462_name), + + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9560_2ac_cfg_soc, iwl9560_160_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9560_2ac_cfg_soc, iwl9560_name), + + _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9260_2ac_cfg, iwl9461_160_name), + _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9260_2ac_cfg, iwl9461_name), 
+ _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9260_2ac_cfg, iwl9462_160_name), + _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9260_2ac_cfg, iwl9462_name), + + _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9260_2ac_cfg, iwl9270_160_name), + _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9260_2ac_cfg, iwl9270_name), + + _IWL_DEV_INFO(0x271B, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9260_2ac_cfg, iwl9162_160_name), + _IWL_DEV_INFO(0x271B, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9260_2ac_cfg, iwl9162_name), + + _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9260_2ac_cfg, iwl9260_160_name), + _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9260_2ac_cfg, iwl9260_name), + +/* Qu with Jf */ + /* Qu B step */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9560_qu_b0_jf_b0_cfg, iwl9461_160_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9560_qu_b0_jf_b0_cfg, iwl9461_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9560_qu_b0_jf_b0_cfg, iwl9462_160_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9560_qu_b0_jf_b0_cfg, iwl9462_name), + + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9560_qu_b0_jf_b0_cfg, iwl9560_160_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9560_qu_b0_jf_b0_cfg, iwl9560_name), + + _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551, + IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550s_name), + _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552, + IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + 
iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550i_name), + + /* Qu C step */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9560_qu_c0_jf_b0_cfg, iwl9461_160_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9560_qu_c0_jf_b0_cfg, iwl9461_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9560_qu_c0_jf_b0_cfg, iwl9462_160_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9560_qu_c0_jf_b0_cfg, iwl9462_name), + + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9560_qu_c0_jf_b0_cfg, iwl9560_160_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9560_qu_c0_jf_b0_cfg, iwl9560_name), + + _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551, + IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550s_name), + _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552, + IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550i_name), + + /* QuZ */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9560_quz_a0_jf_b0_cfg, iwl9461_160_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9560_quz_a0_jf_b0_cfg, iwl9461_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9560_quz_a0_jf_b0_cfg, iwl9462_160_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9560_quz_a0_jf_b0_cfg, iwl9462_name), + + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9560_quz_a0_jf_b0_cfg, iwl9560_160_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9560_quz_a0_jf_b0_cfg, iwl9560_name), + + _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551, + IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9560_quz_a0_jf_b0_cfg, 
iwl9560_killer_1550s_name), + _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552, + IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550i_name), + + /* QnJ */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9560_qnj_b0_jf_b0_cfg, iwl9461_160_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9560_qnj_b0_jf_b0_cfg, iwl9461_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9560_qnj_b0_jf_b0_cfg, iwl9462_160_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9560_qnj_b0_jf_b0_cfg, iwl9462_name), + + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9560_qnj_b0_jf_b0_cfg, iwl9560_160_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9560_qnj_b0_jf_b0_cfg, iwl9560_name), + + _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551, + IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9560_qnj_b0_jf_b0_cfg, iwl9560_killer_1550s_name), + _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552, + IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl9560_qnj_b0_jf_b0_cfg, iwl9560_killer_1550i_name), + +/* Qu with Hr */ + /* Qu B step */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, + IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl_qu_b0_hr1_b0, iwl_ax101_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl_qu_b0_hr_b0, iwl_ax203_name), + + /* Qu C step */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, + IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl_qu_c0_hr1_b0, iwl_ax101_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl_qu_c0_hr_b0, iwl_ax203_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl_qu_c0_hr_b0, iwl_ax201_name), + + /* QuZ */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl_quz_a0_hr1_b0, iwl_ax101_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_QUZ, 
SILICON_B_STEP, + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl_cfg_quz_a0_hr_b0, iwl_ax203_name), + +/* QnJ with Hr */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl_qnj_b0_hr_b0_cfg, iwl_ax201_name), + +/* SnJ with Jf */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl_cfg_snj_a0_jf_b0, iwl9461_160_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl_cfg_snj_a0_jf_b0, iwl9461_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl_cfg_snj_a0_jf_b0, iwl9462_160_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl_cfg_snj_a0_jf_b0, iwl9462_name), + + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl_cfg_snj_a0_jf_b0, iwl9560_160_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl_cfg_snj_a0_jf_b0, iwl9560_name), + +/* SnJ with Hr */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl_cfg_snj_hr_b0, iwl_ax101_name), + + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl_cfg_snj_hr_b0, iwl_ax201_name), + +/* Ma */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl_cfg_ma_a0_hr_b0, iwl_ax201_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl_cfg_ma_a0_gf_a0, iwl_ax211_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_ANY, + iwl_cfg_ma_a0_gf4_a0, iwl_ax211_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl_cfg_ma_a0_mr_a0, iwl_ax221_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl_cfg_ma_a0_fm_a0, iwl_ax231_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl_cfg_snj_a0_mr_a0, iwl_ax221_name), + +/* So with Hr */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + 
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl_cfg_so_a0_hr_a0, iwl_ax203_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl_cfg_so_a0_hr_a0, iwl_ax101_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl_cfg_so_a0_hr_a0, iwl_ax201_name), + +/* So-F with Hr */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl_cfg_so_a0_hr_a0, iwl_ax203_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl_cfg_so_a0_hr_a0, iwl_ax101_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl_cfg_so_a0_hr_a0, iwl_ax201_name), + +/* So-F with Gf */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_ANY, + iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name), + +/* Bz */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl_cfg_bz_a0_hr_b0, iwl_bz_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl_cfg_bz_a0_gf_a0, iwl_bz_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_ANY, + iwl_cfg_bz_a0_gf4_a0, iwl_bz_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl_cfg_bz_a0_mr_a0, iwl_bz_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl_cfg_bz_a0_fm_a0, iwl_bz_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_NO_JACKET, + iwl_cfg_bz_a0_fm4_a0, iwl_bz_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_GL, SILICON_A_STEP, + IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_NO_JACKET, + iwl_cfg_gl_a0_fm_a0, iwl_bz_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_GL, SILICON_B_STEP, + IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_NO_JACKET, + iwl_cfg_gl_b0_fm_b0, iwl_bz_name), + +/* BZ Z step */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_BZ, SILICON_Z_STEP, + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, + IWL_CFG_ANY, 
IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl_cfg_bz_z0_gf_a0, iwl_bz_name), + +/* BNJ */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_GL, SILICON_A_STEP, + IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_IS_JACKET, + iwl_cfg_bnj_a0_fm_a0, iwl_bz_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_GL, SILICON_B_STEP, + IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_IS_JACKET, + iwl_cfg_bnj_b0_fm_b0, iwl_bz_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_IS_JACKET, + iwl_cfg_bnj_a0_fm4_a0, iwl_bz_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_IS_JACKET, + iwl_cfg_bnj_a0_gf_a0, iwl_bz_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_IS_JACKET, + iwl_cfg_bnj_a0_gf4_a0, iwl_bz_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_IS_JACKET, + iwl_cfg_bnj_a0_hr_b0, iwl_bz_name), + +/* SoF with JF2 */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwlax210_2ax_cfg_so_jf_b0, iwl9560_name), + +/* SoF with JF */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwlax210_2ax_cfg_so_jf_b0, iwl9461_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwlax210_2ax_cfg_so_jf_b0, iwl9462_name), + +/* SoF with JF2 */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwlax210_2ax_cfg_so_jf_b0, iwl9560_name), + +/* SoF with JF */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name), + 
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwlax210_2ax_cfg_so_jf_b0, iwl9461_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwlax210_2ax_cfg_so_jf_b0, iwl9462_name), + +/* So with GF */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_ANY, + iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name), + +/* So with JF2 */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwlax210_2ax_cfg_so_jf_b0, iwl9560_name), + +/* So with JF */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwlax210_2ax_cfg_so_jf_b0, iwl9461_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwlax210_2ax_cfg_so_jf_b0, iwl9462_name), + +/* MsP */ +/* For now we use the same FW as MR, but this will change in the future. 
*/
+	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+		      IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+		      IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY,
+		      IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
+		      iwl_cfg_so_a0_ms_a0, iwl_ax204_name),
+	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+		      IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+		      IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY,
+		      IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
+		      iwl_cfg_so_a0_ms_a0, iwl_ax204_name),
+	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+		      IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
+		      IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY,
+		      IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
+		      iwl_cfg_ma_a0_ms_a0, iwl_ax204_name),
+	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+		      IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
+		      IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY,
+		      IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
+		      iwl_cfg_snj_a0_ms_a0, iwl_ax204_name)
+
+#endif /* CONFIG_IWLMVM */
+};
+
+/*
+ * In case that there is no OTP on the NIC, get the rf id and cdb info
+ * from the prph registers.
+ */
+static int get_crf_id(struct iwl_trans *iwl_trans)
+{
+	int ret = 0;
+	u32 sd_reg_ver_addr;
+	u32 cdb = 0;
+	u32 val;
+
+	if (iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+		sd_reg_ver_addr = SD_REG_VER_GEN2;
+	else
+		sd_reg_ver_addr = SD_REG_VER;
+
+	if (!iwl_trans_grab_nic_access(iwl_trans)) {
+		IWL_ERR(iwl_trans, "Failed to grab nic access before reading crf id\n");
+		ret = -EIO;
+		goto out;
+	}
+
+	/* Enable access to peripheral registers */
+	val = iwl_read_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG);
+	val |= ENABLE_WFPM;
+	iwl_write_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG, val);
+
+	/* Read crf info */
+	val = iwl_read_prph_no_grab(iwl_trans, sd_reg_ver_addr);
+
+	/* Read cdb info (also contains the jacket info, if needed in the future) */
+	cdb = iwl_read_umac_prph_no_grab(iwl_trans, WFPM_OTP_CFG1_ADDR);
+
+	/* Map crf id to rf id */
+	switch (REG_CRF_ID_TYPE(val)) {
+	case REG_CRF_ID_TYPE_JF_1:
+		iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_JF1 << 12);
+		break;
+	case REG_CRF_ID_TYPE_JF_2:
+		iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_JF2 << 12);
+		break;
+	case REG_CRF_ID_TYPE_HR_NONE_CDB:
+		iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_HR1 << 12);
+		break;
+	case REG_CRF_ID_TYPE_HR_CDB:
+		iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_HR2 << 12);
+		break;
+	case REG_CRF_ID_TYPE_GF:
+		iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_GF << 12);
+		break;
+	case REG_CRF_ID_TYPE_MR:
+		iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_MR << 12);
+		break;
+	case REG_CRF_ID_TYPE_FM:
+		iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_FM << 12);
+		break;
+	default:
+		ret = -EIO;
+		IWL_ERR(iwl_trans,
+			"Can't find a correct rfid for crf id 0x%x\n",
+			REG_CRF_ID_TYPE(val));
+		goto out_release;
+
+	}
+
+	/* Set CDB capabilities */
+	if (cdb & BIT(4)) {
+		iwl_trans->hw_rf_id += BIT(28);
+		IWL_INFO(iwl_trans, "Adding cdb to rf id\n");
+	}
+
+	IWL_INFO(iwl_trans, "Detected RF 0x%x from crf id 0x%x\n",
+		 iwl_trans->hw_rf_id, REG_CRF_ID_TYPE(val));
+
+out_release:
+	iwl_trans_release_nic_access(iwl_trans);
+
+out:
+	return ret;
+}
+
+/* PCI registers */
+#define PCI_CFG_RETRY_TIMEOUT 0x041
+
+static const struct iwl_dev_info *
+iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
+		      u16 mac_type, u8 mac_step,
+		      u16 rf_type, u8 cdb, u8 jacket, u8 rf_id, u8 no_160, u8 cores)
+{
+	int num_devices = ARRAY_SIZE(iwl_dev_info_table);
+	int i;
+
+	if (!num_devices)
+		return NULL;
+
+	for (i = num_devices - 1; i >= 0; i--) {
+		const struct iwl_dev_info *dev_info = &iwl_dev_info_table[i];
+
+		if (dev_info->device != (u16)IWL_CFG_ANY &&
+		    dev_info->device != 
device) + continue; + + if (dev_info->subdevice != (u16)IWL_CFG_ANY && + dev_info->subdevice != subsystem_device) + continue; + + if (dev_info->mac_type != (u16)IWL_CFG_ANY && + dev_info->mac_type != mac_type) + continue; + + if (dev_info->mac_step != (u8)IWL_CFG_ANY && + dev_info->mac_step != mac_step) + continue; + + if (dev_info->rf_type != (u16)IWL_CFG_ANY && + dev_info->rf_type != rf_type) + continue; + + if (dev_info->cdb != (u8)IWL_CFG_ANY && + dev_info->cdb != cdb) + continue; + + if (dev_info->jacket != (u8)IWL_CFG_ANY && + dev_info->jacket != jacket) + continue; + + if (dev_info->rf_id != (u8)IWL_CFG_ANY && + dev_info->rf_id != rf_id) + continue; + + if (dev_info->no_160 != (u8)IWL_CFG_ANY && + dev_info->no_160 != no_160) + continue; + + if (dev_info->cores != (u8)IWL_CFG_ANY && + dev_info->cores != cores) + continue; + + return dev_info; + } + + return NULL; +} + +static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + const struct iwl_cfg_trans_params *trans; + const struct iwl_cfg *cfg_7265d __maybe_unused = NULL; + const struct iwl_dev_info *dev_info; + struct iwl_trans *iwl_trans; + struct iwl_trans_pcie *trans_pcie; + int ret; + const struct iwl_cfg *cfg; + + trans = (void *)(ent->driver_data & ~TRANS_CFG_MARKER); + + /* + * This is needed for backwards compatibility with the old + * tables, so we don't need to change all the config structs + * at the same time. The cfg is used to compare with the old + * full cfg structs. + */ + cfg = (void *)(ent->driver_data & ~TRANS_CFG_MARKER); + + /* make sure trans is the first element in iwl_cfg */ + BUILD_BUG_ON(offsetof(struct iwl_cfg, trans)); + + iwl_trans = iwl_trans_pcie_alloc(pdev, ent, trans); + if (IS_ERR(iwl_trans)) + return PTR_ERR(iwl_trans); + + trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans); + + /* + * Let's try to grab NIC access early here. Sometimes, NICs may + * fail to initialize, and if that happens it's better if we see + * issues early on (and can reprobe, per the logic inside), than + * first trying to load the firmware etc. and potentially only + * detecting any problems when the first interface is brought up. + */ + ret = iwl_pcie_prepare_card_hw(iwl_trans); + if (!ret) { + ret = iwl_finish_nic_init(iwl_trans); + if (ret) + goto out_free_trans; + if (iwl_trans_grab_nic_access(iwl_trans)) { + /* all good */ + iwl_trans_release_nic_access(iwl_trans); + } else { + ret = -EIO; + goto out_free_trans; + } + } + + iwl_trans->hw_rf_id = iwl_read32(iwl_trans, CSR_HW_RF_ID); + + /* + * The RF_ID is set to zero in blank OTP so read version to + * extract the RF_ID. + * This is relevant only for family 9000 and up. 
+ */ + if (iwl_trans->trans_cfg->rf_id && + iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000 && + !CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) && get_crf_id(iwl_trans)) { + ret = -EINVAL; + goto out_free_trans; + } + + dev_info = iwl_pci_find_dev_info(pdev->device, pdev->subsystem_device, + CSR_HW_REV_TYPE(iwl_trans->hw_rev), + iwl_trans->hw_rev_step, + CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id), + CSR_HW_RFID_IS_CDB(iwl_trans->hw_rf_id), + CSR_HW_RFID_IS_JACKET(iwl_trans->hw_rf_id), + IWL_SUBDEVICE_RF_ID(pdev->subsystem_device), + IWL_SUBDEVICE_NO_160(pdev->subsystem_device), + IWL_SUBDEVICE_CORES(pdev->subsystem_device)); + + if (dev_info) { + iwl_trans->cfg = dev_info->cfg; + iwl_trans->name = dev_info->name; + } + +#if IS_ENABLED(CONFIG_IWLMVM) + + /* + * Workaround for problematic SnJ device: sometimes when + * certain RF modules are connected to SnJ, the device ID + * changes to QnJ's ID. So we are using QnJ's trans_cfg until + * here. But if we detect that the MAC type is actually SnJ, + * we should switch to it here to avoid problems later. + */ + if (CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_SNJ) + iwl_trans->trans_cfg = &iwl_so_trans_cfg; + + /* + * special-case 7265D, it has the same PCI IDs. + * + * Note that because we already pass the cfg to the transport above, + * all the parameters that the transport uses must, until that is + * changed, be identical to the ones in the 7265D configuration. + */ + if (cfg == &iwl7265_2ac_cfg) + cfg_7265d = &iwl7265d_2ac_cfg; + else if (cfg == &iwl7265_2n_cfg) + cfg_7265d = &iwl7265d_2n_cfg; + else if (cfg == &iwl7265_n_cfg) + cfg_7265d = &iwl7265d_n_cfg; + if (cfg_7265d && + (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) + iwl_trans->cfg = cfg_7265d; + + /* + * This is a hack to switch from Qu B0 to Qu C0. We need to + * do this for all cfgs that use Qu B0, except for those using + * Jf, which have already been moved to the new table. The + * rest must be removed once we convert Qu with Hr as well. + */ + if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QU_C0) { + if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr) + iwl_trans->cfg = &iwl_ax201_cfg_qu_c0_hr_b0; + else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0) + iwl_trans->cfg = &killer1650s_2ax_cfg_qu_c0_hr_b0; + else if (iwl_trans->cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0) + iwl_trans->cfg = &killer1650i_2ax_cfg_qu_c0_hr_b0; + } + + /* same thing for QuZ... */ + if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) { + if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr) + iwl_trans->cfg = &iwl_ax201_cfg_quz_hr; + else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0) + iwl_trans->cfg = &iwl_ax1650s_cfg_quz_hr; + else if (iwl_trans->cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0) + iwl_trans->cfg = &iwl_ax1650i_cfg_quz_hr; + } + +#endif + /* + * If we didn't set the cfg yet, the PCI ID table entry should have + * been a full config - if yes, use it, otherwise fail. 
+ */ + if (!iwl_trans->cfg) { + if (ent->driver_data & TRANS_CFG_MARKER) { + pr_err("No config found for PCI dev %04x/%04x, rev=0x%x, rfid=0x%x\n", + pdev->device, pdev->subsystem_device, + iwl_trans->hw_rev, iwl_trans->hw_rf_id); + ret = -EINVAL; + goto out_free_trans; + } + iwl_trans->cfg = cfg; + } + + /* if we don't have a name yet, copy name from the old cfg */ + if (!iwl_trans->name) + iwl_trans->name = iwl_trans->cfg->name; + + if (iwl_trans->trans_cfg->mq_rx_supported) { + if (WARN_ON(!iwl_trans->cfg->num_rbds)) { + ret = -EINVAL; + goto out_free_trans; + } + trans_pcie->num_rx_bufs = iwl_trans->cfg->num_rbds; + } else { + trans_pcie->num_rx_bufs = RX_QUEUE_SIZE; + } + + ret = iwl_trans_init(iwl_trans); + if (ret) + goto out_free_trans; + + pci_set_drvdata(pdev, iwl_trans); + + /* try to get ownership so that we'll know if we don't own it */ + iwl_pcie_prepare_card_hw(iwl_trans); + + iwl_trans->drv = iwl_drv_start(iwl_trans); + + if (IS_ERR(iwl_trans->drv)) { + ret = PTR_ERR(iwl_trans->drv); + goto out_free_trans; + } + + /* register transport layer debugfs here */ + iwl_trans_pcie_dbgfs_register(iwl_trans); + + return 0; + +out_free_trans: + iwl_trans_pcie_free(iwl_trans); + return ret; +} + +static void iwl_pci_remove(struct pci_dev *pdev) +{ + struct iwl_trans *trans = pci_get_drvdata(pdev); + + if (!trans) + return; + + iwl_drv_stop(trans->drv); + + iwl_trans_pcie_free(trans); +} + +#ifdef CONFIG_PM_SLEEP + +static int iwl_pci_suspend(struct device *device) +{ + /* Before you put code here, think about WoWLAN. You cannot check here + * whether WoWLAN is enabled or not, and your code will run even if + * WoWLAN is enabled - don't kill the NIC, someone may need it in Sx. + */ + + return 0; +} + +static int iwl_pci_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct iwl_trans *trans = pci_get_drvdata(pdev); + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + /* Before you put code here, think about WoWLAN. You cannot check here + * whether WoWLAN is enabled or not, and your code will run even if + * WoWLAN is enabled - the NIC may be alive. + */ + + /* + * We disable the RETRY_TIMEOUT register (0x41) to keep + * PCI Tx retries from interfering with C3 CPU state. + */ + pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); + + if (!trans->op_mode) + return 0; + + /* In WOWLAN, let iwl_trans_pcie_d3_resume do the rest of the work */ + if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) + return 0; + + /* reconfigure the MSI-X mapping to get the correct IRQ for rfkill */ + iwl_pcie_conf_msix_hw(trans_pcie); + + /* + * Enable rfkill interrupt (in order to keep track of the rfkill + * status). Must be locked to avoid processing a possible rfkill + * interrupt while in iwl_pcie_check_hw_rf_kill(). 
+ */ + mutex_lock(&trans_pcie->mutex); + iwl_enable_rfkill_int(trans); + iwl_pcie_check_hw_rf_kill(trans); + mutex_unlock(&trans_pcie->mutex); + + return 0; +} + +static const struct dev_pm_ops iwl_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(iwl_pci_suspend, + iwl_pci_resume) +}; + +#define IWL_PM_OPS (&iwl_dev_pm_ops) + +#else /* CONFIG_PM_SLEEP */ + +#define IWL_PM_OPS NULL + +#endif /* CONFIG_PM_SLEEP */ + +static struct pci_driver iwl_pci_driver = { + .name = DRV_NAME, + .id_table = iwl_hw_card_ids, + .probe = iwl_pci_probe, + .remove = iwl_pci_remove, + .driver.pm = IWL_PM_OPS, +}; + +int __must_check iwl_pci_register_driver(void) +{ + int ret; + ret = pci_register_driver(&iwl_pci_driver); + if (ret) + pr_err("Unable to initialize PCI module\n"); + + return ret; +} + +void iwl_pci_unregister_driver(void) +{ + pci_unregister_driver(&iwl_pci_driver); +} diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h new file mode 100644 index 000000000..2ec4ee8ab --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -0,0 +1,843 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2003-2015, 2018-2022 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ +#ifndef __iwl_trans_int_pcie_h__ +#define __iwl_trans_int_pcie_h__ + +#include <linux/spinlock.h> +#include <linux/interrupt.h> +#include <linux/skbuff.h> +#include <linux/wait.h> +#include <linux/pci.h> +#include <linux/timer.h> +#include <linux/cpu.h> + +#include "iwl-fh.h" +#include "iwl-csr.h" +#include "iwl-trans.h" +#include "iwl-debug.h" +#include "iwl-io.h" +#include "iwl-op-mode.h" +#include "iwl-drv.h" +#include "queue/tx.h" + +/* + * RX related structures and functions + */ +#define RX_NUM_QUEUES 1 +#define RX_POST_REQ_ALLOC 2 +#define RX_CLAIM_REQ_ALLOC 8 +#define RX_PENDING_WATERMARK 16 +#define FIRST_RX_QUEUE 512 + +struct iwl_host_cmd; + +/*This file includes the declaration that are internal to the + * trans_pcie layer */ + +/** + * struct iwl_rx_mem_buffer + * @page_dma: bus address of rxb page + * @page: driver's pointer to the rxb page + * @list: list entry for the membuffer + * @invalid: rxb is in driver ownership - not owned by HW + * @vid: index of this rxb in the global table + * @offset: indicates which offset of the page (in bytes) + * this buffer uses (if multiple RBs fit into one page) + */ +struct iwl_rx_mem_buffer { + dma_addr_t page_dma; + struct page *page; + struct list_head list; + u32 offset; + u16 vid; + bool invalid; +}; + +/** + * struct isr_statistics - interrupt statistics + * + */ +struct isr_statistics { + u32 hw; + u32 sw; + u32 err_code; + u32 sch; + u32 alive; + u32 rfkill; + u32 ctkill; + u32 wakeup; + u32 rx; + u32 tx; + u32 unhandled; +}; + +/** + * struct iwl_rx_transfer_desc - transfer descriptor + * @addr: ptr to free buffer start address + * @rbid: unique tag of the buffer + * @reserved: reserved + */ +struct iwl_rx_transfer_desc { + __le16 rbid; + __le16 reserved[3]; + __le64 addr; +} __packed; + +#define IWL_RX_CD_FLAGS_FRAGMENTED BIT(0) + +/** + * struct iwl_rx_completion_desc - completion descriptor + * @reserved1: reserved + * @rbid: unique tag of the received buffer + * @flags: flags (0: fragmented, all others: reserved) + * @reserved2: reserved + */ +struct iwl_rx_completion_desc { + __le32 reserved1; + __le16 rbid; + u8 flags; + u8 reserved2[25]; +} __packed; + +/** + * struct iwl_rx_completion_desc_bz 
- Bz completion descriptor + * @rbid: unique tag of the received buffer + * @flags: flags (0: fragmented, all others: reserved) + * @reserved: reserved + */ +struct iwl_rx_completion_desc_bz { + __le16 rbid; + u8 flags; + u8 reserved[1]; +} __packed; + +/** + * struct iwl_rxq - Rx queue + * @id: queue index + * @bd: driver's pointer to buffer of receive buffer descriptors (rbd). + * Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices. + * In AX210 devices it is a pointer to a list of iwl_rx_transfer_desc's + * @bd_dma: bus address of buffer of receive buffer descriptors (rbd) + * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd) + * @used_bd_dma: physical address of buffer of used receive buffer descriptors (rbd) + * @read: Shared index to newest available Rx buffer + * @write: Shared index to oldest written Rx packet + * @free_count: Number of pre-allocated buffers in rx_free + * @used_count: Number of RBDs handled to allocator to use for allocation + * @write_actual: + * @rx_free: list of RBDs with allocated RB ready for use + * @rx_used: list of RBDs with no RB attached + * @need_update: flag to indicate we need to update read/write index + * @rb_stts: driver's pointer to receive buffer status + * @rb_stts_dma: bus address of receive buffer status + * @lock: + * @queue: actual rx queue. Not used for multi-rx queue. + * @next_rb_is_fragment: indicates that the previous RB that we handled set + * the fragmented flag, so the next one is still another fragment + * + * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers + */ +struct iwl_rxq { + int id; + void *bd; + dma_addr_t bd_dma; + void *used_bd; + dma_addr_t used_bd_dma; + u32 read; + u32 write; + u32 free_count; + u32 used_count; + u32 write_actual; + u32 queue_size; + struct list_head rx_free; + struct list_head rx_used; + bool need_update, next_rb_is_fragment; + void *rb_stts; + dma_addr_t rb_stts_dma; + spinlock_t lock; + struct napi_struct napi; + struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; +}; + +/** + * struct iwl_rb_allocator - Rx allocator + * @req_pending: number of requests the allcator had not processed yet + * @req_ready: number of requests honored and ready for claiming + * @rbd_allocated: RBDs with pages allocated and ready to be handled to + * the queue. This is a list of &struct iwl_rx_mem_buffer + * @rbd_empty: RBDs with no page attached for allocator use. This is a list + * of &struct iwl_rx_mem_buffer + * @lock: protects the rbd_allocated and rbd_empty lists + * @alloc_wq: work queue for background calls + * @rx_alloc: work struct for background calls + */ +struct iwl_rb_allocator { + atomic_t req_pending; + atomic_t req_ready; + struct list_head rbd_allocated; + struct list_head rbd_empty; + spinlock_t lock; + struct workqueue_struct *alloc_wq; + struct work_struct rx_alloc; +}; + +/** + * iwl_get_closed_rb_stts - get closed rb stts from different structs + * @rxq - the rxq to get the rb stts from + */ +static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans, + struct iwl_rxq *rxq) +{ + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + __le16 *rb_stts = rxq->rb_stts; + + return READ_ONCE(*rb_stts); + } else { + struct iwl_rb_status *rb_stts = rxq->rb_stts; + + return READ_ONCE(rb_stts->closed_rb_num); + } +} + +#ifdef CONFIG_IWLWIFI_DEBUGFS +/** + * enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data + * debugfs file + * + * @IWL_FW_MON_DBGFS_STATE_CLOSED: the file is closed. 
+ * @IWL_FW_MON_DBGFS_STATE_OPEN: the file is open. + * @IWL_FW_MON_DBGFS_STATE_DISABLED: the file is disabled, once this state is + * set the file can no longer be used. + */ +enum iwl_fw_mon_dbgfs_state { + IWL_FW_MON_DBGFS_STATE_CLOSED, + IWL_FW_MON_DBGFS_STATE_OPEN, + IWL_FW_MON_DBGFS_STATE_DISABLED, +}; +#endif + +/** + * enum iwl_shared_irq_flags - level of sharing for irq + * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes. + * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue. + */ +enum iwl_shared_irq_flags { + IWL_SHARED_IRQ_NON_RX = BIT(0), + IWL_SHARED_IRQ_FIRST_RSS = BIT(1), +}; + +/** + * enum iwl_image_response_code - image response values + * @IWL_IMAGE_RESP_DEF: the default value of the register + * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully + * @IWL_IMAGE_RESP_FAIL: iml reading failed + */ +enum iwl_image_response_code { + IWL_IMAGE_RESP_DEF = 0, + IWL_IMAGE_RESP_SUCCESS = 1, + IWL_IMAGE_RESP_FAIL = 2, +}; + +/** + * struct cont_rec: continuous recording data structure + * @prev_wr_ptr: the last address that was read in monitor_data + * debugfs file + * @prev_wrap_cnt: the wrap count that was used during the last read in + * monitor_data debugfs file + * @state: the state of monitor_data debugfs file as described + * in &iwl_fw_mon_dbgfs_state enum + * @mutex: locked while reading from monitor_data debugfs file + */ +#ifdef CONFIG_IWLWIFI_DEBUGFS +struct cont_rec { + u32 prev_wr_ptr; + u32 prev_wrap_cnt; + u8 state; + /* Used to sync monitor_data debugfs file with driver unload flow */ + struct mutex mutex; +}; +#endif + +enum iwl_pcie_fw_reset_state { + FW_RESET_IDLE, + FW_RESET_REQUESTED, + FW_RESET_OK, + FW_RESET_ERROR, +}; + +/** + * enum wl_pcie_imr_status - imr dma transfer state + * @IMR_D2S_IDLE: default value of the dma transfer + * @IMR_D2S_REQUESTED: dma transfer requested + * @IMR_D2S_COMPLETED: dma transfer completed + * @IMR_D2S_ERROR: dma transfer error + */ +enum iwl_pcie_imr_status { + IMR_D2S_IDLE, + IMR_D2S_REQUESTED, + IMR_D2S_COMPLETED, + IMR_D2S_ERROR, +}; + +/** + * struct iwl_trans_pcie - PCIe transport specific data + * @rxq: all the RX queue data + * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues + * @global_table: table mapping received VID from hw to rxb + * @rba: allocator for RX replenishing + * @ctxt_info: context information for FW self init + * @ctxt_info_gen3: context information for gen3 devices + * @prph_info: prph info for self init + * @prph_scratch: prph scratch for self init + * @ctxt_info_dma_addr: dma addr of context information + * @prph_info_dma_addr: dma addr of prph info + * @prph_scratch_dma_addr: dma addr of prph scratch + * @ctxt_info_dma_addr: dma addr of context information + * @init_dram: DRAM data of firmware image (including paging). + * Context information addresses will be taken from here. + * This is driver's local copy for keeping track of size and + * count for allocating and freeing the memory. + * @iml: image loader image virtual address + * @iml_dma_addr: image loader image DMA address + * @trans: pointer to the generic transport area + * @scd_base_addr: scheduler sram base address in SRAM + * @kw: keep warm address + * @pnvm_dram: DRAM area that contains the PNVM data + * @pci_dev: basic pci-network driver stuff + * @hw_base: pci hardware address support + * @ucode_write_complete: indicates that the ucode has been copied. 
+ * @ucode_write_waitq: wait queue for uCode load + * @cmd_queue - command queue number + * @def_rx_queue - default rx queue number + * @rx_buf_size: Rx buffer size + * @scd_set_active: should the transport configure the SCD for HCMD queue + * @rx_page_order: page order for receive buffer size + * @rx_buf_bytes: RX buffer (RB) size in bytes + * @reg_lock: protect hw register access + * @mutex: to protect stop_device / start_fw / start_hw + * @cmd_in_flight: true when we have a host command in flight +#ifdef CONFIG_IWLWIFI_DEBUGFS + * @fw_mon_data: fw continuous recording data +#endif + * @msix_entries: array of MSI-X entries + * @msix_enabled: true if managed to enable MSI-X + * @shared_vec_mask: the type of causes the shared vector handles + * (see iwl_shared_irq_flags). + * @alloc_vecs: the number of interrupt vectors allocated by the OS + * @def_irq: default irq for non rx causes + * @fh_init_mask: initial unmasked fh causes + * @hw_init_mask: initial unmasked hw causes + * @fh_mask: current unmasked fh causes + * @hw_mask: current unmasked hw causes + * @in_rescan: true if we have triggered a device rescan + * @base_rb_stts: base virtual address of receive buffer status for all queues + * @base_rb_stts_dma: base physical address of receive buffer status + * @supported_dma_mask: DMA mask to validate the actual address against, + * will be DMA_BIT_MASK(11) or DMA_BIT_MASK(12) depending on the device + * @alloc_page_lock: spinlock for the page allocator + * @alloc_page: allocated page to still use parts of + * @alloc_page_used: how much of the allocated page was already used (bytes) + * @imr_status: imr dma state machine + * @wait_queue_head_t: imr wait queue for dma completion + * @rf_name: name/version of the CRF, if any + */ +struct iwl_trans_pcie { + struct iwl_rxq *rxq; + struct iwl_rx_mem_buffer *rx_pool; + struct iwl_rx_mem_buffer **global_table; + struct iwl_rb_allocator rba; + union { + struct iwl_context_info *ctxt_info; + struct iwl_context_info_gen3 *ctxt_info_gen3; + }; + struct iwl_prph_info *prph_info; + struct iwl_prph_scratch *prph_scratch; + void *iml; + dma_addr_t ctxt_info_dma_addr; + dma_addr_t prph_info_dma_addr; + dma_addr_t prph_scratch_dma_addr; + dma_addr_t iml_dma_addr; + struct iwl_trans *trans; + + struct net_device napi_dev; + + /* INT ICT Table */ + __le32 *ict_tbl; + dma_addr_t ict_tbl_dma; + int ict_index; + bool use_ict; + bool is_down, opmode_down; + s8 debug_rfkill; + struct isr_statistics isr_stats; + + spinlock_t irq_lock; + struct mutex mutex; + u32 inta_mask; + u32 scd_base_addr; + struct iwl_dma_ptr kw; + + struct iwl_dram_data pnvm_dram; + struct iwl_dram_data reduce_power_dram; + + struct iwl_txq *txq_memory; + + /* PCI bus related data */ + struct pci_dev *pci_dev; + u8 __iomem *hw_base; + + bool ucode_write_complete; + bool sx_complete; + wait_queue_head_t ucode_write_waitq; + wait_queue_head_t sx_waitq; + + u8 def_rx_queue; + u8 n_no_reclaim_cmds; + u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS]; + u16 num_rx_bufs; + + enum iwl_amsdu_size rx_buf_size; + bool scd_set_active; + bool pcie_dbg_dumped_once; + u32 rx_page_order; + u32 rx_buf_bytes; + u32 supported_dma_mask; + + /* allocator lock for the two values below */ + spinlock_t alloc_page_lock; + struct page *alloc_page; + u32 alloc_page_used; + + /*protect hw register */ + spinlock_t reg_lock; + bool cmd_hold_nic_awake; + +#ifdef CONFIG_IWLWIFI_DEBUGFS + struct cont_rec fw_mon_data; +#endif + + struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES]; + bool msix_enabled; + u8 shared_vec_mask; + u32 
alloc_vecs; + u32 def_irq; + u32 fh_init_mask; + u32 hw_init_mask; + u32 fh_mask; + u32 hw_mask; + cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES]; + u16 tx_cmd_queue_size; + bool in_rescan; + + void *base_rb_stts; + dma_addr_t base_rb_stts_dma; + + bool fw_reset_handshake; + enum iwl_pcie_fw_reset_state fw_reset_state; + wait_queue_head_t fw_reset_waitq; + enum iwl_pcie_imr_status imr_status; + wait_queue_head_t imr_waitq; + char rf_name[32]; +}; + +static inline struct iwl_trans_pcie * +IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans) +{ + return (void *)trans->trans_specific; +} + +static inline void iwl_pcie_clear_irq(struct iwl_trans *trans, int queue) +{ + /* + * Before sending the interrupt the HW disables it to prevent + * a nested interrupt. This is done by writing 1 to the corresponding + * bit in the mask register. After handling the interrupt, it should be + * re-enabled by clearing this bit. This register is defined as + * write 1 clear (W1C) register, meaning that it's being clear + * by writing 1 to the bit. + */ + iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(queue)); +} + +static inline struct iwl_trans * +iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie) +{ + return container_of((void *)trans_pcie, struct iwl_trans, + trans_specific); +} + +/* + * Convention: trans API functions: iwl_trans_pcie_XXX + * Other functions: iwl_pcie_XXX + */ +struct iwl_trans +*iwl_trans_pcie_alloc(struct pci_dev *pdev, + const struct pci_device_id *ent, + const struct iwl_cfg_trans_params *cfg_trans); +void iwl_trans_pcie_free(struct iwl_trans *trans); + +bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans); +#define _iwl_trans_pcie_grab_nic_access(trans) \ + __cond_lock(nic_access_nobh, \ + likely(__iwl_trans_pcie_grab_nic_access(trans))) + +/***************************************************** +* RX +******************************************************/ +int iwl_pcie_rx_init(struct iwl_trans *trans); +int iwl_pcie_gen2_rx_init(struct iwl_trans *trans); +irqreturn_t iwl_pcie_msix_isr(int irq, void *data); +irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id); +irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id); +irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id); +int iwl_pcie_rx_stop(struct iwl_trans *trans); +void iwl_pcie_rx_free(struct iwl_trans *trans); +void iwl_pcie_free_rbs_pool(struct iwl_trans *trans); +void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq); +void iwl_pcie_rx_napi_sync(struct iwl_trans *trans); +void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority, + struct iwl_rxq *rxq); + +/***************************************************** +* ICT - interrupt handling +******************************************************/ +irqreturn_t iwl_pcie_isr(int irq, void *data); +int iwl_pcie_alloc_ict(struct iwl_trans *trans); +void iwl_pcie_free_ict(struct iwl_trans *trans); +void iwl_pcie_reset_ict(struct iwl_trans *trans); +void iwl_pcie_disable_ict(struct iwl_trans *trans); + +/***************************************************** +* TX / HCMD +******************************************************/ +int iwl_pcie_tx_init(struct iwl_trans *trans); +void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr); +int iwl_pcie_tx_stop(struct iwl_trans *trans); +void iwl_pcie_tx_free(struct iwl_trans *trans); +bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn, + const struct iwl_trans_txq_scd_cfg *cfg, + unsigned int wdg_timeout); +void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int 
queue, + bool configure_scd); +void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id, + bool shared_mode); +int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, + struct iwl_device_tx_cmd *dev_cmd, int txq_id); +void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans); +int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); +void iwl_pcie_hcmd_complete(struct iwl_trans *trans, + struct iwl_rx_cmd_buffer *rxb); +void iwl_trans_pcie_tx_reset(struct iwl_trans *trans); + +/***************************************************** +* Error handling +******************************************************/ +void iwl_pcie_dump_csr(struct iwl_trans *trans); + +/***************************************************** +* Helpers +******************************************************/ +static inline void _iwl_disable_interrupts(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + clear_bit(STATUS_INT_ENABLED, &trans->status); + if (!trans_pcie->msix_enabled) { + /* disable interrupts from uCode/NIC to host */ + iwl_write32(trans, CSR_INT_MASK, 0x00000000); + + /* acknowledge/clear/reset any interrupts still pending + * from uCode or flow handler (Rx/Tx DMA) */ + iwl_write32(trans, CSR_INT, 0xffffffff); + iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff); + } else { + /* disable all the interrupt we might use */ + iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, + trans_pcie->fh_init_mask); + iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, + trans_pcie->hw_init_mask); + } + IWL_DEBUG_ISR(trans, "Disabled interrupts\n"); +} + +static inline int iwl_pcie_get_num_sections(const struct fw_img *fw, + int start) +{ + int i = 0; + + while (start < fw->num_sec && + fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION && + fw->sec[start].offset != PAGING_SEPARATOR_SECTION) { + start++; + i++; + } + + return i; +} + +static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans) +{ + struct iwl_self_init_dram *dram = &trans->init_dram; + int i; + + if (!dram->fw) { + WARN_ON(dram->fw_cnt); + return; + } + + for (i = 0; i < dram->fw_cnt; i++) + dma_free_coherent(trans->dev, dram->fw[i].size, + dram->fw[i].block, dram->fw[i].physical); + + kfree(dram->fw); + dram->fw_cnt = 0; + dram->fw = NULL; +} + +static inline void iwl_disable_interrupts(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + spin_lock_bh(&trans_pcie->irq_lock); + _iwl_disable_interrupts(trans); + spin_unlock_bh(&trans_pcie->irq_lock); +} + +static inline void _iwl_enable_interrupts(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + IWL_DEBUG_ISR(trans, "Enabling interrupts\n"); + set_bit(STATUS_INT_ENABLED, &trans->status); + if (!trans_pcie->msix_enabled) { + trans_pcie->inta_mask = CSR_INI_SET_MASK; + iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); + } else { + /* + * fh/hw_mask keeps all the unmasked causes. + * Unlike msi, in msix cause is enabled when it is unset. 
+ */ + trans_pcie->hw_mask = trans_pcie->hw_init_mask; + trans_pcie->fh_mask = trans_pcie->fh_init_mask; + iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, + ~trans_pcie->fh_mask); + iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, + ~trans_pcie->hw_mask); + } +} + +static inline void iwl_enable_interrupts(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + spin_lock_bh(&trans_pcie->irq_lock); + _iwl_enable_interrupts(trans); + spin_unlock_bh(&trans_pcie->irq_lock); +} +static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk); + trans_pcie->hw_mask = msk; +} + +static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk); + trans_pcie->fh_mask = msk; +} + +static inline void iwl_enable_fw_load_int(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n"); + if (!trans_pcie->msix_enabled) { + trans_pcie->inta_mask = CSR_INT_BIT_FH_TX; + iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); + } else { + iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, + trans_pcie->hw_init_mask); + iwl_enable_fh_int_msk_msix(trans, + MSIX_FH_INT_CAUSES_D2S_CH0_NUM); + } +} + +static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + IWL_DEBUG_ISR(trans, "Enabling ALIVE interrupt only\n"); + + if (!trans_pcie->msix_enabled) { + /* + * When we'll receive the ALIVE interrupt, the ISR will call + * iwl_enable_fw_load_int_ctx_info again to set the ALIVE + * interrupt (which is not really needed anymore) but also the + * RX interrupt which will allow us to receive the ALIVE + * notification (which is Rx) and continue the flow. + */ + trans_pcie->inta_mask = CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX; + iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); + } else { + iwl_enable_hw_int_msk_msix(trans, + MSIX_HW_INT_CAUSES_REG_ALIVE); + /* + * Leave all the FH causes enabled to get the ALIVE + * notification. + */ + iwl_enable_fh_int_msk_msix(trans, trans_pcie->fh_init_mask); + } +} + +static inline const char *queue_name(struct device *dev, + struct iwl_trans_pcie *trans_p, int i) +{ + if (trans_p->shared_vec_mask) { + int vec = trans_p->shared_vec_mask & + IWL_SHARED_IRQ_FIRST_RSS ? 
1 : 0; + + if (i == 0) + return DRV_NAME ":shared_IRQ"; + + return devm_kasprintf(dev, GFP_KERNEL, + DRV_NAME ":queue_%d", i + vec); + } + if (i == 0) + return DRV_NAME ":default_queue"; + + if (i == trans_p->alloc_vecs - 1) + return DRV_NAME ":exception"; + + return devm_kasprintf(dev, GFP_KERNEL, + DRV_NAME ":queue_%d", i); +} + +static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n"); + if (!trans_pcie->msix_enabled) { + trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL; + iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); + } else { + iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, + trans_pcie->fh_init_mask); + iwl_enable_hw_int_msk_msix(trans, + MSIX_HW_INT_CAUSES_REG_RF_KILL); + } + + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) { + /* + * On 9000-series devices this bit isn't enabled by default, so + * when we power down the device we need set the bit to allow it + * to wake up the PCI-E bus for RF-kill interrupts. + */ + iwl_set_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN); + } +} + +void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq); + +static inline bool iwl_is_rfkill_set(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + lockdep_assert_held(&trans_pcie->mutex); + + if (trans_pcie->debug_rfkill == 1) + return true; + + return !(iwl_read32(trans, CSR_GP_CNTRL) & + CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW); +} + +static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, + u32 reg, u32 mask, u32 value) +{ + u32 v; + +#ifdef CONFIG_IWLWIFI_DEBUG + WARN_ON_ONCE(value & ~mask); +#endif + + v = iwl_read32(trans, reg); + v &= ~mask; + v |= value; + iwl_write32(trans, reg, v); +} + +static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans, + u32 reg, u32 mask) +{ + __iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0); +} + +static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans, + u32 reg, u32 mask) +{ + __iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask); +} + +static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans) +{ + return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans)); +} + +void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq); +void iwl_trans_pcie_dump_regs(struct iwl_trans *trans); + +#ifdef CONFIG_IWLWIFI_DEBUGFS +void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans); +#else +static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { } +#endif + +void iwl_pcie_rx_allocator_work(struct work_struct *data); + +/* common functions that are used by gen2 transport */ +int iwl_pcie_gen2_apm_init(struct iwl_trans *trans); +void iwl_pcie_apm_config(struct iwl_trans *trans); +int iwl_pcie_prepare_card_hw(struct iwl_trans *trans); +void iwl_pcie_synchronize_irqs(struct iwl_trans *trans); +bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans); +void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans, + bool was_in_rfkill); +void iwl_pcie_apm_stop_master(struct iwl_trans *trans); +void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie); +int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans, + struct iwl_dma_ptr *ptr, size_t size); +void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr); +void iwl_pcie_apply_destination(struct iwl_trans *trans); + +/* common functions that are used by gen3 transport */ +void 
iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power); + +/* transport gen 2 exported functions */ +int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, + const struct fw_img *fw, bool run_in_rfkill); +void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr); +int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans, + struct iwl_host_cmd *cmd); +void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans); +void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans); +void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans, + bool test, bool reset); +int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, + struct iwl_host_cmd *cmd); +int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, + struct iwl_host_cmd *cmd); +void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans, + u32 dst_addr, u64 src_addr, u32 byte_cnt); +int iwl_trans_pcie_copy_imr(struct iwl_trans *trans, + u32 dst_addr, u64 src_addr, u32 byte_cnt); + +#endif /* __iwl_trans_int_pcie_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c new file mode 100644 index 000000000..91b73e7a4 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -0,0 +1,2388 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2003-2014, 2018-2023 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/gfp.h> + +#include "iwl-prph.h" +#include "iwl-io.h" +#include "internal.h" +#include "iwl-op-mode.h" +#include "iwl-context-info-gen3.h" + +/****************************************************************************** + * + * RX path functions + * + ******************************************************************************/ + +/* + * Rx theory of operation + * + * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs), + * each of which point to Receive Buffers to be filled by the NIC. These get + * used not only for Rx frames, but for any command response or notification + * from the NIC. The driver and NIC manage the Rx buffers by means + * of indexes into the circular buffer. + * + * Rx Queue Indexes + * The host/firmware share two index registers for managing the Rx buffers. + * + * The READ index maps to the first position that the firmware may be writing + * to -- the driver can read up to (but not including) this position and get + * good data. + * The READ index is managed by the firmware once the card is enabled. + * + * The WRITE index maps to the last position the driver has read from -- the + * position preceding WRITE is the last slot the firmware can place a packet. + * + * The queue is empty (no good data) if WRITE = READ - 1, and is full if + * WRITE = READ. + * + * During initialization, the host sets up the READ queue position to the first + * INDEX position, and WRITE to the last (READ - 1 wrapped) + * + * When the firmware places a packet in a buffer, it will advance the READ index + * and fire the RX interrupt. The driver can then query the READ index and + * process as many packets as possible, moving the WRITE index forward as it + * resets the Rx queue buffers with new memory. + * + * The management in the driver is as follows: + * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free. + * When the interrupt handler is called, the request is processed. 
+ * The page is either stolen - transferred to the upper layer + * or reused - added immediately to the iwl->rxq->rx_free list. + * + When the page is stolen - the driver updates the matching queue's used + * count, detaches the RBD and transfers it to the queue used list. + * When there are two used RBDs - they are transferred to the allocator empty + * list. Work is then scheduled for the allocator to start allocating + * eight buffers. + * When there are another 6 used RBDs - they are transferred to the allocator + * empty list and the driver tries to claim the pre-allocated buffers and + * add them to iwl->rxq->rx_free. If it fails - it continues to claim them + * until ready. + * When there are 8+ buffers in the free list - either from allocation or from + * 8 reused unstolen pages - restock is called to update the FW and indexes. + * + In order to make sure the allocator always has RBDs to use for allocation + * the allocator has initial pool in the size of num_queues*(8-2) - the + * maximum missing RBDs per allocation request (request posted with 2 + * empty RBDs, there is no guarantee when the other 6 RBDs are supplied). + * The queues supplies the recycle of the rest of the RBDs. + * + A received packet is processed and handed to the kernel network stack, + * detached from the iwl->rxq. The driver 'processed' index is updated. + * + If there are no allocated buffers in iwl->rxq->rx_free, + * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set. + * If there were enough free buffers and RX_STALLED is set it is cleared. + * + * + * Driver sequence: + * + * iwl_rxq_alloc() Allocates rx_free + * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls + * iwl_pcie_rxq_restock. + * Used only during initialization. + * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx + * queue, updates firmware pointers, and updates + * the WRITE index. + * iwl_pcie_rx_allocator() Background work for allocating pages. + * + * -- enable interrupts -- + * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the + * READ INDEX, detaching the SKB from the pool. + * Moves the packet buffer from queue to rx_used. + * Posts and claims requests to the allocator. + * Calls iwl_pcie_rxq_restock to refill any empty + * slots. + * + * RBD life-cycle: + * + * Init: + * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue + * + * Regular Receive interrupt: + * Page Stolen: + * rxq.queue -> rxq.rx_used -> allocator.rbd_empty -> + * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue + * Page not Stolen: + * rxq.queue -> rxq.rx_free -> rxq.queue + * ... + * + */ + +/* + * iwl_rxq_space - Return number of free slots available in queue. + */ +static int iwl_rxq_space(const struct iwl_rxq *rxq) +{ + /* Make sure rx queue size is a power of 2 */ + WARN_ON(rxq->queue_size & (rxq->queue_size - 1)); + + /* + * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity + * between empty and completely full queues. + * The following is equivalent to modulo by RX_QUEUE_SIZE and is well + * defined for negative dividends. 
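+ * For example, with queue_size 256, read 10 and write 250: + * (10 - 250 - 1) & 255 = 15 free slots.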
+ */ + return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1); +} + +/* + * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr + */ +static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr) +{ + return cpu_to_le32((u32)(dma_addr >> 8)); +} + +/* + * iwl_pcie_rx_stop - stops the Rx DMA + */ +int iwl_pcie_rx_stop(struct iwl_trans *trans) +{ + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + /* TODO: remove this once fw does it */ + iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0); + return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_GEN3, + RXF_DMA_IDLE, RXF_DMA_IDLE, 1000); + } else if (trans->trans_cfg->mq_rx_supported) { + iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0); + return iwl_poll_prph_bit(trans, RFH_GEN_STATUS, + RXF_DMA_IDLE, RXF_DMA_IDLE, 1000); + } else { + iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); + return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG, + FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, + 1000); + } +} + +/* + * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue + */ +static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, + struct iwl_rxq *rxq) +{ + u32 reg; + + lockdep_assert_held(&rxq->lock); + + /* + * explicitly wake up the NIC if: + * 1. shadow registers aren't enabled + * 2. there is a chance that the NIC is asleep + */ + if (!trans->trans_cfg->base_params->shadow_reg_enable && + test_bit(STATUS_TPOWER_PMI, &trans->status)) { + reg = iwl_read32(trans, CSR_UCODE_DRV_GP1); + + if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { + IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n", + reg); + iwl_set_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + rxq->need_update = true; + return; + } + } + + rxq->write_actual = round_down(rxq->write, 8); + if (!trans->trans_cfg->mq_rx_supported) + iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual); + else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + iwl_write32(trans, HBUS_TARG_WRPTR, rxq->write_actual | + HBUS_TARG_WRPTR_RX_Q(rxq->id)); + else + iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id), + rxq->write_actual); +} + +static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int i; + + for (i = 0; i < trans->num_rx_queues; i++) { + struct iwl_rxq *rxq = &trans_pcie->rxq[i]; + + if (!rxq->need_update) + continue; + spin_lock_bh(&rxq->lock); + iwl_pcie_rxq_inc_wr_ptr(trans, rxq); + rxq->need_update = false; + spin_unlock_bh(&rxq->lock); + } +} + +static void iwl_pcie_restock_bd(struct iwl_trans *trans, + struct iwl_rxq *rxq, + struct iwl_rx_mem_buffer *rxb) +{ + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + struct iwl_rx_transfer_desc *bd = rxq->bd; + + BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64)); + + bd[rxq->write].addr = cpu_to_le64(rxb->page_dma); + bd[rxq->write].rbid = cpu_to_le16(rxb->vid); + } else { + __le64 *bd = rxq->bd; + + bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid); + } + + IWL_DEBUG_RX(trans, "Assigned virtual RB ID %u to queue %d index %d\n", + (u32)rxb->vid, rxq->id, rxq->write); +} + +/* + * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx + */ +static void iwl_pcie_rxmq_restock(struct iwl_trans *trans, + struct iwl_rxq *rxq) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rx_mem_buffer *rxb; + + /* + * If the device isn't enabled - no need to try to add buffers... 
+ * This can happen when we stop the device and still have an interrupt + * pending. We stop the APM before we sync the interrupts because we + * have to (see comment there). On the other hand, since the APM is + * stopped, we cannot access the HW (in particular not prph). + * So don't try to restock if the APM has been already stopped. + */ + if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) + return; + + spin_lock_bh(&rxq->lock); + while (rxq->free_count) { + /* Get next free Rx buffer, remove from free list */ + rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer, + list); + list_del(&rxb->list); + rxb->invalid = false; + /* some low bits are expected to be unset (depending on hw) */ + WARN_ON(rxb->page_dma & trans_pcie->supported_dma_mask); + /* Point to Rx buffer via next RBD in circular buffer */ + iwl_pcie_restock_bd(trans, rxq, rxb); + rxq->write = (rxq->write + 1) & (rxq->queue_size - 1); + rxq->free_count--; + } + spin_unlock_bh(&rxq->lock); + + /* + * If we've added more space for the firmware to place data, tell it. + * Increment device's write pointer in multiples of 8. + */ + if (rxq->write_actual != (rxq->write & ~0x7)) { + spin_lock_bh(&rxq->lock); + iwl_pcie_rxq_inc_wr_ptr(trans, rxq); + spin_unlock_bh(&rxq->lock); + } +} + +/* + * iwl_pcie_rxsq_restock - restock implementation for single queue rx + */ +static void iwl_pcie_rxsq_restock(struct iwl_trans *trans, + struct iwl_rxq *rxq) +{ + struct iwl_rx_mem_buffer *rxb; + + /* + * If the device isn't enabled - not need to try to add buffers... + * This can happen when we stop the device and still have an interrupt + * pending. We stop the APM before we sync the interrupts because we + * have to (see comment there). On the other hand, since the APM is + * stopped, we cannot access the HW (in particular not prph). + * So don't try to restock if the APM has been already stopped. + */ + if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) + return; + + spin_lock_bh(&rxq->lock); + while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) { + __le32 *bd = (__le32 *)rxq->bd; + /* The overwritten rxb must be a used one */ + rxb = rxq->queue[rxq->write]; + BUG_ON(rxb && rxb->page); + + /* Get next free Rx buffer, remove from free list */ + rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer, + list); + list_del(&rxb->list); + rxb->invalid = false; + + /* Point to Rx buffer via next RBD in circular buffer */ + bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma); + rxq->queue[rxq->write] = rxb; + rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; + rxq->free_count--; + } + spin_unlock_bh(&rxq->lock); + + /* If we've added more space for the firmware to place data, tell it. + * Increment device's write pointer in multiples of 8. */ + if (rxq->write_actual != (rxq->write & ~0x7)) { + spin_lock_bh(&rxq->lock); + iwl_pcie_rxq_inc_wr_ptr(trans, rxq); + spin_unlock_bh(&rxq->lock); + } +} + +/* + * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool + * + * If there are slots in the RX queue that need to be restocked, + * and we have free pre-allocated buffers, fill the ranks as much + * as we can, pulling from rx_free. + * + * This moves the 'write' index forward to catch up with 'processed', and + * also updates the memory address in the firmware to reference the new + * target buffer. 
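+ * + * Note that the device's write pointer is only advanced in multiples of 8, + * so up to 7 restocked buffers become visible to the firmware only once + * the write index reaches the next multiple of 8.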
+ */ +static +void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq) +{ + if (trans->trans_cfg->mq_rx_supported) + iwl_pcie_rxmq_restock(trans, rxq); + else + iwl_pcie_rxsq_restock(trans, rxq); +} + +/* + * iwl_pcie_rx_alloc_page - allocates and returns a page. + * + */ +static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans, + u32 *offset, gfp_t priority) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + unsigned int rbsize = iwl_trans_get_rb_size(trans_pcie->rx_buf_size); + unsigned int allocsize = PAGE_SIZE << trans_pcie->rx_page_order; + struct page *page; + gfp_t gfp_mask = priority; + + if (trans_pcie->rx_page_order > 0) + gfp_mask |= __GFP_COMP; + + if (trans_pcie->alloc_page) { + spin_lock_bh(&trans_pcie->alloc_page_lock); + /* recheck */ + if (trans_pcie->alloc_page) { + *offset = trans_pcie->alloc_page_used; + page = trans_pcie->alloc_page; + trans_pcie->alloc_page_used += rbsize; + if (trans_pcie->alloc_page_used >= allocsize) + trans_pcie->alloc_page = NULL; + else + get_page(page); + spin_unlock_bh(&trans_pcie->alloc_page_lock); + return page; + } + spin_unlock_bh(&trans_pcie->alloc_page_lock); + } + + /* Alloc a new receive buffer */ + page = alloc_pages(gfp_mask, trans_pcie->rx_page_order); + if (!page) { + if (net_ratelimit()) + IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n", + trans_pcie->rx_page_order); + /* + * Issue an error if we don't have enough pre-allocated + * buffers. + */ + if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit()) + IWL_CRIT(trans, + "Failed to alloc_pages\n"); + return NULL; + } + + if (2 * rbsize <= allocsize) { + spin_lock_bh(&trans_pcie->alloc_page_lock); + if (!trans_pcie->alloc_page) { + get_page(page); + trans_pcie->alloc_page = page; + trans_pcie->alloc_page_used = rbsize; + } + spin_unlock_bh(&trans_pcie->alloc_page_lock); + } + + *offset = 0; + return page; +} + +/* + * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD + * + * A used RBD is an Rx buffer that has been given to the stack. To use it again + * a page must be allocated and the RBD must point to the page. This function + * doesn't change the HW pointer but handles the list of pages that is used by + * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly + * allocated buffers. 
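+ * + * The loop below runs until rxq->rx_used is empty or a page allocation + * fails; each new page is DMA-mapped before its RBD is moved to rx_free.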
+ */ +void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority, + struct iwl_rxq *rxq) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rx_mem_buffer *rxb; + struct page *page; + + while (1) { + unsigned int offset; + + spin_lock_bh(&rxq->lock); + if (list_empty(&rxq->rx_used)) { + spin_unlock_bh(&rxq->lock); + return; + } + spin_unlock_bh(&rxq->lock); + + page = iwl_pcie_rx_alloc_page(trans, &offset, priority); + if (!page) + return; + + spin_lock_bh(&rxq->lock); + + if (list_empty(&rxq->rx_used)) { + spin_unlock_bh(&rxq->lock); + __free_pages(page, trans_pcie->rx_page_order); + return; + } + rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer, + list); + list_del(&rxb->list); + spin_unlock_bh(&rxq->lock); + + BUG_ON(rxb->page); + rxb->page = page; + rxb->offset = offset; + /* Get physical address of the RB */ + rxb->page_dma = + dma_map_page(trans->dev, page, rxb->offset, + trans_pcie->rx_buf_bytes, + DMA_FROM_DEVICE); + if (dma_mapping_error(trans->dev, rxb->page_dma)) { + rxb->page = NULL; + spin_lock_bh(&rxq->lock); + list_add(&rxb->list, &rxq->rx_used); + spin_unlock_bh(&rxq->lock); + __free_pages(page, trans_pcie->rx_page_order); + return; + } + + spin_lock_bh(&rxq->lock); + + list_add_tail(&rxb->list, &rxq->rx_free); + rxq->free_count++; + + spin_unlock_bh(&rxq->lock); + } +} + +void iwl_pcie_free_rbs_pool(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int i; + + if (!trans_pcie->rx_pool) + return; + + for (i = 0; i < RX_POOL_SIZE(trans_pcie->num_rx_bufs); i++) { + if (!trans_pcie->rx_pool[i].page) + continue; + dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma, + trans_pcie->rx_buf_bytes, DMA_FROM_DEVICE); + __free_pages(trans_pcie->rx_pool[i].page, + trans_pcie->rx_page_order); + trans_pcie->rx_pool[i].page = NULL; + } +} + +/* + * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues + * + * Allocates for each received request 8 pages + * Called as a scheduled work item. + */ +static void iwl_pcie_rx_allocator(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rb_allocator *rba = &trans_pcie->rba; + struct list_head local_empty; + int pending = atomic_read(&rba->req_pending); + + IWL_DEBUG_TPT(trans, "Pending allocation requests = %d\n", pending); + + /* If we were scheduled - there is at least one request */ + spin_lock_bh(&rba->lock); + /* swap out the rba->rbd_empty to a local list */ + list_replace_init(&rba->rbd_empty, &local_empty); + spin_unlock_bh(&rba->lock); + + while (pending) { + int i; + LIST_HEAD(local_allocated); + gfp_t gfp_mask = GFP_KERNEL; + + /* Do not post a warning if there are only a few requests */ + if (pending < RX_PENDING_WATERMARK) + gfp_mask |= __GFP_NOWARN; + + for (i = 0; i < RX_CLAIM_REQ_ALLOC;) { + struct iwl_rx_mem_buffer *rxb; + struct page *page; + + /* List should never be empty - each reused RBD is + * returned to the list, and initial pool covers any + * possible gap between the time the page is allocated + * to the time the RBD is added. 
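+ * (The initial pool is sized in _iwl_pcie_rx_init() as + * num_rx_queues * (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) entries.)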
+ */ + BUG_ON(list_empty(&local_empty)); + /* Get the first rxb from the rbd list */ + rxb = list_first_entry(&local_empty, + struct iwl_rx_mem_buffer, list); + BUG_ON(rxb->page); + + /* Alloc a new receive buffer */ + page = iwl_pcie_rx_alloc_page(trans, &rxb->offset, + gfp_mask); + if (!page) + continue; + rxb->page = page; + + /* Get physical address of the RB */ + rxb->page_dma = dma_map_page(trans->dev, page, + rxb->offset, + trans_pcie->rx_buf_bytes, + DMA_FROM_DEVICE); + if (dma_mapping_error(trans->dev, rxb->page_dma)) { + rxb->page = NULL; + __free_pages(page, trans_pcie->rx_page_order); + continue; + } + + /* move the allocated entry to the out list */ + list_move(&rxb->list, &local_allocated); + i++; + } + + atomic_dec(&rba->req_pending); + pending--; + + if (!pending) { + pending = atomic_read(&rba->req_pending); + if (pending) + IWL_DEBUG_TPT(trans, + "Got more pending allocation requests = %d\n", + pending); + } + + spin_lock_bh(&rba->lock); + /* add the allocated rbds to the allocator allocated list */ + list_splice_tail(&local_allocated, &rba->rbd_allocated); + /* get more empty RBDs for current pending requests */ + list_splice_tail_init(&rba->rbd_empty, &local_empty); + spin_unlock_bh(&rba->lock); + + atomic_inc(&rba->req_ready); + + } + + spin_lock_bh(&rba->lock); + /* return unused rbds to the allocator empty list */ + list_splice_tail(&local_empty, &rba->rbd_empty); + spin_unlock_bh(&rba->lock); + + IWL_DEBUG_TPT(trans, "%s, exit.\n", __func__); +} + +/* + * iwl_pcie_rx_allocator_get - returns the pre-allocated pages + * + * Called by the queue when it has posted an allocation request and + * has freed 8 RBDs in order to restock itself. + * This function directly moves the allocated RBs to the queue's ownership + * and updates the relevant counters. + */ +static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans, + struct iwl_rxq *rxq) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rb_allocator *rba = &trans_pcie->rba; + int i; + + lockdep_assert_held(&rxq->lock); + + /* + * atomic_dec_if_positive returns req_ready - 1 for any scenario. + * If req_ready is 0 atomic_dec_if_positive will return -1 and this + * function will return early, as there are no ready requests. + * atomic_dec_if_positive will perform the *actual* decrement only if + * req_ready > 0, i.e. - there are ready requests and the function + * hands one request to the caller. + */ + if (atomic_dec_if_positive(&rba->req_ready) < 0) + return; + + spin_lock(&rba->lock); + for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) { + /* Get next free Rx buffer, remove it from free list */ + struct iwl_rx_mem_buffer *rxb = + list_first_entry(&rba->rbd_allocated, + struct iwl_rx_mem_buffer, list); + + list_move(&rxb->list, &rxq->rx_free); + } + spin_unlock(&rba->lock); + + rxq->used_count -= RX_CLAIM_REQ_ALLOC; + rxq->free_count += RX_CLAIM_REQ_ALLOC; +} + +void iwl_pcie_rx_allocator_work(struct work_struct *data) +{ + struct iwl_rb_allocator *rba_p = + container_of(data, struct iwl_rb_allocator, rx_alloc); + struct iwl_trans_pcie *trans_pcie = + container_of(rba_p, struct iwl_trans_pcie, rba); + + iwl_pcie_rx_allocator(trans_pcie->trans); +} + +static int iwl_pcie_free_bd_size(struct iwl_trans *trans) +{ + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + return sizeof(struct iwl_rx_transfer_desc); + + return trans->trans_cfg->mq_rx_supported ?
+ sizeof(__le64) : sizeof(__le32); +} + +static int iwl_pcie_used_bd_size(struct iwl_trans *trans) +{ + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + return sizeof(struct iwl_rx_completion_desc_bz); + + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + return sizeof(struct iwl_rx_completion_desc); + + return sizeof(__le32); +} + +static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans, + struct iwl_rxq *rxq) +{ + int free_size = iwl_pcie_free_bd_size(trans); + + if (rxq->bd) + dma_free_coherent(trans->dev, + free_size * rxq->queue_size, + rxq->bd, rxq->bd_dma); + rxq->bd_dma = 0; + rxq->bd = NULL; + + rxq->rb_stts_dma = 0; + rxq->rb_stts = NULL; + + if (rxq->used_bd) + dma_free_coherent(trans->dev, + iwl_pcie_used_bd_size(trans) * + rxq->queue_size, + rxq->used_bd, rxq->used_bd_dma); + rxq->used_bd_dma = 0; + rxq->used_bd = NULL; +} + +static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans, + struct iwl_rxq *rxq) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct device *dev = trans->dev; + int i; + int free_size; + bool use_rx_td = (trans->trans_cfg->device_family >= + IWL_DEVICE_FAMILY_AX210); + size_t rb_stts_size = use_rx_td ? sizeof(__le16) : + sizeof(struct iwl_rb_status); + + spin_lock_init(&rxq->lock); + if (trans->trans_cfg->mq_rx_supported) + rxq->queue_size = trans->cfg->num_rbds; + else + rxq->queue_size = RX_QUEUE_SIZE; + + free_size = iwl_pcie_free_bd_size(trans); + + /* + * Allocate the circular buffer of Read Buffer Descriptors + * (RBDs) + */ + rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size, + &rxq->bd_dma, GFP_KERNEL); + if (!rxq->bd) + goto err; + + if (trans->trans_cfg->mq_rx_supported) { + rxq->used_bd = dma_alloc_coherent(dev, + iwl_pcie_used_bd_size(trans) * + rxq->queue_size, + &rxq->used_bd_dma, + GFP_KERNEL); + if (!rxq->used_bd) + goto err; + } + + rxq->rb_stts = (u8 *)trans_pcie->base_rb_stts + rxq->id * rb_stts_size; + rxq->rb_stts_dma = + trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size; + + return 0; + +err: + for (i = 0; i < trans->num_rx_queues; i++) { + struct iwl_rxq *rxq = &trans_pcie->rxq[i]; + + iwl_pcie_free_rxq_dma(trans, rxq); + } + + return -ENOMEM; +} + +static int iwl_pcie_rx_alloc(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rb_allocator *rba = &trans_pcie->rba; + int i, ret; + size_t rb_stts_size = trans->trans_cfg->device_family >= + IWL_DEVICE_FAMILY_AX210 ? + sizeof(__le16) : sizeof(struct iwl_rb_status); + + if (WARN_ON(trans_pcie->rxq)) + return -EINVAL; + + trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq), + GFP_KERNEL); + trans_pcie->rx_pool = kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs), + sizeof(trans_pcie->rx_pool[0]), + GFP_KERNEL); + trans_pcie->global_table = + kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs), + sizeof(trans_pcie->global_table[0]), + GFP_KERNEL); + if (!trans_pcie->rxq || !trans_pcie->rx_pool || + !trans_pcie->global_table) { + ret = -ENOMEM; + goto err; + } + + spin_lock_init(&rba->lock); + + /* + * Allocate the driver's pointer to receive buffer status. + * Allocate for all queues continuously (HW requirement). 
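+ * Each rxq later points into this buffer at offset rxq->id * rb_stts_size.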
+ */ + trans_pcie->base_rb_stts = + dma_alloc_coherent(trans->dev, + rb_stts_size * trans->num_rx_queues, + &trans_pcie->base_rb_stts_dma, + GFP_KERNEL); + if (!trans_pcie->base_rb_stts) { + ret = -ENOMEM; + goto err; + } + + for (i = 0; i < trans->num_rx_queues; i++) { + struct iwl_rxq *rxq = &trans_pcie->rxq[i]; + + rxq->id = i; + ret = iwl_pcie_alloc_rxq_dma(trans, rxq); + if (ret) + goto err; + } + return 0; + +err: + if (trans_pcie->base_rb_stts) { + dma_free_coherent(trans->dev, + rb_stts_size * trans->num_rx_queues, + trans_pcie->base_rb_stts, + trans_pcie->base_rb_stts_dma); + trans_pcie->base_rb_stts = NULL; + trans_pcie->base_rb_stts_dma = 0; + } + kfree(trans_pcie->rx_pool); + trans_pcie->rx_pool = NULL; + kfree(trans_pcie->global_table); + trans_pcie->global_table = NULL; + kfree(trans_pcie->rxq); + trans_pcie->rxq = NULL; + + return ret; +} + +static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + u32 rb_size; + const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ + + switch (trans_pcie->rx_buf_size) { + case IWL_AMSDU_4K: + rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; + break; + case IWL_AMSDU_8K: + rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K; + break; + case IWL_AMSDU_12K: + rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K; + break; + default: + WARN_ON(1); + rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; + } + + if (!iwl_trans_grab_nic_access(trans)) + return; + + /* Stop Rx DMA */ + iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); + /* reset and flush pointers */ + iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0); + iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0); + iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0); + + /* Reset driver's Rx queue write index */ + iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); + + /* Tell device where to find RBD circular buffer in DRAM */ + iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG, + (u32)(rxq->bd_dma >> 8)); + + /* Tell device where in DRAM to update its Rx status */ + iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG, + rxq->rb_stts_dma >> 4); + + /* Enable Rx DMA + * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in + * the credit mechanism in 5000 HW RX FIFO + * Direct rx interrupts to hosts + * Rx buffer size 4 or 8k or 12k + * RB timeout 0x10 + * 256 RBDs + */ + iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, + FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | + FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | + FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | + rb_size | + (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) | + (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS)); + + iwl_trans_release_nic_access(trans); + + /* Set interrupt coalescing timer to default (2048 usecs) */ + iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); + + /* W/A for interrupt coalescing bug in 7260 and 3160 */ + if (trans->cfg->host_interrupt_operation_mode) + iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE); +} + +static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + u32 rb_size, enabled = 0; + int i; + + switch (trans_pcie->rx_buf_size) { + case IWL_AMSDU_2K: + rb_size = RFH_RXF_DMA_RB_SIZE_2K; + break; + case IWL_AMSDU_4K: + rb_size = RFH_RXF_DMA_RB_SIZE_4K; + break; + case IWL_AMSDU_8K: + rb_size = RFH_RXF_DMA_RB_SIZE_8K; + break; + case IWL_AMSDU_12K: + rb_size = RFH_RXF_DMA_RB_SIZE_12K; + break; + default: + WARN_ON(1); + 
rb_size = RFH_RXF_DMA_RB_SIZE_4K; + } + + if (!iwl_trans_grab_nic_access(trans)) + return; + + /* Stop Rx DMA */ + iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0); + /* disable free amd used rx queue operation */ + iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0); + + for (i = 0; i < trans->num_rx_queues; i++) { + /* Tell device where to find RBD free table in DRAM */ + iwl_write_prph64_no_grab(trans, + RFH_Q_FRBDCB_BA_LSB(i), + trans_pcie->rxq[i].bd_dma); + /* Tell device where to find RBD used table in DRAM */ + iwl_write_prph64_no_grab(trans, + RFH_Q_URBDCB_BA_LSB(i), + trans_pcie->rxq[i].used_bd_dma); + /* Tell device where in DRAM to update its Rx status */ + iwl_write_prph64_no_grab(trans, + RFH_Q_URBD_STTS_WPTR_LSB(i), + trans_pcie->rxq[i].rb_stts_dma); + /* Reset device indice tables */ + iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0); + iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0); + iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0); + + enabled |= BIT(i) | BIT(i + 16); + } + + /* + * Enable Rx DMA + * Rx buffer size 4 or 8k or 12k + * Min RB size 4 or 8 + * Drop frames that exceed RB size + * 512 RBDs + */ + iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, + RFH_DMA_EN_ENABLE_VAL | rb_size | + RFH_RXF_DMA_MIN_RB_4_8 | + RFH_RXF_DMA_DROP_TOO_LARGE_MASK | + RFH_RXF_DMA_RBDCB_SIZE_512); + + /* + * Activate DMA snooping. + * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe + * Default queue is 0 + */ + iwl_write_prph_no_grab(trans, RFH_GEN_CFG, + RFH_GEN_CFG_RFH_DMA_SNOOP | + RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) | + RFH_GEN_CFG_SERVICE_DMA_SNOOP | + RFH_GEN_CFG_VAL(RB_CHUNK_SIZE, + trans->trans_cfg->integrated ? + RFH_GEN_CFG_RB_CHUNK_SIZE_64 : + RFH_GEN_CFG_RB_CHUNK_SIZE_128)); + /* Enable the relevant rx queues */ + iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled); + + iwl_trans_release_nic_access(trans); + + /* Set interrupt coalescing timer to default (2048 usecs) */ + iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); +} + +void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) +{ + lockdep_assert_held(&rxq->lock); + + INIT_LIST_HEAD(&rxq->rx_free); + INIT_LIST_HEAD(&rxq->rx_used); + rxq->free_count = 0; + rxq->used_count = 0; +} + +static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget); + +static int iwl_pcie_napi_poll(struct napi_struct *napi, int budget) +{ + struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi); + struct iwl_trans_pcie *trans_pcie; + struct iwl_trans *trans; + int ret; + + trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev); + trans = trans_pcie->trans; + + ret = iwl_pcie_rx_handle(trans, rxq->id, budget); + + IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n", + rxq->id, ret, budget); + + if (ret < budget) { + spin_lock(&trans_pcie->irq_lock); + if (test_bit(STATUS_INT_ENABLED, &trans->status)) + _iwl_enable_interrupts(trans); + spin_unlock(&trans_pcie->irq_lock); + + napi_complete_done(&rxq->napi, ret); + } + + return ret; +} + +static int iwl_pcie_napi_poll_msix(struct napi_struct *napi, int budget) +{ + struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi); + struct iwl_trans_pcie *trans_pcie; + struct iwl_trans *trans; + int ret; + + trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev); + trans = trans_pcie->trans; + + ret = iwl_pcie_rx_handle(trans, rxq->id, budget); + IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n", rxq->id, ret, + budget); + + if (ret < budget) { + int irq_line = rxq->id; + + /* FIRST_RSS is shared 
with line 0 */ + if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS && + rxq->id == 1) + irq_line = 0; + + spin_lock(&trans_pcie->irq_lock); + iwl_pcie_clear_irq(trans, irq_line); + spin_unlock(&trans_pcie->irq_lock); + + napi_complete_done(&rxq->napi, ret); + } + + return ret; +} + +void iwl_pcie_rx_napi_sync(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int i; + + if (unlikely(!trans_pcie->rxq)) + return; + + for (i = 0; i < trans->num_rx_queues; i++) { + struct iwl_rxq *rxq = &trans_pcie->rxq[i]; + + if (rxq && rxq->napi.poll) + napi_synchronize(&rxq->napi); + } +} + +static int _iwl_pcie_rx_init(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rxq *def_rxq; + struct iwl_rb_allocator *rba = &trans_pcie->rba; + int i, err, queue_size, allocator_pool_size, num_alloc; + + if (!trans_pcie->rxq) { + err = iwl_pcie_rx_alloc(trans); + if (err) + return err; + } + def_rxq = trans_pcie->rxq; + + cancel_work_sync(&rba->rx_alloc); + + spin_lock_bh(&rba->lock); + atomic_set(&rba->req_pending, 0); + atomic_set(&rba->req_ready, 0); + INIT_LIST_HEAD(&rba->rbd_allocated); + INIT_LIST_HEAD(&rba->rbd_empty); + spin_unlock_bh(&rba->lock); + + /* free all first - we overwrite everything here */ + iwl_pcie_free_rbs_pool(trans); + + for (i = 0; i < RX_QUEUE_SIZE; i++) + def_rxq->queue[i] = NULL; + + for (i = 0; i < trans->num_rx_queues; i++) { + struct iwl_rxq *rxq = &trans_pcie->rxq[i]; + + spin_lock_bh(&rxq->lock); + /* + * Set read write pointer to reflect that we have processed + * and used all buffers, but have not restocked the Rx queue + * with fresh buffers + */ + rxq->read = 0; + rxq->write = 0; + rxq->write_actual = 0; + memset(rxq->rb_stts, 0, + (trans->trans_cfg->device_family >= + IWL_DEVICE_FAMILY_AX210) ? + sizeof(__le16) : sizeof(struct iwl_rb_status)); + + iwl_pcie_rx_init_rxb_lists(rxq); + + spin_unlock_bh(&rxq->lock); + + if (!rxq->napi.poll) { + int (*poll)(struct napi_struct *, int) = iwl_pcie_napi_poll; + + if (trans_pcie->msix_enabled) + poll = iwl_pcie_napi_poll_msix; + + netif_napi_add(&trans_pcie->napi_dev, &rxq->napi, + poll); + napi_enable(&rxq->napi); + } + + } + + /* move the pool to the default queue and allocator ownerships */ + queue_size = trans->trans_cfg->mq_rx_supported ? 
+ trans_pcie->num_rx_bufs - 1 : RX_QUEUE_SIZE; + allocator_pool_size = trans->num_rx_queues * + (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC); + num_alloc = queue_size + allocator_pool_size; + + for (i = 0; i < num_alloc; i++) { + struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i]; + + if (i < allocator_pool_size) + list_add(&rxb->list, &rba->rbd_empty); + else + list_add(&rxb->list, &def_rxq->rx_used); + trans_pcie->global_table[i] = rxb; + rxb->vid = (u16)(i + 1); + rxb->invalid = true; + } + + iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq); + + return 0; +} + +int iwl_pcie_rx_init(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int ret = _iwl_pcie_rx_init(trans); + + if (ret) + return ret; + + if (trans->trans_cfg->mq_rx_supported) + iwl_pcie_rx_mq_hw_init(trans); + else + iwl_pcie_rx_hw_init(trans, trans_pcie->rxq); + + iwl_pcie_rxq_restock(trans, trans_pcie->rxq); + + spin_lock_bh(&trans_pcie->rxq->lock); + iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq); + spin_unlock_bh(&trans_pcie->rxq->lock); + + return 0; +} + +int iwl_pcie_gen2_rx_init(struct iwl_trans *trans) +{ + /* Set interrupt coalescing timer to default (2048 usecs) */ + iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); + + /* + * We don't configure the RFH. + * Restock will be done at alive, after firmware configured the RFH. + */ + return _iwl_pcie_rx_init(trans); +} + +void iwl_pcie_rx_free(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rb_allocator *rba = &trans_pcie->rba; + int i; + size_t rb_stts_size = trans->trans_cfg->device_family >= + IWL_DEVICE_FAMILY_AX210 ? + sizeof(__le16) : sizeof(struct iwl_rb_status); + + /* + * if rxq is NULL, it means that nothing has been allocated, + * exit now + */ + if (!trans_pcie->rxq) { + IWL_DEBUG_INFO(trans, "Free NULL rx context\n"); + return; + } + + cancel_work_sync(&rba->rx_alloc); + + iwl_pcie_free_rbs_pool(trans); + + if (trans_pcie->base_rb_stts) { + dma_free_coherent(trans->dev, + rb_stts_size * trans->num_rx_queues, + trans_pcie->base_rb_stts, + trans_pcie->base_rb_stts_dma); + trans_pcie->base_rb_stts = NULL; + trans_pcie->base_rb_stts_dma = 0; + } + + for (i = 0; i < trans->num_rx_queues; i++) { + struct iwl_rxq *rxq = &trans_pcie->rxq[i]; + + iwl_pcie_free_rxq_dma(trans, rxq); + + if (rxq->napi.poll) { + napi_disable(&rxq->napi); + netif_napi_del(&rxq->napi); + } + } + kfree(trans_pcie->rx_pool); + kfree(trans_pcie->global_table); + kfree(trans_pcie->rxq); + + if (trans_pcie->alloc_page) + __free_pages(trans_pcie->alloc_page, trans_pcie->rx_page_order); +} + +static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq, + struct iwl_rb_allocator *rba) +{ + spin_lock(&rba->lock); + list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); + spin_unlock(&rba->lock); +} + +/* + * iwl_pcie_rx_reuse_rbd - Recycle used RBDs + * + * Called when a RBD can be reused. The RBD is transferred to the allocator. 
+ * When there are 2 empty RBDs - a request for allocation is posted + */ +static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans, + struct iwl_rx_mem_buffer *rxb, + struct iwl_rxq *rxq, bool emergency) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rb_allocator *rba = &trans_pcie->rba; + + /* Move the RBD to the used list, will be moved to allocator in batches + * before claiming or posting a request*/ + list_add_tail(&rxb->list, &rxq->rx_used); + + if (unlikely(emergency)) + return; + + /* Count the allocator owned RBDs */ + rxq->used_count++; + + /* If we have RX_POST_REQ_ALLOC new released rx buffers - + * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is + * used for the case we failed to claim RX_CLAIM_REQ_ALLOC, + * after but we still need to post another request. + */ + if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) { + /* Move the 2 RBDs to the allocator ownership. + Allocator has another 6 from pool for the request completion*/ + iwl_pcie_rx_move_to_allocator(rxq, rba); + + atomic_inc(&rba->req_pending); + queue_work(rba->alloc_wq, &rba->rx_alloc); + } +} + +static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, + struct iwl_rxq *rxq, + struct iwl_rx_mem_buffer *rxb, + bool emergency, + int i) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; + bool page_stolen = false; + int max_len = trans_pcie->rx_buf_bytes; + u32 offset = 0; + + if (WARN_ON(!rxb)) + return; + + dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE); + + while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) { + struct iwl_rx_packet *pkt; + bool reclaim; + int len; + struct iwl_rx_cmd_buffer rxcb = { + ._offset = rxb->offset + offset, + ._rx_page_order = trans_pcie->rx_page_order, + ._page = rxb->page, + ._page_stolen = false, + .truesize = max_len, + }; + + pkt = rxb_addr(&rxcb); + + if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) { + IWL_DEBUG_RX(trans, + "Q %d: RB end marker at offset %d\n", + rxq->id, offset); + break; + } + + WARN((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >> + FH_RSCSR_RXQ_POS != rxq->id, + "frame on invalid queue - is on %d and indicates %d\n", + rxq->id, + (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >> + FH_RSCSR_RXQ_POS); + + IWL_DEBUG_RX(trans, + "Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n", + rxq->id, offset, + iwl_get_cmd_string(trans, + WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)), + pkt->hdr.group_id, pkt->hdr.cmd, + le16_to_cpu(pkt->hdr.sequence)); + + len = iwl_rx_packet_len(pkt); + len += sizeof(u32); /* account for status word */ + + offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN); + + /* check that what the device tells us made sense */ + if (len < sizeof(*pkt) || offset > max_len) + break; + + trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len); + trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len); + + /* Reclaim a command buffer only if this packet is a response + * to a (driver-originated) command. + * If the packet (e.g. Rx frame) originated from uCode, + * there is no command buffer to reclaim. + * Ucode should set SEQ_RX_FRAME bit if ucode-originated, + * but apparently a few don't get set; catch them here. 
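+ * Commands listed in no_reclaim_cmds (group 0 only) are likewise + * excluded from reclaim below.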
*/ + reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME); + if (reclaim && !pkt->hdr.group_id) { + int i; + + for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) { + if (trans_pcie->no_reclaim_cmds[i] == + pkt->hdr.cmd) { + reclaim = false; + break; + } + } + } + + if (rxq->id == trans_pcie->def_rx_queue) + iwl_op_mode_rx(trans->op_mode, &rxq->napi, + &rxcb); + else + iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi, + &rxcb, rxq->id); + + /* + * After here, we should always check rxcb._page_stolen, + * if it is true then one of the handlers took the page. + */ + + if (reclaim && txq) { + u16 sequence = le16_to_cpu(pkt->hdr.sequence); + int index = SEQ_TO_INDEX(sequence); + int cmd_index = iwl_txq_get_cmd_index(txq, index); + + kfree_sensitive(txq->entries[cmd_index].free_buf); + txq->entries[cmd_index].free_buf = NULL; + + /* Invoke any callbacks, transfer the buffer to caller, + * and fire off the (possibly) blocking + * iwl_trans_send_cmd() + * as we reclaim the driver command queue */ + if (!rxcb._page_stolen) + iwl_pcie_hcmd_complete(trans, &rxcb); + else + IWL_WARN(trans, "Claim null rxb?\n"); + } + + page_stolen |= rxcb._page_stolen; + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + break; + } + + /* page was stolen from us -- free our reference */ + if (page_stolen) { + __free_pages(rxb->page, trans_pcie->rx_page_order); + rxb->page = NULL; + } + + /* Reuse the page if possible. For notification packets and + * SKBs that fail to Rx correctly, add them back into the + * rx_free list for reuse later. */ + if (rxb->page != NULL) { + rxb->page_dma = + dma_map_page(trans->dev, rxb->page, rxb->offset, + trans_pcie->rx_buf_bytes, + DMA_FROM_DEVICE); + if (dma_mapping_error(trans->dev, rxb->page_dma)) { + /* + * free the page(s) as well to not break + * the invariant that the items on the used + * list have no page(s) + */ + __free_pages(rxb->page, trans_pcie->rx_page_order); + rxb->page = NULL; + iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); + } else { + list_add_tail(&rxb->list, &rxq->rx_free); + rxq->free_count++; + } + } else + iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); +} + +static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans, + struct iwl_rxq *rxq, int i, + bool *join) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rx_mem_buffer *rxb; + u16 vid; + + BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32); + BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc_bz) != 4); + + if (!trans->trans_cfg->mq_rx_supported) { + rxb = rxq->queue[i]; + rxq->queue[i] = NULL; + return rxb; + } + + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { + struct iwl_rx_completion_desc_bz *cd = rxq->used_bd; + + vid = le16_to_cpu(cd[i].rbid); + *join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED; + } else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + struct iwl_rx_completion_desc *cd = rxq->used_bd; + + vid = le16_to_cpu(cd[i].rbid); + *join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED; + } else { + __le32 *cd = rxq->used_bd; + + vid = le32_to_cpu(cd[i]) & 0x0FFF; /* 12-bit VID */ + } + + if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs)) + goto out_err; + + rxb = trans_pcie->global_table[vid - 1]; + if (rxb->invalid) + goto out_err; + + IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (u32)rxb->vid); + + rxb->invalid = true; + + return rxb; + +out_err: + WARN(1, "Invalid rxb from HW %u\n", (u32)vid); + iwl_force_nmi(trans); + return NULL; +} + +/* + * iwl_pcie_rx_handle - Main entry function 
for receiving responses from fw + */ +static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rxq *rxq; + u32 r, i, count = 0, handled = 0; + bool emergency = false; + + if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd)) + return budget; + + rxq = &trans_pcie->rxq[queue]; + +restart: + spin_lock(&rxq->lock); + /* uCode's read index (stored in shared DRAM) indicates the last Rx + * buffer that the driver may process (last buffer filled by ucode). */ + r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF; + i = rxq->read; + + /* W/A 9000 device step A0 wrap-around bug */ + r &= (rxq->queue_size - 1); + + /* Rx interrupt, but nothing sent from uCode */ + if (i == r) + IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r); + + while (i != r && ++handled < budget) { + struct iwl_rb_allocator *rba = &trans_pcie->rba; + struct iwl_rx_mem_buffer *rxb; + /* number of RBDs still waiting for page allocation */ + u32 rb_pending_alloc = + atomic_read(&trans_pcie->rba.req_pending) * + RX_CLAIM_REQ_ALLOC; + bool join = false; + + if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 && + !emergency)) { + iwl_pcie_rx_move_to_allocator(rxq, rba); + emergency = true; + IWL_DEBUG_TPT(trans, + "RX path is in emergency. Pending allocations %d\n", + rb_pending_alloc); + } + + IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i); + + rxb = iwl_pcie_get_rxb(trans, rxq, i, &join); + if (!rxb) + goto out; + + if (unlikely(join || rxq->next_rb_is_fragment)) { + rxq->next_rb_is_fragment = join; + /* + * We can only get a multi-RB in the following cases: + * - firmware issue, sending a too big notification + * - sniffer mode with a large A-MSDU + * - large MTU frames (>2k) + * since the multi-RB functionality is limited to newer + * hardware that cannot put multiple entries into a + * single RB. + * + * Right now, the higher layers aren't set up to deal + * with that, so discard all of these. + */ + list_add_tail(&rxb->list, &rxq->rx_free); + rxq->free_count++; + } else { + iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i); + } + + i = (i + 1) & (rxq->queue_size - 1); + + /* + * If we have RX_CLAIM_REQ_ALLOC released rx buffers - + * try to claim the pre-allocated buffers from the allocator. + * If not ready - will try to reclaim next time. + * There is no need to reschedule work - allocator exits only + * on success + */ + if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) + iwl_pcie_rx_allocator_get(trans, rxq); + + if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) { + /* Add the remaining empty RBDs for allocator use */ + iwl_pcie_rx_move_to_allocator(rxq, rba); + } else if (emergency) { + count++; + if (count == 8) { + count = 0; + if (rb_pending_alloc < rxq->queue_size / 3) { + IWL_DEBUG_TPT(trans, + "RX path exited emergency. Pending allocations %d\n", + rb_pending_alloc); + emergency = false; + } + + rxq->read = i; + spin_unlock(&rxq->lock); + iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); + iwl_pcie_rxq_restock(trans, rxq); + goto restart; + } + } + } +out: + /* Backtrack one entry */ + rxq->read = i; + spin_unlock(&rxq->lock); + + /* + * handle a case where in emergency there are some unallocated RBDs. + * those RBDs are in the used list, but are not tracked by the queue's + * used_count which counts allocator owned RBDs. 
+ * unallocated emergency RBDs must be allocated on exit, otherwise + * when called again the function may not be in emergency mode and + * they will be handed to the allocator with no tracking in the RBD + * allocator counters, which will lead to them never being claimed back + * by the queue. + * by allocating them here, they are now in the queue free list, and + * will be restocked by the next call of iwl_pcie_rxq_restock. + */ + if (unlikely(emergency && count)) + iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); + + iwl_pcie_rxq_restock(trans, rxq); + + return handled; +} + +static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry) +{ + u8 queue = entry->entry; + struct msix_entry *entries = entry - queue; + + return container_of(entries, struct iwl_trans_pcie, msix_entries[0]); +} + +/* + * iwl_pcie_rx_msix_handle - Main entry function for receiving responses from fw + * This interrupt handler should be used with RSS queue only. + */ +irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id) +{ + struct msix_entry *entry = dev_id; + struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry); + struct iwl_trans *trans = trans_pcie->trans; + struct iwl_rxq *rxq; + + trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0); + + if (WARN_ON(entry->entry >= trans->num_rx_queues)) + return IRQ_NONE; + + if (!trans_pcie->rxq) { + if (net_ratelimit()) + IWL_ERR(trans, + "[%d] Got MSI-X interrupt before we have Rx queues\n", + entry->entry); + return IRQ_NONE; + } + + rxq = &trans_pcie->rxq[entry->entry]; + lock_map_acquire(&trans->sync_cmd_lockdep_map); + IWL_DEBUG_ISR(trans, "[%d] Got interrupt\n", entry->entry); + + local_bh_disable(); + if (napi_schedule_prep(&rxq->napi)) + __napi_schedule(&rxq->napi); + else + iwl_pcie_clear_irq(trans, entry->entry); + local_bh_enable(); + + lock_map_release(&trans->sync_cmd_lockdep_map); + + return IRQ_HANDLED; +} + +/* + * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card + */ +static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) +{ + int i; + + /* W/A for WiFi/WiMAX coex and WiMAX own the RF */ + if (trans->cfg->internal_wimax_coex && + !trans->cfg->apmg_not_supported && + (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) & + APMS_CLK_VAL_MRB_FUNC_MODE) || + (iwl_read_prph(trans, APMG_PS_CTRL_REG) & + APMG_PS_CTRL_VAL_RESET_REQ))) { + clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); + iwl_op_mode_wimax_active(trans->op_mode); + wake_up(&trans->wait_command_queue); + return; + } + + for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { + if (!trans->txqs.txq[i]) + continue; + del_timer(&trans->txqs.txq[i]->stuck_timer); + } + + /* The STATUS_FW_ERROR bit is set in this function. This must happen + * before we wake up the command caller, to ensure a proper cleanup. 
*/ + iwl_trans_fw_error(trans, false); + + clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); + wake_up(&trans->wait_command_queue); +} + +static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans) +{ + u32 inta; + + lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock); + + trace_iwlwifi_dev_irq(trans->dev); + + /* Discover which interrupts are active/pending */ + inta = iwl_read32(trans, CSR_INT); + + /* the thread will service interrupts and re-enable them */ + return inta; +} + +/* a device (PCI-E) page is 4096 bytes long */ +#define ICT_SHIFT 12 +#define ICT_SIZE (1 << ICT_SHIFT) +#define ICT_COUNT (ICT_SIZE / sizeof(u32)) + +/* interrupt handler using ict table, with this interrupt driver will + * stop using INTA register to get device's interrupt, reading this register + * is expensive, device will write interrupts in ICT dram table, increment + * index then will fire interrupt to driver, driver will OR all ICT table + * entries from current index up to table entry with 0 value. the result is + * the interrupt we need to service, driver will set the entries back to 0 and + * set index. + */ +static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + u32 inta; + u32 val = 0; + u32 read; + + trace_iwlwifi_dev_irq(trans->dev); + + /* Ignore interrupt if there's nothing in NIC to service. + * This may be due to IRQ shared with another device, + * or due to sporadic interrupts thrown from our NIC. */ + read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); + trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read); + if (!read) + return 0; + + /* + * Collect all entries up to the first 0, starting from ict_index; + * note we already read at ict_index. + */ + do { + val |= read; + IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n", + trans_pcie->ict_index, read); + trans_pcie->ict_tbl[trans_pcie->ict_index] = 0; + trans_pcie->ict_index = + ((trans_pcie->ict_index + 1) & (ICT_COUNT - 1)); + + read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); + trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, + read); + } while (read); + + /* We should not get this value, just ignore it. */ + if (val == 0xffffffff) + val = 0; + + /* + * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit + * (bit 15 before shifting it to 31) to clear when using interrupt + * coalescing. fortunately, bits 18 and 19 stay set when this happens + * so we use them to decide on the real state of the Rx bit. + * In order words, bit 15 is set if bit 18 or bit 19 are set. + */ + if (val & 0xC0000) + val |= 0x8000; + + inta = (0xff & val) | ((0xff00 & val) << 16); + return inta; +} + +void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct isr_statistics *isr_stats = &trans_pcie->isr_stats; + bool hw_rfkill, prev, report; + + mutex_lock(&trans_pcie->mutex); + prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status); + hw_rfkill = iwl_is_rfkill_set(trans); + if (hw_rfkill) { + set_bit(STATUS_RFKILL_OPMODE, &trans->status); + set_bit(STATUS_RFKILL_HW, &trans->status); + } + if (trans_pcie->opmode_down) + report = hw_rfkill; + else + report = test_bit(STATUS_RFKILL_OPMODE, &trans->status); + + IWL_WARN(trans, "RF_KILL bit toggled to %s.\n", + hw_rfkill ? 
"disable radio" : "enable radio"); + + isr_stats->rfkill++; + + if (prev != report) + iwl_trans_pcie_rf_kill(trans, report, from_irq); + mutex_unlock(&trans_pcie->mutex); + + if (hw_rfkill) { + if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE, + &trans->status)) + IWL_DEBUG_RF_KILL(trans, + "Rfkill while SYNC HCMD in flight\n"); + wake_up(&trans->wait_command_queue); + } else { + clear_bit(STATUS_RFKILL_HW, &trans->status); + if (trans_pcie->opmode_down) + clear_bit(STATUS_RFKILL_OPMODE, &trans->status); + } +} + +irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) +{ + struct iwl_trans *trans = dev_id; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct isr_statistics *isr_stats = &trans_pcie->isr_stats; + u32 inta = 0; + u32 handled = 0; + bool polling = false; + + lock_map_acquire(&trans->sync_cmd_lockdep_map); + + spin_lock_bh(&trans_pcie->irq_lock); + + /* dram interrupt table not set yet, + * use legacy interrupt. + */ + if (likely(trans_pcie->use_ict)) + inta = iwl_pcie_int_cause_ict(trans); + else + inta = iwl_pcie_int_cause_non_ict(trans); + + if (iwl_have_debug_level(IWL_DL_ISR)) { + IWL_DEBUG_ISR(trans, + "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n", + inta, trans_pcie->inta_mask, + iwl_read32(trans, CSR_INT_MASK), + iwl_read32(trans, CSR_FH_INT_STATUS)); + if (inta & (~trans_pcie->inta_mask)) + IWL_DEBUG_ISR(trans, + "We got a masked interrupt (0x%08x)\n", + inta & (~trans_pcie->inta_mask)); + } + + inta &= trans_pcie->inta_mask; + + /* + * Ignore interrupt if there's nothing in NIC to service. + * This may be due to IRQ shared with another device, + * or due to sporadic interrupts thrown from our NIC. + */ + if (unlikely(!inta)) { + IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n"); + /* + * Re-enable interrupts here since we don't + * have anything to service + */ + if (test_bit(STATUS_INT_ENABLED, &trans->status)) + _iwl_enable_interrupts(trans); + spin_unlock_bh(&trans_pcie->irq_lock); + lock_map_release(&trans->sync_cmd_lockdep_map); + return IRQ_NONE; + } + + if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) { + /* + * Hardware disappeared. It might have + * already raised an interrupt. + */ + IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta); + spin_unlock_bh(&trans_pcie->irq_lock); + goto out; + } + + /* Ack/clear/reset pending uCode interrupts. + * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, + */ + /* There is a hardware bug in the interrupt mask function that some + * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if + * they are disabled in the CSR_INT_MASK register. Furthermore the + * ICT interrupt handling mechanism has another bug that might cause + * these unmasked interrupts fail to be detected. We workaround the + * hardware bugs here by ACKing all the possible interrupts so that + * interrupt coalescing can still be achieved. + */ + iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask); + + if (iwl_have_debug_level(IWL_DL_ISR)) + IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n", + inta, iwl_read32(trans, CSR_INT_MASK)); + + spin_unlock_bh(&trans_pcie->irq_lock); + + /* Now service all interrupt bits discovered above. */ + if (inta & CSR_INT_BIT_HW_ERR) { + IWL_ERR(trans, "Hardware error detected. 
Restarting.\n"); + + /* Tell the device to stop sending interrupts */ + iwl_disable_interrupts(trans); + + isr_stats->hw++; + iwl_pcie_irq_handle_error(trans); + + handled |= CSR_INT_BIT_HW_ERR; + + goto out; + } + + /* NIC fires this, but we don't use it, redundant with WAKEUP */ + if (inta & CSR_INT_BIT_SCD) { + IWL_DEBUG_ISR(trans, + "Scheduler finished to transmit the frame/frames.\n"); + isr_stats->sch++; + } + + /* Alive notification via Rx interrupt will do the real work */ + if (inta & CSR_INT_BIT_ALIVE) { + IWL_DEBUG_ISR(trans, "Alive interrupt\n"); + isr_stats->alive++; + if (trans->trans_cfg->gen2) { + /* + * We can restock, since firmware configured + * the RFH + */ + iwl_pcie_rxmq_restock(trans, trans_pcie->rxq); + } + + handled |= CSR_INT_BIT_ALIVE; + } + + /* Safely ignore these bits for debug checks below */ + inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); + + /* HW RF KILL switch toggled */ + if (inta & CSR_INT_BIT_RF_KILL) { + iwl_pcie_handle_rfkill_irq(trans, true); + handled |= CSR_INT_BIT_RF_KILL; + } + + /* Chip got too hot and stopped itself */ + if (inta & CSR_INT_BIT_CT_KILL) { + IWL_ERR(trans, "Microcode CT kill error detected.\n"); + isr_stats->ctkill++; + handled |= CSR_INT_BIT_CT_KILL; + } + + /* Error detected by uCode */ + if (inta & CSR_INT_BIT_SW_ERR) { + IWL_ERR(trans, "Microcode SW error detected. " + " Restarting 0x%X.\n", inta); + isr_stats->sw++; + iwl_pcie_irq_handle_error(trans); + handled |= CSR_INT_BIT_SW_ERR; + } + + /* uCode wakes up after power-down sleep */ + if (inta & CSR_INT_BIT_WAKEUP) { + IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); + iwl_pcie_rxq_check_wrptr(trans); + iwl_pcie_txq_check_wrptrs(trans); + + isr_stats->wakeup++; + + handled |= CSR_INT_BIT_WAKEUP; + } + + /* All uCode command responses, including Tx command responses, + * Rx "responses" (frame-received notification), and other + * notifications from uCode come through here*/ + if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX | + CSR_INT_BIT_RX_PERIODIC)) { + IWL_DEBUG_ISR(trans, "Rx interrupt\n"); + if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { + handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); + iwl_write32(trans, CSR_FH_INT_STATUS, + CSR_FH_INT_RX_MASK); + } + if (inta & CSR_INT_BIT_RX_PERIODIC) { + handled |= CSR_INT_BIT_RX_PERIODIC; + iwl_write32(trans, + CSR_INT, CSR_INT_BIT_RX_PERIODIC); + } + /* Sending RX interrupt require many steps to be done in the + * device: + * 1- write interrupt to current index in ICT table. + * 2- dma RX frame. + * 3- update RX shared data to indicate last write index. + * 4- send interrupt. + * This could lead to RX race, driver could receive RX interrupt + * but the shared data changes does not reflect this; + * periodic interrupt will detect any dangling Rx activity. + */ + + /* Disable periodic interrupt; we use it as just a one-shot. */ + iwl_write8(trans, CSR_INT_PERIODIC_REG, + CSR_INT_PERIODIC_DIS); + + /* + * Enable periodic interrupt in 8 msec only if we received + * real RX interrupt (instead of just periodic int), to catch + * any dangling Rx interrupt. If it was just the periodic + * interrupt, there was no dangling Rx activity, and no need + * to extend the periodic interrupt; one-shot is enough. 
+ */ + if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) + iwl_write8(trans, CSR_INT_PERIODIC_REG, + CSR_INT_PERIODIC_ENA); + + isr_stats->rx++; + + local_bh_disable(); + if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) { + polling = true; + __napi_schedule(&trans_pcie->rxq[0].napi); + } + local_bh_enable(); + } + + /* This "Tx" DMA channel is used only for loading uCode */ + if (inta & CSR_INT_BIT_FH_TX) { + iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK); + IWL_DEBUG_ISR(trans, "uCode load interrupt\n"); + isr_stats->tx++; + handled |= CSR_INT_BIT_FH_TX; + /* Wake up uCode load routine, now that load is complete */ + trans_pcie->ucode_write_complete = true; + wake_up(&trans_pcie->ucode_write_waitq); + /* Wake up IMR write routine, now that write to SRAM is complete */ + if (trans_pcie->imr_status == IMR_D2S_REQUESTED) { + trans_pcie->imr_status = IMR_D2S_COMPLETED; + wake_up(&trans_pcie->ucode_write_waitq); + } + } + + if (inta & ~handled) { + IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled); + isr_stats->unhandled++; + } + + if (inta & ~(trans_pcie->inta_mask)) { + IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n", + inta & ~trans_pcie->inta_mask); + } + + if (!polling) { + spin_lock_bh(&trans_pcie->irq_lock); + /* only Re-enable all interrupt if disabled by irq */ + if (test_bit(STATUS_INT_ENABLED, &trans->status)) + _iwl_enable_interrupts(trans); + /* we are loading the firmware, enable FH_TX interrupt only */ + else if (handled & CSR_INT_BIT_FH_TX) + iwl_enable_fw_load_int(trans); + /* Re-enable RF_KILL if it occurred */ + else if (handled & CSR_INT_BIT_RF_KILL) + iwl_enable_rfkill_int(trans); + /* Re-enable the ALIVE / Rx interrupt if it occurred */ + else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX)) + iwl_enable_fw_load_int_ctx_info(trans); + spin_unlock_bh(&trans_pcie->irq_lock); + } + +out: + lock_map_release(&trans->sync_cmd_lockdep_map); + return IRQ_HANDLED; +} + +/****************************************************************************** + * + * ICT functions + * + ******************************************************************************/ + +/* Free dram table */ +void iwl_pcie_free_ict(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + if (trans_pcie->ict_tbl) { + dma_free_coherent(trans->dev, ICT_SIZE, + trans_pcie->ict_tbl, + trans_pcie->ict_tbl_dma); + trans_pcie->ict_tbl = NULL; + trans_pcie->ict_tbl_dma = 0; + } +} + +/* + * allocate dram shared table, it is an aligned memory + * block of ICT_SIZE. + * also reset all data related to ICT table interrupt. + */ +int iwl_pcie_alloc_ict(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + trans_pcie->ict_tbl = + dma_alloc_coherent(trans->dev, ICT_SIZE, + &trans_pcie->ict_tbl_dma, GFP_KERNEL); + if (!trans_pcie->ict_tbl) + return -ENOMEM; + + /* just an API sanity check ... it is guaranteed to be aligned */ + if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) { + iwl_pcie_free_ict(trans); + return -EINVAL; + } + + return 0; +} + +/* Device is going up inform it about using ICT interrupt table, + * also we need to tell the driver to start using ICT interrupt. 
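+ * (Below, the table is zeroed and its DMA address, shifted right by
+ * ICT_SHIFT, is written to CSR_DRAM_INT_TBL_REG together with the
+ * enable, wrap-check and write-pointer bits before interrupts are
+ * re-enabled.)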
+ */ +void iwl_pcie_reset_ict(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + u32 val; + + if (!trans_pcie->ict_tbl) + return; + + spin_lock_bh(&trans_pcie->irq_lock); + _iwl_disable_interrupts(trans); + + memset(trans_pcie->ict_tbl, 0, ICT_SIZE); + + val = trans_pcie->ict_tbl_dma >> ICT_SHIFT; + + val |= CSR_DRAM_INT_TBL_ENABLE | + CSR_DRAM_INIT_TBL_WRAP_CHECK | + CSR_DRAM_INIT_TBL_WRITE_POINTER; + + IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val); + + iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val); + trans_pcie->use_ict = true; + trans_pcie->ict_index = 0; + iwl_write32(trans, CSR_INT, trans_pcie->inta_mask); + _iwl_enable_interrupts(trans); + spin_unlock_bh(&trans_pcie->irq_lock); +} + +/* Device is going down disable ict interrupt usage */ +void iwl_pcie_disable_ict(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + spin_lock_bh(&trans_pcie->irq_lock); + trans_pcie->use_ict = false; + spin_unlock_bh(&trans_pcie->irq_lock); +} + +irqreturn_t iwl_pcie_isr(int irq, void *data) +{ + struct iwl_trans *trans = data; + + if (!trans) + return IRQ_NONE; + + /* Disable (but don't clear!) interrupts here to avoid + * back-to-back ISRs and sporadic interrupts from our NIC. + * If we have something to service, the tasklet will re-enable ints. + * If we *don't* have something, we'll re-enable before leaving here. + */ + iwl_write32(trans, CSR_INT_MASK, 0x00000000); + + return IRQ_WAKE_THREAD; +} + +irqreturn_t iwl_pcie_msix_isr(int irq, void *data) +{ + return IRQ_WAKE_THREAD; +} + +irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) +{ + struct msix_entry *entry = dev_id; + struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry); + struct iwl_trans *trans = trans_pcie->trans; + struct isr_statistics *isr_stats = &trans_pcie->isr_stats; + u32 inta_fh_msk = ~MSIX_FH_INT_CAUSES_DATA_QUEUE; + u32 inta_fh, inta_hw; + bool polling = false; + bool sw_err; + + if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) + inta_fh_msk |= MSIX_FH_INT_CAUSES_Q0; + + if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) + inta_fh_msk |= MSIX_FH_INT_CAUSES_Q1; + + lock_map_acquire(&trans->sync_cmd_lockdep_map); + + spin_lock_bh(&trans_pcie->irq_lock); + inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD); + inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD); + /* + * Clear causes registers to avoid being handling the same cause. 
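+ * (Writing the bits we just read back acks them; for the FH register
+ * only the causes this vector actually handles, per inta_fh_msk, are
+ * acked.)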
+ */ + iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh & inta_fh_msk); + iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw); + spin_unlock_bh(&trans_pcie->irq_lock); + + trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw); + + if (unlikely(!(inta_fh | inta_hw))) { + IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n"); + lock_map_release(&trans->sync_cmd_lockdep_map); + return IRQ_NONE; + } + + if (iwl_have_debug_level(IWL_DL_ISR)) { + IWL_DEBUG_ISR(trans, + "ISR[%d] inta_fh 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n", + entry->entry, inta_fh, trans_pcie->fh_mask, + iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD)); + if (inta_fh & ~trans_pcie->fh_mask) + IWL_DEBUG_ISR(trans, + "We got a masked interrupt (0x%08x)\n", + inta_fh & ~trans_pcie->fh_mask); + } + + inta_fh &= trans_pcie->fh_mask; + + if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) && + inta_fh & MSIX_FH_INT_CAUSES_Q0) { + local_bh_disable(); + if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) { + polling = true; + __napi_schedule(&trans_pcie->rxq[0].napi); + } + local_bh_enable(); + } + + if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) && + inta_fh & MSIX_FH_INT_CAUSES_Q1) { + local_bh_disable(); + if (napi_schedule_prep(&trans_pcie->rxq[1].napi)) { + polling = true; + __napi_schedule(&trans_pcie->rxq[1].napi); + } + local_bh_enable(); + } + + /* This "Tx" DMA channel is used only for loading uCode */ + if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM && + trans_pcie->imr_status == IMR_D2S_REQUESTED) { + IWL_DEBUG_ISR(trans, "IMR Complete interrupt\n"); + isr_stats->tx++; + + /* Wake up IMR routine once write to SRAM is complete */ + if (trans_pcie->imr_status == IMR_D2S_REQUESTED) { + trans_pcie->imr_status = IMR_D2S_COMPLETED; + wake_up(&trans_pcie->ucode_write_waitq); + } + } else if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) { + IWL_DEBUG_ISR(trans, "uCode load interrupt\n"); + isr_stats->tx++; + /* + * Wake up uCode load routine, + * now that load is complete + */ + trans_pcie->ucode_write_complete = true; + wake_up(&trans_pcie->ucode_write_waitq); + + /* Wake up IMR routine once write to SRAM is complete */ + if (trans_pcie->imr_status == IMR_D2S_REQUESTED) { + trans_pcie->imr_status = IMR_D2S_COMPLETED; + wake_up(&trans_pcie->ucode_write_waitq); + } + } + + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ; + else + sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR; + + /* Error detected by uCode */ + if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) || sw_err) { + IWL_ERR(trans, + "Microcode SW error detected. 
Restarting 0x%X.\n", + inta_fh); + isr_stats->sw++; + /* during FW reset flow report errors from there */ + if (trans_pcie->imr_status == IMR_D2S_REQUESTED) { + trans_pcie->imr_status = IMR_D2S_ERROR; + wake_up(&trans_pcie->imr_waitq); + } else if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) { + trans_pcie->fw_reset_state = FW_RESET_ERROR; + wake_up(&trans_pcie->fw_reset_waitq); + } else { + iwl_pcie_irq_handle_error(trans); + } + } + + /* After checking FH register check HW register */ + if (iwl_have_debug_level(IWL_DL_ISR)) { + IWL_DEBUG_ISR(trans, + "ISR[%d] inta_hw 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n", + entry->entry, inta_hw, trans_pcie->hw_mask, + iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD)); + if (inta_hw & ~trans_pcie->hw_mask) + IWL_DEBUG_ISR(trans, + "We got a masked interrupt 0x%08x\n", + inta_hw & ~trans_pcie->hw_mask); + } + + inta_hw &= trans_pcie->hw_mask; + + /* Alive notification via Rx interrupt will do the real work */ + if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) { + IWL_DEBUG_ISR(trans, "Alive interrupt\n"); + isr_stats->alive++; + if (trans->trans_cfg->gen2) { + /* We can restock, since firmware configured the RFH */ + iwl_pcie_rxmq_restock(trans, trans_pcie->rxq); + } + } + + /* + * In some rare cases when the HW is in a bad state, we may + * get this interrupt too early, when prph_info is still NULL. + * So make sure that it's not NULL to prevent crashing. + */ + if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP && trans_pcie->prph_info) { + u32 sleep_notif = + le32_to_cpu(trans_pcie->prph_info->sleep_notif); + if (sleep_notif == IWL_D3_SLEEP_STATUS_SUSPEND || + sleep_notif == IWL_D3_SLEEP_STATUS_RESUME) { + IWL_DEBUG_ISR(trans, + "Sx interrupt: sleep notification = 0x%x\n", + sleep_notif); + trans_pcie->sx_complete = true; + wake_up(&trans_pcie->sx_waitq); + } else { + /* uCode wakes up after power-down sleep */ + IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); + iwl_pcie_rxq_check_wrptr(trans); + iwl_pcie_txq_check_wrptrs(trans); + + isr_stats->wakeup++; + } + } + + /* Chip got too hot and stopped itself */ + if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) { + IWL_ERR(trans, "Microcode CT kill error detected.\n"); + isr_stats->ctkill++; + } + + /* HW RF KILL switch toggled */ + if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL) + iwl_pcie_handle_rfkill_irq(trans, true); + + if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) { + IWL_ERR(trans, + "Hardware error detected. 
Restarting.\n"); + + isr_stats->hw++; + trans->dbg.hw_error = true; + iwl_pcie_irq_handle_error(trans); + } + + if (inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE) { + IWL_DEBUG_ISR(trans, "Reset flow completed\n"); + trans_pcie->fw_reset_state = FW_RESET_OK; + wake_up(&trans_pcie->fw_reset_waitq); + } + + if (!polling) + iwl_pcie_clear_irq(trans, entry->entry); + + lock_map_release(&trans->sync_cmd_lockdep_map); + + return IRQ_HANDLED; +} diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c new file mode 100644 index 000000000..8b9e4b9c5 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -0,0 +1,465 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2017 Intel Deutschland GmbH + * Copyright (C) 2018-2021 Intel Corporation + */ +#include "iwl-trans.h" +#include "iwl-prph.h" +#include "iwl-context-info.h" +#include "iwl-context-info-gen3.h" +#include "internal.h" +#include "fw/dbg.h" + +#define FW_RESET_TIMEOUT (HZ / 5) + +/* + * Start up NIC's basic functionality after it has been reset + * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop()) + * NOTE: This does not load uCode nor start the embedded processor + */ +int iwl_pcie_gen2_apm_init(struct iwl_trans *trans) +{ + int ret = 0; + + IWL_DEBUG_INFO(trans, "Init card's basic functions\n"); + + /* + * Use "set_bit" below rather than "write", to preserve any hardware + * bits already set by default after reset. + */ + + /* + * Disable L0s without affecting L1; + * don't wait for ICH L0s (ICH bug W/A) + */ + iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS, + CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); + + /* Set FH wait threshold to maximum (HW error during stress W/A) */ + iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL); + + /* + * Enable HAP INTA (interrupt from management bus) to + * wake device's PCI Express link L1a -> L0s + */ + iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); + + iwl_pcie_apm_config(trans); + + ret = iwl_finish_nic_init(trans); + if (ret) + return ret; + + set_bit(STATUS_DEVICE_ENABLED, &trans->status); + + return 0; +} + +static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave) +{ + IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n"); + + if (op_mode_leave) { + if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) + iwl_pcie_gen2_apm_init(trans); + + /* inform ME that we are leaving */ + iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, + CSR_RESET_LINK_PWR_MGMT_DISABLED); + iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_PREPARE | + CSR_HW_IF_CONFIG_REG_ENABLE_PME); + mdelay(1); + iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, + CSR_RESET_LINK_PWR_MGMT_DISABLED); + mdelay(5); + } + + clear_bit(STATUS_DEVICE_ENABLED, &trans->status); + + /* Stop device's DMA activity */ + iwl_pcie_apm_stop_master(trans); + + iwl_trans_sw_reset(trans, false); + + /* + * Clear "initialization complete" bit to move adapter from + * D0A* (powered-up Active) --> D0U* (Uninitialized) state. 
+ */ + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + iwl_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_INIT); + else + iwl_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_INIT_DONE); +} + +static void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int ret; + + trans_pcie->fw_reset_state = FW_RESET_REQUESTED; + + if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) + iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER, + UREG_NIC_SET_NMI_DRIVER_RESET_HANDSHAKE); + else if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) + iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6, + UREG_DOORBELL_TO_ISR6_RESET_HANDSHAKE); + else + iwl_write32(trans, CSR_DOORBELL_VECTOR, + UREG_DOORBELL_TO_ISR6_RESET_HANDSHAKE); + + /* wait 200ms */ + ret = wait_event_timeout(trans_pcie->fw_reset_waitq, + trans_pcie->fw_reset_state != FW_RESET_REQUESTED, + FW_RESET_TIMEOUT); + if (!ret || trans_pcie->fw_reset_state == FW_RESET_ERROR) { + IWL_INFO(trans, + "firmware didn't ACK the reset - continue anyway\n"); + iwl_trans_fw_error(trans, true); + } + + trans_pcie->fw_reset_state = FW_RESET_IDLE; +} + +void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + lockdep_assert_held(&trans_pcie->mutex); + + if (trans_pcie->is_down) + return; + + if (trans->state >= IWL_TRANS_FW_STARTED) + if (trans_pcie->fw_reset_handshake) + iwl_trans_pcie_fw_reset_handshake(trans); + + trans_pcie->is_down = true; + + /* tell the device to stop sending interrupts */ + iwl_disable_interrupts(trans); + + /* device going down, Stop using ICT table */ + iwl_pcie_disable_ict(trans); + + /* + * If a HW restart happens during firmware loading, + * then the firmware loading might call this function + * and later it might be called again due to the + * restart. So don't process again if the device is + * already dead. + */ + if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) { + IWL_DEBUG_INFO(trans, + "DEVICE_ENABLED bit was set and is now cleared\n"); + iwl_pcie_synchronize_irqs(trans); + iwl_pcie_rx_napi_sync(trans); + iwl_txq_gen2_tx_free(trans); + iwl_pcie_rx_stop(trans); + } + + iwl_pcie_ctxt_info_free_paging(trans); + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + iwl_pcie_ctxt_info_gen3_free(trans, false); + else + iwl_pcie_ctxt_info_free(trans); + + /* Stop the device, and put it in low power state */ + iwl_pcie_gen2_apm_stop(trans, false); + + /* re-take ownership to prevent other users from stealing the device */ + iwl_trans_sw_reset(trans, true); + + /* + * Upon stop, the IVAR table gets erased, so msi-x won't + * work. This causes a bug in RF-KILL flows, since the interrupt + * that enables radio won't fire on the correct irq, and the + * driver won't be able to handle the interrupt. + * Configure the IVAR table again after reset. + */ + iwl_pcie_conf_msix_hw(trans_pcie); + + /* + * Upon stop, the APM issues an interrupt if HW RF kill is set. + * This is a bug in certain verions of the hardware. + * Certain devices also keep sending HW RF kill interrupt all + * the time, unless the interrupt is ACKed even if the interrupt + * should be masked. Re-ACK all the interrupts here. 
+ */ + iwl_disable_interrupts(trans); + + /* clear all status bits */ + clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); + clear_bit(STATUS_INT_ENABLED, &trans->status); + clear_bit(STATUS_TPOWER_PMI, &trans->status); + + /* + * Even if we stop the HW, we still want the RF kill + * interrupt + */ + iwl_enable_rfkill_int(trans); +} + +void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + bool was_in_rfkill; + + iwl_op_mode_time_point(trans->op_mode, + IWL_FW_INI_TIME_POINT_HOST_DEVICE_DISABLE, + NULL); + + mutex_lock(&trans_pcie->mutex); + trans_pcie->opmode_down = true; + was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status); + _iwl_trans_pcie_gen2_stop_device(trans); + iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill); + mutex_unlock(&trans_pcie->mutex); +} + +static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int queue_size = max_t(u32, IWL_CMD_QUEUE_SIZE, + trans->cfg->min_txq_size); + + /* TODO: most of the logic can be removed in A0 - but not in Z0 */ + spin_lock_bh(&trans_pcie->irq_lock); + iwl_pcie_gen2_apm_init(trans); + spin_unlock_bh(&trans_pcie->irq_lock); + + iwl_op_mode_nic_config(trans->op_mode); + + /* Allocate the RX queue, or reset if it is already allocated */ + if (iwl_pcie_gen2_rx_init(trans)) + return -ENOMEM; + + /* Allocate or reset and init all Tx and Command queues */ + if (iwl_txq_gen2_init(trans, trans->txqs.cmd.q_id, queue_size)) + return -ENOMEM; + + /* enable shadow regs in HW */ + iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF); + IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n"); + + return 0; +} + +static void iwl_pcie_get_rf_name(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + char *buf = trans_pcie->rf_name; + size_t buflen = sizeof(trans_pcie->rf_name); + size_t pos; + u32 version; + + if (buf[0]) + return; + + switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) { + case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF): + pos = scnprintf(buf, buflen, "JF"); + break; + case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_GF): + pos = scnprintf(buf, buflen, "GF"); + break; + case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_GF4): + pos = scnprintf(buf, buflen, "GF4"); + break; + case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR): + pos = scnprintf(buf, buflen, "HR"); + break; + case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR1): + pos = scnprintf(buf, buflen, "HR1"); + break; + case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HRCDB): + pos = scnprintf(buf, buflen, "HRCDB"); + break; + default: + return; + } + + switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) { + case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR): + case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR1): + case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HRCDB): + version = iwl_read_prph(trans, CNVI_MBOX_C); + switch (version) { + case 0x20000: + pos += scnprintf(buf + pos, buflen - pos, " B3"); + break; + case 0x120000: + pos += scnprintf(buf + pos, buflen - pos, " B5"); + break; + default: + pos += scnprintf(buf + pos, buflen - pos, + " (0x%x)", version); + break; + } + break; + default: + break; + } + + pos += scnprintf(buf + pos, buflen - pos, ", rfid=0x%x", + trans->hw_rf_id); + + IWL_INFO(trans, "Detected RF %s\n", buf); + + /* + * also add a \n for debugfs - need to do it after printing + * since our IWL_INFO machinery wants to see a static \n at + * the end of the string + */ + pos += scnprintf(buf + pos, buflen - pos, 
"\n"); +} + +void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + iwl_pcie_reset_ict(trans); + + /* make sure all queue are not stopped/used */ + memset(trans->txqs.queue_stopped, 0, + sizeof(trans->txqs.queue_stopped)); + memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); + + /* now that we got alive we can free the fw image & the context info. + * paging memory cannot be freed included since FW will still use it + */ + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + iwl_pcie_ctxt_info_gen3_free(trans, true); + else + iwl_pcie_ctxt_info_free(trans); + + /* + * Re-enable all the interrupts, including the RF-Kill one, now that + * the firmware is alive. + */ + iwl_enable_interrupts(trans); + mutex_lock(&trans_pcie->mutex); + iwl_pcie_check_hw_rf_kill(trans); + + iwl_pcie_get_rf_name(trans); + mutex_unlock(&trans_pcie->mutex); +} + +static void iwl_pcie_set_ltr(struct iwl_trans *trans) +{ + u32 ltr_val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ | + u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC, + CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) | + u32_encode_bits(250, + CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) | + CSR_LTR_LONG_VAL_AD_SNOOP_REQ | + u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC, + CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) | + u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL); + + /* + * To workaround hardware latency issues during the boot process, + * initialize the LTR to ~250 usec (see ltr_val above). + * The firmware initializes this again later (to a smaller value). + */ + if ((trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210 || + trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) && + !trans->trans_cfg->integrated) { + iwl_write32(trans, CSR_LTR_LONG_VAL_AD, ltr_val); + } else if (trans->trans_cfg->integrated && + trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) { + iwl_write_prph(trans, HPM_MAC_LTR_CSR, HPM_MAC_LRT_ENABLE_ALL); + iwl_write_prph(trans, HPM_UMAC_LTR, ltr_val); + } +} + +int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, + const struct fw_img *fw, bool run_in_rfkill) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + bool hw_rfkill; + int ret; + + /* This may fail if AMT took ownership of the device */ + if (iwl_pcie_prepare_card_hw(trans)) { + IWL_WARN(trans, "Exit HW not ready\n"); + return -EIO; + } + + iwl_enable_rfkill_int(trans); + + iwl_write32(trans, CSR_INT, 0xFFFFFFFF); + + /* + * We enabled the RF-Kill interrupt and the handler may very + * well be running. Disable the interrupts to make sure no other + * interrupt can be fired. 
+ */ + iwl_disable_interrupts(trans); + + /* Make sure it finished running */ + iwl_pcie_synchronize_irqs(trans); + + mutex_lock(&trans_pcie->mutex); + + /* If platform's RF_KILL switch is NOT set to KILL */ + hw_rfkill = iwl_pcie_check_hw_rf_kill(trans); + if (hw_rfkill && !run_in_rfkill) { + ret = -ERFKILL; + goto out; + } + + /* Someone called stop_device, don't try to start_fw */ + if (trans_pcie->is_down) { + IWL_WARN(trans, + "Can't start_fw since the HW hasn't been started\n"); + ret = -EIO; + goto out; + } + + /* make sure rfkill handshake bits are cleared */ + iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + + /* clear (again), then enable host interrupts */ + iwl_write32(trans, CSR_INT, 0xFFFFFFFF); + + ret = iwl_pcie_gen2_nic_init(trans); + if (ret) { + IWL_ERR(trans, "Unable to init nic\n"); + goto out; + } + + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + ret = iwl_pcie_ctxt_info_gen3_init(trans, fw); + else + ret = iwl_pcie_ctxt_info_init(trans, fw); + if (ret) + goto out; + + iwl_pcie_set_ltr(trans); + + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { + iwl_write32(trans, CSR_FUNC_SCRATCH, CSR_FUNC_SCRATCH_INIT_VALUE); + iwl_set_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_ROM_START); + } else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1); + } else { + iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1); + } + + /* re-check RF-Kill state since we may have missed the interrupt */ + hw_rfkill = iwl_pcie_check_hw_rf_kill(trans); + if (hw_rfkill && !run_in_rfkill) + ret = -ERFKILL; + +out: + mutex_unlock(&trans_pcie->mutex); + return ret; +} diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c new file mode 100644 index 000000000..c7ed35b3d --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -0,0 +1,3735 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2007-2015, 2018-2022 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/debugfs.h> +#include <linux/sched.h> +#include <linux/bitops.h> +#include <linux/gfp.h> +#include <linux/vmalloc.h> +#include <linux/module.h> +#include <linux/wait.h> +#include <linux/seq_file.h> + +#include "iwl-drv.h" +#include "iwl-trans.h" +#include "iwl-csr.h" +#include "iwl-prph.h" +#include "iwl-scd.h" +#include "iwl-agn-hw.h" +#include "fw/error-dump.h" +#include "fw/dbg.h" +#include "fw/api/tx.h" +#include "mei/iwl-mei.h" +#include "internal.h" +#include "iwl-fh.h" +#include "iwl-context-info-gen3.h" + +/* extended range in FW SRAM */ +#define IWL_FW_MEM_EXTENDED_START 0x40000 +#define IWL_FW_MEM_EXTENDED_END 0x57FFF + +void iwl_trans_pcie_dump_regs(struct iwl_trans *trans) +{ +#define PCI_DUMP_SIZE 352 +#define PCI_MEM_DUMP_SIZE 64 +#define PCI_PARENT_DUMP_SIZE 524 +#define PREFIX_LEN 32 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct pci_dev *pdev = trans_pcie->pci_dev; + u32 i, pos, alloc_size, *ptr, *buf; + char *prefix; + + if (trans_pcie->pcie_dbg_dumped_once) + return; + + /* Should be a multiple of 4 */ + BUILD_BUG_ON(PCI_DUMP_SIZE > 4096 || PCI_DUMP_SIZE & 0x3); + BUILD_BUG_ON(PCI_MEM_DUMP_SIZE > 4096 || PCI_MEM_DUMP_SIZE & 0x3); + 
BUILD_BUG_ON(PCI_PARENT_DUMP_SIZE > 4096 || PCI_PARENT_DUMP_SIZE & 0x3); + + /* Alloc a max size buffer */ + alloc_size = PCI_ERR_ROOT_ERR_SRC + 4 + PREFIX_LEN; + alloc_size = max_t(u32, alloc_size, PCI_DUMP_SIZE + PREFIX_LEN); + alloc_size = max_t(u32, alloc_size, PCI_MEM_DUMP_SIZE + PREFIX_LEN); + alloc_size = max_t(u32, alloc_size, PCI_PARENT_DUMP_SIZE + PREFIX_LEN); + + buf = kmalloc(alloc_size, GFP_ATOMIC); + if (!buf) + return; + prefix = (char *)buf + alloc_size - PREFIX_LEN; + + IWL_ERR(trans, "iwlwifi transaction failed, dumping registers\n"); + + /* Print wifi device registers */ + sprintf(prefix, "iwlwifi %s: ", pci_name(pdev)); + IWL_ERR(trans, "iwlwifi device config registers:\n"); + for (i = 0, ptr = buf; i < PCI_DUMP_SIZE; i += 4, ptr++) + if (pci_read_config_dword(pdev, i, ptr)) + goto err_read; + print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0); + + IWL_ERR(trans, "iwlwifi device memory mapped registers:\n"); + for (i = 0, ptr = buf; i < PCI_MEM_DUMP_SIZE; i += 4, ptr++) + *ptr = iwl_read32(trans, i); + print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0); + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); + if (pos) { + IWL_ERR(trans, "iwlwifi device AER capability structure:\n"); + for (i = 0, ptr = buf; i < PCI_ERR_ROOT_COMMAND; i += 4, ptr++) + if (pci_read_config_dword(pdev, pos + i, ptr)) + goto err_read; + print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, + 32, 4, buf, i, 0); + } + + /* Print parent device registers next */ + if (!pdev->bus->self) + goto out; + + pdev = pdev->bus->self; + sprintf(prefix, "iwlwifi %s: ", pci_name(pdev)); + + IWL_ERR(trans, "iwlwifi parent port (%s) config registers:\n", + pci_name(pdev)); + for (i = 0, ptr = buf; i < PCI_PARENT_DUMP_SIZE; i += 4, ptr++) + if (pci_read_config_dword(pdev, i, ptr)) + goto err_read; + print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0); + + /* Print root port AER registers */ + pos = 0; + pdev = pcie_find_root_port(pdev); + if (pdev) + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); + if (pos) { + IWL_ERR(trans, "iwlwifi root port (%s) AER cap structure:\n", + pci_name(pdev)); + sprintf(prefix, "iwlwifi %s: ", pci_name(pdev)); + for (i = 0, ptr = buf; i <= PCI_ERR_ROOT_ERR_SRC; i += 4, ptr++) + if (pci_read_config_dword(pdev, pos + i, ptr)) + goto err_read; + print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, + 4, buf, i, 0); + } + goto out; + +err_read: + print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0); + IWL_ERR(trans, "Read failed at 0x%X\n", i); +out: + trans_pcie->pcie_dbg_dumped_once = 1; + kfree(buf); +} + +static int iwl_trans_pcie_sw_reset(struct iwl_trans *trans, + bool retake_ownership) +{ + /* Reset entire device - do controller reset (results in SHRD_HW_RST) */ + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + iwl_set_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_SW_RESET); + else + iwl_set_bit(trans, CSR_RESET, + CSR_RESET_REG_FLAG_SW_RESET); + usleep_range(5000, 6000); + + if (retake_ownership) + return iwl_pcie_prepare_card_hw(trans); + + return 0; +} + +static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans) +{ + struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon; + + if (!fw_mon->size) + return; + + dma_free_coherent(trans->dev, fw_mon->size, fw_mon->block, + fw_mon->physical); + + fw_mon->block = NULL; + fw_mon->physical = 0; + fw_mon->size = 0; +} + +static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans, + u8 max_power, u8 min_power) +{ + 
struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon; + void *block = NULL; + dma_addr_t physical = 0; + u32 size = 0; + u8 power; + + if (fw_mon->size) + return; + + for (power = max_power; power >= min_power; power--) { + size = BIT(power); + block = dma_alloc_coherent(trans->dev, size, &physical, + GFP_KERNEL | __GFP_NOWARN); + if (!block) + continue; + + IWL_INFO(trans, + "Allocated 0x%08x bytes for firmware monitor.\n", + size); + break; + } + + if (WARN_ON_ONCE(!block)) + return; + + if (power != max_power) + IWL_ERR(trans, + "Sorry - debug buffer is only %luK while you requested %luK\n", + (unsigned long)BIT(power - 10), + (unsigned long)BIT(max_power - 10)); + + fw_mon->block = block; + fw_mon->physical = physical; + fw_mon->size = size; +} + +void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power) +{ + if (!max_power) { + /* default max_power is maximum */ + max_power = 26; + } else { + max_power += 11; + } + + if (WARN(max_power > 26, + "External buffer size for monitor is too big %d, check the FW TLV\n", + max_power)) + return; + + if (trans->dbg.fw_mon.size) + return; + + iwl_pcie_alloc_fw_monitor_block(trans, max_power, 11); +} + +static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg) +{ + iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG, + ((reg & 0x0000ffff) | (2 << 28))); + return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG); +} + +static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val) +{ + iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val); + iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG, + ((reg & 0x0000ffff) | (3 << 28))); +} + +static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux) +{ + if (trans->cfg->apmg_not_supported) + return; + + if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold)) + iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_PWR_SRC_VAUX, + ~APMG_PS_CTRL_MSK_PWR_SRC); + else + iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, + ~APMG_PS_CTRL_MSK_PWR_SRC); +} + +/* PCI registers */ +#define PCI_CFG_RETRY_TIMEOUT 0x041 + +void iwl_pcie_apm_config(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + u16 lctl; + u16 cap; + + /* + * L0S states have been found to be unstable with our devices + * and in newer hardware they are not officially supported at + * all, so we must always set the L0S_DISABLED bit. + */ + iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED); + + pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl); + trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S); + + pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap); + trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN; + IWL_DEBUG_POWER(trans, "L1 %sabled - LTR %sabled\n", + (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis", + trans->ltr_enabled ? "En" : "Dis"); +} + +/* + * Start up NIC's basic functionality after it has been reset + * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop()) + * NOTE: This does not load uCode nor start the embedded processor + */ +static int iwl_pcie_apm_init(struct iwl_trans *trans) +{ + int ret; + + IWL_DEBUG_INFO(trans, "Init card's basic functions\n"); + + /* + * Use "set_bit" below rather than "write", to preserve any hardware + * bits already set by default after reset. 
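+ * (Unlike iwl_pcie_gen2_apm_init(), this legacy path also configures the
+ * analog PLL, the L1-exit oscillator workaround and the APMG DMA clock
+ * on the older families that still need them.)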
+ */ + + /* Disable L0S exit timer (platform NMI Work/Around) */ + if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) + iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS, + CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); + + /* + * Disable L0s without affecting L1; + * don't wait for ICH L0s (ICH bug W/A) + */ + iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS, + CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); + + /* Set FH wait threshold to maximum (HW error during stress W/A) */ + iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL); + + /* + * Enable HAP INTA (interrupt from management bus) to + * wake device's PCI Express link L1a -> L0s + */ + iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); + + iwl_pcie_apm_config(trans); + + /* Configure analog phase-lock-loop before activating to D0A */ + if (trans->trans_cfg->base_params->pll_cfg) + iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL); + + ret = iwl_finish_nic_init(trans); + if (ret) + return ret; + + if (trans->cfg->host_interrupt_operation_mode) { + /* + * This is a bit of an abuse - This is needed for 7260 / 3160 + * only check host_interrupt_operation_mode even if this is + * not related to host_interrupt_operation_mode. + * + * Enable the oscillator to count wake up time for L1 exit. This + * consumes slightly more power (100uA) - but allows to be sure + * that we wake up from L1 on time. + * + * This looks weird: read twice the same register, discard the + * value, set a bit, and yet again, read that same register + * just to discard the value. But that's the way the hardware + * seems to like it. + */ + iwl_read_prph(trans, OSC_CLK); + iwl_read_prph(trans, OSC_CLK); + iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL); + iwl_read_prph(trans, OSC_CLK); + iwl_read_prph(trans, OSC_CLK); + } + + /* + * Enable DMA clock and wait for it to stabilize. + * + * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" + * bits do not disable clocks. This preserves any hardware + * bits already set by default in "CLK_CTRL_REG" after reset. + */ + if (!trans->cfg->apmg_not_supported) { + iwl_write_prph(trans, APMG_CLK_EN_REG, + APMG_CLK_VAL_DMA_CLK_RQT); + udelay(20); + + /* Disable L1-Active */ + iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG, + APMG_PCIDEV_STT_VAL_L1_ACT_DIS); + + /* Clear the interrupt in APMG if the NIC is in RFKILL */ + iwl_write_prph(trans, APMG_RTC_INT_STT_REG, + APMG_RTC_INT_STT_RFKILL); + } + + set_bit(STATUS_DEVICE_ENABLED, &trans->status); + + return 0; +} + +/* + * Enable LP XTAL to avoid HW bug where device may consume much power if + * FW is not loaded after device reset. LP XTAL is disabled by default + * after device HW reset. Do it only if XTAL is fed by internal source. + * Configure device's "persistence" mode to avoid resetting XTAL again when + * SHRD_HW_RST occurs in S3. + */ +static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans) +{ + int ret; + u32 apmg_gp1_reg; + u32 apmg_xtal_cfg_reg; + u32 dl_cfg_reg; + + /* Force XTAL ON */ + __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_XTAL_ON); + + ret = iwl_trans_pcie_sw_reset(trans, true); + + if (!ret) + ret = iwl_finish_nic_init(trans); + + if (WARN_ON(ret)) { + /* Release XTAL ON request */ + __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_XTAL_ON); + return; + } + + /* + * Clear "disable persistence" to avoid LP XTAL resetting when + * SHRD_HW_RST is applied in S3. 
+ */ + iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG, + APMG_PCIDEV_STT_VAL_PERSIST_DIS); + + /* + * Force APMG XTAL to be active to prevent its disabling by HW + * caused by APMG idle state. + */ + apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans, + SHR_APMG_XTAL_CFG_REG); + iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG, + apmg_xtal_cfg_reg | + SHR_APMG_XTAL_CFG_XTAL_ON_REQ); + + ret = iwl_trans_pcie_sw_reset(trans, true); + if (ret) + IWL_ERR(trans, + "iwl_pcie_apm_lp_xtal_enable: failed to retake NIC ownership\n"); + + /* Enable LP XTAL by indirect access through CSR */ + apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG); + iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg | + SHR_APMG_GP1_WF_XTAL_LP_EN | + SHR_APMG_GP1_CHICKEN_BIT_SELECT); + + /* Clear delay line clock power up */ + dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG); + iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg & + ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP); + + /* + * Enable persistence mode to avoid LP XTAL resetting when + * SHRD_HW_RST is applied in S3. + */ + iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_PERSIST_MODE); + + /* + * Clear "initialization complete" bit to move adapter from + * D0A* (powered-up Active) --> D0U* (Uninitialized) state. + */ + iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + + /* Activates XTAL resources monitor */ + __iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG, + CSR_MONITOR_XTAL_RESOURCES); + + /* Release XTAL ON request */ + __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_XTAL_ON); + udelay(10); + + /* Release APMG XTAL */ + iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG, + apmg_xtal_cfg_reg & + ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ); +} + +void iwl_pcie_apm_stop_master(struct iwl_trans *trans) +{ + int ret; + + /* stop device's busmaster DMA activity */ + + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { + iwl_set_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_REQ); + + ret = iwl_poll_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS, + CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS, + 100); + msleep(100); + } else { + iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); + + ret = iwl_poll_bit(trans, CSR_RESET, + CSR_RESET_REG_FLAG_MASTER_DISABLED, + CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); + } + + if (ret < 0) + IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n"); + + IWL_DEBUG_INFO(trans, "stop master\n"); +} + +static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave) +{ + IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n"); + + if (op_mode_leave) { + if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) + iwl_pcie_apm_init(trans); + + /* inform ME that we are leaving */ + if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) + iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG, + APMG_PCIDEV_STT_VAL_WAKE_ME); + else if (trans->trans_cfg->device_family >= + IWL_DEVICE_FAMILY_8000) { + iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, + CSR_RESET_LINK_PWR_MGMT_DISABLED); + iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_PREPARE | + CSR_HW_IF_CONFIG_REG_ENABLE_PME); + mdelay(1); + iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, + CSR_RESET_LINK_PWR_MGMT_DISABLED); + } + mdelay(5); + } + + clear_bit(STATUS_DEVICE_ENABLED, &trans->status); + + /* Stop device's DMA activity */ + iwl_pcie_apm_stop_master(trans); + + if 
(trans->cfg->lp_xtal_workaround) { + iwl_pcie_apm_lp_xtal_enable(trans); + return; + } + + iwl_trans_pcie_sw_reset(trans, false); + + /* + * Clear "initialization complete" bit to move adapter from + * D0A* (powered-up Active) --> D0U* (Uninitialized) state. + */ + iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); +} + +static int iwl_pcie_nic_init(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int ret; + + /* nic_init */ + spin_lock_bh(&trans_pcie->irq_lock); + ret = iwl_pcie_apm_init(trans); + spin_unlock_bh(&trans_pcie->irq_lock); + + if (ret) + return ret; + + iwl_pcie_set_pwr(trans, false); + + iwl_op_mode_nic_config(trans->op_mode); + + /* Allocate the RX queue, or reset if it is already allocated */ + ret = iwl_pcie_rx_init(trans); + if (ret) + return ret; + + /* Allocate or reset and init all Tx and Command queues */ + if (iwl_pcie_tx_init(trans)) { + iwl_pcie_rx_free(trans); + return -ENOMEM; + } + + if (trans->trans_cfg->base_params->shadow_reg_enable) { + /* enable shadow regs in HW */ + iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF); + IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n"); + } + + return 0; +} + +#define HW_READY_TIMEOUT (50) + +/* Note: returns poll_bit return value, which is >= 0 if success */ +static int iwl_pcie_set_hw_ready(struct iwl_trans *trans) +{ + int ret; + + iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_NIC_READY); + + /* See if we got it */ + ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, + CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, + HW_READY_TIMEOUT); + + if (ret >= 0) + iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE); + + IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? 
" not" : ""); + return ret; +} + +/* Note: returns standard 0/-ERROR code */ +int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) +{ + int ret; + int iter; + + IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n"); + + ret = iwl_pcie_set_hw_ready(trans); + /* If the card is ready, exit 0 */ + if (ret >= 0) { + trans->csme_own = false; + return 0; + } + + iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, + CSR_RESET_LINK_PWR_MGMT_DISABLED); + usleep_range(1000, 2000); + + for (iter = 0; iter < 10; iter++) { + int t = 0; + + /* If HW is not ready, prepare the conditions to check again */ + iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_PREPARE); + + do { + ret = iwl_pcie_set_hw_ready(trans); + if (ret >= 0) { + trans->csme_own = false; + return 0; + } + + if (iwl_mei_is_connected()) { + IWL_DEBUG_INFO(trans, + "Couldn't prepare the card but SAP is connected\n"); + trans->csme_own = true; + if (trans->trans_cfg->device_family != + IWL_DEVICE_FAMILY_9000) + IWL_ERR(trans, + "SAP not supported for this NIC family\n"); + + return -EBUSY; + } + + usleep_range(200, 1000); + t += 200; + } while (t < 150000); + msleep(25); + } + + IWL_ERR(trans, "Couldn't prepare the card\n"); + + return ret; +} + +/* + * ucode + */ +static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans, + u32 dst_addr, dma_addr_t phy_addr, + u32 byte_cnt) +{ + iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), + FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE); + + iwl_write32(trans, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), + dst_addr); + + iwl_write32(trans, FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL), + phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK); + + iwl_write32(trans, FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL), + (iwl_get_dma_hi_addr(phy_addr) + << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt); + + iwl_write32(trans, FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL), + BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) | + BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) | + FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID); + + iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), + FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | + FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE | + FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD); +} + +static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, + u32 dst_addr, dma_addr_t phy_addr, + u32 byte_cnt) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int ret; + + trans_pcie->ucode_write_complete = false; + + if (!iwl_trans_grab_nic_access(trans)) + return -EIO; + + iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr, + byte_cnt); + iwl_trans_release_nic_access(trans); + + ret = wait_event_timeout(trans_pcie->ucode_write_waitq, + trans_pcie->ucode_write_complete, 5 * HZ); + if (!ret) { + IWL_ERR(trans, "Failed to load firmware chunk!\n"); + iwl_trans_pcie_dump_regs(trans); + return -ETIMEDOUT; + } + + return 0; +} + +static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num, + const struct fw_desc *section) +{ + u8 *v_addr; + dma_addr_t p_addr; + u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len); + int ret = 0; + + IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n", + section_num); + + v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr, + GFP_KERNEL | __GFP_NOWARN); + if (!v_addr) { + IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n"); + chunk_sz = PAGE_SIZE; + v_addr = dma_alloc_coherent(trans->dev, chunk_sz, + &p_addr, GFP_KERNEL); + if (!v_addr) + return -ENOMEM; + } + + for (offset = 0; offset < 
section->len; offset += chunk_sz) { + u32 copy_size, dst_addr; + bool extended_addr = false; + + copy_size = min_t(u32, chunk_sz, section->len - offset); + dst_addr = section->offset + offset; + + if (dst_addr >= IWL_FW_MEM_EXTENDED_START && + dst_addr <= IWL_FW_MEM_EXTENDED_END) + extended_addr = true; + + if (extended_addr) + iwl_set_bits_prph(trans, LMPM_CHICK, + LMPM_CHICK_EXTENDED_ADDR_SPACE); + + memcpy(v_addr, (const u8 *)section->data + offset, copy_size); + ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr, + copy_size); + + if (extended_addr) + iwl_clear_bits_prph(trans, LMPM_CHICK, + LMPM_CHICK_EXTENDED_ADDR_SPACE); + + if (ret) { + IWL_ERR(trans, + "Could not load the [%d] uCode section\n", + section_num); + break; + } + } + + dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr); + return ret; +} + +static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans, + const struct fw_img *image, + int cpu, + int *first_ucode_section) +{ + int shift_param; + int i, ret = 0, sec_num = 0x1; + u32 val, last_read_idx = 0; + + if (cpu == 1) { + shift_param = 0; + *first_ucode_section = 0; + } else { + shift_param = 16; + (*first_ucode_section)++; + } + + for (i = *first_ucode_section; i < image->num_sec; i++) { + last_read_idx = i; + + /* + * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between + * CPU1 to CPU2. + * PAGING_SEPARATOR_SECTION delimiter - separate between + * CPU2 non paged to CPU2 paging sec. + */ + if (!image->sec[i].data || + image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION || + image->sec[i].offset == PAGING_SEPARATOR_SECTION) { + IWL_DEBUG_FW(trans, + "Break since Data not valid or Empty section, sec = %d\n", + i); + break; + } + + ret = iwl_pcie_load_section(trans, i, &image->sec[i]); + if (ret) + return ret; + + /* Notify ucode of loaded section number and status */ + val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS); + val = val | (sec_num << shift_param); + iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val); + + sec_num = (sec_num << 1) | 0x1; + } + + *first_ucode_section = last_read_idx; + + iwl_enable_interrupts(trans); + + if (trans->trans_cfg->use_tfh) { + if (cpu == 1) + iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS, + 0xFFFF); + else + iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS, + 0xFFFFFFFF); + } else { + if (cpu == 1) + iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, + 0xFFFF); + else + iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, + 0xFFFFFFFF); + } + + return 0; +} + +static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans, + const struct fw_img *image, + int cpu, + int *first_ucode_section) +{ + int i, ret = 0; + u32 last_read_idx = 0; + + if (cpu == 1) + *first_ucode_section = 0; + else + (*first_ucode_section)++; + + for (i = *first_ucode_section; i < image->num_sec; i++) { + last_read_idx = i; + + /* + * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between + * CPU1 to CPU2. + * PAGING_SEPARATOR_SECTION delimiter - separate between + * CPU2 non paged to CPU2 paging sec. 
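+ * (A separator carries no payload and is identified by one of these
+ * magic offsets; hitting it ends this pass, and the next call for the
+ * following CPU resumes from the section after it.)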
+ */ + if (!image->sec[i].data || + image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION || + image->sec[i].offset == PAGING_SEPARATOR_SECTION) { + IWL_DEBUG_FW(trans, + "Break since Data not valid or Empty section, sec = %d\n", + i); + break; + } + + ret = iwl_pcie_load_section(trans, i, &image->sec[i]); + if (ret) + return ret; + } + + *first_ucode_section = last_read_idx; + + return 0; +} + +static void iwl_pcie_apply_destination_ini(struct iwl_trans *trans) +{ + enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1; + struct iwl_fw_ini_allocation_tlv *fw_mon_cfg = + &trans->dbg.fw_mon_cfg[alloc_id]; + struct iwl_dram_data *frag; + + if (!iwl_trans_dbg_ini_valid(trans)) + return; + + if (le32_to_cpu(fw_mon_cfg->buf_location) == + IWL_FW_INI_LOCATION_SRAM_PATH) { + IWL_DEBUG_FW(trans, "WRT: Applying SMEM buffer destination\n"); + /* set sram monitor by enabling bit 7 */ + iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM); + + return; + } + + if (le32_to_cpu(fw_mon_cfg->buf_location) != + IWL_FW_INI_LOCATION_DRAM_PATH || + !trans->dbg.fw_mon_ini[alloc_id].num_frags) + return; + + frag = &trans->dbg.fw_mon_ini[alloc_id].frags[0]; + + IWL_DEBUG_FW(trans, "WRT: Applying DRAM destination (alloc_id=%u)\n", + alloc_id); + + iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2, + frag->physical >> MON_BUFF_SHIFT_VER2); + iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2, + (frag->physical + frag->size - 256) >> + MON_BUFF_SHIFT_VER2); +} + +void iwl_pcie_apply_destination(struct iwl_trans *trans) +{ + const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg.dest_tlv; + const struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon; + int i; + + if (iwl_trans_dbg_ini_valid(trans)) { + iwl_pcie_apply_destination_ini(trans); + return; + } + + IWL_INFO(trans, "Applying debug destination %s\n", + get_fw_dbg_mode_string(dest->monitor_mode)); + + if (dest->monitor_mode == EXTERNAL_MODE) + iwl_pcie_alloc_fw_monitor(trans, dest->size_power); + else + IWL_WARN(trans, "PCI should have external buffer debug\n"); + + for (i = 0; i < trans->dbg.n_dest_reg; i++) { + u32 addr = le32_to_cpu(dest->reg_ops[i].addr); + u32 val = le32_to_cpu(dest->reg_ops[i].val); + + switch (dest->reg_ops[i].op) { + case CSR_ASSIGN: + iwl_write32(trans, addr, val); + break; + case CSR_SETBIT: + iwl_set_bit(trans, addr, BIT(val)); + break; + case CSR_CLEARBIT: + iwl_clear_bit(trans, addr, BIT(val)); + break; + case PRPH_ASSIGN: + iwl_write_prph(trans, addr, val); + break; + case PRPH_SETBIT: + iwl_set_bits_prph(trans, addr, BIT(val)); + break; + case PRPH_CLEARBIT: + iwl_clear_bits_prph(trans, addr, BIT(val)); + break; + case PRPH_BLOCKBIT: + if (iwl_read_prph(trans, addr) & BIT(val)) { + IWL_ERR(trans, + "BIT(%u) in address 0x%x is 1, stopping FW configuration\n", + val, addr); + goto monitor; + } + break; + default: + IWL_ERR(trans, "FW debug - unknown OP %d\n", + dest->reg_ops[i].op); + break; + } + } + +monitor: + if (dest->monitor_mode == EXTERNAL_MODE && fw_mon->size) { + iwl_write_prph(trans, le32_to_cpu(dest->base_reg), + fw_mon->physical >> dest->base_shift); + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000) + iwl_write_prph(trans, le32_to_cpu(dest->end_reg), + (fw_mon->physical + fw_mon->size - + 256) >> dest->end_shift); + else + iwl_write_prph(trans, le32_to_cpu(dest->end_reg), + (fw_mon->physical + fw_mon->size) >> + dest->end_shift); + } +} + +static int iwl_pcie_load_given_ucode(struct iwl_trans *trans, + const struct fw_img *image) +{ + int ret = 0; + int 
first_ucode_section; + + IWL_DEBUG_FW(trans, "working with %s CPU\n", + image->is_dual_cpus ? "Dual" : "Single"); + + /* load to FW the binary non secured sections of CPU1 */ + ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section); + if (ret) + return ret; + + if (image->is_dual_cpus) { + /* set CPU2 header address */ + iwl_write_prph(trans, + LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR, + LMPM_SECURE_CPU2_HDR_MEM_SPACE); + + /* load to FW the binary sections of CPU2 */ + ret = iwl_pcie_load_cpu_sections(trans, image, 2, + &first_ucode_section); + if (ret) + return ret; + } + + if (iwl_pcie_dbg_on(trans)) + iwl_pcie_apply_destination(trans); + + iwl_enable_interrupts(trans); + + /* release CPU reset */ + iwl_write32(trans, CSR_RESET, 0); + + return 0; +} + +static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans, + const struct fw_img *image) +{ + int ret = 0; + int first_ucode_section; + + IWL_DEBUG_FW(trans, "working with %s CPU\n", + image->is_dual_cpus ? "Dual" : "Single"); + + if (iwl_pcie_dbg_on(trans)) + iwl_pcie_apply_destination(trans); + + IWL_DEBUG_POWER(trans, "Original WFPM value = 0x%08X\n", + iwl_read_prph(trans, WFPM_GP2)); + + /* + * Set default value. On resume reading the values that were + * zeored can provide debug data on the resume flow. + * This is for debugging only and has no functional impact. + */ + iwl_write_prph(trans, WFPM_GP2, 0x01010101); + + /* configure the ucode to be ready to get the secured image */ + /* release CPU reset */ + iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT); + + /* load to FW the binary Secured sections of CPU1 */ + ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1, + &first_ucode_section); + if (ret) + return ret; + + /* load to FW the binary sections of CPU2 */ + return iwl_pcie_load_cpu_sections_8000(trans, image, 2, + &first_ucode_section); +} + +bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + bool hw_rfkill = iwl_is_rfkill_set(trans); + bool prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status); + bool report; + + if (hw_rfkill) { + set_bit(STATUS_RFKILL_HW, &trans->status); + set_bit(STATUS_RFKILL_OPMODE, &trans->status); + } else { + clear_bit(STATUS_RFKILL_HW, &trans->status); + if (trans_pcie->opmode_down) + clear_bit(STATUS_RFKILL_OPMODE, &trans->status); + } + + report = test_bit(STATUS_RFKILL_OPMODE, &trans->status); + + if (prev != report) + iwl_trans_pcie_rf_kill(trans, report, false); + + return hw_rfkill; +} + +struct iwl_causes_list { + u16 mask_reg; + u8 bit; + u8 addr; +}; + +#define IWL_CAUSE(reg, mask) \ + { \ + .mask_reg = reg, \ + .bit = ilog2(mask), \ + .addr = ilog2(mask) + \ + ((reg) == CSR_MSIX_FH_INT_MASK_AD ? -16 : \ + (reg) == CSR_MSIX_HW_INT_MASK_AD ? 
16 : \ + 0xffff), /* causes overflow warning */ \ + } + +static const struct iwl_causes_list causes_list_common[] = { + IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH0_NUM), + IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH1_NUM), + IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_S2D), + IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_FH_ERR), + IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_ALIVE), + IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_WAKEUP), + IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RESET_DONE), + IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_CT_KILL), + IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RF_KILL), + IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_PERIODIC), + IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SCD), + IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_FH_TX), + IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HW_ERR), + IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HAP), +}; + +static const struct iwl_causes_list causes_list_pre_bz[] = { + IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR), +}; + +static const struct iwl_causes_list causes_list_bz[] = { + IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ), +}; + +static void iwl_pcie_map_list(struct iwl_trans *trans, + const struct iwl_causes_list *causes, + int arr_size, int val) +{ + int i; + + for (i = 0; i < arr_size; i++) { + iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val); + iwl_clear_bit(trans, causes[i].mask_reg, + BIT(causes[i].bit)); + } +} + +static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE; + /* + * Access all non RX causes and map them to the default irq. + * In case we are missing at least one interrupt vector, + * the first interrupt vector will serve non-RX and FBQ causes. + */ + iwl_pcie_map_list(trans, causes_list_common, + ARRAY_SIZE(causes_list_common), val); + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + iwl_pcie_map_list(trans, causes_list_bz, + ARRAY_SIZE(causes_list_bz), val); + else + iwl_pcie_map_list(trans, causes_list_pre_bz, + ARRAY_SIZE(causes_list_pre_bz), val); +} + +static void iwl_pcie_map_rx_causes(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + u32 offset = + trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0; + u32 val, idx; + + /* + * The first RX queue - fallback queue, which is designated for + * management frame, command responses etc, is always mapped to the + * first interrupt vector. The other RX queues are mapped to + * the other (N - 2) interrupt vectors. 
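+ * When IWL_SHARED_IRQ_FIRST_RSS is set, the first RSS queue also
+ * shares that first vector with the fallback queue; the 'offset'
+ * below shifts the cause numbering by one to account for this.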
+ */ + val = BIT(MSIX_FH_INT_CAUSES_Q(0)); + for (idx = 1; idx < trans->num_rx_queues; idx++) { + iwl_write8(trans, CSR_MSIX_RX_IVAR(idx), + MSIX_FH_INT_CAUSES_Q(idx - offset)); + val |= BIT(MSIX_FH_INT_CAUSES_Q(idx)); + } + iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val); + + val = MSIX_FH_INT_CAUSES_Q(0); + if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) + val |= MSIX_NON_AUTO_CLEAR_CAUSE; + iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val); + + if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) + iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val); +} + +void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie) +{ + struct iwl_trans *trans = trans_pcie->trans; + + if (!trans_pcie->msix_enabled) { + if (trans->trans_cfg->mq_rx_supported && + test_bit(STATUS_DEVICE_ENABLED, &trans->status)) + iwl_write_umac_prph(trans, UREG_CHICK, + UREG_CHICK_MSI_ENABLE); + return; + } + /* + * The IVAR table needs to be configured again after reset, + * but if the device is disabled, we can't write to + * prph. + */ + if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) + iwl_write_umac_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE); + + /* + * Each cause from the causes list above and the RX causes is + * represented as a byte in the IVAR table. The first nibble + * represents the bound interrupt vector of the cause, the second + * represents no auto clear for this cause. This will be set if its + * interrupt vector is bound to serve other causes. + */ + iwl_pcie_map_rx_causes(trans); + + iwl_pcie_map_non_rx_causes(trans); +} + +static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie) +{ + struct iwl_trans *trans = trans_pcie->trans; + + iwl_pcie_conf_msix_hw(trans_pcie); + + if (!trans_pcie->msix_enabled) + return; + + trans_pcie->fh_init_mask = ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD); + trans_pcie->fh_mask = trans_pcie->fh_init_mask; + trans_pcie->hw_init_mask = ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD); + trans_pcie->hw_mask = trans_pcie->hw_init_mask; +} + +static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool from_irq) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + lockdep_assert_held(&trans_pcie->mutex); + + if (trans_pcie->is_down) + return; + + trans_pcie->is_down = true; + + /* tell the device to stop sending interrupts */ + iwl_disable_interrupts(trans); + + /* device going down, Stop using ICT table */ + iwl_pcie_disable_ict(trans); + + /* + * If a HW restart happens during firmware loading, + * then the firmware loading might call this function + * and later it might be called again due to the + * restart. So don't process again if the device is + * already dead. 
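+ * (The test_and_clear_bit() on STATUS_DEVICE_ENABLED below makes
+ * any repeated call skip this whole block.)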
+ */ + if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) { + IWL_DEBUG_INFO(trans, + "DEVICE_ENABLED bit was set and is now cleared\n"); + if (!from_irq) + iwl_pcie_synchronize_irqs(trans); + iwl_pcie_rx_napi_sync(trans); + iwl_pcie_tx_stop(trans); + iwl_pcie_rx_stop(trans); + + /* Power-down device's busmaster DMA clocks */ + if (!trans->cfg->apmg_not_supported) { + iwl_write_prph(trans, APMG_CLK_DIS_REG, + APMG_CLK_VAL_DMA_CLK_RQT); + udelay(5); + } + } + + /* Make sure (redundant) we've released our request to stay awake */ + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + iwl_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ); + else + iwl_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + + /* Stop the device, and put it in low power state */ + iwl_pcie_apm_stop(trans, false); + + /* re-take ownership to prevent other users from stealing the device */ + iwl_trans_pcie_sw_reset(trans, true); + + /* + * Upon stop, the IVAR table gets erased, so msi-x won't + * work. This causes a bug in RF-KILL flows, since the interrupt + * that enables radio won't fire on the correct irq, and the + * driver won't be able to handle the interrupt. + * Configure the IVAR table again after reset. + */ + iwl_pcie_conf_msix_hw(trans_pcie); + + /* + * Upon stop, the APM issues an interrupt if HW RF kill is set. + * This is a bug in certain verions of the hardware. + * Certain devices also keep sending HW RF kill interrupt all + * the time, unless the interrupt is ACKed even if the interrupt + * should be masked. Re-ACK all the interrupts here. + */ + iwl_disable_interrupts(trans); + + /* clear all status bits */ + clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); + clear_bit(STATUS_INT_ENABLED, &trans->status); + clear_bit(STATUS_TPOWER_PMI, &trans->status); + + /* + * Even if we stop the HW, we still want the RF kill + * interrupt + */ + iwl_enable_rfkill_int(trans); +} + +void iwl_pcie_synchronize_irqs(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + if (trans_pcie->msix_enabled) { + int i; + + for (i = 0; i < trans_pcie->alloc_vecs; i++) + synchronize_irq(trans_pcie->msix_entries[i].vector); + } else { + synchronize_irq(trans_pcie->pci_dev->irq); + } +} + +static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, + const struct fw_img *fw, bool run_in_rfkill) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + bool hw_rfkill; + int ret; + + /* This may fail if AMT took ownership of the device */ + if (iwl_pcie_prepare_card_hw(trans)) { + IWL_WARN(trans, "Exit HW not ready\n"); + return -EIO; + } + + iwl_enable_rfkill_int(trans); + + iwl_write32(trans, CSR_INT, 0xFFFFFFFF); + + /* + * We enabled the RF-Kill interrupt and the handler may very + * well be running. Disable the interrupts to make sure no other + * interrupt can be fired. 
+ */ + iwl_disable_interrupts(trans); + + /* Make sure it finished running */ + iwl_pcie_synchronize_irqs(trans); + + mutex_lock(&trans_pcie->mutex); + + /* If platform's RF_KILL switch is NOT set to KILL */ + hw_rfkill = iwl_pcie_check_hw_rf_kill(trans); + if (hw_rfkill && !run_in_rfkill) { + ret = -ERFKILL; + goto out; + } + + /* Someone called stop_device, don't try to start_fw */ + if (trans_pcie->is_down) { + IWL_WARN(trans, + "Can't start_fw since the HW hasn't been started\n"); + ret = -EIO; + goto out; + } + + /* make sure rfkill handshake bits are cleared */ + iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + + /* clear (again), then enable host interrupts */ + iwl_write32(trans, CSR_INT, 0xFFFFFFFF); + + ret = iwl_pcie_nic_init(trans); + if (ret) { + IWL_ERR(trans, "Unable to init nic\n"); + goto out; + } + + /* + * Now, we load the firmware and don't want to be interrupted, even + * by the RF-Kill interrupt (hence mask all the interrupt besides the + * FH_TX interrupt which is needed to load the firmware). If the + * RF-Kill switch is toggled, we will find out after having loaded + * the firmware and return the proper value to the caller. + */ + iwl_enable_fw_load_int(trans); + + /* really make sure rfkill handshake bits are cleared */ + iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + + /* Load the given image to the HW */ + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000) + ret = iwl_pcie_load_given_ucode_8000(trans, fw); + else + ret = iwl_pcie_load_given_ucode(trans, fw); + + /* re-check RF-Kill state since we may have missed the interrupt */ + hw_rfkill = iwl_pcie_check_hw_rf_kill(trans); + if (hw_rfkill && !run_in_rfkill) + ret = -ERFKILL; + +out: + mutex_unlock(&trans_pcie->mutex); + return ret; +} + +static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr) +{ + iwl_pcie_reset_ict(trans); + iwl_pcie_tx_start(trans, scd_addr); +} + +void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans, + bool was_in_rfkill) +{ + bool hw_rfkill; + + /* + * Check again since the RF kill state may have changed while + * all the interrupts were disabled, in this case we couldn't + * receive the RF kill interrupt and update the state in the + * op_mode. + * Don't call the op_mode if the rkfill state hasn't changed. + * This allows the op_mode to call stop_device from the rfkill + * notification without endless recursion. Under very rare + * circumstances, we might have a small recursion if the rfkill + * state changed exactly now while we were called from stop_device. + * This is very unlikely but can happen and is supported. 
+ */ + hw_rfkill = iwl_is_rfkill_set(trans); + if (hw_rfkill) { + set_bit(STATUS_RFKILL_HW, &trans->status); + set_bit(STATUS_RFKILL_OPMODE, &trans->status); + } else { + clear_bit(STATUS_RFKILL_HW, &trans->status); + clear_bit(STATUS_RFKILL_OPMODE, &trans->status); + } + if (hw_rfkill != was_in_rfkill) + iwl_trans_pcie_rf_kill(trans, hw_rfkill, false); +} + +static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + bool was_in_rfkill; + + iwl_op_mode_time_point(trans->op_mode, + IWL_FW_INI_TIME_POINT_HOST_DEVICE_DISABLE, + NULL); + + mutex_lock(&trans_pcie->mutex); + trans_pcie->opmode_down = true; + was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status); + _iwl_trans_pcie_stop_device(trans, false); + iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill); + mutex_unlock(&trans_pcie->mutex); +} + +void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq) +{ + struct iwl_trans_pcie __maybe_unused *trans_pcie = + IWL_TRANS_GET_PCIE_TRANS(trans); + + lockdep_assert_held(&trans_pcie->mutex); + + IWL_WARN(trans, "reporting RF_KILL (radio %s)\n", + state ? "disabled" : "enabled"); + if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) { + if (trans->trans_cfg->gen2) + _iwl_trans_pcie_gen2_stop_device(trans); + else + _iwl_trans_pcie_stop_device(trans, from_irq); + } +} + +void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans, + bool test, bool reset) +{ + iwl_disable_interrupts(trans); + + /* + * in testing mode, the host stays awake and the + * hardware won't be reset (not even partially) + */ + if (test) + return; + + iwl_pcie_disable_ict(trans); + + iwl_pcie_synchronize_irqs(trans); + + iwl_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + + if (reset) { + /* + * reset TX queues -- some of their registers reset during S3 + * so if we don't reset everything here the D3 image would try + * to execute some invalid memory upon resume + */ + iwl_trans_pcie_tx_reset(trans); + } + + iwl_pcie_set_pwr(trans, true); +} + +static int iwl_pcie_d3_handshake(struct iwl_trans *trans, bool suspend) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int ret; + + if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) + iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6, + suspend ? UREG_DOORBELL_TO_ISR6_SUSPEND : + UREG_DOORBELL_TO_ISR6_RESUME); + else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + iwl_write32(trans, CSR_IPC_SLEEP_CONTROL, + suspend ? CSR_IPC_SLEEP_CONTROL_SUSPEND : + CSR_IPC_SLEEP_CONTROL_RESUME); + else + return 0; + + ret = wait_event_timeout(trans_pcie->sx_waitq, + trans_pcie->sx_complete, 2 * HZ); + + /* Invalidate it toward next suspend or resume */ + trans_pcie->sx_complete = false; + + if (!ret) { + IWL_ERR(trans, "Timeout %s D3\n", + suspend ? 
"entering" : "exiting"); + return -ETIMEDOUT; + } + + return 0; +} + +static int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, + bool reset) +{ + int ret; + + if (!reset) + /* Enable persistence mode to avoid reset */ + iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_PERSIST_MODE); + + ret = iwl_pcie_d3_handshake(trans, true); + if (ret) + return ret; + + iwl_pcie_d3_complete_suspend(trans, test, reset); + + return 0; +} + +static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, + enum iwl_d3_status *status, + bool test, bool reset) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + u32 val; + int ret; + + if (test) { + iwl_enable_interrupts(trans); + *status = IWL_D3_STATUS_ALIVE; + ret = 0; + goto out; + } + + iwl_set_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + + ret = iwl_finish_nic_init(trans); + if (ret) + return ret; + + /* + * Reconfigure IVAR table in case of MSIX or reset ict table in + * MSI mode since HW reset erased it. + * Also enables interrupts - none will happen as + * the device doesn't know we're waking it up, only when + * the opmode actually tells it after this call. + */ + iwl_pcie_conf_msix_hw(trans_pcie); + if (!trans_pcie->msix_enabled) + iwl_pcie_reset_ict(trans); + iwl_enable_interrupts(trans); + + iwl_pcie_set_pwr(trans, false); + + if (!reset) { + iwl_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + } else { + iwl_trans_pcie_tx_reset(trans); + + ret = iwl_pcie_rx_init(trans); + if (ret) { + IWL_ERR(trans, + "Failed to resume the device (RX reset)\n"); + return ret; + } + } + + IWL_DEBUG_POWER(trans, "WFPM value upon resume = 0x%08X\n", + iwl_read_umac_prph(trans, WFPM_GP2)); + + val = iwl_read32(trans, CSR_RESET); + if (val & CSR_RESET_REG_FLAG_NEVO_RESET) + *status = IWL_D3_STATUS_RESET; + else + *status = IWL_D3_STATUS_ALIVE; + +out: + if (*status == IWL_D3_STATUS_ALIVE) + ret = iwl_pcie_d3_handshake(trans, false); + + return ret; +} + +static void +iwl_pcie_set_interrupt_capa(struct pci_dev *pdev, + struct iwl_trans *trans, + const struct iwl_cfg_trans_params *cfg_trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int max_irqs, num_irqs, i, ret; + u16 pci_cmd; + u32 max_rx_queues = IWL_MAX_RX_HW_QUEUES; + + if (!cfg_trans->mq_rx_supported) + goto enable_msi; + + if (cfg_trans->device_family <= IWL_DEVICE_FAMILY_9000) + max_rx_queues = IWL_9000_MAX_RX_HW_QUEUES; + + max_irqs = min_t(u32, num_online_cpus() + 2, max_rx_queues); + for (i = 0; i < max_irqs; i++) + trans_pcie->msix_entries[i].entry = i; + + num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries, + MSIX_MIN_INTERRUPT_VECTORS, + max_irqs); + if (num_irqs < 0) { + IWL_DEBUG_INFO(trans, + "Failed to enable msi-x mode (ret %d). Moving to msi mode.\n", + num_irqs); + goto enable_msi; + } + trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0; + + IWL_DEBUG_INFO(trans, + "MSI-X enabled. %d interrupt vectors were allocated\n", + num_irqs); + + /* + * In case the OS provides fewer interrupts than requested, different + * causes will share the same interrupt vector as follows: + * One interrupt less: non rx causes shared with FBQ. + * Two interrupts less: non rx causes shared with FBQ and RSS. + * More than two interrupts: we will use fewer RSS queues. 
+ */ + if (num_irqs <= max_irqs - 2) { + trans_pcie->trans->num_rx_queues = num_irqs + 1; + trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX | + IWL_SHARED_IRQ_FIRST_RSS; + } else if (num_irqs == max_irqs - 1) { + trans_pcie->trans->num_rx_queues = num_irqs; + trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX; + } else { + trans_pcie->trans->num_rx_queues = num_irqs - 1; + } + + IWL_DEBUG_INFO(trans, + "MSI-X enabled with rx queues %d, vec mask 0x%x\n", + trans_pcie->trans->num_rx_queues, trans_pcie->shared_vec_mask); + + WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES); + + trans_pcie->alloc_vecs = num_irqs; + trans_pcie->msix_enabled = true; + return; + +enable_msi: + ret = pci_enable_msi(pdev); + if (ret) { + dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret); + /* enable rfkill interrupt: hw bug w/a */ + pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); + if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { + pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; + pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); + } + } +} + +static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans) +{ + int iter_rx_q, i, ret, cpu, offset; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1; + iter_rx_q = trans_pcie->trans->num_rx_queues - 1 + i; + offset = 1 + i; + for (; i < iter_rx_q ; i++) { + /* + * Get the cpu prior to the place to search + * (i.e. return will be > i - 1). + */ + cpu = cpumask_next(i - offset, cpu_online_mask); + cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]); + ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector, + &trans_pcie->affinity_mask[i]); + if (ret) + IWL_ERR(trans_pcie->trans, + "Failed to set affinity mask for IRQ %d\n", + trans_pcie->msix_entries[i].vector); + } +} + +static int iwl_pcie_init_msix_handler(struct pci_dev *pdev, + struct iwl_trans_pcie *trans_pcie) +{ + int i; + + for (i = 0; i < trans_pcie->alloc_vecs; i++) { + int ret; + struct msix_entry *msix_entry; + const char *qname = queue_name(&pdev->dev, trans_pcie, i); + + if (!qname) + return -ENOMEM; + + msix_entry = &trans_pcie->msix_entries[i]; + ret = devm_request_threaded_irq(&pdev->dev, + msix_entry->vector, + iwl_pcie_msix_isr, + (i == trans_pcie->def_irq) ? 
+ iwl_pcie_irq_msix_handler : + iwl_pcie_irq_rx_msix_handler, + IRQF_SHARED, + qname, + msix_entry); + if (ret) { + IWL_ERR(trans_pcie->trans, + "Error allocating IRQ %d\n", i); + + return ret; + } + } + iwl_pcie_irq_set_affinity(trans_pcie->trans); + + return 0; +} + +static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans) +{ + u32 hpm, wprot; + + switch (trans->trans_cfg->device_family) { + case IWL_DEVICE_FAMILY_9000: + wprot = PREG_PRPH_WPROT_9000; + break; + case IWL_DEVICE_FAMILY_22000: + wprot = PREG_PRPH_WPROT_22000; + break; + default: + return 0; + } + + hpm = iwl_read_umac_prph_no_grab(trans, HPM_DEBUG); + if (hpm != 0xa5a5a5a0 && (hpm & PERSISTENCE_BIT)) { + u32 wprot_val = iwl_read_umac_prph_no_grab(trans, wprot); + + if (wprot_val & PREG_WFPM_ACCESS) { + IWL_ERR(trans, + "Error, can not clear persistence bit\n"); + return -EPERM; + } + iwl_write_umac_prph_no_grab(trans, HPM_DEBUG, + hpm & ~PERSISTENCE_BIT); + } + + return 0; +} + +static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans) +{ + int ret; + + ret = iwl_finish_nic_init(trans); + if (ret < 0) + return ret; + + iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG, + HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE); + udelay(20); + iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG, + HPM_HIPM_GEN_CFG_CR_PG_EN | + HPM_HIPM_GEN_CFG_CR_SLP_EN); + udelay(20); + iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG, + HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE); + + return iwl_trans_pcie_sw_reset(trans, true); +} + +static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int err; + + lockdep_assert_held(&trans_pcie->mutex); + + err = iwl_pcie_prepare_card_hw(trans); + if (err) { + IWL_ERR(trans, "Error while preparing HW: %d\n", err); + return err; + } + + err = iwl_trans_pcie_clear_persistence_bit(trans); + if (err) + return err; + + err = iwl_trans_pcie_sw_reset(trans, true); + if (err) + return err; + + if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 && + trans->trans_cfg->integrated) { + err = iwl_pcie_gen2_force_power_gating(trans); + if (err) + return err; + } + + err = iwl_pcie_apm_init(trans); + if (err) + return err; + + iwl_pcie_init_msix(trans_pcie); + + /* From now on, the op_mode will be kept updated about RF kill state */ + iwl_enable_rfkill_int(trans); + + trans_pcie->opmode_down = false; + + /* Set is_down to false here so that...*/ + trans_pcie->is_down = false; + + /* ...rfkill can call stop_device and set it false if needed */ + iwl_pcie_check_hw_rf_kill(trans); + + return 0; +} + +static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int ret; + + mutex_lock(&trans_pcie->mutex); + ret = _iwl_trans_pcie_start_hw(trans); + mutex_unlock(&trans_pcie->mutex); + + return ret; +} + +static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + mutex_lock(&trans_pcie->mutex); + + /* disable interrupts - don't enable HW RF kill interrupt */ + iwl_disable_interrupts(trans); + + iwl_pcie_apm_stop(trans, true); + + iwl_disable_interrupts(trans); + + iwl_pcie_disable_ict(trans); + + mutex_unlock(&trans_pcie->mutex); + + iwl_pcie_synchronize_irqs(trans); +} + +static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val) +{ + writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); +} + +static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val) +{ + 
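+ /* 32-bit MMIO write at offset ofs within the device's mapped PCI BAR */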
writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); +} + +static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs) +{ + return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); +} + +static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans) +{ + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + return 0x00FFFFFF; + else + return 0x000FFFFF; +} + +static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg) +{ + u32 mask = iwl_trans_pcie_prph_msk(trans); + + iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR, + ((reg & mask) | (3 << 24))); + return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT); +} + +static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, + u32 val) +{ + u32 mask = iwl_trans_pcie_prph_msk(trans); + + iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR, + ((addr & mask) | (3 << 24))); + iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val); +} + +static void iwl_trans_pcie_configure(struct iwl_trans *trans, + const struct iwl_trans_config *trans_cfg) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + /* free all first - we might be reconfigured for a different size */ + iwl_pcie_free_rbs_pool(trans); + + trans->txqs.cmd.q_id = trans_cfg->cmd_queue; + trans->txqs.cmd.fifo = trans_cfg->cmd_fifo; + trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout; + trans->txqs.page_offs = trans_cfg->cb_data_offs; + trans->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *); + trans->txqs.queue_alloc_cmd_ver = trans_cfg->queue_alloc_cmd_ver; + + if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS)) + trans_pcie->n_no_reclaim_cmds = 0; + else + trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds; + if (trans_pcie->n_no_reclaim_cmds) + memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds, + trans_pcie->n_no_reclaim_cmds * sizeof(u8)); + + trans_pcie->rx_buf_size = trans_cfg->rx_buf_size; + trans_pcie->rx_page_order = + iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size); + trans_pcie->rx_buf_bytes = + iwl_trans_get_rb_size(trans_pcie->rx_buf_size); + trans_pcie->supported_dma_mask = DMA_BIT_MASK(12); + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + trans_pcie->supported_dma_mask = DMA_BIT_MASK(11); + + trans->txqs.bc_table_dword = trans_cfg->bc_table_dword; + trans_pcie->scd_set_active = trans_cfg->scd_set_active; + + trans->command_groups = trans_cfg->command_groups; + trans->command_groups_size = trans_cfg->command_groups_size; + + /* Initialize NAPI here - it should be before registering to mac80211 + * in the opmode but after the HW struct is allocated. + * As this function may be called again in some corner cases don't + * do anything if NAPI was already initialized. 
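+ * (init_dummy_netdev() leaves reg_state at NETREG_DUMMY, so a second
+ * pass through here becomes a no-op.)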
+ */ + if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY) + init_dummy_netdev(&trans_pcie->napi_dev); + + trans_pcie->fw_reset_handshake = trans_cfg->fw_reset_handshake; +} + +void iwl_trans_pcie_free(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int i; + + iwl_pcie_synchronize_irqs(trans); + + if (trans->trans_cfg->gen2) + iwl_txq_gen2_tx_free(trans); + else + iwl_pcie_tx_free(trans); + iwl_pcie_rx_free(trans); + + if (trans_pcie->rba.alloc_wq) { + destroy_workqueue(trans_pcie->rba.alloc_wq); + trans_pcie->rba.alloc_wq = NULL; + } + + if (trans_pcie->msix_enabled) { + for (i = 0; i < trans_pcie->alloc_vecs; i++) { + irq_set_affinity_hint( + trans_pcie->msix_entries[i].vector, + NULL); + } + + trans_pcie->msix_enabled = false; + } else { + iwl_pcie_free_ict(trans); + } + + iwl_pcie_free_fw_monitor(trans); + + if (trans_pcie->pnvm_dram.size) + dma_free_coherent(trans->dev, trans_pcie->pnvm_dram.size, + trans_pcie->pnvm_dram.block, + trans_pcie->pnvm_dram.physical); + + if (trans_pcie->reduce_power_dram.size) + dma_free_coherent(trans->dev, + trans_pcie->reduce_power_dram.size, + trans_pcie->reduce_power_dram.block, + trans_pcie->reduce_power_dram.physical); + + mutex_destroy(&trans_pcie->mutex); + iwl_trans_free(trans); +} + +static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state) +{ + if (state) + set_bit(STATUS_TPOWER_PMI, &trans->status); + else + clear_bit(STATUS_TPOWER_PMI, &trans->status); +} + +struct iwl_trans_pcie_removal { + struct pci_dev *pdev; + struct work_struct work; +}; + +static void iwl_trans_pcie_removal_wk(struct work_struct *wk) +{ + struct iwl_trans_pcie_removal *removal = + container_of(wk, struct iwl_trans_pcie_removal, work); + struct pci_dev *pdev = removal->pdev; + static char *prop[] = {"EVENT=INACCESSIBLE", NULL}; + + dev_err(&pdev->dev, "Device gone - attempting removal\n"); + kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop); + pci_lock_rescan_remove(); + pci_dev_put(pdev); + pci_stop_and_remove_bus_device(pdev); + pci_unlock_rescan_remove(); + + kfree(removal); + module_put(THIS_MODULE); +} + +/* + * This version doesn't disable BHs but rather assumes they're + * already disabled. + */ +bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) +{ + int ret; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + u32 write = CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ; + u32 mask = CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | + CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP; + u32 poll = CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN; + + spin_lock(&trans_pcie->reg_lock); + + if (trans_pcie->cmd_hold_nic_awake) + goto out; + + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { + write = CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ; + mask = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS; + poll = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS; + } + + /* this bit wakes up the NIC */ + __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, write); + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000) + udelay(2); + + /* + * These bits say the device is running, and should keep running for + * at least a short while (at least as long as MAC_ACCESS_REQ stays 1), + * but they do not indicate that embedded SRAM is restored yet; + * HW with volatile SRAM must save/restore contents to/from + * host DRAM when sleeping/waking for power-saving. 
+ * Each direction takes approximately 1/4 millisecond; with this + * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a + * series of register accesses are expected (e.g. reading Event Log), + * to keep device from sleeping. + * + * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that + * SRAM is okay/restored. We don't check that here because this call + * is just for hardware register access; but GP1 MAC_SLEEP + * check is a good idea before accessing the SRAM of HW with + * volatile SRAM (e.g. reading Event Log). + * + * 5000 series and later (including 1000 series) have non-volatile SRAM, + * and do not save/restore SRAM when power cycling. + */ + ret = iwl_poll_bit(trans, CSR_GP_CNTRL, poll, mask, 15000); + if (unlikely(ret < 0)) { + u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL); + + WARN_ONCE(1, + "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n", + cntrl); + + iwl_trans_pcie_dump_regs(trans); + + if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U) { + struct iwl_trans_pcie_removal *removal; + + if (test_bit(STATUS_TRANS_DEAD, &trans->status)) + goto err; + + IWL_ERR(trans, "Device gone - scheduling removal!\n"); + + /* + * get a module reference to avoid doing this + * while unloading anyway and to avoid + * scheduling a work with code that's being + * removed. + */ + if (!try_module_get(THIS_MODULE)) { + IWL_ERR(trans, + "Module is being unloaded - abort\n"); + goto err; + } + + removal = kzalloc(sizeof(*removal), GFP_ATOMIC); + if (!removal) { + module_put(THIS_MODULE); + goto err; + } + /* + * we don't need to clear this flag, because + * the trans will be freed and reallocated. + */ + set_bit(STATUS_TRANS_DEAD, &trans->status); + + removal->pdev = to_pci_dev(trans->dev); + INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk); + pci_dev_get(removal->pdev); + schedule_work(&removal->work); + } else { + iwl_write32(trans, CSR_RESET, + CSR_RESET_REG_FLAG_FORCE_NMI); + } + +err: + spin_unlock(&trans_pcie->reg_lock); + return false; + } + +out: + /* + * Fool sparse by faking we release the lock - sparse will + * track nic_access anyway. + */ + __release(&trans_pcie->reg_lock); + return true; +} + +static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) +{ + bool ret; + + local_bh_disable(); + ret = __iwl_trans_pcie_grab_nic_access(trans); + if (ret) { + /* keep BHs disabled until iwl_trans_pcie_release_nic_access */ + return ret; + } + local_bh_enable(); + return false; +} + +static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + lockdep_assert_held(&trans_pcie->reg_lock); + + /* + * Fool sparse by faking we acquiring the lock - sparse will + * track nic_access anyway. + */ + __acquire(&trans_pcie->reg_lock); + + if (trans_pcie->cmd_hold_nic_awake) + goto out; + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ); + else + __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + /* + * Above we read the CSR_GP_CNTRL register, which will flush + * any previous writes, but we need the write that clears the + * MAC_ACCESS_REQ bit to be performed before any other writes + * scheduled on different CPUs (after we drop reg_lock). 
+ */ +out: + spin_unlock_bh(&trans_pcie->reg_lock); +} + +static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, + void *buf, int dwords) +{ + int offs = 0; + u32 *vals = buf; + + while (offs < dwords) { + /* limit the time we spin here under lock to 1/2s */ + unsigned long end = jiffies + HZ / 2; + bool resched = false; + + if (iwl_trans_grab_nic_access(trans)) { + iwl_write32(trans, HBUS_TARG_MEM_RADDR, + addr + 4 * offs); + + while (offs < dwords) { + vals[offs] = iwl_read32(trans, + HBUS_TARG_MEM_RDAT); + offs++; + + if (time_after(jiffies, end)) { + resched = true; + break; + } + } + iwl_trans_release_nic_access(trans); + + if (resched) + cond_resched(); + } else { + return -EBUSY; + } + } + + return 0; +} + +static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr, + const void *buf, int dwords) +{ + int offs, ret = 0; + const u32 *vals = buf; + + if (iwl_trans_grab_nic_access(trans)) { + iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr); + for (offs = 0; offs < dwords; offs++) + iwl_write32(trans, HBUS_TARG_MEM_WDAT, + vals ? vals[offs] : 0); + iwl_trans_release_nic_access(trans); + } else { + ret = -EBUSY; + } + return ret; +} + +static int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs, + u32 *val) +{ + return pci_read_config_dword(IWL_TRANS_GET_PCIE_TRANS(trans)->pci_dev, + ofs, val); +} + +static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block) +{ + int i; + + for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { + struct iwl_txq *txq = trans->txqs.txq[i]; + + if (i == trans->txqs.cmd.q_id) + continue; + + spin_lock_bh(&txq->lock); + + if (!block && !(WARN_ON_ONCE(!txq->block))) { + txq->block--; + if (!txq->block) { + iwl_write32(trans, HBUS_TARG_WRPTR, + txq->write_ptr | (i << 8)); + } + } else if (block) { + txq->block++; + } + + spin_unlock_bh(&txq->lock); + } +} + +#define IWL_FLUSH_WAIT_MS 2000 + +static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue, + struct iwl_trans_rxq_dma_data *data) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + if (queue >= trans->num_rx_queues || !trans_pcie->rxq) + return -EINVAL; + + data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma; + data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma; + data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma; + data->fr_bd_wid = 0; + + return 0; +} + +static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx) +{ + struct iwl_txq *txq; + unsigned long now = jiffies; + bool overflow_tx; + u8 wr_ptr; + + /* Make sure the NIC is still alive in the bus */ + if (test_bit(STATUS_TRANS_DEAD, &trans->status)) + return -ENODEV; + + if (!test_bit(txq_idx, trans->txqs.queue_used)) + return -EINVAL; + + IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx); + txq = trans->txqs.txq[txq_idx]; + + spin_lock_bh(&txq->lock); + overflow_tx = txq->overflow_tx || + !skb_queue_empty(&txq->overflow_q); + spin_unlock_bh(&txq->lock); + + wr_ptr = READ_ONCE(txq->write_ptr); + + while ((txq->read_ptr != READ_ONCE(txq->write_ptr) || + overflow_tx) && + !time_after(jiffies, + now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) { + u8 write_ptr = READ_ONCE(txq->write_ptr); + + /* + * If write pointer moved during the wait, warn only + * if the TX came from op mode. In case TX came from + * trans layer (overflow TX) don't warn. 
+ */ + if (WARN_ONCE(wr_ptr != write_ptr && !overflow_tx, + "WR pointer moved while flushing %d -> %d\n", + wr_ptr, write_ptr)) + return -ETIMEDOUT; + wr_ptr = write_ptr; + + usleep_range(1000, 2000); + + spin_lock_bh(&txq->lock); + overflow_tx = txq->overflow_tx || + !skb_queue_empty(&txq->overflow_q); + spin_unlock_bh(&txq->lock); + } + + if (txq->read_ptr != txq->write_ptr) { + IWL_ERR(trans, + "fail to flush all tx fifo queues Q %d\n", txq_idx); + iwl_txq_log_scd_error(trans, txq); + return -ETIMEDOUT; + } + + IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", txq_idx); + + return 0; +} + +static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm) +{ + int cnt; + int ret = 0; + + /* waiting for all the tx frames complete might take a while */ + for (cnt = 0; + cnt < trans->trans_cfg->base_params->num_of_queues; + cnt++) { + + if (cnt == trans->txqs.cmd.q_id) + continue; + if (!test_bit(cnt, trans->txqs.queue_used)) + continue; + if (!(BIT(cnt) & txq_bm)) + continue; + + ret = iwl_trans_pcie_wait_txq_empty(trans, cnt); + if (ret) + break; + } + + return ret; +} + +static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg, + u32 mask, u32 value) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + spin_lock_bh(&trans_pcie->reg_lock); + __iwl_trans_pcie_set_bits_mask(trans, reg, mask, value); + spin_unlock_bh(&trans_pcie->reg_lock); +} + +static const char *get_csr_string(int cmd) +{ +#define IWL_CMD(x) case x: return #x + switch (cmd) { + IWL_CMD(CSR_HW_IF_CONFIG_REG); + IWL_CMD(CSR_INT_COALESCING); + IWL_CMD(CSR_INT); + IWL_CMD(CSR_INT_MASK); + IWL_CMD(CSR_FH_INT_STATUS); + IWL_CMD(CSR_GPIO_IN); + IWL_CMD(CSR_RESET); + IWL_CMD(CSR_GP_CNTRL); + IWL_CMD(CSR_HW_REV); + IWL_CMD(CSR_EEPROM_REG); + IWL_CMD(CSR_EEPROM_GP); + IWL_CMD(CSR_OTP_GP_REG); + IWL_CMD(CSR_GIO_REG); + IWL_CMD(CSR_GP_UCODE_REG); + IWL_CMD(CSR_GP_DRIVER_REG); + IWL_CMD(CSR_UCODE_DRV_GP1); + IWL_CMD(CSR_UCODE_DRV_GP2); + IWL_CMD(CSR_LED_REG); + IWL_CMD(CSR_DRAM_INT_TBL_REG); + IWL_CMD(CSR_GIO_CHICKEN_BITS); + IWL_CMD(CSR_ANA_PLL_CFG); + IWL_CMD(CSR_HW_REV_WA_REG); + IWL_CMD(CSR_MONITOR_STATUS_REG); + IWL_CMD(CSR_DBG_HPET_MEM_REG); + default: + return "UNKNOWN"; + } +#undef IWL_CMD +} + +void iwl_pcie_dump_csr(struct iwl_trans *trans) +{ + int i; + static const u32 csr_tbl[] = { + CSR_HW_IF_CONFIG_REG, + CSR_INT_COALESCING, + CSR_INT, + CSR_INT_MASK, + CSR_FH_INT_STATUS, + CSR_GPIO_IN, + CSR_RESET, + CSR_GP_CNTRL, + CSR_HW_REV, + CSR_EEPROM_REG, + CSR_EEPROM_GP, + CSR_OTP_GP_REG, + CSR_GIO_REG, + CSR_GP_UCODE_REG, + CSR_GP_DRIVER_REG, + CSR_UCODE_DRV_GP1, + CSR_UCODE_DRV_GP2, + CSR_LED_REG, + CSR_DRAM_INT_TBL_REG, + CSR_GIO_CHICKEN_BITS, + CSR_ANA_PLL_CFG, + CSR_MONITOR_STATUS_REG, + CSR_HW_REV_WA_REG, + CSR_DBG_HPET_MEM_REG + }; + IWL_ERR(trans, "CSR values:\n"); + IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is " + "CSR_INT_PERIODIC_REG)\n"); + for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) { + IWL_ERR(trans, " %25s: 0X%08x\n", + get_csr_string(csr_tbl[i]), + iwl_read32(trans, csr_tbl[i])); + } +} + +#ifdef CONFIG_IWLWIFI_DEBUGFS +/* create and remove of files */ +#define DEBUGFS_ADD_FILE(name, parent, mode) do { \ + debugfs_create_file(#name, mode, parent, trans, \ + &iwl_dbgfs_##name##_ops); \ +} while (0) + +/* file operation */ +#define DEBUGFS_READ_FILE_OPS(name) \ +static const struct file_operations iwl_dbgfs_##name##_ops = { \ + .read = iwl_dbgfs_##name##_read, \ + .open = simple_open, \ + .llseek = generic_file_llseek, \ +}; + +#define 
DEBUGFS_WRITE_FILE_OPS(name) \ +static const struct file_operations iwl_dbgfs_##name##_ops = { \ + .write = iwl_dbgfs_##name##_write, \ + .open = simple_open, \ + .llseek = generic_file_llseek, \ +}; + +#define DEBUGFS_READ_WRITE_FILE_OPS(name) \ +static const struct file_operations iwl_dbgfs_##name##_ops = { \ + .write = iwl_dbgfs_##name##_write, \ + .read = iwl_dbgfs_##name##_read, \ + .open = simple_open, \ + .llseek = generic_file_llseek, \ +}; + +struct iwl_dbgfs_tx_queue_priv { + struct iwl_trans *trans; +}; + +struct iwl_dbgfs_tx_queue_state { + loff_t pos; +}; + +static void *iwl_dbgfs_tx_queue_seq_start(struct seq_file *seq, loff_t *pos) +{ + struct iwl_dbgfs_tx_queue_priv *priv = seq->private; + struct iwl_dbgfs_tx_queue_state *state; + + if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues) + return NULL; + + state = kmalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return NULL; + state->pos = *pos; + return state; +} + +static void *iwl_dbgfs_tx_queue_seq_next(struct seq_file *seq, + void *v, loff_t *pos) +{ + struct iwl_dbgfs_tx_queue_priv *priv = seq->private; + struct iwl_dbgfs_tx_queue_state *state = v; + + *pos = ++state->pos; + + if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues) + return NULL; + + return state; +} + +static void iwl_dbgfs_tx_queue_seq_stop(struct seq_file *seq, void *v) +{ + kfree(v); +} + +static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v) +{ + struct iwl_dbgfs_tx_queue_priv *priv = seq->private; + struct iwl_dbgfs_tx_queue_state *state = v; + struct iwl_trans *trans = priv->trans; + struct iwl_txq *txq = trans->txqs.txq[state->pos]; + + seq_printf(seq, "hwq %.3u: used=%d stopped=%d ", + (unsigned int)state->pos, + !!test_bit(state->pos, trans->txqs.queue_used), + !!test_bit(state->pos, trans->txqs.queue_stopped)); + if (txq) + seq_printf(seq, + "read=%u write=%u need_update=%d frozen=%d n_window=%d ampdu=%d", + txq->read_ptr, txq->write_ptr, + txq->need_update, txq->frozen, + txq->n_window, txq->ampdu); + else + seq_puts(seq, "(unallocated)"); + + if (state->pos == trans->txqs.cmd.q_id) + seq_puts(seq, " (HCMD)"); + seq_puts(seq, "\n"); + + return 0; +} + +static const struct seq_operations iwl_dbgfs_tx_queue_seq_ops = { + .start = iwl_dbgfs_tx_queue_seq_start, + .next = iwl_dbgfs_tx_queue_seq_next, + .stop = iwl_dbgfs_tx_queue_seq_stop, + .show = iwl_dbgfs_tx_queue_seq_show, +}; + +static int iwl_dbgfs_tx_queue_open(struct inode *inode, struct file *filp) +{ + struct iwl_dbgfs_tx_queue_priv *priv; + + priv = __seq_open_private(filp, &iwl_dbgfs_tx_queue_seq_ops, + sizeof(*priv)); + + if (!priv) + return -ENOMEM; + + priv->trans = inode->i_private; + return 0; +} + +static ssize_t iwl_dbgfs_rx_queue_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_trans *trans = file->private_data; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + char *buf; + int pos = 0, i, ret; + size_t bufsz; + + bufsz = sizeof(char) * 121 * trans->num_rx_queues; + + if (!trans_pcie->rxq) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) { + struct iwl_rxq *rxq = &trans_pcie->rxq[i]; + + pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n", + i); + pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n", + rxq->read); + pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n", + rxq->write); + pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n", + 
rxq->write_actual); + pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n", + rxq->need_update); + pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n", + rxq->free_count); + if (rxq->rb_stts) { + u32 r = __le16_to_cpu(iwl_get_closed_rb_stts(trans, + rxq)); + pos += scnprintf(buf + pos, bufsz - pos, + "\tclosed_rb_num: %u\n", + r & 0x0FFF); + } else { + pos += scnprintf(buf + pos, bufsz - pos, + "\tclosed_rb_num: Not Allocated\n"); + } + } + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + + return ret; +} + +static ssize_t iwl_dbgfs_interrupt_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_trans *trans = file->private_data; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct isr_statistics *isr_stats = &trans_pcie->isr_stats; + + int pos = 0; + char *buf; + int bufsz = 24 * 64; /* 24 items * 64 char per item */ + ssize_t ret; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pos += scnprintf(buf + pos, bufsz - pos, + "Interrupt Statistics Report:\n"); + + pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n", + isr_stats->hw); + pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n", + isr_stats->sw); + if (isr_stats->sw || isr_stats->hw) { + pos += scnprintf(buf + pos, bufsz - pos, + "\tLast Restarting Code: 0x%X\n", + isr_stats->err_code); + } +#ifdef CONFIG_IWLWIFI_DEBUG + pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n", + isr_stats->sch); + pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n", + isr_stats->alive); +#endif + pos += scnprintf(buf + pos, bufsz - pos, + "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill); + + pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n", + isr_stats->ctkill); + + pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n", + isr_stats->wakeup); + + pos += scnprintf(buf + pos, bufsz - pos, + "Rx command responses:\t\t %u\n", isr_stats->rx); + + pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n", + isr_stats->tx); + + pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n", + isr_stats->unhandled); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_interrupt_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_trans *trans = file->private_data; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct isr_statistics *isr_stats = &trans_pcie->isr_stats; + u32 reset_flag; + int ret; + + ret = kstrtou32_from_user(user_buf, count, 16, &reset_flag); + if (ret) + return ret; + if (reset_flag == 0) + memset(isr_stats, 0, sizeof(*isr_stats)); + + return count; +} + +static ssize_t iwl_dbgfs_csr_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_trans *trans = file->private_data; + + iwl_pcie_dump_csr(trans); + + return count; +} + +static ssize_t iwl_dbgfs_fh_reg_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_trans *trans = file->private_data; + char *buf = NULL; + ssize_t ret; + + ret = iwl_dump_fh(trans, &buf); + if (ret < 0) + return ret; + if (!buf) + return -EINVAL; + ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_rfkill_read(struct file *file, + char __user *user_buf, + size_t 
count, loff_t *ppos) +{ + struct iwl_trans *trans = file->private_data; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + char buf[100]; + int pos; + + pos = scnprintf(buf, sizeof(buf), "debug: %d\nhw: %d\n", + trans_pcie->debug_rfkill, + !(iwl_read32(trans, CSR_GP_CNTRL) & + CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_rfkill_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_trans *trans = file->private_data; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + bool new_value; + int ret; + + ret = kstrtobool_from_user(user_buf, count, &new_value); + if (ret) + return ret; + if (new_value == trans_pcie->debug_rfkill) + return count; + IWL_WARN(trans, "changing debug rfkill %d->%d\n", + trans_pcie->debug_rfkill, new_value); + trans_pcie->debug_rfkill = new_value; + iwl_pcie_handle_rfkill_irq(trans, false); + + return count; +} + +static int iwl_dbgfs_monitor_data_open(struct inode *inode, + struct file *file) +{ + struct iwl_trans *trans = inode->i_private; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + if (!trans->dbg.dest_tlv || + trans->dbg.dest_tlv->monitor_mode != EXTERNAL_MODE) { + IWL_ERR(trans, "Debug destination is not set to DRAM\n"); + return -ENOENT; + } + + if (trans_pcie->fw_mon_data.state != IWL_FW_MON_DBGFS_STATE_CLOSED) + return -EBUSY; + + trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_OPEN; + return simple_open(inode, file); +} + +static int iwl_dbgfs_monitor_data_release(struct inode *inode, + struct file *file) +{ + struct iwl_trans_pcie *trans_pcie = + IWL_TRANS_GET_PCIE_TRANS(inode->i_private); + + if (trans_pcie->fw_mon_data.state == IWL_FW_MON_DBGFS_STATE_OPEN) + trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED; + return 0; +} + +static bool iwl_write_to_user_buf(char __user *user_buf, ssize_t count, + void *buf, ssize_t *size, + ssize_t *bytes_copied) +{ + ssize_t buf_size_left = count - *bytes_copied; + + buf_size_left = buf_size_left - (buf_size_left % sizeof(u32)); + if (*size > buf_size_left) + *size = buf_size_left; + + *size -= copy_to_user(user_buf, buf, *size); + *bytes_copied += *size; + + if (buf_size_left == *size) + return true; + return false; +} + +static ssize_t iwl_dbgfs_monitor_data_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_trans *trans = file->private_data; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + u8 *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf; + struct cont_rec *data = &trans_pcie->fw_mon_data; + u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt; + ssize_t size, bytes_copied = 0; + bool b_full; + + if (trans->dbg.dest_tlv) { + write_ptr_addr = + le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg); + wrap_cnt_addr = le32_to_cpu(trans->dbg.dest_tlv->wrap_count); + } else { + write_ptr_addr = MON_BUFF_WRPTR; + wrap_cnt_addr = MON_BUFF_CYCLE_CNT; + } + + if (unlikely(!trans->dbg.rec_on)) + return 0; + + mutex_lock(&data->mutex); + if (data->state == + IWL_FW_MON_DBGFS_STATE_DISABLED) { + mutex_unlock(&data->mutex); + return 0; + } + + /* write_ptr position in bytes rather then DW */ + write_ptr = iwl_read_prph(trans, write_ptr_addr) * sizeof(u32); + wrap_cnt = iwl_read_prph(trans, wrap_cnt_addr); + + if (data->prev_wrap_cnt == wrap_cnt) { + size = write_ptr - data->prev_wr_ptr; + curr_buf = cpu_addr + data->prev_wr_ptr; + 
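+ /* no wrap since the previous read: copy only the newly written bytes */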
b_full = iwl_write_to_user_buf(user_buf, count, + curr_buf, &size, + &bytes_copied); + data->prev_wr_ptr += size; + + } else if (data->prev_wrap_cnt == wrap_cnt - 1 && + write_ptr < data->prev_wr_ptr) { + size = trans->dbg.fw_mon.size - data->prev_wr_ptr; + curr_buf = cpu_addr + data->prev_wr_ptr; + b_full = iwl_write_to_user_buf(user_buf, count, + curr_buf, &size, + &bytes_copied); + data->prev_wr_ptr += size; + + if (!b_full) { + size = write_ptr; + b_full = iwl_write_to_user_buf(user_buf, count, + cpu_addr, &size, + &bytes_copied); + data->prev_wr_ptr = size; + data->prev_wrap_cnt++; + } + } else { + if (data->prev_wrap_cnt == wrap_cnt - 1 && + write_ptr > data->prev_wr_ptr) + IWL_WARN(trans, + "write pointer passed previous write pointer, start copying from the beginning\n"); + else if (!unlikely(data->prev_wrap_cnt == 0 && + data->prev_wr_ptr == 0)) + IWL_WARN(trans, + "monitor data is out of sync, start copying from the beginning\n"); + + size = write_ptr; + b_full = iwl_write_to_user_buf(user_buf, count, + cpu_addr, &size, + &bytes_copied); + data->prev_wr_ptr = size; + data->prev_wrap_cnt = wrap_cnt; + } + + mutex_unlock(&data->mutex); + + return bytes_copied; +} + +static ssize_t iwl_dbgfs_rf_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_trans *trans = file->private_data; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + if (!trans_pcie->rf_name[0]) + return -ENODEV; + + return simple_read_from_buffer(user_buf, count, ppos, + trans_pcie->rf_name, + strlen(trans_pcie->rf_name)); +} + +DEBUGFS_READ_WRITE_FILE_OPS(interrupt); +DEBUGFS_READ_FILE_OPS(fh_reg); +DEBUGFS_READ_FILE_OPS(rx_queue); +DEBUGFS_WRITE_FILE_OPS(csr); +DEBUGFS_READ_WRITE_FILE_OPS(rfkill); +DEBUGFS_READ_FILE_OPS(rf); + +static const struct file_operations iwl_dbgfs_tx_queue_ops = { + .owner = THIS_MODULE, + .open = iwl_dbgfs_tx_queue_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_private, +}; + +static const struct file_operations iwl_dbgfs_monitor_data_ops = { + .read = iwl_dbgfs_monitor_data_read, + .open = iwl_dbgfs_monitor_data_open, + .release = iwl_dbgfs_monitor_data_release, +}; + +/* Create the debugfs files and directories */ +void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) +{ + struct dentry *dir = trans->dbgfs_dir; + + DEBUGFS_ADD_FILE(rx_queue, dir, 0400); + DEBUGFS_ADD_FILE(tx_queue, dir, 0400); + DEBUGFS_ADD_FILE(interrupt, dir, 0600); + DEBUGFS_ADD_FILE(csr, dir, 0200); + DEBUGFS_ADD_FILE(fh_reg, dir, 0400); + DEBUGFS_ADD_FILE(rfkill, dir, 0600); + DEBUGFS_ADD_FILE(monitor_data, dir, 0400); + DEBUGFS_ADD_FILE(rf, dir, 0400); +} + +static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct cont_rec *data = &trans_pcie->fw_mon_data; + + mutex_lock(&data->mutex); + data->state = IWL_FW_MON_DBGFS_STATE_DISABLED; + mutex_unlock(&data->mutex); +} +#endif /*CONFIG_IWLWIFI_DEBUGFS */ + +static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd) +{ + u32 cmdlen = 0; + int i; + + for (i = 0; i < trans->txqs.tfd.max_tbs; i++) + cmdlen += iwl_txq_gen1_tfd_tb_get_len(trans, tfd, i); + + return cmdlen; +} + +static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans, + struct iwl_fw_error_dump_data **data, + int allocated_rb_nums) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int max_len = trans_pcie->rx_buf_bytes; + /* Dump RBs is supported only for pre-9000 devices (1 
queue) */ + struct iwl_rxq *rxq = &trans_pcie->rxq[0]; + u32 i, r, j, rb_len = 0; + + spin_lock_bh(&rxq->lock); + + r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF; + + for (i = rxq->read, j = 0; + i != r && j < allocated_rb_nums; + i = (i + 1) & RX_QUEUE_MASK, j++) { + struct iwl_rx_mem_buffer *rxb = rxq->queue[i]; + struct iwl_fw_error_dump_rb *rb; + + dma_sync_single_for_cpu(trans->dev, rxb->page_dma, + max_len, DMA_FROM_DEVICE); + + rb_len += sizeof(**data) + sizeof(*rb) + max_len; + + (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB); + (*data)->len = cpu_to_le32(sizeof(*rb) + max_len); + rb = (void *)(*data)->data; + rb->index = cpu_to_le32(i); + memcpy(rb->data, page_address(rxb->page), max_len); + + *data = iwl_fw_error_next_data(*data); + } + + spin_unlock_bh(&rxq->lock); + + return rb_len; +} +#define IWL_CSR_TO_DUMP (0x250) + +static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans, + struct iwl_fw_error_dump_data **data) +{ + u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP; + __le32 *val; + int i; + + (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR); + (*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP); + val = (void *)(*data)->data; + + for (i = 0; i < IWL_CSR_TO_DUMP; i += 4) + *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i)); + + *data = iwl_fw_error_next_data(*data); + + return csr_len; +} + +static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans, + struct iwl_fw_error_dump_data **data) +{ + u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND; + __le32 *val; + int i; + + if (!iwl_trans_grab_nic_access(trans)) + return 0; + + (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS); + (*data)->len = cpu_to_le32(fh_regs_len); + val = (void *)(*data)->data; + + if (!trans->trans_cfg->gen2) + for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND; + i += sizeof(u32)) + *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i)); + else + for (i = iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2); + i < iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2); + i += sizeof(u32)) + *val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans, + i)); + + iwl_trans_release_nic_access(trans); + + *data = iwl_fw_error_next_data(*data); + + return sizeof(**data) + fh_regs_len; +} + +static u32 +iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans, + struct iwl_fw_error_dump_fw_mon *fw_mon_data, + u32 monitor_len) +{ + u32 buf_size_in_dwords = (monitor_len >> 2); + u32 *buffer = (u32 *)fw_mon_data->data; + u32 i; + + if (!iwl_trans_grab_nic_access(trans)) + return 0; + + iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1); + for (i = 0; i < buf_size_in_dwords; i++) + buffer[i] = iwl_read_umac_prph_no_grab(trans, + MON_DMARB_RD_DATA_ADDR); + iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0); + + iwl_trans_release_nic_access(trans); + + return monitor_len; +} + +static void +iwl_trans_pcie_dump_pointers(struct iwl_trans *trans, + struct iwl_fw_error_dump_fw_mon *fw_mon_data) +{ + u32 base, base_high, write_ptr, write_ptr_val, wrap_cnt; + + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + base = DBGC_CUR_DBGBUF_BASE_ADDR_LSB; + base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB; + write_ptr = DBGC_CUR_DBGBUF_STATUS; + wrap_cnt = DBGC_DBGBUF_WRAP_AROUND; + } else if (trans->dbg.dest_tlv) { + write_ptr = le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg); + wrap_cnt = le32_to_cpu(trans->dbg.dest_tlv->wrap_count); + base = le32_to_cpu(trans->dbg.dest_tlv->base_reg); + } else { + base = MON_BUFF_BASE_ADDR; + write_ptr = MON_BUFF_WRPTR; + wrap_cnt = 
MON_BUFF_CYCLE_CNT; + } + + write_ptr_val = iwl_read_prph(trans, write_ptr); + fw_mon_data->fw_mon_cycle_cnt = + cpu_to_le32(iwl_read_prph(trans, wrap_cnt)); + fw_mon_data->fw_mon_base_ptr = + cpu_to_le32(iwl_read_prph(trans, base)); + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + fw_mon_data->fw_mon_base_high_ptr = + cpu_to_le32(iwl_read_prph(trans, base_high)); + write_ptr_val &= DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK; + /* convert wrtPtr to DWs, to align with all HWs */ + write_ptr_val >>= 2; + } + fw_mon_data->fw_mon_wr_ptr = cpu_to_le32(write_ptr_val); +} + +static u32 +iwl_trans_pcie_dump_monitor(struct iwl_trans *trans, + struct iwl_fw_error_dump_data **data, + u32 monitor_len) +{ + struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon; + u32 len = 0; + + if (trans->dbg.dest_tlv || + (fw_mon->size && + (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000 || + trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) { + struct iwl_fw_error_dump_fw_mon *fw_mon_data; + + (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR); + fw_mon_data = (void *)(*data)->data; + + iwl_trans_pcie_dump_pointers(trans, fw_mon_data); + + len += sizeof(**data) + sizeof(*fw_mon_data); + if (fw_mon->size) { + memcpy(fw_mon_data->data, fw_mon->block, fw_mon->size); + monitor_len = fw_mon->size; + } else if (trans->dbg.dest_tlv->monitor_mode == SMEM_MODE) { + u32 base = le32_to_cpu(fw_mon_data->fw_mon_base_ptr); + /* + * Update pointers to reflect actual values after + * shifting + */ + if (trans->dbg.dest_tlv->version) { + base = (iwl_read_prph(trans, base) & + IWL_LDBG_M2S_BUF_BA_MSK) << + trans->dbg.dest_tlv->base_shift; + base *= IWL_M2S_UNIT_SIZE; + base += trans->cfg->smem_offset; + } else { + base = iwl_read_prph(trans, base) << + trans->dbg.dest_tlv->base_shift; + } + + iwl_trans_read_mem(trans, base, fw_mon_data->data, + monitor_len / sizeof(u32)); + } else if (trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) { + monitor_len = + iwl_trans_pci_dump_marbh_monitor(trans, + fw_mon_data, + monitor_len); + } else { + /* Didn't match anything - output no monitor data */ + monitor_len = 0; + } + + len += monitor_len; + (*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data)); + } + + return len; +} + +static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len) +{ + if (trans->dbg.fw_mon.size) { + *len += sizeof(struct iwl_fw_error_dump_data) + + sizeof(struct iwl_fw_error_dump_fw_mon) + + trans->dbg.fw_mon.size; + return trans->dbg.fw_mon.size; + } else if (trans->dbg.dest_tlv) { + u32 base, end, cfg_reg, monitor_len; + + if (trans->dbg.dest_tlv->version == 1) { + cfg_reg = le32_to_cpu(trans->dbg.dest_tlv->base_reg); + cfg_reg = iwl_read_prph(trans, cfg_reg); + base = (cfg_reg & IWL_LDBG_M2S_BUF_BA_MSK) << + trans->dbg.dest_tlv->base_shift; + base *= IWL_M2S_UNIT_SIZE; + base += trans->cfg->smem_offset; + + monitor_len = + (cfg_reg & IWL_LDBG_M2S_BUF_SIZE_MSK) >> + trans->dbg.dest_tlv->end_shift; + monitor_len *= IWL_M2S_UNIT_SIZE; + } else { + base = le32_to_cpu(trans->dbg.dest_tlv->base_reg); + end = le32_to_cpu(trans->dbg.dest_tlv->end_reg); + + base = iwl_read_prph(trans, base) << + trans->dbg.dest_tlv->base_shift; + end = iwl_read_prph(trans, end) << + trans->dbg.dest_tlv->end_shift; + + /* Make "end" point to the actual end */ + if (trans->trans_cfg->device_family >= + IWL_DEVICE_FAMILY_8000 || + trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) + end += (1 << trans->dbg.dest_tlv->end_shift); + monitor_len = end - base; + } + *len += sizeof(struct 
iwl_fw_error_dump_data) + + sizeof(struct iwl_fw_error_dump_fw_mon) + + monitor_len; + return monitor_len; + } + return 0; +} + +static struct iwl_trans_dump_data * +iwl_trans_pcie_dump_data(struct iwl_trans *trans, + u32 dump_mask, + const struct iwl_dump_sanitize_ops *sanitize_ops, + void *sanitize_ctx) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_fw_error_dump_data *data; + struct iwl_txq *cmdq = trans->txqs.txq[trans->txqs.cmd.q_id]; + struct iwl_fw_error_dump_txcmd *txcmd; + struct iwl_trans_dump_data *dump_data; + u32 len, num_rbs = 0, monitor_len = 0; + int i, ptr; + bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) && + !trans->trans_cfg->mq_rx_supported && + dump_mask & BIT(IWL_FW_ERROR_DUMP_RB); + + if (!dump_mask) + return NULL; + + /* transport dump header */ + len = sizeof(*dump_data); + + /* host commands */ + if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) + len += sizeof(*data) + + cmdq->n_window * (sizeof(*txcmd) + + TFD_MAX_PAYLOAD_SIZE); + + /* FW monitor */ + if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)) + monitor_len = iwl_trans_get_fw_monitor_len(trans, &len); + + /* CSR registers */ + if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR)) + len += sizeof(*data) + IWL_CSR_TO_DUMP; + + /* FH registers */ + if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) { + if (trans->trans_cfg->gen2) + len += sizeof(*data) + + (iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2) - + iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2)); + else + len += sizeof(*data) + + (FH_MEM_UPPER_BOUND - + FH_MEM_LOWER_BOUND); + } + + if (dump_rbs) { + /* Dump RBs is supported only for pre-9000 devices (1 queue) */ + struct iwl_rxq *rxq = &trans_pcie->rxq[0]; + /* RBs */ + num_rbs = + le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) + & 0x0FFF; + num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK; + len += num_rbs * (sizeof(*data) + + sizeof(struct iwl_fw_error_dump_rb) + + (PAGE_SIZE << trans_pcie->rx_page_order)); + } + + /* Paged memory for gen2 HW */ + if (trans->trans_cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) + for (i = 0; i < trans->init_dram.paging_cnt; i++) + len += sizeof(*data) + + sizeof(struct iwl_fw_error_dump_paging) + + trans->init_dram.paging[i].size; + + dump_data = vzalloc(len); + if (!dump_data) + return NULL; + + len = 0; + data = (void *)dump_data->data; + + if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) { + u16 tfd_size = trans->txqs.tfd.size; + + data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD); + txcmd = (void *)data->data; + spin_lock_bh(&cmdq->lock); + ptr = cmdq->write_ptr; + for (i = 0; i < cmdq->n_window; i++) { + u8 idx = iwl_txq_get_cmd_index(cmdq, ptr); + u8 tfdidx; + u32 caplen, cmdlen; + + if (trans->trans_cfg->use_tfh) + tfdidx = idx; + else + tfdidx = ptr; + + cmdlen = iwl_trans_pcie_get_cmdlen(trans, + (u8 *)cmdq->tfds + + tfd_size * tfdidx); + caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen); + + if (cmdlen) { + len += sizeof(*txcmd) + caplen; + txcmd->cmdlen = cpu_to_le32(cmdlen); + txcmd->caplen = cpu_to_le32(caplen); + memcpy(txcmd->data, cmdq->entries[idx].cmd, + caplen); + if (sanitize_ops && sanitize_ops->frob_hcmd) + sanitize_ops->frob_hcmd(sanitize_ctx, + txcmd->data, + caplen); + txcmd = (void *)((u8 *)txcmd->data + caplen); + } + + ptr = iwl_txq_dec_wrap(trans, ptr); + } + spin_unlock_bh(&cmdq->lock); + + data->len = cpu_to_le32(len); + len += sizeof(*data); + data = iwl_fw_error_next_data(data); + } + + if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR)) + len += iwl_trans_pcie_dump_csr(trans, &data); 
+ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) + len += iwl_trans_pcie_fh_regs_dump(trans, &data); + if (dump_rbs) + len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs); + + /* Paged memory for gen2 HW */ + if (trans->trans_cfg->gen2 && + dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) { + for (i = 0; i < trans->init_dram.paging_cnt; i++) { + struct iwl_fw_error_dump_paging *paging; + u32 page_len = trans->init_dram.paging[i].size; + + data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING); + data->len = cpu_to_le32(sizeof(*paging) + page_len); + paging = (void *)data->data; + paging->index = cpu_to_le32(i); + memcpy(paging->data, + trans->init_dram.paging[i].block, page_len); + data = iwl_fw_error_next_data(data); + + len += sizeof(*data) + sizeof(*paging) + page_len; + } + } + if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)) + len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len); + + dump_data->len = len; + + return dump_data; +} + +static void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable) +{ + if (enable) + iwl_enable_interrupts(trans); + else + iwl_disable_interrupts(trans); +} + +static void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans) +{ + u32 inta_addr, sw_err_bit; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + if (trans_pcie->msix_enabled) { + inta_addr = CSR_MSIX_HW_INT_CAUSES_AD; + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ; + else + sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR; + } else { + inta_addr = CSR_INT; + sw_err_bit = CSR_INT_BIT_SW_ERR; + } + + iwl_trans_sync_nmi_with_addr(trans, inta_addr, sw_err_bit); +} + +#define IWL_TRANS_COMMON_OPS \ + .op_mode_leave = iwl_trans_pcie_op_mode_leave, \ + .write8 = iwl_trans_pcie_write8, \ + .write32 = iwl_trans_pcie_write32, \ + .read32 = iwl_trans_pcie_read32, \ + .read_prph = iwl_trans_pcie_read_prph, \ + .write_prph = iwl_trans_pcie_write_prph, \ + .read_mem = iwl_trans_pcie_read_mem, \ + .write_mem = iwl_trans_pcie_write_mem, \ + .read_config32 = iwl_trans_pcie_read_config32, \ + .configure = iwl_trans_pcie_configure, \ + .set_pmi = iwl_trans_pcie_set_pmi, \ + .sw_reset = iwl_trans_pcie_sw_reset, \ + .grab_nic_access = iwl_trans_pcie_grab_nic_access, \ + .release_nic_access = iwl_trans_pcie_release_nic_access, \ + .set_bits_mask = iwl_trans_pcie_set_bits_mask, \ + .dump_data = iwl_trans_pcie_dump_data, \ + .d3_suspend = iwl_trans_pcie_d3_suspend, \ + .d3_resume = iwl_trans_pcie_d3_resume, \ + .interrupts = iwl_trans_pci_interrupts, \ + .sync_nmi = iwl_trans_pcie_sync_nmi, \ + .imr_dma_data = iwl_trans_pcie_copy_imr \ + +static const struct iwl_trans_ops trans_ops_pcie = { + IWL_TRANS_COMMON_OPS, + .start_hw = iwl_trans_pcie_start_hw, + .fw_alive = iwl_trans_pcie_fw_alive, + .start_fw = iwl_trans_pcie_start_fw, + .stop_device = iwl_trans_pcie_stop_device, + + .send_cmd = iwl_pcie_enqueue_hcmd, + + .tx = iwl_trans_pcie_tx, + .reclaim = iwl_txq_reclaim, + + .txq_disable = iwl_trans_pcie_txq_disable, + .txq_enable = iwl_trans_pcie_txq_enable, + + .txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode, + + .wait_tx_queues_empty = iwl_trans_pcie_wait_txqs_empty, + + .freeze_txq_timer = iwl_trans_txq_freeze_timer, + .block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs, +#ifdef CONFIG_IWLWIFI_DEBUGFS + .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup, +#endif +}; + +static const struct iwl_trans_ops trans_ops_pcie_gen2 = { + IWL_TRANS_COMMON_OPS, + .start_hw = iwl_trans_pcie_start_hw, + .fw_alive = 
iwl_trans_pcie_gen2_fw_alive, + .start_fw = iwl_trans_pcie_gen2_start_fw, + .stop_device = iwl_trans_pcie_gen2_stop_device, + + .send_cmd = iwl_pcie_gen2_enqueue_hcmd, + + .tx = iwl_txq_gen2_tx, + .reclaim = iwl_txq_reclaim, + + .set_q_ptrs = iwl_txq_set_q_ptrs, + + .txq_alloc = iwl_txq_dyn_alloc, + .txq_free = iwl_txq_dyn_free, + .wait_txq_empty = iwl_trans_pcie_wait_txq_empty, + .rxq_dma_data = iwl_trans_pcie_rxq_dma_data, + .set_pnvm = iwl_trans_pcie_ctx_info_gen3_set_pnvm, + .set_reduce_power = iwl_trans_pcie_ctx_info_gen3_set_reduce_power, +#ifdef CONFIG_IWLWIFI_DEBUGFS + .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup, +#endif +}; + +struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, + const struct pci_device_id *ent, + const struct iwl_cfg_trans_params *cfg_trans) +{ + struct iwl_trans_pcie *trans_pcie; + struct iwl_trans *trans; + int ret, addr_size; + const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2; + void __iomem * const *table; + + if (!cfg_trans->gen2) + ops = &trans_ops_pcie; + + ret = pcim_enable_device(pdev); + if (ret) + return ERR_PTR(ret); + + trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops, + cfg_trans); + if (!trans) + return ERR_PTR(-ENOMEM); + + trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + trans_pcie->trans = trans; + trans_pcie->opmode_down = true; + spin_lock_init(&trans_pcie->irq_lock); + spin_lock_init(&trans_pcie->reg_lock); + spin_lock_init(&trans_pcie->alloc_page_lock); + mutex_init(&trans_pcie->mutex); + init_waitqueue_head(&trans_pcie->ucode_write_waitq); + init_waitqueue_head(&trans_pcie->fw_reset_waitq); + init_waitqueue_head(&trans_pcie->imr_waitq); + + trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator", + WQ_HIGHPRI | WQ_UNBOUND, 1); + if (!trans_pcie->rba.alloc_wq) { + ret = -ENOMEM; + goto out_free_trans; + } + INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work); + + trans_pcie->debug_rfkill = -1; + + if (!cfg_trans->base_params->pcie_l1_allowed) { + /* + * W/A - seems to solve weird behavior. We need to remove this + * if we don't want to stay in L1 all the time. This wastes a + * lot of power. 
+ */ + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | + PCIE_LINK_STATE_L1 | + PCIE_LINK_STATE_CLKPM); + } + + trans_pcie->def_rx_queue = 0; + + pci_set_master(pdev); + + addr_size = trans->txqs.tfd.addr_size; + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_size)); + if (ret) { + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + /* both attempts failed: */ + if (ret) { + dev_err(&pdev->dev, "No suitable DMA available\n"); + goto out_no_pci; + } + } + + ret = pcim_iomap_regions_request_all(pdev, BIT(0), DRV_NAME); + if (ret) { + dev_err(&pdev->dev, "pcim_iomap_regions_request_all failed\n"); + goto out_no_pci; + } + + table = pcim_iomap_table(pdev); + if (!table) { + dev_err(&pdev->dev, "pcim_iomap_table failed\n"); + ret = -ENOMEM; + goto out_no_pci; + } + + trans_pcie->hw_base = table[0]; + if (!trans_pcie->hw_base) { + dev_err(&pdev->dev, "couldn't find IO mem in first BAR\n"); + ret = -ENODEV; + goto out_no_pci; + } + + /* We disable the RETRY_TIMEOUT register (0x41) to keep + * PCI Tx retries from interfering with C3 CPU state */ + pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); + + trans_pcie->pci_dev = pdev; + iwl_disable_interrupts(trans); + + trans->hw_rev = iwl_read32(trans, CSR_HW_REV); + if (trans->hw_rev == 0xffffffff) { + dev_err(&pdev->dev, "HW_REV=0xFFFFFFFF, PCI issues?\n"); + ret = -EIO; + goto out_no_pci; + } + + /* + * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have + * changed, and now the revision step also includes bit 0-1 (no more + * "dash" value). To keep hw_rev backwards compatible - we'll store it + * in the old format. + */ + if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_8000) + trans->hw_rev_step = trans->hw_rev & 0xF; + else + trans->hw_rev_step = (trans->hw_rev & 0xC) >> 2; + + IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", trans->hw_rev); + + iwl_pcie_set_interrupt_capa(pdev, trans, cfg_trans); + trans->hw_id = (pdev->device << 16) + pdev->subsystem_device; + snprintf(trans->hw_id_str, sizeof(trans->hw_id_str), + "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device); + + init_waitqueue_head(&trans_pcie->sx_waitq); + + + if (trans_pcie->msix_enabled) { + ret = iwl_pcie_init_msix_handler(pdev, trans_pcie); + if (ret) + goto out_no_pci; + } else { + ret = iwl_pcie_alloc_ict(trans); + if (ret) + goto out_no_pci; + + ret = devm_request_threaded_irq(&pdev->dev, pdev->irq, + iwl_pcie_isr, + iwl_pcie_irq_handler, + IRQF_SHARED, DRV_NAME, trans); + if (ret) { + IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq); + goto out_free_ict; + } + } + +#ifdef CONFIG_IWLWIFI_DEBUGFS + trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED; + mutex_init(&trans_pcie->fw_mon_data.mutex); +#endif + + iwl_dbg_tlv_init(trans); + + return trans; + +out_free_ict: + iwl_pcie_free_ict(trans); +out_no_pci: + destroy_workqueue(trans_pcie->rba.alloc_wq); +out_free_trans: + iwl_trans_free(trans); + return ERR_PTR(ret); +} + +void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans, + u32 dst_addr, u64 src_addr, u32 byte_cnt) +{ + iwl_write_prph(trans, IMR_UREG_CHICK, + iwl_read_prph(trans, IMR_UREG_CHICK) | + IMR_UREG_CHICK_HALT_UMAC_PERMANENTLY_MSK); + iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_SRAM_ADDR, dst_addr); + iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_LSB, + (u32)(src_addr & 0xFFFFFFFF)); + iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_MSB, + iwl_get_dma_hi_addr(src_addr)); + iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_BC, byte_cnt); + iwl_write_prph(trans, 
IMR_TFH_SRV_DMA_CHNL0_CTRL, + IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_IRQ_TARGET_POS | + IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_DMA_EN_POS | + IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_RS_MSK); +} + +int iwl_trans_pcie_copy_imr(struct iwl_trans *trans, + u32 dst_addr, u64 src_addr, u32 byte_cnt) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int ret = -1; + + trans_pcie->imr_status = IMR_D2S_REQUESTED; + iwl_trans_pcie_copy_imr_fh(trans, dst_addr, src_addr, byte_cnt); + ret = wait_event_timeout(trans_pcie->imr_waitq, + trans_pcie->imr_status != + IMR_D2S_REQUESTED, 5 * HZ); + if (!ret || trans_pcie->imr_status == IMR_D2S_ERROR) { + IWL_ERR(trans, "Failed to copy IMR Memory chunk!\n"); + iwl_trans_pcie_dump_regs(trans); + return -ETIMEDOUT; + } + trans_pcie->imr_status = IMR_D2S_IDLE; + return 0; +} diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c new file mode 100644 index 000000000..c72a84d8b --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -0,0 +1,259 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2017 Intel Deutschland GmbH + * Copyright (C) 2018-2020 Intel Corporation + */ +#include <net/tso.h> +#include <linux/tcp.h> + +#include "iwl-debug.h" +#include "iwl-csr.h" +#include "iwl-io.h" +#include "internal.h" +#include "fw/api/tx.h" +#include "queue/tx.h" + +/*************** HOST COMMAND QUEUE FUNCTIONS *****/ + +/* + * iwl_pcie_gen2_enqueue_hcmd - enqueue a uCode command + * @priv: device private data point + * @cmd: a pointer to the ucode command structure + * + * The function returns < 0 values to indicate the operation + * failed. On success, it returns the index (>= 0) of command in the + * command queue. + */ +int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, + struct iwl_host_cmd *cmd) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; + struct iwl_device_cmd *out_cmd; + struct iwl_cmd_meta *out_meta; + void *dup_buf = NULL; + dma_addr_t phys_addr; + int i, cmd_pos, idx; + u16 copy_size, cmd_size, tb0_size; + bool had_nocopy = false; + u8 group_id = iwl_cmd_groupid(cmd->id); + const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; + u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; + struct iwl_tfh_tfd *tfd; + unsigned long flags; + + copy_size = sizeof(struct iwl_cmd_header_wide); + cmd_size = sizeof(struct iwl_cmd_header_wide); + + for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { + cmddata[i] = cmd->data[i]; + cmdlen[i] = cmd->len[i]; + + if (!cmd->len[i]) + continue; + + /* need at least IWL_FIRST_TB_SIZE copied */ + if (copy_size < IWL_FIRST_TB_SIZE) { + int copy = IWL_FIRST_TB_SIZE - copy_size; + + if (copy > cmdlen[i]) + copy = cmdlen[i]; + cmdlen[i] -= copy; + cmddata[i] += copy; + copy_size += copy; + } + + if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) { + had_nocopy = true; + if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) { + idx = -EINVAL; + goto free_dup_buf; + } + } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) { + /* + * This is also a chunk that isn't copied + * to the static buffer so set had_nocopy. + */ + had_nocopy = true; + + /* only allowed once */ + if (WARN_ON(dup_buf)) { + idx = -EINVAL; + goto free_dup_buf; + } + + dup_buf = kmemdup(cmddata[i], cmdlen[i], + GFP_ATOMIC); + if (!dup_buf) + return -ENOMEM; + } else { + /* NOCOPY must not be followed by normal! 
*/ + if (WARN_ON(had_nocopy)) { + idx = -EINVAL; + goto free_dup_buf; + } + copy_size += cmdlen[i]; + } + cmd_size += cmd->len[i]; + } + + /* + * If any of the command structures end up being larger than the + * TFD_MAX_PAYLOAD_SIZE and they aren't dynamically allocated into + * separate TFDs, then we will need to increase the size of the buffers + */ + if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE, + "Command %s (%#x) is too large (%d bytes)\n", + iwl_get_cmd_string(trans, cmd->id), cmd->id, copy_size)) { + idx = -EINVAL; + goto free_dup_buf; + } + + spin_lock_irqsave(&txq->lock, flags); + + idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); + tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr); + memset(tfd, 0, sizeof(*tfd)); + + if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { + spin_unlock_irqrestore(&txq->lock, flags); + + IWL_ERR(trans, "No space in command queue\n"); + iwl_op_mode_cmd_queue_full(trans->op_mode); + idx = -ENOSPC; + goto free_dup_buf; + } + + out_cmd = txq->entries[idx].cmd; + out_meta = &txq->entries[idx].meta; + + /* re-initialize to NULL */ + memset(out_meta, 0, sizeof(*out_meta)); + if (cmd->flags & CMD_WANT_SKB) + out_meta->source = cmd; + + /* set up the header */ + out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id); + out_cmd->hdr_wide.group_id = group_id; + out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id); + out_cmd->hdr_wide.length = + cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide)); + out_cmd->hdr_wide.reserved = 0; + out_cmd->hdr_wide.sequence = + cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) | + INDEX_TO_SEQ(txq->write_ptr)); + + cmd_pos = sizeof(struct iwl_cmd_header_wide); + copy_size = sizeof(struct iwl_cmd_header_wide); + + /* and copy the data that needs to be copied */ + for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { + int copy; + + if (!cmd->len[i]) + continue; + + /* copy everything if not nocopy/dup */ + if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | + IWL_HCMD_DFL_DUP))) { + copy = cmd->len[i]; + + memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); + cmd_pos += copy; + copy_size += copy; + continue; + } + + /* + * Otherwise we need at least IWL_FIRST_TB_SIZE copied + * in total (for bi-directional DMA), but copy up to what + * we can fit into the payload for debug dump purposes. 
+ */ + copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]); + + memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); + cmd_pos += copy; + + /* However, treat copy_size the proper way, we need it below */ + if (copy_size < IWL_FIRST_TB_SIZE) { + copy = IWL_FIRST_TB_SIZE - copy_size; + + if (copy > cmd->len[i]) + copy = cmd->len[i]; + copy_size += copy; + } + } + + IWL_DEBUG_HC(trans, + "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", + iwl_get_cmd_string(trans, cmd->id), group_id, + out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), + cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id); + + /* start the TFD with the minimum copy bytes */ + tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE); + memcpy(&txq->first_tb_bufs[idx], out_cmd, tb0_size); + iwl_txq_gen2_set_tb(trans, tfd, iwl_txq_get_first_tb_dma(txq, idx), + tb0_size); + + /* map first command fragment, if any remains */ + if (copy_size > tb0_size) { + phys_addr = dma_map_single(trans->dev, + (u8 *)out_cmd + tb0_size, + copy_size - tb0_size, + DMA_TO_DEVICE); + if (dma_mapping_error(trans->dev, phys_addr)) { + idx = -ENOMEM; + iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd); + goto out; + } + iwl_txq_gen2_set_tb(trans, tfd, phys_addr, + copy_size - tb0_size); + } + + /* map the remaining (adjusted) nocopy/dup fragments */ + for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { + void *data = (void *)(uintptr_t)cmddata[i]; + + if (!cmdlen[i]) + continue; + if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | + IWL_HCMD_DFL_DUP))) + continue; + if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) + data = dup_buf; + phys_addr = dma_map_single(trans->dev, data, + cmdlen[i], DMA_TO_DEVICE); + if (dma_mapping_error(trans->dev, phys_addr)) { + idx = -ENOMEM; + iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd); + goto out; + } + iwl_txq_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]); + } + + BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE); + out_meta->flags = cmd->flags; + if (WARN_ON_ONCE(txq->entries[idx].free_buf)) + kfree_sensitive(txq->entries[idx].free_buf); + txq->entries[idx].free_buf = dup_buf; + + trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide); + + /* start timer if queue currently empty */ + if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) + mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); + + spin_lock(&trans_pcie->reg_lock); + /* Increment and update queue's write index */ + txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr); + iwl_txq_inc_wr_ptr(trans, txq); + spin_unlock(&trans_pcie->reg_lock); + +out: + spin_unlock_irqrestore(&txq->lock, flags); +free_dup_buf: + if (idx < 0) + kfree(dup_buf); + return idx; +} diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c new file mode 100644 index 000000000..3546c5269 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -0,0 +1,1630 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2003-2014, 2018-2021 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ +#include <linux/etherdevice.h> +#include <linux/ieee80211.h> +#include <linux/slab.h> +#include <linux/sched.h> +#include <net/ip6_checksum.h> +#include <net/tso.h> + +#include "iwl-debug.h" +#include "iwl-csr.h" +#include "iwl-prph.h" +#include "iwl-io.h" +#include "iwl-scd.h" +#include "iwl-op-mode.h" +#include "internal.h" +#include "fw/api/tx.h" + +/*************** 
DMA-QUEUE-GENERAL-FUNCTIONS ***** + * DMA services + * + * Theory of operation + * + * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer + * of buffer descriptors, each of which points to one or more data buffers for + * the device to read from or fill. Driver and device exchange status of each + * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty + * entries in each circular buffer, to protect against confusing empty and full + * queue states. + * + * The device reads or writes the data in the queues via the device's several + * DMA/FIFO channels. Each queue is mapped to a single DMA channel. + * + * For Tx queue, there are low mark and high mark limits. If, after queuing + * the packet for Tx, free space become < low mark, Tx queue stopped. When + * reclaiming packets (on 'tx done IRQ), if free space become > high mark, + * Tx queue resumed. + * + ***************************************************/ + + +int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans, + struct iwl_dma_ptr *ptr, size_t size) +{ + if (WARN_ON(ptr->addr)) + return -EINVAL; + + ptr->addr = dma_alloc_coherent(trans->dev, size, + &ptr->dma, GFP_KERNEL); + if (!ptr->addr) + return -ENOMEM; + ptr->size = size; + return 0; +} + +void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr) +{ + if (unlikely(!ptr->addr)) + return; + + dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma); + memset(ptr, 0, sizeof(*ptr)); +} + +/* + * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware + */ +static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, + struct iwl_txq *txq) +{ + u32 reg = 0; + int txq_id = txq->id; + + lockdep_assert_held(&txq->lock); + + /* + * explicitly wake up the NIC if: + * 1. shadow registers aren't enabled + * 2. NIC is woken up for CMD regardless of shadow outside this function + * 3. there is a chance that the NIC is asleep + */ + if (!trans->trans_cfg->base_params->shadow_reg_enable && + txq_id != trans->txqs.cmd.q_id && + test_bit(STATUS_TPOWER_PMI, &trans->status)) { + /* + * wake up nic if it's powered down ... + * uCode will wake up, and interrupt us again, so next + * time we'll skip this part. + */ + reg = iwl_read32(trans, CSR_UCODE_DRV_GP1); + + if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { + IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n", + txq_id, reg); + iwl_set_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + txq->need_update = true; + return; + } + } + + /* + * if not in power-save mode, uCode will never sleep when we're + * trying to tx (during RFKILL, we're not trying to tx). 
+ */ + IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr); + if (!txq->block) + iwl_write32(trans, HBUS_TARG_WRPTR, + txq->write_ptr | (txq_id << 8)); +} + +void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans) +{ + int i; + + for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { + struct iwl_txq *txq = trans->txqs.txq[i]; + + if (!test_bit(i, trans->txqs.queue_used)) + continue; + + spin_lock_bh(&txq->lock); + if (txq->need_update) { + iwl_pcie_txq_inc_wr_ptr(trans, txq); + txq->need_update = false; + } + spin_unlock_bh(&txq->lock); + } +} + +static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd, + u8 idx, dma_addr_t addr, u16 len) +{ + struct iwl_tfd *tfd_fh = (void *)tfd; + struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx]; + + u16 hi_n_len = len << 4; + + put_unaligned_le32(addr, &tb->lo); + hi_n_len |= iwl_get_dma_hi_addr(addr); + + tb->hi_n_len = cpu_to_le16(hi_n_len); + + tfd_fh->num_tbs = idx + 1; +} + +static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, + dma_addr_t addr, u16 len, bool reset) +{ + void *tfd; + u32 num_tbs; + + tfd = (u8 *)txq->tfds + trans->txqs.tfd.size * txq->write_ptr; + + if (reset) + memset(tfd, 0, trans->txqs.tfd.size); + + num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd); + + /* Each TFD can point to a maximum max_tbs Tx buffers */ + if (num_tbs >= trans->txqs.tfd.max_tbs) { + IWL_ERR(trans, "Error can not send more than %d chunks\n", + trans->txqs.tfd.max_tbs); + return -EINVAL; + } + + if (WARN(addr & ~IWL_TX_DMA_MASK, + "Unaligned address = %llx\n", (unsigned long long)addr)) + return -EINVAL; + + iwl_pcie_tfd_set_tb(trans, tfd, num_tbs, addr, len); + + return num_tbs; +} + +static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + if (!trans->trans_cfg->base_params->apmg_wake_up_wa) + return; + + spin_lock(&trans_pcie->reg_lock); + + if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) { + spin_unlock(&trans_pcie->reg_lock); + return; + } + + trans_pcie->cmd_hold_nic_awake = false; + __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + spin_unlock(&trans_pcie->reg_lock); +} + +/* + * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's + */ +static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) +{ + struct iwl_txq *txq = trans->txqs.txq[txq_id]; + + if (!txq) { + IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n"); + return; + } + + spin_lock_bh(&txq->lock); + while (txq->write_ptr != txq->read_ptr) { + IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", + txq_id, txq->read_ptr); + + if (txq_id != trans->txqs.cmd.q_id) { + struct sk_buff *skb = txq->entries[txq->read_ptr].skb; + + if (WARN_ON_ONCE(!skb)) + continue; + + iwl_txq_free_tso_page(trans, skb); + } + iwl_txq_free_tfd(trans, txq); + txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr); + + if (txq->read_ptr == txq->write_ptr && + txq_id == trans->txqs.cmd.q_id) + iwl_pcie_clear_cmd_in_flight(trans); + } + + while (!skb_queue_empty(&txq->overflow_q)) { + struct sk_buff *skb = __skb_dequeue(&txq->overflow_q); + + iwl_op_mode_free_skb(trans->op_mode, skb); + } + + spin_unlock_bh(&txq->lock); + + /* just in case - this queue may have been stopped */ + iwl_wake_queue(trans, txq); +} + +/* + * iwl_pcie_txq_free - Deallocate DMA queue. + * @txq: Transmit queue to deallocate. + * + * Empty queue by removing and destroying all BD's. + * Free all buffers. 
+ * 0-fill, but do not free "txq" descriptor structure. + */ +static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) +{ + struct iwl_txq *txq = trans->txqs.txq[txq_id]; + struct device *dev = trans->dev; + int i; + + if (WARN_ON(!txq)) + return; + + iwl_pcie_txq_unmap(trans, txq_id); + + /* De-alloc array of command/tx buffers */ + if (txq_id == trans->txqs.cmd.q_id) + for (i = 0; i < txq->n_window; i++) { + kfree_sensitive(txq->entries[i].cmd); + kfree_sensitive(txq->entries[i].free_buf); + } + + /* De-alloc circular buffer of TFDs */ + if (txq->tfds) { + dma_free_coherent(dev, + trans->txqs.tfd.size * + trans->trans_cfg->base_params->max_tfd_queue_size, + txq->tfds, txq->dma_addr); + txq->dma_addr = 0; + txq->tfds = NULL; + + dma_free_coherent(dev, + sizeof(*txq->first_tb_bufs) * txq->n_window, + txq->first_tb_bufs, txq->first_tb_dma); + } + + kfree(txq->entries); + txq->entries = NULL; + + del_timer_sync(&txq->stuck_timer); + + /* 0-fill queue descriptor structure */ + memset(txq, 0, sizeof(*txq)); +} + +void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int nq = trans->trans_cfg->base_params->num_of_queues; + int chan; + u32 reg_val; + int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) - + SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32); + + /* make sure all queue are not stopped/used */ + memset(trans->txqs.queue_stopped, 0, + sizeof(trans->txqs.queue_stopped)); + memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); + + trans_pcie->scd_base_addr = + iwl_read_prph(trans, SCD_SRAM_BASE_ADDR); + + WARN_ON(scd_base_addr != 0 && + scd_base_addr != trans_pcie->scd_base_addr); + + /* reset context data, TX status and translation data */ + iwl_trans_write_mem(trans, trans_pcie->scd_base_addr + + SCD_CONTEXT_MEM_LOWER_BOUND, + NULL, clear_dwords); + + iwl_write_prph(trans, SCD_DRAM_BASE_ADDR, + trans->txqs.scd_bc_tbls.dma >> 10); + + /* The chain extension of the SCD doesn't work well. This feature is + * enabled by default by the HW, so we need to disable it manually. 
+ */ + if (trans->trans_cfg->base_params->scd_chain_ext_wa) + iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); + + iwl_trans_ac_txq_enable(trans, trans->txqs.cmd.q_id, + trans->txqs.cmd.fifo, + trans->txqs.cmd.wdg_timeout); + + /* Activate all Tx DMA/FIFO channels */ + iwl_scd_activate_fifos(trans); + + /* Enable DMA channel */ + for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++) + iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan), + FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | + FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE); + + /* Update FH chicken bits */ + reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG); + iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG, + reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); + + /* Enable L1-Active */ + if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) + iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG, + APMG_PCIDEV_STT_VAL_L1_ACT_DIS); +} + +void iwl_trans_pcie_tx_reset(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int txq_id; + + /* + * we should never get here in gen2 trans mode return early to avoid + * having invalid accesses + */ + if (WARN_ON_ONCE(trans->trans_cfg->gen2)) + return; + + for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; + txq_id++) { + struct iwl_txq *txq = trans->txqs.txq[txq_id]; + if (trans->trans_cfg->use_tfh) + iwl_write_direct64(trans, + FH_MEM_CBBC_QUEUE(trans, txq_id), + txq->dma_addr); + else + iwl_write_direct32(trans, + FH_MEM_CBBC_QUEUE(trans, txq_id), + txq->dma_addr >> 8); + iwl_pcie_txq_unmap(trans, txq_id); + txq->read_ptr = 0; + txq->write_ptr = 0; + } + + /* Tell NIC where to find the "keep warm" buffer */ + iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, + trans_pcie->kw.dma >> 4); + + /* + * Send 0 as the scd_base_addr since the device may have be reset + * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will + * contain garbage. + */ + iwl_pcie_tx_start(trans, 0); +} + +static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int ch, ret; + u32 mask = 0; + + spin_lock_bh(&trans_pcie->irq_lock); + + if (!iwl_trans_grab_nic_access(trans)) + goto out; + + /* Stop each Tx DMA channel */ + for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) { + iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); + mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch); + } + + /* Wait for DMA channels to be idle */ + ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000); + if (ret < 0) + IWL_ERR(trans, + "Failing on timeout while stopping DMA channel %d [0x%08x]\n", + ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG)); + + iwl_trans_release_nic_access(trans); + +out: + spin_unlock_bh(&trans_pcie->irq_lock); +} + +/* + * iwl_pcie_tx_stop - Stop all Tx DMA channels + */ +int iwl_pcie_tx_stop(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int txq_id; + + /* Turn off all Tx DMA fifos */ + iwl_scd_deactivate_fifos(trans); + + /* Turn off all Tx DMA channels */ + iwl_pcie_tx_stop_fh(trans); + + /* + * This function can be called before the op_mode disabled the + * queues. This happens when we have an rfkill interrupt. + * Since we stop Tx altogether - mark the queues as stopped. 
+ */ + memset(trans->txqs.queue_stopped, 0, + sizeof(trans->txqs.queue_stopped)); + memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); + + /* This can happen: start_hw, stop_device */ + if (!trans_pcie->txq_memory) + return 0; + + /* Unmap DMA from host system and free skb's */ + for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; + txq_id++) + iwl_pcie_txq_unmap(trans, txq_id); + + return 0; +} + +/* + * iwl_trans_tx_free - Free TXQ Context + * + * Destroy all TX DMA queues and structures + */ +void iwl_pcie_tx_free(struct iwl_trans *trans) +{ + int txq_id; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); + + /* Tx queues */ + if (trans_pcie->txq_memory) { + for (txq_id = 0; + txq_id < trans->trans_cfg->base_params->num_of_queues; + txq_id++) { + iwl_pcie_txq_free(trans, txq_id); + trans->txqs.txq[txq_id] = NULL; + } + } + + kfree(trans_pcie->txq_memory); + trans_pcie->txq_memory = NULL; + + iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw); + + iwl_pcie_free_dma_ptr(trans, &trans->txqs.scd_bc_tbls); +} + +/* + * iwl_pcie_tx_alloc - allocate TX context + * Allocate all Tx DMA structures and initialize them + */ +static int iwl_pcie_tx_alloc(struct iwl_trans *trans) +{ + int ret; + int txq_id, slots_num; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues; + + if (WARN_ON(trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)) + return -EINVAL; + + bc_tbls_size *= sizeof(struct iwlagn_scd_bc_tbl); + + /*It is not allowed to alloc twice, so warn when this happens. + * We cannot rely on the previous allocation, so free and fail */ + if (WARN_ON(trans_pcie->txq_memory)) { + ret = -EINVAL; + goto error; + } + + ret = iwl_pcie_alloc_dma_ptr(trans, &trans->txqs.scd_bc_tbls, + bc_tbls_size); + if (ret) { + IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); + goto error; + } + + /* Alloc keep-warm buffer */ + ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE); + if (ret) { + IWL_ERR(trans, "Keep Warm allocation failed\n"); + goto error; + } + + trans_pcie->txq_memory = + kcalloc(trans->trans_cfg->base_params->num_of_queues, + sizeof(struct iwl_txq), GFP_KERNEL); + if (!trans_pcie->txq_memory) { + IWL_ERR(trans, "Not enough memory for txq\n"); + ret = -ENOMEM; + goto error; + } + + /* Alloc and init all Tx queues, including the command queue (#4/#9) */ + for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; + txq_id++) { + bool cmd_queue = (txq_id == trans->txqs.cmd.q_id); + + if (cmd_queue) + slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE, + trans->cfg->min_txq_size); + else + slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE, + trans->cfg->min_ba_txq_size); + trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id]; + ret = iwl_txq_alloc(trans, trans->txqs.txq[txq_id], slots_num, + cmd_queue); + if (ret) { + IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); + goto error; + } + trans->txqs.txq[txq_id]->id = txq_id; + } + + return 0; + +error: + iwl_pcie_tx_free(trans); + + return ret; +} + +int iwl_pcie_tx_init(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int ret; + int txq_id, slots_num; + bool alloc = false; + + if (!trans_pcie->txq_memory) { + ret = iwl_pcie_tx_alloc(trans); + if (ret) + goto error; + alloc = true; + } + + spin_lock_bh(&trans_pcie->irq_lock); + + /* Turn off all Tx DMA fifos 
*/ + iwl_scd_deactivate_fifos(trans); + + /* Tell NIC where to find the "keep warm" buffer */ + iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, + trans_pcie->kw.dma >> 4); + + spin_unlock_bh(&trans_pcie->irq_lock); + + /* Alloc and init all Tx queues, including the command queue (#4/#9) */ + for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; + txq_id++) { + bool cmd_queue = (txq_id == trans->txqs.cmd.q_id); + + if (cmd_queue) + slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE, + trans->cfg->min_txq_size); + else + slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE, + trans->cfg->min_ba_txq_size); + ret = iwl_txq_init(trans, trans->txqs.txq[txq_id], slots_num, + cmd_queue); + if (ret) { + IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); + goto error; + } + + /* + * Tell nic where to find circular buffer of TFDs for a + * given Tx queue, and enable the DMA channel used for that + * queue. + * Circular buffer (TFD queue in DRAM) physical base address + */ + iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id), + trans->txqs.txq[txq_id]->dma_addr >> 8); + } + + iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE); + if (trans->trans_cfg->base_params->num_of_queues > 20) + iwl_set_bits_prph(trans, SCD_GP_CTRL, + SCD_GP_CTRL_ENABLE_31_QUEUES); + + return 0; +error: + /*Upon error, free only if we allocated something */ + if (alloc) + iwl_pcie_tx_free(trans); + return ret; +} + +static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, + const struct iwl_host_cmd *cmd) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + /* Make sure the NIC is still alive in the bus */ + if (test_bit(STATUS_TRANS_DEAD, &trans->status)) + return -ENODEV; + + if (!trans->trans_cfg->base_params->apmg_wake_up_wa) + return 0; + + /* + * wake up the NIC to make sure that the firmware will see the host + * command - we will let the NIC sleep once all the host commands + * returned. This needs to be done only on NICs that have + * apmg_wake_up_wa set (see above.) + */ + if (!_iwl_trans_pcie_grab_nic_access(trans)) + return -EIO; + + /* + * In iwl_trans_grab_nic_access(), we've acquired the reg_lock. + * There, we also returned immediately if cmd_hold_nic_awake is + * already true, so it's OK to unconditionally set it to true. + */ + trans_pcie->cmd_hold_nic_awake = true; + spin_unlock(&trans_pcie->reg_lock); + + return 0; +} + +/* + * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd + * + * When FW advances 'R' index, all entries between old and new 'R' index + * need to be reclaimed. As result, some free space forms. If there is + * enough free space (> low mark), wake the stack that feeds us. 
+ */ +static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) +{ + struct iwl_txq *txq = trans->txqs.txq[txq_id]; + int nfreed = 0; + u16 r; + + lockdep_assert_held(&txq->lock); + + idx = iwl_txq_get_cmd_index(txq, idx); + r = iwl_txq_get_cmd_index(txq, txq->read_ptr); + + if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size || + (!iwl_txq_used(txq, idx))) { + WARN_ONCE(test_bit(txq_id, trans->txqs.queue_used), + "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", + __func__, txq_id, idx, + trans->trans_cfg->base_params->max_tfd_queue_size, + txq->write_ptr, txq->read_ptr); + return; + } + + for (idx = iwl_txq_inc_wrap(trans, idx); r != idx; + r = iwl_txq_inc_wrap(trans, r)) { + txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr); + + if (nfreed++ > 0) { + IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", + idx, txq->write_ptr, r); + iwl_force_nmi(trans); + } + } + + if (txq->read_ptr == txq->write_ptr) + iwl_pcie_clear_cmd_in_flight(trans); + + iwl_txq_progress(txq); +} + +static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid, + u16 txq_id) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + u32 tbl_dw_addr; + u32 tbl_dw; + u16 scd_q2ratid; + + scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK; + + tbl_dw_addr = trans_pcie->scd_base_addr + + SCD_TRANS_TBL_OFFSET_QUEUE(txq_id); + + tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr); + + if (txq_id & 0x1) + tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF); + else + tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000); + + iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw); + + return 0; +} + +/* Receiver address (actually, Rx station's index into station table), + * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */ +#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid)) + +bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, + const struct iwl_trans_txq_scd_cfg *cfg, + unsigned int wdg_timeout) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq = trans->txqs.txq[txq_id]; + int fifo = -1; + bool scd_bug = false; + + if (test_and_set_bit(txq_id, trans->txqs.queue_used)) + WARN_ONCE(1, "queue %d already used - expect issues", txq_id); + + txq->wd_timeout = msecs_to_jiffies(wdg_timeout); + + if (cfg) { + fifo = cfg->fifo; + + /* Disable the scheduler prior configuring the cmd queue */ + if (txq_id == trans->txqs.cmd.q_id && + trans_pcie->scd_set_active) + iwl_scd_enable_set_active(trans, 0); + + /* Stop this Tx queue before configuring it */ + iwl_scd_txq_set_inactive(trans, txq_id); + + /* Set this queue as a chain-building queue unless it is CMD */ + if (txq_id != trans->txqs.cmd.q_id) + iwl_scd_txq_set_chain(trans, txq_id); + + if (cfg->aggregate) { + u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid); + + /* Map receiver-address / traffic-ID to this queue */ + iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id); + + /* enable aggregations for the queue */ + iwl_scd_txq_enable_agg(trans, txq_id); + txq->ampdu = true; + } else { + /* + * disable aggregations for the queue, this will also + * make the ra_tid mapping configuration irrelevant + * since it is now a non-AGG queue. + */ + iwl_scd_txq_disable_agg(trans, txq_id); + + ssn = txq->read_ptr; + } + } else { + /* + * If we need to move the SCD write pointer by steps of + * 0x40, 0x80 or 0xc0, it gets stuck. Avoids this and let + * the op_mode know by returning true later. 
+ * Do this only in case cfg is NULL since this trick can + * be done only if we have DQA enabled which is true for mvm + * only. And mvm never sets a cfg pointer. + * This is really ugly, but this is the easiest way out for + * this sad hardware issue. + * This bug has been fixed on devices 9000 and up. + */ + scd_bug = !trans->trans_cfg->mq_rx_supported && + !((ssn - txq->write_ptr) & 0x3f) && + (ssn != txq->write_ptr); + if (scd_bug) + ssn++; + } + + /* Place first TFD at index corresponding to start sequence number. + * Assumes that ssn_idx is valid (!= 0xFFF) */ + txq->read_ptr = (ssn & 0xff); + txq->write_ptr = (ssn & 0xff); + iwl_write_direct32(trans, HBUS_TARG_WRPTR, + (ssn & 0xff) | (txq_id << 8)); + + if (cfg) { + u8 frame_limit = cfg->frame_limit; + + iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn); + + /* Set up Tx window size and frame limit for this queue */ + iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr + + SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0); + iwl_trans_write_mem32(trans, + trans_pcie->scd_base_addr + + SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), + SCD_QUEUE_CTX_REG2_VAL(WIN_SIZE, frame_limit) | + SCD_QUEUE_CTX_REG2_VAL(FRAME_LIMIT, frame_limit)); + + /* Set up status area in SRAM, map to Tx DMA/FIFO, activate */ + iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id), + (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) | + (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) | + (1 << SCD_QUEUE_STTS_REG_POS_WSL) | + SCD_QUEUE_STTS_REG_MSK); + + /* enable the scheduler for this queue (only) */ + if (txq_id == trans->txqs.cmd.q_id && + trans_pcie->scd_set_active) + iwl_scd_enable_set_active(trans, BIT(txq_id)); + + IWL_DEBUG_TX_QUEUES(trans, + "Activate queue %d on FIFO %d WrPtr: %d\n", + txq_id, fifo, ssn & 0xff); + } else { + IWL_DEBUG_TX_QUEUES(trans, + "Activate queue %d WrPtr: %d\n", + txq_id, ssn & 0xff); + } + + return scd_bug; +} + +void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id, + bool shared_mode) +{ + struct iwl_txq *txq = trans->txqs.txq[txq_id]; + + txq->ampdu = !shared_mode; +} + +void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, + bool configure_scd) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + u32 stts_addr = trans_pcie->scd_base_addr + + SCD_TX_STTS_QUEUE_OFFSET(txq_id); + static const u32 zero_val[4] = {}; + + trans->txqs.txq[txq_id]->frozen_expiry_remainder = 0; + trans->txqs.txq[txq_id]->frozen = false; + + /* + * Upon HW Rfkill - we stop the device, and then stop the queues + * in the op_mode. Just for the sake of the simplicity of the op_mode, + * allow the op_mode to call txq_disable after it already called + * stop_device. + */ + if (!test_and_clear_bit(txq_id, trans->txqs.queue_used)) { + WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), + "queue %d not used", txq_id); + return; + } + + if (configure_scd) { + iwl_scd_txq_set_inactive(trans, txq_id); + + iwl_trans_write_mem(trans, stts_addr, (const void *)zero_val, + ARRAY_SIZE(zero_val)); + } + + iwl_pcie_txq_unmap(trans, txq_id); + trans->txqs.txq[txq_id]->ampdu = false; + + IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); +} + +/*************** HOST COMMAND QUEUE FUNCTIONS *****/ + +/* + * iwl_pcie_enqueue_hcmd - enqueue a uCode command + * @priv: device private data point + * @cmd: a pointer to the ucode command structure + * + * The function returns < 0 values to indicate the operation + * failed. On success, it returns the index (>= 0) of command in the + * command queue. 
+ */ +int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, + struct iwl_host_cmd *cmd) +{ + struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; + struct iwl_device_cmd *out_cmd; + struct iwl_cmd_meta *out_meta; + void *dup_buf = NULL; + dma_addr_t phys_addr; + int idx; + u16 copy_size, cmd_size, tb0_size; + bool had_nocopy = false; + u8 group_id = iwl_cmd_groupid(cmd->id); + int i, ret; + u32 cmd_pos; + const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; + u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; + unsigned long flags; + + if (WARN(!trans->wide_cmd_header && + group_id > IWL_ALWAYS_LONG_GROUP, + "unsupported wide command %#x\n", cmd->id)) + return -EINVAL; + + if (group_id != 0) { + copy_size = sizeof(struct iwl_cmd_header_wide); + cmd_size = sizeof(struct iwl_cmd_header_wide); + } else { + copy_size = sizeof(struct iwl_cmd_header); + cmd_size = sizeof(struct iwl_cmd_header); + } + + /* need one for the header if the first is NOCOPY */ + BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1); + + for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { + cmddata[i] = cmd->data[i]; + cmdlen[i] = cmd->len[i]; + + if (!cmd->len[i]) + continue; + + /* need at least IWL_FIRST_TB_SIZE copied */ + if (copy_size < IWL_FIRST_TB_SIZE) { + int copy = IWL_FIRST_TB_SIZE - copy_size; + + if (copy > cmdlen[i]) + copy = cmdlen[i]; + cmdlen[i] -= copy; + cmddata[i] += copy; + copy_size += copy; + } + + if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) { + had_nocopy = true; + if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) { + idx = -EINVAL; + goto free_dup_buf; + } + } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) { + /* + * This is also a chunk that isn't copied + * to the static buffer so set had_nocopy. + */ + had_nocopy = true; + + /* only allowed once */ + if (WARN_ON(dup_buf)) { + idx = -EINVAL; + goto free_dup_buf; + } + + dup_buf = kmemdup(cmddata[i], cmdlen[i], + GFP_ATOMIC); + if (!dup_buf) + return -ENOMEM; + } else { + /* NOCOPY must not be followed by normal! */ + if (WARN_ON(had_nocopy)) { + idx = -EINVAL; + goto free_dup_buf; + } + copy_size += cmdlen[i]; + } + cmd_size += cmd->len[i]; + } + + /* + * If any of the command structures end up being larger than + * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically + * allocated into separate TFDs, then we will need to + * increase the size of the buffers. + */ + if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE, + "Command %s (%#x) is too large (%d bytes)\n", + iwl_get_cmd_string(trans, cmd->id), + cmd->id, copy_size)) { + idx = -EINVAL; + goto free_dup_buf; + } + + spin_lock_irqsave(&txq->lock, flags); + + if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 
2 : 1)) { + spin_unlock_irqrestore(&txq->lock, flags); + + IWL_ERR(trans, "No space in command queue\n"); + iwl_op_mode_cmd_queue_full(trans->op_mode); + idx = -ENOSPC; + goto free_dup_buf; + } + + idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); + out_cmd = txq->entries[idx].cmd; + out_meta = &txq->entries[idx].meta; + + memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */ + if (cmd->flags & CMD_WANT_SKB) + out_meta->source = cmd; + + /* set up the header */ + if (group_id != 0) { + out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id); + out_cmd->hdr_wide.group_id = group_id; + out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id); + out_cmd->hdr_wide.length = + cpu_to_le16(cmd_size - + sizeof(struct iwl_cmd_header_wide)); + out_cmd->hdr_wide.reserved = 0; + out_cmd->hdr_wide.sequence = + cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) | + INDEX_TO_SEQ(txq->write_ptr)); + + cmd_pos = sizeof(struct iwl_cmd_header_wide); + copy_size = sizeof(struct iwl_cmd_header_wide); + } else { + out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id); + out_cmd->hdr.sequence = + cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) | + INDEX_TO_SEQ(txq->write_ptr)); + out_cmd->hdr.group_id = 0; + + cmd_pos = sizeof(struct iwl_cmd_header); + copy_size = sizeof(struct iwl_cmd_header); + } + + /* and copy the data that needs to be copied */ + for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { + int copy; + + if (!cmd->len[i]) + continue; + + /* copy everything if not nocopy/dup */ + if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | + IWL_HCMD_DFL_DUP))) { + copy = cmd->len[i]; + + memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); + cmd_pos += copy; + copy_size += copy; + continue; + } + + /* + * Otherwise we need at least IWL_FIRST_TB_SIZE copied + * in total (for bi-directional DMA), but copy up to what + * we can fit into the payload for debug dump purposes. 
+ */ + copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]); + + memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); + cmd_pos += copy; + + /* However, treat copy_size the proper way, we need it below */ + if (copy_size < IWL_FIRST_TB_SIZE) { + copy = IWL_FIRST_TB_SIZE - copy_size; + + if (copy > cmd->len[i]) + copy = cmd->len[i]; + copy_size += copy; + } + } + + IWL_DEBUG_HC(trans, + "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", + iwl_get_cmd_string(trans, cmd->id), + group_id, out_cmd->hdr.cmd, + le16_to_cpu(out_cmd->hdr.sequence), + cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id); + + /* start the TFD with the minimum copy bytes */ + tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE); + memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size); + iwl_pcie_txq_build_tfd(trans, txq, + iwl_txq_get_first_tb_dma(txq, idx), + tb0_size, true); + + /* map first command fragment, if any remains */ + if (copy_size > tb0_size) { + phys_addr = dma_map_single(trans->dev, + ((u8 *)&out_cmd->hdr) + tb0_size, + copy_size - tb0_size, + DMA_TO_DEVICE); + if (dma_mapping_error(trans->dev, phys_addr)) { + iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, + txq->write_ptr); + idx = -ENOMEM; + goto out; + } + + iwl_pcie_txq_build_tfd(trans, txq, phys_addr, + copy_size - tb0_size, false); + } + + /* map the remaining (adjusted) nocopy/dup fragments */ + for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { + void *data = (void *)(uintptr_t)cmddata[i]; + + if (!cmdlen[i]) + continue; + if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | + IWL_HCMD_DFL_DUP))) + continue; + if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) + data = dup_buf; + phys_addr = dma_map_single(trans->dev, data, + cmdlen[i], DMA_TO_DEVICE); + if (dma_mapping_error(trans->dev, phys_addr)) { + iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, + txq->write_ptr); + idx = -ENOMEM; + goto out; + } + + iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false); + } + + BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE); + out_meta->flags = cmd->flags; + if (WARN_ON_ONCE(txq->entries[idx].free_buf)) + kfree_sensitive(txq->entries[idx].free_buf); + txq->entries[idx].free_buf = dup_buf; + + trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide); + + /* start timer if queue currently empty */ + if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) + mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); + + ret = iwl_pcie_set_cmd_in_flight(trans, cmd); + if (ret < 0) { + idx = ret; + goto out; + } + + /* Increment and update queue's write index */ + txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr); + iwl_pcie_txq_inc_wr_ptr(trans, txq); + + out: + spin_unlock_irqrestore(&txq->lock, flags); + free_dup_buf: + if (idx < 0) + kfree(dup_buf); + return idx; +} + +/* + * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them + * @rxb: Rx buffer to reclaim + */ +void iwl_pcie_hcmd_complete(struct iwl_trans *trans, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + u16 sequence = le16_to_cpu(pkt->hdr.sequence); + u8 group_id; + u32 cmd_id; + int txq_id = SEQ_TO_QUEUE(sequence); + int index = SEQ_TO_INDEX(sequence); + int cmd_index; + struct iwl_device_cmd *cmd; + struct iwl_cmd_meta *meta; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; + + /* If a Tx command is being handled and it isn't in the actual + * command queue then there a command routing bug 
has been introduced + * in the queue management code. */ + if (WARN(txq_id != trans->txqs.cmd.q_id, + "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n", + txq_id, trans->txqs.cmd.q_id, sequence, txq->read_ptr, + txq->write_ptr)) { + iwl_print_hex_error(trans, pkt, 32); + return; + } + + spin_lock_bh(&txq->lock); + + cmd_index = iwl_txq_get_cmd_index(txq, index); + cmd = txq->entries[cmd_index].cmd; + meta = &txq->entries[cmd_index].meta; + group_id = cmd->hdr.group_id; + cmd_id = WIDE_ID(group_id, cmd->hdr.cmd); + + iwl_txq_gen1_tfd_unmap(trans, meta, txq, index); + + /* Input error checking is done when commands are added to queue. */ + if (meta->flags & CMD_WANT_SKB) { + struct page *p = rxb_steal_page(rxb); + + meta->source->resp_pkt = pkt; + meta->source->_rx_page_addr = (unsigned long)page_address(p); + meta->source->_rx_page_order = trans_pcie->rx_page_order; + } + + if (meta->flags & CMD_WANT_ASYNC_CALLBACK) + iwl_op_mode_async_cb(trans->op_mode, cmd); + + iwl_pcie_cmdq_reclaim(trans, txq_id, index); + + if (!(meta->flags & CMD_ASYNC)) { + if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) { + IWL_WARN(trans, + "HCMD_ACTIVE already clear for command %s\n", + iwl_get_cmd_string(trans, cmd_id)); + } + clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); + IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", + iwl_get_cmd_string(trans, cmd_id)); + wake_up(&trans->wait_command_queue); + } + + meta->flags = 0; + + spin_unlock_bh(&txq->lock); +} + +static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb, + struct iwl_txq *txq, u8 hdr_len, + struct iwl_cmd_meta *out_meta) +{ + u16 head_tb_len; + int i; + + /* + * Set up TFD's third entry to point directly to remainder + * of skb's head, if any + */ + head_tb_len = skb_headlen(skb) - hdr_len; + + if (head_tb_len > 0) { + dma_addr_t tb_phys = dma_map_single(trans->dev, + skb->data + hdr_len, + head_tb_len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(trans->dev, tb_phys))) + return -EINVAL; + trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len, + tb_phys, head_tb_len); + iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false); + } + + /* set up the remaining entries to point to the data */ + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + dma_addr_t tb_phys; + int tb_idx; + + if (!skb_frag_size(frag)) + continue; + + tb_phys = skb_frag_dma_map(trans->dev, frag, 0, + skb_frag_size(frag), DMA_TO_DEVICE); + + if (unlikely(dma_mapping_error(trans->dev, tb_phys))) + return -EINVAL; + trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag), + tb_phys, skb_frag_size(frag)); + tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys, + skb_frag_size(frag), false); + if (tb_idx < 0) + return tb_idx; + + out_meta->tbs |= BIT(tb_idx); + } + + return 0; +} + +#ifdef CONFIG_INET +static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, + struct iwl_txq *txq, u8 hdr_len, + struct iwl_cmd_meta *out_meta, + struct iwl_device_tx_cmd *dev_cmd, + u16 tb1_len) +{ + struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; + struct ieee80211_hdr *hdr = (void *)skb->data; + unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; + unsigned int mss = skb_shinfo(skb)->gso_size; + u16 length, iv_len, amsdu_pad; + u8 *start_hdr; + struct iwl_tso_hdr_page *hdr_page; + struct tso_t tso; + + /* if the packet is protected, then it must be CCMP or GCMP */ + BUILD_BUG_ON(IEEE80211_CCMP_HDR_LEN != 
IEEE80211_GCMP_HDR_LEN); + iv_len = ieee80211_has_protected(hdr->frame_control) ? + IEEE80211_CCMP_HDR_LEN : 0; + + trace_iwlwifi_dev_tx(trans->dev, skb, + iwl_txq_get_tfd(trans, txq, txq->write_ptr), + trans->txqs.tfd.size, + &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0); + + ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb); + snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb); + total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len; + amsdu_pad = 0; + + /* total amount of header we may need for this A-MSDU */ + hdr_room = DIV_ROUND_UP(total_len, mss) * + (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len; + + /* Our device supports 9 segments at most, it will fit in 1 page */ + hdr_page = get_page_hdr(trans, hdr_room, skb); + if (!hdr_page) + return -ENOMEM; + + start_hdr = hdr_page->pos; + memcpy(hdr_page->pos, skb->data + hdr_len, iv_len); + hdr_page->pos += iv_len; + + /* + * Pull the ieee80211 header + IV to be able to use TSO core, + * we will restore it for the tx_status flow. + */ + skb_pull(skb, hdr_len + iv_len); + + /* + * Remove the length of all the headers that we don't actually + * have in the MPDU by themselves, but that we duplicate into + * all the different MSDUs inside the A-MSDU. + */ + le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen); + + tso_start(skb, &tso); + + while (total_len) { + /* this is the data left for this subframe */ + unsigned int data_left = + min_t(unsigned int, mss, total_len); + unsigned int hdr_tb_len; + dma_addr_t hdr_tb_phys; + u8 *subf_hdrs_start = hdr_page->pos; + + total_len -= data_left; + + memset(hdr_page->pos, 0, amsdu_pad); + hdr_page->pos += amsdu_pad; + amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen + + data_left)) & 0x3; + ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr)); + hdr_page->pos += ETH_ALEN; + ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr)); + hdr_page->pos += ETH_ALEN; + + length = snap_ip_tcp_hdrlen + data_left; + *((__be16 *)hdr_page->pos) = cpu_to_be16(length); + hdr_page->pos += sizeof(length); + + /* + * This will copy the SNAP as well which will be considered + * as MAC header. 
+ */ + tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len); + + hdr_page->pos += snap_ip_tcp_hdrlen; + + hdr_tb_len = hdr_page->pos - start_hdr; + hdr_tb_phys = dma_map_single(trans->dev, start_hdr, + hdr_tb_len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) + return -EINVAL; + iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys, + hdr_tb_len, false); + trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, + hdr_tb_phys, hdr_tb_len); + /* add this subframe's headers' length to the tx_cmd */ + le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start); + + /* prepare the start_hdr for the next subframe */ + start_hdr = hdr_page->pos; + + /* put the payload */ + while (data_left) { + unsigned int size = min_t(unsigned int, tso.size, + data_left); + dma_addr_t tb_phys; + + tb_phys = dma_map_single(trans->dev, tso.data, + size, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(trans->dev, tb_phys))) + return -EINVAL; + + iwl_pcie_txq_build_tfd(trans, txq, tb_phys, + size, false); + trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data, + tb_phys, size); + + data_left -= size; + tso_build_data(skb, &tso, size); + } + } + + /* re -add the WiFi header and IV */ + skb_push(skb, hdr_len + iv_len); + + return 0; +} +#else /* CONFIG_INET */ +static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, + struct iwl_txq *txq, u8 hdr_len, + struct iwl_cmd_meta *out_meta, + struct iwl_device_tx_cmd *dev_cmd, + u16 tb1_len) +{ + /* No A-MSDU without CONFIG_INET */ + WARN_ON(1); + + return -1; +} +#endif /* CONFIG_INET */ + +int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, + struct iwl_device_tx_cmd *dev_cmd, int txq_id) +{ + struct ieee80211_hdr *hdr; + struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; + struct iwl_cmd_meta *out_meta; + struct iwl_txq *txq; + dma_addr_t tb0_phys, tb1_phys, scratch_phys; + void *tb1_addr; + void *tfd; + u16 len, tb1_len; + bool wait_write_ptr; + __le16 fc; + u8 hdr_len; + u16 wifi_seq; + bool amsdu; + + txq = trans->txqs.txq[txq_id]; + + if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used), + "TX on unused queue %d\n", txq_id)) + return -EINVAL; + + if (skb_is_nonlinear(skb) && + skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) && + __skb_linearize(skb)) + return -ENOMEM; + + /* mac80211 always puts the full header into the SKB's head, + * so there's no need to check if it's readable there + */ + hdr = (struct ieee80211_hdr *)skb->data; + fc = hdr->frame_control; + hdr_len = ieee80211_hdrlen(fc); + + spin_lock(&txq->lock); + + if (iwl_txq_space(trans, txq) < txq->high_mark) { + iwl_txq_stop(trans, txq); + + /* don't put the packet on the ring, if there is no room */ + if (unlikely(iwl_txq_space(trans, txq) < 3)) { + struct iwl_device_tx_cmd **dev_cmd_ptr; + + dev_cmd_ptr = (void *)((u8 *)skb->cb + + trans->txqs.dev_cmd_offs); + + *dev_cmd_ptr = dev_cmd; + __skb_queue_tail(&txq->overflow_q, skb); + + spin_unlock(&txq->lock); + return 0; + } + } + + /* In AGG mode, the index in the ring must correspond to the WiFi + * sequence number. This is a HW requirements to help the SCD to parse + * the BA. + * Check here that the packets are in the right place on the ring. 
+ */ + wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); + WARN_ONCE(txq->ampdu && + (wifi_seq & 0xff) != txq->write_ptr, + "Q: %d WiFi Seq %d tfdNum %d", + txq_id, wifi_seq, txq->write_ptr); + + /* Set up driver data for this TFD */ + txq->entries[txq->write_ptr].skb = skb; + txq->entries[txq->write_ptr].cmd = dev_cmd; + + dev_cmd->hdr.sequence = + cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | + INDEX_TO_SEQ(txq->write_ptr))); + + tb0_phys = iwl_txq_get_first_tb_dma(txq, txq->write_ptr); + scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) + + offsetof(struct iwl_tx_cmd, scratch); + + tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); + tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); + + /* Set up first empty entry in queue's array of Tx/cmd buffers */ + out_meta = &txq->entries[txq->write_ptr].meta; + out_meta->flags = 0; + + /* + * The second TB (tb1) points to the remainder of the TX command + * and the 802.11 header - dword aligned size + * (This calculation modifies the TX command, so do it before the + * setup of the first TB) + */ + len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) + + hdr_len - IWL_FIRST_TB_SIZE; + /* do not align A-MSDU to dword as the subframe header aligns it */ + amsdu = ieee80211_is_data_qos(fc) && + (*ieee80211_get_qos_ctl(hdr) & + IEEE80211_QOS_CTL_A_MSDU_PRESENT); + if (!amsdu) { + tb1_len = ALIGN(len, 4); + /* Tell NIC about any 2-byte padding after MAC header */ + if (tb1_len != len) + tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_MH_PAD); + } else { + tb1_len = len; + } + + /* + * The first TB points to bi-directional DMA data, we'll + * memcpy the data into it later. + */ + iwl_pcie_txq_build_tfd(trans, txq, tb0_phys, + IWL_FIRST_TB_SIZE, true); + + /* there must be data left over for TB1 or this code must be changed */ + BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE); + + /* map the data for TB1 */ + tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; + tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(trans->dev, tb1_phys))) + goto out_err; + iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false); + + trace_iwlwifi_dev_tx(trans->dev, skb, + iwl_txq_get_tfd(trans, txq, txq->write_ptr), + trans->txqs.tfd.size, + &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, + hdr_len); + + /* + * If gso_size wasn't set, don't give the frame "amsdu treatment" + * (adding subframes, etc.). + * This can happen in some testing flows when the amsdu was already + * pre-built, and we just need to send the resulting skb. 
+ */ + if (amsdu && skb_shinfo(skb)->gso_size) { + if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len, + out_meta, dev_cmd, + tb1_len))) + goto out_err; + } else { + struct sk_buff *frag; + + if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len, + out_meta))) + goto out_err; + + skb_walk_frags(skb, frag) { + if (unlikely(iwl_fill_data_tbs(trans, frag, txq, 0, + out_meta))) + goto out_err; + } + } + + /* building the A-MSDU might have changed this data, so memcpy it now */ + memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE); + + tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr); + /* Set up entry for this TFD in Tx byte-count array */ + iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len), + iwl_txq_gen1_tfd_get_num_tbs(trans, + tfd)); + + wait_write_ptr = ieee80211_has_morefrags(fc); + + /* start timer if queue currently empty */ + if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) { + /* + * If the TXQ is active, then set the timer, if not, + * set the timer in remainder so that the timer will + * be armed with the right value when the station will + * wake up. + */ + if (!txq->frozen) + mod_timer(&txq->stuck_timer, + jiffies + txq->wd_timeout); + else + txq->frozen_expiry_remainder = txq->wd_timeout; + } + + /* Tell device the write index *just past* this latest filled TFD */ + txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr); + if (!wait_write_ptr) + iwl_pcie_txq_inc_wr_ptr(trans, txq); + + /* + * At this point the frame is "transmitted" successfully + * and we will get a TX status notification eventually. + */ + spin_unlock(&txq->lock); + return 0; +out_err: + iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, txq->write_ptr); + spin_unlock(&txq->lock); + return -1; +} |
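Throughout the enqueue paths above (iwl_pcie_enqueue_hcmd and iwl_trans_pcie_tx) the driver packs the queue id and the ring write index into the 16-bit sequence field with QUEUE_TO_SEQ()/INDEX_TO_SEQ(), and the completion path (iwl_pcie_hcmd_complete) recovers both with SEQ_TO_QUEUE()/SEQ_TO_INDEX(). Below is a minimal, self-contained userspace sketch of that round trip; the shift/mask values are an assumption restated here purely for illustration (the real helpers are defined elsewhere in the driver headers, not in this file), and the DEMO_* names are hypothetical.

/* Illustrative sketch only: how a queue id and ring index can be packed
 * into one 16-bit "sequence" value and unpacked again, mirroring the
 * QUEUE_TO_SEQ/INDEX_TO_SEQ/SEQ_TO_QUEUE/SEQ_TO_INDEX usage above.
 * This is a plain C demo, not driver code. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_QUEUE_TO_SEQ(q)	(((q) & 0x1f) << 8)	/* queue id in bits 8..12 (assumed layout) */
#define DEMO_INDEX_TO_SEQ(i)	((i) & 0xff)		/* ring index in bits 0..7 (assumed layout) */
#define DEMO_SEQ_TO_QUEUE(s)	(((s) >> 8) & 0x1f)
#define DEMO_SEQ_TO_INDEX(s)	((s) & 0xff)

int main(void)
{
	uint16_t q = 9, idx = 42;	/* hypothetical cmd queue id and write_ptr */
	uint16_t seq = DEMO_QUEUE_TO_SEQ(q) | DEMO_INDEX_TO_SEQ(idx);

	/* the completion side recovers both values from the packed field,
	 * just as iwl_pcie_hcmd_complete derives txq_id and index */
	printf("seq=0x%04x queue=%u index=%u\n",
	       seq, DEMO_SEQ_TO_QUEUE(seq), DEMO_SEQ_TO_INDEX(seq));
	return 0;
}

The point of the packing is visible in iwl_pcie_hcmd_complete above: the response packet carries only the sequence field, yet that is enough to locate the exact TFD (queue plus index) whose buffers must be unmapped and reclaimed.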