author      Daniel Baumann <daniel.baumann@progress-linux.org>   2024-05-18 18:50:03 +0000
committer   Daniel Baumann <daniel.baumann@progress-linux.org>   2024-05-18 18:50:03 +0000
commit      01a69402cf9d38ff180345d55c2ee51c7e89fbc7 (patch)
tree        b406c5242a088c4f59c6e4b719b783f43aca6ae9 /drivers/net/ethernet/intel
parent      Adding upstream version 6.7.12. (diff)
download    linux-01a69402cf9d38ff180345d55c2ee51c7e89fbc7.tar.xz
            linux-01a69402cf9d38ff180345d55c2ee51c7e89fbc7.zip
Adding upstream version 6.8.9. (tag: upstream/6.8.9)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/intel')
150 files changed, 6664 insertions, 4093 deletions
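
The bulk of this import is a mechanical cleanup across the Intel drivers: open-coded mask-and-shift sequences are replaced with the FIELD_GET()/FIELD_PREP() helpers from <linux/bitfield.h>, which derive the shift from the mask at compile time and make the companion *_SHIFT defines unnecessary. A minimal sketch of the equivalence follows; the register layout and names are illustrative, not taken from any of the drivers below.

	#include <linux/bitfield.h>
	#include <linux/bits.h>
	#include <linux/types.h>

	#define EXAMPLE_SPEED_MASK	GENMASK(7, 4)	/* bits 7:4 of the register */
	#define EXAMPLE_SPEED_SHIFT	4		/* obsoleted by FIELD_GET/FIELD_PREP */

	static u32 example_get_speed(u32 reg)
	{
		/* before: (reg & EXAMPLE_SPEED_MASK) >> EXAMPLE_SPEED_SHIFT */
		return FIELD_GET(EXAMPLE_SPEED_MASK, reg);
	}

	static u32 example_set_speed(u32 reg, u32 speed)
	{
		/* before: (reg & ~MASK) | ((speed << SHIFT) & MASK) */
		reg &= ~EXAMPLE_SPEED_MASK;
		return reg | FIELD_PREP(EXAMPLE_SPEED_MASK, speed);
	}

Both helpers require the mask to be a compile-time constant, which also lets FIELD_PREP() reject constant values that do not fit the field at build time, something the hand-rolled shifts never did.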
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 06ddd7147c..d55638ad87 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -299,6 +299,17 @@ config ICE To compile this driver as a module, choose M here. The module will be called ice. +config ICE_HWMON + bool "Intel(R) Ethernet Connection E800 Series Support HWMON support" + default y + depends on ICE && HWMON && !(ICE=y && HWMON=m) + help + Say Y if you want to expose thermal sensor data on Intel devices. + + Some of our devices contain internal thermal sensors. + This data is available via the hwmon sysfs interface and exposes + the onboard sensors. + config ICE_SWITCHDEV bool "Switchdev Support" default y diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c index 4576511c99..f9328f2e66 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_hw.c +++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c @@ -3261,8 +3261,7 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, return ret_val; phy_info->mdix_mode = - (e1000_auto_x_mode) ((phy_data & IGP01E1000_PSSR_MDIX) >> - IGP01E1000_PSSR_MDIX_SHIFT); + (e1000_auto_x_mode)FIELD_GET(IGP01E1000_PSSR_MDIX, phy_data); if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == IGP01E1000_PSSR_SPEED_1000MBPS) { @@ -3273,11 +3272,11 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, if (ret_val) return ret_val; - phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >> - SR_1000T_LOCAL_RX_STATUS_SHIFT) ? + phy_info->local_rx = FIELD_GET(SR_1000T_LOCAL_RX_STATUS, + phy_data) ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; - phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >> - SR_1000T_REMOTE_RX_STATUS_SHIFT) ? + phy_info->remote_rx = FIELD_GET(SR_1000T_REMOTE_RX_STATUS, + phy_data) ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; /* Get cable length */ @@ -3327,14 +3326,12 @@ static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, return ret_val; phy_info->extended_10bt_distance = - ((phy_data & M88E1000_PSCR_10BT_EXT_DIST_ENABLE) >> - M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT) ? + FIELD_GET(M88E1000_PSCR_10BT_EXT_DIST_ENABLE, phy_data) ? e1000_10bt_ext_dist_enable_lower : e1000_10bt_ext_dist_enable_normal; phy_info->polarity_correction = - ((phy_data & M88E1000_PSCR_POLARITY_REVERSAL) >> - M88E1000_PSCR_POLARITY_REVERSAL_SHIFT) ? + FIELD_GET(M88E1000_PSCR_POLARITY_REVERSAL, phy_data) ? e1000_polarity_reversal_disabled : e1000_polarity_reversal_enabled; /* Check polarity status */ @@ -3348,27 +3345,25 @@ static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, return ret_val; phy_info->mdix_mode = - (e1000_auto_x_mode) ((phy_data & M88E1000_PSSR_MDIX) >> - M88E1000_PSSR_MDIX_SHIFT); + (e1000_auto_x_mode)FIELD_GET(M88E1000_PSSR_MDIX, phy_data); if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { /* Cable Length Estimation and Local/Remote Receiver Information * are only valid at 1000 Mbps. */ phy_info->cable_length = - (e1000_cable_length) ((phy_data & - M88E1000_PSSR_CABLE_LENGTH) >> - M88E1000_PSSR_CABLE_LENGTH_SHIFT); + (e1000_cable_length)FIELD_GET(M88E1000_PSSR_CABLE_LENGTH, + phy_data); ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); if (ret_val) return ret_val; - phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >> - SR_1000T_LOCAL_RX_STATUS_SHIFT) ? + phy_info->local_rx = FIELD_GET(SR_1000T_LOCAL_RX_STATUS, + phy_data) ? 
e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; - phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >> - SR_1000T_REMOTE_RX_STATUS_SHIFT) ? + phy_info->remote_rx = FIELD_GET(SR_1000T_REMOTE_RX_STATUS, + phy_data) ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; } @@ -3516,7 +3511,7 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw) if (ret_val) return ret_val; eeprom_size = - (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT; + FIELD_GET(EEPROM_SIZE_MASK, eeprom_size); /* 256B eeprom size was not supported in earlier hardware, so we * bump eeprom_size up one to ensure that "1" (which maps to * 256B) is never the result used in the shifting logic below. @@ -4892,8 +4887,7 @@ static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, &phy_data); if (ret_val) return ret_val; - cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> - M88E1000_PSSR_CABLE_LENGTH_SHIFT; + cable_length = FIELD_GET(M88E1000_PSSR_CABLE_LENGTH, phy_data); /* Convert the enum value to ranged values */ switch (cable_length) { @@ -5002,8 +4996,7 @@ static s32 e1000_check_polarity(struct e1000_hw *hw, &phy_data); if (ret_val) return ret_val; - *polarity = ((phy_data & M88E1000_PSSR_REV_POLARITY) >> - M88E1000_PSSR_REV_POLARITY_SHIFT) ? + *polarity = FIELD_GET(M88E1000_PSSR_REV_POLARITY, phy_data) ? e1000_rev_polarity_reversed : e1000_rev_polarity_normal; } else if (hw->phy_type == e1000_phy_igp) { @@ -5073,8 +5066,8 @@ static s32 e1000_check_downshift(struct e1000_hw *hw) if (ret_val) return ret_val; - hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >> - M88E1000_PSSR_DOWNSHIFT_SHIFT; + hw->speed_downgraded = FIELD_GET(M88E1000_PSSR_DOWNSHIFT, + phy_data); } return E1000_SUCCESS; diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c index be9c695dde..4eb1ceaf86 100644 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c @@ -92,8 +92,7 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw) nvm->type = e1000_nvm_eeprom_spi; - size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> - E1000_EECD_SIZE_EX_SHIFT); + size = (u16)FIELD_GET(E1000_EECD_SIZE_EX_MASK, eecd); /* Added to a constant, "size" becomes the left-shift value * for setting word_size. @@ -1035,17 +1034,18 @@ static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw) * iteration and increase the max iterations when * polling the phy; this fixes erroneous timeouts at 10Mbps. 
*/ - ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 4), - 0xFFFF); + /* these next three accesses were always meant to use page 0x34 using + * GG82563_REG(0x34, N) but never did, so we've just corrected the call + * to not drop bits + */ + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, 4, 0xFFFF); if (ret_val) return ret_val; - ret_val = e1000_read_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9), - ®_data); + ret_val = e1000_read_kmrn_reg_80003es2lan(hw, 9, ®_data); if (ret_val) return ret_val; reg_data |= 0x3F; - ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9), - reg_data); + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, 9, reg_data); if (ret_val) return ret_val; ret_val = @@ -1209,8 +1209,8 @@ static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, if (ret_val) return ret_val; - kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & - E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; + kmrnctrlsta = FIELD_PREP(E1000_KMRNCTRLSTA_OFFSET, offset) | + E1000_KMRNCTRLSTA_REN; ew32(KMRNCTRLSTA, kmrnctrlsta); e1e_flush(); @@ -1244,8 +1244,7 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, if (ret_val) return ret_val; - kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & - E1000_KMRNCTRLSTA_OFFSET) | data; + kmrnctrlsta = FIELD_PREP(E1000_KMRNCTRLSTA_OFFSET, offset) | data; ew32(KMRNCTRLSTA, kmrnctrlsta); e1e_flush(); diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c index 0b1e890dd5..969f855a79 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.c +++ b/drivers/net/ethernet/intel/e1000e/82571.c @@ -157,8 +157,7 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw) fallthrough; default: nvm->type = e1000_nvm_eeprom_spi; - size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> - E1000_EECD_SIZE_EX_SHIFT); + size = (u16)FIELD_GET(E1000_EECD_SIZE_EX_MASK, eecd); /* Added to a constant, "size" becomes the left-shift value * for setting word_size. 
*/ diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index 63c3c79380..23a58cada4 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -678,11 +678,8 @@ /* PCI/PCI-X/PCI-EX Config space */ #define PCI_HEADER_TYPE_REGISTER 0x0E -#define PCIE_LINK_STATUS 0x12 #define PCI_HEADER_TYPE_MULTIFUNC 0x80 -#define PCIE_LINK_WIDTH_MASK 0x3F0 -#define PCIE_LINK_WIDTH_SHIFT 4 #define PHY_REVISION_MASK 0xFFFFFFF0 #define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 9835e6a90d..fc0f98ea61 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -654,8 +654,8 @@ static void e1000_get_drvinfo(struct net_device *netdev, */ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d-%d", - (adapter->eeprom_vers & 0xF000) >> 12, - (adapter->eeprom_vers & 0x0FF0) >> 4, + FIELD_GET(0xF000, adapter->eeprom_vers), + FIELD_GET(0x0FF0, adapter->eeprom_vers), (adapter->eeprom_vers & 0x000F)); strscpy(drvinfo->bus_info, pci_name(adapter->pdev), @@ -925,8 +925,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) } if (mac->type >= e1000_pch_lpt) - wlock_mac = (er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK) >> - E1000_FWSM_WLOCK_MAC_SHIFT; + wlock_mac = FIELD_GET(E1000_FWSM_WLOCK_MAC_MASK, er32(FWSM)); for (i = 0; i < mac->rar_entry_count; i++) { if (mac->type >= e1000_pch_lpt) { diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index 1fef6bb5a5..4b6e753617 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -628,6 +628,7 @@ struct e1000_phy_info { u32 id; u32 reset_delay_us; /* in usec */ u32 revision; + u32 retry_count; enum e1000_media_type media_type; @@ -644,6 +645,7 @@ struct e1000_phy_info { bool polarity_correction; bool speed_downgraded; bool autoneg_wait_to_complete; + bool retry_enabled; }; struct e1000_nvm_info { diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 39e9fc601b..f9e94be36e 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -222,11 +222,18 @@ out: if (hw->mac.type >= e1000_pch_lpt) { /* Only unforce SMBus if ME is not active */ if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { + /* Switching PHY interface always returns MDI error + * so disable retry mechanism to avoid wasting time + */ + e1000e_disable_phy_retry(hw); + /* Unforce SMBus mode in PHY */ e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg); phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg); + e1000e_enable_phy_retry(hw); + /* Unforce SMBus mode in MAC */ mac_reg = er32(CTRL_EXT); mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; @@ -310,6 +317,11 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) goto out; } + /* There is no guarantee that the PHY is accessible at this time + * so disable retry mechanism to avoid wasting time + */ + e1000e_disable_phy_retry(hw); + /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is * inaccessible and resetting the PHY is not blocked, toggle the * LANPHYPC Value bit to force the interconnect to PCIe mode. 
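
The retry machinery these ich8lan.c hunks hook into is introduced in phy.c further down: e1000e_read_phy_reg_mdic() and e1000e_write_phy_reg_mdic() now loop up to phy->retry_count extra times, but only while phy->retry_enabled is set. The sketch below condenses how the pieces fit together; every line is lifted from hunks elsewhere in this diff.

	/* phy.c: retries happen only while the flag is set */
	retry_max = phy->retry_enabled ? phy->retry_count : 0;

	/* ich8lan.c: only pch_mtp parts opt in, with two retries */
	if (hw->mac.type == e1000_pch_mtp) {
		phy->retry_count = 2;
		e1000e_enable_phy_retry(hw);
	}

	/* Switching the PHY interface always completes with an MDI error,
	 * so such accesses are bracketed to skip the pointless retries:
	 */
	e1000e_disable_phy_retry(hw);
	e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
	phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
	e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);
	e1000e_enable_phy_retry(hw);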
@@ -380,6 +392,8 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) break; } + e1000e_enable_phy_retry(hw); + hw->phy.ops.release(hw); if (!ret_val) { @@ -449,6 +463,11 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) phy->id = e1000_phy_unknown; + if (hw->mac.type == e1000_pch_mtp) { + phy->retry_count = 2; + e1000e_enable_phy_retry(hw); + } + ret_val = e1000_init_phy_workarounds_pchlan(hw); if (ret_val) return ret_val; @@ -1072,13 +1091,11 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link) lat_enc_d = (lat_enc & E1000_LTRV_VALUE_MASK) * (1U << (E1000_LTRV_SCALE_FACTOR * - ((lat_enc & E1000_LTRV_SCALE_MASK) - >> E1000_LTRV_SCALE_SHIFT))); + FIELD_GET(E1000_LTRV_SCALE_MASK, lat_enc))); max_ltr_enc_d = (max_ltr_enc & E1000_LTRV_VALUE_MASK) * - (1U << (E1000_LTRV_SCALE_FACTOR * - ((max_ltr_enc & E1000_LTRV_SCALE_MASK) - >> E1000_LTRV_SCALE_SHIFT))); + (1U << (E1000_LTRV_SCALE_FACTOR * + FIELD_GET(E1000_LTRV_SCALE_MASK, max_ltr_enc))); if (lat_enc_d > max_ltr_enc_d) lat_enc = max_ltr_enc; @@ -1148,18 +1165,6 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx) if (ret_val) goto out; - /* Force SMBus mode in PHY */ - ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg); - if (ret_val) - goto release; - phy_reg |= CV_SMB_CTRL_FORCE_SMBUS; - e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg); - - /* Force SMBus mode in MAC */ - mac_reg = er32(CTRL_EXT); - mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; - ew32(CTRL_EXT, mac_reg); - /* Si workaround for ULP entry flow on i127/rev6 h/w. Enable * LPLU and disable Gig speed when entering ULP */ @@ -1315,6 +1320,11 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force) /* Toggle LANPHYPC Value bit */ e1000_toggle_lanphypc_pch_lpt(hw); + /* Switching PHY interface always returns MDI error + * so disable retry mechanism to avoid wasting time + */ + e1000e_disable_phy_retry(hw); + /* Unforce SMBus mode in PHY */ ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg); if (ret_val) { @@ -1335,6 +1345,8 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force) phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg); + e1000e_enable_phy_retry(hw); + /* Unforce SMBus mode in MAC */ mac_reg = er32(CTRL_EXT); mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; @@ -2075,8 +2087,7 @@ static s32 e1000_write_smbus_addr(struct e1000_hw *hw) { u16 phy_data; u32 strap = er32(STRAP); - u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >> - E1000_STRAP_SMT_FREQ_SHIFT; + u32 freq = FIELD_GET(E1000_STRAP_SMT_FREQ_MASK, strap); s32 ret_val; strap &= E1000_STRAP_SMBUS_ADDRESS_MASK; @@ -2562,8 +2573,7 @@ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw) hw->phy.ops.write_reg_page(hw, BM_RAR_H(i), (u16)(mac_reg & 0xFFFF)); hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i), - (u16)((mac_reg & E1000_RAH_AV) - >> 16)); + (u16)((mac_reg & E1000_RAH_AV) >> 16)); } e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg); @@ -3205,7 +3215,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) &nvm_dword); if (ret_val) return ret_val; - sig_byte = (u8)((nvm_dword & 0xFF00) >> 8); + sig_byte = FIELD_GET(0xFF00, nvm_dword); if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == E1000_ICH_NVM_SIG_VALUE) { *bank = 0; @@ -3218,7 +3228,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) &nvm_dword); if (ret_val) return ret_val; - sig_byte = (u8)((nvm_dword & 0xFF00) >> 8); + sig_byte = 
FIELD_GET(0xFF00, nvm_dword); if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == E1000_ICH_NVM_SIG_VALUE) { *bank = 1; diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c index 5df7ad93f3..d7df2a0ed6 100644 --- a/drivers/net/ethernet/intel/e1000e/mac.c +++ b/drivers/net/ethernet/intel/e1000e/mac.c @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 1999 - 2018 Intel Corporation. */ +#include <linux/bitfield.h> + #include "e1000.h" /** @@ -13,21 +15,17 @@ **/ s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw) { + struct pci_dev *pdev = hw->adapter->pdev; struct e1000_mac_info *mac = &hw->mac; struct e1000_bus_info *bus = &hw->bus; - struct e1000_adapter *adapter = hw->adapter; - u16 pcie_link_status, cap_offset; + u16 pcie_link_status; - cap_offset = adapter->pdev->pcie_cap; - if (!cap_offset) { + if (!pci_pcie_cap(pdev)) { bus->width = e1000_bus_width_unknown; } else { - pci_read_config_word(adapter->pdev, - cap_offset + PCIE_LINK_STATUS, - &pcie_link_status); - bus->width = (enum e1000_bus_width)((pcie_link_status & - PCIE_LINK_WIDTH_MASK) >> - PCIE_LINK_WIDTH_SHIFT); + pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &pcie_link_status); + bus->width = (enum e1000_bus_width)FIELD_GET(PCI_EXP_LNKSTA_NLW, + pcie_link_status); } mac->ops.set_lan_id(hw); @@ -52,7 +50,7 @@ void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw) * for the device regardless of function swap state. */ reg = er32(STATUS); - bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; + bus->func = FIELD_GET(E1000_STATUS_FUNC_MASK, reg); } /** diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index f536c85672..3692fce201 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -1788,8 +1788,7 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data) adapter->corr_errors += pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; adapter->uncorr_errors += - (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> - E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; + FIELD_GET(E1000_PBECCSTS_UNCORR_ERR_CNT_MASK, pbeccsts); /* Do the reset outside of interrupt context */ schedule_work(&adapter->reset_task); @@ -1868,8 +1867,7 @@ static irqreturn_t e1000_intr(int __always_unused irq, void *data) adapter->corr_errors += pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; adapter->uncorr_errors += - (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> - E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; + FIELD_GET(E1000_PBECCSTS_UNCORR_ERR_CNT_MASK, pbeccsts); /* Do the reset outside of interrupt context */ schedule_work(&adapter->reset_task); @@ -5031,8 +5029,7 @@ static void e1000e_update_stats(struct e1000_adapter *adapter) adapter->corr_errors += pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; adapter->uncorr_errors += - (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> - E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; + FIELD_GET(E1000_PBECCSTS_UNCORR_ERR_CNT_MASK, pbeccsts); } } @@ -6249,7 +6246,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc) phy_reg |= BM_RCTL_MPE; phy_reg &= ~(BM_RCTL_MO_MASK); if (mac_reg & E1000_RCTL_MO_3) - phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT) + phy_reg |= (FIELD_GET(E1000_RCTL_MO_3, mac_reg) << BM_RCTL_MO_SHIFT); if (mac_reg & E1000_RCTL_BAM) phy_reg |= BM_RCTL_BAM; @@ -6626,6 +6623,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) struct e1000_hw *hw = &adapter->hw; u32 ctrl, ctrl_ext, rctl, 
status, wufc; int retval = 0; + u16 smb_ctrl; /* Runtime suspend should only enable wakeup for link changes */ if (runtime) @@ -6691,14 +6689,31 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) if (adapter->hw.phy.type == e1000_phy_igp_3) { e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); } else if (hw->mac.type >= e1000_pch_lpt) { - if (wufc && !(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) + if (wufc && !(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) { /* ULP does not support wake from unicast, multicast * or broadcast. */ retval = e1000_enable_ulp_lpt_lp(hw, !runtime); + if (retval) + return retval; + } + + /* Force SMBUS to allow WOL */ + /* Switching PHY interface always returns MDI error + * so disable retry mechanism to avoid wasting time + */ + e1000e_disable_phy_retry(hw); + + e1e_rphy(hw, CV_SMB_CTRL, &smb_ctrl); + smb_ctrl |= CV_SMB_CTRL_FORCE_SMBUS; + e1e_wphy(hw, CV_SMB_CTRL, smb_ctrl); - if (retval) - return retval; + e1000e_enable_phy_retry(hw); + + /* Force SMBus mode in MAC */ + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_FORCE_SMBUS; + ew32(CTRL_EXT, ctrl_ext); } /* Ensure that the appropriate bits are set in LPI_CTRL diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index 08c3d477dd..93544f1cc2 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ b/drivers/net/ethernet/intel/e1000e/phy.c @@ -107,6 +107,16 @@ s32 e1000e_phy_reset_dsp(struct e1000_hw *hw) return e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0); } +void e1000e_disable_phy_retry(struct e1000_hw *hw) +{ + hw->phy.retry_enabled = false; +} + +void e1000e_enable_phy_retry(struct e1000_hw *hw) +{ + hw->phy.retry_enabled = true; +} + /** * e1000e_read_phy_reg_mdic - Read MDI control register * @hw: pointer to the HW structure @@ -118,57 +128,73 @@ s32 e1000e_phy_reset_dsp(struct e1000_hw *hw) **/ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) { + u32 i, mdic = 0, retry_counter, retry_max; struct e1000_phy_info *phy = &hw->phy; - u32 i, mdic = 0; + bool success; if (offset > MAX_PHY_REG_ADDRESS) { e_dbg("PHY Address %d is out of range\n", offset); return -E1000_ERR_PARAM; } + retry_max = phy->retry_enabled ? phy->retry_count : 0; + /* Set up Op-code, Phy Address, and register offset in the MDI * Control register. The MAC will take care of interfacing with the * PHY to retrieve the desired data. 
*/ - mdic = ((offset << E1000_MDIC_REG_SHIFT) | - (phy->addr << E1000_MDIC_PHY_SHIFT) | - (E1000_MDIC_OP_READ)); + for (retry_counter = 0; retry_counter <= retry_max; retry_counter++) { + success = true; - ew32(MDIC, mdic); + mdic = ((offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_READ)); - /* Poll the ready bit to see if the MDI read completed - * Increasing the time out as testing showed failures with - * the lower time out - */ - for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { - udelay(50); - mdic = er32(MDIC); - if (mdic & E1000_MDIC_READY) - break; - } - if (!(mdic & E1000_MDIC_READY)) { - e_dbg("MDI Read PHY Reg Address %d did not complete\n", offset); - return -E1000_ERR_PHY; - } - if (mdic & E1000_MDIC_ERROR) { - e_dbg("MDI Read PHY Reg Address %d Error\n", offset); - return -E1000_ERR_PHY; - } - if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) { - e_dbg("MDI Read offset error - requested %d, returned %d\n", - offset, - (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT); - return -E1000_ERR_PHY; - } - *data = (u16)mdic; + ew32(MDIC, mdic); - /* Allow some time after each MDIC transaction to avoid - * reading duplicate data in the next MDIC transaction. - */ - if (hw->mac.type == e1000_pch2lan) - udelay(100); + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { + usleep_range(50, 60); + mdic = er32(MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + e_dbg("MDI Read PHY Reg Address %d did not complete\n", + offset); + success = false; + } + if (mdic & E1000_MDIC_ERROR) { + e_dbg("MDI Read PHY Reg Address %d Error\n", offset); + success = false; + } + if (FIELD_GET(E1000_MDIC_REG_MASK, mdic) != offset) { + e_dbg("MDI Read offset error - requested %d, returned %d\n", + offset, FIELD_GET(E1000_MDIC_REG_MASK, mdic)); + success = false; + } - return 0; + /* Allow some time after each MDIC transaction to avoid + * reading duplicate data in the next MDIC transaction. + */ + if (hw->mac.type == e1000_pch2lan) + usleep_range(100, 150); + + if (success) { + *data = (u16)mdic; + return 0; + } + + if (retry_counter != retry_max) { + e_dbg("Perform retry on PHY transaction...\n"); + mdelay(10); + } + } + + return -E1000_ERR_PHY; } /** @@ -181,57 +207,72 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) **/ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) { + u32 i, mdic = 0, retry_counter, retry_max; struct e1000_phy_info *phy = &hw->phy; - u32 i, mdic = 0; + bool success; if (offset > MAX_PHY_REG_ADDRESS) { e_dbg("PHY Address %d is out of range\n", offset); return -E1000_ERR_PARAM; } + retry_max = phy->retry_enabled ? phy->retry_count : 0; + /* Set up Op-code, Phy Address, and register offset in the MDI * Control register. The MAC will take care of interfacing with the * PHY to retrieve the desired data. 
*/ - mdic = (((u32)data) | - (offset << E1000_MDIC_REG_SHIFT) | - (phy->addr << E1000_MDIC_PHY_SHIFT) | - (E1000_MDIC_OP_WRITE)); + for (retry_counter = 0; retry_counter <= retry_max; retry_counter++) { + success = true; - ew32(MDIC, mdic); + mdic = (((u32)data) | + (offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_WRITE)); - /* Poll the ready bit to see if the MDI read completed - * Increasing the time out as testing showed failures with - * the lower time out - */ - for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { - udelay(50); - mdic = er32(MDIC); - if (mdic & E1000_MDIC_READY) - break; - } - if (!(mdic & E1000_MDIC_READY)) { - e_dbg("MDI Write PHY Reg Address %d did not complete\n", offset); - return -E1000_ERR_PHY; - } - if (mdic & E1000_MDIC_ERROR) { - e_dbg("MDI Write PHY Red Address %d Error\n", offset); - return -E1000_ERR_PHY; - } - if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) { - e_dbg("MDI Write offset error - requested %d, returned %d\n", - offset, - (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT); - return -E1000_ERR_PHY; - } + ew32(MDIC, mdic); - /* Allow some time after each MDIC transaction to avoid - * reading duplicate data in the next MDIC transaction. - */ - if (hw->mac.type == e1000_pch2lan) - udelay(100); + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { + usleep_range(50, 60); + mdic = er32(MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + e_dbg("MDI Write PHY Reg Address %d did not complete\n", + offset); + success = false; + } + if (mdic & E1000_MDIC_ERROR) { + e_dbg("MDI Write PHY Reg Address %d Error\n", offset); + success = false; + } + if (FIELD_GET(E1000_MDIC_REG_MASK, mdic) != offset) { + e_dbg("MDI Write offset error - requested %d, returned %d\n", + offset, FIELD_GET(E1000_MDIC_REG_MASK, mdic)); + success = false; + } - return 0; + /* Allow some time after each MDIC transaction to avoid + * reading duplicate data in the next MDIC transaction. 
+ */ + if (hw->mac.type == e1000_pch2lan) + usleep_range(100, 150); + + if (success) + return 0; + + if (retry_counter != retry_max) { + e_dbg("Perform retry on PHY transaction...\n"); + mdelay(10); + } + } + + return -E1000_ERR_PHY; } /** @@ -463,8 +504,8 @@ static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data, return ret_val; } - kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & - E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; + kmrnctrlsta = FIELD_PREP(E1000_KMRNCTRLSTA_OFFSET, offset) | + E1000_KMRNCTRLSTA_REN; ew32(KMRNCTRLSTA, kmrnctrlsta); e1e_flush(); @@ -536,8 +577,7 @@ static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data, return ret_val; } - kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & - E1000_KMRNCTRLSTA_OFFSET) | data; + kmrnctrlsta = FIELD_PREP(E1000_KMRNCTRLSTA_OFFSET, offset) | data; ew32(KMRNCTRLSTA, kmrnctrlsta); e1e_flush(); @@ -1793,8 +1833,7 @@ s32 e1000e_get_cable_length_m88(struct e1000_hw *hw) if (ret_val) return ret_val; - index = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >> - M88E1000_PSSR_CABLE_LENGTH_SHIFT); + index = FIELD_GET(M88E1000_PSSR_CABLE_LENGTH, phy_data); if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) return -E1000_ERR_PHY; @@ -3234,8 +3273,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw) if (ret_val) return ret_val; - length = ((phy_data & I82577_DSTATUS_CABLE_LENGTH) >> - I82577_DSTATUS_CABLE_LENGTH_SHIFT); + length = FIELD_GET(I82577_DSTATUS_CABLE_LENGTH, phy_data); if (length == E1000_CABLE_LENGTH_UNDEFINED) return -E1000_ERR_PHY; diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h index c48777d095..049bb325b4 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.h +++ b/drivers/net/ethernet/intel/e1000e/phy.h @@ -51,6 +51,8 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data); s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data); void e1000_power_up_phy_copper(struct e1000_hw *hw); void e1000_power_down_phy_copper(struct e1000_hw *hw); +void e1000e_disable_phy_retry(struct e1000_hw *hw); +void e1000e_enable_phy_retry(struct e1000_hw *hw); s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 13a05604dc..1bc5b6c0b8 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -1057,16 +1057,16 @@ static u32 fm10k_get_rssrk_size(struct net_device __always_unused *netdev) return FM10K_RSSRK_SIZE * FM10K_RSSRK_ENTRIES_PER_REG; } -static int fm10k_get_rssh(struct net_device *netdev, u32 *indir, u8 *key, - u8 *hfunc) +static int fm10k_get_rssh(struct net_device *netdev, + struct ethtool_rxfh_param *rxfh) { struct fm10k_intfc *interface = netdev_priv(netdev); + u8 *key = rxfh->key; int i, err; - if (hfunc) - *hfunc = ETH_RSS_HASH_TOP; + rxfh->hfunc = ETH_RSS_HASH_TOP; - err = fm10k_get_reta(netdev, indir); + err = fm10k_get_reta(netdev, rxfh->indir); if (err || !key) return err; @@ -1076,23 +1076,25 @@ static int fm10k_get_rssh(struct net_device *netdev, u32 *indir, u8 *key, return 0; } -static int fm10k_set_rssh(struct net_device *netdev, const u32 *indir, - const u8 *key, const u8 hfunc) +static int fm10k_set_rssh(struct net_device *netdev, + struct 
ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) { struct fm10k_intfc *interface = netdev_priv(netdev); struct fm10k_hw *hw = &interface->hw; int i, err; /* We do not allow change in unsupported parameters */ - if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && + rxfh->hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; - err = fm10k_set_reta(netdev, indir); - if (err || !key) + err = fm10k_set_reta(netdev, rxfh->indir); + if (err || !rxfh->key) return err; - for (i = 0; i < FM10K_RSSRK_SIZE; i++, key += 4) { - u32 rssrk = le32_to_cpu(*(__le32 *)key); + for (i = 0; i < FM10K_RSSRK_SIZE; i++, rxfh->key += 4) { + u32 rssrk = le32_to_cpu(*(__le32 *)rxfh->key); if (interface->rssrk[i] == rssrk) continue; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index ae700a1807..98861cc6df 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -866,8 +866,7 @@ static s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw, * register is RO from the VF, so the PF must do this even in the * case of notifying the VF of a new VID via the mailbox. */ - txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) & - FM10K_TXQCTL_VID_MASK; + txqctl = FIELD_PREP(FM10K_TXQCTL_VID_MASK, vf_vid); txqctl |= (vf_idx << FM10K_TXQCTL_TC_SHIFT) | FM10K_TXQCTL_VF | vf_idx; @@ -1576,8 +1575,7 @@ static s32 fm10k_get_fault_pf(struct fm10k_hw *hw, int type, if (func & FM10K_FAULT_FUNC_PF) fault->func = 0; else - fault->func = 1 + ((func & FM10K_FAULT_FUNC_VF_MASK) >> - FM10K_FAULT_FUNC_VF_SHIFT); + fault->func = 1 + FIELD_GET(FM10K_FAULT_FUNC_VF_MASK, func); /* record fault type */ fault->type = func & FM10K_FAULT_FUNC_TYPE_MASK; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c index c50928ec14..7fb1961f29 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c @@ -127,15 +127,14 @@ static s32 fm10k_init_hw_vf(struct fm10k_hw *hw) hw->mac.max_queues = i; /* fetch default VLAN and ITR scale */ - hw->mac.default_vid = (fm10k_read_reg(hw, FM10K_TXQCTL(0)) & - FM10K_TXQCTL_VID_MASK) >> FM10K_TXQCTL_VID_SHIFT; + hw->mac.default_vid = FIELD_GET(FM10K_TXQCTL_VID_MASK, + fm10k_read_reg(hw, FM10K_TXQCTL(0))); /* Read the ITR scale from TDLEN. See the definition of * FM10K_TDLEN_ITR_SCALE_SHIFT for more information about how TDLEN is * used here. */ - hw->mac.itr_scale = (fm10k_read_reg(hw, FM10K_TDLEN(0)) & - FM10K_TDLEN_ITR_SCALE_MASK) >> - FM10K_TDLEN_ITR_SCALE_SHIFT; + hw->mac.itr_scale = FIELD_GET(FM10K_TDLEN_ITR_SCALE_MASK, + fm10k_read_reg(hw, FM10K_TDLEN(0))); return 0; diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 1bf424ac31..4d2b05de6c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -24,6 +24,7 @@ #define I40E_MAX_VEB 16 #define I40E_MAX_NUM_DESCRIPTORS 4096 +#define I40E_MAX_NUM_DESCRIPTORS_XL710 8160 #define I40E_MAX_CSR_SPACE (4 * 1024 * 1024 - 64 * 1024) #define I40E_DEFAULT_NUM_DESCRIPTORS 512 #define I40E_REQ_DESCRIPTOR_MULTIPLE 32 @@ -33,11 +34,11 @@ #define I40E_MIN_VSI_ALLOC 83 /* LAN, ATR, FCOE, 64 VF */ /* max 16 qps */ #define i40e_default_queues_per_vmdq(pf) \ - (((pf)->hw_features & I40E_HW_RSS_AQ_CAPABLE) ? 4 : 1) + (test_bit(I40E_HW_CAP_RSS_AQ, (pf)->hw.caps) ? 
4 : 1) #define I40E_DEFAULT_QUEUES_PER_VF 4 #define I40E_MAX_VF_QUEUES 16 #define i40e_pf_get_max_q_per_tc(pf) \ - (((pf)->hw_features & I40E_HW_128_QP_RSS_CAPABLE) ? 128 : 64) + (test_bit(I40E_HW_CAP_128_QP_RSS, (pf)->hw.caps) ? 128 : 64) #define I40E_FDIR_RING_COUNT 32 #define I40E_MAX_AQ_BUF_SIZE 4096 #define I40E_AQ_LEN 256 @@ -78,7 +79,7 @@ #define I40E_MAX_BW_INACTIVE_ACCUM 4 /* accumulate 4 credits max */ /* driver state flags */ -enum i40e_state_t { +enum i40e_state { __I40E_TESTING, __I40E_CONFIG_BUSY, __I40E_CONFIG_DONE, @@ -126,7 +127,7 @@ enum i40e_state_t { BIT_ULL(__I40E_PF_RESET_AND_REBUILD_REQUESTED) /* VSI state flags */ -enum i40e_vsi_state_t { +enum i40e_vsi_state { __I40E_VSI_DOWN, __I40E_VSI_NEEDS_RESTART, __I40E_VSI_SYNCING_FILTERS, @@ -138,6 +139,60 @@ enum i40e_vsi_state_t { __I40E_VSI_STATE_SIZE__, }; +enum i40e_pf_flags { + I40E_FLAG_MSI_ENA, + I40E_FLAG_MSIX_ENA, + I40E_FLAG_RSS_ENA, + I40E_FLAG_VMDQ_ENA, + I40E_FLAG_SRIOV_ENA, + I40E_FLAG_DCB_CAPABLE, + I40E_FLAG_DCB_ENA, + I40E_FLAG_FD_SB_ENA, + I40E_FLAG_FD_ATR_ENA, + I40E_FLAG_MFP_ENA, + I40E_FLAG_HW_ATR_EVICT_ENA, + I40E_FLAG_VEB_MODE_ENA, + I40E_FLAG_VEB_STATS_ENA, + I40E_FLAG_LINK_POLLING_ENA, + I40E_FLAG_TRUE_PROMISC_ENA, + I40E_FLAG_LEGACY_RX_ENA, + I40E_FLAG_PTP_ENA, + I40E_FLAG_IWARP_ENA, + I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, + I40E_FLAG_SOURCE_PRUNING_DIS, + I40E_FLAG_TC_MQPRIO_ENA, + I40E_FLAG_FD_SB_INACTIVE, + I40E_FLAG_FD_SB_TO_CLOUD_FILTER, + I40E_FLAG_FW_LLDP_DIS, + I40E_FLAG_RS_FEC, + I40E_FLAG_BASE_R_FEC, + /* TOTAL_PORT_SHUTDOWN_ENA + * Allows to physically disable the link on the NIC's port. + * If enabled, (after link down request from the OS) + * no link, traffic or led activity is possible on that port. + * + * If I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA is set, the + * I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA must be explicitly forced + * to true and cannot be disabled by system admin at that time. 
+ * The functionalities are exclusive in terms of configuration, but + * they also have similar behavior (allowing to disable physical + * link of the port), with following differences: + * - LINK_DOWN_ON_CLOSE_ENA is configurable at host OS run-time and + * is supported by whole family of 7xx Intel Ethernet Controllers + * - TOTAL_PORT_SHUTDOWN_ENA may be enabled only before OS loads + * (in BIOS) only if motherboard's BIOS and NIC's FW has support of it + * - when LINK_DOWN_ON_CLOSE_ENABLED is used, the link is being brought + * down by sending phy_type=0 to NIC's FW + * - when TOTAL_PORT_SHUTDOWN_ENA is used, phy_type is not altered, + * instead the link is being brought down by clearing + * bit (I40E_AQ_PHY_ENABLE_LINK) in abilities field of + * i40e_aq_set_phy_config structure + */ + I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, + I40E_FLAG_VF_VLAN_PRUNING_ENA, + I40E_PF_FLAGS_NBITS, /* must be last */ +}; + enum i40e_interrupt_policy { I40E_INTERRUPT_BEST_CASE, I40E_INTERRUPT_MEDIUM, @@ -413,9 +468,7 @@ struct i40e_pf { struct i40e_hw hw; DECLARE_BITMAP(state, __I40E_STATE_SIZE__); struct msix_entry *msix_entries; - bool fc_autoneg_status; - u16 eeprom_version; u16 num_vmdq_vsis; /* num vmdq vsis this PF has set up */ u16 num_vmdq_qps; /* num queue pairs per vmdq pool */ u16 num_vmdq_msix; /* num queue vectors per vmdq pool */ @@ -431,7 +484,6 @@ struct i40e_pf { u16 rss_size_max; /* HW defined max RSS queues */ u16 fdir_pf_filter_count; /* num of guaranteed filters for this PF */ u16 num_alloc_vsi; /* num VSIs this driver supports */ - u8 atr_sample_rate; bool wol_en; struct hlist_head fdir_filter_list; @@ -469,89 +521,16 @@ struct i40e_pf { struct hlist_head cloud_filter_list; u16 num_cloud_filters; - enum i40e_interrupt_policy int_policy; u16 rx_itr_default; u16 tx_itr_default; u32 msg_enable; char int_name[I40E_INT_NAME_STR_LEN]; - u16 adminq_work_limit; /* num of admin receive queue desc to process */ unsigned long service_timer_period; unsigned long service_timer_previous; struct timer_list service_timer; struct work_struct service_task; - u32 hw_features; -#define I40E_HW_RSS_AQ_CAPABLE BIT(0) -#define I40E_HW_128_QP_RSS_CAPABLE BIT(1) -#define I40E_HW_ATR_EVICT_CAPABLE BIT(2) -#define I40E_HW_WB_ON_ITR_CAPABLE BIT(3) -#define I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE BIT(4) -#define I40E_HW_NO_PCI_LINK_CHECK BIT(5) -#define I40E_HW_100M_SGMII_CAPABLE BIT(6) -#define I40E_HW_NO_DCB_SUPPORT BIT(7) -#define I40E_HW_USE_SET_LLDP_MIB BIT(8) -#define I40E_HW_GENEVE_OFFLOAD_CAPABLE BIT(9) -#define I40E_HW_PTP_L4_CAPABLE BIT(10) -#define I40E_HW_WOL_MC_MAGIC_PKT_WAKE BIT(11) -#define I40E_HW_HAVE_CRT_RETIMER BIT(13) -#define I40E_HW_OUTER_UDP_CSUM_CAPABLE BIT(14) -#define I40E_HW_PHY_CONTROLS_LEDS BIT(15) -#define I40E_HW_STOP_FW_LLDP BIT(16) -#define I40E_HW_PORT_ID_VALID BIT(17) -#define I40E_HW_RESTART_AUTONEG BIT(18) - - u32 flags; -#define I40E_FLAG_RX_CSUM_ENABLED BIT(0) -#define I40E_FLAG_MSI_ENABLED BIT(1) -#define I40E_FLAG_MSIX_ENABLED BIT(2) -#define I40E_FLAG_RSS_ENABLED BIT(3) -#define I40E_FLAG_VMDQ_ENABLED BIT(4) -#define I40E_FLAG_SRIOV_ENABLED BIT(5) -#define I40E_FLAG_DCB_CAPABLE BIT(6) -#define I40E_FLAG_DCB_ENABLED BIT(7) -#define I40E_FLAG_FD_SB_ENABLED BIT(8) -#define I40E_FLAG_FD_ATR_ENABLED BIT(9) -#define I40E_FLAG_MFP_ENABLED BIT(10) -#define I40E_FLAG_HW_ATR_EVICT_ENABLED BIT(11) -#define I40E_FLAG_VEB_MODE_ENABLED BIT(12) -#define I40E_FLAG_VEB_STATS_ENABLED BIT(13) -#define I40E_FLAG_LINK_POLLING_ENABLED BIT(14) -#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT(15) -#define 
I40E_FLAG_LEGACY_RX BIT(16) -#define I40E_FLAG_PTP BIT(17) -#define I40E_FLAG_IWARP_ENABLED BIT(18) -#define I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED BIT(19) -#define I40E_FLAG_SOURCE_PRUNING_DISABLED BIT(20) -#define I40E_FLAG_TC_MQPRIO BIT(21) -#define I40E_FLAG_FD_SB_INACTIVE BIT(22) -#define I40E_FLAG_FD_SB_TO_CLOUD_FILTER BIT(23) -#define I40E_FLAG_DISABLE_FW_LLDP BIT(24) -#define I40E_FLAG_RS_FEC BIT(25) -#define I40E_FLAG_BASE_R_FEC BIT(26) -/* TOTAL_PORT_SHUTDOWN - * Allows to physically disable the link on the NIC's port. - * If enabled, (after link down request from the OS) - * no link, traffic or led activity is possible on that port. - * - * If I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED is set, the - * I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED must be explicitly forced to true - * and cannot be disabled by system admin at that time. - * The functionalities are exclusive in terms of configuration, but they also - * have similar behavior (allowing to disable physical link of the port), - * with following differences: - * - LINK_DOWN_ON_CLOSE_ENABLED is configurable at host OS run-time and is - * supported by whole family of 7xx Intel Ethernet Controllers - * - TOTAL_PORT_SHUTDOWN may be enabled only before OS loads (in BIOS) - * only if motherboard's BIOS and NIC's FW has support of it - * - when LINK_DOWN_ON_CLOSE_ENABLED is used, the link is being brought down - * by sending phy_type=0 to NIC's FW - * - when TOTAL_PORT_SHUTDOWN is used, phy_type is not altered, instead - * the link is being brought down by clearing bit (I40E_AQ_PHY_ENABLE_LINK) - * in abilities field of i40e_aq_set_phy_config structure - */ -#define I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED BIT(27) -#define I40E_FLAG_VF_VLAN_PRUNING BIT(28) - + DECLARE_BITMAP(flags, I40E_PF_FLAGS_NBITS); struct i40e_client_instance *cinst; bool stat_offsets_loaded; struct i40e_hw_port_stats stats; @@ -559,7 +538,6 @@ struct i40e_pf { u32 tx_timeout_count; u32 tx_timeout_recovery_level; unsigned long tx_timeout_last_recovery; - u32 tx_sluggish_count; u32 hw_csum_rx_error; u32 led_status; u16 corer_count; /* Core reset count */ @@ -581,17 +559,13 @@ struct i40e_pf { struct i40e_lump_tracking *irq_pile; /* switch config info */ - u16 pf_seid; u16 main_vsi_seid; u16 mac_seid; - struct kobject *switch_kobj; #ifdef CONFIG_DEBUG_FS struct dentry *i40e_dbg_pf; #endif /* CONFIG_DEBUG_FS */ bool cur_promisc; - u16 instance; /* A unique number per i40e_pf instance in the system */ - /* sr-iov config info */ struct i40e_vf *vf; int num_alloc_vfs; /* actual number of VFs allocated */ @@ -685,9 +659,7 @@ struct i40e_pf { unsigned long ptp_tx_start; struct hwtstamp_config tstamp_config; struct timespec64 ptp_prev_hw_time; - struct work_struct ptp_pps_work; struct work_struct ptp_extts0_work; - struct work_struct ptp_extts1_work; ktime_t ptp_reset_start; struct mutex tmreg_lock; /* Used to protect the SYSTIME registers. */ u32 ptp_adj_mult; @@ -695,10 +667,7 @@ struct i40e_pf { u32 tx_hwtstamp_skipped; u32 rx_hwtstamp_cleared; u32 latch_event_flags; - u64 ptp_pps_start; - u32 pps_delay; spinlock_t ptp_rx_lock; /* Used to protect Rx timestamp registers. 
*/ - struct ptp_pin_desc ptp_pin[3]; unsigned long latch_events[4]; bool ptp_tx; bool ptp_rx; @@ -711,7 +680,6 @@ struct i40e_pf { u32 fd_inv; u16 phy_led_val; - u16 override_q_count; u16 last_sw_conf_flags; u16 last_sw_conf_valid_flags; /* List to keep previous DDP profiles to be rolled back in the future */ @@ -940,6 +908,7 @@ struct i40e_q_vector { struct rcu_head rcu; /* to avoid race with update stats on free */ char name[I40E_INT_NAME_STR_LEN]; bool arm_wb_state; + bool in_busy_poll; int irq_num; /* IRQ assigned to this q_vector */ } ____cacheline_internodealigned_in_smp; @@ -1267,7 +1236,7 @@ struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr); void i40e_vlan_stripping_enable(struct i40e_vsi *vsi); static inline bool i40e_is_sw_dcb(struct i40e_pf *pf) { - return !!(pf->flags & I40E_FLAG_DISABLE_FW_LLDP); + return test_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags); } #ifdef CONFIG_I40E_DCB @@ -1301,7 +1270,7 @@ int i40e_set_partition_bw_setting(struct i40e_pf *pf); int i40e_commit_partition_bw_setting(struct i40e_pf *pf); void i40e_print_link_message(struct i40e_vsi *vsi, bool isup); -void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags); +void i40e_set_fec_in_flags(u8 fec_cfg, unsigned long *flags); static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi) { @@ -1321,13 +1290,13 @@ int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi, * i40e_is_tc_mqprio_enabled - check if TC MQPRIO is enabled on PF * @pf: pointer to a pf. * - * Check and return value of flag I40E_FLAG_TC_MQPRIO. + * Check and return state of flag I40E_FLAG_TC_MQPRIO. * - * Return: I40E_FLAG_TC_MQPRIO set state. + * Return: true/false if I40E_FLAG_TC_MQPRIO is set or not **/ -static inline u32 i40e_is_tc_mqprio_enabled(struct i40e_pf *pf) +static inline bool i40e_is_tc_mqprio_enabled(struct i40e_pf *pf) { - return pf->flags & I40E_FLAG_TC_MQPRIO; + return test_bit(I40E_FLAG_TC_MQPRIO_ENA, pf->flags); } /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 9ce6e633cc..f73f5930fc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -9,40 +9,6 @@ static void i40e_resume_aq(struct i40e_hw *hw); /** - * i40e_adminq_init_regs - Initialize AdminQ registers - * @hw: pointer to the hardware structure - * - * This assumes the alloc_asq and alloc_arq functions have already been called - **/ -static void i40e_adminq_init_regs(struct i40e_hw *hw) -{ - /* set head and tail registers in our local struct */ - if (i40e_is_vf(hw)) { - hw->aq.asq.tail = I40E_VF_ATQT1; - hw->aq.asq.head = I40E_VF_ATQH1; - hw->aq.asq.len = I40E_VF_ATQLEN1; - hw->aq.asq.bal = I40E_VF_ATQBAL1; - hw->aq.asq.bah = I40E_VF_ATQBAH1; - hw->aq.arq.tail = I40E_VF_ARQT1; - hw->aq.arq.head = I40E_VF_ARQH1; - hw->aq.arq.len = I40E_VF_ARQLEN1; - hw->aq.arq.bal = I40E_VF_ARQBAL1; - hw->aq.arq.bah = I40E_VF_ARQBAH1; - } else { - hw->aq.asq.tail = I40E_PF_ATQT; - hw->aq.asq.head = I40E_PF_ATQH; - hw->aq.asq.len = I40E_PF_ATQLEN; - hw->aq.asq.bal = I40E_PF_ATQBAL; - hw->aq.asq.bah = I40E_PF_ATQBAH; - hw->aq.arq.tail = I40E_PF_ARQT; - hw->aq.arq.head = I40E_PF_ARQH; - hw->aq.arq.len = I40E_PF_ARQLEN; - hw->aq.arq.bal = I40E_PF_ARQBAL; - hw->aq.arq.bah = I40E_PF_ARQBAH; - } -} - -/** * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings * @hw: pointer to the hardware structure **/ @@ -267,17 +233,17 @@ static int i40e_config_asq_regs(struct i40e_hw *hw) u32 reg = 0; /* Clear Head and Tail */ - wr32(hw, 
hw->aq.asq.head, 0); - wr32(hw, hw->aq.asq.tail, 0); + wr32(hw, I40E_PF_ATQH, 0); + wr32(hw, I40E_PF_ATQT, 0); /* set starting point */ - wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries | + wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries | I40E_PF_ATQLEN_ATQENABLE_MASK)); - wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa)); - wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa)); + wr32(hw, I40E_PF_ATQBAL, lower_32_bits(hw->aq.asq.desc_buf.pa)); + wr32(hw, I40E_PF_ATQBAH, upper_32_bits(hw->aq.asq.desc_buf.pa)); /* Check one register to verify that config was applied */ - reg = rd32(hw, hw->aq.asq.bal); + reg = rd32(hw, I40E_PF_ATQBAL); if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa)) ret_code = -EIO; @@ -296,20 +262,20 @@ static int i40e_config_arq_regs(struct i40e_hw *hw) u32 reg = 0; /* Clear Head and Tail */ - wr32(hw, hw->aq.arq.head, 0); - wr32(hw, hw->aq.arq.tail, 0); + wr32(hw, I40E_PF_ARQH, 0); + wr32(hw, I40E_PF_ARQT, 0); /* set starting point */ - wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries | + wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries | I40E_PF_ARQLEN_ARQENABLE_MASK)); - wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa)); - wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa)); + wr32(hw, I40E_PF_ARQBAL, lower_32_bits(hw->aq.arq.desc_buf.pa)); + wr32(hw, I40E_PF_ARQBAH, upper_32_bits(hw->aq.arq.desc_buf.pa)); /* Update tail in the HW to post pre-allocated buffers */ - wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1); + wr32(hw, I40E_PF_ARQT, hw->aq.num_arq_entries - 1); /* Check one register to verify that config was applied */ - reg = rd32(hw, hw->aq.arq.bal); + reg = rd32(hw, I40E_PF_ARQBAL); if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa)) ret_code = -EIO; @@ -452,11 +418,11 @@ static int i40e_shutdown_asq(struct i40e_hw *hw) } /* Stop firmware AdminQ processing */ - wr32(hw, hw->aq.asq.head, 0); - wr32(hw, hw->aq.asq.tail, 0); - wr32(hw, hw->aq.asq.len, 0); - wr32(hw, hw->aq.asq.bal, 0); - wr32(hw, hw->aq.asq.bah, 0); + wr32(hw, I40E_PF_ATQH, 0); + wr32(hw, I40E_PF_ATQT, 0); + wr32(hw, I40E_PF_ATQLEN, 0); + wr32(hw, I40E_PF_ATQBAL, 0); + wr32(hw, I40E_PF_ATQBAH, 0); hw->aq.asq.count = 0; /* to indicate uninitialized queue */ @@ -486,11 +452,11 @@ static int i40e_shutdown_arq(struct i40e_hw *hw) } /* Stop firmware AdminQ processing */ - wr32(hw, hw->aq.arq.head, 0); - wr32(hw, hw->aq.arq.tail, 0); - wr32(hw, hw->aq.arq.len, 0); - wr32(hw, hw->aq.arq.bal, 0); - wr32(hw, hw->aq.arq.bah, 0); + wr32(hw, I40E_PF_ARQH, 0); + wr32(hw, I40E_PF_ARQT, 0); + wr32(hw, I40E_PF_ARQLEN, 0); + wr32(hw, I40E_PF_ARQBAL, 0); + wr32(hw, I40E_PF_ARQBAH, 0); hw->aq.arq.count = 0; /* to indicate uninitialized queue */ @@ -503,44 +469,76 @@ shutdown_arq_out: } /** - * i40e_set_hw_flags - set HW flags + * i40e_set_hw_caps - set HW flags * @hw: pointer to the hardware structure **/ -static void i40e_set_hw_flags(struct i40e_hw *hw) +static void i40e_set_hw_caps(struct i40e_hw *hw) { - struct i40e_adminq_info *aq = &hw->aq; - - hw->flags = 0; + bitmap_zero(hw->caps, I40E_HW_CAPS_NBITS); switch (hw->mac.type) { case I40E_MAC_XL710: - if (aq->api_maj_ver > 1 || - (aq->api_maj_ver == 1 && - aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710)) { - hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE; - hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE; + if (i40e_is_aq_api_ver_ge(hw, 1, + I40E_MINOR_VER_GET_LINK_INFO_XL710)) { + set_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps); + set_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, hw->caps); /* The ability to RX 
(not drop) 802.1ad frames */ - hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE; + set_bit(I40E_HW_CAP_802_1AD, hw->caps); + } + if (i40e_is_aq_api_ver_ge(hw, 1, 5)) { + /* Supported in FW API version higher than 1.4 */ + set_bit(I40E_HW_CAP_GENEVE_OFFLOAD, hw->caps); + } + if (i40e_is_fw_ver_lt(hw, 4, 33)) { + set_bit(I40E_HW_CAP_RESTART_AUTONEG, hw->caps); + /* No DCB support for FW < v4.33 */ + set_bit(I40E_HW_CAP_NO_DCB_SUPPORT, hw->caps); + } + if (i40e_is_fw_ver_lt(hw, 4, 3)) { + /* Disable FW LLDP if FW < v4.3 */ + set_bit(I40E_HW_CAP_STOP_FW_LLDP, hw->caps); + } + if (i40e_is_fw_ver_ge(hw, 4, 40)) { + /* Use the FW Set LLDP MIB API if FW >= v4.40 */ + set_bit(I40E_HW_CAP_USE_SET_LLDP_MIB, hw->caps); + } + if (i40e_is_fw_ver_ge(hw, 6, 0)) { + /* Enable PTP L4 if FW > v6.0 */ + set_bit(I40E_HW_CAP_PTP_L4, hw->caps); } break; case I40E_MAC_X722: - hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE | - I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK; + set_bit(I40E_HW_CAP_AQ_SRCTL_ACCESS_ENABLE, hw->caps); + set_bit(I40E_HW_CAP_NVM_READ_REQUIRES_LOCK, hw->caps); + set_bit(I40E_HW_CAP_RSS_AQ, hw->caps); + set_bit(I40E_HW_CAP_128_QP_RSS, hw->caps); + set_bit(I40E_HW_CAP_ATR_EVICT, hw->caps); + set_bit(I40E_HW_CAP_WB_ON_ITR, hw->caps); + set_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, hw->caps); + set_bit(I40E_HW_CAP_NO_PCI_LINK_CHECK, hw->caps); + set_bit(I40E_HW_CAP_USE_SET_LLDP_MIB, hw->caps); + set_bit(I40E_HW_CAP_GENEVE_OFFLOAD, hw->caps); + set_bit(I40E_HW_CAP_PTP_L4, hw->caps); + set_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, hw->caps); + set_bit(I40E_HW_CAP_OUTER_UDP_CSUM, hw->caps); + + if (rd32(hw, I40E_GLQF_FDEVICTENA(1)) != + I40E_FDEVICT_PCTYPE_DEFAULT) { + hw_warn(hw, "FD EVICT PCTYPES are not right, disable FD HW EVICT\n"); + clear_bit(I40E_HW_CAP_ATR_EVICT, hw->caps); + } - if (aq->api_maj_ver > 1 || - (aq->api_maj_ver == 1 && - aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722)) - hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE; + if (i40e_is_aq_api_ver_ge(hw, 1, + I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722)) + set_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, hw->caps); - if (aq->api_maj_ver > 1 || - (aq->api_maj_ver == 1 && - aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_X722)) - hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE; + if (i40e_is_aq_api_ver_ge(hw, 1, + I40E_MINOR_VER_GET_LINK_INFO_X722)) + set_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps); - if (aq->api_maj_ver > 1 || - (aq->api_maj_ver == 1 && - aq->api_min_ver >= I40E_MINOR_VER_FW_REQUEST_FEC_X722)) - hw->flags |= I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE; + if (i40e_is_aq_api_ver_ge(hw, 1, + I40E_MINOR_VER_FW_REQUEST_FEC_X722)) + set_bit(I40E_HW_CAP_X722_FEC_REQUEST, hw->caps); fallthrough; default: @@ -548,22 +546,18 @@ static void i40e_set_hw_flags(struct i40e_hw *hw) } /* Newer versions of firmware require lock when reading the NVM */ - if (aq->api_maj_ver > 1 || - (aq->api_maj_ver == 1 && - aq->api_min_ver >= 5)) - hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK; - - if (aq->api_maj_ver > 1 || - (aq->api_maj_ver == 1 && - aq->api_min_ver >= 8)) { - hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT; - hw->flags |= I40E_HW_FLAG_DROP_MODE; - } + if (i40e_is_aq_api_ver_ge(hw, 1, 5)) + set_bit(I40E_HW_CAP_NVM_READ_REQUIRES_LOCK, hw->caps); + + /* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */ + if (i40e_is_aq_api_ver_ge(hw, 1, 7)) + set_bit(I40E_HW_CAP_802_1AD, hw->caps); + + if (i40e_is_aq_api_ver_ge(hw, 1, 8)) + set_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps); - if (aq->api_maj_ver > 1 || - (aq->api_maj_ver == 1 && - 
aq->api_min_ver >= 9)) - hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED; + if (i40e_is_aq_api_ver_ge(hw, 1, 9)) + set_bit(I40E_HW_CAP_AQ_PHY_ACCESS_EXTENDED, hw->caps); } /** @@ -593,9 +587,6 @@ int i40e_init_adminq(struct i40e_hw *hw) goto init_adminq_exit; } - /* Set up register offsets */ - i40e_adminq_init_regs(hw); - /* setup ASQ command write back timeout */ hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT; @@ -633,7 +624,7 @@ int i40e_init_adminq(struct i40e_hw *hw) /* Some features were introduced in different FW API version * for different MAC type. */ - i40e_set_hw_flags(hw); + i40e_set_hw_caps(hw); /* get the NVM version info */ i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION, @@ -648,25 +639,7 @@ int i40e_init_adminq(struct i40e_hw *hw) &oem_lo); hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo; - if (hw->mac.type == I40E_MAC_XL710 && - hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && - hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) { - hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE; - hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE; - } - if (hw->mac.type == I40E_MAC_X722 && - hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && - hw->aq.api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722) { - hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE; - } - - /* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */ - if (hw->aq.api_maj_ver > 1 || - (hw->aq.api_maj_ver == 1 && - hw->aq.api_min_ver >= 7)) - hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE; - - if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) { + if (i40e_is_aq_api_ver_ge(hw, I40E_FW_API_VERSION_MAJOR + 1, 0)) { ret_code = -EIO; goto init_adminq_free_arq; } @@ -723,9 +696,9 @@ static u16 i40e_clean_asq(struct i40e_hw *hw) desc = I40E_ADMINQ_DESC(*asq, ntc); details = I40E_ADMINQ_DETAILS(*asq, ntc); - while (rd32(hw, hw->aq.asq.head) != ntc) { + while (rd32(hw, I40E_PF_ATQH) != ntc) { i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, - "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head)); + "ntc %d head %d.\n", ntc, rd32(hw, I40E_PF_ATQH)); if (details->callback) { I40E_ADMINQ_CALLBACK cb_func = @@ -759,7 +732,7 @@ static bool i40e_asq_done(struct i40e_hw *hw) /* AQ designers suggest use of head for better * timing reliability than DD bit */ - return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use; + return rd32(hw, I40E_PF_ATQH) == hw->aq.asq.next_to_use; } @@ -800,7 +773,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw, hw->aq.asq_last_status = I40E_AQ_RC_OK; - val = rd32(hw, hw->aq.asq.head); + val = rd32(hw, I40E_PF_ATQH); if (val >= hw->aq.num_asq_entries) { i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: head overrun at %d\n", val); @@ -892,7 +865,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw, if (hw->aq.asq.next_to_use == hw->aq.asq.count) hw->aq.asq.next_to_use = 0; if (!details->postpone) - wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use); + wr32(hw, I40E_PF_ATQT, hw->aq.asq.next_to_use); /* if cmd_details are not defined or async flag is not set, * we need to wait for desc write back @@ -952,7 +925,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw, /* update the error if time out occurred */ if ((!cmd_completed) && (!details->async && !details->postpone)) { - if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) { + if (rd32(hw, I40E_PF_ATQLEN) & I40E_GL_ATQLEN_ATQCRIT_MASK) { i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: AQ Critical error.\n"); status = -EIO; @@ -1106,7 +1079,7 @@ int i40e_clean_arq_element(struct i40e_hw *hw, } /* set next_to_use to head */ - ntu = 
rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK; + ntu = rd32(hw, I40E_PF_ARQH) & I40E_PF_ARQH_ARQH_MASK; if (ntu == ntc) { /* nothing to do - shouldn't need to update ring's values */ ret_code = -EALREADY; @@ -1154,7 +1127,7 @@ int i40e_clean_arq_element(struct i40e_hw *hw, desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa)); /* set tail = the last cleaned desc index. */ - wr32(hw, hw->aq.arq.tail, ntc); + wr32(hw, I40E_PF_ARQT, ntc); /* ntc is updated to tail + 1 */ ntc++; if (ntc == hw->aq.num_arq_entries) diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h index 80125bea80..ee86d2c530 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h @@ -29,13 +29,6 @@ struct i40e_adminq_ring { /* used for interrupt processing */ u16 next_to_use; u16 next_to_clean; - - /* used for queue tracking */ - u32 head; - u32 tail; - u32 len; - u32 bah; - u32 bal; }; /* ASQ transaction details */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 18a1c3b6d7..c8f35d4de2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -5,6 +5,7 @@ #define _I40E_ADMINQ_CMD_H_ #include <linux/bits.h> +#include <linux/types.h> /* This header file defines the i40e Admin Queue commands and is shared between * i40e Firmware and Software. diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 3eb6564c1c..de6ca62957 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -196,11 +196,11 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, **/ bool i40e_check_asq_alive(struct i40e_hw *hw) { - if (hw->aq.asq.len) - return !!(rd32(hw, hw->aq.asq.len) & - I40E_PF_ATQLEN_ATQENABLE_MASK); - else + /* Check if the queue is initialized */ + if (!hw->aq.asq.count) return false; + + return !!(rd32(hw, I40E_PF_ATQLEN) & I40E_PF_ATQLEN_ATQENABLE_MASK); } /** @@ -249,6 +249,7 @@ static int i40e_aq_get_set_rss_lut(struct i40e_hw *hw, struct i40e_aqc_get_set_rss_lut *cmd_resp = (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw; int status; + u16 flags; if (set) i40e_fill_default_direct_cmd_desc(&desc, @@ -261,23 +262,18 @@ static int i40e_aq_get_set_rss_lut(struct i40e_hw *hw, desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); - cmd_resp->vsi_id = - cpu_to_le16((u16)((vsi_id << - I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) & - I40E_AQC_SET_RSS_LUT_VSI_ID_MASK)); - cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID); + vsi_id = FIELD_PREP(I40E_AQC_SET_RSS_LUT_VSI_ID_MASK, vsi_id) | + FIELD_PREP(I40E_AQC_SET_RSS_LUT_VSI_VALID, 1); + cmd_resp->vsi_id = cpu_to_le16(vsi_id); if (pf_lut) - cmd_resp->flags |= cpu_to_le16((u16) - ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF << - I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & - I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); + flags = FIELD_PREP(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK, + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF); else - cmd_resp->flags |= cpu_to_le16((u16) - ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI << - I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & - I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); + flags = FIELD_PREP(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK, + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI); + cmd_resp->flags = cpu_to_le16(flags); status = i40e_asq_send_command(hw, 
&desc, lut, lut_size, NULL); return status; @@ -347,11 +343,9 @@ static int i40e_aq_get_set_rss_key(struct i40e_hw *hw, desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); - cmd_resp->vsi_id = - cpu_to_le16((u16)((vsi_id << - I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) & - I40E_AQC_SET_RSS_KEY_VSI_ID_MASK)); - cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID); + vsi_id = FIELD_PREP(I40E_AQC_SET_RSS_KEY_VSI_ID_MASK, vsi_id) | + FIELD_PREP(I40E_AQC_SET_RSS_KEY_VSI_VALID, 1); + cmd_resp->vsi_id = cpu_to_le16(vsi_id); status = i40e_asq_send_command(hw, &desc, key, key_size, NULL); @@ -670,11 +664,11 @@ int i40e_init_shared_code(struct i40e_hw *hw) hw->phy.get_link_info = true; /* Determine port number and PF number*/ - port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) - >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT; + port = FIELD_GET(I40E_PFGEN_PORTNUM_PORT_NUM_MASK, + rd32(hw, I40E_PFGEN_PORTNUM)); hw->port = (u8)port; - ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >> - I40E_GLPCI_CAPSUP_ARI_EN_SHIFT; + ari = FIELD_GET(I40E_GLPCI_CAPSUP_ARI_EN_MASK, + rd32(hw, I40E_GLPCI_CAPSUP)); func_rid = rd32(hw, I40E_PF_FUNC_RID); if (ari) hw->pf_id = (u8)(func_rid & 0xff); @@ -992,9 +986,8 @@ int i40e_pf_reset(struct i40e_hw *hw) * The grst delay value is in 100ms units, and we'll wait a * couple counts longer to be sure we don't just miss the end. */ - grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) & - I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >> - I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; + grst_del = FIELD_GET(I40E_GLGEN_RSTCTL_GRSTDEL_MASK, + rd32(hw, I40E_GLGEN_RSTCTL)); /* It can take upto 15 secs for GRST steady state. * Bump it to 16 secs max to be safe. @@ -1086,26 +1079,20 @@ void i40e_clear_hw(struct i40e_hw *hw) /* get number of interrupts, queues, and VFs */ val = rd32(hw, I40E_GLPCI_CNF2); - num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >> - I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT; - num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >> - I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT; + num_pf_int = FIELD_GET(I40E_GLPCI_CNF2_MSI_X_PF_N_MASK, val); + num_vf_int = FIELD_GET(I40E_GLPCI_CNF2_MSI_X_VF_N_MASK, val); val = rd32(hw, I40E_PFLAN_QALLOC); - base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >> - I40E_PFLAN_QALLOC_FIRSTQ_SHIFT; - j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >> - I40E_PFLAN_QALLOC_LASTQ_SHIFT; + base_queue = FIELD_GET(I40E_PFLAN_QALLOC_FIRSTQ_MASK, val); + j = FIELD_GET(I40E_PFLAN_QALLOC_LASTQ_MASK, val); if (val & I40E_PFLAN_QALLOC_VALID_MASK && j >= base_queue) num_queues = (j - base_queue) + 1; else num_queues = 0; val = rd32(hw, I40E_PF_VT_PFALLOC); - i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >> - I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT; - j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >> - I40E_PF_VT_PFALLOC_LASTVF_SHIFT; + i = FIELD_GET(I40E_PF_VT_PFALLOC_FIRSTVF_MASK, val); + j = FIELD_GET(I40E_PF_VT_PFALLOC_LASTVF_MASK, val); if (val & I40E_PF_VT_PFALLOC_VALID_MASK && j >= i) num_vfs = (j - i) + 1; else @@ -1200,8 +1187,7 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx) !hw->func_caps.led[idx]) return 0; gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx)); - port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >> - I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT; + port = FIELD_GET(I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK, gpio_val); /* if PRT_NUM_NA is 1 then this LED is not port specific, OR * if it is not our port then ignore @@ -1245,8 +1231,7 @@ u32 i40e_led_get(struct i40e_hw *hw) if (!gpio_val) continue; - mode = (gpio_val & 
I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >> - I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT; + mode = FIELD_GET(I40E_GLGEN_GPIO_CTL_LED_MODE_MASK, gpio_val); break; } @@ -1289,14 +1274,14 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink) pin_func = I40E_PIN_FUNC_LED; gpio_val &= ~I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK; - gpio_val |= ((pin_func << - I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) & - I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK); + gpio_val |= + FIELD_PREP(I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK, + pin_func); } gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK; /* this & is a bit of paranoia, but serves as a range check */ - gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) & - I40E_GLGEN_GPIO_CTL_LED_MODE_MASK); + gpio_val |= FIELD_PREP(I40E_GLGEN_GPIO_CTL_LED_MODE_MASK, + mode); if (blink) gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT); @@ -1375,8 +1360,8 @@ i40e_aq_get_phy_capabilities(struct i40e_hw *hw, if (report_init) { if (hw->mac.type == I40E_MAC_XL710 && - hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && - hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) { + i40e_is_aq_api_ver_ge(hw, I40E_FW_API_VERSION_MAJOR, + I40E_MINOR_VER_GET_LINK_INFO_XL710)) { status = i40e_aq_get_link_info(hw, true, NULL, NULL); } else { hw->phy.phy_types = le32_to_cpu(abilities->phy_type); @@ -1646,12 +1631,11 @@ int i40e_aq_get_link_info(struct i40e_hw *hw, else hw_link_info->lse_enable = false; - if ((hw->mac.type == I40E_MAC_XL710) && - (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 && - hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE) + if (hw->mac.type == I40E_MAC_XL710 && i40e_is_fw_ver_lt(hw, 4, 40) && + hw_link_info->phy_type == 0xE) hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU; - if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE && + if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps) && hw->mac.type != I40E_MAC_X722) { __le32 tmp; @@ -1751,21 +1735,6 @@ int i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, } /** - * i40e_is_aq_api_ver_ge - * @aq: pointer to AdminQ info containing HW API version to compare - * @maj: API major value - * @min: API minor value - * - * Assert whether current HW API version is greater/equal than provided. 
- **/ -static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj, - u16 min) -{ - return (aq->api_maj_ver > maj || - (aq->api_maj_ver == maj && aq->api_min_ver >= min)); -} - -/** * i40e_aq_add_vsi * @hw: pointer to the hw struct * @vsi_ctx: pointer to a vsi context struct @@ -1891,14 +1860,14 @@ int i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, if (set) { flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; - if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) + if (rx_only_promisc && i40e_is_aq_api_ver_ge(hw, 1, 5)) flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY; } cmd->promiscuous_flags = cpu_to_le16(flags); cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); - if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) + if (i40e_is_aq_api_ver_ge(hw, 1, 5)) cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY); @@ -2001,13 +1970,13 @@ int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, if (enable) { flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; - if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) + if (i40e_is_aq_api_ver_ge(hw, 1, 5)) flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY; } cmd->promiscuous_flags = cpu_to_le16(flags); cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); - if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) + if (i40e_is_aq_api_ver_ge(hw, 1, 5)) cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY); cmd->seid = cpu_to_le16(seid); @@ -2254,7 +2223,7 @@ int i40e_aq_set_switch_config(struct i40e_hw *hw, scfg->flags = cpu_to_le16(flags); scfg->valid_flags = cpu_to_le16(valid_flags); scfg->mode = mode; - if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) { + if (test_bit(I40E_HW_CAP_802_1AD, hw->caps)) { scfg->switch_tag = cpu_to_le16(hw->switch_tag); scfg->first_tag = cpu_to_le16(hw->first_tag); scfg->second_tag = cpu_to_le16(hw->second_tag); @@ -3531,8 +3500,7 @@ int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK; - cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & - I40E_AQ_LLDP_BRIDGE_TYPE_MASK); + cmd->type |= FIELD_PREP(I40E_AQ_LLDP_BRIDGE_TYPE_MASK, bridge_type); desc.datalen = cpu_to_le16(buff_size); @@ -3638,7 +3606,7 @@ i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore, (struct i40e_aqc_lldp_restore *)&desc.params.raw; int status; - if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) { + if (!test_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps)) { i40e_debug(hw, I40E_DEBUG_ALL, "Restore LLDP not supported by current FW version.\n"); return -ENODEV; @@ -3681,7 +3649,7 @@ int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN; if (persist) { - if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) + if (test_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps)) cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST; else i40e_debug(hw, I40E_DEBUG_ALL, @@ -3714,7 +3682,7 @@ int i40e_aq_start_lldp(struct i40e_hw *hw, bool persist, cmd->command = I40E_AQ_LLDP_AGENT_START; if (persist) { - if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) + if (test_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps)) cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST; else i40e_debug(hw, I40E_DEBUG_ALL, @@ -3742,7 +3710,7 @@ i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable, (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw; int status; - if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) + if (!test_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, hw->caps)) return -ENODEV; 
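/*
 * A note on the version helpers used before and after this point: the
 * hunk above removes the file-local i40e_is_aq_api_ver_ge() in favour
 * of hw-level helpers (i40e_is_aq_api_ver_ge(hw, ...),
 * i40e_is_aq_api_ver_lt(), i40e_is_fw_ver_lt(), i40e_is_fw_ver_eq()).
 * The removed body documents the intended semantics: a lexicographic
 * compare of (major, minor). A minimal sketch of that comparison, with
 * hypothetical names, assuming the new helpers keep the same logic:
 *
 *	static bool ver_ge(u16 cur_maj, u16 cur_min, u16 maj, u16 min)
 *	{
 *		return cur_maj > maj ||
 *		       (cur_maj == maj && cur_min >= min);
 *	}
 *
 *	// "AQ API >= 1.5" then gates e.g. I40E_AQC_SET_VSI_PROMISC_RX_ONLY:
 *	// if (ver_ge(hw->aq.api_maj_ver, hw->aq.api_min_ver, 1, 5)) ...
 *
 * A _lt() variant is the negation of ver_ge(), and an _eq() variant
 * compares both fields for equality.
 */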
i40e_fill_default_direct_cmd_desc(&desc, @@ -4213,8 +4181,7 @@ i40e_validate_filter_settings(struct i40e_hw *hw, /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */ val = rd32(hw, I40E_GLHMC_FCOEFMAX); - fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK) - >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT; + fcoe_fmax = FIELD_GET(I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK, val); if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax) return -EINVAL; @@ -4250,30 +4217,25 @@ int i40e_set_filter_control(struct i40e_hw *hw, /* Program required PE hash buckets for the PF */ val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK; - val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) & - I40E_PFQF_CTL_0_PEHSIZE_MASK; + val |= FIELD_PREP(I40E_PFQF_CTL_0_PEHSIZE_MASK, settings->pe_filt_num); /* Program required PE contexts for the PF */ val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK; - val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) & - I40E_PFQF_CTL_0_PEDSIZE_MASK; + val |= FIELD_PREP(I40E_PFQF_CTL_0_PEDSIZE_MASK, settings->pe_cntx_num); /* Program required FCoE hash buckets for the PF */ val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK; - val |= ((u32)settings->fcoe_filt_num << - I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) & - I40E_PFQF_CTL_0_PFFCHSIZE_MASK; + val |= FIELD_PREP(I40E_PFQF_CTL_0_PFFCHSIZE_MASK, + settings->fcoe_filt_num); /* Program required FCoE DDP contexts for the PF */ val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK; - val |= ((u32)settings->fcoe_cntx_num << - I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) & - I40E_PFQF_CTL_0_PFFCDSIZE_MASK; + val |= FIELD_PREP(I40E_PFQF_CTL_0_PFFCDSIZE_MASK, + settings->fcoe_cntx_num); /* Program Hash LUT size for the PF */ val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512) hash_lut_size = 1; - val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) & - I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; + val |= FIELD_PREP(I40E_PFQF_CTL_0_HASHLUTSIZE_MASK, hash_lut_size); /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */ if (settings->enable_fdir) @@ -4674,8 +4636,7 @@ int i40e_read_phy_register_clause22(struct i40e_hw *hw, "PHY: Can't write command to external PHY.\n"); } else { command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); - *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >> - I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT; + *value = FIELD_GET(I40E_GLGEN_MSRWD_MDIRDDATA_MASK, command); } return status; @@ -4784,8 +4745,7 @@ int i40e_read_phy_register_clause45(struct i40e_hw *hw, if (!status) { command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); - *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >> - I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT; + *value = FIELD_GET(I40E_GLGEN_MSRWD_MDIRDDATA_MASK, command); } else { i40e_debug(hw, I40E_DEBUG_PHY, "PHY: Can't read register value from external PHY.\n"); @@ -5044,7 +5004,7 @@ static int i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr, u32 i; *reg_val = 0; - if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { + if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) { status = i40e_aq_get_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL, @@ -5077,7 +5037,7 @@ static int i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr, int status; u32 i; - if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { + if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) { status = i40e_aq_set_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL, @@ -5116,7 +5076,7 @@ int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, u8 port_num; u32 i; - if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { + if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, 
hw->caps)) { status = i40e_aq_get_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL, @@ -5239,14 +5199,14 @@ int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw, **/ u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr) { - bool use_register; + bool use_register = false; int status = 0; int retry = 5; u32 val = 0; - use_register = (((hw->aq.api_maj_ver == 1) && - (hw->aq.api_min_ver < 5)) || - (hw->mac.type == I40E_MAC_X722)); + if (i40e_is_aq_api_ver_lt(hw, 1, 5) || hw->mac.type == I40E_MAC_X722) + use_register = true; + if (!use_register) { do_retry: status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL); @@ -5301,13 +5261,13 @@ int i40e_aq_rx_ctl_write_register(struct i40e_hw *hw, **/ void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val) { - bool use_register; + bool use_register = false; int status = 0; int retry = 5; - use_register = (((hw->aq.api_maj_ver == 1) && - (hw->aq.api_min_ver < 5)) || - (hw->mac.type == I40E_MAC_X722)); + if (i40e_is_aq_api_ver_lt(hw, 1, 5) || hw->mac.type == I40E_MAC_X722) + use_register = true; + if (!use_register) { do_retry: status = i40e_aq_rx_ctl_write_register(hw, reg_addr, @@ -5335,16 +5295,17 @@ static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio, u8 mdio_num, struct i40e_aqc_phy_register_access *cmd) { - if (set_mdio && cmd->phy_interface == I40E_AQ_PHY_REG_ACCESS_EXTERNAL) { - if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED) - cmd->cmd_flags |= - I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER | - ((mdio_num << - I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT) & - I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK); - else - i40e_debug(hw, I40E_DEBUG_PHY, - "MDIO I/F number selection not supported by current FW version.\n"); + if (!set_mdio || + cmd->phy_interface != I40E_AQ_PHY_REG_ACCESS_EXTERNAL) + return; + + if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS_EXTENDED, hw->caps)) { + cmd->cmd_flags |= + I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER | + FIELD_PREP(I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK, + mdio_num); + } else { + i40e_debug(hw, I40E_DEBUG_PHY, "MDIO I/F number selection not supported by current FW version.\n"); } } @@ -5929,9 +5890,8 @@ i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid, u16 tnl_type; u32 ti; - tnl_type = (le16_to_cpu(filters[i].element.flags) & - I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >> - I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT; + tnl_type = le16_get_bits(filters[i].element.flags, + I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK); /* Due to hardware eccentricities, the VNI for Geneve is shifted * one more byte further than normally used for Tenant ID in @@ -6023,9 +5983,8 @@ i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid, u16 tnl_type; u32 ti; - tnl_type = (le16_to_cpu(filters[i].element.flags) & - I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >> - I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT; + tnl_type = le16_get_bits(filters[i].element.flags, + I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK); /* Due to hardware eccentricities, the VNI for Geneve is shifted * one more byte further than normally used for Tenant ID in diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c index d57dd30b02..8db1eb0c17 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c @@ -22,8 +22,7 @@ int i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status) return -EINVAL; reg = rd32(hw, I40E_PRTDCB_GENS); - *status = (u16)((reg & I40E_PRTDCB_GENS_DCBX_STATUS_MASK) >> - I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT); + *status = FIELD_GET(I40E_PRTDCB_GENS_DCBX_STATUS_MASK, 
reg); return 0; } @@ -52,12 +51,9 @@ static void i40e_parse_ieee_etscfg_tlv(struct i40e_lldp_org_tlv *tlv, * |1bit | 1bit|3 bits|3bits| */ etscfg = &dcbcfg->etscfg; - etscfg->willing = (u8)((buf[offset] & I40E_IEEE_ETS_WILLING_MASK) >> - I40E_IEEE_ETS_WILLING_SHIFT); - etscfg->cbs = (u8)((buf[offset] & I40E_IEEE_ETS_CBS_MASK) >> - I40E_IEEE_ETS_CBS_SHIFT); - etscfg->maxtcs = (u8)((buf[offset] & I40E_IEEE_ETS_MAXTC_MASK) >> - I40E_IEEE_ETS_MAXTC_SHIFT); + etscfg->willing = FIELD_GET(I40E_IEEE_ETS_WILLING_MASK, buf[offset]); + etscfg->cbs = FIELD_GET(I40E_IEEE_ETS_CBS_MASK, buf[offset]); + etscfg->maxtcs = FIELD_GET(I40E_IEEE_ETS_MAXTC_MASK, buf[offset]); /* Move offset to Priority Assignment Table */ offset++; @@ -71,11 +67,9 @@ static void i40e_parse_ieee_etscfg_tlv(struct i40e_lldp_org_tlv *tlv, * ----------------------------------------- */ for (i = 0; i < 4; i++) { - priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >> - I40E_IEEE_ETS_PRIO_1_SHIFT); - etscfg->prioritytable[i * 2] = priority; - priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >> - I40E_IEEE_ETS_PRIO_0_SHIFT); + priority = FIELD_GET(I40E_IEEE_ETS_PRIO_1_MASK, buf[offset]); + etscfg->prioritytable[i * 2] = priority; + priority = FIELD_GET(I40E_IEEE_ETS_PRIO_0_MASK, buf[offset]); etscfg->prioritytable[i * 2 + 1] = priority; offset++; } @@ -126,12 +120,10 @@ static void i40e_parse_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv, * ----------------------------------------- */ for (i = 0; i < 4; i++) { - priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >> - I40E_IEEE_ETS_PRIO_1_SHIFT); - dcbcfg->etsrec.prioritytable[i*2] = priority; - priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >> - I40E_IEEE_ETS_PRIO_0_SHIFT); - dcbcfg->etsrec.prioritytable[i*2 + 1] = priority; + priority = FIELD_GET(I40E_IEEE_ETS_PRIO_1_MASK, buf[offset]); + dcbcfg->etsrec.prioritytable[i * 2] = priority; + priority = FIELD_GET(I40E_IEEE_ETS_PRIO_0_MASK, buf[offset]); + dcbcfg->etsrec.prioritytable[(i * 2) + 1] = priority; offset++; } @@ -172,12 +164,9 @@ static void i40e_parse_ieee_pfccfg_tlv(struct i40e_lldp_org_tlv *tlv, * ----------------------------------------- * |1bit | 1bit|2 bits|4bits| 1 octet | */ - dcbcfg->pfc.willing = (u8)((buf[0] & I40E_IEEE_PFC_WILLING_MASK) >> - I40E_IEEE_PFC_WILLING_SHIFT); - dcbcfg->pfc.mbc = (u8)((buf[0] & I40E_IEEE_PFC_MBC_MASK) >> - I40E_IEEE_PFC_MBC_SHIFT); - dcbcfg->pfc.pfccap = (u8)((buf[0] & I40E_IEEE_PFC_CAP_MASK) >> - I40E_IEEE_PFC_CAP_SHIFT); + dcbcfg->pfc.willing = FIELD_GET(I40E_IEEE_PFC_WILLING_MASK, buf[0]); + dcbcfg->pfc.mbc = FIELD_GET(I40E_IEEE_PFC_MBC_MASK, buf[0]); + dcbcfg->pfc.pfccap = FIELD_GET(I40E_IEEE_PFC_CAP_MASK, buf[0]); dcbcfg->pfc.pfcenable = buf[1]; } @@ -198,8 +187,7 @@ static void i40e_parse_ieee_app_tlv(struct i40e_lldp_org_tlv *tlv, u8 *buf; typelength = ntohs(tlv->typelength); - length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> - I40E_LLDP_TLV_LEN_SHIFT); + length = FIELD_GET(I40E_LLDP_TLV_LEN_MASK, typelength); buf = tlv->tlvinfo; /* The App priority table starts 5 octets after TLV header */ @@ -217,12 +205,10 @@ static void i40e_parse_ieee_app_tlv(struct i40e_lldp_org_tlv *tlv, * ----------------------------------------- */ while (offset < length) { - dcbcfg->app[i].priority = (u8)((buf[offset] & - I40E_IEEE_APP_PRIO_MASK) >> - I40E_IEEE_APP_PRIO_SHIFT); - dcbcfg->app[i].selector = (u8)((buf[offset] & - I40E_IEEE_APP_SEL_MASK) >> - I40E_IEEE_APP_SEL_SHIFT); + dcbcfg->app[i].priority = FIELD_GET(I40E_IEEE_APP_PRIO_MASK, + buf[offset]); + 
dcbcfg->app[i].selector = FIELD_GET(I40E_IEEE_APP_SEL_MASK, + buf[offset]); dcbcfg->app[i].protocolid = (buf[offset + 1] << 0x8) | buf[offset + 2]; /* Move to next app */ @@ -250,8 +236,7 @@ static void i40e_parse_ieee_tlv(struct i40e_lldp_org_tlv *tlv, u8 subtype; ouisubtype = ntohl(tlv->ouisubtype); - subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >> - I40E_LLDP_TLV_SUBTYPE_SHIFT); + subtype = FIELD_GET(I40E_LLDP_TLV_SUBTYPE_MASK, ouisubtype); switch (subtype) { case I40E_IEEE_SUBTYPE_ETS_CFG: i40e_parse_ieee_etscfg_tlv(tlv, dcbcfg); @@ -301,11 +286,9 @@ static void i40e_parse_cee_pgcfg_tlv(struct i40e_cee_feat_tlv *tlv, * ----------------------------------------- */ for (i = 0; i < 4; i++) { - priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_1_MASK) >> - I40E_CEE_PGID_PRIO_1_SHIFT); - etscfg->prioritytable[i * 2] = priority; - priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_0_MASK) >> - I40E_CEE_PGID_PRIO_0_SHIFT); + priority = FIELD_GET(I40E_CEE_PGID_PRIO_1_MASK, buf[offset]); + etscfg->prioritytable[i * 2] = priority; + priority = FIELD_GET(I40E_CEE_PGID_PRIO_0_MASK, buf[offset]); etscfg->prioritytable[i * 2 + 1] = priority; offset++; } @@ -362,8 +345,7 @@ static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv, u8 i; typelength = ntohs(tlv->hdr.typelen); - length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> - I40E_LLDP_TLV_LEN_SHIFT); + length = FIELD_GET(I40E_LLDP_TLV_LEN_MASK, typelength); dcbcfg->numapps = length / sizeof(*app); @@ -419,15 +401,13 @@ static void i40e_parse_cee_tlv(struct i40e_lldp_org_tlv *tlv, u32 ouisubtype; ouisubtype = ntohl(tlv->ouisubtype); - subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >> - I40E_LLDP_TLV_SUBTYPE_SHIFT); + subtype = FIELD_GET(I40E_LLDP_TLV_SUBTYPE_MASK, ouisubtype); /* Return if not CEE DCBX */ if (subtype != I40E_CEE_DCBX_TYPE) return; typelength = ntohs(tlv->typelength); - tlvlen = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> - I40E_LLDP_TLV_LEN_SHIFT); + tlvlen = FIELD_GET(I40E_LLDP_TLV_LEN_MASK, typelength); len = sizeof(tlv->typelength) + sizeof(ouisubtype) + sizeof(struct i40e_cee_ctrl_tlv); /* Return if no CEE DCBX Feature TLVs */ @@ -437,11 +417,8 @@ static void i40e_parse_cee_tlv(struct i40e_lldp_org_tlv *tlv, sub_tlv = (struct i40e_cee_feat_tlv *)((char *)tlv + len); while (feat_tlv_count < I40E_CEE_MAX_FEAT_TYPE) { typelength = ntohs(sub_tlv->hdr.typelen); - sublen = (u16)((typelength & - I40E_LLDP_TLV_LEN_MASK) >> - I40E_LLDP_TLV_LEN_SHIFT); - subtype = (u8)((typelength & I40E_LLDP_TLV_TYPE_MASK) >> - I40E_LLDP_TLV_TYPE_SHIFT); + sublen = FIELD_GET(I40E_LLDP_TLV_LEN_MASK, typelength); + subtype = FIELD_GET(I40E_LLDP_TLV_TYPE_MASK, typelength); switch (subtype) { case I40E_CEE_SUBTYPE_PG_CFG: i40e_parse_cee_pgcfg_tlv(sub_tlv, dcbcfg); @@ -478,8 +455,7 @@ static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv, u32 oui; ouisubtype = ntohl(tlv->ouisubtype); - oui = (u32)((ouisubtype & I40E_LLDP_TLV_OUI_MASK) >> - I40E_LLDP_TLV_OUI_SHIFT); + oui = FIELD_GET(I40E_LLDP_TLV_OUI_MASK, ouisubtype); switch (oui) { case I40E_IEEE_8021QAZ_OUI: i40e_parse_ieee_tlv(tlv, dcbcfg); @@ -517,10 +493,8 @@ int i40e_lldp_to_dcb_config(u8 *lldpmib, tlv = (struct i40e_lldp_org_tlv *)lldpmib; while (1) { typelength = ntohs(tlv->typelength); - type = (u16)((typelength & I40E_LLDP_TLV_TYPE_MASK) >> - I40E_LLDP_TLV_TYPE_SHIFT); - length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> - I40E_LLDP_TLV_LEN_SHIFT); + type = FIELD_GET(I40E_LLDP_TLV_TYPE_MASK, typelength); + length = FIELD_GET(I40E_LLDP_TLV_LEN_MASK, 
typelength); offset += sizeof(typelength) + length; /* END TLV or beyond LLDPDU size */ @@ -594,7 +568,7 @@ static void i40e_cee_to_dcb_v1_config( { u16 status, tlv_status = le16_to_cpu(cee_cfg->tlv_status); u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio); - u8 i, tc, err; + u8 i, err; /* CEE PG data to ETS config */ dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc; @@ -603,13 +577,13 @@ static void i40e_cee_to_dcb_v1_config( * from those in the CEE Priority Group sub-TLV. */ for (i = 0; i < 4; i++) { - tc = (u8)((cee_cfg->oper_prio_tc[i] & - I40E_CEE_PGID_PRIO_0_MASK) >> - I40E_CEE_PGID_PRIO_0_SHIFT); - dcbcfg->etscfg.prioritytable[i * 2] = tc; - tc = (u8)((cee_cfg->oper_prio_tc[i] & - I40E_CEE_PGID_PRIO_1_MASK) >> - I40E_CEE_PGID_PRIO_1_SHIFT); + u8 tc; + + tc = FIELD_GET(I40E_CEE_PGID_PRIO_0_MASK, + cee_cfg->oper_prio_tc[i]); + dcbcfg->etscfg.prioritytable[i * 2] = tc; + tc = FIELD_GET(I40E_CEE_PGID_PRIO_1_MASK, + cee_cfg->oper_prio_tc[i]); dcbcfg->etscfg.prioritytable[i*2 + 1] = tc; } @@ -631,8 +605,7 @@ static void i40e_cee_to_dcb_v1_config( dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en; dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; - status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >> - I40E_AQC_CEE_APP_STATUS_SHIFT; + status = FIELD_GET(I40E_AQC_CEE_APP_STATUS_MASK, tlv_status); err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0; /* Add APPs if Error is False */ if (!err) { @@ -641,22 +614,19 @@ static void i40e_cee_to_dcb_v1_config( /* FCoE APP */ dcbcfg->app[0].priority = - (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >> - I40E_AQC_CEE_APP_FCOE_SHIFT; + FIELD_GET(I40E_AQC_CEE_APP_FCOE_MASK, app_prio); dcbcfg->app[0].selector = I40E_APP_SEL_ETHTYPE; dcbcfg->app[0].protocolid = I40E_APP_PROTOID_FCOE; /* iSCSI APP */ dcbcfg->app[1].priority = - (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >> - I40E_AQC_CEE_APP_ISCSI_SHIFT; + FIELD_GET(I40E_AQC_CEE_APP_ISCSI_MASK, app_prio); dcbcfg->app[1].selector = I40E_APP_SEL_TCPIP; dcbcfg->app[1].protocolid = I40E_APP_PROTOID_ISCSI; /* FIP APP */ dcbcfg->app[2].priority = - (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >> - I40E_AQC_CEE_APP_FIP_SHIFT; + FIELD_GET(I40E_AQC_CEE_APP_FIP_MASK, app_prio); dcbcfg->app[2].selector = I40E_APP_SEL_ETHTYPE; dcbcfg->app[2].protocolid = I40E_APP_PROTOID_FIP; } @@ -675,7 +645,7 @@ static void i40e_cee_to_dcb_config( { u32 status, tlv_status = le32_to_cpu(cee_cfg->tlv_status); u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio); - u8 i, tc, err, sync, oper; + u8 i, err, sync, oper; /* CEE PG data to ETS config */ dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc; @@ -684,13 +654,13 @@ static void i40e_cee_to_dcb_config( * from those in the CEE Priority Group sub-TLV. */ for (i = 0; i < 4; i++) { - tc = (u8)((cee_cfg->oper_prio_tc[i] & - I40E_CEE_PGID_PRIO_0_MASK) >> - I40E_CEE_PGID_PRIO_0_SHIFT); - dcbcfg->etscfg.prioritytable[i * 2] = tc; - tc = (u8)((cee_cfg->oper_prio_tc[i] & - I40E_CEE_PGID_PRIO_1_MASK) >> - I40E_CEE_PGID_PRIO_1_SHIFT); + u8 tc; + + tc = FIELD_GET(I40E_CEE_PGID_PRIO_0_MASK, + cee_cfg->oper_prio_tc[i]); + dcbcfg->etscfg.prioritytable[i * 2] = tc; + tc = FIELD_GET(I40E_CEE_PGID_PRIO_1_MASK, + cee_cfg->oper_prio_tc[i]); dcbcfg->etscfg.prioritytable[i * 2 + 1] = tc; } @@ -713,8 +683,7 @@ static void i40e_cee_to_dcb_config( dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; i = 0; - status = (tlv_status & I40E_AQC_CEE_FCOE_STATUS_MASK) >> - I40E_AQC_CEE_FCOE_STATUS_SHIFT; + status = FIELD_GET(I40E_AQC_CEE_FCOE_STATUS_MASK, tlv_status); err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0; sync = (status & I40E_TLV_STATUS_SYNC) ? 
1 : 0; oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0; @@ -722,15 +691,13 @@ static void i40e_cee_to_dcb_config( if (!err && sync && oper) { /* FCoE APP */ dcbcfg->app[i].priority = - (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >> - I40E_AQC_CEE_APP_FCOE_SHIFT; + FIELD_GET(I40E_AQC_CEE_APP_FCOE_MASK, app_prio); dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE; dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FCOE; i++; } - status = (tlv_status & I40E_AQC_CEE_ISCSI_STATUS_MASK) >> - I40E_AQC_CEE_ISCSI_STATUS_SHIFT; + status = FIELD_GET(I40E_AQC_CEE_ISCSI_STATUS_MASK, tlv_status); err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0; sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0; oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0; @@ -738,15 +705,13 @@ static void i40e_cee_to_dcb_config( if (!err && sync && oper) { /* iSCSI APP */ dcbcfg->app[i].priority = - (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >> - I40E_AQC_CEE_APP_ISCSI_SHIFT; + FIELD_GET(I40E_AQC_CEE_APP_ISCSI_MASK, app_prio); dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP; dcbcfg->app[i].protocolid = I40E_APP_PROTOID_ISCSI; i++; } - status = (tlv_status & I40E_AQC_CEE_FIP_STATUS_MASK) >> - I40E_AQC_CEE_FIP_STATUS_SHIFT; + status = FIELD_GET(I40E_AQC_CEE_FIP_STATUS_MASK, tlv_status); err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0; sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0; oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0; @@ -754,8 +719,7 @@ static void i40e_cee_to_dcb_config( if (!err && sync && oper) { /* FIP APP */ dcbcfg->app[i].priority = - (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >> - I40E_AQC_CEE_APP_FIP_SHIFT; + FIELD_GET(I40E_AQC_CEE_APP_FIP_MASK, app_prio); dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE; dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FIP; i++; @@ -806,14 +770,11 @@ int i40e_get_dcb_config(struct i40e_hw *hw) int ret = 0; /* If Firmware version < v4.33 on X710/XL710, IEEE only */ - if ((hw->mac.type == I40E_MAC_XL710) && - (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || - (hw->aq.fw_maj_ver < 4))) + if (hw->mac.type == I40E_MAC_XL710 && i40e_is_fw_ver_lt(hw, 4, 33)) return i40e_get_ieee_dcb_config(hw); /* If Firmware version == v4.33 on X710/XL710, use old CEE struct */ - if ((hw->mac.type == I40E_MAC_XL710) && - ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33))) { + if (hw->mac.type == I40E_MAC_XL710 && i40e_is_fw_ver_eq(hw, 4, 33)) { ret = i40e_aq_get_cee_dcb_config(hw, &cee_v1_cfg, sizeof(cee_v1_cfg), NULL); if (!ret) { @@ -879,7 +840,7 @@ int i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change) return -EOPNOTSUPP; /* Read LLDP NVM area */ - if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) { + if (test_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps)) { u8 offset = 0; if (hw->mac.type == I40E_MAC_XL710) @@ -1191,7 +1152,7 @@ static void i40e_add_ieee_app_pri_tlv(struct i40e_lldp_org_tlv *tlv, selector = dcbcfg->app[i].selector & 0x7; buf[offset] = (priority << I40E_IEEE_APP_PRIO_SHIFT) | selector; buf[offset + 1] = (dcbcfg->app[i].protocolid >> 0x8) & 0xFF; - buf[offset + 2] = dcbcfg->app[i].protocolid & 0xFF; + buf[offset + 2] = dcbcfg->app[i].protocolid & 0xFF; /* Move to next app */ offset += 3; i++; @@ -1287,8 +1248,7 @@ int i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen, do { i40e_add_dcb_tlv(tlv, dcbcfg, tlvid++); typelength = ntohs(tlv->typelength); - length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> - I40E_LLDP_TLV_LEN_SHIFT); + length = FIELD_GET(I40E_LLDP_TLV_LEN_MASK, typelength); if (length) offset += length + I40E_IEEE_TLV_HEADER_LENGTH; /* END TLV or beyond LLDPDU size */ @@ -1323,20 
+1283,16 @@ void i40e_dcb_hw_rx_fifo_config(struct i40e_hw *hw, u32 reg = rd32(hw, I40E_PRTDCB_RETSC); reg &= ~I40E_PRTDCB_RETSC_ETS_MODE_MASK; - reg |= ((u32)ets_mode << I40E_PRTDCB_RETSC_ETS_MODE_SHIFT) & - I40E_PRTDCB_RETSC_ETS_MODE_MASK; + reg |= FIELD_PREP(I40E_PRTDCB_RETSC_ETS_MODE_MASK, ets_mode); reg &= ~I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK; - reg |= ((u32)non_ets_mode << I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT) & - I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK; + reg |= FIELD_PREP(I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK, non_ets_mode); reg &= ~I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK; - reg |= (max_exponent << I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT) & - I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK; + reg |= FIELD_PREP(I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK, max_exponent); reg &= ~I40E_PRTDCB_RETSC_LLTC_MASK; - reg |= (lltc_map << I40E_PRTDCB_RETSC_LLTC_SHIFT) & - I40E_PRTDCB_RETSC_LLTC_MASK; + reg |= FIELD_PREP(I40E_PRTDCB_RETSC_LLTC_MASK, lltc_map); wr32(hw, I40E_PRTDCB_RETSC, reg); } @@ -1391,14 +1347,12 @@ void i40e_dcb_hw_rx_cmd_monitor_config(struct i40e_hw *hw, */ reg = rd32(hw, I40E_PRT_SWR_PM_THR); reg &= ~I40E_PRT_SWR_PM_THR_THRESHOLD_MASK; - reg |= (threshold << I40E_PRT_SWR_PM_THR_THRESHOLD_SHIFT) & - I40E_PRT_SWR_PM_THR_THRESHOLD_MASK; + reg |= FIELD_PREP(I40E_PRT_SWR_PM_THR_THRESHOLD_MASK, threshold); wr32(hw, I40E_PRT_SWR_PM_THR, reg); reg = rd32(hw, I40E_PRTDCB_RPPMC); reg &= ~I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK; - reg |= (fifo_size << I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT) & - I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK; + reg |= FIELD_PREP(I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK, fifo_size); wr32(hw, I40E_PRTDCB_RPPMC, reg); } @@ -1440,19 +1394,17 @@ void i40e_dcb_hw_pfc_config(struct i40e_hw *hw, reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK; reg &= ~I40E_PRTDCB_MFLCN_RPFCE_MASK; if (pfc_en) { - reg |= BIT(I40E_PRTDCB_MFLCN_RPFCM_SHIFT) & - I40E_PRTDCB_MFLCN_RPFCM_MASK; - reg |= ((u32)pfc_en << I40E_PRTDCB_MFLCN_RPFCE_SHIFT) & - I40E_PRTDCB_MFLCN_RPFCE_MASK; + reg |= FIELD_PREP(I40E_PRTDCB_MFLCN_RPFCM_MASK, 1); + reg |= FIELD_PREP(I40E_PRTDCB_MFLCN_RPFCE_MASK, + pfc_en); } wr32(hw, I40E_PRTDCB_MFLCN, reg); reg = rd32(hw, I40E_PRTDCB_FCCFG); reg &= ~I40E_PRTDCB_FCCFG_TFCE_MASK; if (pfc_en) - reg |= (I40E_DCB_PFC_ENABLED << - I40E_PRTDCB_FCCFG_TFCE_SHIFT) & - I40E_PRTDCB_FCCFG_TFCE_MASK; + reg |= FIELD_PREP(I40E_PRTDCB_FCCFG_TFCE_MASK, + I40E_DCB_PFC_ENABLED); wr32(hw, I40E_PRTDCB_FCCFG, reg); /* FCTTV and FCRTV to be set by default */ @@ -1470,25 +1422,22 @@ void i40e_dcb_hw_pfc_config(struct i40e_hw *hw, reg = rd32(hw, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE); reg &= ~I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_MASK; - reg |= ((u32)pfc_en << - I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT) & - I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_MASK; + reg |= FIELD_PREP(I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_MASK, + pfc_en); wr32(hw, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE, reg); reg = rd32(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE); reg &= ~I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_MASK; - reg |= ((u32)pfc_en << - I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT) & - I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_MASK; + reg |= FIELD_PREP(I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_MASK, + pfc_en); wr32(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE, reg); for (i = 0; i < I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX; i++) { reg = rd32(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(i)); reg &= ~I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK; if (pfc_en) { - reg |= ((u32)refresh_time << - I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT) & - 
I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK; + reg |= FIELD_PREP(I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK, + refresh_time); } wr32(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(i), reg); } @@ -1500,14 +1449,12 @@ void i40e_dcb_hw_pfc_config(struct i40e_hw *hw, reg = rd32(hw, I40E_PRTDCB_TC2PFC); reg &= ~I40E_PRTDCB_TC2PFC_TC2PFC_MASK; - reg |= ((u32)tc2pfc << I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT) & - I40E_PRTDCB_TC2PFC_TC2PFC_MASK; + reg |= FIELD_PREP(I40E_PRTDCB_TC2PFC_TC2PFC_MASK, tc2pfc); wr32(hw, I40E_PRTDCB_TC2PFC, reg); reg = rd32(hw, I40E_PRTDCB_RUP); reg &= ~I40E_PRTDCB_RUP_NOVLANUP_MASK; - reg |= ((u32)first_pfc_prio << I40E_PRTDCB_RUP_NOVLANUP_SHIFT) & - I40E_PRTDCB_RUP_NOVLANUP_MASK; + reg |= FIELD_PREP(I40E_PRTDCB_RUP_NOVLANUP_MASK, first_pfc_prio); wr32(hw, I40E_PRTDCB_RUP, reg); reg = rd32(hw, I40E_PRTDCB_TDPMC); @@ -1539,8 +1486,7 @@ void i40e_dcb_hw_set_num_tc(struct i40e_hw *hw, u8 num_tc) u32 reg = rd32(hw, I40E_PRTDCB_GENC); reg &= ~I40E_PRTDCB_GENC_NUMTC_MASK; - reg |= ((u32)num_tc << I40E_PRTDCB_GENC_NUMTC_SHIFT) & - I40E_PRTDCB_GENC_NUMTC_MASK; + reg |= FIELD_PREP(I40E_PRTDCB_GENC_NUMTC_MASK, num_tc); wr32(hw, I40E_PRTDCB_GENC, reg); } @@ -1554,8 +1500,7 @@ u8 i40e_dcb_hw_get_num_tc(struct i40e_hw *hw) { u32 reg = rd32(hw, I40E_PRTDCB_GENC); - return (u8)((reg & I40E_PRTDCB_GENC_NUMTC_MASK) >> - I40E_PRTDCB_GENC_NUMTC_SHIFT); + return FIELD_GET(I40E_PRTDCB_GENC_NUMTC_MASK, reg); } /** @@ -1578,13 +1523,13 @@ void i40e_dcb_hw_rx_ets_bw_config(struct i40e_hw *hw, u8 *bw_share, reg = rd32(hw, I40E_PRTDCB_RETSTCC(i)); reg &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK | I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK | - I40E_PRTDCB_RETSTCC_ETSTC_SHIFT); - reg |= ((u32)bw_share[i] << I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) & - I40E_PRTDCB_RETSTCC_BWSHARE_MASK; - reg |= ((u32)mode[i] << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) & - I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK; - reg |= ((u32)prio_type[i] << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) & - I40E_PRTDCB_RETSTCC_ETSTC_MASK; + I40E_PRTDCB_RETSTCC_ETSTC_MASK); + reg |= FIELD_PREP(I40E_PRTDCB_RETSTCC_BWSHARE_MASK, + bw_share[i]); + reg |= FIELD_PREP(I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK, + mode[i]); + reg |= FIELD_PREP(I40E_PRTDCB_RETSTCC_ETSTC_MASK, + prio_type[i]); wr32(hw, I40E_PRTDCB_RETSTCC(i), reg); } } @@ -1724,8 +1669,7 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw, if (new_val < old_val) { reg = rd32(hw, I40E_PRTRPB_SLW); reg &= ~I40E_PRTRPB_SLW_SLW_MASK; - reg |= (new_val << I40E_PRTRPB_SLW_SLW_SHIFT) & - I40E_PRTRPB_SLW_SLW_MASK; + reg |= FIELD_PREP(I40E_PRTRPB_SLW_SLW_MASK, new_val); wr32(hw, I40E_PRTRPB_SLW, reg); } @@ -1738,8 +1682,8 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw, if (new_val < old_val) { reg = rd32(hw, I40E_PRTRPB_SLT(i)); reg &= ~I40E_PRTRPB_SLT_SLT_TCN_MASK; - reg |= (new_val << I40E_PRTRPB_SLT_SLT_TCN_SHIFT) & - I40E_PRTRPB_SLT_SLT_TCN_MASK; + reg |= FIELD_PREP(I40E_PRTRPB_SLT_SLT_TCN_MASK, + new_val); wr32(hw, I40E_PRTRPB_SLT(i), reg); } @@ -1748,8 +1692,8 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw, if (new_val < old_val) { reg = rd32(hw, I40E_PRTRPB_DLW(i)); reg &= ~I40E_PRTRPB_DLW_DLW_TCN_MASK; - reg |= (new_val << I40E_PRTRPB_DLW_DLW_TCN_SHIFT) & - I40E_PRTRPB_DLW_DLW_TCN_MASK; + reg |= FIELD_PREP(I40E_PRTRPB_DLW_DLW_TCN_MASK, + new_val); wr32(hw, I40E_PRTRPB_DLW(i), reg); } } @@ -1760,8 +1704,7 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw, if (new_val < old_val) { reg = rd32(hw, I40E_PRTRPB_SHW); reg &= ~I40E_PRTRPB_SHW_SHW_MASK; - reg |= (new_val << I40E_PRTRPB_SHW_SHW_SHIFT) & - 
I40E_PRTRPB_SHW_SHW_MASK; + reg |= FIELD_PREP(I40E_PRTRPB_SHW_SHW_MASK, new_val); wr32(hw, I40E_PRTRPB_SHW, reg); } @@ -1774,8 +1717,8 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw, if (new_val < old_val) { reg = rd32(hw, I40E_PRTRPB_SHT(i)); reg &= ~I40E_PRTRPB_SHT_SHT_TCN_MASK; - reg |= (new_val << I40E_PRTRPB_SHT_SHT_TCN_SHIFT) & - I40E_PRTRPB_SHT_SHT_TCN_MASK; + reg |= FIELD_PREP(I40E_PRTRPB_SHT_SHT_TCN_MASK, + new_val); wr32(hw, I40E_PRTRPB_SHT(i), reg); } @@ -1784,8 +1727,8 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw, if (new_val < old_val) { reg = rd32(hw, I40E_PRTRPB_DHW(i)); reg &= ~I40E_PRTRPB_DHW_DHW_TCN_MASK; - reg |= (new_val << I40E_PRTRPB_DHW_DHW_TCN_SHIFT) & - I40E_PRTRPB_DHW_DHW_TCN_MASK; + reg |= FIELD_PREP(I40E_PRTRPB_DHW_DHW_TCN_MASK, + new_val); wr32(hw, I40E_PRTRPB_DHW(i), reg); } } @@ -1795,8 +1738,7 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw, new_val = new_pb_cfg->tc_pool_size[i]; reg = rd32(hw, I40E_PRTRPB_DPS(i)); reg &= ~I40E_PRTRPB_DPS_DPS_TCN_MASK; - reg |= (new_val << I40E_PRTRPB_DPS_DPS_TCN_SHIFT) & - I40E_PRTRPB_DPS_DPS_TCN_MASK; + reg |= FIELD_PREP(I40E_PRTRPB_DPS_DPS_TCN_MASK, new_val); wr32(hw, I40E_PRTRPB_DPS(i), reg); } @@ -1804,8 +1746,7 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw, new_val = new_pb_cfg->shared_pool_size; reg = rd32(hw, I40E_PRTRPB_SPS); reg &= ~I40E_PRTRPB_SPS_SPS_MASK; - reg |= (new_val << I40E_PRTRPB_SPS_SPS_SHIFT) & - I40E_PRTRPB_SPS_SPS_MASK; + reg |= FIELD_PREP(I40E_PRTRPB_SPS_SPS_MASK, new_val); wr32(hw, I40E_PRTRPB_SPS, reg); /* Program the shared pool low water mark per port if increasing */ @@ -1814,8 +1755,7 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw, if (new_val > old_val) { reg = rd32(hw, I40E_PRTRPB_SLW); reg &= ~I40E_PRTRPB_SLW_SLW_MASK; - reg |= (new_val << I40E_PRTRPB_SLW_SLW_SHIFT) & - I40E_PRTRPB_SLW_SLW_MASK; + reg |= FIELD_PREP(I40E_PRTRPB_SLW_SLW_MASK, new_val); wr32(hw, I40E_PRTRPB_SLW, reg); } @@ -1828,8 +1768,8 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw, if (new_val > old_val) { reg = rd32(hw, I40E_PRTRPB_SLT(i)); reg &= ~I40E_PRTRPB_SLT_SLT_TCN_MASK; - reg |= (new_val << I40E_PRTRPB_SLT_SLT_TCN_SHIFT) & - I40E_PRTRPB_SLT_SLT_TCN_MASK; + reg |= FIELD_PREP(I40E_PRTRPB_SLT_SLT_TCN_MASK, + new_val); wr32(hw, I40E_PRTRPB_SLT(i), reg); } @@ -1838,8 +1778,8 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw, if (new_val > old_val) { reg = rd32(hw, I40E_PRTRPB_DLW(i)); reg &= ~I40E_PRTRPB_DLW_DLW_TCN_MASK; - reg |= (new_val << I40E_PRTRPB_DLW_DLW_TCN_SHIFT) & - I40E_PRTRPB_DLW_DLW_TCN_MASK; + reg |= FIELD_PREP(I40E_PRTRPB_DLW_DLW_TCN_MASK, + new_val); wr32(hw, I40E_PRTRPB_DLW(i), reg); } } @@ -1850,8 +1790,7 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw, if (new_val > old_val) { reg = rd32(hw, I40E_PRTRPB_SHW); reg &= ~I40E_PRTRPB_SHW_SHW_MASK; - reg |= (new_val << I40E_PRTRPB_SHW_SHW_SHIFT) & - I40E_PRTRPB_SHW_SHW_MASK; + reg |= FIELD_PREP(I40E_PRTRPB_SHW_SHW_MASK, new_val); wr32(hw, I40E_PRTRPB_SHW, reg); } @@ -1864,8 +1803,8 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw, if (new_val > old_val) { reg = rd32(hw, I40E_PRTRPB_SHT(i)); reg &= ~I40E_PRTRPB_SHT_SHT_TCN_MASK; - reg |= (new_val << I40E_PRTRPB_SHT_SHT_TCN_SHIFT) & - I40E_PRTRPB_SHT_SHT_TCN_MASK; + reg |= FIELD_PREP(I40E_PRTRPB_SHT_SHT_TCN_MASK, + new_val); wr32(hw, I40E_PRTRPB_SHT(i), reg); } @@ -1874,8 +1813,8 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw, if (new_val > old_val) { reg = rd32(hw, I40E_PRTRPB_DHW(i)); reg &= ~I40E_PRTRPB_DHW_DHW_TCN_MASK; - reg |= (new_val << 
I40E_PRTRPB_DHW_DHW_TCN_SHIFT) & - I40E_PRTRPB_DHW_DHW_TCN_MASK; + reg |= FIELD_PREP(I40E_PRTRPB_DHW_DHW_TCN_MASK, + new_val); wr32(hw, I40E_PRTRPB_DHW(i), reg); } } diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h index 6b60dc9b77..d76497566e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h @@ -43,7 +43,7 @@ #define I40E_LLDP_TLV_SUBTYPE_SHIFT 0 #define I40E_LLDP_TLV_SUBTYPE_MASK (0xFF << I40E_LLDP_TLV_SUBTYPE_SHIFT) #define I40E_LLDP_TLV_OUI_SHIFT 8 -#define I40E_LLDP_TLV_OUI_MASK (0xFFFFFF << I40E_LLDP_TLV_OUI_SHIFT) +#define I40E_LLDP_TLV_OUI_MASK (0xFFFFFFU << I40E_LLDP_TLV_OUI_SHIFT) /* Defines for IEEE ETS TLV */ #define I40E_IEEE_ETS_MAXTC_SHIFT 0 diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c index 077a95dad3..b96a92187a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c @@ -21,8 +21,7 @@ static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay) u32 val; val = rd32(hw, I40E_PRTDCB_GENC); - *delay = (u16)((val & I40E_PRTDCB_GENC_PFCLDA_MASK) >> - I40E_PRTDCB_GENC_PFCLDA_SHIFT); + *delay = FIELD_GET(I40E_PRTDCB_GENC_PFCLDA_MASK, val); } /** @@ -310,8 +309,8 @@ static u8 i40e_dcbnl_getstate(struct net_device *netdev) struct i40e_pf *pf = i40e_netdev_to_pf(netdev); dev_dbg(&pf->pdev->dev, "DCB state=%d\n", - !!(pf->flags & I40E_FLAG_DCB_ENABLED)); - return !!(pf->flags & I40E_FLAG_DCB_ENABLED); + test_bit(I40E_FLAG_DCB_ENA, pf->flags) ? 1 : 0); + return test_bit(I40E_FLAG_DCB_ENA, pf->flags) ? 1 : 0; } /** @@ -331,19 +330,19 @@ static u8 i40e_dcbnl_setstate(struct net_device *netdev, u8 state) return ret; dev_dbg(&pf->pdev->dev, "new state=%d current state=%d\n", - state, (pf->flags & I40E_FLAG_DCB_ENABLED) ? 1 : 0); + state, test_bit(I40E_FLAG_DCB_ENA, pf->flags) ? 
1 : 0); /* Nothing to do */ - if (!state == !(pf->flags & I40E_FLAG_DCB_ENABLED)) + if (!state == !test_bit(I40E_FLAG_DCB_ENA, pf->flags)) return ret; if (i40e_is_sw_dcb(pf)) { if (state) { - pf->flags |= I40E_FLAG_DCB_ENABLED; + set_bit(I40E_FLAG_DCB_ENA, pf->flags); memcpy(&pf->hw.desired_dcbx_config, &pf->hw.local_dcbx_config, sizeof(struct i40e_dcbx_config)); } else { - pf->flags &= ~I40E_FLAG_DCB_ENABLED; + clear_bit(I40E_FLAG_DCB_ENA, pf->flags); } } else { /* Cannot directly manipulate FW LLDP Agent */ @@ -653,7 +652,7 @@ static u8 i40e_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap) { struct i40e_pf *pf = i40e_netdev_to_pf(netdev); - if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) + if (!test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags)) return I40E_DCBNL_STATUS_ERROR; switch (capid) { @@ -693,7 +692,7 @@ static int i40e_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) { struct i40e_pf *pf = i40e_netdev_to_pf(netdev); - if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) + if (!test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags)) return -EINVAL; *num = I40E_MAX_TRAFFIC_CLASS; @@ -827,15 +826,12 @@ static void i40e_dcbnl_get_perm_hw_addr(struct net_device *dev, u8 *perm_addr) { struct i40e_pf *pf = i40e_netdev_to_pf(dev); - int i, j; + int i; memset(perm_addr, 0xff, MAX_ADDR_LEN); for (i = 0; i < dev->addr_len; i++) perm_addr[i] = pf->hw.mac.perm_addr[i]; - - for (j = 0; j < dev->addr_len; j++, i++) - perm_addr[i] = pf->hw.mac.san_addr[j]; } static const struct dcbnl_rtnl_ops dcbnl_ops = { @@ -891,11 +887,11 @@ void i40e_dcbnl_set_all(struct i40e_vsi *vsi) return; /* DCB not enabled */ - if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) + if (!test_bit(I40E_FLAG_DCB_ENA, pf->flags)) return; /* MFP mode but not an iSCSI PF so return */ - if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(hw->func_caps.iscsi)) + if (test_bit(I40E_FLAG_MFP_ENA, pf->flags) && !(hw->func_caps.iscsi)) return; dcbxcfg = &hw->local_dcbx_config; @@ -1002,7 +998,7 @@ void i40e_dcbnl_flush_apps(struct i40e_pf *pf, int i; /* MFP mode but not an iSCSI PF so return */ - if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(pf->hw.func_caps.iscsi)) + if (test_bit(I40E_FLAG_MFP_ENA, pf->flags) && !(pf->hw.func_caps.iscsi)) return; for (i = 0; i < old_cfg->numapps; i++) { @@ -1025,7 +1021,7 @@ void i40e_dcbnl_setup(struct i40e_vsi *vsi) struct i40e_pf *pf = i40e_netdev_to_pf(dev); /* Not DCB capable */ - if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) + if (!test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags)) return; dev->dcbnl_ops = &dcbnl_ops; diff --git a/drivers/net/ethernet/intel/i40e/i40e_ddp.c b/drivers/net/ethernet/intel/i40e/i40e_ddp.c index cf25bfc5dc..2f53f0f53b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ddp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ddp.c @@ -81,8 +81,8 @@ static int i40e_ddp_does_profile_exist(struct i40e_hw *hw, static bool i40e_ddp_profiles_overlap(struct i40e_profile_info *new, struct i40e_profile_info *old) { - unsigned int group_id_old = (u8)((old->track_id & 0x00FF0000) >> 16); - unsigned int group_id_new = (u8)((new->track_id & 0x00FF0000) >> 16); + unsigned int group_id_old = FIELD_GET(0x00FF0000, old->track_id); + unsigned int group_id_new = FIELD_GET(0x00FF0000, new->track_id); /* 0x00 group must be only the first */ if (group_id_new == 0) diff --git a/drivers/net/ethernet/intel/i40e/i40e_debug.h b/drivers/net/ethernet/intel/i40e/i40e_debug.h index 27ebc72d8b..e9871dfb32 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debug.h +++ b/drivers/net/ethernet/intel/i40e/i40e_debug.h @@ -37,6 +37,7 @@ struct 
i40e_hw; struct device *i40e_hw_to_dev(struct i40e_hw *hw); #define hw_dbg(hw, S, A...) dev_dbg(i40e_hw_to_dev(hw), S, ##A) +#define hw_warn(hw, S, A...) dev_warn(i40e_hw_to_dev(hw), S, ##A) #define i40e_debug(h, m, s, ...) \ do { \ diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 999c9708de..ef70ddbe9c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -147,9 +147,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) " state[%d] = %08lx\n", i, vsi->state[i]); if (vsi == pf->vsi[pf->lan_vsi]) - dev_info(&pf->pdev->dev, " MAC address: %pM SAN MAC: %pM Port MAC: %pM\n", + dev_info(&pf->pdev->dev, " MAC address: %pM Port MAC: %pM\n", pf->hw.mac.addr, - pf->hw.mac.san_addr, pf->hw.mac.port_addr); hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { dev_info(&pf->pdev->dev, @@ -820,8 +819,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp, /* By default we are in VEPA mode, if this is the first VF/VMDq * VSI to be added switch to VEB mode. */ - if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { - pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; + if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) { + set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG); } @@ -1029,9 +1028,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp, "emp reset count: %d\n", pf->empr_count); dev_info(&pf->pdev->dev, "pf reset count: %d\n", pf->pfr_count); - dev_info(&pf->pdev->dev, - "pf tx sluggish count: %d\n", - pf->tx_sluggish_count); } else if (strncmp(&cmd_buf[5], "port", 4) == 0) { struct i40e_aqc_query_port_ets_config_resp *bw_data; struct i40e_dcbx_config *cfg = diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h index ece3a6b9a5..ab20202a3d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_diag.h +++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h @@ -4,6 +4,7 @@ #ifndef _I40E_DIAG_H_ #define _I40E_DIAG_H_ +#include <linux/types.h> #include "i40e_adminq_cmd.h" /* forward-declare the HW struct for the compiler */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index fd7163128c..c841779713 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -430,35 +430,35 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = { struct i40e_priv_flags { char flag_string[ETH_GSTRING_LEN]; - u64 flag; + u8 bitno; bool read_only; }; -#define I40E_PRIV_FLAG(_name, _flag, _read_only) { \ +#define I40E_PRIV_FLAG(_name, _bitno, _read_only) { \ .flag_string = _name, \ - .flag = _flag, \ + .bitno = _bitno, \ .read_only = _read_only, \ } static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = { /* NOTE: MFP setting cannot be changed */ - I40E_PRIV_FLAG("MFP", I40E_FLAG_MFP_ENABLED, 1), + I40E_PRIV_FLAG("MFP", I40E_FLAG_MFP_ENA, 1), I40E_PRIV_FLAG("total-port-shutdown", - I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED, 1), - I40E_PRIV_FLAG("LinkPolling", I40E_FLAG_LINK_POLLING_ENABLED, 0), - I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENABLED, 0), - I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENABLED, 0), - I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_ENABLED, 0), + I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, 1), + I40E_PRIV_FLAG("LinkPolling", I40E_FLAG_LINK_POLLING_ENA, 0), + I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENA, 0), + 
I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENA, 0), + I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_ENA, 0), I40E_PRIV_FLAG("link-down-on-close", - I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED, 0), - I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0), + I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, 0), + I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX_ENA, 0), I40E_PRIV_FLAG("disable-source-pruning", - I40E_FLAG_SOURCE_PRUNING_DISABLED, 0), - I40E_PRIV_FLAG("disable-fw-lldp", I40E_FLAG_DISABLE_FW_LLDP, 0), + I40E_FLAG_SOURCE_PRUNING_DIS, 0), + I40E_PRIV_FLAG("disable-fw-lldp", I40E_FLAG_FW_LLDP_DIS, 0), I40E_PRIV_FLAG("rs-fec", I40E_FLAG_RS_FEC, 0), I40E_PRIV_FLAG("base-r-fec", I40E_FLAG_BASE_R_FEC, 0), I40E_PRIV_FLAG("vf-vlan-pruning", - I40E_FLAG_VF_VLAN_PRUNING, 0), + I40E_FLAG_VF_VLAN_PRUNING_ENA, 0), }; #define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags) @@ -466,7 +466,7 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = { /* Private flags with a global effect, restricted to PF 0 */ static const struct i40e_priv_flags i40e_gl_gstrings_priv_flags[] = { I40E_PRIV_FLAG("vf-true-promisc-support", - I40E_FLAG_TRUE_PROMISC_SUPPORT, 0), + I40E_FLAG_TRUE_PROMISC_ENA, 0), }; #define I40E_GL_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gl_gstrings_priv_flags) @@ -502,7 +502,7 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 1000baseT_Full); - if (pf->hw_features & I40E_HW_100M_SGMII_CAPABLE) { + if (test_bit(I40E_HW_CAP_100M_SGMII, pf->hw.caps)) { ethtool_link_ksettings_add_link_mode(ks, supported, 100baseT_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, @@ -601,7 +601,7 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, 10000baseKX4_Full); } if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR && - !(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) { + !test_bit(I40E_HW_CAP_CRT_RETIMER, pf->hw.caps)) { ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseKR_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) @@ -609,7 +609,7 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, 10000baseKR_Full); } if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX && - !(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) { + !test_bit(I40E_HW_CAP_CRT_RETIMER, pf->hw.caps)) { ethtool_link_ksettings_add_link_mode(ks, supported, 1000baseKX_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) @@ -917,7 +917,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 1000baseT_Full); - if (pf->hw_features & I40E_HW_100M_SGMII_CAPABLE) { + if (test_bit(I40E_HW_CAP_100M_SGMII, pf->hw.caps)) { ethtool_link_ksettings_add_link_mode(ks, supported, 100baseT_Full); if (hw_link_info->requested_speeds & @@ -1488,12 +1488,8 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg) struct i40e_pf *pf = np->vsi->back; struct i40e_hw *hw = &pf->hw; int status = 0; - u32 flags = 0; int err = 0; - flags = READ_ONCE(pf->flags); - i40e_set_fec_in_flags(fec_cfg, &flags); - /* Get the current phy config */ memset(&abilities, 0, sizeof(abilities)); status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, @@ -1525,7 +1521,7 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg) err = -EAGAIN; goto done; } - pf->flags = flags; + i40e_set_fec_in_flags(fec_cfg, pf->flags); status = i40e_update_link_info(hw); 
if (status) /* debug level message only due to relation to the link @@ -1599,7 +1595,7 @@ static int i40e_set_fec_param(struct net_device *netdev, return -EPERM; if (hw->mac.type == I40E_MAC_X722 && - !(hw->flags & I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE)) { + !test_bit(I40E_HW_CAP_X722_FEC_REQUEST, hw->caps)) { netdev_err(netdev, "Setting FEC encoding not supported by firmware. Please update the NVM image.\n"); return -EOPNOTSUPP; } @@ -1915,7 +1911,7 @@ static int i40e_get_eeprom(struct net_device *netdev, len = eeprom->len - (I40E_NVM_SECTOR_SIZE * i); last = true; } - offset = eeprom->offset + (I40E_NVM_SECTOR_SIZE * i), + offset = eeprom->offset + (I40E_NVM_SECTOR_SIZE * i); ret_val = i40e_aq_read_nvm(hw, 0x0, offset, len, (u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i), last, NULL); @@ -1956,9 +1952,8 @@ static int i40e_get_eeprom_len(struct net_device *netdev) val = X722_EEPROM_SCOPE_LIMIT + 1; return val; } - val = (rd32(hw, I40E_GLPCI_LBARCTRL) - & I40E_GLPCI_LBARCTRL_FL_SIZE_MASK) - >> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT; + val = FIELD_GET(I40E_GLPCI_LBARCTRL_FL_SIZE_MASK, + rd32(hw, I40E_GLPCI_LBARCTRL)); /* register returns value in power of 2, 64Kbyte chunks. */ val = (64 * 1024) * BIT(val); return val; @@ -2015,6 +2010,18 @@ static void i40e_get_drvinfo(struct net_device *netdev, drvinfo->n_priv_flags += I40E_GL_PRIV_FLAGS_STR_LEN; } +static u32 i40e_get_max_num_descriptors(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + + switch (hw->mac.type) { + case I40E_MAC_XL710: + return I40E_MAX_NUM_DESCRIPTORS_XL710; + default: + return I40E_MAX_NUM_DESCRIPTORS; + } +} + static void i40e_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, @@ -2024,8 +2031,8 @@ static void i40e_get_ringparam(struct net_device *netdev, struct i40e_pf *pf = np->vsi->back; struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; - ring->rx_max_pending = I40E_MAX_NUM_DESCRIPTORS; - ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS; + ring->rx_max_pending = i40e_get_max_num_descriptors(pf); + ring->tx_max_pending = i40e_get_max_num_descriptors(pf); ring->rx_mini_max_pending = 0; ring->rx_jumbo_max_pending = 0; ring->rx_pending = vsi->rx_rings[0]->count; @@ -2050,12 +2057,12 @@ static int i40e_set_ringparam(struct net_device *netdev, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { + u32 new_rx_count, new_tx_count, max_num_descriptors; struct i40e_ring *tx_rings = NULL, *rx_rings = NULL; struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_hw *hw = &np->vsi->back->hw; struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; - u32 new_rx_count, new_tx_count; u16 tx_alloc_queue_pairs; int timeout = 50; int i, err = 0; @@ -2063,14 +2070,15 @@ static int i40e_set_ringparam(struct net_device *netdev, if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; - if (ring->tx_pending > I40E_MAX_NUM_DESCRIPTORS || + max_num_descriptors = i40e_get_max_num_descriptors(pf); + if (ring->tx_pending > max_num_descriptors || ring->tx_pending < I40E_MIN_NUM_DESCRIPTORS || - ring->rx_pending > I40E_MAX_NUM_DESCRIPTORS || + ring->rx_pending > max_num_descriptors || ring->rx_pending < I40E_MIN_NUM_DESCRIPTORS) { netdev_info(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n", ring->tx_pending, ring->rx_pending, - I40E_MIN_NUM_DESCRIPTORS, I40E_MAX_NUM_DESCRIPTORS); + I40E_MIN_NUM_DESCRIPTORS, max_num_descriptors); return -EINVAL; } @@ -2419,7 +2427,7 @@ static void 
i40e_get_ethtool_stats(struct net_device *netdev, veb_stats = ((pf->lan_veb != I40E_NO_VEB) && (pf->lan_veb < I40E_MAX_VEB) && - (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)); + test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags)); if (veb_stats) { veb = pf->veb[pf->lan_veb]; @@ -2514,13 +2522,11 @@ static void i40e_get_priv_flag_strings(struct net_device *netdev, u8 *data) u8 *p = data; for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) - ethtool_sprintf(&p, "%s", - i40e_gstrings_priv_flags[i].flag_string); + ethtool_puts(&p, i40e_gstrings_priv_flags[i].flag_string); if (pf->hw.pf_id != 0) return; for (i = 0; i < I40E_GL_PRIV_FLAGS_STR_LEN; i++) - ethtool_sprintf(&p, "%s", - i40e_gl_gstrings_priv_flags[i].flag_string); + ethtool_puts(&p, i40e_gl_gstrings_priv_flags[i].flag_string); } static void i40e_get_strings(struct net_device *netdev, u32 stringset, @@ -2548,7 +2554,7 @@ static int i40e_get_ts_info(struct net_device *dev, struct i40e_pf *pf = i40e_netdev_to_pf(dev); /* only report HW timestamping if PTP is enabled */ - if (!(pf->flags & I40E_FLAG_PTP)) + if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags)) return ethtool_op_get_ts_info(dev, info); info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | @@ -2570,7 +2576,7 @@ static int i40e_get_ts_info(struct net_device *dev, BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ); - if (pf->hw_features & I40E_HW_PTP_L4_CAPABLE) + if (test_bit(I40E_HW_CAP_PTP_L4, pf->hw.caps)) info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) | @@ -2819,10 +2825,10 @@ static int i40e_set_phys_id(struct net_device *netdev, switch (state) { case ETHTOOL_ID_ACTIVE: - if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) { + if (!test_bit(I40E_HW_CAP_PHY_CONTROLS_LEDS, pf->hw.caps)) { pf->led_status = i40e_led_get(hw); } else { - if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) + if (!test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_ALL, NULL); ret = i40e_led_get_phy(hw, &temp_status, @@ -2831,25 +2837,25 @@ static int i40e_set_phys_id(struct net_device *netdev, } return blink_freq; case ETHTOOL_ID_ON: - if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) + if (!test_bit(I40E_HW_CAP_PHY_CONTROLS_LEDS, pf->hw.caps)) i40e_led_set(hw, 0xf, false); else ret = i40e_led_set_phy(hw, true, pf->led_status, 0); break; case ETHTOOL_ID_OFF: - if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) + if (!test_bit(I40E_HW_CAP_PHY_CONTROLS_LEDS, pf->hw.caps)) i40e_led_set(hw, 0x0, false); else ret = i40e_led_set_phy(hw, false, pf->led_status, 0); break; case ETHTOOL_ID_INACTIVE: - if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) { + if (!test_bit(I40E_HW_CAP_PHY_CONTROLS_LEDS, pf->hw.caps)) { i40e_led_set(hw, pf->led_status, false); } else { ret = i40e_led_set_phy(hw, false, pf->led_status, (pf->phy_led_val | I40E_PHY_LED_MODE_ORIG)); - if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) + if (!test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) i40e_aq_set_phy_debug(hw, 0, NULL); } break; @@ -2886,7 +2892,6 @@ static int __i40e_get_coalesce(struct net_device *netdev, struct i40e_vsi *vsi = np->vsi; ec->tx_max_coalesced_frames_irq = vsi->work_limit; - ec->rx_max_coalesced_frames_irq = vsi->work_limit; /* rx and tx usecs has per queue value. If user doesn't specify the * queue, return queue 0's value to represent. 
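The other conversion repeated throughout the patch, first applied to the I40E_GLPCI_LBARCTRL read above and again in the QTX_CTL and malicious-driver-detection hunks below, trades open-coded "(reg & MASK) >> SHIFT" and "(val << SHIFT) & MASK" pairs for FIELD_GET()/FIELD_PREP() from <linux/bitfield.h>, which derive the shift from the mask itself. A rough, runnable model of the arithmetic; the mask value here is illustrative, not a device register definition:

#include <stdint.h>
#include <stdio.h>

/* The lowest set bit of a contiguous mask doubles as the implicit shift. */
#define LOW_BIT(mask)         ((mask) & -(mask))
#define FIELD_GET(mask, reg)  (((reg) & (mask)) / LOW_BIT(mask))
#define FIELD_PREP(mask, val) (((val) * LOW_BIT(mask)) & (mask))

int main(void)
{
        uint32_t mask = 0x00001c00;     /* 3-bit field occupying bits 12:10 */
        uint32_t reg  = 0x00001a40;

        printf("extracted %u\n", (unsigned)FIELD_GET(mask, reg));    /* 6 */
        printf("composed 0x%04x\n", (unsigned)FIELD_PREP(mask, 6u)); /* 0x1800 */
        return 0;
}

The kernel macros also sanity-check the mask and value at compile time, which is what makes this conversion mechanical rather than risky.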
@@ -3020,7 +3025,7 @@ static int __i40e_set_coalesce(struct net_device *netdev, struct i40e_pf *pf = vsi->back; int i; - if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq) + if (ec->tx_max_coalesced_frames_irq) vsi->work_limit = ec->tx_max_coalesced_frames_irq; if (queue < 0) { @@ -3278,7 +3283,7 @@ static int i40e_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp, } else if (valid) { data->flex_word = value & I40E_USERDEF_FLEX_WORD; data->flex_offset = - (value & I40E_USERDEF_FLEX_OFFSET) >> 16; + FIELD_GET(I40E_USERDEF_FLEX_OFFSET, value); data->flex_filter = true; } @@ -3628,7 +3633,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) bitmap_zero(flow_pctypes, FLOW_PCTYPES_SIZE); - if (pf->flags & I40E_FLAG_MFP_ENABLED) { + if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) { dev_err(&pf->pdev->dev, "Change of RSS hash input set is not supported when MFP mode is enabled\n"); return -EOPNOTSUPP; @@ -3644,19 +3649,22 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) switch (nfc->flow_type) { case TCP_V4_FLOW: set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP, flow_pctypes); - if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) + if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, + pf->hw.caps)) set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK, flow_pctypes); break; case TCP_V6_FLOW: set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP, flow_pctypes); - if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) + if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, + pf->hw.caps)) set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK, flow_pctypes); break; case UDP_V4_FLOW: set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_UDP, flow_pctypes); - if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) { + if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, + pf->hw.caps)) { set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP, flow_pctypes); set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP, @@ -3666,7 +3674,8 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) break; case UDP_V6_FLOW: set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_UDP, flow_pctypes); - if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) { + if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, + pf->hw.caps)) { set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP, flow_pctypes); set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP, @@ -4644,7 +4653,7 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi, * main port cannot change them when in MFP mode as this would impact * any filters on the other ports. */ - if (pf->flags & I40E_FLAG_MFP_ENABLED) { + if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) { netif_err(pf, drv, vsi->netdev, "Cannot change Flow Director input sets while MFP is enabled\n"); return -EOPNOTSUPP; } @@ -4804,7 +4813,7 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, return -EINVAL; pf = vsi->back; - if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) + if (!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) return -EOPNOTSUPP; if (test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) @@ -5001,7 +5010,7 @@ static void i40e_get_channels(struct net_device *dev, ch->max_combined = i40e_max_channels(vsi); /* report info for other vector */ - ch->other_count = (pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0; + ch->other_count = test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) ? 1 : 0; ch->max_other = ch->other_count; /* Note: This code assumes DCB is disabled for now. 
*/ @@ -5044,7 +5053,7 @@ static int i40e_set_channels(struct net_device *dev, return -EINVAL; /* verify other_count has not changed */ - if (ch->other_count != ((pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0)) + if (ch->other_count != (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) ? 1 : 0)) return -EINVAL; /* verify the number of channels does not exceed hardware limits */ @@ -5109,15 +5118,13 @@ static u32 i40e_get_rxfh_indir_size(struct net_device *netdev) /** * i40e_get_rxfh - get the rx flow hash indirection table * @netdev: network interface device structure - * @indir: indirection table - * @key: hash key - * @hfunc: hash function + * @rxfh: pointer to param struct (indir, key, hfunc) * * Reads the indirection table directly from the hardware. Returns 0 on * success. **/ -static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, - u8 *hfunc) +static int i40e_get_rxfh(struct net_device *netdev, + struct ethtool_rxfh_param *rxfh) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; @@ -5125,13 +5132,12 @@ static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, int ret; u16 i; - if (hfunc) - *hfunc = ETH_RSS_HASH_TOP; + rxfh->hfunc = ETH_RSS_HASH_TOP; - if (!indir) + if (!rxfh->indir) return 0; - seed = key; + seed = rxfh->key; lut = kzalloc(I40E_HLUT_ARRAY_SIZE, GFP_KERNEL); if (!lut) return -ENOMEM; @@ -5139,7 +5145,7 @@ static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, if (ret) goto out; for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++) - indir[i] = (u32)(lut[i]); + rxfh->indir[i] = (u32)(lut[i]); out: kfree(lut); @@ -5150,15 +5156,15 @@ out: /** * i40e_set_rxfh - set the rx flow hash indirection table * @netdev: network interface device structure - * @indir: indirection table - * @key: hash key - * @hfunc: hash function to use + * @rxfh: pointer to param struct (indir, key, hfunc) + * @extack: extended ACK from the Netlink message * * Returns -EINVAL if the table specifies an invalid queue id, otherwise * returns 0 after programming the table. 
**/ -static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir, - const u8 *key, const u8 hfunc) +static int i40e_set_rxfh(struct net_device *netdev, + struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; @@ -5166,17 +5172,18 @@ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir, u8 *seed = NULL; u16 i; - if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && + rxfh->hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; - if (key) { + if (rxfh->key) { if (!vsi->rss_hkey_user) { vsi->rss_hkey_user = kzalloc(I40E_HKEY_ARRAY_SIZE, GFP_KERNEL); if (!vsi->rss_hkey_user) return -ENOMEM; } - memcpy(vsi->rss_hkey_user, key, I40E_HKEY_ARRAY_SIZE); + memcpy(vsi->rss_hkey_user, rxfh->key, I40E_HKEY_ARRAY_SIZE); seed = vsi->rss_hkey_user; } if (!vsi->rss_lut_user) { @@ -5186,9 +5193,9 @@ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir, } /* Each 32 bits pointed by 'indir' is stored with a lut entry */ - if (indir) + if (rxfh->indir) for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++) - vsi->rss_lut_user[i] = (u8)(indir[i]); + vsi->rss_lut_user[i] = (u8)(rxfh->indir[i]); else i40e_fill_rss_lut(pf, vsi->rss_lut_user, I40E_HLUT_ARRAY_SIZE, vsi->rss_size); @@ -5215,11 +5222,11 @@ static u32 i40e_get_priv_flags(struct net_device *dev) u32 i, j, ret_flags = 0; for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) { - const struct i40e_priv_flags *priv_flags; + const struct i40e_priv_flags *priv_flag; - priv_flags = &i40e_gstrings_priv_flags[i]; + priv_flag = &i40e_gstrings_priv_flags[i]; - if (priv_flags->flag & pf->flags) + if (test_bit(priv_flag->bitno, pf->flags)) ret_flags |= BIT(i); } @@ -5227,11 +5234,11 @@ static u32 i40e_get_priv_flags(struct net_device *dev) return ret_flags; for (j = 0; j < I40E_GL_PRIV_FLAGS_STR_LEN; j++) { - const struct i40e_priv_flags *priv_flags; + const struct i40e_priv_flags *priv_flag; - priv_flags = &i40e_gl_gstrings_priv_flags[j]; + priv_flag = &i40e_gl_gstrings_priv_flags[j]; - if (priv_flags->flag & pf->flags) + if (test_bit(priv_flag->bitno, pf->flags)) ret_flags |= BIT(i + j); } @@ -5245,8 +5252,10 @@ static u32 i40e_get_priv_flags(struct net_device *dev) **/ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) { + DECLARE_BITMAP(changed_flags, I40E_PF_FLAGS_NBITS); + DECLARE_BITMAP(orig_flags, I40E_PF_FLAGS_NBITS); + DECLARE_BITMAP(new_flags, I40E_PF_FLAGS_NBITS); struct i40e_netdev_priv *np = netdev_priv(dev); - u64 orig_flags, new_flags, changed_flags; enum i40e_admin_queue_err adq_err; struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; @@ -5254,51 +5263,57 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) int status; u32 i, j; - orig_flags = READ_ONCE(pf->flags); - new_flags = orig_flags; + bitmap_copy(orig_flags, pf->flags, I40E_PF_FLAGS_NBITS); + bitmap_copy(new_flags, pf->flags, I40E_PF_FLAGS_NBITS); for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) { - const struct i40e_priv_flags *priv_flags; - - priv_flags = &i40e_gstrings_priv_flags[i]; + const struct i40e_priv_flags *priv_flag; + bool new_val; - if (flags & BIT(i)) - new_flags |= priv_flags->flag; - else - new_flags &= ~(priv_flags->flag); + priv_flag = &i40e_gstrings_priv_flags[i]; + new_val = (flags & BIT(i)) ? 
true : false; /* If this is a read-only flag, it can't be changed */ - if (priv_flags->read_only && - ((orig_flags ^ new_flags) & ~BIT(i))) + if (priv_flag->read_only && + test_bit(priv_flag->bitno, orig_flags) != new_val) return -EOPNOTSUPP; + + if (new_val) + set_bit(priv_flag->bitno, new_flags); + else + clear_bit(priv_flag->bitno, new_flags); } if (pf->hw.pf_id != 0) goto flags_complete; for (j = 0; j < I40E_GL_PRIV_FLAGS_STR_LEN; j++) { - const struct i40e_priv_flags *priv_flags; + const struct i40e_priv_flags *priv_flag; + bool new_val; - priv_flags = &i40e_gl_gstrings_priv_flags[j]; - - if (flags & BIT(i + j)) - new_flags |= priv_flags->flag; - else - new_flags &= ~(priv_flags->flag); + priv_flag = &i40e_gl_gstrings_priv_flags[j]; + new_val = (flags & BIT(i + j)) ? true : false; /* If this is a read-only flag, it can't be changed */ - if (priv_flags->read_only && - ((orig_flags ^ new_flags) & ~BIT(i))) + if (priv_flag->read_only && + test_bit(priv_flag->bitno, orig_flags) != new_val) return -EOPNOTSUPP; + + if (new_val) + set_bit(priv_flag->bitno, new_flags); + else + clear_bit(priv_flag->bitno, new_flags); } flags_complete: - changed_flags = orig_flags ^ new_flags; + bitmap_xor(changed_flags, orig_flags, new_flags, I40E_PF_FLAGS_NBITS); - if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) + if (test_bit(I40E_FLAG_FW_LLDP_DIS, changed_flags)) reset_needed = I40E_PF_RESET_AND_REBUILD_FLAG; - if (changed_flags & (I40E_FLAG_VEB_STATS_ENABLED | - I40E_FLAG_LEGACY_RX | I40E_FLAG_SOURCE_PRUNING_DISABLED)) + + if (test_bit(I40E_FLAG_VEB_STATS_ENA, changed_flags) || + test_bit(I40E_FLAG_LEGACY_RX_ENA, changed_flags) || + test_bit(I40E_FLAG_SOURCE_PRUNING_DIS, changed_flags)) reset_needed = BIT(__I40E_PF_RESET_REQUESTED); /* Before we finalize any flag changes, we need to perform some * checks to ensure that the changes are supported and safe. */ /* ATR eviction is not supported on all devices */ - if ((new_flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) && - !(pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)) + if (test_bit(I40E_FLAG_HW_ATR_EVICT_ENA, new_flags) && + !test_bit(I40E_HW_CAP_ATR_EVICT, pf->hw.caps)) return -EOPNOTSUPP; /* If the driver detected FW LLDP was disabled on init, this flag could * be set, however we do not support _changing_ the flag if NPAR is * enabled or FW API version < 1.7. There is no reason that this flag * should ever be unset, therefore we don't allow the user to enable/ * disable LLDP, however we _must_ not allow the user to enable/disable * LLDP with this flag on unsupported FW versions. 
*/ - if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) { - if (!(pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) { - dev_warn(&pf->pdev->dev, - "Device does not support changing FW LLDP\n"); - return -EOPNOTSUPP; - } + if (test_bit(I40E_FLAG_FW_LLDP_DIS, changed_flags) && + !test_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, pf->hw.caps)) { + dev_warn(&pf->pdev->dev, + "Device does not support changing FW LLDP\n"); + return -EOPNOTSUPP; } - if (changed_flags & I40E_FLAG_RS_FEC && + if (test_bit(I40E_FLAG_RS_FEC, changed_flags) && pf->hw.device_id != I40E_DEV_ID_25G_SFP28 && pf->hw.device_id != I40E_DEV_ID_25G_B) { dev_warn(&pf->pdev->dev, @@ -5334,7 +5348,7 @@ flags_complete: return -EOPNOTSUPP; } - if (changed_flags & I40E_FLAG_BASE_R_FEC && + if (test_bit(I40E_FLAG_BASE_R_FEC, changed_flags) && pf->hw.device_id != I40E_DEV_ID_25G_SFP28 && pf->hw.device_id != I40E_DEV_ID_25G_B && pf->hw.device_id != I40E_DEV_ID_KX_X722) { @@ -5349,17 +5363,17 @@ flags_complete: */ /* Flush current ATR settings if ATR was disabled */ - if ((changed_flags & I40E_FLAG_FD_ATR_ENABLED) && - !(new_flags & I40E_FLAG_FD_ATR_ENABLED)) { + if (test_bit(I40E_FLAG_FD_ATR_ENA, changed_flags) && + !test_bit(I40E_FLAG_FD_ATR_ENA, new_flags)) { set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state); set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state); } - if (changed_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT) { + if (test_bit(I40E_FLAG_TRUE_PROMISC_ENA, changed_flags)) { u16 sw_flags = 0, valid_flags = 0; int ret; - if (!(new_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) + if (!test_bit(I40E_FLAG_TRUE_PROMISC_ENA, new_flags)) sw_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC; valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC; ret = i40e_aq_set_switch_config(&pf->hw, sw_flags, valid_flags, @@ -5374,17 +5388,17 @@ flags_complete: } } - if ((changed_flags & I40E_FLAG_RS_FEC) || - (changed_flags & I40E_FLAG_BASE_R_FEC)) { + if (test_bit(I40E_FLAG_RS_FEC, changed_flags) || + test_bit(I40E_FLAG_BASE_R_FEC, changed_flags)) { u8 fec_cfg = 0; - if (new_flags & I40E_FLAG_RS_FEC && - new_flags & I40E_FLAG_BASE_R_FEC) { + if (test_bit(I40E_FLAG_RS_FEC, new_flags) && + test_bit(I40E_FLAG_BASE_R_FEC, new_flags)) { fec_cfg = I40E_AQ_SET_FEC_AUTO; - } else if (new_flags & I40E_FLAG_RS_FEC) { + } else if (test_bit(I40E_FLAG_RS_FEC, new_flags)) { fec_cfg = (I40E_AQ_SET_FEC_REQUEST_RS | I40E_AQ_SET_FEC_ABILITY_RS); - } else if (new_flags & I40E_FLAG_BASE_R_FEC) { + } else if (test_bit(I40E_FLAG_BASE_R_FEC, new_flags)) { fec_cfg = (I40E_AQ_SET_FEC_REQUEST_KR | I40E_AQ_SET_FEC_ABILITY_KR); } @@ -5392,35 +5406,35 @@ flags_complete: dev_warn(&pf->pdev->dev, "Cannot change FEC config\n"); } - if ((changed_flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) && - (orig_flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)) { + if (test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, changed_flags) && + test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, orig_flags)) { dev_err(&pf->pdev->dev, "Setting link-down-on-close not supported on this port (because total-port-shutdown is enabled)\n"); return -EOPNOTSUPP; } - if ((changed_flags & I40E_FLAG_VF_VLAN_PRUNING) && + if (test_bit(I40E_FLAG_VF_VLAN_PRUNING_ENA, changed_flags) && pf->num_alloc_vfs) { dev_warn(&pf->pdev->dev, "Changing vf-vlan-pruning flag while VF(s) are active is not supported\n"); return -EOPNOTSUPP; } - if ((changed_flags & I40E_FLAG_LEGACY_RX) && + if (test_bit(I40E_FLAG_LEGACY_RX_ENA, changed_flags) && I40E_2K_TOO_SMALL_WITH_PADDING) { dev_warn(&pf->pdev->dev, "2k Rx buffer is too small to fit standard MTU and skb_shared_info\n"); return -EOPNOTSUPP; } - 
if ((changed_flags & new_flags & - I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) && - (new_flags & I40E_FLAG_MFP_ENABLED)) + if (test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, changed_flags) && + test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, new_flags) && + test_bit(I40E_FLAG_MFP_ENA, new_flags)) dev_warn(&pf->pdev->dev, "Turning on link-down-on-close flag may affect other partitions\n"); - if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) { - if (new_flags & I40E_FLAG_DISABLE_FW_LLDP) { + if (test_bit(I40E_FLAG_FW_LLDP_DIS, changed_flags)) { + if (test_bit(I40E_FLAG_FW_LLDP_DIS, new_flags)) { #ifdef CONFIG_I40E_DCB i40e_dcb_sw_default_config(pf); #endif /* CONFIG_I40E_DCB */ @@ -5461,7 +5475,7 @@ flags_complete: * initialization or (b) while holding the RTNL lock, we don't need * anything fancy here. */ - pf->flags = new_flags; + bitmap_copy(pf->flags, new_flags, I40E_PF_FLAGS_NBITS); /* Issue reset to cause things to take effect, as additional bits * are added we will need to create a mask of bits requiring reset @@ -5491,7 +5505,7 @@ static int i40e_get_module_info(struct net_device *netdev, int status; /* Check if firmware supports reading module EEPROM. */ - if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) { + if (!test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) { netdev_err(vsi->netdev, "Module EEPROM memory read not supported. Please update the NVM image.\n"); return -EINVAL; } @@ -5571,8 +5585,8 @@ static int i40e_get_module_info(struct net_device *netdev, modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN; break; default: - netdev_err(vsi->netdev, "Module type unrecognized\n"); - return -EINVAL; + netdev_dbg(vsi->netdev, "SFP module type unrecognized or no SFP connector used.\n"); + return -EOPNOTSUPP; } return 0; } @@ -5768,7 +5782,7 @@ static const struct ethtool_ops i40e_ethtool_recovery_mode_ops = { static const struct ethtool_ops i40e_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | - ETHTOOL_COALESCE_MAX_FRAMES_IRQ | + ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ | ETHTOOL_COALESCE_USE_ADAPTIVE | ETHTOOL_COALESCE_RX_USECS_HIGH | ETHTOOL_COALESCE_TX_USECS_HIGH, diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index b0dc0fc6b1..f3c1df46f6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -377,7 +377,7 @@ static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue) if (tx_ring) { head = i40e_get_head(tx_ring); /* Read interrupt register */ - if (pf->flags & I40E_FLAG_MSIX_ENABLED) + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) val = rd32(&pf->hw, I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx + tx_ring->vsi->base_vector - 1)); @@ -1203,11 +1203,9 @@ static void i40e_update_pf_stats(struct i40e_pf *pf) val = rd32(hw, I40E_PRTPM_EEE_STAT); nsd->tx_lpi_status = - (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >> - I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT; + FIELD_GET(I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK, val); nsd->rx_lpi_status = - (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >> - I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT; + FIELD_GET(I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK, val); i40e_stat_update32(hw, I40E_PRTPM_TLPIC, pf->stat_offsets_loaded, &osd->tx_lpi_count, &nsd->tx_lpi_count); @@ -1215,13 +1213,13 @@ static void i40e_update_pf_stats(struct i40e_pf *pf) pf->stat_offsets_loaded, &osd->rx_lpi_count, &nsd->rx_lpi_count); - if (pf->flags & I40E_FLAG_FD_SB_ENABLED && + if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) && !test_bit(__I40E_FD_SB_AUTO_DISABLED, 
pf->state)) nsd->fd_sb_status = true; else nsd->fd_sb_status = false; - if (pf->flags & I40E_FLAG_FD_ATR_ENABLED && + if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) && !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) nsd->fd_atr_status = true; else @@ -1259,8 +1257,11 @@ int i40e_count_filters(struct i40e_vsi *vsi) int bkt; int cnt = 0; - hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) - ++cnt; + hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { + if (f->state == I40E_FILTER_NEW || + f->state == I40E_FILTER_ACTIVE) + ++cnt; + } return cnt; } @@ -1491,7 +1492,7 @@ static s16 i40e_get_vf_new_vlan(struct i40e_vsi *vsi, return pvid; is_any = (trusted || - !(pf->flags & I40E_FLAG_VF_VLAN_PRUNING)); + !test_bit(I40E_FLAG_VF_VLAN_PRUNING_ENA, pf->flags)); if ((vlan_filters && f->vlan == I40E_VLAN_ANY) || (!is_any && !vlan_filters && f->vlan == I40E_VLAN_ANY) || @@ -1896,7 +1897,7 @@ static int i40e_vsi_config_rss(struct i40e_vsi *vsi) u8 *lut; int ret; - if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)) + if (!test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps)) return 0; if (!vsi->rss_size) vsi->rss_size = min_t(int, pf->alloc_rss_size, @@ -2051,7 +2052,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, */ if (vsi->req_queue_pairs > 0) vsi->num_queue_pairs = vsi->req_queue_pairs; - else if (pf->flags & I40E_FLAG_MSIX_ENABLED) + else if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) vsi->num_queue_pairs = pf->num_lan_msix; else vsi->num_queue_pairs = 1; @@ -2064,7 +2065,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, else num_tc_qps = vsi->alloc_queue_pairs; - if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { + if (enabled_tc && test_bit(I40E_FLAG_DCB_ENA, vsi->back->flags)) { /* Find numtc from enabled TC bitmap */ for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { if (enabled_tc & BIT(i)) /* TC is enabled */ @@ -2083,7 +2084,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1; /* Do not allow use more TC queue pairs than MSI-X vectors exist */ - if (pf->flags & I40E_FLAG_MSIX_ENABLED) + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix); /* Setup queue offset/count for all TCs for given VSI */ @@ -2095,8 +2096,10 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, switch (vsi->type) { case I40E_VSI_MAIN: - if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | - I40E_FLAG_FD_ATR_ENABLED)) || + if ((!test_bit(I40E_FLAG_FD_SB_ENA, + pf->flags) && + !test_bit(I40E_FLAG_FD_ATR_ENA, + pf->flags)) || vsi->tc_config.enabled_tc != 1) { qcount = min_t(int, pf->alloc_rss_size, num_tc_qps); @@ -2482,7 +2485,7 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc) if (vsi->type == I40E_VSI_MAIN && pf->lan_veb != I40E_NO_VEB && - !(pf->flags & I40E_FLAG_MFP_ENABLED)) { + !test_bit(I40E_FLAG_MFP_ENA, pf->flags)) { /* set defport ON for Main VSI instead of true promisc * this way we will get all unicast/multicast and VLAN * promisc behavior but will not get VF or VMDq traffic @@ -2913,7 +2916,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf) */ static u16 i40e_calculate_vsi_rx_buf_len(struct i40e_vsi *vsi) { - if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) + if (!vsi->netdev || test_bit(I40E_FLAG_LEGACY_RX_ENA, vsi->back->flags)) return SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048); return PAGE_SIZE < 8192 ? 
I40E_RXBUFFER_3072 : I40E_RXBUFFER_2048; @@ -3468,8 +3471,8 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring) ring->xsk_pool = i40e_xsk_pool(ring); /* some ATR related tx ring init */ - if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) { - ring->atr_sample_rate = vsi->back->atr_sample_rate; + if (test_bit(I40E_FLAG_FD_ATR_ENA, vsi->back->flags)) { + ring->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; ring->atr_count = 0; } else { ring->atr_sample_rate = 0; @@ -3484,9 +3487,11 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring) tx_ctx.new_context = 1; tx_ctx.base = (ring->dma / 128); tx_ctx.qlen = ring->count; - tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED | - I40E_FLAG_FD_ATR_ENABLED)); - tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP); + if (test_bit(I40E_FLAG_FD_SB_ENA, vsi->back->flags) || + test_bit(I40E_FLAG_FD_ATR_ENA, vsi->back->flags)) + tx_ctx.fd_ena = 1; + if (test_bit(I40E_FLAG_PTP_ENA, vsi->back->flags)) + tx_ctx.timesync_ena = 1; /* FDIR VSI tx ring can still use RS bit and writebacks */ if (vsi->type != I40E_VSI_FDIR) tx_ctx.head_wb_ena = 1; @@ -3538,21 +3543,19 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring) else return -EINVAL; - qtx_ctl |= (ring->ch->vsi_number << - I40E_QTX_CTL_VFVM_INDX_SHIFT) & - I40E_QTX_CTL_VFVM_INDX_MASK; + qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_VFVM_INDX_MASK, + ring->ch->vsi_number); } else { if (vsi->type == I40E_VSI_VMDQ2) { qtx_ctl = I40E_QTX_CTL_VM_QUEUE; - qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) & - I40E_QTX_CTL_VFVM_INDX_MASK; + qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_VFVM_INDX_MASK, + vsi->id); } else { qtx_ctl = I40E_QTX_CTL_PF_QUEUE; } } - qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) & - I40E_QTX_CTL_PF_INDX_MASK); + qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_PF_INDX_MASK, hw->pf_id); wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl); i40e_flush(hw); @@ -3684,7 +3687,7 @@ skip: } /* configure Rx buffer alignment */ - if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) { + if (!vsi->netdev || test_bit(I40E_FLAG_LEGACY_RX_ENA, vsi->back->flags)) { if (I40E_2K_TOO_SMALL_WITH_PADDING) { dev_info(&vsi->back->pdev->dev, "2k Rx buffer is too small to fit standard MTU and skb_shared_info\n"); @@ -3782,7 +3785,7 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) u16 qoffset, qcount; int i, n; - if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { + if (!test_bit(I40E_FLAG_DCB_ENA, vsi->back->flags)) { /* Reset the TC information */ for (i = 0; i < vsi->num_queue_pairs; i++) { rx_ring = vsi->rx_rings[i]; @@ -3849,7 +3852,7 @@ static void i40e_fdir_filter_restore(struct i40e_vsi *vsi) struct i40e_pf *pf = vsi->back; struct hlist_node *node; - if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) + if (!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) return; /* Reset FDir counters as we're replaying all existing filters */ @@ -3915,6 +3918,12 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) q_vector->tx.target_itr >> 1); q_vector->tx.current_itr = q_vector->tx.target_itr; + /* Set ITR for software interrupts triggered after exiting + * busy-loop polling. 
+ */ + wr32(hw, I40E_PFINT_ITRN(I40E_SW_ITR, vector - 1), + I40E_ITR_20K); + wr32(hw, I40E_PFINT_RATEN(vector - 1), i40e_intrl_usec_to_reg(vsi->int_rate_limit)); @@ -3987,10 +3996,10 @@ static void i40e_enable_misc_int_causes(struct i40e_pf *pf) I40E_PFINT_ICR0_ENA_VFLR_MASK | I40E_PFINT_ICR0_ENA_ADMINQ_MASK; - if (pf->flags & I40E_FLAG_IWARP_ENABLED) + if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK; - if (pf->flags & I40E_FLAG_PTP) + if (test_bit(I40E_FLAG_PTP_ENA, pf->flags)) val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; wr32(hw, I40E_PFINT_ICR0_ENA, val); @@ -4226,7 +4235,7 @@ static void i40e_vsi_disable_irq(struct i40e_vsi *vsi) } /* disable each interrupt */ - if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { for (i = vsi->base_vector; i < (vsi->num_q_vectors + vsi->base_vector); i++) wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0); @@ -4252,7 +4261,7 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi) struct i40e_pf *pf = vsi->back; int i; - if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { for (i = 0; i < vsi->num_q_vectors; i++) i40e_irq_dynamic_enable(vsi, i); } else { @@ -4273,7 +4282,7 @@ static void i40e_free_misc_vector(struct i40e_pf *pf) wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0); i40e_flush(&pf->hw); - if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) { + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) { free_irq(pf->msix_entries[0].vector, pf); clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state); } @@ -4308,7 +4317,7 @@ static irqreturn_t i40e_intr(int irq, void *data) (icr0 & I40E_PFINT_ICR0_SWINT_MASK)) pf->sw_int_count++; - if ((pf->flags & I40E_FLAG_IWARP_ENABLED) && + if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags) && (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) { ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK; dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n"); @@ -4359,8 +4368,7 @@ static irqreturn_t i40e_intr(int irq, void *data) set_bit(__I40E_RESET_INTR_RECEIVED, pf->state); ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK; val = rd32(hw, I40E_GLGEN_RSTAT); - val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK) - >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT; + val = FIELD_GET(I40E_GLGEN_RSTAT_RESET_TYPE_MASK, val); if (val == I40E_RESET_CORER) { pf->corer_count++; } else if (val == I40E_RESET_GLOBR) { @@ -4501,7 +4509,7 @@ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget) i += tx_ring->count; tx_ring->next_to_clean = i; - if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) + if (test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags)) i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx); return budget > 0; @@ -4614,9 +4622,9 @@ static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename) struct i40e_pf *pf = vsi->back; int err; - if (pf->flags & I40E_FLAG_MSIX_ENABLED) + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) err = i40e_vsi_request_irq_msix(vsi, basename); - else if (pf->flags & I40E_FLAG_MSI_ENABLED) + else if (test_bit(I40E_FLAG_MSI_ENA, pf->flags)) err = request_irq(pf->pdev->irq, i40e_intr, 0, pf->int_name, pf); else @@ -4648,7 +4656,7 @@ static void i40e_netpoll(struct net_device *netdev) if (test_bit(__I40E_VSI_DOWN, vsi->state)) return; - if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { for (i = 0; i < vsi->num_q_vectors; i++) i40e_msix_clean_rings(0, vsi->q_vectors[i]); } else { @@ -4927,27 +4935,23 @@ int i40e_vsi_start_rings(struct i40e_vsi *vsi) void 
i40e_vsi_stop_rings(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; - int pf_q, err, q_end; + u32 pf_q, tx_q_end, rx_q_end; /* When port TX is suspended, don't wait */ if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state)) return i40e_vsi_stop_rings_no_wait(vsi); - q_end = vsi->base_queue + vsi->num_queue_pairs; - for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) - i40e_pre_tx_queue_cfg(&pf->hw, (u32)pf_q, false); + tx_q_end = vsi->base_queue + + vsi->alloc_queue_pairs * (i40e_enabled_xdp_vsi(vsi) ? 2 : 1); + for (pf_q = vsi->base_queue; pf_q < tx_q_end; pf_q++) + i40e_pre_tx_queue_cfg(&pf->hw, pf_q, false); - for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) { - err = i40e_control_wait_rx_q(pf, pf_q, false); - if (err) - dev_info(&pf->pdev->dev, - "VSI seid %d Rx ring %d disable timeout\n", - vsi->seid, pf_q); - } + rx_q_end = vsi->base_queue + vsi->num_queue_pairs; + for (pf_q = vsi->base_queue; pf_q < rx_q_end; pf_q++) + i40e_control_rx_q(pf, pf_q, false); msleep(I40E_DISABLE_TX_GAP_MSEC); - pf_q = vsi->base_queue; - for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) + for (pf_q = vsi->base_queue; pf_q < tx_q_end; pf_q++) wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0); i40e_vsi_wait_queues_disabled(vsi); @@ -4988,7 +4992,7 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi) u32 val, qp; int i; - if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { if (!vsi->q_vectors) return; @@ -5022,8 +5026,8 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi) * next_q field of the registers. */ val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1)); - qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) - >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; + qp = FIELD_GET(I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK, + val); val |= I40E_QUEUE_END_OF_LIST << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val); @@ -5045,8 +5049,8 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi) val = rd32(hw, I40E_QINT_TQCTL(qp)); - next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK) - >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT; + next = FIELD_GET(I40E_QINT_TQCTL_NEXTQ_INDX_MASK, + val); val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK | I40E_QINT_TQCTL_MSIX0_INDX_MASK | @@ -5064,8 +5068,7 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi) free_irq(pf->pdev->irq, pf); val = rd32(hw, I40E_PFINT_LNKLST0); - qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) - >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; + qp = FIELD_GET(I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK, val); val |= I40E_QUEUE_END_OF_LIST << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT; wr32(hw, I40E_PFINT_LNKLST0, val); @@ -5150,16 +5153,17 @@ static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi) static void i40e_reset_interrupt_capability(struct i40e_pf *pf) { /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */ - if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { pci_disable_msix(pf->pdev); kfree(pf->msix_entries); pf->msix_entries = NULL; kfree(pf->irq_pile); pf->irq_pile = NULL; - } else if (pf->flags & I40E_FLAG_MSI_ENABLED) { + } else if (test_bit(I40E_FLAG_MSI_ENA, pf->flags)) { pci_disable_msi(pf->pdev); } - pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED); + clear_bit(I40E_FLAG_MSI_ENA, pf->flags); + clear_bit(I40E_FLAG_MSIX_ENA, pf->flags); } /** @@ -5499,11 +5503,11 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc; /* If neither MQPRIO nor DCB is enabled, then always use single TC */ - if (!(pf->flags & 
I40E_FLAG_DCB_ENABLED)) + if (!test_bit(I40E_FLAG_DCB_ENA, pf->flags)) return 1; /* SFP mode will be enabled for all TCs on port */ - if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) + if (!test_bit(I40E_FLAG_MFP_ENA, pf->flags)) return i40e_dcb_get_num_tc(dcbcfg); /* MFP mode return count of enabled TCs for this PF */ @@ -5533,11 +5537,11 @@ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf) /* If neither MQPRIO nor DCB is enabled for this PF then just return * default TC */ - if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) + if (!test_bit(I40E_FLAG_DCB_ENA, pf->flags)) return I40E_DEFAULT_TRAFFIC_CLASS; /* SFP mode we want PF to be enabled for all TCs */ - if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) + if (!test_bit(I40E_FLAG_MFP_ENA, pf->flags)) return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config); /* MFP enabled and iSCSI PF type */ @@ -5626,7 +5630,7 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, /* There is no need to reset BW when mqprio mode is on. */ if (i40e_is_tc_mqprio_enabled(pf)) return 0; - if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) { + if (!vsi->mqprio_qopt.qopt.hw && !test_bit(I40E_FLAG_DCB_ENA, pf->flags)) { ret = i40e_set_bw_limit(vsi, vsi->seid, 0); if (ret) dev_info(&pf->pdev->dev, @@ -5879,7 +5883,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) } vsi->reconfig_rss = false; } - if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) { + if (test_bit(I40E_FLAG_IWARP_ENA, vsi->back->flags)) { ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA; @@ -6292,7 +6296,7 @@ static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid, if (ch->type == I40E_VSI_VMDQ2) ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2; - if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) { + if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) { ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); ctxt.info.switch_id = @@ -6597,8 +6601,8 @@ int i40e_create_queue_channel(struct i40e_vsi *vsi, * VSI to be added switch to VEB mode. 
*/ - if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { - pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; + if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) { + set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); if (vsi->type == I40E_VSI_MAIN) { if (i40e_is_tc_mqprio_enabled(pf)) @@ -7009,9 +7013,9 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg) if (need_reconfig) { /* Enable DCB tagging only when more than one TC */ if (new_numtc > 1) - pf->flags |= I40E_FLAG_DCB_ENABLED; + set_bit(I40E_FLAG_DCB_ENA, pf->flags); else - pf->flags &= ~I40E_FLAG_DCB_ENABLED; + clear_bit(I40E_FLAG_DCB_ENA, pf->flags); set_bit(__I40E_PORT_SUSPENDED, pf->state); /* Reconfiguration needed quiesce all VSIs */ @@ -7101,7 +7105,7 @@ out: set_bit(__I40E_CLIENT_L2_CHANGE, pf->state); } /* registers are set, lets apply */ - if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB) + if (test_bit(I40E_HW_CAP_USE_SET_LLDP_MIB, pf->hw.caps)) ret = i40e_hw_set_dcb_config(pf, new_cfg); } @@ -7122,7 +7126,7 @@ int i40e_dcb_sw_default_config(struct i40e_pf *pf) struct i40e_hw *hw = &pf->hw; int err; - if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB) { + if (test_bit(I40E_HW_CAP_USE_SET_LLDP_MIB, pf->hw.caps)) { /* Update the local cached instance with TC0 ETS */ memset(&pf->tmp_cfg, 0, sizeof(struct i40e_dcbx_config)); pf->tmp_cfg.etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING; @@ -7183,12 +7187,12 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) /* Do not enable DCB for SW1 and SW2 images even if the FW is capable * Also do not enable DCBx if FW LLDP agent is disabled */ - if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT) { + if (test_bit(I40E_HW_CAP_NO_DCB_SUPPORT, pf->hw.caps)) { dev_info(&pf->pdev->dev, "DCB is not supported.\n"); err = -EOPNOTSUPP; goto out; } - if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) { + if (test_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags)) { dev_info(&pf->pdev->dev, "FW LLDP is disabled, attempting SW DCB\n"); err = i40e_dcb_sw_default_config(pf); if (err) { @@ -7199,8 +7203,8 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) pf->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; /* at init capable but disabled */ - pf->flags |= I40E_FLAG_DCB_CAPABLE; - pf->flags &= ~I40E_FLAG_DCB_ENABLED; + set_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); + clear_bit(I40E_FLAG_DCB_ENA, pf->flags); goto out; } err = i40e_init_dcb(hw, true); @@ -7215,20 +7219,20 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_IEEE; - pf->flags |= I40E_FLAG_DCB_CAPABLE; + set_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); /* Enable DCB tagging only when more than one TC * or explicitly disable if only one TC */ if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) - pf->flags |= I40E_FLAG_DCB_ENABLED; + set_bit(I40E_FLAG_DCB_ENA, pf->flags); else - pf->flags &= ~I40E_FLAG_DCB_ENABLED; + clear_bit(I40E_FLAG_DCB_ENA, pf->flags); dev_dbg(&pf->pdev->dev, "DCBX offload is supported for this PF.\n"); } } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) { dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n"); - pf->flags |= I40E_FLAG_DISABLE_FW_LLDP; + set_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags); } else { dev_info(&pf->pdev->dev, "Query for DCB configuration failed, err %pe aq_err %s\n", @@ -7388,7 +7392,7 @@ static int i40e_up_complete(struct i40e_vsi *vsi) struct i40e_pf *pf = vsi->back; int err; - if (pf->flags & I40E_FLAG_MSIX_ENABLED) + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) i40e_vsi_configure_msix(vsi); else i40e_configure_msi_and_legacy(vsi); @@ -7492,7 +7496,7 @@ 
static int i40e_force_link_state(struct i40e_pf *pf, bool is_up) * and its speed values are OK, no need for a flap * if non_zero_phy_type was set, still need to force up */ - if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) + if (test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags)) non_zero_phy_type = true; else if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0) return 0; @@ -7508,7 +7512,7 @@ static int i40e_force_link_state(struct i40e_pf *pf, bool is_up) non_zero_phy_type ? (u8)((mask >> 32) & 0xff) : 0; /* Copy the old settings, except of phy_type */ config.abilities = abilities.abilities; - if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) { + if (test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags)) { if (is_up) config.abilities |= I40E_AQ_PHY_ENABLE_LINK; else @@ -7558,8 +7562,8 @@ int i40e_up(struct i40e_vsi *vsi) int err; if (vsi->type == I40E_VSI_MAIN && - (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED || - vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)) + (test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags) || + test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, vsi->back->flags))) i40e_force_link_state(vsi->back, true); err = i40e_vsi_configure(vsi); @@ -7587,8 +7591,8 @@ void i40e_down(struct i40e_vsi *vsi) i40e_vsi_disable_irq(vsi); i40e_vsi_stop_rings(vsi); if (vsi->type == I40E_VSI_MAIN && - (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED || - vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)) + (test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags) || + test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, vsi->back->flags))) i40e_force_link_state(vsi->back, false); i40e_napi_disable_all(vsi); @@ -7994,7 +7998,7 @@ static void *i40e_fwd_add(struct net_device *netdev, struct net_device *vdev) struct i40e_fwd_adapter *fwd; int avail_macvlan, ret; - if ((pf->flags & I40E_FLAG_DCB_ENABLED)) { + if (test_bit(I40E_FLAG_DCB_ENA, pf->flags)) { netdev_info(netdev, "Macvlans are not supported when DCB is enabled\n"); return ERR_PTR(-EINVAL); } @@ -8189,23 +8193,23 @@ static int i40e_setup_tc(struct net_device *netdev, void *type_data) hw = mqprio_qopt->qopt.hw; mode = mqprio_qopt->mode; if (!hw) { - pf->flags &= ~I40E_FLAG_TC_MQPRIO; + clear_bit(I40E_FLAG_TC_MQPRIO_ENA, pf->flags); memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt)); goto config_tc; } /* Check if MFP enabled */ - if (pf->flags & I40E_FLAG_MFP_ENABLED) { + if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) { netdev_info(netdev, "Configuring TC not supported in MFP mode\n"); return ret; } switch (mode) { case TC_MQPRIO_MODE_DCB: - pf->flags &= ~I40E_FLAG_TC_MQPRIO; + clear_bit(I40E_FLAG_TC_MQPRIO_ENA, pf->flags); /* Check if DCB enabled to continue */ - if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) { + if (!test_bit(I40E_FLAG_DCB_ENA, pf->flags)) { netdev_info(netdev, "DCB is not enabled for adapter\n"); return ret; @@ -8219,20 +8223,20 @@ static int i40e_setup_tc(struct net_device *netdev, void *type_data) } break; case TC_MQPRIO_MODE_CHANNEL: - if (pf->flags & I40E_FLAG_DCB_ENABLED) { + if (test_bit(I40E_FLAG_DCB_ENA, pf->flags)) { netdev_info(netdev, "Full offload of TC Mqprio options is not supported when DCB is enabled\n"); return ret; } - if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) + if (!test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) return ret; ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt); if (ret) return ret; memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt)); - pf->flags |= I40E_FLAG_TC_MQPRIO; - pf->flags &= ~I40E_FLAG_DCB_ENABLED; + 
set_bit(I40E_FLAG_TC_MQPRIO_ENA, pf->flags); + clear_bit(I40E_FLAG_DCB_ENA, pf->flags); break; default: return -EINVAL; @@ -8816,11 +8820,11 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi, return -EINVAL; } - if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) { + if (test_bit(I40E_FLAG_FD_SB_ENA, vsi->back->flags)) { dev_err(&vsi->back->pdev->dev, "Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n"); - vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED; - vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER; + clear_bit(I40E_FLAG_FD_SB_ENA, vsi->back->flags); + set_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, vsi->back->flags); } filter = kzalloc(sizeof(*filter), GFP_KERNEL); @@ -8916,11 +8920,11 @@ static int i40e_delete_clsflower(struct i40e_vsi *vsi, pf->num_cloud_filters--; if (!pf->num_cloud_filters) - if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) && - !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) { - pf->flags |= I40E_FLAG_FD_SB_ENABLED; - pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER; - pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE; + if (test_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, pf->flags) && + !test_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags)) { + set_bit(I40E_FLAG_FD_SB_ENA, pf->flags); + clear_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, pf->flags); + clear_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); } return 0; } @@ -9221,11 +9225,11 @@ static void i40e_cloud_filter_exit(struct i40e_pf *pf) } pf->num_cloud_filters = 0; - if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) && - !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) { - pf->flags |= I40E_FLAG_FD_SB_ENABLED; - pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER; - pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE; + if (test_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, pf->flags) && + !test_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags)) { + set_bit(I40E_FLAG_FD_SB_ENA, pf->flags); + clear_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, pf->flags); + clear_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); } } @@ -9313,7 +9317,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired) i40e_prep_for_reset(pf); i40e_reset_and_rebuild(pf, true, lock_acquired); dev_info(&pf->pdev->dev, - pf->flags & I40E_FLAG_DISABLE_FW_LLDP ? + test_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags) ? 
"FW LLDP is disabled\n" : "FW LLDP is enabled\n"); @@ -9428,12 +9432,12 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, if (I40E_IS_X710TL_DEVICE(hw->device_id) && (hw->phy.link_info.link_speed & ~(I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB)) && - !(pf->flags & I40E_FLAG_DCB_CAPABLE)) + !test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags)) /* let firmware decide if the DCB should be disabled */ - pf->flags |= I40E_FLAG_DCB_CAPABLE; + set_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); /* Not DCB capable or capability disabled */ - if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) + if (!test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags)) return ret; /* Ignore if event is not for Nearest Bridge */ @@ -9469,7 +9473,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, (I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) { dev_warn(&pf->pdev->dev, "DCB is not supported for X710-T*L 2.5/5G speeds\n"); - pf->flags &= ~I40E_FLAG_DCB_CAPABLE; + clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); } else { dev_info(&pf->pdev->dev, "Failed querying DCB configuration data from firmware, err %pe aq_err %s\n", @@ -9497,9 +9501,9 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, /* Enable DCB tagging only when more than one TC */ if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) - pf->flags |= I40E_FLAG_DCB_ENABLED; + set_bit(I40E_FLAG_DCB_ENA, pf->flags); else - pf->flags &= ~I40E_FLAG_DCB_ENABLED; + clear_bit(I40E_FLAG_DCB_ENA, pf->flags); set_bit(__I40E_PORT_SUSPENDED, pf->state); /* Reconfiguration needed quiesce all VSIs */ @@ -9567,18 +9571,18 @@ static void i40e_handle_lan_overflow_event(struct i40e_pf *pf, dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n", queue, qtx_ctl); + if (FIELD_GET(I40E_QTX_CTL_PFVF_Q_MASK, qtx_ctl) != + I40E_QTX_CTL_VF_QUEUE) + return; + /* Queue belongs to VF, find the VF and issue VF reset */ - if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK) - >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) { - vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK) - >> I40E_QTX_CTL_VFVM_INDX_SHIFT); - vf_id -= hw->func_caps.vf_base_id; - vf = &pf->vf[vf_id]; - i40e_vc_notify_vf_reset(vf); - /* Allow VF to process pending reset notification */ - msleep(20); - i40e_reset_vf(vf, false); - } + vf_id = FIELD_GET(I40E_QTX_CTL_VFVM_INDX_MASK, qtx_ctl); + vf_id -= hw->func_caps.vf_base_id; + vf = &pf->vf[vf_id]; + i40e_vc_notify_vf_reset(vf); + /* Allow VF to process pending reset notification */ + msleep(20); + i40e_reset_vf(vf, false); } /** @@ -9604,8 +9608,7 @@ u32 i40e_get_current_fd_count(struct i40e_pf *pf) val = rd32(&pf->hw, I40E_PFQF_FDSTAT); fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) + - ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >> - I40E_PFQF_FDSTAT_BEST_CNT_SHIFT); + FIELD_GET(I40E_PFQF_FDSTAT_BEST_CNT_MASK, val); return fcnt_prog; } @@ -9619,8 +9622,7 @@ u32 i40e_get_global_fd_count(struct i40e_pf *pf) val = rd32(&pf->hw, I40E_GLQF_FDCNT_0); fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) + - ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >> - I40E_GLQF_FDCNT_0_BESTCNT_SHIFT); + FIELD_GET(I40E_GLQF_FDCNT_0_BESTCNT_MASK, val); return fcnt_prog; } @@ -9631,7 +9633,7 @@ u32 i40e_get_global_fd_count(struct i40e_pf *pf) static void i40e_reenable_fdir_sb(struct i40e_pf *pf) { if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) - if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && + if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) && (I40E_DEBUG_FD & pf->hw.debug_mask)) dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n"); } 
@@ -9652,7 +9654,7 @@ static void i40e_reenable_fdir_atr(struct i40e_pf *pf) I40E_L3_SRC_MASK | I40E_L3_DST_MASK | I40E_L4_SRC_MASK | I40E_L4_DST_MASK); - if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && + if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) && (I40E_DEBUG_FD & pf->hw.debug_mask)) dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n"); } @@ -9967,7 +9969,7 @@ static void i40e_link_event(struct i40e_pf *pf) if (pf->vf) i40e_vc_notify_link_state(pf); - if (pf->flags & I40E_FLAG_PTP) + if (test_bit(I40E_FLAG_PTP_ENA, pf->flags)) i40e_ptp_set_increment(pf); #ifdef CONFIG_I40E_DCB if (new_link == old_link) @@ -9984,13 +9986,13 @@ static void i40e_link_event(struct i40e_pf *pf) memset(&pf->tmp_cfg, 0, sizeof(pf->tmp_cfg)); err = i40e_dcb_sw_default_config(pf); if (err) { - pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | - I40E_FLAG_DCB_ENABLED); + clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); + clear_bit(I40E_FLAG_DCB_ENA, pf->flags); } else { pf->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; - pf->flags |= I40E_FLAG_DCB_CAPABLE; - pf->flags &= ~I40E_FLAG_DCB_ENABLED; + set_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); + clear_bit(I40E_FLAG_DCB_ENA, pf->flags); } } #endif /* CONFIG_I40E_DCB */ @@ -10015,7 +10017,7 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf) return; pf->service_timer_previous = jiffies; - if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) || + if (test_bit(I40E_FLAG_LINK_POLLING_ENA, pf->flags) || test_bit(__I40E_TEMP_LINK_POLLING, pf->state)) i40e_link_event(pf); @@ -10026,7 +10028,7 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf) if (pf->vsi[i] && pf->vsi[i]->netdev) i40e_update_stats(pf->vsi[i]); - if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) { + if (test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags)) { /* Update the stats for the active switching components */ for (i = 0; i < I40E_MAX_VEB; i++) if (pf->veb[i]) @@ -10115,7 +10117,7 @@ static void i40e_handle_link_event(struct i40e_pf *pf, if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) && (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) && (!(status->link_info & I40E_AQ_LINK_UP)) && - (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) { + (!test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))) { dev_err(&pf->pdev->dev, "Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n"); dev_err(&pf->pdev->dev, @@ -10143,7 +10145,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) return; /* check for error indications */ - val = rd32(&pf->hw, pf->hw.aq.arq.len); + val = rd32(&pf->hw, I40E_PF_ARQLEN); oldval = val; if (val & I40E_PF_ARQLEN_ARQVFE_MASK) { if (hw->debug_mask & I40E_DEBUG_AQ) @@ -10162,9 +10164,9 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK; } if (oldval != val) - wr32(&pf->hw, pf->hw.aq.arq.len, val); + wr32(&pf->hw, I40E_PF_ARQLEN, val); - val = rd32(&pf->hw, pf->hw.aq.asq.len); + val = rd32(&pf->hw, I40E_PF_ATQLEN); oldval = val; if (val & I40E_PF_ATQLEN_ATQVFE_MASK) { if (pf->hw.debug_mask & I40E_DEBUG_AQ) @@ -10182,7 +10184,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK; } if (oldval != val) - wr32(&pf->hw, pf->hw.aq.asq.len, val); + wr32(&pf->hw, I40E_PF_ATQLEN, val); event.buf_len = I40E_MAX_AQ_BUF_SIZE; event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); @@ -10242,9 +10244,9 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) opcode); break; } - } while (i++ < 
pf->adminq_work_limit); + } while (i++ < I40E_AQ_WORK_LIMIT); - if (i < pf->adminq_work_limit) + if (i < I40E_AQ_WORK_LIMIT) clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state); /* re-enable Admin queue interrupt cause */ @@ -10421,7 +10423,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb) if (ret) goto end_reconstitute; - if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) + if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) veb->bridge_mode = BRIDGE_MODE_VEB; else veb->bridge_mode = BRIDGE_MODE_VEPA; @@ -10566,7 +10568,7 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf) wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]); } - if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) + if (!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) return; /* find existing VSI and see if it needs configuring */ @@ -10578,8 +10580,8 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf) pf->vsi[pf->lan_vsi]->seid, 0); if (!vsi) { dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n"); - pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; - pf->flags |= I40E_FLAG_FD_SB_INACTIVE; + clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags); + set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); return; } } @@ -10957,14 +10959,14 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) i40e_aq_set_dcb_parameters(hw, false, NULL); dev_warn(&pf->pdev->dev, "DCB is not supported for X710-T*L 2.5/5G speeds\n"); - pf->flags &= ~I40E_FLAG_DCB_CAPABLE; + clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); } else { i40e_aq_set_dcb_parameters(hw, true, NULL); ret = i40e_init_pf_dcb(pf); if (ret) { dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret); - pf->flags &= ~I40E_FLAG_DCB_CAPABLE; + clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); /* Continue without DCB enabled */ } } @@ -11085,7 +11087,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) wr32(hw, I40E_REG_MSS, val); } - if (pf->hw_features & I40E_HW_RESTART_AUTONEG) { + if (test_bit(I40E_HW_CAP_RESTART_AUTONEG, pf->hw.caps)) { msleep(75); ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); if (ret) @@ -11095,7 +11097,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) pf->hw.aq.asq_last_status)); } /* reinit the misc interrupt */ - if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { ret = i40e_setup_misc_vector(pf); if (ret) goto end_unlock; @@ -11202,14 +11204,10 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) /* find what triggered the MDD event */ reg = rd32(hw, I40E_GL_MDET_TX); if (reg & I40E_GL_MDET_TX_VALID_MASK) { - u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >> - I40E_GL_MDET_TX_PF_NUM_SHIFT; - u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >> - I40E_GL_MDET_TX_VF_NUM_SHIFT; - u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >> - I40E_GL_MDET_TX_EVENT_SHIFT; - u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >> - I40E_GL_MDET_TX_QUEUE_SHIFT) - + u8 pf_num = FIELD_GET(I40E_GL_MDET_TX_PF_NUM_MASK, reg); + u16 vf_num = FIELD_GET(I40E_GL_MDET_TX_VF_NUM_MASK, reg); + u8 event = FIELD_GET(I40E_GL_MDET_TX_EVENT_MASK, reg); + u16 queue = FIELD_GET(I40E_GL_MDET_TX_QUEUE_MASK, reg) - pf->hw.func_caps.base_queue; if (netif_msg_tx_err(pf)) dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n", @@ -11219,12 +11217,9 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) } reg = rd32(hw, I40E_GL_MDET_RX); if (reg & I40E_GL_MDET_RX_VALID_MASK) { - u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >> - 
I40E_GL_MDET_RX_FUNCTION_SHIFT; - u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >> - I40E_GL_MDET_RX_EVENT_SHIFT; - u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >> - I40E_GL_MDET_RX_QUEUE_SHIFT) - + u8 func = FIELD_GET(I40E_GL_MDET_RX_FUNCTION_MASK, reg); + u8 event = FIELD_GET(I40E_GL_MDET_RX_EVENT_MASK, reg); + u16 queue = FIELD_GET(I40E_GL_MDET_RX_QUEUE_MASK, reg) - pf->hw.func_caps.base_queue; if (netif_msg_rx_err(pf)) dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n", @@ -11370,7 +11365,7 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi) if (!vsi->num_rx_desc) vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, I40E_REQ_DESCRIPTOR_MULTIPLE); - if (pf->flags & I40E_FLAG_MSIX_ENABLED) + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) vsi->num_q_vectors = pf->num_lan_msix; else vsi->num_q_vectors = 1; @@ -11688,7 +11683,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) ring->count = vsi->num_tx_desc; ring->size = 0; ring->dcb_tc = 0; - if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) + if (test_bit(I40E_HW_CAP_WB_ON_ITR, vsi->back->hw.caps)) ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; ring->itr_setting = pf->tx_itr_default; WRITE_ONCE(vsi->tx_rings[i], ring++); @@ -11705,7 +11700,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) ring->count = vsi->num_tx_desc; ring->size = 0; ring->dcb_tc = 0; - if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) + if (test_bit(I40E_HW_CAP_WB_ON_ITR, vsi->back->hw.caps)) ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; set_ring_xdp(ring); ring->itr_setting = pf->tx_itr_default; @@ -11769,7 +11764,7 @@ static int i40e_init_msix(struct i40e_pf *pf) int v_actual; int iwarp_requested = 0; - if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) + if (!test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) return -ENODEV; /* The number of vectors we'll request will be comprised of: @@ -11808,7 +11803,7 @@ static int i40e_init_msix(struct i40e_pf *pf) vectors_left -= pf->num_lan_msix; /* reserve one vector for sideband flow director */ - if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { + if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) { if (vectors_left) { pf->num_fdsb_msix = 1; v_budget++; @@ -11819,7 +11814,7 @@ static int i40e_init_msix(struct i40e_pf *pf) } /* can we reserve enough for iWARP? 
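 * Every pf->flags access in this patch, including the test just below,
 * uses the new bitmap form: the old u32 of OR-ed I40E_FLAG_* masks is
 * now a bitmap indexed by an enum, so each flag update is an atomic
 * bitop.  A minimal sketch with the names this series introduces:
 *
 *	DECLARE_BITMAP(flags, I40E_PF_FLAGS_NBITS);
 *
 *	bitmap_zero(flags, I40E_PF_FLAGS_NBITS);
 *	set_bit(I40E_FLAG_IWARP_ENA, flags);
 *	if (test_bit(I40E_FLAG_IWARP_ENA, flags))
 *		clear_bit(I40E_FLAG_MSI_ENA, flags);
 *
 * One consequence: clearing several flags no longer folds into one
 * masked store, which is why single-line mask updates expand into runs
 * of clear_bit() calls elsewhere in this diff.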
*/ - if (pf->flags & I40E_FLAG_IWARP_ENABLED) { + if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) { iwarp_requested = pf->num_iwarp_msix; if (!vectors_left) @@ -11831,7 +11826,7 @@ static int i40e_init_msix(struct i40e_pf *pf) } /* any vectors left over go for VMDq support */ - if (pf->flags & I40E_FLAG_VMDQ_ENABLED) { + if (test_bit(I40E_FLAG_VMDQ_ENA, pf->flags)) { if (!vectors_left) { pf->num_vmdq_msix = 0; pf->num_vmdq_qps = 0; @@ -11888,7 +11883,7 @@ static int i40e_init_msix(struct i40e_pf *pf) v_actual = i40e_reserve_msix_vectors(pf, v_budget); if (v_actual < I40E_MIN_MSIX) { - pf->flags &= ~I40E_FLAG_MSIX_ENABLED; + clear_bit(I40E_FLAG_MSIX_ENA, pf->flags); kfree(pf->msix_entries); pf->msix_entries = NULL; pci_disable_msix(pf->pdev); @@ -11926,7 +11921,7 @@ static int i40e_init_msix(struct i40e_pf *pf) pf->num_lan_msix = 1; break; case 3: - if (pf->flags & I40E_FLAG_IWARP_ENABLED) { + if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) { pf->num_lan_msix = 1; pf->num_iwarp_msix = 1; } else { @@ -11934,7 +11929,7 @@ static int i40e_init_msix(struct i40e_pf *pf) } break; default: - if (pf->flags & I40E_FLAG_IWARP_ENABLED) { + if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) { pf->num_iwarp_msix = min_t(int, (vec / 3), iwarp_requested); pf->num_vmdq_vsis = min_t(int, (vec / 3), @@ -11943,7 +11938,7 @@ static int i40e_init_msix(struct i40e_pf *pf) pf->num_vmdq_vsis = min_t(int, (vec / 2), I40E_DEFAULT_NUM_VMDQ_VSI); } - if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { + if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) { pf->num_fdsb_msix = 1; vec--; } @@ -11955,22 +11950,20 @@ static int i40e_init_msix(struct i40e_pf *pf) } } - if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && - (pf->num_fdsb_msix == 0)) { + if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) && pf->num_fdsb_msix == 0) { dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n"); - pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; - pf->flags |= I40E_FLAG_FD_SB_INACTIVE; + clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags); + set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); } - if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && - (pf->num_vmdq_msix == 0)) { + if (test_bit(I40E_FLAG_VMDQ_ENA, pf->flags) && pf->num_vmdq_msix == 0) { dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n"); - pf->flags &= ~I40E_FLAG_VMDQ_ENABLED; + clear_bit(I40E_FLAG_VMDQ_ENA, pf->flags); } - if ((pf->flags & I40E_FLAG_IWARP_ENABLED) && - (pf->num_iwarp_msix == 0)) { + if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags) && + pf->num_iwarp_msix == 0) { dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n"); - pf->flags &= ~I40E_FLAG_IWARP_ENABLED; + clear_bit(I40E_FLAG_IWARP_ENA, pf->flags); } i40e_debug(&pf->hw, I40E_DEBUG_INIT, "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n", @@ -12024,7 +12017,7 @@ static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi) int err, v_idx, num_q_vectors; /* if not MSIX, give the one vector only to the LAN VSI */ - if (pf->flags & I40E_FLAG_MSIX_ENABLED) + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) num_q_vectors = vsi->num_q_vectors; else if (vsi == pf->vsi[pf->lan_vsi]) num_q_vectors = 1; @@ -12055,38 +12048,39 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf) int vectors = 0; ssize_t size; - if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { vectors = i40e_init_msix(pf); if (vectors < 0) { - pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | - I40E_FLAG_IWARP_ENABLED | - I40E_FLAG_RSS_ENABLED | - I40E_FLAG_DCB_CAPABLE | - I40E_FLAG_DCB_ENABLED | - 
I40E_FLAG_SRIOV_ENABLED | - I40E_FLAG_FD_SB_ENABLED | - I40E_FLAG_FD_ATR_ENABLED | - I40E_FLAG_VMDQ_ENABLED); - pf->flags |= I40E_FLAG_FD_SB_INACTIVE; + clear_bit(I40E_FLAG_MSIX_ENA, pf->flags); + clear_bit(I40E_FLAG_IWARP_ENA, pf->flags); + clear_bit(I40E_FLAG_RSS_ENA, pf->flags); + clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); + clear_bit(I40E_FLAG_DCB_ENA, pf->flags); + clear_bit(I40E_FLAG_SRIOV_ENA, pf->flags); + clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags); + clear_bit(I40E_FLAG_FD_ATR_ENA, pf->flags); + clear_bit(I40E_FLAG_VMDQ_ENA, pf->flags); + set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); /* rework the queue expectations without MSIX */ i40e_determine_queue_usage(pf); } } - if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) && - (pf->flags & I40E_FLAG_MSI_ENABLED)) { + if (!test_bit(I40E_FLAG_MSIX_ENA, pf->flags) && + test_bit(I40E_FLAG_MSI_ENA, pf->flags)) { dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n"); vectors = pci_enable_msi(pf->pdev); if (vectors < 0) { dev_info(&pf->pdev->dev, "MSI init failed - %d\n", vectors); - pf->flags &= ~I40E_FLAG_MSI_ENABLED; + clear_bit(I40E_FLAG_MSI_ENA, pf->flags); } vectors = 1; /* one MSI or Legacy vector */ } - if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED))) + if (!test_bit(I40E_FLAG_MSI_ENA, pf->flags) && + !test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n"); /* set up vector assignment tracking */ @@ -12119,7 +12113,8 @@ static int i40e_restore_interrupt_scheme(struct i40e_pf *pf) * scheme. We need to re-enabled them here in order to attempt to * re-acquire the MSI or MSI-X vectors */ - pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED); + set_bit(I40E_FLAG_MSI_ENA, pf->flags); + set_bit(I40E_FLAG_MSIX_ENA, pf->flags); err = i40e_init_interrupt_scheme(pf); if (err) @@ -12141,7 +12136,7 @@ static int i40e_restore_interrupt_scheme(struct i40e_pf *pf) if (err) goto err_unwind; - if (pf->flags & I40E_FLAG_IWARP_ENABLED) + if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) i40e_client_update_msix_info(pf); return 0; @@ -12169,7 +12164,7 @@ static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf) { int err; - if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { err = i40e_setup_misc_vector(pf); if (err) { @@ -12179,7 +12174,7 @@ static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf) return err; } } else { - u32 flags = pf->flags & I40E_FLAG_MSI_ENABLED ? 0 : IRQF_SHARED; + u32 flags = test_bit(I40E_FLAG_MSI_ENA, pf->flags) ? 
0 : IRQF_SHARED; err = request_irq(pf->pdev->irq, i40e_intr, flags, pf->int_name, pf); @@ -12383,7 +12378,7 @@ int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) { struct i40e_pf *pf = vsi->back; - if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) + if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps)) return i40e_config_rss_aq(vsi, seed, lut, lut_size); else return i40e_config_rss_reg(vsi, seed, lut, lut_size); @@ -12402,7 +12397,7 @@ int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) { struct i40e_pf *pf = vsi->back; - if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) + if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps)) return i40e_get_rss_aq(vsi, seed, lut, lut_size); else return i40e_get_rss_reg(vsi, seed, lut, lut_size); @@ -12505,7 +12500,7 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; int new_rss_size; - if (!(pf->flags & I40E_FLAG_RSS_ENABLED)) + if (!test_bit(I40E_FLAG_RSS_ENA, pf->flags)) return 0; queue_count = min_t(int, queue_count, num_online_cpus()); @@ -12738,9 +12733,9 @@ static int i40e_sw_init(struct i40e_pf *pf) u16 pow; /* Set default capability flags */ - pf->flags = I40E_FLAG_RX_CSUM_ENABLED | - I40E_FLAG_MSI_ENABLED | - I40E_FLAG_MSIX_ENABLED; + bitmap_zero(pf->flags, I40E_PF_FLAGS_NBITS); + set_bit(I40E_FLAG_MSI_ENA, pf->flags); + set_bit(I40E_FLAG_MSIX_ENA, pf->flags); /* Set default ITR */ pf->rx_itr_default = I40E_ITR_RX_DEF; @@ -12760,14 +12755,14 @@ static int i40e_sw_init(struct i40e_pf *pf) pf->rss_size_max = min_t(int, pf->rss_size_max, pow); if (pf->hw.func_caps.rss) { - pf->flags |= I40E_FLAG_RSS_ENABLED; + set_bit(I40E_FLAG_RSS_ENA, pf->flags); pf->alloc_rss_size = min_t(int, pf->rss_size_max, num_online_cpus()); } /* MFP mode enabled */ if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) { - pf->flags |= I40E_FLAG_MFP_ENABLED; + set_bit(I40E_FLAG_MFP_ENA, pf->flags); dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); if (i40e_get_partition_bw_setting(pf)) { dev_warn(&pf->pdev->dev, @@ -12784,84 +12779,31 @@ static int i40e_sw_init(struct i40e_pf *pf) if ((pf->hw.func_caps.fd_filters_guaranteed > 0) || (pf->hw.func_caps.fd_filters_best_effort > 0)) { - pf->flags |= I40E_FLAG_FD_ATR_ENABLED; - pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; - if (pf->flags & I40E_FLAG_MFP_ENABLED && + set_bit(I40E_FLAG_FD_ATR_ENA, pf->flags); + if (test_bit(I40E_FLAG_MFP_ENA, pf->flags) && pf->hw.num_partitions > 1) dev_info(&pf->pdev->dev, "Flow Director Sideband mode Disabled in MFP mode\n"); else - pf->flags |= I40E_FLAG_FD_SB_ENABLED; + set_bit(I40E_FLAG_FD_SB_ENA, pf->flags); pf->fdir_pf_filter_count = pf->hw.func_caps.fd_filters_guaranteed; pf->hw.fdir_shared_filter_count = pf->hw.func_caps.fd_filters_best_effort; } - if (pf->hw.mac.type == I40E_MAC_X722) { - pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE | - I40E_HW_128_QP_RSS_CAPABLE | - I40E_HW_ATR_EVICT_CAPABLE | - I40E_HW_WB_ON_ITR_CAPABLE | - I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE | - I40E_HW_NO_PCI_LINK_CHECK | - I40E_HW_USE_SET_LLDP_MIB | - I40E_HW_GENEVE_OFFLOAD_CAPABLE | - I40E_HW_PTP_L4_CAPABLE | - I40E_HW_WOL_MC_MAGIC_PKT_WAKE | - I40E_HW_OUTER_UDP_CSUM_CAPABLE); - -#define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03 - if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) != - I40E_FDEVICT_PCTYPE_DEFAULT) { - dev_warn(&pf->pdev->dev, - "FD EVICT PCTYPES are not right, disable FD HW EVICT\n"); - pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE; - } - } else if ((pf->hw.aq.api_maj_ver > 1) || - ((pf->hw.aq.api_maj_ver == 1) && 
- (pf->hw.aq.api_min_ver > 4))) { - /* Supported in FW API version higher than 1.4 */ - pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE; - } - /* Enable HW ATR eviction if possible */ - if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE) - pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED; - - if ((pf->hw.mac.type == I40E_MAC_XL710) && - (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || - (pf->hw.aq.fw_maj_ver < 4))) { - pf->hw_features |= I40E_HW_RESTART_AUTONEG; - /* No DCB support for FW < v4.33 */ - pf->hw_features |= I40E_HW_NO_DCB_SUPPORT; - } - - /* Disable FW LLDP if FW < v4.3 */ - if ((pf->hw.mac.type == I40E_MAC_XL710) && - (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) || - (pf->hw.aq.fw_maj_ver < 4))) - pf->hw_features |= I40E_HW_STOP_FW_LLDP; - - /* Use the FW Set LLDP MIB API if FW > v4.40 */ - if ((pf->hw.mac.type == I40E_MAC_XL710) && - (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) || - (pf->hw.aq.fw_maj_ver >= 5))) - pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB; - - /* Enable PTP L4 if FW > v6.0 */ - if (pf->hw.mac.type == I40E_MAC_XL710 && - pf->hw.aq.fw_maj_ver >= 6) - pf->hw_features |= I40E_HW_PTP_L4_CAPABLE; + if (test_bit(I40E_HW_CAP_ATR_EVICT, pf->hw.caps)) + set_bit(I40E_FLAG_HW_ATR_EVICT_ENA, pf->flags); if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) { pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI; - pf->flags |= I40E_FLAG_VMDQ_ENABLED; + set_bit(I40E_FLAG_VMDQ_ENA, pf->flags); pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf); } if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) { - pf->flags |= I40E_FLAG_IWARP_ENABLED; + set_bit(I40E_FLAG_IWARP_ENA, pf->flags); /* IWARP needs one extra vector for CQP just like MISC.*/ pf->num_iwarp_msix = (int)num_online_cpus() + 1; } @@ -12871,25 +12813,23 @@ static int i40e_sw_init(struct i40e_pf *pf) * if NPAR is functioning so unset this hw flag in this case. 
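 * The clear_bit() below operates on pf->hw.caps, the bitmap that
 * replaces the old pf->hw_features and hw->flags words; hardware
 * quirks become I40E_HW_CAP_* bits.  The X722 and firmware-version
 * capability block deleted above is presumably folded into the shared
 * code that now populates hw->caps (not visible in this file).  A
 * feature gate then reads as a plain bitop, e.g.:
 *
 *	if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps))
 *		return i40e_config_rss_aq(vsi, seed, lut, lut_size);
 *	return i40e_config_rss_reg(vsi, seed, lut, lut_size);
 *
 * mirroring what i40e_config_rss() does elsewhere in this diff.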
*/ if (pf->hw.mac.type == I40E_MAC_XL710 && - pf->hw.func_caps.npar_enable && - (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) - pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE; + pf->hw.func_caps.npar_enable) + clear_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, pf->hw.caps); #ifdef CONFIG_PCI_IOV if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) { pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; - pf->flags |= I40E_FLAG_SRIOV_ENABLED; + set_bit(I40E_FLAG_SRIOV_ENA, pf->flags); pf->num_req_vfs = min_t(int, pf->hw.func_caps.num_vfs, I40E_MAX_VF_COUNT); } #endif /* CONFIG_PCI_IOV */ - pf->eeprom_version = 0xDEAD; pf->lan_veb = I40E_NO_VEB; pf->lan_vsi = I40E_NO_VSI; /* By default FW has this off for performance reasons */ - pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED; + clear_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags); /* set up queue assignment tracking */ size = sizeof(struct i40e_lump_tracking) @@ -12908,8 +12848,8 @@ static int i40e_sw_init(struct i40e_pf *pf) /* Link down on close must be on when total port shutdown * is enabled for a given port */ - pf->flags |= (I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED | - I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED); + set_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags); + set_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags); dev_info(&pf->pdev->dev, "total-port-shutdown was enabled, link-down-on-close is forced on\n"); } @@ -12935,31 +12875,31 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) */ if (features & NETIF_F_NTUPLE) { /* Enable filters and mark for reset */ - if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) + if (!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) need_reset = true; /* enable FD_SB only if there is MSI-X vector and no cloud * filters exist */ if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) { - pf->flags |= I40E_FLAG_FD_SB_ENABLED; - pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE; + set_bit(I40E_FLAG_FD_SB_ENA, pf->flags); + clear_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); } } else { /* turn off filters, mark for reset and clear SW filter list */ - if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { + if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) { need_reset = true; i40e_fdir_filter_exit(pf); } - pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; + clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags); clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state); - pf->flags |= I40E_FLAG_FD_SB_INACTIVE; + set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); /* reset fd counters */ pf->fd_add_err = 0; pf->fd_atr_cnt = 0; /* if ATR was auto disabled it can be re-enabled. 
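 * Note the split this conversion preserves: run-time conditions stay
 * in pf->state (the __I40E_* bits, already a bitmap) while
 * configuration moves into pf->flags.  The ATR re-enable below uses
 * both; sketched without the debug-mask gate:
 *
 *	if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state) &&
 *	    test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags))
 *		dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
 *
 * test_and_clear_bit() is atomic, so the message can fire only once
 * per auto-disable episode.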
*/ if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) - if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && + if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) && (I40E_DEBUG_FD & pf->hw.debug_mask)) dev_info(&pf->pdev->dev, "ATR re-enabled.\n"); } @@ -13108,7 +13048,7 @@ static int i40e_get_phys_port_id(struct net_device *netdev, struct i40e_pf *pf = np->vsi->back; struct i40e_hw *hw = &pf->hw; - if (!(pf->hw_features & I40E_HW_PORT_ID_VALID)) + if (!test_bit(I40E_HW_CAP_PORT_ID_VALID, pf->hw.caps)) return -EOPNOTSUPP; ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id)); @@ -13137,7 +13077,7 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct i40e_pf *pf = np->vsi->back; int err = 0; - if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED)) + if (!test_bit(I40E_FLAG_SRIOV_ENA, pf->flags)) return -EOPNOTSUPP; if (vid) { @@ -13237,9 +13177,9 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev, veb->bridge_mode = mode; /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */ if (mode == BRIDGE_MODE_VEB) - pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; + set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); else - pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; + clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); break; } @@ -13573,7 +13513,7 @@ static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair) struct i40e_hw *hw = &pf->hw; /* All rings in a qp belong to the same qvector. */ - if (pf->flags & I40E_FLAG_MSIX_ENABLED) + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx); else i40e_irq_dynamic_enable_icr0(pf); @@ -13598,7 +13538,7 @@ static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair) * * All rings in a qp belong to the same qvector. */ - if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { u32 intpf = vsi->base_vector + rxr->q_vector->v_idx; wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0); @@ -13783,7 +13723,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) NETIF_F_RXCSUM | 0; - if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE)) + if (!test_bit(I40E_HW_CAP_OUTER_UDP_CSUM, pf->hw.caps)) netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; netdev->udp_tunnel_nic_info = &pf->udp_tunnel_nic; @@ -13819,7 +13759,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; - if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) + if (!test_bit(I40E_FLAG_MFP_ENA, pf->flags)) hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC; netdev->hw_features |= hw_features | NETIF_F_LOOPBACK; @@ -14010,7 +13950,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) * negative logic - if it's set, we need to fiddle with * the VSI to disable source pruning. 
*/ - if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) { + if (test_bit(I40E_FLAG_SOURCE_PRUNING_DIS, pf->flags)) { memset(&ctxt, 0, sizeof(ctxt)); ctxt.seid = pf->main_vsi_seid; ctxt.pf_num = pf->hw.pf_id; @@ -14032,7 +13972,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) } /* MFP mode setup queue map and update VSI */ - if ((pf->flags & I40E_FLAG_MFP_ENABLED) && + if (test_bit(I40E_FLAG_MFP_ENA, pf->flags) && !(pf->hw.func_caps.iscsi)) { /* NIC type PF */ memset(&ctxt, 0, sizeof(ctxt)); ctxt.seid = pf->main_vsi_seid; @@ -14080,7 +14020,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ctxt.uplink_seid = vsi->uplink_seid; ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; ctxt.flags = I40E_AQ_VSI_TYPE_PF; - if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) && + if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags) && (i40e_is_vsi_uplink_mode_veb(vsi))) { ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); @@ -14128,7 +14068,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); } - if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) { + if (test_bit(I40E_FLAG_IWARP_ENA, vsi->back->flags)) { ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); ctxt.info.queueing_opt_flags |= @@ -14344,7 +14284,7 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi) /* In Legacy mode, we do not have to get any other vector since we * piggyback on the misc/ICR0 for queue interrupts. */ - if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) + if (!test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) return ret; if (vsi->num_q_vectors) vsi->base_vector = i40e_get_lump(pf, pf->irq_pile, @@ -14511,9 +14451,9 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, * already enabled, in which case we can't force VEPA * mode. 
*/ - if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { + if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) { veb->bridge_mode = BRIDGE_MODE_VEPA; - pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; + clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); } i40e_config_bridge_mode(veb); } @@ -14609,12 +14549,16 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, break; } - if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) && - (vsi->type == I40E_VSI_VMDQ2)) { + if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps) && + vsi->type == I40E_VSI_VMDQ2) { ret = i40e_vsi_config_rss(vsi); + if (ret) + goto err_config; } return vsi; +err_config: + i40e_vsi_clear_rings(vsi); err_rings: i40e_vsi_free_q_vectors(vsi); err_msix: @@ -14852,7 +14796,7 @@ void i40e_veb_release(struct i40e_veb *veb) static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) { struct i40e_pf *pf = veb->pf; - bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED); + bool enable_stats = !!test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags); int ret; ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid, @@ -15041,12 +14985,11 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf, * the PF's VSI */ pf->mac_seid = uplink_seid; - pf->pf_seid = downlink_seid; pf->main_vsi_seid = seid; if (printconfig) dev_info(&pf->pdev->dev, "pf_seid=%d main_vsi_seid=%d\n", - pf->pf_seid, pf->main_vsi_seid); + downlink_seid, pf->main_vsi_seid); break; case I40E_SWITCH_ELEMENT_TYPE_PF: case I40E_SWITCH_ELEMENT_TYPE_VF: @@ -15152,7 +15095,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui */ if ((pf->hw.pf_id == 0) && - !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) { + !test_bit(I40E_FLAG_TRUE_PROMISC_ENA, pf->flags)) { flags = I40E_AQ_SET_SWITCH_CFG_PROMISC; pf->last_sw_conf_flags = flags; } @@ -15219,16 +15162,12 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui /* enable RSS in the HW, even for only one queue, as the stack can use * the hash */ - if ((pf->flags & I40E_FLAG_RSS_ENABLED)) + if (test_bit(I40E_FLAG_RSS_ENA, pf->flags)) i40e_pf_config_rss(pf); /* fill in link information and enable LSE reporting */ i40e_link_event(pf); - /* Initialize user-specific link properties */ - pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info & - I40E_AQ_AN_COMPLETED) ? 
true : false); - i40e_ptp_init(pf); if (!lock_acquired) @@ -15261,42 +15200,42 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) queues_left = pf->hw.func_caps.num_tx_qp; if ((queues_left == 1) || - !(pf->flags & I40E_FLAG_MSIX_ENABLED)) { + !test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { /* one qp for PF, no queues for anything else */ queues_left = 0; pf->alloc_rss_size = pf->num_lan_qps = 1; /* make sure all the fancies are disabled */ - pf->flags &= ~(I40E_FLAG_RSS_ENABLED | - I40E_FLAG_IWARP_ENABLED | - I40E_FLAG_FD_SB_ENABLED | - I40E_FLAG_FD_ATR_ENABLED | - I40E_FLAG_DCB_CAPABLE | - I40E_FLAG_DCB_ENABLED | - I40E_FLAG_SRIOV_ENABLED | - I40E_FLAG_VMDQ_ENABLED); - pf->flags |= I40E_FLAG_FD_SB_INACTIVE; - } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED | - I40E_FLAG_FD_SB_ENABLED | - I40E_FLAG_FD_ATR_ENABLED | - I40E_FLAG_DCB_CAPABLE))) { + clear_bit(I40E_FLAG_RSS_ENA, pf->flags); + clear_bit(I40E_FLAG_IWARP_ENA, pf->flags); + clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags); + clear_bit(I40E_FLAG_FD_ATR_ENA, pf->flags); + clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); + clear_bit(I40E_FLAG_DCB_ENA, pf->flags); + clear_bit(I40E_FLAG_SRIOV_ENA, pf->flags); + clear_bit(I40E_FLAG_VMDQ_ENA, pf->flags); + set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); + } else if (!test_bit(I40E_FLAG_RSS_ENA, pf->flags) && + !test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) && + !test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) && + !test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags)) { /* one qp for PF */ pf->alloc_rss_size = pf->num_lan_qps = 1; queues_left -= pf->num_lan_qps; - pf->flags &= ~(I40E_FLAG_RSS_ENABLED | - I40E_FLAG_IWARP_ENABLED | - I40E_FLAG_FD_SB_ENABLED | - I40E_FLAG_FD_ATR_ENABLED | - I40E_FLAG_DCB_ENABLED | - I40E_FLAG_VMDQ_ENABLED); - pf->flags |= I40E_FLAG_FD_SB_INACTIVE; + clear_bit(I40E_FLAG_RSS_ENA, pf->flags); + clear_bit(I40E_FLAG_IWARP_ENA, pf->flags); + clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags); + clear_bit(I40E_FLAG_FD_ATR_ENA, pf->flags); + clear_bit(I40E_FLAG_DCB_ENA, pf->flags); + clear_bit(I40E_FLAG_VMDQ_ENA, pf->flags); + set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); } else { /* Not enough queues for all TCs */ - if ((pf->flags & I40E_FLAG_DCB_CAPABLE) && - (queues_left < I40E_MAX_TRAFFIC_CLASS)) { - pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | - I40E_FLAG_DCB_ENABLED); + if (test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags) && + queues_left < I40E_MAX_TRAFFIC_CLASS) { + clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); + clear_bit(I40E_FLAG_DCB_ENA, pf->flags); dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n"); } @@ -15309,24 +15248,24 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) queues_left -= pf->num_lan_qps; } - if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { + if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) { if (queues_left > 1) { queues_left -= 1; /* save 1 queue for FD */ } else { - pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; - pf->flags |= I40E_FLAG_FD_SB_INACTIVE; + clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags); + set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); dev_info(&pf->pdev->dev, "not enough queues for Flow Director. 
Flow Director feature is disabled\n"); } } - if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && + if (test_bit(I40E_FLAG_SRIOV_ENA, pf->flags) && pf->num_vf_qps && pf->num_req_vfs && queues_left) { pf->num_req_vfs = min_t(int, pf->num_req_vfs, (queues_left / pf->num_vf_qps)); queues_left -= (pf->num_req_vfs * pf->num_vf_qps); } - if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && + if (test_bit(I40E_FLAG_VMDQ_ENA, pf->flags) && pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) { pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis, (queues_left / pf->num_vmdq_qps)); @@ -15337,7 +15276,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) dev_dbg(&pf->pdev->dev, "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n", pf->hw.func_caps.num_tx_qp, - !!(pf->flags & I40E_FLAG_FD_SB_ENABLED), + !!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags), pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs, pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps, queues_left); @@ -15361,7 +15300,8 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf) settings->hash_lut_size = I40E_HASH_LUT_SIZE_128; /* Flow Director is enabled */ - if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)) + if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) || + test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags)) settings->enable_fdir = true; /* Ethtype and MACVLAN filters enabled for PF */ @@ -15393,21 +15333,21 @@ static void i40e_print_features(struct i40e_pf *pf) i += scnprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d", pf->hw.func_caps.num_vsis, pf->vsi[pf->lan_vsi]->num_queue_pairs); - if (pf->flags & I40E_FLAG_RSS_ENABLED) + if (test_bit(I40E_FLAG_RSS_ENA, pf->flags)) i += scnprintf(&buf[i], REMAIN(i), " RSS"); - if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) + if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags)) i += scnprintf(&buf[i], REMAIN(i), " FD_ATR"); - if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { + if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) { i += scnprintf(&buf[i], REMAIN(i), " FD_SB"); i += scnprintf(&buf[i], REMAIN(i), " NTUPLE"); } - if (pf->flags & I40E_FLAG_DCB_CAPABLE) + if (test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags)) i += scnprintf(&buf[i], REMAIN(i), " DCB"); i += scnprintf(&buf[i], REMAIN(i), " VxLAN"); i += scnprintf(&buf[i], REMAIN(i), " Geneve"); - if (pf->flags & I40E_FLAG_PTP) + if (test_bit(I40E_FLAG_PTP_ENA, pf->flags)) i += scnprintf(&buf[i], REMAIN(i), " PTP"); - if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) + if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) i += scnprintf(&buf[i], REMAIN(i), " VEB"); else i += scnprintf(&buf[i], REMAIN(i), " VEPA"); @@ -15438,22 +15378,26 @@ static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf) * @fec_cfg: FEC option to set in flags * @flags: ptr to flags in which we set FEC option **/ -void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags) +void i40e_set_fec_in_flags(u8 fec_cfg, unsigned long *flags) { - if (fec_cfg & I40E_AQ_SET_FEC_AUTO) - *flags |= I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC; + if (fec_cfg & I40E_AQ_SET_FEC_AUTO) { + set_bit(I40E_FLAG_RS_FEC, flags); + set_bit(I40E_FLAG_BASE_R_FEC, flags); + } if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) || (fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) { - *flags |= I40E_FLAG_RS_FEC; - *flags &= ~I40E_FLAG_BASE_R_FEC; + set_bit(I40E_FLAG_RS_FEC, flags); + clear_bit(I40E_FLAG_BASE_R_FEC, flags); } if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) || (fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) { - *flags |= I40E_FLAG_BASE_R_FEC; - *flags &= ~I40E_FLAG_RS_FEC; + set_bit(I40E_FLAG_BASE_R_FEC, flags); + 
clear_bit(I40E_FLAG_RS_FEC, flags); + } + if (fec_cfg == 0) { + clear_bit(I40E_FLAG_RS_FEC, flags); + clear_bit(I40E_FLAG_BASE_R_FEC, flags); } - if (fec_cfg == 0) - *flags &= ~(I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC); } /** @@ -15697,7 +15641,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) #endif /* CONFIG_I40E_DCB */ struct i40e_pf *pf; struct i40e_hw *hw; - static u16 pfs_found; u16 wol_nvm_bits; char nvm_ver[32]; u16 link_status; @@ -15775,7 +15718,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->bus.device = PCI_SLOT(pdev->devfn); hw->bus.func = PCI_FUNC(pdev->devfn); hw->bus.bus_id = pdev->bus->number; - pf->instance = pfs_found; /* Select something other than the 802.1ad ethertype for the * switch to use internally and drop on ingress. @@ -15837,7 +15779,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE; hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE; - pf->adminq_work_limit = I40E_AQ_WORK_LIMIT; snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc", @@ -15879,15 +15820,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->vendor_id, hw->device_id, hw->subsystem_vendor_id, hw->subsystem_device_id); - if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && - hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) + if (i40e_is_aq_api_ver_ge(hw, I40E_FW_API_VERSION_MAJOR, + I40E_FW_MINOR_VERSION(hw) + 1)) dev_dbg(&pdev->dev, "The driver for the device detected a newer version of the NVM image v%u.%u than v%u.%u.\n", hw->aq.api_maj_ver, hw->aq.api_min_ver, I40E_FW_API_VERSION_MAJOR, I40E_FW_MINOR_VERSION(hw)); - else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) + else if (i40e_is_aq_api_ver_lt(hw, 1, 4)) dev_info(&pdev->dev, "The driver for the device detected an older version of the NVM image v%u.%u than expected v%u.%u. Please update the NVM image.\n", hw->aq.api_maj_ver, @@ -15934,7 +15875,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * Ignore error return codes because if it was already disabled via * hardware settings this will fail */ - if (pf->hw_features & I40E_HW_STOP_FW_LLDP) { + if (test_bit(I40E_HW_CAP_STOP_FW_LLDP, pf->hw.caps)) { dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n"); i40e_aq_stop_lldp(hw, true, false, NULL); } @@ -15951,7 +15892,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ether_addr_copy(hw->mac.perm_addr, hw->mac.addr); i40e_get_port_mac_addr(hw, hw->mac.port_addr); if (is_valid_ether_addr(hw->mac.port_addr)) - pf->hw_features |= I40E_HW_PORT_ID_VALID; + set_bit(I40E_HW_CAP_PORT_ID_VALID, pf->hw.caps); i40e_ptp_alloc_pins(pf); pci_set_drvdata(pdev, pf); @@ -15961,10 +15902,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) status = i40e_get_fw_lldp_status(&pf->hw, &lldp_status); (!status && lldp_status == I40E_GET_FW_LLDP_STATUS_ENABLED) ? - (pf->flags &= ~I40E_FLAG_DISABLE_FW_LLDP) : - (pf->flags |= I40E_FLAG_DISABLE_FW_LLDP); + (clear_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags)) : + (set_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags)); dev_info(&pdev->dev, - (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) ? + test_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags) ? 
"FW LLDP is disabled\n" : "FW LLDP is enabled\n"); @@ -15974,7 +15915,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = i40e_init_pf_dcb(pf); if (err) { dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err); - pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED); + clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); + clear_bit(I40E_FLAG_DCB_ENA, pf->flags); /* Continue without DCB enabled */ } #endif /* CONFIG_I40E_DCB */ @@ -16042,11 +15984,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) #ifdef CONFIG_PCI_IOV /* prep for VF support */ - if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && - (pf->flags & I40E_FLAG_MSIX_ENABLED) && + if (test_bit(I40E_FLAG_SRIOV_ENA, pf->flags) && + test_bit(I40E_FLAG_MSIX_ENA, pf->flags) && !test_bit(__I40E_BAD_EEPROM, pf->state)) { if (pci_num_vf(pdev)) - pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; + set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); } #endif err = i40e_setup_pf_switch(pf, false, false); @@ -16087,7 +16029,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) wr32(hw, I40E_REG_MSS, val); } - if (pf->hw_features & I40E_HW_RESTART_AUTONEG) { + if (test_bit(I40E_HW_CAP_RESTART_AUTONEG, pf->hw.caps)) { msleep(75); err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); if (err) @@ -16107,7 +16049,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * the misc functionality and queue processing is combined in * the same vector and that gets setup at open. */ - if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { err = i40e_setup_misc_vector(pf); if (err) { dev_info(&pdev->dev, @@ -16120,8 +16062,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) #ifdef CONFIG_PCI_IOV /* prep for VF support */ - if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && - (pf->flags & I40E_FLAG_MSIX_ENABLED) && + if (test_bit(I40E_FLAG_SRIOV_ENA, pf->flags) && + test_bit(I40E_FLAG_MSIX_ENA, pf->flags) && !test_bit(__I40E_BAD_EEPROM, pf->state)) { /* disable link interrupts for VFs */ val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM); @@ -16141,7 +16083,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } #endif /* CONFIG_PCI_IOV */ - if (pf->flags & I40E_FLAG_IWARP_ENABLED) { + if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) { pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile, pf->num_iwarp_msix, I40E_IWARP_IRQ_PILE_ID); @@ -16149,7 +16091,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_info(&pdev->dev, "failed to get tracking for %d vectors for IWARP err=%d\n", pf->num_iwarp_msix, pf->iwarp_base_vector); - pf->flags &= ~I40E_FLAG_IWARP_ENABLED; + clear_bit(I40E_FLAG_IWARP_ENA, pf->flags); } } @@ -16163,7 +16105,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) round_jiffies(jiffies + pf->service_timer_period)); /* add this PF to client device list and launch a client service task */ - if (pf->flags & I40E_FLAG_IWARP_ENABLED) { + if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) { err = i40e_lan_add_device(pf); if (err) dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n", @@ -16176,7 +16118,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * and will report PCI Gen 1 x 1 by default so don't bother * checking them. 
*/ - if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) { + if (!test_bit(I40E_HW_CAP_NO_PCI_LINK_CHECK, pf->hw.caps)) { char speed[PCI_SPEED_SIZE] = "Unknown"; char width[PCI_WIDTH_SIZE] = "Unknown"; @@ -16230,7 +16172,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pf->hw.phy.link_info.requested_speeds = abilities.link_speed; /* set the FEC config due to the board capabilities */ - i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags); + i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, pf->flags); /* get the supported phy types from the fw */ err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL); @@ -16241,11 +16183,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* make sure the MFS hasn't been set lower than the default */ #define MAX_FRAME_SIZE_DEFAULT 0x2600 - val = (rd32(&pf->hw, I40E_PRTGL_SAH) & - I40E_PRTGL_SAH_MFS_MASK) >> I40E_PRTGL_SAH_MFS_SHIFT; + val = FIELD_GET(I40E_PRTGL_SAH_MFS_MASK, + rd32(&pf->hw, I40E_PRTGL_SAH)); if (val < MAX_FRAME_SIZE_DEFAULT) - dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n", - pf->hw.port, val); + dev_warn(&pdev->dev, "MFS for port %x (%d) has been set below the default (%d)\n", + pf->hw.port, val, MAX_FRAME_SIZE_DEFAULT); /* Add a filter to drop all Flow control frames from any VSI from being * transmitted. By doing so we stop a malicious VF from sending out @@ -16257,10 +16199,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pf->main_vsi_seid); if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) || - (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4)) - pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS; + (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4)) + set_bit(I40E_HW_CAP_PHY_CONTROLS_LEDS, pf->hw.caps); if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722) - pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER; + set_bit(I40E_HW_CAP_CRT_RETIMER, pf->hw.caps); /* print a string summarizing features */ i40e_print_features(pf); @@ -16329,10 +16271,10 @@ static void i40e_remove(struct pci_dev *pdev) usleep_range(1000, 2000); set_bit(__I40E_IN_REMOVE, pf->state); - if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { + if (test_bit(I40E_FLAG_SRIOV_ENA, pf->flags)) { set_bit(__I40E_VF_RESETS_DISABLED, pf->state); i40e_free_vfs(pf); - pf->flags &= ~I40E_FLAG_SRIOV_ENABLED; + clear_bit(I40E_FLAG_SRIOV_ENA, pf->flags); } /* no more scheduling of any task */ set_bit(__I40E_SUSPENDED, pf->state); @@ -16387,7 +16329,7 @@ static void i40e_remove(struct pci_dev *pdev) i40e_cloud_filter_exit(pf); /* remove attached clients */ - if (pf->flags & I40E_FLAG_IWARP_ENABLED) { + if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) { ret_code = i40e_lan_del_device(pf); if (ret_code) dev_warn(&pdev->dev, "Failed to delete client device: %d\n", @@ -16406,7 +16348,7 @@ static void i40e_remove(struct pci_dev *pdev) unmap: /* Free MSI/legacy interrupt 0 when in recovery mode. 
*/ if (test_bit(__I40E_RECOVERY_MODE, pf->state) && - !(pf->flags & I40E_FLAG_MSIX_ENABLED)) + !test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) free_irq(pf->pdev->irq, pf); /* shutdown the adminq */ @@ -16625,7 +16567,8 @@ static void i40e_shutdown(struct pci_dev *pdev) */ i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); - if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE)) + if (test_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, pf->hw.caps) && + pf->wol_en) i40e_enable_mc_magic_wake(pf); i40e_prep_for_reset(pf); @@ -16637,7 +16580,7 @@ static void i40e_shutdown(struct pci_dev *pdev) /* Free MSI/legacy interrupt 0 when in recovery mode. */ if (test_bit(__I40E_RECOVERY_MODE, pf->state) && - !(pf->flags & I40E_FLAG_MSIX_ENABLED)) + !test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) free_irq(pf->pdev->irq, pf); /* Since we're going to destroy queues during the @@ -16678,7 +16621,8 @@ static int __maybe_unused i40e_suspend(struct device *dev) */ i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); - if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE)) + if (test_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, pf->hw.caps) && + pf->wol_en) i40e_enable_mc_magic_wake(pf); /* Since we're going to destroy queues during the @@ -16789,7 +16733,7 @@ static int __init i40e_init_module(void) * since we need to be able to guarantee forward progress even under * memory pressure. */ - i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name); + i40e_wq = alloc_workqueue("%s", 0, 0, i40e_driver_name); if (!i40e_wq) { pr_err("%s: Failed to create workqueue\n", i40e_driver_name); return -ENOMEM; diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index e5aec09d58..605fd82f5d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -27,8 +27,7 @@ int i40e_init_nvm(struct i40e_hw *hw) * as the blank mode may be used in the factory line. 
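 * The sr_size read below is typical of this file's FIELD_GET()
 * conversions.  FIELD_GET(), from <linux/bitfield.h>, derives the
 * shift from the mask at compile time, making the matching *_SHIFT
 * defines redundant.  For any contiguous mask:
 *
 *	#define EXAMPLE_MASK	GENMASK(9, 4)
 *
 *	field = FIELD_GET(EXAMPLE_MASK, reg);
 *
 * evaluates to (reg & EXAMPLE_MASK) >> 4.  EXAMPLE_MASK is a made-up
 * name used only to show the shape of the helper.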
*/ gens = rd32(hw, I40E_GLNVM_GENS); - sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >> - I40E_GLNVM_GENS_SR_SIZE_SHIFT); + sr_size = FIELD_GET(I40E_GLNVM_GENS_SR_SIZE_MASK, gens); /* Switching to words (sr_size contains power of 2KB) */ nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB; @@ -194,9 +193,8 @@ static int i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset, ret_code = i40e_poll_sr_srctl_done_bit(hw); if (!ret_code) { sr_reg = rd32(hw, I40E_GLNVM_SRDATA); - *data = (u16)((sr_reg & - I40E_GLNVM_SRDATA_RDDATA_MASK) - >> I40E_GLNVM_SRDATA_RDDATA_SHIFT); + *data = FIELD_GET(I40E_GLNVM_SRDATA_RDDATA_MASK, + sr_reg); } } if (ret_code) @@ -292,7 +290,7 @@ static int i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset, static int __i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, u16 *data) { - if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) + if (test_bit(I40E_HW_CAP_AQ_SRCTL_ACCESS_ENABLE, hw->caps)) return i40e_read_nvm_word_aq(hw, offset, data); return i40e_read_nvm_word_srctl(hw, offset, data); @@ -311,14 +309,14 @@ int i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, { int ret_code = 0; - if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK) + if (test_bit(I40E_HW_CAP_NVM_READ_REQUIRES_LOCK, hw->caps)) ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (ret_code) return ret_code; ret_code = __i40e_read_nvm_word(hw, offset, data); - if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK) + if (test_bit(I40E_HW_CAP_NVM_READ_REQUIRES_LOCK, hw->caps)) i40e_release_nvm(hw); return ret_code; @@ -500,7 +498,7 @@ static int __i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, u16 *words, u16 *data) { - if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) + if (test_bit(I40E_HW_CAP_AQ_SRCTL_ACCESS_ENABLE, hw->caps)) return i40e_read_nvm_buffer_aq(hw, offset, words, data); return i40e_read_nvm_buffer_srctl(hw, offset, words, data); @@ -522,7 +520,7 @@ int i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, { int ret_code = 0; - if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) { + if (test_bit(I40E_HW_CAP_AQ_SRCTL_ACCESS_ENABLE, hw->caps)) { ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (!ret_code) { ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, @@ -772,13 +770,12 @@ static inline u8 i40e_nvmupd_get_module(u32 val) } static inline u8 i40e_nvmupd_get_transaction(u32 val) { - return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT); + return FIELD_GET(I40E_NVM_TRANS_MASK, val); } static inline u8 i40e_nvmupd_get_preservation_flags(u32 val) { - return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >> - I40E_NVM_PRESERVATION_FLAGS_SHIFT); + return FIELD_GET(I40E_NVM_PRESERVATION_FLAGS_MASK, val); } static const char * const i40e_nvm_update_state_str[] = { diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index 0011620420..ce1f11b8ad 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -501,4 +501,73 @@ i40e_add_pinfo_to_list(struct i40e_hw *hw, /* i40e_ddp */ int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash); +/* Firmware and AdminQ version check helpers */ + +/** + * i40e_is_aq_api_ver_ge + * @hw: pointer to i40e_hw structure + * @maj: API major value to compare + * @min: API minor value to compare + * + * Assert whether current HW API version is greater/equal than provided. 
+ **/ +static inline bool i40e_is_aq_api_ver_ge(struct i40e_hw *hw, u16 maj, u16 min) +{ + return (hw->aq.api_maj_ver > maj || + (hw->aq.api_maj_ver == maj && hw->aq.api_min_ver >= min)); +} + +/** + * i40e_is_aq_api_ver_lt + * @hw: pointer to i40e_hw structure + * @maj: API major value to compare + * @min: API minor value to compare + * + * Assert whether current HW API version is less than provided. + **/ +static inline bool i40e_is_aq_api_ver_lt(struct i40e_hw *hw, u16 maj, u16 min) +{ + return !i40e_is_aq_api_ver_ge(hw, maj, min); +} + +/** + * i40e_is_fw_ver_ge + * @hw: pointer to i40e_hw structure + * @maj: API major value to compare + * @min: API minor value to compare + * + * Assert whether current firmware version is greater/equal than provided. + **/ +static inline bool i40e_is_fw_ver_ge(struct i40e_hw *hw, u16 maj, u16 min) +{ + return (hw->aq.fw_maj_ver > maj || + (hw->aq.fw_maj_ver == maj && hw->aq.fw_min_ver >= min)); +} + +/** + * i40e_is_fw_ver_lt + * @hw: pointer to i40e_hw structure + * @maj: API major value to compare + * @min: API minor value to compare + * + * Assert whether current firmware version is less than provided. + **/ +static inline bool i40e_is_fw_ver_lt(struct i40e_hw *hw, u16 maj, u16 min) +{ + return !i40e_is_fw_ver_ge(hw, maj, min); +} + +/** + * i40e_is_fw_ver_eq + * @hw: pointer to i40e_hw structure + * @maj: API major value to compare + * @min: API minor value to compare + * + * Assert whether current firmware version is equal to provided. + **/ +static inline bool i40e_is_fw_ver_eq(struct i40e_hw *hw, u16 maj, u16 min) +{ + return (hw->aq.fw_maj_ver == maj && hw->aq.fw_min_ver == min); +} + #endif /* _I40E_PROTOTYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 20b77398f0..e7ebcb09f2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -35,7 +35,7 @@ enum i40e_ptp_pin { GPIO_4 }; -enum i40e_can_set_pins_t { +enum i40e_can_set_pins { CANT_DO_PINS = -1, CAN_SET_PINS, CAN_DO_PINS @@ -193,7 +193,7 @@ static bool i40e_is_ptp_pin_dev(struct i40e_hw *hw) * return CAN_DO_PINS if pins can be manipulated within a NIC or * return CANT_DO_PINS otherwise. **/ -static enum i40e_can_set_pins_t i40e_can_set_pins(struct i40e_pf *pf) +static enum i40e_can_set_pins i40e_can_set_pins(struct i40e_pf *pf) { if (!i40e_is_ptp_pin_dev(&pf->hw)) { dev_warn(&pf->pdev->dev, @@ -680,7 +680,7 @@ void i40e_ptp_rx_hang(struct i40e_pf *pf) * configured. We don't want to spuriously warn about Rx timestamp * hangs if we don't care about the timestamps. */ - if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_rx) + if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags) || !pf->ptp_rx) return; spin_lock_bh(&pf->ptp_rx_lock); @@ -733,7 +733,7 @@ void i40e_ptp_tx_hang(struct i40e_pf *pf) { struct sk_buff *skb; - if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx) + if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags) || !pf->ptp_tx) return; /* Nothing to do if we're not already waiting for a timestamp */ @@ -771,7 +771,7 @@ void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf) u32 hi, lo; u64 ns; - if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx) + if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags) || !pf->ptp_tx) return; /* don't attempt to timestamp if we don't have an skb */ @@ -818,7 +818,7 @@ void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index) /* Since we cannot turn off the Rx timestamp logic if the device is * doing Tx timestamping, check if Rx timestamping is configured. 
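 * A usage note for the version helpers added to i40e_prototype.h
 * above: they replace open-coded major/minor comparisons, so the
 * probe path earlier in this diff now reads
 *
 *	if (i40e_is_aq_api_ver_lt(hw, 1, 4))
 *		dev_info(&pdev->dev, "NVM image older than expected\n");
 *
 * (message shortened here for illustration), and a compound check
 * such as "maj < 4 || (maj == 4 && min < 33)" collapses to
 * i40e_is_fw_ver_lt(hw, 4, 33).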
*/ - if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_rx) + if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags) || !pf->ptp_rx) return; hw = &pf->hw; @@ -924,7 +924,7 @@ int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr) { struct hwtstamp_config *config = &pf->tstamp_config; - if (!(pf->flags & I40E_FLAG_PTP)) + if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags)) return -EOPNOTSUPP; return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? @@ -1071,7 +1071,7 @@ static void i40e_ptp_set_pins_hw(struct i40e_pf *pf) static int i40e_ptp_set_pins(struct i40e_pf *pf, struct i40e_ptp_pins_settings *pins) { - enum i40e_can_set_pins_t pin_caps = i40e_can_set_pins(pf); + enum i40e_can_set_pins pin_caps = i40e_can_set_pins(pf); int i = 0; if (pin_caps == CANT_DO_PINS) @@ -1211,7 +1211,7 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf, case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: - if (!(pf->hw_features & I40E_HW_PTP_L4_CAPABLE)) + if (!test_bit(I40E_HW_CAP_PTP_L4, pf->hw.caps)) return -ERANGE; pf->ptp_rx = true; tsyntype = I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK | @@ -1225,7 +1225,7 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf, case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: - if (!(pf->hw_features & I40E_HW_PTP_L4_CAPABLE)) + if (!test_bit(I40E_HW_CAP_PTP_L4, pf->hw.caps)) return -ERANGE; fallthrough; case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: @@ -1234,7 +1234,7 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf, pf->ptp_rx = true; tsyntype = I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK | I40E_PRTTSYN_CTL1_TSYNTYPE_V2; - if (pf->hw_features & I40E_HW_PTP_L4_CAPABLE) { + if (test_bit(I40E_HW_CAP_PTP_L4, pf->hw.caps)) { tsyntype |= I40E_PRTTSYN_CTL1_UDP_ENA_MASK; config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; } else { @@ -1308,7 +1308,7 @@ int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr) struct hwtstamp_config config; int err; - if (!(pf->flags & I40E_FLAG_PTP)) + if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags)) return -EOPNOTSUPP; if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) @@ -1426,7 +1426,7 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf) void i40e_ptp_save_hw_time(struct i40e_pf *pf) { /* don't try to access the PTP clock if it's not enabled */ - if (!(pf->flags & I40E_FLAG_PTP)) + if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags)) return; i40e_ptp_gettimex(&pf->ptp_caps, &pf->ptp_prev_hw_time, NULL); @@ -1480,10 +1480,10 @@ void i40e_ptp_init(struct i40e_pf *pf) /* Only one PF is assigned to control 1588 logic per port. Do not * enable any support for PFs not assigned via PRTTSYN_CTL0.PF_ID */ - pf_id = (rd32(hw, I40E_PRTTSYN_CTL0) & I40E_PRTTSYN_CTL0_PF_ID_MASK) >> - I40E_PRTTSYN_CTL0_PF_ID_SHIFT; + pf_id = FIELD_GET(I40E_PRTTSYN_CTL0_PF_ID_MASK, + rd32(hw, I40E_PRTTSYN_CTL0)); if (hw->pf_id != pf_id) { - pf->flags &= ~I40E_FLAG_PTP; + clear_bit(I40E_FLAG_PTP_ENA, pf->flags); dev_info(&pf->pdev->dev, "%s: PTP not supported on %s\n", __func__, netdev->name); @@ -1504,7 +1504,7 @@ void i40e_ptp_init(struct i40e_pf *pf) if (pf->hw.debug_mask & I40E_DEBUG_LAN) dev_info(&pf->pdev->dev, "PHC enabled\n"); - pf->flags |= I40E_FLAG_PTP; + set_bit(I40E_FLAG_PTP_ENA, pf->flags); /* Ensure the clocks are running. 
*/ regval = rd32(hw, I40E_PRTTSYN_CTL0); @@ -1539,7 +1539,7 @@ void i40e_ptp_stop(struct i40e_pf *pf) struct i40e_hw *hw = &pf->hw; u32 regval; - pf->flags &= ~I40E_FLAG_PTP; + clear_bit(I40E_FLAG_PTP_ENA, pf->flags); pf->ptp_tx = false; pf->ptp_rx = false; diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h index f6671ac797..432afbb642 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_register.h +++ b/drivers/net/ethernet/intel/i40e/i40e_register.h @@ -333,8 +333,11 @@ #define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3 #define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) #define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5 +#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT) #define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24 #define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT) +#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25 +#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT) #define I40E_PFINT_ICR0 0x00038780 /* Reset: CORER */ #define I40E_PFINT_ICR0_INTEVENT_SHIFT 0 #define I40E_PFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_INTEVENT_SHIFT) @@ -863,16 +866,6 @@ #define I40E_PFPM_WUFC 0x0006B400 /* Reset: POR */ #define I40E_PFPM_WUFC_MAG_SHIFT 1 #define I40E_PFPM_WUFC_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MAG_SHIFT) -#define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */ -#define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */ -#define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */ -#define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */ -#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */ -#define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */ -#define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */ -#define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */ -#define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */ -#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */ #define I40E_VFQF_HLUT_MAX_INDEX 15 @@ -899,6 +892,7 @@ #define I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT 7 #define I40E_GLQF_ORT_FLX_PAYLOAD_MASK I40E_MASK(0x1, I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) #define I40E_GLQF_FDEVICTENA(_i) (0x00270384 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */ +#define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03 /* Redefined for X722 family */ #define I40E_GLGEN_STAT_CLEAR 0x00390004 /* Reset: CORER */ #endif /* _I40E_REGISTER_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 071ef309a3..1a12b73281 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -33,19 +33,16 @@ static void i40e_fdir(struct i40e_ring *tx_ring, i++; tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; - flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK & - (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT); + flex_ptype = FIELD_PREP(I40E_TXD_FLTR_QW0_QINDEX_MASK, fdata->q_index); - flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK & - (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT); + flex_ptype |= FIELD_PREP(I40E_TXD_FLTR_QW0_FLEXOFF_MASK, + fdata->flex_off); - flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK & - (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT); + flex_ptype |= FIELD_PREP(I40E_TXD_FLTR_QW0_PCTYPE_MASK, fdata->pctype); /* Use LAN VSI Id if not programmed by user */ - flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK & - ((u32)(fdata->dest_vsi ? 
: pf->vsi[pf->lan_vsi]->id) << - I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT); + flex_ptype |= FIELD_PREP(I40E_TXD_FLTR_QW0_DEST_VSI_MASK, + fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id); dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG; @@ -55,17 +52,15 @@ static void i40e_fdir(struct i40e_ring *tx_ring, I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE << I40E_TXD_FLTR_QW1_PCMD_SHIFT; - dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK & - (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT); + dtype_cmd |= FIELD_PREP(I40E_TXD_FLTR_QW1_DEST_MASK, fdata->dest_ctl); - dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK & - (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT); + dtype_cmd |= FIELD_PREP(I40E_TXD_FLTR_QW1_FD_STATUS_MASK, + fdata->fd_status); if (fdata->cnt_index) { dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK; - dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK & - ((u32)fdata->cnt_index << - I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT); + dtype_cmd |= FIELD_PREP(I40E_TXD_FLTR_QW1_CNTINDEX_MASK, + fdata->cnt_index); } fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype); @@ -464,7 +459,7 @@ static int i40e_add_del_fdir_tcp(struct i40e_vsi *vsi, &pf->fd_tcp6_filter_cnt); if (add) { - if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && + if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) && I40E_DEBUG_FD & pf->hw.debug_mask) dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n"); set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state); @@ -691,8 +686,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u64 qword0_raw, u32 error; qw0 = (struct i40e_16b_rx_wb_qw0 *)&qword0_raw; - error = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >> - I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT; + error = FIELD_GET(I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK, qword1); if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) { pf->fd_inv = le32_to_cpu(qw0->hi_dword.fd_id); @@ -734,7 +728,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u64 qword0_raw, * FD ATR/SB and then re-enable it when there is room. 
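 * FIELD_PREP() is the write-side twin of FIELD_GET() and is what the
 * i40e_fdir() descriptor-building hunk above now uses: it shifts a
 * value into the position encoded by the mask, so
 *
 *	flex_ptype = FIELD_PREP(I40E_TXD_FLTR_QW0_QINDEX_MASK,
 *				fdata->q_index);
 *
 * is equivalent to the old
 *
 *	I40E_TXD_FLTR_QW0_QINDEX_MASK &
 *		(fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);
 *
 * with a compile-time width check thrown in when the value is a
 * constant.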
*/ if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) { - if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && + if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) && !test_and_set_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) if (I40E_DEBUG_FD & pf->hw.debug_mask) @@ -1071,7 +1065,7 @@ static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi, if (q_vector->arm_wb_state) return; - if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { + if (test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags)) { val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK | I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */ @@ -1095,7 +1089,7 @@ static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi, **/ void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) { - if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { + if (test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags)) { u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK | I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */ I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | @@ -1403,8 +1397,7 @@ void i40e_clean_programming_status(struct i40e_ring *rx_ring, u64 qword0_raw, { u8 id; - id = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >> - I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT; + id = FIELD_GET(I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK, qword1); if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) i40e_fd_handle_status(rx_ring, qword0_raw, qword1, id); @@ -1755,11 +1748,9 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, u64 qword; qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; - rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> - I40E_RXD_QW1_ERROR_SHIFT; - rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> - I40E_RXD_QW1_STATUS_SHIFT; + ptype = FIELD_GET(I40E_RXD_QW1_PTYPE_MASK, qword); + rx_error = FIELD_GET(I40E_RXD_QW1_ERROR_MASK, qword); + rx_status = FIELD_GET(I40E_RXD_QW1_STATUS_MASK, qword); decoded = decode_rx_desc_ptype(ptype); skb->ip_summed = CHECKSUM_NONE; @@ -1892,13 +1883,10 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring, union i40e_rx_desc *rx_desc, struct sk_buff *skb) { u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> - I40E_RXD_QW1_STATUS_SHIFT; + u32 rx_status = FIELD_GET(I40E_RXD_QW1_STATUS_MASK, qword); u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK; - u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >> - I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT; - u8 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> - I40E_RXD_QW1_PTYPE_SHIFT; + u32 tsyn = FIELD_GET(I40E_RXD_QW1_STATUS_TSYNINDX_MASK, rx_status); + u8 rx_ptype = FIELD_GET(I40E_RXD_QW1_PTYPE_MASK, qword); if (unlikely(tsynvalid)) i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn); @@ -2551,8 +2539,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget, continue; } - size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> - I40E_RXD_QW1_LENGTH_PBUF_SHIFT; + size = FIELD_GET(I40E_RXD_QW1_LENGTH_PBUF_MASK, qword); if (!size) break; @@ -2643,7 +2630,22 @@ process_next: return failure ? 
budget : (int)total_rx_packets; } -static inline u32 i40e_buildreg_itr(const int type, u16 itr) +/** + * i40e_buildreg_itr - build a value for writing to I40E_PFINT_DYN_CTLN register + * @itr_idx: interrupt throttling index + * @interval: interrupt throttling interval value in usecs + * @force_swint: force software interrupt + * + * The function builds a value for the I40E_PFINT_DYN_CTLN register that + * is used to update the interrupt throttling interval for the specified + * ITR index and optionally enforces a software interrupt. If @itr_idx is + * equal to I40E_ITR_NONE, no interval change is applied and only the + * @force_swint parameter is taken into account. If neither an interval + * change nor a software interrupt is requested, the built value simply + * enables the appropriate vector interrupt. + **/ +static u32 i40e_buildreg_itr(enum i40e_dyn_idx itr_idx, u16 interval, + bool force_swint) { u32 val; @@ -2657,23 +2659,33 @@ static inline u32 i40e_buildreg_itr(const int type, u16 itr) * an event in the PBA anyway so we need to rely on the automask * to hold pending events for us until the interrupt is re-enabled * - * The itr value is reported in microseconds, and the register - * value is recorded in 2 microsecond units. For this reason we - * only need to shift by the interval shift - 1 instead of the - * full value. + * We have to shift the given value as it is reported in microseconds + * and the register value is recorded in 2 microsecond units. */ - itr &= I40E_ITR_MASK; + interval >>= 1; + /* 1. Enable vector interrupt + * 2. Update the interval for the specified ITR index + * (I40E_ITR_NONE in the register is used to indicate that + * no interval update is requested) + */ val = I40E_PFINT_DYN_CTLN_INTENA_MASK | - (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) | - (itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1)); + FIELD_PREP(I40E_PFINT_DYN_CTLN_ITR_INDX_MASK, itr_idx) | + FIELD_PREP(I40E_PFINT_DYN_CTLN_INTERVAL_MASK, interval); + + /* 3. Enforce a software interrupt trigger if requested + * (the rate of these software interrupts is limited by ITR2, + * which is set to 20K interrupts per second) + */ + if (force_swint) + val |= I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | + I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK | + FIELD_PREP(I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK, + I40E_SW_ITR); return val; } -/* a small macro to shorten up some long lines */ -#define INTREG I40E_PFINT_DYN_CTLN - /* The act of updating the ITR will cause it to immediately trigger. In order * to prevent this from throwing off adaptive update statistics we defer the * update so that it can only happen so often.
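A worked example of the register math above, using the field layout added to i40e_register.h earlier in this patch (ITR_INDX at bits 4:3, INTERVAL at bits 16:5); the call below is illustrative, not taken from the driver.

/* Request a 50 usec Rx interval with no forced software interrupt. */
static u32 example_rx_itr_50us(void)
{
	/* The INTERVAL field counts 2 usec units, hence interval >>= 1
	 * inside the helper: 50 usec -> 25 units. I40E_RX_ITR == 0, so the
	 * result is I40E_PFINT_DYN_CTLN_INTENA_MASK | (0 << 3) | (25 << 5).
	 */
	return i40e_buildreg_itr(I40E_RX_ITR, 50, false);
}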
So after either Tx or Rx are @@ -2692,11 +2704,13 @@ static inline u32 i40e_buildreg_itr(const int type, u16 itr) static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) { + enum i40e_dyn_idx itr_idx = I40E_ITR_NONE; struct i40e_hw *hw = &vsi->back->hw; - u32 intval; + u16 interval = 0; + u32 itr_val; /* If we don't have MSIX, then we only need to re-enable icr0 */ - if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) { + if (!test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags)) { i40e_irq_dynamic_enable_icr0(vsi->back); return; } @@ -2715,8 +2729,8 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, */ if (q_vector->rx.target_itr < q_vector->rx.current_itr) { /* Rx ITR needs to be reduced, this is highest priority */ - intval = i40e_buildreg_itr(I40E_RX_ITR, - q_vector->rx.target_itr); + itr_idx = I40E_RX_ITR; + interval = q_vector->rx.target_itr; q_vector->rx.current_itr = q_vector->rx.target_itr; q_vector->itr_countdown = ITR_COUNTDOWN_START; } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) || @@ -2725,25 +2739,36 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, /* Tx ITR needs to be reduced, this is second priority * Tx ITR needs to be increased more than Rx, fourth priority */ - intval = i40e_buildreg_itr(I40E_TX_ITR, - q_vector->tx.target_itr); + itr_idx = I40E_TX_ITR; + interval = q_vector->tx.target_itr; q_vector->tx.current_itr = q_vector->tx.target_itr; q_vector->itr_countdown = ITR_COUNTDOWN_START; } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) { /* Rx ITR needs to be increased, third priority */ - intval = i40e_buildreg_itr(I40E_RX_ITR, - q_vector->rx.target_itr); + itr_idx = I40E_RX_ITR; + interval = q_vector->rx.target_itr; q_vector->rx.current_itr = q_vector->rx.target_itr; q_vector->itr_countdown = ITR_COUNTDOWN_START; } else { /* No ITR update, lowest priority */ - intval = i40e_buildreg_itr(I40E_ITR_NONE, 0); if (q_vector->itr_countdown) q_vector->itr_countdown--; } - if (!test_bit(__I40E_VSI_DOWN, vsi->state)) - wr32(hw, INTREG(q_vector->reg_idx), intval); + /* Do not update interrupt control register if VSI is down */ + if (test_bit(__I40E_VSI_DOWN, vsi->state)) + return; + + /* Update ITR interval if necessary and enforce software interrupt + * if we are exiting busy poll. + */ + if (q_vector->in_busy_poll) { + itr_val = i40e_buildreg_itr(itr_idx, interval, true); + q_vector->in_busy_poll = false; + } else { + itr_val = i40e_buildreg_itr(itr_idx, interval, false); + } + wr32(hw, I40E_PFINT_DYN_CTLN(q_vector->reg_idx), itr_val); } /** @@ -2858,6 +2883,8 @@ tx_only: */ if (likely(napi_complete_done(napi, work_done))) i40e_update_enable_itr(vsi, q_vector); + else + q_vector->in_busy_poll = true; return min(work_done, budget - 1); } @@ -2885,7 +2912,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, u16 i; /* make sure ATR is enabled */ - if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) + if (!test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags)) return; if (test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) @@ -2930,7 +2957,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, /* Due to lack of space, no more new filters can be programmed */ if (th->syn && test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) return; - if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) { + if (test_bit(I40E_FLAG_HW_ATR_EVICT_ENA, pf->flags)) { /* HW ATR eviction will take care of removing filters on FIN * and RST packets. 
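The test_bit()/set_bit()/clear_bit() calls that replace '&'-tests on pf->flags throughout these hunks all rely on the same storage change (the pf->flags declaration itself is reworked in i40e.h, outside this excerpt; the hw->caps bitmap version appears in the i40e_type.h hunk further down): a u64 of BIT_ULL() flags becomes a bitmap sized by an enum. A simplified sketch of the pattern, with example names:

#include <linux/bitops.h>
#include <linux/types.h>

enum example_pf_flags {
	EXAMPLE_FLAG_MSIX_ENA,
	EXAMPLE_FLAG_PTP_ENA,
	EXAMPLE_PF_FLAGS_NBITS,		/* must be last */
};

struct example_pf {
	DECLARE_BITMAP(flags, EXAMPLE_PF_FLAGS_NBITS);
};

static bool example_ptp_enabled(struct example_pf *pf)
{
	/* Old: pf->flags & I40E_FLAG_PTP (a u64 test capped at 64 flags).
	 * New: a bitmap lookup with no BIT_ULL() positions to maintain.
	 */
	return test_bit(EXAMPLE_FLAG_PTP_ENA, pf->flags);
}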
*/ @@ -2956,8 +2983,8 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, i++; tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; - flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) & - I40E_TXD_FLTR_QW0_QINDEX_MASK; + flex_ptype = FIELD_PREP(I40E_TXD_FLTR_QW0_QINDEX_MASK, + tx_ring->queue_index); flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ? (I40E_FILTER_PCTYPE_NONF_IPV4_TCP << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) : @@ -2983,16 +3010,14 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK; if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL)) dtype_cmd |= - ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) << - I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & - I40E_TXD_FLTR_QW1_CNTINDEX_MASK; + FIELD_PREP(I40E_TXD_FLTR_QW1_CNTINDEX_MASK, + I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)); else dtype_cmd |= - ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) << - I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & - I40E_TXD_FLTR_QW1_CNTINDEX_MASK; + FIELD_PREP(I40E_TXD_FLTR_QW1_CNTINDEX_MASK, + I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)); - if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) + if (test_bit(I40E_FLAG_HW_ATR_EVICT_ENA, pf->flags)) dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK; fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype); @@ -3050,7 +3075,7 @@ static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, tx_flags |= I40E_TX_FLAGS_SW_VLAN; } - if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED)) + if (!test_bit(I40E_FLAG_DCB_ENA, tx_ring->vsi->back->flags)) goto out; /* Insert 802.1p priority into VLAN header */ @@ -3226,7 +3251,7 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb, * we are not already transmitting a packet to be timestamped */ pf = i40e_netdev_to_pf(tx_ring->netdev); - if (!(pf->flags & I40E_FLAG_PTP)) + if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags)) return 0; if (pf->ptp_tx && @@ -3598,8 +3623,7 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, if (tx_flags & I40E_TX_FLAGS_HW_VLAN) { td_cmd |= I40E_TX_DESC_CMD_IL2TAG1; - td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >> - I40E_TX_FLAGS_VLAN_SHIFT; + td_tag = FIELD_GET(I40E_TX_FLAGS_VLAN_MASK, tx_flags); } first->tx_flags = tx_flags; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 421fe56755..2cdc7de630 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -58,7 +58,7 @@ static inline u16 i40e_intrl_usec_to_reg(int intrl) * mentioning ITR_INDX, ITR_NONE cannot be used as an index 'n' into any * register but instead is a special value meaning "don't update" ITR0/1/2. */ -enum i40e_dyn_idx_t { +enum i40e_dyn_idx { I40E_IDX_ITR0 = 0, I40E_IDX_ITR1 = 1, I40E_IDX_ITR2 = 2, @@ -68,6 +68,7 @@ enum i40e_dyn_idx_t { /* these are indexes into ITRN registers */ #define I40E_RX_ITR I40E_IDX_ITR0 #define I40E_TX_ITR I40E_IDX_ITR1 +#define I40E_SW_ITR I40E_IDX_ITR2 /* Supported RSS offloads */ #define I40E_DEFAULT_RSS_HENA ( \ @@ -92,8 +93,8 @@ enum i40e_dyn_idx_t { BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP)) #define i40e_pf_get_default_rss_hena(pf) \ - (((pf)->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \ - I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA) + (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, (pf)->hw.caps) ? 
\ + I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA) /* Supported Rx Buffer Sizes (a multiple of 128) */ #define I40E_RXBUFFER_256 256 @@ -306,7 +307,7 @@ struct i40e_rx_queue_stats { u64 page_busy_count; }; -enum i40e_ring_state_t { +enum i40e_ring_state { __I40E_TX_FDIR_INIT_DONE, __I40E_TX_XPS_INIT_DONE, __I40E_RING_STATE_NBITS /* must be last */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index f95bc2a4a8..d903149969 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -64,9 +64,7 @@ typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *); enum i40e_mac_type { I40E_MAC_UNKNOWN = 0, I40E_MAC_XL710, - I40E_MAC_VF, I40E_MAC_X722, - I40E_MAC_X722_VF, I40E_MAC_GENERIC, }; @@ -272,9 +270,7 @@ struct i40e_mac_info { enum i40e_mac_type type; u8 addr[ETH_ALEN]; u8 perm_addr[ETH_ALEN]; - u8 san_addr[ETH_ALEN]; u8 port_addr[ETH_ALEN]; - u16 max_fcoeq; }; enum i40e_aq_resources_ids { @@ -482,6 +478,36 @@ struct i40e_dcbx_config { struct i40e_dcb_app_priority_table app[I40E_DCBX_MAX_APPS]; }; +enum i40e_hw_flags { + I40E_HW_CAP_AQ_SRCTL_ACCESS_ENABLE, + I40E_HW_CAP_802_1AD, + I40E_HW_CAP_AQ_PHY_ACCESS, + I40E_HW_CAP_NVM_READ_REQUIRES_LOCK, + I40E_HW_CAP_FW_LLDP_STOPPABLE, + I40E_HW_CAP_FW_LLDP_PERSISTENT, + I40E_HW_CAP_AQ_PHY_ACCESS_EXTENDED, + I40E_HW_CAP_X722_FEC_REQUEST, + I40E_HW_CAP_RSS_AQ, + I40E_HW_CAP_128_QP_RSS, + I40E_HW_CAP_ATR_EVICT, + I40E_HW_CAP_WB_ON_ITR, + I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, + I40E_HW_CAP_NO_PCI_LINK_CHECK, + I40E_HW_CAP_100M_SGMII, + I40E_HW_CAP_NO_DCB_SUPPORT, + I40E_HW_CAP_USE_SET_LLDP_MIB, + I40E_HW_CAP_GENEVE_OFFLOAD, + I40E_HW_CAP_PTP_L4, + I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, + I40E_HW_CAP_CRT_RETIMER, + I40E_HW_CAP_OUTER_UDP_CSUM, + I40E_HW_CAP_PHY_CONTROLS_LEDS, + I40E_HW_CAP_STOP_FW_LLDP, + I40E_HW_CAP_PORT_ID_VALID, + I40E_HW_CAP_RESTART_AUTONEG, + I40E_HW_CAPS_NBITS, +}; + /* Port hardware description */ struct i40e_hw { u8 __iomem *hw_addr; @@ -546,16 +572,7 @@ struct i40e_hw { struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */ struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */ -#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0) -#define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1) -#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2) -#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3) -#define I40E_HW_FLAG_FW_LLDP_STOPPABLE BIT_ULL(4) -#define I40E_HW_FLAG_FW_LLDP_PERSISTENT BIT_ULL(5) -#define I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED BIT_ULL(6) -#define I40E_HW_FLAG_DROP_MODE BIT_ULL(7) -#define I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE BIT_ULL(8) - u64 flags; + DECLARE_BITMAP(caps, I40E_HW_CAPS_NBITS); /* Used in set switch config AQ command */ u16 switch_tag; @@ -567,12 +584,6 @@ struct i40e_hw { char err_str[16]; }; -static inline bool i40e_is_vf(struct i40e_hw *hw) -{ - return (hw->mac.type == I40E_MAC_VF || - hw->mac.type == I40E_MAC_X722_VF); -} - struct i40e_driver_version { u8 major_version; u8 minor_version; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 3d8a23d335..37e77163da 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -500,10 +500,10 @@ static void i40e_release_rdma_qvlist(struct i40e_vf *vf) */ reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx; reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx)); - next_q_index = (reg & 
I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK) - >> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT; - next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK) - >> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT; + next_q_index = FIELD_GET(I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK, + reg); + next_q_type = FIELD_GET(I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK, + reg); reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1); reg = (next_q_index & @@ -581,10 +581,10 @@ i40e_config_rdma_qvlist(struct i40e_vf *vf, * queue on top. Also link it with the new queue in CEQCTL. */ reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx)); - next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >> - I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT); - next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >> - I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT); + next_q_idx = FIELD_GET(I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK, + reg); + next_q_type = FIELD_GET(I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK, + reg); if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) { reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx; @@ -685,11 +685,9 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id, /* associate this queue with the PCI VF function */ qtx_ctl = I40E_QTX_CTL_VF_QUEUE; - qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) - & I40E_QTX_CTL_PF_INDX_MASK); - qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id) - << I40E_QTX_CTL_VFVM_INDX_SHIFT) - & I40E_QTX_CTL_VFVM_INDX_MASK); + qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_PF_INDX_MASK, hw->pf_id); + qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_VFVM_INDX_MASK, + vf->vf_id + hw->func_caps.vf_base_id); wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl); i40e_flush(hw); @@ -1630,8 +1628,8 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) { struct i40e_hw *hw = &pf->hw; struct i40e_vf *vf; - int i, v; u32 reg; + int i; /* If we don't have any VFs, then there is nothing to reset */ if (!pf->num_alloc_vfs) @@ -1642,11 +1640,10 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) return false; /* Begin reset on all VFs at once */ - for (v = 0; v < pf->num_alloc_vfs; v++) { - vf = &pf->vf[v]; + for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) { /* If VF is being reset no need to trigger reset again */ if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) - i40e_trigger_vf_reset(&pf->vf[v], flr); + i40e_trigger_vf_reset(vf, flr); } /* HW requires some time to make sure it can flush the FIFO for a VF @@ -1655,14 +1652,13 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) * the VFs using a simple iterator that increments once that VF has * finished resetting. */ - for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) { + for (i = 0, vf = &pf->vf[0]; i < 10 && vf < &pf->vf[pf->num_alloc_vfs]; ++i) { usleep_range(10000, 20000); /* Check each VF in sequence, beginning with the VF to fail * the previous check. */ - while (v < pf->num_alloc_vfs) { - vf = &pf->vf[v]; + while (vf < &pf->vf[pf->num_alloc_vfs]) { if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) { reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id)); if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK)) @@ -1672,7 +1668,7 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) /* If the current VF has finished resetting, move on * to the next VF in sequence. */ - v++; + ++vf; } } @@ -1682,39 +1678,39 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) /* Display a warning if at least one VF didn't manage to reset in * time, but continue on with the operation. 
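The loop rewrites in i40e_reset_all_vfs() above trade an integer index plus repeated pf->vf[v] lookups for direct pointer iteration bounded by the end-of-array address. A reduced sketch of the pattern with simplified types:

struct example_vf {
	int vf_id;
};

static int example_count_odd_ids(struct example_vf *vfs, int num_vfs)
{
	struct example_vf *vf;
	int n = 0;

	/* Old style: for (v = 0; v < num_vfs; v++) { vf = &vfs[v]; ... } */
	for (vf = &vfs[0]; vf < &vfs[num_vfs]; ++vf)
		n += vf->vf_id & 1;

	return n;
}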
*/ - if (v < pf->num_alloc_vfs) + if (vf < &pf->vf[pf->num_alloc_vfs]) dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n", - pf->vf[v].vf_id); + vf->vf_id); usleep_range(10000, 20000); /* Begin disabling all the rings associated with VFs, but do not wait * between each VF. */ - for (v = 0; v < pf->num_alloc_vfs; v++) { + for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) { /* On initial reset, we don't have any queues to disable */ - if (pf->vf[v].lan_vsi_idx == 0) + if (vf->lan_vsi_idx == 0) continue; /* If VF is reset in another thread just continue */ if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) continue; - i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]); + i40e_vsi_stop_rings_no_wait(pf->vsi[vf->lan_vsi_idx]); } /* Now that we've notified HW to disable all of the VF rings, wait * until they finish. */ - for (v = 0; v < pf->num_alloc_vfs; v++) { + for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) { /* On initial reset, we don't have any queues to disable */ - if (pf->vf[v].lan_vsi_idx == 0) + if (vf->lan_vsi_idx == 0) continue; /* If VF is reset in another thread just continue */ if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) continue; - i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]); + i40e_vsi_wait_queues_disabled(pf->vsi[vf->lan_vsi_idx]); } /* Hw may need up to 50ms to finish disabling the RX queues. We @@ -1723,12 +1719,12 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) mdelay(50); /* Finish the reset on each VF */ - for (v = 0; v < pf->num_alloc_vfs; v++) { + for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) { /* If VF is reset in another thread just continue */ if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) continue; - i40e_cleanup_reset_vf(&pf->vf[v]); + i40e_cleanup_reset_vf(vf); } i40e_flush(hw); @@ -1834,7 +1830,7 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) if (pci_num_vf(pf->pdev) != num_alloc_vfs) { ret = pci_enable_sriov(pf->pdev, num_alloc_vfs); if (ret) { - pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; + clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); pf->num_alloc_vfs = 0; goto err_iov; } @@ -1945,8 +1941,8 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) } if (num_vfs) { - if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { - pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; + if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) { + set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG); } ret = i40e_pci_sriov_enable(pdev, num_vfs); @@ -1955,7 +1951,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) if (!pci_vfs_assigned(pf->pdev)) { i40e_free_vfs(pf); - pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; + clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG); } else { dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n"); @@ -2163,14 +2159,14 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) { vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF; } else { - if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) && + if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps) && (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ; else vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG; } - if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) { + if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, pf->hw.caps)) { if (vf->driver_caps & 
VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2; @@ -2179,12 +2175,12 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP; - if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) && + if (test_bit(I40E_HW_CAP_OUTER_UDP_CSUM, pf->hw.caps) && (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM; if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) { - if (pf->flags & I40E_FLAG_MFP_ENABLED) { + if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) { dev_err(&pf->pdev->dev, "VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n", vf->vf_id); @@ -2194,7 +2190,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING; } - if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) { + if (test_bit(I40E_HW_CAP_WB_ON_ITR, pf->hw.caps)) { if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR; @@ -3145,11 +3141,12 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) /* Allow to delete VF primary MAC only if it was not set * administratively by PF or if VF is trusted. */ - if (ether_addr_equal(addr, vf->default_lan_addr.addr) && - i40e_can_vf_change_mac(vf)) - was_unimac_deleted = true; - else - continue; + if (ether_addr_equal(addr, vf->default_lan_addr.addr)) { + if (i40e_can_vf_change_mac(vf)) + was_unimac_deleted = true; + else + continue; + } if (i40e_del_mac_filter(vsi, al->list[i].addr)) { ret = -EINVAL; @@ -4737,9 +4734,8 @@ int i40e_ndo_get_vf_config(struct net_device *netdev, ivi->max_tx_rate = vf->tx_rate; ivi->min_tx_rate = 0; - ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK; - ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >> - I40E_VLAN_PRIORITY_SHIFT; + ivi->vlan = le16_get_bits(vsi->info.pvid, I40E_VLAN_MASK); + ivi->qos = le16_get_bits(vsi->info.pvid, I40E_PRIORITY_MASK); if (vf->link_forced == false) ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; else if (vf->link_up == true) @@ -4929,7 +4925,7 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting) goto out; } - if (pf->flags & I40E_FLAG_MFP_ENABLED) { + if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) { dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n"); ret = -EINVAL; goto out; diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index 65f38a57b3..11500003af 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -477,8 +477,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget) continue; } - size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> - I40E_RXD_QW1_LENGTH_PBUF_SHIFT; + size = FIELD_GET(I40E_RXD_QW1_LENGTH_PBUF_MASK, qword); if (!size) break; diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index 63b45c61cc..db8188c7ac 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -313,7 +313,8 @@ struct iavf_adapter { #define IAVF_FLAG_AQ_SET_HENA BIT_ULL(12) #define IAVF_FLAG_AQ_SET_RSS_KEY BIT_ULL(13) #define IAVF_FLAG_AQ_SET_RSS_LUT BIT_ULL(14) -#define IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE BIT_ULL(15) +#define IAVF_FLAG_AQ_SET_RSS_HFUNC BIT_ULL(15) +#define 
IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE BIT_ULL(16) #define IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING BIT_ULL(19) #define IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING BIT_ULL(20) #define IAVF_FLAG_AQ_ENABLE_CHANNELS BIT_ULL(21) @@ -415,6 +416,7 @@ struct iavf_adapter { struct iavf_vsi vsi; u32 aq_wait_count; /* RSS stuff */ + enum virtchnl_rss_algorithm hfunc; u64 hena; u16 rss_key_size; u16 rss_lut_size; @@ -540,6 +542,7 @@ void iavf_get_hena(struct iavf_adapter *adapter); void iavf_set_hena(struct iavf_adapter *adapter); void iavf_set_rss_key(struct iavf_adapter *adapter); void iavf_set_rss_lut(struct iavf_adapter *adapter); +void iavf_set_rss_hfunc(struct iavf_adapter *adapter); void iavf_enable_vlan_stripping(struct iavf_adapter *adapter); void iavf_disable_vlan_stripping(struct iavf_adapter *adapter); void iavf_virtchnl_completion(struct iavf_adapter *adapter, diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.c b/drivers/net/ethernet/intel/iavf/iavf_adminq.c index 9ffbd24d83..82fcd18ad6 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_adminq.c +++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.c @@ -8,27 +8,6 @@ #include "iavf_prototype.h" /** - * iavf_adminq_init_regs - Initialize AdminQ registers - * @hw: pointer to the hardware structure - * - * This assumes the alloc_asq and alloc_arq functions have already been called - **/ -static void iavf_adminq_init_regs(struct iavf_hw *hw) -{ - /* set head and tail registers in our local struct */ - hw->aq.asq.tail = IAVF_VF_ATQT1; - hw->aq.asq.head = IAVF_VF_ATQH1; - hw->aq.asq.len = IAVF_VF_ATQLEN1; - hw->aq.asq.bal = IAVF_VF_ATQBAL1; - hw->aq.asq.bah = IAVF_VF_ATQBAH1; - hw->aq.arq.tail = IAVF_VF_ARQT1; - hw->aq.arq.head = IAVF_VF_ARQH1; - hw->aq.arq.len = IAVF_VF_ARQLEN1; - hw->aq.arq.bal = IAVF_VF_ARQBAL1; - hw->aq.arq.bah = IAVF_VF_ARQBAH1; -} - -/** * iavf_alloc_adminq_asq_ring - Allocate Admin Queue send rings * @hw: pointer to the hardware structure **/ @@ -259,17 +238,17 @@ static enum iavf_status iavf_config_asq_regs(struct iavf_hw *hw) u32 reg = 0; /* Clear Head and Tail */ - wr32(hw, hw->aq.asq.head, 0); - wr32(hw, hw->aq.asq.tail, 0); + wr32(hw, IAVF_VF_ATQH1, 0); + wr32(hw, IAVF_VF_ATQT1, 0); /* set starting point */ - wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries | + wr32(hw, IAVF_VF_ATQLEN1, (hw->aq.num_asq_entries | IAVF_VF_ATQLEN1_ATQENABLE_MASK)); - wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa)); - wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa)); + wr32(hw, IAVF_VF_ATQBAL1, lower_32_bits(hw->aq.asq.desc_buf.pa)); + wr32(hw, IAVF_VF_ATQBAH1, upper_32_bits(hw->aq.asq.desc_buf.pa)); /* Check one register to verify that config was applied */ - reg = rd32(hw, hw->aq.asq.bal); + reg = rd32(hw, IAVF_VF_ATQBAL1); if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa)) ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR; @@ -288,20 +267,20 @@ static enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw) u32 reg = 0; /* Clear Head and Tail */ - wr32(hw, hw->aq.arq.head, 0); - wr32(hw, hw->aq.arq.tail, 0); + wr32(hw, IAVF_VF_ARQH1, 0); + wr32(hw, IAVF_VF_ARQT1, 0); /* set starting point */ - wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries | + wr32(hw, IAVF_VF_ARQLEN1, (hw->aq.num_arq_entries | IAVF_VF_ARQLEN1_ARQENABLE_MASK)); - wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa)); - wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa)); + wr32(hw, IAVF_VF_ARQBAL1, lower_32_bits(hw->aq.arq.desc_buf.pa)); + wr32(hw, IAVF_VF_ARQBAH1, upper_32_bits(hw->aq.arq.desc_buf.pa)); /* Update tail in the HW to post 
pre-allocated buffers */ - wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1); + wr32(hw, IAVF_VF_ARQT1, hw->aq.num_arq_entries - 1); /* Check one register to verify that config was applied */ - reg = rd32(hw, hw->aq.arq.bal); + reg = rd32(hw, IAVF_VF_ARQBAL1); if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa)) ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR; @@ -455,11 +434,11 @@ static enum iavf_status iavf_shutdown_asq(struct iavf_hw *hw) } /* Stop firmware AdminQ processing */ - wr32(hw, hw->aq.asq.head, 0); - wr32(hw, hw->aq.asq.tail, 0); - wr32(hw, hw->aq.asq.len, 0); - wr32(hw, hw->aq.asq.bal, 0); - wr32(hw, hw->aq.asq.bah, 0); + wr32(hw, IAVF_VF_ATQH1, 0); + wr32(hw, IAVF_VF_ATQT1, 0); + wr32(hw, IAVF_VF_ATQLEN1, 0); + wr32(hw, IAVF_VF_ATQBAL1, 0); + wr32(hw, IAVF_VF_ATQBAH1, 0); hw->aq.asq.count = 0; /* to indicate uninitialized queue */ @@ -489,11 +468,11 @@ static enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw) } /* Stop firmware AdminQ processing */ - wr32(hw, hw->aq.arq.head, 0); - wr32(hw, hw->aq.arq.tail, 0); - wr32(hw, hw->aq.arq.len, 0); - wr32(hw, hw->aq.arq.bal, 0); - wr32(hw, hw->aq.arq.bah, 0); + wr32(hw, IAVF_VF_ARQH1, 0); + wr32(hw, IAVF_VF_ARQT1, 0); + wr32(hw, IAVF_VF_ARQLEN1, 0); + wr32(hw, IAVF_VF_ARQBAL1, 0); + wr32(hw, IAVF_VF_ARQBAH1, 0); hw->aq.arq.count = 0; /* to indicate uninitialized queue */ @@ -529,9 +508,6 @@ enum iavf_status iavf_init_adminq(struct iavf_hw *hw) goto init_adminq_exit; } - /* Set up register offsets */ - iavf_adminq_init_regs(hw); - /* setup ASQ command write back timeout */ hw->aq.asq_cmd_timeout = IAVF_ASQ_CMD_TIMEOUT; @@ -587,9 +563,9 @@ static u16 iavf_clean_asq(struct iavf_hw *hw) desc = IAVF_ADMINQ_DESC(*asq, ntc); details = IAVF_ADMINQ_DETAILS(*asq, ntc); - while (rd32(hw, hw->aq.asq.head) != ntc) { + while (rd32(hw, IAVF_VF_ATQH1) != ntc) { iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, - "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head)); + "ntc %d head %d.\n", ntc, rd32(hw, IAVF_VF_ATQH1)); if (details->callback) { IAVF_ADMINQ_CALLBACK cb_func = @@ -624,7 +600,7 @@ bool iavf_asq_done(struct iavf_hw *hw) /* AQ designers suggest use of head for better * timing reliability than DD bit */ - return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use; + return rd32(hw, IAVF_VF_ATQH1) == hw->aq.asq.next_to_use; } /** @@ -663,7 +639,7 @@ enum iavf_status iavf_asq_send_command(struct iavf_hw *hw, hw->aq.asq_last_status = IAVF_AQ_RC_OK; - val = rd32(hw, hw->aq.asq.head); + val = rd32(hw, IAVF_VF_ATQH1); if (val >= hw->aq.num_asq_entries) { iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: head overrun at %d\n", val); @@ -755,7 +731,7 @@ enum iavf_status iavf_asq_send_command(struct iavf_hw *hw, if (hw->aq.asq.next_to_use == hw->aq.asq.count) hw->aq.asq.next_to_use = 0; if (!details->postpone) - wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use); + wr32(hw, IAVF_VF_ATQT1, hw->aq.asq.next_to_use); /* if cmd_details are not defined or async flag is not set, * we need to wait for desc write back @@ -810,7 +786,7 @@ enum iavf_status iavf_asq_send_command(struct iavf_hw *hw, /* update the error if time out occurred */ if ((!cmd_completed) && (!details->async && !details->postpone)) { - if (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) { + if (rd32(hw, IAVF_VF_ATQLEN1) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) { iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: AQ Critical error.\n"); status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR; @@ -878,7 +854,7 @@ enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw, } /* set next_to_use to head */ - ntu = rd32(hw, 
hw->aq.arq.head) & IAVF_VF_ARQH1_ARQH_MASK; + ntu = rd32(hw, IAVF_VF_ARQH1) & IAVF_VF_ARQH1_ARQH_MASK; if (ntu == ntc) { /* nothing to do - shouldn't need to update ring's values */ ret_code = IAVF_ERR_ADMIN_QUEUE_NO_WORK; @@ -926,7 +902,7 @@ enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw, desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa)); /* set tail = the last cleaned desc index. */ - wr32(hw, hw->aq.arq.tail, ntc); + wr32(hw, IAVF_VF_ARQT1, ntc); /* ntc is updated to tail + 1 */ ntc++; if (ntc == hw->aq.num_arq_entries) diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.h b/drivers/net/ethernet/intel/iavf/iavf_adminq.h index 1f60518eb0..406506f64b 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_adminq.h +++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.h @@ -29,13 +29,6 @@ struct iavf_adminq_ring { /* used for interrupt processing */ u16 next_to_use; u16 next_to_clean; - - /* used for queue tracking */ - u32 head; - u32 tail; - u32 len; - u32 bah; - u32 bal; }; /* ASQ transaction details */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c index 6edbf134b7..a9e1da35e2 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c +++ b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c @@ -95,17 +95,21 @@ iavf_fill_adv_rss_sctp_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds) * @rss_cfg: the virtchnl message to be filled with RSS configuration setting * @packet_hdrs: the RSS configuration protocol header types * @hash_flds: the RSS configuration protocol hash fields + * @symm: if true, symmetric hash is required * * Returns 0 if the RSS configuration virtchnl message is filled successfully */ int iavf_fill_adv_rss_cfg_msg(struct virtchnl_rss_cfg *rss_cfg, - u32 packet_hdrs, u64 hash_flds) + u32 packet_hdrs, u64 hash_flds, bool symm) { struct virtchnl_proto_hdrs *proto_hdrs = &rss_cfg->proto_hdrs; struct virtchnl_proto_hdr *hdr; - rss_cfg->rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC; + if (symm) + rss_cfg->rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC; + else + rss_cfg->rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC; proto_hdrs->tunnel_level = 0; /* always outer layer */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h index 4d3be11af7..e31eb2afeb 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h +++ b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h @@ -80,13 +80,14 @@ struct iavf_adv_rss { u32 packet_hdrs; u64 hash_flds; + bool symm; struct virtchnl_rss_cfg cfg_msg; }; int iavf_fill_adv_rss_cfg_msg(struct virtchnl_rss_cfg *rss_cfg, - u32 packet_hdrs, u64 hash_flds); + u32 packet_hdrs, u64 hash_flds, bool symm); struct iavf_adv_rss * iavf_find_adv_rss_cfg_by_hdrs(struct iavf_adapter *adapter, u32 packet_hdrs); void diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c index 6a10c0ecf2..5a25233a89 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_common.c +++ b/drivers/net/ethernet/intel/iavf/iavf_common.c @@ -280,11 +280,11 @@ void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask, void *desc, **/ bool iavf_check_asq_alive(struct iavf_hw *hw) { - if (hw->aq.asq.len) - return !!(rd32(hw, hw->aq.asq.len) & - IAVF_VF_ATQLEN1_ATQENABLE_MASK); - else + /* Check if the queue is initialized */ + if (!hw->aq.asq.count) return false; + + return !!(rd32(hw, IAVF_VF_ATQLEN1) & IAVF_VF_ATQLEN1_ATQENABLE_MASK); } /** @@ -331,6 +331,7 @@ static enum iavf_status 
iavf_aq_get_set_rss_lut(struct iavf_hw *hw, struct iavf_aq_desc desc; struct iavf_aqc_get_set_rss_lut *cmd_resp = (struct iavf_aqc_get_set_rss_lut *)&desc.params.raw; + u16 flags; if (set) iavf_fill_default_direct_cmd_desc(&desc, @@ -343,22 +344,18 @@ static enum iavf_status iavf_aq_get_set_rss_lut(struct iavf_hw *hw, desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_BUF); desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_RD); - cmd_resp->vsi_id = - cpu_to_le16((u16)((vsi_id << - IAVF_AQC_SET_RSS_LUT_VSI_ID_SHIFT) & - IAVF_AQC_SET_RSS_LUT_VSI_ID_MASK)); - cmd_resp->vsi_id |= cpu_to_le16((u16)IAVF_AQC_SET_RSS_LUT_VSI_VALID); + vsi_id = FIELD_PREP(IAVF_AQC_SET_RSS_LUT_VSI_ID_MASK, vsi_id) | + FIELD_PREP(IAVF_AQC_SET_RSS_LUT_VSI_VALID, 1); + cmd_resp->vsi_id = cpu_to_le16(vsi_id); if (pf_lut) - cmd_resp->flags |= cpu_to_le16((u16) - ((IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_PF << - IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & - IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); + flags = FIELD_PREP(IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK, + IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_PF); else - cmd_resp->flags |= cpu_to_le16((u16) - ((IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_VSI << - IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & - IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); + flags = FIELD_PREP(IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK, + IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_VSI); + + cmd_resp->flags = cpu_to_le16(flags); status = iavf_asq_send_command(hw, &desc, lut, lut_size, NULL); @@ -412,11 +409,9 @@ iavf_status iavf_aq_get_set_rss_key(struct iavf_hw *hw, u16 vsi_id, desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_BUF); desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_RD); - cmd_resp->vsi_id = - cpu_to_le16((u16)((vsi_id << - IAVF_AQC_SET_RSS_KEY_VSI_ID_SHIFT) & - IAVF_AQC_SET_RSS_KEY_VSI_ID_MASK)); - cmd_resp->vsi_id |= cpu_to_le16((u16)IAVF_AQC_SET_RSS_KEY_VSI_VALID); + vsi_id = FIELD_PREP(IAVF_AQC_SET_RSS_KEY_VSI_ID_MASK, vsi_id) | + FIELD_PREP(IAVF_AQC_SET_RSS_KEY_VSI_VALID, 1); + cmd_resp->vsi_id = cpu_to_le16(vsi_id); status = iavf_asq_send_command(hw, &desc, key, key_size, NULL); diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index 25ba5653ac..378c3e9ddf 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -397,8 +397,7 @@ static void iavf_get_priv_flag_strings(struct net_device *netdev, u8 *data) unsigned int i; for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) - ethtool_sprintf(&data, "%s", - iavf_gstrings_priv_flags[i].flag_string); + ethtool_puts(&data, iavf_gstrings_priv_flags[i].flag_string); } /** @@ -1018,8 +1017,7 @@ iavf_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp, #define IAVF_USERDEF_FLEX_MAX_OFFS_VAL 504 flex = &fltr->flex_words[cnt++]; flex->word = value & IAVF_USERDEF_FLEX_WORD_M; - flex->offset = (value & IAVF_USERDEF_FLEX_OFFS_M) >> - IAVF_USERDEF_FLEX_OFFS_S; + flex->offset = FIELD_GET(IAVF_USERDEF_FLEX_OFFS_M, value); if (flex->offset > IAVF_USERDEF_FLEX_MAX_OFFS_VAL) return -EINVAL; } @@ -1437,16 +1435,15 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx spin_lock_bh(&adapter->fdir_fltr_lock); iavf_fdir_list_add_fltr(adapter, fltr); adapter->fdir_active_fltr++; - if (adapter->link_up) { + + if (adapter->link_up) fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST; - adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER; - } else { + else fltr->state = IAVF_FDIR_FLTR_INACTIVE; - } spin_unlock_bh(&adapter->fdir_fltr_lock); if (adapter->link_up) - mod_delayed_work(adapter->wq, 
&adapter->watchdog_task, 0); + iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_FDIR_FILTER); ret: if (err && fltr) kfree(fltr); @@ -1476,7 +1473,6 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx if (fltr) { if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) { fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST; - adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER; } else if (fltr->state == IAVF_FDIR_FLTR_INACTIVE) { list_del(&fltr->list); kfree(fltr); @@ -1491,7 +1487,7 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx spin_unlock_bh(&adapter->fdir_fltr_lock); if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST) - mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); + iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_FDIR_FILTER); return err; } @@ -1542,11 +1538,12 @@ static u32 iavf_adv_rss_parse_hdrs(struct ethtool_rxnfc *cmd) /** * iavf_adv_rss_parse_hash_flds - parses hash fields from RSS hash input * @cmd: ethtool rxnfc command + * @symm: true if Symmetric Toeplitz is set * * This function parses the rxnfc command and returns the intended hash fields for * RSS configuration */ -static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd) +static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd, bool symm) { u64 hfld = IAVF_ADV_RSS_HASH_INVALID; @@ -1618,17 +1615,20 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter, struct iavf_adv_rss *rss_old, *rss_new; bool rss_new_add = false; int count = 50, err = 0; + bool symm = false; u64 hash_flds; u32 hdrs; if (!ADV_RSS_SUPPORT(adapter)) return -EOPNOTSUPP; + symm = !!(adapter->hfunc == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC); + hdrs = iavf_adv_rss_parse_hdrs(cmd); if (hdrs == IAVF_ADV_RSS_FLOW_SEG_HDR_NONE) return -EINVAL; - hash_flds = iavf_adv_rss_parse_hash_flds(cmd); + hash_flds = iavf_adv_rss_parse_hash_flds(cmd, symm); if (hash_flds == IAVF_ADV_RSS_HASH_INVALID) return -EINVAL; @@ -1636,7 +1636,8 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter, if (!rss_new) return -ENOMEM; - if (iavf_fill_adv_rss_cfg_msg(&rss_new->cfg_msg, hdrs, hash_flds)) { + if (iavf_fill_adv_rss_cfg_msg(&rss_new->cfg_msg, hdrs, hash_flds, + symm)) { kfree(rss_new); return -EINVAL; } @@ -1655,12 +1656,13 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter, if (rss_old) { if (rss_old->state != IAVF_ADV_RSS_ACTIVE) { err = -EBUSY; - } else if (rss_old->hash_flds != hash_flds) { + } else if (rss_old->hash_flds != hash_flds || + rss_old->symm != symm) { rss_old->state = IAVF_ADV_RSS_ADD_REQUEST; rss_old->hash_flds = hash_flds; + rss_old->symm = symm; memcpy(&rss_old->cfg_msg, &rss_new->cfg_msg, sizeof(rss_new->cfg_msg)); - adapter->aq_required |= IAVF_FLAG_AQ_ADD_ADV_RSS_CFG; } else { err = -EEXIST; } @@ -1669,13 +1671,13 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter, rss_new->state = IAVF_ADV_RSS_ADD_REQUEST; rss_new->packet_hdrs = hdrs; rss_new->hash_flds = hash_flds; + rss_new->symm = symm; list_add_tail(&rss_new->list, &adapter->adv_rss_list_head); - adapter->aq_required |= IAVF_FLAG_AQ_ADD_ADV_RSS_CFG; } spin_unlock_bh(&adapter->adv_rss_lock); if (!err) - mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); + iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_ADV_RSS_CFG); mutex_unlock(&adapter->crit_lock); @@ -1909,27 +1911,27 @@ static u32 iavf_get_rxfh_indir_size(struct net_device *netdev) /** * iavf_get_rxfh - get the rx flow hash indirection table * @netdev: network interface device structure - * @indir: indirection table - * @key: hash key - *
@hfunc: hash function in use + * @rxfh: pointer to param struct (indir, key, hfunc) * * Reads the indirection table directly from the hardware. Always returns 0. **/ -static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, - u8 *hfunc) +static int iavf_get_rxfh(struct net_device *netdev, + struct ethtool_rxfh_param *rxfh) { struct iavf_adapter *adapter = netdev_priv(netdev); u16 i; - if (hfunc) - *hfunc = ETH_RSS_HASH_TOP; - if (key) - memcpy(key, adapter->rss_key, adapter->rss_key_size); + rxfh->hfunc = ETH_RSS_HASH_TOP; + if (adapter->hfunc == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC) + rxfh->input_xfrm |= RXH_XFRM_SYM_XOR; + + if (rxfh->key) + memcpy(rxfh->key, adapter->rss_key, adapter->rss_key_size); - if (indir) + if (rxfh->indir) /* Each 32 bits pointed by 'indir' is stored with a lut entry */ for (i = 0; i < adapter->rss_lut_size; i++) - indir[i] = (u32)adapter->rss_lut[i]; + rxfh->indir[i] = (u32)adapter->rss_lut[i]; return 0; } @@ -1937,33 +1939,46 @@ static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, /** * iavf_set_rxfh - set the rx flow hash indirection table * @netdev: network interface device structure - * @indir: indirection table - * @key: hash key - * @hfunc: hash function to use + * @rxfh: pointer to param struct (indir, key, hfunc) + * @extack: extended ACK from the Netlink message * * Returns -EINVAL if the table specifies an invalid queue id, otherwise * returns 0 after programming the table. **/ -static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir, - const u8 *key, const u8 hfunc) +static int iavf_set_rxfh(struct net_device *netdev, + struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) { struct iavf_adapter *adapter = netdev_priv(netdev); u16 i; /* Only support toeplitz hash function */ - if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && + rxfh->hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; - if (!key && !indir) + if ((rxfh->input_xfrm & RXH_XFRM_SYM_XOR) && + adapter->hfunc != VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC) { + if (!ADV_RSS_SUPPORT(adapter)) + return -EOPNOTSUPP; + adapter->hfunc = VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC; + adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_HFUNC; + } else if (!(rxfh->input_xfrm & RXH_XFRM_SYM_XOR) && + adapter->hfunc != VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC) { + adapter->hfunc = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC; + adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_HFUNC; + } + + if (!rxfh->key && !rxfh->indir) return 0; - if (key) - memcpy(adapter->rss_key, key, adapter->rss_key_size); + if (rxfh->key) + memcpy(adapter->rss_key, rxfh->key, adapter->rss_key_size); - if (indir) { + if (rxfh->indir) { /* Each 32 bits pointed by 'indir' is stored with a lut entry */ for (i = 0; i < adapter->rss_lut_size; i++) - adapter->rss_lut[i] = (u8)(indir[i]); + adapter->rss_lut[i] = (u8)(rxfh->indir[i]); } return iavf_config_rss(adapter); @@ -1972,6 +1987,7 @@ static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir, static const struct ethtool_ops iavf_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE, + .cap_rss_sym_xor_supported = true, .get_drvinfo = iavf_get_drvinfo, .get_link = ethtool_op_get_link, .get_ringparam = iavf_get_ringparam, diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.c b/drivers/net/ethernet/intel/iavf/iavf_fdir.c index 65ddcd81c9..2d47b0b464 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_fdir.c +++ 
b/drivers/net/ethernet/intel/iavf/iavf_fdir.c @@ -358,7 +358,7 @@ iavf_fill_fdir_ip6_hdr(struct iavf_fdir_fltr *fltr, if (fltr->ip_mask.tclass == U8_MAX) { iph->priority = (fltr->ip_data.tclass >> 4) & 0xF; - iph->flow_lbl[0] = (fltr->ip_data.tclass << 4) & 0xF0; + iph->flow_lbl[0] = FIELD_PREP(0xF0, fltr->ip_data.tclass); VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC); } diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index e8d5b889ad..1ff381361c 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -1038,13 +1038,12 @@ static int iavf_replace_primary_mac(struct iavf_adapter *adapter, */ new_f->is_primary = true; new_f->add = true; - adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; ether_addr_copy(hw->mac.addr, new_mac); spin_unlock_bh(&adapter->mac_vlan_list_lock); /* schedule the watchdog task to immediately process the request */ - mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); + iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_MAC_FILTER); return 0; } @@ -1263,8 +1262,7 @@ static void iavf_up_complete(struct iavf_adapter *adapter) iavf_napi_enable_all(adapter); - adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES; - mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); + iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ENABLE_QUEUES); } /** @@ -1420,8 +1418,7 @@ void iavf_down(struct iavf_adapter *adapter) adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG; } - adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES; - mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); + iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DISABLE_QUEUES); } /** @@ -2150,6 +2147,10 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter) iavf_set_rss_lut(adapter); return 0; } + if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_HFUNC) { + iavf_set_rss_hfunc(adapter); + return 0; + } if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE) { iavf_set_promiscuous(adapter); @@ -2318,10 +2319,8 @@ iavf_set_vlan_offload_features(struct iavf_adapter *adapter, } } - if (aq_required) { - adapter->aq_required |= aq_required; - mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); - } + if (aq_required) + iavf_schedule_aq_request(adapter, aq_required); } /** @@ -3234,7 +3233,7 @@ static void iavf_adminq_task(struct work_struct *work) goto freedom; /* check for error indications */ - val = rd32(hw, hw->aq.arq.len); + val = rd32(hw, IAVF_VF_ARQLEN1); if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */ goto freedom; oldval = val; @@ -3251,9 +3250,9 @@ static void iavf_adminq_task(struct work_struct *work) val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK; } if (oldval != val) - wr32(hw, hw->aq.arq.len, val); + wr32(hw, IAVF_VF_ARQLEN1, val); - val = rd32(hw, hw->aq.asq.len); + val = rd32(hw, IAVF_VF_ATQLEN1); oldval = val; if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) { dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n"); @@ -3268,7 +3267,7 @@ static void iavf_adminq_task(struct work_struct *work) val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK; } if (oldval != val) - wr32(hw, hw->aq.asq.len, val); + wr32(hw, IAVF_VF_ATQLEN1, val); freedom: kfree(event.msg_buf); @@ -3513,6 +3512,34 @@ static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter) } /** + * iavf_is_tc_config_same - Compare the mqprio TC config with the + * TC config already configured on this adapter. + * @adapter: board private structure + * @mqprio_qopt: TC config received from kernel. 
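Many call sites in these iavf hunks collapse the two-step "set an aq_required bit, then kick the watchdog" sequence into iavf_schedule_aq_request(). Its body is not part of this excerpt; the sketch below is inferred from the call sites it replaces and should not be read as the actual implementation.

/* Inferred shape only -- the real helper lives outside these hunks. */
static void example_schedule_aq_request(struct iavf_adapter *adapter,
					u64 flags)
{
	adapter->aq_required |= flags;
	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
}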
+ * + * This function compares the TC config received from the kernel + * with the config already configured on the adapter. + * + * Return: True if configuration is same, false otherwise. + **/ +static bool iavf_is_tc_config_same(struct iavf_adapter *adapter, + struct tc_mqprio_qopt *mqprio_qopt) +{ + struct virtchnl_channel_info *ch = &adapter->ch_config.ch_info[0]; + int i; + + if (adapter->num_tc != mqprio_qopt->num_tc) + return false; + + for (i = 0; i < adapter->num_tc; i++) { + if (ch[i].count != mqprio_qopt->count[i] || + ch[i].offset != mqprio_qopt->offset[i]) + return false; + } + return true; +} + +/** * __iavf_setup_tc - configure multiple traffic classes * @netdev: network interface device structure * @type_data: tc offload data @@ -3569,7 +3596,7 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data) if (ret) return ret; /* Return if same TC config is requested */ - if (adapter->num_tc == num_tc) + if (iavf_is_tc_config_same(adapter, &mqprio_qopt->qopt)) return 0; adapter->num_tc = num_tc; diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index fb7edba9c2..b71484c87a 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -989,11 +989,9 @@ static void iavf_rx_checksum(struct iavf_vsi *vsi, u64 qword; qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT; - rx_error = (qword & IAVF_RXD_QW1_ERROR_MASK) >> - IAVF_RXD_QW1_ERROR_SHIFT; - rx_status = (qword & IAVF_RXD_QW1_STATUS_MASK) >> - IAVF_RXD_QW1_STATUS_SHIFT; + ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword); + rx_error = FIELD_GET(IAVF_RXD_QW1_ERROR_MASK, qword); + rx_status = FIELD_GET(IAVF_RXD_QW1_STATUS_MASK, qword); decoded = decode_rx_desc_ptype(ptype); skb->ip_summed = CHECKSUM_NONE; @@ -1534,8 +1532,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget) if (!iavf_test_staterr(rx_desc, IAVF_RXD_DD)) break; - size = (qword & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >> - IAVF_RXD_QW1_LENGTH_PBUF_SHIFT; + size = FIELD_GET(IAVF_RXD_QW1_LENGTH_PBUF_MASK, qword); iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb); rx_buffer = iavf_get_rx_buffer(rx_ring, size); @@ -1582,8 +1579,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget) total_rx_bytes += skb->len; qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - rx_ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >> - IAVF_RXD_QW1_PTYPE_SHIFT; + rx_ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword); /* populate checksum, VLAN, and protocol */ iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); @@ -2291,8 +2287,7 @@ static void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb, if (tx_flags & IAVF_TX_FLAGS_HW_VLAN) { td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1; - td_tag = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >> - IAVF_TX_FLAGS_VLAN_SHIFT; + td_tag = FIELD_GET(IAVF_TX_FLAGS_VLAN_MASK, tx_flags); } first->tx_flags = tx_flags; @@ -2468,8 +2463,7 @@ static netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb, if (tx_flags & IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN) { cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2 << IAVF_TXD_CTX_QW1_CMD_SHIFT; - cd_l2tag2 = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >> - IAVF_TX_FLAGS_VLAN_SHIFT; + cd_l2tag2 = FIELD_GET(IAVF_TX_FLAGS_VLAN_MASK, tx_flags); } /* obtain protocol of skb */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 2d9366be0e..22f2df7c46 100644 
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -1142,6 +1142,34 @@ void iavf_set_rss_lut(struct iavf_adapter *adapter) } /** + * iavf_set_rss_hfunc + * @adapter: adapter structure + * + * Request the PF to set our RSS Hash function + **/ +void iavf_set_rss_hfunc(struct iavf_adapter *adapter) +{ + struct virtchnl_rss_hfunc *vrh; + int len = sizeof(*vrh); + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot set RSS Hash function, command %d pending\n", + adapter->current_op); + return; + } + vrh = kzalloc(len, GFP_KERNEL); + if (!vrh) + return; + vrh->vsi_id = adapter->vsi.id; + vrh->rss_algorithm = adapter->hfunc; + adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_HFUNC; + adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_HFUNC; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_HFUNC, (u8 *)vrh, len); + kfree(vrh); +} + +/** * iavf_enable_vlan_stripping * @adapter: adapter structure * @@ -2190,6 +2218,19 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, dev_warn(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n", iavf_stat_str(&adapter->hw, v_retval)); break; + case VIRTCHNL_OP_CONFIG_RSS_HFUNC: + dev_warn(&adapter->pdev->dev, "Failed to configure hash function, error %s\n", + iavf_stat_str(&adapter->hw, v_retval)); + + if (adapter->hfunc == + VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC) + adapter->hfunc = + VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC; + else + adapter->hfunc = + VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC; + + break; default: dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n", v_retval, iavf_stat_str(&adapter->hw, v_retval), diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile index 0679907980..cddd82d4ca 100644 --- a/drivers/net/ethernet/intel/ice/Makefile +++ b/drivers/net/ethernet/intel/ice/Makefile @@ -34,7 +34,9 @@ ice-y := ice_main.o \ ice_lag.o \ ice_ethtool.o \ ice_repr.o \ - ice_tc_lib.o + ice_tc_lib.o \ + ice_fwlog.o \ + ice_debugfs.o ice-$(CONFIG_PCI_IOV) += \ ice_sriov.o \ ice_virtchnl.o \ @@ -49,3 +51,4 @@ ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o ice-$(CONFIG_ICE_SWITCHDEV) += ice_eswitch.o ice_eswitch_br.o ice-$(CONFIG_GNSS) += ice_gnss.o +ice-$(CONFIG_ICE_HWMON) += ice_hwmon.o diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 351e0d36df..367b613d92 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -360,6 +360,7 @@ struct ice_vsi { /* RSS config */ u16 rss_table_size; /* HW RSS table size */ u16 rss_size; /* Allocated RSS queues */ + u8 rss_hfunc; /* User configured hash type */ u8 *rss_hkey_user; /* User configured hash keys */ u8 *rss_lut_user; /* User configured lookup table entries */ u8 rss_lut_type; /* used to configure Get/Set RSS LUT AQ call */ @@ -517,16 +518,22 @@ enum ice_pf_flags { }; enum ice_misc_thread_tasks { - ICE_MISC_THREAD_EXTTS_EVENT, ICE_MISC_THREAD_TX_TSTAMP, ICE_MISC_THREAD_NBITS /* must be last */ }; -struct ice_switchdev_info { +struct ice_eswitch { struct ice_vsi *control_vsi; struct ice_vsi *uplink_vsi; struct ice_esw_br_offloads *br_offloads; + struct xarray reprs; bool is_running; + /* struct to allow cp queues management optimization */ + struct { + int to_reach; + int value; + bool is_reaching; + } qs; }; struct ice_agg_node { @@ -563,6 +570,10 @@ struct ice_pf { struct ice_vsi_stats 
**vsi_stats; struct ice_sw *first_sw; /* first switch created by firmware */ u16 eswitch_mode; /* current mode of eswitch */ + struct dentry *ice_debugfs_pf; + struct dentry *ice_debugfs_pf_fwlog; + /* keep track of all the dentrys for FW log modules */ + struct dentry **ice_debugfs_pf_fwlog_modules; struct ice_vfs vfs; DECLARE_BITMAP(features, ICE_F_MAX); DECLARE_BITMAP(state, ICE_STATE_NBITS); @@ -597,6 +608,7 @@ struct ice_pf { u32 hw_csum_rx_error; u32 oicr_err_reg; struct msi_map oicr_irq; /* Other interrupt cause MSIX vector */ + struct msi_map ll_ts_irq; /* LL_TS interrupt MSIX vector */ u16 max_pf_txqs; /* Total Tx queues PF wide */ u16 max_pf_rxqs; /* Total Rx queues PF wide */ u16 num_lan_msix; /* Total MSIX vectors for base driver */ @@ -621,6 +633,7 @@ struct ice_pf { unsigned long tx_timeout_last_recovery; u32 tx_timeout_recovery_level; char int_name[ICE_INT_NAME_STR_LEN]; + char int_name_ll_ts[ICE_INT_NAME_STR_LEN]; struct auxiliary_device *adev; int aux_idx; u32 sw_int_count; @@ -637,7 +650,7 @@ struct ice_pf { struct ice_link_default_override_tlv link_dflt_override; struct ice_lag *lag; /* Link Aggregation information */ - struct ice_switchdev_info switchdev; + struct ice_eswitch eswitch; struct ice_esw_br_port *br_port; #define ICE_INVALID_AGG_NODE_ID 0 @@ -648,6 +661,7 @@ struct ice_pf { #define ICE_MAX_VF_AGG_NODES 32 struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES]; struct ice_dplls dplls; + struct device *hwmon_dev; }; extern struct workqueue_struct *ice_lag_wq; @@ -846,7 +860,7 @@ static inline struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num) */ static inline bool ice_is_switchdev_running(struct ice_pf *pf) { - return pf->switchdev.is_running; + return pf->eswitch.is_running; } #define ICE_FD_STAT_CTR_BLOCK_COUNT 256 @@ -881,6 +895,11 @@ static inline bool ice_is_adq_active(struct ice_pf *pf) return false; } +void ice_debugfs_fwlog_init(struct ice_pf *pf); +void ice_debugfs_init(void); +void ice_debugfs_exit(void); +void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module); + bool netif_is_ice(const struct net_device *dev); int ice_vsi_setup_tx_rings(struct ice_vsi *vsi); int ice_vsi_setup_rx_rings(struct ice_vsi *vsi); @@ -912,6 +931,7 @@ int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size); int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size); int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed); int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed); +int ice_set_rss_hfunc(struct ice_vsi *vsi, u8 hfunc); void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size); int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset); void ice_print_link_msg(struct ice_vsi *vsi, bool isup); @@ -989,4 +1009,6 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf) set_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags); clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); } + +extern const struct xdp_metadata_ops ice_xdp_md_ops; #endif /* _ICE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index b8437c36ff..1f3e7a6903 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -117,6 +117,7 @@ struct ice_aqc_list_caps_elem { #define ICE_AQC_CAPS_NET_VER 0x004C #define ICE_AQC_CAPS_PENDING_NET_VER 0x004D #define ICE_AQC_CAPS_RDMA 0x0051 +#define ICE_AQC_CAPS_SENSOR_READING 0x0067 #define ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE 0x0076 #define ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077 #define 
ICE_AQC_CAPS_NVM_MGMT 0x0080 @@ -592,8 +593,9 @@ struct ice_aqc_recipe_data_elem { struct ice_aqc_recipe_to_profile { __le16 profile_id; u8 rsvd[6]; - DECLARE_BITMAP(recipe_assoc, ICE_MAX_NUM_RECIPES); + __le64 recipe_assoc; }; +static_assert(sizeof(struct ice_aqc_recipe_to_profile) == 16); /* Add/Update/Remove/Get switch rules (indirect 0x02A0, 0x02A1, 0x02A2, 0x02A3) */ @@ -1414,6 +1416,30 @@ struct ice_aqc_get_phy_rec_clk_out { __le16 node_handle; }; +/* Get sensor reading (direct 0x0632) */ +struct ice_aqc_get_sensor_reading { + u8 sensor; + u8 format; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +/* Get sensor reading response (direct 0x0632) */ +struct ice_aqc_get_sensor_reading_resp { + union { + u8 raw[8]; + /* Output data for sensor 0x00, format 0x00 */ + struct _packed { + s8 temp; + u8 temp_warning_threshold; + u8 temp_critical_threshold; + u8 temp_fatal_threshold; + u8 reserved[4]; + } s0f0; + } data; +}; + struct ice_aqc_link_topo_params { u8 lport_num; u8 lport_num_valid; @@ -2070,78 +2096,6 @@ struct ice_aqc_add_rdma_qset_data { struct ice_aqc_add_tx_rdma_qset_entry rdma_qsets[]; }; -/* Configure Firmware Logging Command (indirect 0xFF09) - * Logging Information Read Response (indirect 0xFF10) - * Note: The 0xFF10 command has no input parameters. - */ -struct ice_aqc_fw_logging { - u8 log_ctrl; -#define ICE_AQC_FW_LOG_AQ_EN BIT(0) -#define ICE_AQC_FW_LOG_UART_EN BIT(1) - u8 rsvd0; - u8 log_ctrl_valid; /* Not used by 0xFF10 Response */ -#define ICE_AQC_FW_LOG_AQ_VALID BIT(0) -#define ICE_AQC_FW_LOG_UART_VALID BIT(1) - u8 rsvd1[5]; - __le32 addr_high; - __le32 addr_low; -}; - -enum ice_aqc_fw_logging_mod { - ICE_AQC_FW_LOG_ID_GENERAL = 0, - ICE_AQC_FW_LOG_ID_CTRL, - ICE_AQC_FW_LOG_ID_LINK, - ICE_AQC_FW_LOG_ID_LINK_TOPO, - ICE_AQC_FW_LOG_ID_DNL, - ICE_AQC_FW_LOG_ID_I2C, - ICE_AQC_FW_LOG_ID_SDP, - ICE_AQC_FW_LOG_ID_MDIO, - ICE_AQC_FW_LOG_ID_ADMINQ, - ICE_AQC_FW_LOG_ID_HDMA, - ICE_AQC_FW_LOG_ID_LLDP, - ICE_AQC_FW_LOG_ID_DCBX, - ICE_AQC_FW_LOG_ID_DCB, - ICE_AQC_FW_LOG_ID_NETPROXY, - ICE_AQC_FW_LOG_ID_NVM, - ICE_AQC_FW_LOG_ID_AUTH, - ICE_AQC_FW_LOG_ID_VPD, - ICE_AQC_FW_LOG_ID_IOSF, - ICE_AQC_FW_LOG_ID_PARSER, - ICE_AQC_FW_LOG_ID_SW, - ICE_AQC_FW_LOG_ID_SCHEDULER, - ICE_AQC_FW_LOG_ID_TXQ, - ICE_AQC_FW_LOG_ID_RSVD, - ICE_AQC_FW_LOG_ID_POST, - ICE_AQC_FW_LOG_ID_WATCHDOG, - ICE_AQC_FW_LOG_ID_TASK_DISPATCH, - ICE_AQC_FW_LOG_ID_MNG, - ICE_AQC_FW_LOG_ID_MAX, -}; - -/* Defines for both above FW logging command/response buffers */ -#define ICE_AQC_FW_LOG_ID_S 0 -#define ICE_AQC_FW_LOG_ID_M (0xFFF << ICE_AQC_FW_LOG_ID_S) - -#define ICE_AQC_FW_LOG_CONF_SUCCESS 0 /* Used by response */ -#define ICE_AQC_FW_LOG_CONF_BAD_INDX BIT(12) /* Used by response */ - -#define ICE_AQC_FW_LOG_EN_S 12 -#define ICE_AQC_FW_LOG_EN_M (0xF << ICE_AQC_FW_LOG_EN_S) -#define ICE_AQC_FW_LOG_INFO_EN BIT(12) /* Used by command */ -#define ICE_AQC_FW_LOG_INIT_EN BIT(13) /* Used by command */ -#define ICE_AQC_FW_LOG_FLOW_EN BIT(14) /* Used by command */ -#define ICE_AQC_FW_LOG_ERR_EN BIT(15) /* Used by command */ - -/* Get/Clear FW Log (indirect 0xFF11) */ -struct ice_aqc_get_clear_fw_log { - u8 flags; -#define ICE_AQC_FW_LOG_CLEAR BIT(0) -#define ICE_AQC_FW_LOG_MORE_DATA_AVAIL BIT(1) - u8 rsvd1[7]; - __le32 addr_high; - __le32 addr_low; -}; - /* Download Package (indirect 0x0C40) */ /* Also used for Update Package (indirect 0x0C41 and 0x0C42) */ struct ice_aqc_download_pkg { @@ -2404,6 +2358,84 @@ struct ice_aqc_event_lan_overflow { u8 reserved[8]; }; +enum ice_aqc_fw_logging_mod { + 
ICE_AQC_FW_LOG_ID_GENERAL = 0, + ICE_AQC_FW_LOG_ID_CTRL, + ICE_AQC_FW_LOG_ID_LINK, + ICE_AQC_FW_LOG_ID_LINK_TOPO, + ICE_AQC_FW_LOG_ID_DNL, + ICE_AQC_FW_LOG_ID_I2C, + ICE_AQC_FW_LOG_ID_SDP, + ICE_AQC_FW_LOG_ID_MDIO, + ICE_AQC_FW_LOG_ID_ADMINQ, + ICE_AQC_FW_LOG_ID_HDMA, + ICE_AQC_FW_LOG_ID_LLDP, + ICE_AQC_FW_LOG_ID_DCBX, + ICE_AQC_FW_LOG_ID_DCB, + ICE_AQC_FW_LOG_ID_XLR, + ICE_AQC_FW_LOG_ID_NVM, + ICE_AQC_FW_LOG_ID_AUTH, + ICE_AQC_FW_LOG_ID_VPD, + ICE_AQC_FW_LOG_ID_IOSF, + ICE_AQC_FW_LOG_ID_PARSER, + ICE_AQC_FW_LOG_ID_SW, + ICE_AQC_FW_LOG_ID_SCHEDULER, + ICE_AQC_FW_LOG_ID_TXQ, + ICE_AQC_FW_LOG_ID_RSVD, + ICE_AQC_FW_LOG_ID_POST, + ICE_AQC_FW_LOG_ID_WATCHDOG, + ICE_AQC_FW_LOG_ID_TASK_DISPATCH, + ICE_AQC_FW_LOG_ID_MNG, + ICE_AQC_FW_LOG_ID_SYNCE, + ICE_AQC_FW_LOG_ID_HEALTH, + ICE_AQC_FW_LOG_ID_TSDRV, + ICE_AQC_FW_LOG_ID_PFREG, + ICE_AQC_FW_LOG_ID_MDLVER, + ICE_AQC_FW_LOG_ID_MAX, +}; + +/* Set FW Logging configuration (indirect 0xFF30) + * Register for FW Logging (indirect 0xFF31) + * Query FW Logging (indirect 0xFF32) + * FW Log Event (indirect 0xFF33) + */ +struct ice_aqc_fw_log { + u8 cmd_flags; +#define ICE_AQC_FW_LOG_CONF_UART_EN BIT(0) +#define ICE_AQC_FW_LOG_CONF_AQ_EN BIT(1) +#define ICE_AQC_FW_LOG_QUERY_REGISTERED BIT(2) +#define ICE_AQC_FW_LOG_CONF_SET_VALID BIT(3) +#define ICE_AQC_FW_LOG_AQ_REGISTER BIT(0) +#define ICE_AQC_FW_LOG_AQ_QUERY BIT(2) + + u8 rsp_flag; + __le16 fw_rt_msb; + union { + struct { + __le32 fw_rt_lsb; + } sync; + struct { + __le16 log_resolution; +#define ICE_AQC_FW_LOG_MIN_RESOLUTION (1) +#define ICE_AQC_FW_LOG_MAX_RESOLUTION (128) + + __le16 mdl_cnt; + } cfg; + } ops; + __le32 addr_high; + __le32 addr_low; +}; + +/* Response Buffer for: + * Set Firmware Logging Configuration (0xFF30) + * Query FW Logging (0xFF32) + */ +struct ice_aqc_fw_log_cfg_resp { + __le16 module_identifier; + u8 log_level; + u8 rsvd0; +}; + /** * struct ice_aq_desc - Admin Queue (AQ) descriptor * @flags: ICE_AQ_FLAG_* flags @@ -2444,6 +2476,8 @@ struct ice_aq_desc { struct ice_aqc_restart_an restart_an; struct ice_aqc_set_phy_rec_clk_out set_phy_rec_clk_out; struct ice_aqc_get_phy_rec_clk_out get_phy_rec_clk_out; + struct ice_aqc_get_sensor_reading get_sensor_reading; + struct ice_aqc_get_sensor_reading_resp get_sensor_reading_resp; struct ice_aqc_gpio read_write_gpio; struct ice_aqc_sff_eeprom read_write_sff_param; struct ice_aqc_set_port_id_led set_port_id_led; @@ -2481,8 +2515,6 @@ struct ice_aq_desc { struct ice_aqc_add_rdma_qset add_rdma_qset; struct ice_aqc_add_get_update_free_vsi vsi_cmd; struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res; - struct ice_aqc_fw_logging fw_logging; - struct ice_aqc_get_clear_fw_log get_clear_fw_log; struct ice_aqc_download_pkg download_pkg; struct ice_aqc_set_cgu_input_config set_cgu_input_config; struct ice_aqc_get_cgu_input_config get_cgu_input_config; @@ -2494,6 +2526,7 @@ struct ice_aq_desc { struct ice_aqc_get_cgu_ref_prio get_cgu_ref_prio; struct ice_aqc_get_cgu_info get_cgu_info; struct ice_aqc_driver_shared_params drv_shared_params; + struct ice_aqc_fw_log fw_log; struct ice_aqc_set_mac_lb set_mac_lb; struct ice_aqc_alloc_free_res_cmd sw_res_ctrl; struct ice_aqc_set_mac_cfg set_mac_cfg; @@ -2619,6 +2652,7 @@ enum ice_adminq_opc { ice_aqc_opc_set_mac_lb = 0x0620, ice_aqc_opc_set_phy_rec_clk_out = 0x0630, ice_aqc_opc_get_phy_rec_clk_out = 0x0631, + ice_aqc_opc_get_sensor_reading = 0x0632, ice_aqc_opc_get_link_topo = 0x06E0, ice_aqc_opc_read_i2c = 0x06E2, ice_aqc_opc_write_i2c = 0x06E3, @@ -2691,9 +2725,11 @@ enum ice_adminq_opc { /* 
Standalone Commands/Events */ ice_aqc_opc_event_lan_overflow = 0x1001, - /* debug commands */ - ice_aqc_opc_fw_logging = 0xFF09, - ice_aqc_opc_fw_logging_info = 0xFF10, + /* FW Logging Commands */ + ice_aqc_opc_fw_logs_config = 0xFF30, + ice_aqc_opc_fw_logs_register = 0xFF31, + ice_aqc_opc_fw_logs_query = 0xFF32, + ice_aqc_opc_fw_logs_event = 0xFF33, }; #endif /* _ICE_ADMINQ_CMD_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index 4f3e65b47c..c979192e44 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -189,10 +189,16 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx) } q_vector = vsi->q_vectors[v_idx]; - ice_for_each_tx_ring(tx_ring, q_vector->tx) + ice_for_each_tx_ring(tx_ring, q_vector->tx) { + ice_queue_set_napi(vsi, tx_ring->q_index, NETDEV_QUEUE_TYPE_TX, + NULL); tx_ring->q_vector = NULL; - ice_for_each_rx_ring(rx_ring, q_vector->rx) + } + ice_for_each_rx_ring(rx_ring, q_vector->rx) { + ice_queue_set_napi(vsi, rx_ring->q_index, NETDEV_QUEUE_TYPE_RX, + NULL); rx_ring->q_vector = NULL; + } /* only VSI with an associated netdev is set up with NAPI */ if (vsi->netdev) @@ -224,24 +230,16 @@ static void ice_cfg_itr_gran(struct ice_hw *hw) /* no need to update global register if ITR gran is already set */ if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) && - (((regval & GLINT_CTL_ITR_GRAN_200_M) >> - GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) && - (((regval & GLINT_CTL_ITR_GRAN_100_M) >> - GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) && - (((regval & GLINT_CTL_ITR_GRAN_50_M) >> - GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) && - (((regval & GLINT_CTL_ITR_GRAN_25_M) >> - GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US)) + (FIELD_GET(GLINT_CTL_ITR_GRAN_200_M, regval) == ICE_ITR_GRAN_US) && + (FIELD_GET(GLINT_CTL_ITR_GRAN_100_M, regval) == ICE_ITR_GRAN_US) && + (FIELD_GET(GLINT_CTL_ITR_GRAN_50_M, regval) == ICE_ITR_GRAN_US) && + (FIELD_GET(GLINT_CTL_ITR_GRAN_25_M, regval) == ICE_ITR_GRAN_US)) return; - regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) & - GLINT_CTL_ITR_GRAN_200_M) | - ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) & - GLINT_CTL_ITR_GRAN_100_M) | - ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) & - GLINT_CTL_ITR_GRAN_50_M) | - ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) & - GLINT_CTL_ITR_GRAN_25_M); + regval = FIELD_PREP(GLINT_CTL_ITR_GRAN_200_M, ICE_ITR_GRAN_US) | + FIELD_PREP(GLINT_CTL_ITR_GRAN_100_M, ICE_ITR_GRAN_US) | + FIELD_PREP(GLINT_CTL_ITR_GRAN_50_M, ICE_ITR_GRAN_US) | + FIELD_PREP(GLINT_CTL_ITR_GRAN_25_M, ICE_ITR_GRAN_US); wr32(hw, GLINT_CTL, regval); } @@ -278,7 +276,7 @@ static u16 ice_calc_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8 */ static u16 ice_eswitch_calc_txq_handle(struct ice_tx_ring *ring) { - struct ice_vsi *vsi = ring->vsi; + const struct ice_vsi *vsi = ring->vsi; int i; ice_for_each_txq(vsi, i) { @@ -519,6 +517,19 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring) return 0; } +static void ice_xsk_pool_fill_cb(struct ice_rx_ring *ring) +{ + void *ctx_ptr = &ring->pkt_ctx; + struct xsk_cb_desc desc = {}; + + XSK_CHECK_PRIV_TYPE(struct ice_xdp_buff); + desc.src = &ctx_ptr; + desc.off = offsetof(struct ice_xdp_buff, pkt_ctx) - + sizeof(struct xdp_buff); + desc.bytes = sizeof(ctx_ptr); + xsk_pool_fill_cb(ring->xsk_pool, &desc); +} + /** * ice_vsi_cfg_rxq - Configure an Rx queue * @ring: the ring being configured @@ -561,6 +572,7 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring) if (err) return err; 
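The new ice_xsk_pool_fill_cb() above hands the ring's pkt_ctx pointer to the XSK buffer pool, and the "- sizeof(struct xdp_buff)" term is the subtle part: xsk_cb_desc offsets are relative to the per-buffer private (cb) area, which begins immediately after the xdp_buff embedded at the head of struct ice_xdp_buff, and which XSK_CHECK_PRIV_TYPE() sizes against. A standalone sketch of that offset arithmetic, using simplified stand-in types rather than the real ice/xsk definitions:

#include <stddef.h>
#include <stdio.h>

struct xdp_buff { void *data, *data_end; };	/* stand-in */
struct pkt_ctx { const void *eop_desc; };	/* stand-in */

struct ice_xdp_buff {
	struct xdp_buff xdp_buff;		/* must stay first */
	const void *eop_desc;
	const struct pkt_ctx *pkt_ctx;		/* copied in from the pool cb */
};

int main(void)
{
	/* same arithmetic as desc.off in ice_xsk_pool_fill_cb() */
	size_t off = offsetof(struct ice_xdp_buff, pkt_ctx) -
		     sizeof(struct xdp_buff);

	printf("pkt_ctx lands %zu bytes into the private cb area\n", off);
	return 0;
}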
xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); + ice_xsk_pool_fill_cb(ring); dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n", ring->q_index); @@ -584,6 +596,7 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring) xdp_init_buff(&ring->xdp, ice_rx_pg_size(ring) / 2, &ring->xdp_rxq); ring->xdp.data = NULL; + ring->xdp_ext.pkt_ctx = &ring->pkt_ctx; err = ice_setup_rx_ctx(ring); if (err) { dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n", @@ -922,10 +935,10 @@ ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx) struct ice_hw *hw = &pf->hw; u32 val; - itr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M; + itr_idx = FIELD_PREP(QINT_TQCTL_ITR_INDX_M, itr_idx); val = QINT_TQCTL_CAUSE_ENA_M | itr_idx | - ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M); + FIELD_PREP(QINT_TQCTL_MSIX_INDX_M, msix_idx); wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val); if (ice_is_xdp_ena_vsi(vsi)) { @@ -954,10 +967,10 @@ ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx) struct ice_hw *hw = &pf->hw; u32 val; - itr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M; + itr_idx = FIELD_PREP(QINT_RQCTL_ITR_INDX_M, itr_idx); val = QINT_RQCTL_CAUSE_ENA_M | itr_idx | - ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M); + FIELD_PREP(QINT_RQCTL_MSIX_INDX_M, msix_idx); wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val); @@ -969,7 +982,7 @@ ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx) * @hw: pointer to the HW structure * @q_vector: interrupt vector to trigger the software interrupt for */ -void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector) +void ice_trigger_sw_intr(struct ice_hw *hw, const struct ice_q_vector *q_vector) { wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) | @@ -1044,7 +1057,7 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, * are needed for stopping Tx queue */ void -ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_tx_ring *ring, +ice_fill_txq_meta(const struct ice_vsi *vsi, struct ice_tx_ring *ring, struct ice_txq_meta *txq_meta) { struct ice_channel *ch = ring->ch; diff --git a/drivers/net/ethernet/intel/ice/ice_base.h b/drivers/net/ethernet/intel/ice/ice_base.h index b67dca417a..17321ba756 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.h +++ b/drivers/net/ethernet/intel/ice/ice_base.h @@ -22,12 +22,12 @@ void ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx); void ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx); -void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector); +void ice_trigger_sw_intr(struct ice_hw *hw, const struct ice_q_vector *q_vector); int ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, u16 rel_vmvf_num, struct ice_tx_ring *ring, struct ice_txq_meta *txq_meta); void -ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_tx_ring *ring, +ice_fill_txq_meta(const struct ice_vsi *vsi, struct ice_tx_ring *ring, struct ice_txq_meta *txq_meta); #endif /* _ICE_BASE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index edac34c796..10c32cd80f 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -934,216 +934,6 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw) } /** - * ice_get_fw_log_cfg - get FW logging 
configuration - * @hw: pointer to the HW struct - */ -static int ice_get_fw_log_cfg(struct ice_hw *hw) -{ - struct ice_aq_desc desc; - __le16 *config; - int status; - u16 size; - - size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX; - config = kzalloc(size, GFP_KERNEL); - if (!config) - return -ENOMEM; - - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info); - - status = ice_aq_send_cmd(hw, &desc, config, size, NULL); - if (!status) { - u16 i; - - /* Save FW logging information into the HW structure */ - for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) { - u16 v, m, flgs; - - v = le16_to_cpu(config[i]); - m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S; - flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S; - - if (m < ICE_AQC_FW_LOG_ID_MAX) - hw->fw_log.evnts[m].cur = flgs; - } - } - - kfree(config); - - return status; -} - -/** - * ice_cfg_fw_log - configure FW logging - * @hw: pointer to the HW struct - * @enable: enable certain FW logging events if true, disable all if false - * - * This function enables/disables the FW logging via Rx CQ events and a UART - * port based on predetermined configurations. FW logging via the Rx CQ can be - * enabled/disabled for individual PF's. However, FW logging via the UART can - * only be enabled/disabled for all PFs on the same device. - * - * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in - * hw->fw_log need to be set accordingly, e.g. based on user-provided input, - * before initializing the device. - * - * When re/configuring FW logging, callers need to update the "cfg" elements of - * the hw->fw_log.evnts array with the desired logging event configurations for - * modules of interest. When disabling FW logging completely, the callers can - * just pass false in the "enable" parameter. On completion, the function will - * update the "cur" element of the hw->fw_log.evnts array with the resulting - * logging event configurations of the modules that are being re/configured. FW - * logging modules that are not part of a reconfiguration operation retain their - * previous states. - * - * Before resetting the device, it is recommended that the driver disables FW - * logging before shutting down the control queue. When disabling FW logging - * ("enable" = false), the latest configurations of FW logging events stored in - * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after - * a device reset. - * - * When enabling FW logging to emit log messages via the Rx CQ during the - * device's initialization phase, a mechanism alternative to interrupt handlers - * needs to be used to extract FW log messages from the Rx CQ periodically and - * to prevent the Rx CQ from being full and stalling other types of control - * messages from FW to SW. Interrupts are typically disabled during the device's - * initialization phase. 
- */ -static int ice_cfg_fw_log(struct ice_hw *hw, bool enable) -{ - struct ice_aqc_fw_logging *cmd; - u16 i, chgs = 0, len = 0; - struct ice_aq_desc desc; - __le16 *data = NULL; - u8 actv_evnts = 0; - void *buf = NULL; - int status = 0; - - if (!hw->fw_log.cq_en && !hw->fw_log.uart_en) - return 0; - - /* Disable FW logging only when the control queue is still responsive */ - if (!enable && - (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq))) - return 0; - - /* Get current FW log settings */ - status = ice_get_fw_log_cfg(hw); - if (status) - return status; - - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging); - cmd = &desc.params.fw_logging; - - /* Indicate which controls are valid */ - if (hw->fw_log.cq_en) - cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID; - - if (hw->fw_log.uart_en) - cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID; - - if (enable) { - /* Fill in an array of entries with FW logging modules and - * logging events being reconfigured. - */ - for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) { - u16 val; - - /* Keep track of enabled event types */ - actv_evnts |= hw->fw_log.evnts[i].cfg; - - if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur) - continue; - - if (!data) { - data = devm_kcalloc(ice_hw_to_dev(hw), - ICE_AQC_FW_LOG_ID_MAX, - sizeof(*data), - GFP_KERNEL); - if (!data) - return -ENOMEM; - } - - val = i << ICE_AQC_FW_LOG_ID_S; - val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S; - data[chgs++] = cpu_to_le16(val); - } - - /* Only enable FW logging if at least one module is specified. - * If FW logging is currently enabled but all modules are not - * enabled to emit log messages, disable FW logging altogether. - */ - if (actv_evnts) { - /* Leave if there is effectively no change */ - if (!chgs) - goto out; - - if (hw->fw_log.cq_en) - cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN; - - if (hw->fw_log.uart_en) - cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN; - - buf = data; - len = sizeof(*data) * chgs; - desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); - } - } - - status = ice_aq_send_cmd(hw, &desc, buf, len, NULL); - if (!status) { - /* Update the current configuration to reflect events enabled. - * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW - * logging mode is enabled for the device. They do not reflect - * actual modules being enabled to emit log messages. So, their - * values remain unchanged even when all modules are disabled. - */ - u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX; - - hw->fw_log.actv_evnts = actv_evnts; - for (i = 0; i < cnt; i++) { - u16 v, m; - - if (!enable) { - /* When disabling all FW logging events as part - * of device's de-initialization, the original - * configurations are retained, and can be used - * to reconfigure FW logging later if the device - * is re-initialized. - */ - hw->fw_log.evnts[i].cur = 0; - continue; - } - - v = le16_to_cpu(data[i]); - m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S; - hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg; - } - } - -out: - devm_kfree(ice_hw_to_dev(hw), data); - - return status; -} - -/** - * ice_output_fw_log - * @hw: pointer to the HW struct - * @desc: pointer to the AQ message descriptor - * @buf: pointer to the buffer accompanying the AQ message - * - * Formats a FW Log message and outputs it via the standard driver logs. 
- */ -void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf) -{ - ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n"); - ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf, - le16_to_cpu(desc->datalen)); - ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n"); -} - -/** * ice_get_itr_intrl_gran * @hw: pointer to the HW struct * @@ -1152,9 +942,8 @@ void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf) */ static void ice_get_itr_intrl_gran(struct ice_hw *hw) { - u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) & - GL_PWR_MODE_CTL_CAR_MAX_BW_M) >> - GL_PWR_MODE_CTL_CAR_MAX_BW_S; + u8 max_agg_bw = FIELD_GET(GL_PWR_MODE_CTL_CAR_MAX_BW_M, + rd32(hw, GL_PWR_MODE_CTL)); switch (max_agg_bw) { case ICE_MAX_AGG_BW_200G: @@ -1186,9 +975,7 @@ int ice_init_hw(struct ice_hw *hw) if (status) return status; - hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) & - PF_FUNC_RID_FUNC_NUM_M) >> - PF_FUNC_RID_FUNC_NUM_S; + hw->pf_id = FIELD_GET(PF_FUNC_RID_FUNC_NUM_M, rd32(hw, PF_FUNC_RID)); status = ice_reset(hw, ICE_RESET_PFR); if (status) @@ -1200,10 +987,10 @@ int ice_init_hw(struct ice_hw *hw) if (status) goto err_unroll_cqinit; - /* Enable FW logging. Not fatal if this fails. */ - status = ice_cfg_fw_log(hw, true); + status = ice_fwlog_init(hw); if (status) - ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n"); + ice_debug(hw, ICE_DBG_FW_LOG, "Error initializing FW logging: %d\n", + status); status = ice_clear_pf_cfg(hw); if (status) @@ -1354,8 +1141,7 @@ void ice_deinit_hw(struct ice_hw *hw) ice_free_hw_tbls(hw); mutex_destroy(&hw->tnl_lock); - /* Attempt to disable FW logging before shutting down control queues */ - ice_cfg_fw_log(hw, false); + ice_fwlog_deinit(hw); ice_destroy_all_ctrlq(hw); /* Clear VSI contexts if not already cleared */ @@ -1374,8 +1160,8 @@ int ice_check_reset(struct ice_hw *hw) * or EMPR has occurred. The grst delay value is in 100ms units. * Add 1sec for outstanding AQ commands that can take a long time. 
*/ - grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >> - GLGEN_RSTCTL_GRSTDEL_S) + 10; + grst_timeout = FIELD_GET(GLGEN_RSTCTL_GRSTDEL_M, + rd32(hw, GLGEN_RSTCTL)) + 10; for (cnt = 0; cnt < grst_timeout; cnt++) { mdelay(100); @@ -2459,7 +2245,7 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0); info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0); - info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S; + info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number); info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0); if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) { @@ -2660,11 +2446,12 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0); info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0); - info->tmr1_owner = (number & ICE_TS_TMR1_OWNR_M) >> ICE_TS_TMR1_OWNR_S; + info->tmr1_owner = FIELD_GET(ICE_TS_TMR1_OWNR_M, number); info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0); info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0); info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0); + info->ts_ll_int_read = ((number & ICE_TS_LL_TX_TS_INT_READ_M) != 0); info->ena_ports = logical_id; info->tmr_own_map = phys_id; @@ -2685,6 +2472,8 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, info->tmr1_ena); ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_read = %u\n", info->ts_ll_read); + ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_int_read = %u\n", + info->ts_ll_int_read); ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n", info->ena_ports); ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n", @@ -2711,6 +2500,26 @@ ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, } /** + * ice_parse_sensor_reading_cap - Parse ICE_AQC_CAPS_SENSOR_READING cap + * @hw: pointer to the HW struct + * @dev_p: pointer to device capabilities structure + * @cap: capability element to parse + * + * Parse ICE_AQC_CAPS_SENSOR_READING for device capability for reading + * enabled sensors. 
+ */ +static void +ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, + struct ice_aqc_list_caps_elem *cap) +{ + dev_p->supported_sensors = le32_to_cpu(cap->number); + + ice_debug(hw, ICE_DBG_INIT, + "dev caps: supported sensors (bitmap) = 0x%x\n", + dev_p->supported_sensors); +} + +/** * ice_parse_dev_caps - Parse device capabilities * @hw: pointer to the HW struct * @dev_p: pointer to device capabilities structure @@ -2755,9 +2564,12 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, case ICE_AQC_CAPS_1588: ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]); break; - case ICE_AQC_CAPS_FD: + case ICE_AQC_CAPS_FD: ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]); break; + case ICE_AQC_CAPS_SENSOR_READING: + ice_parse_sensor_reading_cap(hw, dev_p, &cap_resp[i]); + break; default: /* Don't list common capabilities as unknown */ if (!found) @@ -4072,6 +3884,7 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, { struct ice_aqc_sff_eeprom *cmd; struct ice_aq_desc desc; + u16 i2c_bus_addr; int status; if (!data || (mem_addr & 0xff00)) @@ -4082,15 +3895,13 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD); cmd->lport_num = (u8)(lport & 0xff); cmd->lport_num_valid = (u8)((lport >> 8) & 0x01); - cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) & - ICE_AQC_SFF_I2CBUS_7BIT_M) | - ((set_page << - ICE_AQC_SFF_SET_EEPROM_PAGE_S) & - ICE_AQC_SFF_SET_EEPROM_PAGE_M)); - cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff); - cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S); + i2c_bus_addr = FIELD_PREP(ICE_AQC_SFF_I2CBUS_7BIT_M, bus_addr >> 1) | + FIELD_PREP(ICE_AQC_SFF_SET_EEPROM_PAGE_M, set_page); if (write) - cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE); + i2c_bus_addr |= ICE_AQC_SFF_IS_WRITE; + cmd->i2c_bus_addr = cpu_to_le16(i2c_bus_addr); + cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff); + cmd->eeprom_page = le16_encode_bits(page, ICE_AQC_SFF_EEPROM_PAGE_M); status = ice_aq_send_cmd(hw, &desc, data, length, cd); return status; @@ -4345,6 +4156,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, struct ice_aqc_dis_txq_item *item; struct ice_aqc_dis_txqs *cmd; struct ice_aq_desc desc; + u16 vmvf_and_timeout; u16 i, sz = 0; int status; @@ -4360,27 +4172,26 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, cmd->num_entries = num_qgrps; - cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) & - ICE_AQC_Q_DIS_TIMEOUT_M); + vmvf_and_timeout = FIELD_PREP(ICE_AQC_Q_DIS_TIMEOUT_M, 5); switch (rst_src) { case ICE_VM_RESET: cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET; - cmd->vmvf_and_timeout |= - cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M); + vmvf_and_timeout |= vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M; break; case ICE_VF_RESET: cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET; /* In this case, FW expects vmvf_num to be absolute VF ID */ - cmd->vmvf_and_timeout |= - cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) & - ICE_AQC_Q_DIS_VMVF_NUM_M); + vmvf_and_timeout |= (vmvf_num + hw->func_caps.vf_base_id) & + ICE_AQC_Q_DIS_VMVF_NUM_M; break; case ICE_NO_RESET: default: break; } + cmd->vmvf_and_timeout = cpu_to_le16(vmvf_and_timeout); + /* flush pipe on time out */ cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE; /* If no queue group info, we are in a reset flow. 
Issue the AQ */ @@ -4455,10 +4266,8 @@ ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf, cmd->cmd_type = ICE_AQC_Q_CFG_TC_CHNG; cmd->num_qs = num_qs; cmd->port_num_chng = (oldport & ICE_AQC_Q_CFG_SRC_PRT_M); - cmd->port_num_chng |= (newport << ICE_AQC_Q_CFG_DST_PRT_S) & - ICE_AQC_Q_CFG_DST_PRT_M; - cmd->time_out = (5 << ICE_AQC_Q_CFG_TIMEOUT_S) & - ICE_AQC_Q_CFG_TIMEOUT_M; + cmd->port_num_chng |= FIELD_PREP(ICE_AQC_Q_CFG_DST_PRT_M, newport); + cmd->time_out = FIELD_PREP(ICE_AQC_Q_CFG_TIMEOUT_M, 5); cmd->blocked_cgds = 0; status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); @@ -5539,6 +5348,35 @@ ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num, } /** + * ice_aq_get_sensor_reading + * @hw: pointer to the HW struct + * @data: pointer to data to be read from the sensor + * + * Get sensor reading (0x0632) + */ +int ice_aq_get_sensor_reading(struct ice_hw *hw, + struct ice_aqc_get_sensor_reading_resp *data) +{ + struct ice_aqc_get_sensor_reading *cmd; + struct ice_aq_desc desc; + int status; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sensor_reading); + cmd = &desc.params.get_sensor_reading; +#define ICE_INTERNAL_TEMP_SENSOR_FORMAT 0 +#define ICE_INTERNAL_TEMP_SENSOR 0 + cmd->sensor = ICE_INTERNAL_TEMP_SENSOR; + cmd->format = ICE_INTERNAL_TEMP_SENSOR_FORMAT; + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + if (!status) + memcpy(data, &desc.params.get_sensor_reading_resp, + sizeof(*data)); + + return status; +} + +/** * ice_replay_pre_init - replay pre initialization * @hw: pointer to the HW struct * @@ -5933,7 +5771,7 @@ ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n"); return status; } - ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M; + ldo->options = FIELD_GET(ICE_LINK_OVERRIDE_OPT_M, buf); ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >> ICE_LINK_OVERRIDE_PHY_CFG_S; diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index 31fdcac339..3e933f75e9 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -6,6 +6,7 @@ #include <linux/bitfield.h> +#include "ice.h" #include "ice_type.h" #include "ice_nvm.h" #include "ice_flex_pipe.h" @@ -199,7 +200,6 @@ ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf, struct ice_sq_cd *cd); int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle); void ice_replay_post(struct ice_hw *hw); -void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf); struct ice_q_ctx * ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle); int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in); @@ -241,6 +241,8 @@ ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable, int ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num, u8 *flags, u16 *node_handle); +int ice_aq_get_sensor_reading(struct ice_hw *hw, + struct ice_aqc_get_sensor_reading_resp *data); void ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat); diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c index 396e555023..74418c445c 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb.c @@ -35,8 +35,7 @@ ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf, 
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_get_mib); cmd->type = mib_type & ICE_AQ_LLDP_MIB_TYPE_M; - cmd->type |= (bridge_type << ICE_AQ_LLDP_BRID_TYPE_S) & - ICE_AQ_LLDP_BRID_TYPE_M; + cmd->type |= FIELD_PREP(ICE_AQ_LLDP_BRID_TYPE_M, bridge_type); desc.datalen = cpu_to_le16(buf_size); @@ -147,8 +146,7 @@ static u8 ice_get_dcbx_status(struct ice_hw *hw) u32 reg; reg = rd32(hw, PRTDCB_GENS); - return (u8)((reg & PRTDCB_GENS_DCBX_STATUS_M) >> - PRTDCB_GENS_DCBX_STATUS_S); + return FIELD_GET(PRTDCB_GENS_DCBX_STATUS_M, reg); } /** @@ -174,11 +172,9 @@ ice_parse_ieee_ets_common_tlv(u8 *buf, struct ice_dcb_ets_cfg *ets_cfg) */ for (i = 0; i < 4; i++) { ets_cfg->prio_table[i * 2] = - ((buf[offset] & ICE_IEEE_ETS_PRIO_1_M) >> - ICE_IEEE_ETS_PRIO_1_S); + FIELD_GET(ICE_IEEE_ETS_PRIO_1_M, buf[offset]); ets_cfg->prio_table[i * 2 + 1] = - ((buf[offset] & ICE_IEEE_ETS_PRIO_0_M) >> - ICE_IEEE_ETS_PRIO_0_S); + FIELD_GET(ICE_IEEE_ETS_PRIO_0_M, buf[offset]); offset++; } @@ -222,11 +218,9 @@ ice_parse_ieee_etscfg_tlv(struct ice_lldp_org_tlv *tlv, * |1bit | 1bit|3 bits|3bits| */ etscfg = &dcbcfg->etscfg; - etscfg->willing = ((buf[0] & ICE_IEEE_ETS_WILLING_M) >> - ICE_IEEE_ETS_WILLING_S); - etscfg->cbs = ((buf[0] & ICE_IEEE_ETS_CBS_M) >> ICE_IEEE_ETS_CBS_S); - etscfg->maxtcs = ((buf[0] & ICE_IEEE_ETS_MAXTC_M) >> - ICE_IEEE_ETS_MAXTC_S); + etscfg->willing = FIELD_GET(ICE_IEEE_ETS_WILLING_M, buf[0]); + etscfg->cbs = FIELD_GET(ICE_IEEE_ETS_CBS_M, buf[0]); + etscfg->maxtcs = FIELD_GET(ICE_IEEE_ETS_MAXTC_M, buf[0]); /* Begin parsing at Priority Assignment Table (offset 1 in buf) */ ice_parse_ieee_ets_common_tlv(&buf[1], etscfg); @@ -268,11 +262,9 @@ ice_parse_ieee_pfccfg_tlv(struct ice_lldp_org_tlv *tlv, * ----------------------------------------- * |1bit | 1bit|2 bits|4bits| 1 octet | */ - dcbcfg->pfc.willing = ((buf[0] & ICE_IEEE_PFC_WILLING_M) >> - ICE_IEEE_PFC_WILLING_S); - dcbcfg->pfc.mbc = ((buf[0] & ICE_IEEE_PFC_MBC_M) >> ICE_IEEE_PFC_MBC_S); - dcbcfg->pfc.pfccap = ((buf[0] & ICE_IEEE_PFC_CAP_M) >> - ICE_IEEE_PFC_CAP_S); + dcbcfg->pfc.willing = FIELD_GET(ICE_IEEE_PFC_WILLING_M, buf[0]); + dcbcfg->pfc.mbc = FIELD_GET(ICE_IEEE_PFC_MBC_M, buf[0]); + dcbcfg->pfc.pfccap = FIELD_GET(ICE_IEEE_PFC_CAP_M, buf[0]); dcbcfg->pfc.pfcena = buf[1]; } @@ -294,7 +286,7 @@ ice_parse_ieee_app_tlv(struct ice_lldp_org_tlv *tlv, u8 *buf; typelen = ntohs(tlv->typelen); - len = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S); + len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen); buf = tlv->tlvinfo; /* Removing sizeof(ouisubtype) and reserved byte from len. 
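All of the ice_dcb.c conversions in this region decode the same 16-bit LLDP TLV header word, which packs a 7-bit type above a 9-bit length per the IEEE 802.1AB layout. A user-space sketch of that decode; the mask values mirror what ICE_LLDP_TLV_TYPE_M and ICE_LLDP_TLV_LEN_M are expected to be, but are written out here as stand-ins:

#include <stdint.h>
#include <stdio.h>

#define TLV_TYPE_MASK	0xFE00u	/* bits 15:9, 7-bit TLV type */
#define TLV_LEN_MASK	0x01FFu	/* bits 8:0, 9-bit payload length */
#define LOW_BIT(m)	((m) & -(m))
#define FIELD_GET(m, v)	(((v) & (m)) / LOW_BIT(m))

int main(void)
{
	uint16_t typelen = (127u << 9) | 16u;	/* org-specific TLV, 16-byte payload */

	printf("type=%u len=%u\n",
	       FIELD_GET(TLV_TYPE_MASK, typelen),
	       FIELD_GET(TLV_LEN_MASK, typelen));
	return 0;
}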
@@ -314,12 +306,10 @@ ice_parse_ieee_app_tlv(struct ice_lldp_org_tlv *tlv, * ----------------------------------------- */ while (offset < len) { - dcbcfg->app[i].priority = ((buf[offset] & - ICE_IEEE_APP_PRIO_M) >> - ICE_IEEE_APP_PRIO_S); - dcbcfg->app[i].selector = ((buf[offset] & - ICE_IEEE_APP_SEL_M) >> - ICE_IEEE_APP_SEL_S); + dcbcfg->app[i].priority = FIELD_GET(ICE_IEEE_APP_PRIO_M, + buf[offset]); + dcbcfg->app[i].selector = FIELD_GET(ICE_IEEE_APP_SEL_M, + buf[offset]); dcbcfg->app[i].prot_id = (buf[offset + 1] << 0x8) | buf[offset + 2]; /* Move to next app */ @@ -347,8 +337,7 @@ ice_parse_ieee_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) u8 subtype; ouisubtype = ntohl(tlv->ouisubtype); - subtype = (u8)((ouisubtype & ICE_LLDP_TLV_SUBTYPE_M) >> - ICE_LLDP_TLV_SUBTYPE_S); + subtype = FIELD_GET(ICE_LLDP_TLV_SUBTYPE_M, ouisubtype); switch (subtype) { case ICE_IEEE_SUBTYPE_ETS_CFG: ice_parse_ieee_etscfg_tlv(tlv, dcbcfg); @@ -399,11 +388,9 @@ ice_parse_cee_pgcfg_tlv(struct ice_cee_feat_tlv *tlv, */ for (i = 0; i < 4; i++) { etscfg->prio_table[i * 2] = - ((buf[offset] & ICE_CEE_PGID_PRIO_1_M) >> - ICE_CEE_PGID_PRIO_1_S); + FIELD_GET(ICE_CEE_PGID_PRIO_1_M, buf[offset]); etscfg->prio_table[i * 2 + 1] = - ((buf[offset] & ICE_CEE_PGID_PRIO_0_M) >> - ICE_CEE_PGID_PRIO_0_S); + FIELD_GET(ICE_CEE_PGID_PRIO_0_M, buf[offset]); offset++; } @@ -466,7 +453,7 @@ ice_parse_cee_app_tlv(struct ice_cee_feat_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) u8 i; typelen = ntohs(tlv->hdr.typelen); - len = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S); + len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen); dcbcfg->numapps = len / sizeof(*app); if (!dcbcfg->numapps) @@ -521,14 +508,13 @@ ice_parse_cee_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) u32 ouisubtype; ouisubtype = ntohl(tlv->ouisubtype); - subtype = (u8)((ouisubtype & ICE_LLDP_TLV_SUBTYPE_M) >> - ICE_LLDP_TLV_SUBTYPE_S); + subtype = FIELD_GET(ICE_LLDP_TLV_SUBTYPE_M, ouisubtype); /* Return if not CEE DCBX */ if (subtype != ICE_CEE_DCBX_TYPE) return; typelen = ntohs(tlv->typelen); - tlvlen = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S); + tlvlen = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen); len = sizeof(tlv->typelen) + sizeof(ouisubtype) + sizeof(struct ice_cee_ctrl_tlv); /* Return if no CEE DCBX Feature TLVs */ @@ -540,9 +526,8 @@ ice_parse_cee_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) u16 sublen; typelen = ntohs(sub_tlv->hdr.typelen); - sublen = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S); - subtype = (u8)((typelen & ICE_LLDP_TLV_TYPE_M) >> - ICE_LLDP_TLV_TYPE_S); + sublen = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen); + subtype = FIELD_GET(ICE_LLDP_TLV_TYPE_M, typelen); switch (subtype) { case ICE_CEE_SUBTYPE_PG_CFG: ice_parse_cee_pgcfg_tlv(sub_tlv, dcbcfg); @@ -579,7 +564,7 @@ ice_parse_org_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) u32 oui; ouisubtype = ntohl(tlv->ouisubtype); - oui = ((ouisubtype & ICE_LLDP_TLV_OUI_M) >> ICE_LLDP_TLV_OUI_S); + oui = FIELD_GET(ICE_LLDP_TLV_OUI_M, ouisubtype); switch (oui) { case ICE_IEEE_8021QAZ_OUI: ice_parse_ieee_tlv(tlv, dcbcfg); @@ -616,8 +601,8 @@ static int ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg) tlv = (struct ice_lldp_org_tlv *)lldpmib; while (1) { typelen = ntohs(tlv->typelen); - type = ((typelen & ICE_LLDP_TLV_TYPE_M) >> ICE_LLDP_TLV_TYPE_S); - len = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S); + type = FIELD_GET(ICE_LLDP_TLV_TYPE_M, typelen); + len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen); 
offset += sizeof(typelen) + len; /* END TLV or beyond LLDPDU size */ @@ -806,11 +791,11 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg, */ for (i = 0; i < ICE_MAX_TRAFFIC_CLASS / 2; i++) { dcbcfg->etscfg.prio_table[i * 2] = - ((cee_cfg->oper_prio_tc[i] & ICE_CEE_PGID_PRIO_0_M) >> - ICE_CEE_PGID_PRIO_0_S); + FIELD_GET(ICE_CEE_PGID_PRIO_0_M, + cee_cfg->oper_prio_tc[i]); dcbcfg->etscfg.prio_table[i * 2 + 1] = - ((cee_cfg->oper_prio_tc[i] & ICE_CEE_PGID_PRIO_1_M) >> - ICE_CEE_PGID_PRIO_1_S); + FIELD_GET(ICE_CEE_PGID_PRIO_1_M, + cee_cfg->oper_prio_tc[i]); } ice_for_each_traffic_class(i) { @@ -982,7 +967,7 @@ void ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi, mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw; - change_type = FIELD_GET(ICE_AQ_LLDP_MIB_TYPE_M, mib->type); + change_type = FIELD_GET(ICE_AQ_LLDP_MIB_TYPE_M, mib->type); if (change_type == ICE_AQ_LLDP_MIB_REMOTE) dcbx_cfg = &pi->qos_cfg.remote_dcbx_cfg; @@ -1483,7 +1468,7 @@ ice_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, struct ice_dcbx_cfg *dcbcfg) while (1) { ice_add_dcb_tlv(tlv, dcbcfg, tlvid++); typelen = ntohs(tlv->typelen); - len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S; + len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen); if (len) offset += len + 2; /* END TLV or beyond LLDPDU size */ diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index 850db8e0e6..6e20ee6100 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -934,7 +934,7 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring, skb->priority != TC_PRIO_CONTROL) { first->vid &= ~VLAN_PRIO_MASK; /* Mask the lower 3 bits to set the 802.1p priority */ - first->vid |= (skb->priority << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK; + first->vid |= FIELD_PREP(VLAN_PRIO_MASK, skb->priority); /* if this is not already set it means a VLAN 0 + priority needs * to be offloaded */ diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c index e1fbc6de45..6d50b90a73 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c @@ -227,7 +227,7 @@ static void ice_get_pfc_delay(struct ice_hw *hw, u16 *delay) u32 val; val = rd32(hw, PRTDCB_GENC); - *delay = (u16)((val & PRTDCB_GENC_PFCLDA_M) >> PRTDCB_GENC_PFCLDA_S); + *delay = FIELD_GET(PRTDCB_GENC_PFCLDA_M, val); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_debugfs.c b/drivers/net/ethernet/intel/ice/ice_debugfs.c new file mode 100644 index 0000000000..c2bfba6b9e --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_debugfs.c @@ -0,0 +1,667 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022, Intel Corporation. */ + +#include <linux/fs.h> +#include <linux/debugfs.h> +#include <linux/random.h> +#include <linux/vmalloc.h> +#include "ice.h" + +static struct dentry *ice_debugfs_root; + +/* create a define that has an extra module that doesn't really exist. this + * is so we can add a module 'all' to easily enable/disable all the modules + */ +#define ICE_NR_FW_LOG_MODULES (ICE_AQC_FW_LOG_ID_MAX + 1) + +/* the ordering in this array is important. 
it matches the ordering of the + * values in the FW so the index is the same value as in ice_aqc_fw_logging_mod + */ +static const char * const ice_fwlog_module_string[] = { + "general", + "ctrl", + "link", + "link_topo", + "dnl", + "i2c", + "sdp", + "mdio", + "adminq", + "hdma", + "lldp", + "dcbx", + "dcb", + "xlr", + "nvm", + "auth", + "vpd", + "iosf", + "parser", + "sw", + "scheduler", + "txq", + "rsvd", + "post", + "watchdog", + "task_dispatch", + "mng", + "synce", + "health", + "tsdrv", + "pfreg", + "mdlver", + "all", +}; + +/* the ordering in this array is important. it matches the ordering of the + * values in the FW so the index is the same value as in ice_fwlog_level + */ +static const char * const ice_fwlog_level_string[] = { + "none", + "error", + "warning", + "normal", + "verbose", +}; + +/* the order in this array is important. it matches the ordering of the + * values in the FW so the index is the same value as in ice_fwlog_level + */ +static const char * const ice_fwlog_log_size[] = { + "128K", + "256K", + "512K", + "1M", + "2M", +}; + +/** + * ice_fwlog_print_module_cfg - print current FW logging module configuration + * @hw: pointer to the HW structure + * @module: module to print + * @s: the seq file to put data into + */ +static void +ice_fwlog_print_module_cfg(struct ice_hw *hw, int module, struct seq_file *s) +{ + struct ice_fwlog_cfg *cfg = &hw->fwlog_cfg; + struct ice_fwlog_module_entry *entry; + + if (module != ICE_AQC_FW_LOG_ID_MAX) { + entry = &cfg->module_entries[module]; + + seq_printf(s, "\tModule: %s, Log Level: %s\n", + ice_fwlog_module_string[entry->module_id], + ice_fwlog_level_string[entry->log_level]); + } else { + int i; + + for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) { + entry = &cfg->module_entries[i]; + + seq_printf(s, "\tModule: %s, Log Level: %s\n", + ice_fwlog_module_string[entry->module_id], + ice_fwlog_level_string[entry->log_level]); + } + } +} + +static int ice_find_module_by_dentry(struct ice_pf *pf, struct dentry *d) +{ + int i, module; + + module = -1; + /* find the module based on the dentry */ + for (i = 0; i < ICE_NR_FW_LOG_MODULES; i++) { + if (d == pf->ice_debugfs_pf_fwlog_modules[i]) { + module = i; + break; + } + } + + return module; +} + +/** + * ice_debugfs_module_show - read from 'module' file + * @s: the opened file + * @v: pointer to the offset + */ +static int ice_debugfs_module_show(struct seq_file *s, void *v) +{ + const struct file *filp = s->file; + struct dentry *dentry; + struct ice_pf *pf; + int module; + + dentry = file_dentry(filp); + pf = s->private; + + module = ice_find_module_by_dentry(pf, dentry); + if (module < 0) { + dev_info(ice_pf_to_dev(pf), "unknown module\n"); + return -EINVAL; + } + + ice_fwlog_print_module_cfg(&pf->hw, module, s); + + return 0; +} + +static int ice_debugfs_module_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, ice_debugfs_module_show, inode->i_private); +} + +/** + * ice_debugfs_module_write - write into 'module' file + * @filp: the opened file + * @buf: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + */ +static ssize_t +ice_debugfs_module_write(struct file *filp, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct ice_pf *pf = file_inode(filp)->i_private; + struct dentry *dentry = file_dentry(filp); + struct device *dev = ice_pf_to_dev(pf); + char user_val[16], *cmd_buf; + int module, log_level, cnt; + + /* don't allow partial writes or invalid input */ + if (*ppos != 0 || count > 8) + 
return -EINVAL; + + cmd_buf = memdup_user(buf, count); + if (IS_ERR(cmd_buf)) + return PTR_ERR(cmd_buf); + + module = ice_find_module_by_dentry(pf, dentry); + if (module < 0) { + dev_info(dev, "unknown module\n"); + return -EINVAL; + } + + cnt = sscanf(cmd_buf, "%s", user_val); + if (cnt != 1) + return -EINVAL; + + log_level = sysfs_match_string(ice_fwlog_level_string, user_val); + if (log_level < 0) { + dev_info(dev, "unknown log level '%s'\n", user_val); + return -EINVAL; + } + + if (module != ICE_AQC_FW_LOG_ID_MAX) { + ice_pf_fwlog_update_module(pf, log_level, module); + } else { + /* the module 'all' is a shortcut so that we can set + * all of the modules to the same level quickly + */ + int i; + + for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) + ice_pf_fwlog_update_module(pf, log_level, i); + } + + return count; +} + +static const struct file_operations ice_debugfs_module_fops = { + .owner = THIS_MODULE, + .open = ice_debugfs_module_open, + .read = seq_read, + .release = single_release, + .write = ice_debugfs_module_write, +}; + +/** + * ice_debugfs_nr_messages_read - read from 'nr_messages' file + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + */ +static ssize_t ice_debugfs_nr_messages_read(struct file *filp, + char __user *buffer, size_t count, + loff_t *ppos) +{ + struct ice_pf *pf = filp->private_data; + struct ice_hw *hw = &pf->hw; + char buff[32] = {}; + + snprintf(buff, sizeof(buff), "%d\n", + hw->fwlog_cfg.log_resolution); + + return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff)); +} + +/** + * ice_debugfs_nr_messages_write - write into 'nr_messages' file + * @filp: the opened file + * @buf: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + */ +static ssize_t +ice_debugfs_nr_messages_write(struct file *filp, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct ice_pf *pf = filp->private_data; + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw *hw = &pf->hw; + char user_val[8], *cmd_buf; + s16 nr_messages; + ssize_t ret; + + /* don't allow partial writes or invalid input */ + if (*ppos != 0 || count > 4) + return -EINVAL; + + cmd_buf = memdup_user(buf, count); + if (IS_ERR(cmd_buf)) + return PTR_ERR(cmd_buf); + + ret = sscanf(cmd_buf, "%s", user_val); + if (ret != 1) + return -EINVAL; + + ret = kstrtos16(user_val, 0, &nr_messages); + if (ret) + return ret; + + if (nr_messages < ICE_AQC_FW_LOG_MIN_RESOLUTION || + nr_messages > ICE_AQC_FW_LOG_MAX_RESOLUTION) { + dev_err(dev, "Invalid FW log number of messages %d, value must be between %d - %d\n", + nr_messages, ICE_AQC_FW_LOG_MIN_RESOLUTION, + ICE_AQC_FW_LOG_MAX_RESOLUTION); + return -EINVAL; + } + + hw->fwlog_cfg.log_resolution = nr_messages; + + return count; +} + +static const struct file_operations ice_debugfs_nr_messages_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ice_debugfs_nr_messages_read, + .write = ice_debugfs_nr_messages_write, +}; + +/** + * ice_debugfs_enable_read - read from 'enable' file + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + */ +static ssize_t ice_debugfs_enable_read(struct file *filp, + char __user *buffer, size_t count, + loff_t *ppos) +{ + struct ice_pf *pf = filp->private_data; + struct ice_hw *hw = &pf->hw; + char buff[32] = {}; + + snprintf(buff, sizeof(buff), 
"%u\n", + (u16)(hw->fwlog_cfg.options & + ICE_FWLOG_OPTION_IS_REGISTERED) >> 3); + + return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff)); +} + +/** + * ice_debugfs_enable_write - write into 'enable' file + * @filp: the opened file + * @buf: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + */ +static ssize_t +ice_debugfs_enable_write(struct file *filp, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct ice_pf *pf = filp->private_data; + struct ice_hw *hw = &pf->hw; + char user_val[8], *cmd_buf; + bool enable; + ssize_t ret; + + /* don't allow partial writes or invalid input */ + if (*ppos != 0 || count > 2) + return -EINVAL; + + cmd_buf = memdup_user(buf, count); + if (IS_ERR(cmd_buf)) + return PTR_ERR(cmd_buf); + + ret = sscanf(cmd_buf, "%s", user_val); + if (ret != 1) + return -EINVAL; + + ret = kstrtobool(user_val, &enable); + if (ret) + goto enable_write_error; + + if (enable) + hw->fwlog_cfg.options |= ICE_FWLOG_OPTION_ARQ_ENA; + else + hw->fwlog_cfg.options &= ~ICE_FWLOG_OPTION_ARQ_ENA; + + ret = ice_fwlog_set(hw, &hw->fwlog_cfg); + if (ret) + goto enable_write_error; + + if (enable) + ret = ice_fwlog_register(hw); + else + ret = ice_fwlog_unregister(hw); + + if (ret) + goto enable_write_error; + + /* if we get here, nothing went wrong; return count since we didn't + * really write anything + */ + ret = (ssize_t)count; + +enable_write_error: + /* This function always consumes all of the written input, or produces + * an error. Check and enforce this. Otherwise, the write operation + * won't complete properly. + */ + if (WARN_ON(ret != (ssize_t)count && ret >= 0)) + ret = -EIO; + + return ret; +} + +static const struct file_operations ice_debugfs_enable_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ice_debugfs_enable_read, + .write = ice_debugfs_enable_write, +}; + +/** + * ice_debugfs_log_size_read - read from 'log_size' file + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + */ +static ssize_t ice_debugfs_log_size_read(struct file *filp, + char __user *buffer, size_t count, + loff_t *ppos) +{ + struct ice_pf *pf = filp->private_data; + struct ice_hw *hw = &pf->hw; + char buff[32] = {}; + int index; + + index = hw->fwlog_ring.index; + snprintf(buff, sizeof(buff), "%s\n", ice_fwlog_log_size[index]); + + return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff)); +} + +/** + * ice_debugfs_log_size_write - write into 'log_size' file + * @filp: the opened file + * @buf: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + */ +static ssize_t +ice_debugfs_log_size_write(struct file *filp, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct ice_pf *pf = filp->private_data; + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw *hw = &pf->hw; + char user_val[8], *cmd_buf; + ssize_t ret; + int index; + + /* don't allow partial writes or invalid input */ + if (*ppos != 0 || count > 5) + return -EINVAL; + + cmd_buf = memdup_user(buf, count); + if (IS_ERR(cmd_buf)) + return PTR_ERR(cmd_buf); + + ret = sscanf(cmd_buf, "%s", user_val); + if (ret != 1) + return -EINVAL; + + index = sysfs_match_string(ice_fwlog_log_size, user_val); + if (index < 0) { + dev_info(dev, "Invalid log size '%s'. 
The value must be one of 128K, 256K, 512K, 1M, 2M\n", + user_val); + ret = -EINVAL; + goto log_size_write_error; + } else if (hw->fwlog_cfg.options & ICE_FWLOG_OPTION_IS_REGISTERED) { + dev_info(dev, "FW logging is currently running. Please disable FW logging to change log_size\n"); + ret = -EINVAL; + goto log_size_write_error; + } + + /* free all the buffers and the tracking info and resize */ + ice_fwlog_realloc_rings(hw, index); + + /* if we get here, nothing went wrong; return count since we didn't + * really write anything + */ + ret = (ssize_t)count; + +log_size_write_error: + /* This function always consumes all of the written input, or produces + * an error. Check and enforce this. Otherwise, the write operation + * won't complete properly. + */ + if (WARN_ON(ret != (ssize_t)count && ret >= 0)) + ret = -EIO; + + return ret; +} + +static const struct file_operations ice_debugfs_log_size_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ice_debugfs_log_size_read, + .write = ice_debugfs_log_size_write, +}; + +/** + * ice_debugfs_data_read - read from 'data' file + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + */ +static ssize_t ice_debugfs_data_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct ice_pf *pf = filp->private_data; + struct ice_hw *hw = &pf->hw; + int data_copied = 0; + bool done = false; + + if (ice_fwlog_ring_empty(&hw->fwlog_ring)) + return 0; + + while (!ice_fwlog_ring_empty(&hw->fwlog_ring) && !done) { + struct ice_fwlog_data *log; + u16 cur_buf_len; + + log = &hw->fwlog_ring.rings[hw->fwlog_ring.head]; + cur_buf_len = log->data_size; + if (cur_buf_len >= count) { + done = true; + continue; + } + + if (copy_to_user(buffer, log->data, cur_buf_len)) { + /* if there is an error then bail and return whatever + * the driver has copied so far + */ + done = true; + continue; + } + + data_copied += cur_buf_len; + buffer += cur_buf_len; + count -= cur_buf_len; + *ppos += cur_buf_len; + ice_fwlog_ring_increment(&hw->fwlog_ring.head, + hw->fwlog_ring.size); + } + + return data_copied; +} + +/** + * ice_debugfs_data_write - write into 'data' file + * @filp: the opened file + * @buf: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + */ +static ssize_t +ice_debugfs_data_write(struct file *filp, const char __user *buf, size_t count, + loff_t *ppos) +{ + struct ice_pf *pf = filp->private_data; + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw *hw = &pf->hw; + ssize_t ret; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + + /* any value is allowed to clear the buffer so no need to even look at + * what the value is + */ + if (!(hw->fwlog_cfg.options & ICE_FWLOG_OPTION_IS_REGISTERED)) { + hw->fwlog_ring.head = 0; + hw->fwlog_ring.tail = 0; + } else { + dev_info(dev, "Can't clear FW log data while FW log running\n"); + ret = -EINVAL; + goto nr_buffs_write_error; + } + + /* if we get here, nothing went wrong; return count since we didn't + * really write anything + */ + ret = (ssize_t)count; + +nr_buffs_write_error: + /* This function always consumes all of the written input, or produces + * an error. Check and enforce this. Otherwise, the write operation + * won't complete properly. 
+ */ + if (WARN_ON(ret != (ssize_t)count && ret >= 0)) + ret = -EIO; + + return ret; +} + +static const struct file_operations ice_debugfs_data_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ice_debugfs_data_read, + .write = ice_debugfs_data_write, +}; + +/** + * ice_debugfs_fwlog_init - setup the debugfs directory + * @pf: the ice that is starting up + */ +void ice_debugfs_fwlog_init(struct ice_pf *pf) +{ + const char *name = pci_name(pf->pdev); + struct dentry *fw_modules_dir; + struct dentry **fw_modules; + int i; + + /* only support fw log commands on PF 0 */ + if (pf->hw.bus.func) + return; + + /* allocate space for this first because if it fails then we don't + * need to unwind + */ + fw_modules = kcalloc(ICE_NR_FW_LOG_MODULES, sizeof(*fw_modules), + GFP_KERNEL); + if (!fw_modules) + return; + + pf->ice_debugfs_pf = debugfs_create_dir(name, ice_debugfs_root); + if (IS_ERR(pf->ice_debugfs_pf)) + goto err_create_module_files; + + pf->ice_debugfs_pf_fwlog = debugfs_create_dir("fwlog", + pf->ice_debugfs_pf); + if (IS_ERR(pf->ice_debugfs_pf)) + goto err_create_module_files; + + fw_modules_dir = debugfs_create_dir("modules", + pf->ice_debugfs_pf_fwlog); + if (IS_ERR(fw_modules_dir)) + goto err_create_module_files; + + for (i = 0; i < ICE_NR_FW_LOG_MODULES; i++) { + fw_modules[i] = debugfs_create_file(ice_fwlog_module_string[i], + 0600, fw_modules_dir, pf, + &ice_debugfs_module_fops); + if (IS_ERR(fw_modules[i])) + goto err_create_module_files; + } + + debugfs_create_file("nr_messages", 0600, + pf->ice_debugfs_pf_fwlog, pf, + &ice_debugfs_nr_messages_fops); + + pf->ice_debugfs_pf_fwlog_modules = fw_modules; + + debugfs_create_file("enable", 0600, pf->ice_debugfs_pf_fwlog, + pf, &ice_debugfs_enable_fops); + + debugfs_create_file("log_size", 0600, pf->ice_debugfs_pf_fwlog, + pf, &ice_debugfs_log_size_fops); + + debugfs_create_file("data", 0600, pf->ice_debugfs_pf_fwlog, + pf, &ice_debugfs_data_fops); + + return; + +err_create_module_files: + debugfs_remove_recursive(pf->ice_debugfs_pf_fwlog); + kfree(fw_modules); +} + +/** + * ice_debugfs_init - create root directory for debugfs entries + */ +void ice_debugfs_init(void) +{ + ice_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); + if (IS_ERR(ice_debugfs_root)) + pr_info("init of debugfs failed\n"); +} + +/** + * ice_debugfs_exit - remove debugfs entries + */ +void ice_debugfs_exit(void) +{ + debugfs_remove_recursive(ice_debugfs_root); + ice_debugfs_root = NULL; +} diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c index 80dc5445b5..65be56f2af 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.c +++ b/drivers/net/ethernet/intel/ice/ice_devlink.c @@ -193,6 +193,24 @@ ice_info_pending_netlist_build(struct ice_pf __always_unused *pf, snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash); } +static void ice_info_cgu_fw_build(struct ice_pf *pf, struct ice_info_ctx *ctx) +{ + u32 id, cfg_ver, fw_ver; + + if (!ice_is_feature_supported(pf, ICE_F_CGU)) + return; + if (ice_aq_get_cgu_info(&pf->hw, &id, &cfg_ver, &fw_ver)) + return; + snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", id, cfg_ver, fw_ver); +} + +static void ice_info_cgu_id(struct ice_pf *pf, struct ice_info_ctx *ctx) +{ + if (!ice_is_feature_supported(pf, ICE_F_CGU)) + return; + snprintf(ctx->buf, sizeof(ctx->buf), "%u", pf->hw.cgu_part_number); +} + #define fixed(key, getter) { ICE_VERSION_FIXED, key, getter, NULL } #define running(key, getter) { ICE_VERSION_RUNNING, key, getter, NULL } #define 
stored(key, getter, fallback) { ICE_VERSION_STORED, key, getter, fallback } @@ -235,6 +253,8 @@ static const struct ice_devlink_version { running("fw.app.bundle_id", ice_info_ddp_pkg_bundle_id), combined("fw.netlist", ice_info_netlist_ver, ice_info_pending_netlist_ver), combined("fw.netlist.build", ice_info_netlist_build, ice_info_pending_netlist_build), + fixed("cgu.id", ice_info_cgu_id), + running("fw.cgu", ice_info_cgu_fw_build), }; /** @@ -810,6 +830,10 @@ static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node struct ice_vf *vf; int i; + if (node->rate_node) + /* already added, skip to the next */ + goto traverse_children; + if (node->parent == tc_node) { /* create root node */ rate_node = devl_rate_node_create(devlink, node, node->name, NULL); @@ -831,6 +855,7 @@ static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node if (rate_node && !IS_ERR(rate_node)) node->rate_node = rate_node; +traverse_children: for (i = 0; i < node->num_children; i++) ice_traverse_tx_tree(devlink, node->children[i], tc_node, pf); } @@ -861,6 +886,30 @@ int ice_devlink_rate_init_tx_topology(struct devlink *devlink, struct ice_vsi *v return 0; } +static void ice_clear_rate_nodes(struct ice_sched_node *node) +{ + node->rate_node = NULL; + + for (int i = 0; i < node->num_children; i++) + ice_clear_rate_nodes(node->children[i]); +} + +/** + * ice_devlink_rate_clear_tx_topology - clear node->rate_node + * @vsi: main vsi struct + * + * Clear rate_node to cleanup creation of Tx topology. + * + */ +void ice_devlink_rate_clear_tx_topology(struct ice_vsi *vsi) +{ + struct ice_port_info *pi = vsi->port_info; + + mutex_lock(&pi->sched_lock); + ice_clear_rate_nodes(pi->root->children[0]); + mutex_unlock(&pi->sched_lock); +} + /** * ice_set_object_tx_share - sets node scheduling parameter * @pi: devlink struct instance diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.h b/drivers/net/ethernet/intel/ice/ice_devlink.h index 6ec96779f5..d291c0e2e1 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.h +++ b/drivers/net/ethernet/intel/ice/ice_devlink.h @@ -20,5 +20,6 @@ void ice_devlink_destroy_regions(struct ice_pf *pf); int ice_devlink_rate_init_tx_topology(struct devlink *devlink, struct ice_vsi *vsi); void ice_tear_down_devlink_rate_tree(struct ice_pf *pf); +void ice_devlink_rate_clear_tx_topology(struct ice_vsi *vsi); #endif /* _ICE_DEVLINK_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c index 68b894bb68..bd9b1fed74 100644 --- a/drivers/net/ethernet/intel/ice/ice_dpll.c +++ b/drivers/net/ethernet/intel/ice/ice_dpll.c @@ -552,31 +552,6 @@ ice_dpll_lock_status_get(const struct dpll_device *dpll, void *dpll_priv, } /** - * ice_dpll_mode_supported - check if dpll's working mode is supported - * @dpll: registered dpll pointer - * @dpll_priv: private data pointer passed on dpll registration - * @mode: mode to be checked for support - * @extack: error reporting - * - * Dpll subsystem callback. Provides information if working mode is supported - * by dpll. 
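The two devlink-rate hunks above work as a pair: ice_traverse_tx_tree() now skips any node that already carries a rate_node (so a later re-run cannot double-create devlink objects) while still descending into its children, and ice_clear_rate_nodes() undoes that bookkeeping with the same pre-order walk. A toy version of the walk, using a hypothetical demo_node in place of struct ice_sched_node:

#include <linux/stddef.h>

struct demo_node {
	void *rate_node;		/* devlink rate node, if created */
	int num_children;
	struct demo_node **children;
};

static void demo_clear_rate_nodes(struct demo_node *node)
{
	node->rate_node = NULL;		/* clear the current node first */

	for (int i = 0; i < node->num_children; i++)
		demo_clear_rate_nodes(node->children[i]);	/* then descend */
}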
- * - * Return: - * * true - mode is supported - * * false - mode is not supported - */ -static bool ice_dpll_mode_supported(const struct dpll_device *dpll, - void *dpll_priv, - enum dpll_mode mode, - struct netlink_ext_ack *extack) -{ - if (mode == DPLL_MODE_AUTOMATIC) - return true; - - return false; -} - -/** * ice_dpll_mode_get - get dpll's working mode * @dpll: registered dpll pointer * @dpll_priv: private data pointer passed on dpll registration @@ -1259,7 +1234,6 @@ static const struct dpll_pin_ops ice_dpll_output_ops = { static const struct dpll_device_ops ice_dpll_ops = { .lock_status_get = ice_dpll_lock_status_get, - .mode_supported = ice_dpll_mode_supported, .mode_get = ice_dpll_mode_get, }; @@ -1623,7 +1597,7 @@ static void ice_dpll_deinit_rclk_pin(struct ice_pf *pf) } if (WARN_ON_ONCE(!vsi || !vsi->netdev)) return; - netdev_dpll_pin_clear(vsi->netdev); + dpll_netdev_pin_clear(vsi->netdev); dpll_pin_put(rclk->pin); } @@ -1667,7 +1641,7 @@ ice_dpll_init_rclk_pins(struct ice_pf *pf, struct ice_dpll_pin *pin, } if (WARN_ON((!vsi || !vsi->netdev))) return -EINVAL; - netdev_dpll_pin_set(vsi->netdev, pf->dplls.rclk.pin); + dpll_netdev_pin_set(vsi->netdev, pf->dplls.rclk.pin); return 0; diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c index a655d499ab..9069725c71 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.c +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c @@ -11,17 +11,34 @@ #include "ice_tc_lib.h" /** - * ice_eswitch_add_vf_sp_rule - add adv rule with VF's VSI index + * ice_eswitch_del_sp_rules - delete adv rules added on PRs + * @pf: pointer to the PF struct + * + * Delete all advanced rules that were used to forward packets with the + * device's VSI index to the corresponding eswitch ctrl VSI queue. + */ +static void ice_eswitch_del_sp_rules(struct ice_pf *pf) +{ + struct ice_repr *repr; + unsigned long id; + + xa_for_each(&pf->eswitch.reprs, id, repr) { + if (repr->sp_rule.rid) + ice_rem_adv_rule_by_id(&pf->hw, &repr->sp_rule); + } +} + +/** + * ice_eswitch_add_sp_rule - add adv rule with device's VSI index * @pf: pointer to PF struct - * @vf: pointer to VF struct + * @repr: pointer to the repr struct * * This function adds advanced rule that forwards packets with - * VF's VSI index to the corresponding switchdev ctrl VSI queue. + * device's VSI index to the corresponding eswitch ctrl VSI queue. 
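The switchdev rework in this file swaps the old ice_for_each_vf() walks for an xarray keyed by representor ID: entries are registered with xa_alloc() (IDs start at 1 so that 0 can mean "no representor"), visited with xa_for_each(), and dropped with xa_erase(). A self-contained sketch of that registry shape; demo_repr is a stand-in, not the driver's struct ice_repr:

#include <linux/printk.h>
#include <linux/types.h>
#include <linux/xarray.h>

struct demo_repr {
	u32 id;		/* filled in by xa_alloc() */
	int q_id;	/* ring pair index, see the remap sketch below */
};

static DEFINE_XARRAY_ALLOC(demo_reprs);

static int demo_repr_register(struct demo_repr *repr)
{
	/* allocate an ID in [1, INT_MAX], as the attach path does */
	return xa_alloc(&demo_reprs, &repr->id, repr,
			XA_LIMIT(1, INT_MAX), GFP_KERNEL);
}

static void demo_repr_unregister(struct demo_repr *repr)
{
	xa_erase(&demo_reprs, repr->id);
}

static void demo_repr_walk(void)
{
	struct demo_repr *repr;
	unsigned long id;

	xa_for_each(&demo_reprs, id, repr)
		pr_info("repr %u registered\n", repr->id);
}

The same three calls appear essentially verbatim in ice_eswitch_attach() and ice_eswitch_detach() further down.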
*/ -static int -ice_eswitch_add_vf_sp_rule(struct ice_pf *pf, struct ice_vf *vf) +static int ice_eswitch_add_sp_rule(struct ice_pf *pf, struct ice_repr *repr) { - struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; + struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi; struct ice_adv_rule_info rule_info = { 0 }; struct ice_adv_lkup_elem *list; struct ice_hw *hw = &pf->hw; @@ -38,39 +55,42 @@ ice_eswitch_add_vf_sp_rule(struct ice_pf *pf, struct ice_vf *vf) rule_info.sw_act.vsi_handle = ctrl_vsi->idx; rule_info.sw_act.fltr_act = ICE_FWD_TO_Q; rule_info.sw_act.fwd_id.q_id = hw->func_caps.common_cap.rxq_first_id + - ctrl_vsi->rxq_map[vf->vf_id]; + ctrl_vsi->rxq_map[repr->q_id]; rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE; rule_info.flags_info.act_valid = true; rule_info.tun_type = ICE_SW_TUN_AND_NON_TUN; - rule_info.src_vsi = vf->lan_vsi_idx; + rule_info.src_vsi = repr->src_vsi->idx; err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, - &vf->repr->sp_rule); + &repr->sp_rule); if (err) - dev_err(ice_pf_to_dev(pf), "Unable to add VF slow-path rule in switchdev mode for VF %d", - vf->vf_id); + dev_err(ice_pf_to_dev(pf), "Unable to add slow-path rule for eswitch for PR %d", + repr->id); kfree(list); return err; } -/** - * ice_eswitch_del_vf_sp_rule - delete adv rule with VF's VSI index - * @vf: pointer to the VF struct - * - * Delete the advanced rule that was used to forward packets with the VF's VSI - * index to the corresponding switchdev ctrl VSI queue. - */ -static void ice_eswitch_del_vf_sp_rule(struct ice_vf *vf) +static int +ice_eswitch_add_sp_rules(struct ice_pf *pf) { - if (!vf->repr) - return; + struct ice_repr *repr; + unsigned long id; + int err; + + xa_for_each(&pf->eswitch.reprs, id, repr) { + err = ice_eswitch_add_sp_rule(pf, repr); + if (err) { + ice_eswitch_del_sp_rules(pf); + return err; + } + } - ice_rem_adv_rule_by_id(&vf->pf->hw, &vf->repr->sp_rule); + return 0; } /** - * ice_eswitch_setup_env - configure switchdev HW filters + * ice_eswitch_setup_env - configure eswitch HW filters * @pf: pointer to PF struct * * This function adds HW filters configuration specific for switchdev @@ -78,18 +98,18 @@ static void ice_eswitch_del_vf_sp_rule(struct ice_vf *vf) */ static int ice_eswitch_setup_env(struct ice_pf *pf) { - struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi; - struct net_device *uplink_netdev = uplink_vsi->netdev; - struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; + struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi; + struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi; + struct net_device *netdev = uplink_vsi->netdev; struct ice_vsi_vlan_ops *vlan_ops; bool rule_added = false; ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx); - netif_addr_lock_bh(uplink_netdev); - __dev_uc_unsync(uplink_netdev, NULL); - __dev_mc_unsync(uplink_netdev, NULL); - netif_addr_unlock_bh(uplink_netdev); + netif_addr_lock_bh(netdev); + __dev_uc_unsync(netdev, NULL); + __dev_mc_unsync(netdev, NULL); + netif_addr_unlock_bh(netdev); if (ice_vsi_add_vlan_zero(uplink_vsi)) goto err_def_rx; @@ -132,19 +152,20 @@ err_def_rx: } /** - * ice_eswitch_remap_rings_to_vectors - reconfigure rings of switchdev ctrl VSI - * @pf: pointer to PF struct + * ice_eswitch_remap_rings_to_vectors - reconfigure rings of eswitch ctrl VSI + * @eswitch: pointer to eswitch struct * - * In switchdev number of allocated Tx/Rx rings is equal. + * In eswitch number of allocated Tx/Rx rings is equal. 
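The sizing rule stated in the comment above is the heart of the refactor: the control-plane VSI allocates exactly one Tx/Rx ring pair per port representor, and the remap hands pair q_id to the q_id-th representor in ID order. Reusing demo_repr from the earlier sketch, the loop reduces to roughly:

static void demo_remap(struct xarray *reprs, int num_txq)
{
	unsigned long repr_id = 0;
	int q_id;

	for (q_id = 0; q_id < num_txq; q_id++) {
		struct demo_repr *repr;

		repr = xa_find(reprs, &repr_id, U32_MAX, XA_PRESENT);
		if (!repr)
			break;

		repr_id++;		/* resume the search past this entry */
		repr->q_id = q_id;	/* this PR owns exactly this ring pair */
	}
}

The stored q_id is what the slow-path rule and ice_eswitch_port_start_xmit() later use to steer a representor's traffic to its dedicated control-VSI queue.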
* * This function fills q_vectors structures associated with representor and * move each ring pairs to port representor netdevs. Each port representor * will have dedicated 1 Tx/Rx ring pair, so number of rings pair is equal to * number of VFs. */ -static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf) +static void ice_eswitch_remap_rings_to_vectors(struct ice_eswitch *eswitch) { - struct ice_vsi *vsi = pf->switchdev.control_vsi; + struct ice_vsi *vsi = eswitch->control_vsi; + unsigned long repr_id = 0; int q_id; ice_for_each_txq(vsi, q_id) { @@ -152,13 +173,14 @@ static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf) struct ice_tx_ring *tx_ring; struct ice_rx_ring *rx_ring; struct ice_repr *repr; - struct ice_vf *vf; - vf = ice_get_vf_by_id(pf, q_id); - if (WARN_ON(!vf)) - continue; + repr = xa_find(&eswitch->reprs, &repr_id, U32_MAX, + XA_PRESENT); + if (!repr) + break; - repr = vf->repr; + repr_id += 1; + repr->q_id = q_id; q_vector = repr->q_vector; tx_ring = vsi->tx_rings[q_id]; rx_ring = vsi->rx_rings[q_id]; @@ -181,136 +203,96 @@ static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf) rx_ring->q_vector = q_vector; rx_ring->next = NULL; rx_ring->netdev = repr->netdev; - - ice_put_vf(vf); } } /** - * ice_eswitch_release_reprs - clear PR VSIs configuration + * ice_eswitch_release_repr - clear PR VSI configuration * @pf: poiner to PF struct - * @ctrl_vsi: pointer to switchdev control VSI + * @repr: pointer to PR */ static void -ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi) +ice_eswitch_release_repr(struct ice_pf *pf, struct ice_repr *repr) { - struct ice_vf *vf; - unsigned int bkt; + struct ice_vsi *vsi = repr->src_vsi; - lockdep_assert_held(&pf->vfs.table_lock); - - ice_for_each_vf(pf, bkt, vf) { - struct ice_vsi *vsi = vf->repr->src_vsi; - - /* Skip VFs that aren't configured */ - if (!vf->repr->dst) - continue; + /* Skip representors that aren't configured */ + if (!repr->dst) + return; - ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); - metadata_dst_free(vf->repr->dst); - vf->repr->dst = NULL; - ice_eswitch_del_vf_sp_rule(vf); - ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, - ICE_FWD_TO_VSI); + ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); + metadata_dst_free(repr->dst); + repr->dst = NULL; + ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac, + ICE_FWD_TO_VSI); - netif_napi_del(&vf->repr->q_vector->napi); - } + netif_napi_del(&repr->q_vector->napi); } /** - * ice_eswitch_setup_reprs - configure port reprs to run in switchdev mode + * ice_eswitch_setup_repr - configure PR to run in switchdev mode * @pf: pointer to PF struct + * @repr: pointer to PR struct */ -static int ice_eswitch_setup_reprs(struct ice_pf *pf) +static int ice_eswitch_setup_repr(struct ice_pf *pf, struct ice_repr *repr) { - struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; - int max_vsi_num = 0; - struct ice_vf *vf; - unsigned int bkt; - - lockdep_assert_held(&pf->vfs.table_lock); - - ice_for_each_vf(pf, bkt, vf) { - struct ice_vsi *vsi = vf->repr->src_vsi; - - ice_remove_vsi_fltr(&pf->hw, vsi->idx); - vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, - GFP_KERNEL); - if (!vf->repr->dst) { - ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, - ICE_FWD_TO_VSI); - goto err; - } + struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi; + struct ice_vsi *vsi = repr->src_vsi; + struct metadata_dst *dst; - if (ice_eswitch_add_vf_sp_rule(pf, vf)) { - ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, - ICE_FWD_TO_VSI); - 
goto err; - } + ice_remove_vsi_fltr(&pf->hw, vsi->idx); + repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, + GFP_KERNEL); + if (!repr->dst) + goto err_add_mac_fltr; - if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) { - ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, - ICE_FWD_TO_VSI); - ice_eswitch_del_vf_sp_rule(vf); - metadata_dst_free(vf->repr->dst); - vf->repr->dst = NULL; - goto err; - } + if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) + goto err_dst_free; - if (ice_vsi_add_vlan_zero(vsi)) { - ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, - ICE_FWD_TO_VSI); - ice_eswitch_del_vf_sp_rule(vf); - metadata_dst_free(vf->repr->dst); - vf->repr->dst = NULL; - ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); - goto err; - } - - if (max_vsi_num < vsi->vsi_num) - max_vsi_num = vsi->vsi_num; + if (ice_vsi_add_vlan_zero(vsi)) + goto err_update_security; - netif_napi_add(vf->repr->netdev, &vf->repr->q_vector->napi, - ice_napi_poll); + netif_napi_add(repr->netdev, &repr->q_vector->napi, + ice_napi_poll); - netif_keep_dst(vf->repr->netdev); - } + netif_keep_dst(repr->netdev); - ice_for_each_vf(pf, bkt, vf) { - struct ice_repr *repr = vf->repr; - struct ice_vsi *vsi = repr->src_vsi; - struct metadata_dst *dst; - - dst = repr->dst; - dst->u.port_info.port_id = vsi->vsi_num; - dst->u.port_info.lower_dev = repr->netdev; - ice_repr_set_traffic_vsi(repr, ctrl_vsi); - } + dst = repr->dst; + dst->u.port_info.port_id = vsi->vsi_num; + dst->u.port_info.lower_dev = repr->netdev; + ice_repr_set_traffic_vsi(repr, ctrl_vsi); return 0; -err: - ice_eswitch_release_reprs(pf, ctrl_vsi); +err_update_security: + ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); +err_dst_free: + metadata_dst_free(repr->dst); + repr->dst = NULL; +err_add_mac_fltr: + ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac, ICE_FWD_TO_VSI); return -ENODEV; } /** - * ice_eswitch_update_repr - reconfigure VF port representor - * @vsi: VF VSI for which port representor is configured + * ice_eswitch_update_repr - reconfigure port representor + * @repr_id: representor ID + * @vsi: VSI for which port representor is configured */ -void ice_eswitch_update_repr(struct ice_vsi *vsi) +void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; struct ice_repr *repr; - struct ice_vf *vf; int ret; if (!ice_is_switchdev_running(pf)) return; - vf = vsi->vf; - repr = vf->repr; + repr = xa_load(&pf->eswitch.reprs, repr_id); + if (!repr) + return; + repr->src_vsi = vsi; repr->dst->u.port_info.port_id = vsi->vsi_num; @@ -319,9 +301,10 @@ void ice_eswitch_update_repr(struct ice_vsi *vsi) ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof); if (ret) { - ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, ICE_FWD_TO_VSI); - dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor", - vsi->vf->vf_id); + ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac, + ICE_FWD_TO_VSI); + dev_err(ice_pf_to_dev(pf), "Failed to update VSI of port representor %d", + repr->id); } } @@ -353,13 +336,13 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev) skb_dst_drop(skb); dst_hold((struct dst_entry *)repr->dst); skb_dst_set(skb, (struct dst_entry *)repr->dst); - skb->queue_mapping = repr->vf->vf_id; + skb->queue_mapping = repr->q_id; return ice_start_xmit(skb, netdev); } /** - * ice_eswitch_set_target_vsi - set switchdev context in Tx context descriptor + * ice_eswitch_set_target_vsi - set eswitch context in Tx context 
descriptor * @skb: pointer to send buffer * @off: pointer to offload struct */ @@ -375,14 +358,14 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb, off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX); } else { cd_cmd = ICE_TX_CTX_DESC_SWTCH_VSI << ICE_TXD_CTX_QW1_CMD_S; - dst_vsi = ((u64)dst->u.port_info.port_id << - ICE_TXD_CTX_QW1_VSI_S) & ICE_TXD_CTX_QW1_VSI_M; + dst_vsi = FIELD_PREP(ICE_TXD_CTX_QW1_VSI_M, + dst->u.port_info.port_id); off->cd_qw1 = cd_cmd | dst_vsi | ICE_TX_DESC_DTYPE_CTX; } } /** - * ice_eswitch_release_env - clear switchdev HW filters + * ice_eswitch_release_env - clear eswitch HW filters * @pf: pointer to PF struct * * This function removes HW filters configuration specific for switchdev @@ -390,8 +373,8 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb, */ static void ice_eswitch_release_env(struct ice_pf *pf) { - struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi; - struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; + struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi; + struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi; struct ice_vsi_vlan_ops *vlan_ops; vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi); @@ -407,7 +390,7 @@ static void ice_eswitch_release_env(struct ice_pf *pf) } /** - * ice_eswitch_vsi_setup - configure switchdev control VSI + * ice_eswitch_vsi_setup - configure eswitch control VSI * @pf: pointer to PF structure * @pi: pointer to port_info structure */ @@ -424,48 +407,29 @@ ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) } /** - * ice_eswitch_napi_del - remove NAPI handle for all port representors - * @pf: pointer to PF structure - */ -static void ice_eswitch_napi_del(struct ice_pf *pf) -{ - struct ice_vf *vf; - unsigned int bkt; - - lockdep_assert_held(&pf->vfs.table_lock); - - ice_for_each_vf(pf, bkt, vf) - netif_napi_del(&vf->repr->q_vector->napi); -} - -/** * ice_eswitch_napi_enable - enable NAPI for all port representors - * @pf: pointer to PF structure + * @reprs: xarray of reprs */ -static void ice_eswitch_napi_enable(struct ice_pf *pf) +static void ice_eswitch_napi_enable(struct xarray *reprs) { - struct ice_vf *vf; - unsigned int bkt; - - lockdep_assert_held(&pf->vfs.table_lock); + struct ice_repr *repr; + unsigned long id; - ice_for_each_vf(pf, bkt, vf) - napi_enable(&vf->repr->q_vector->napi); + xa_for_each(reprs, id, repr) + napi_enable(&repr->q_vector->napi); } /** * ice_eswitch_napi_disable - disable NAPI for all port representors - * @pf: pointer to PF structure + * @reprs: xarray of reprs */ -static void ice_eswitch_napi_disable(struct ice_pf *pf) +static void ice_eswitch_napi_disable(struct xarray *reprs) { - struct ice_vf *vf; - unsigned int bkt; - - lockdep_assert_held(&pf->vfs.table_lock); + struct ice_repr *repr; + unsigned long id; - ice_for_each_vf(pf, bkt, vf) - napi_disable(&vf->repr->q_vector->napi); + xa_for_each(reprs, id, repr) + napi_disable(&repr->q_vector->napi); } /** @@ -486,39 +450,26 @@ static int ice_eswitch_enable_switchdev(struct ice_pf *pf) return -EINVAL; } - pf->switchdev.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info); - if (!pf->switchdev.control_vsi) + pf->eswitch.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info); + if (!pf->eswitch.control_vsi) return -ENODEV; - ctrl_vsi = pf->switchdev.control_vsi; - pf->switchdev.uplink_vsi = uplink_vsi; + ctrl_vsi = pf->eswitch.control_vsi; + /* cp VSI is createad with 1 queue as default */ + pf->eswitch.qs.value = 1; + pf->eswitch.uplink_vsi = uplink_vsi; if (ice_eswitch_setup_env(pf)) goto err_vsi; - if 
(ice_repr_add_for_all_vfs(pf)) - goto err_repr_add; - - if (ice_eswitch_setup_reprs(pf)) - goto err_setup_reprs; - - ice_eswitch_remap_rings_to_vectors(pf); - - if (ice_vsi_open(ctrl_vsi)) - goto err_setup_reprs; - if (ice_eswitch_br_offloads_init(pf)) goto err_br_offloads; - ice_eswitch_napi_enable(pf); + pf->eswitch.is_running = true; return 0; err_br_offloads: - ice_vsi_close(ctrl_vsi); -err_setup_reprs: - ice_repr_rem_from_all_vfs(pf); -err_repr_add: ice_eswitch_release_env(pf); err_vsi: ice_vsi_release(ctrl_vsi); @@ -526,19 +477,19 @@ err_vsi: } /** - * ice_eswitch_disable_switchdev - disable switchdev resources + * ice_eswitch_disable_switchdev - disable eswitch resources * @pf: pointer to PF structure */ static void ice_eswitch_disable_switchdev(struct ice_pf *pf) { - struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; + struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi; - ice_eswitch_napi_disable(pf); ice_eswitch_br_offloads_deinit(pf); ice_eswitch_release_env(pf); - ice_eswitch_release_reprs(pf, ctrl_vsi); ice_vsi_release(ctrl_vsi); - ice_repr_rem_from_all_vfs(pf); + + pf->eswitch.is_running = false; + pf->eswitch.qs.is_reaching = false; } /** @@ -566,6 +517,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode, case DEVLINK_ESWITCH_MODE_LEGACY: dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to legacy", pf->hw.pf_id); + xa_destroy(&pf->eswitch.reprs); NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to legacy"); break; case DEVLINK_ESWITCH_MODE_SWITCHDEV: @@ -578,6 +530,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode, dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev", pf->hw.pf_id); + xa_init_flags(&pf->eswitch.reprs, XA_FLAGS_ALLOC); NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev"); break; } @@ -616,74 +569,168 @@ bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf) } /** - * ice_eswitch_release - cleanup eswitch + * ice_eswitch_start_all_tx_queues - start Tx queues of all port representors * @pf: pointer to PF structure */ -void ice_eswitch_release(struct ice_pf *pf) +static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf) { - if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY) + struct ice_repr *repr; + unsigned long id; + + if (test_bit(ICE_DOWN, pf->state)) return; - ice_eswitch_disable_switchdev(pf); - pf->switchdev.is_running = false; + xa_for_each(&pf->eswitch.reprs, id, repr) + ice_repr_start_tx_queues(repr); } /** - * ice_eswitch_configure - configure eswitch + * ice_eswitch_stop_all_tx_queues - stop Tx queues of all port representors * @pf: pointer to PF structure */ -int ice_eswitch_configure(struct ice_pf *pf) +void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) { - int status; - - if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY || pf->switchdev.is_running) - return 0; + struct ice_repr *repr; + unsigned long id; - status = ice_eswitch_enable_switchdev(pf); - if (status) - return status; + if (test_bit(ICE_DOWN, pf->state)) + return; - pf->switchdev.is_running = true; - return 0; + xa_for_each(&pf->eswitch.reprs, id, repr) + ice_repr_stop_tx_queues(repr); } -/** - * ice_eswitch_start_all_tx_queues - start Tx queues of all port representors - * @pf: pointer to PF structure - */ -static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf) +static void ice_eswitch_stop_reprs(struct ice_pf *pf) { - struct ice_vf *vf; - unsigned int bkt; + ice_eswitch_del_sp_rules(pf); + ice_eswitch_stop_all_tx_queues(pf); + ice_eswitch_napi_disable(&pf->eswitch.reprs); +} - 
lockdep_assert_held(&pf->vfs.table_lock); +static void ice_eswitch_start_reprs(struct ice_pf *pf) +{ + ice_eswitch_napi_enable(&pf->eswitch.reprs); + ice_eswitch_start_all_tx_queues(pf); + ice_eswitch_add_sp_rules(pf); +} - if (test_bit(ICE_DOWN, pf->state)) - return; +static void +ice_eswitch_cp_change_queues(struct ice_eswitch *eswitch, int change) +{ + struct ice_vsi *cp = eswitch->control_vsi; + int queues = 0; + + if (eswitch->qs.is_reaching) { + if (eswitch->qs.to_reach >= eswitch->qs.value + change) { + queues = eswitch->qs.to_reach; + eswitch->qs.is_reaching = false; + } else { + queues = 0; + } + } else if ((change > 0 && cp->alloc_txq <= eswitch->qs.value) || + change < 0) { + queues = cp->alloc_txq + change; + } - ice_for_each_vf(pf, bkt, vf) { - if (vf->repr) - ice_repr_start_tx_queues(vf->repr); + if (queues) { + cp->req_txq = queues; + cp->req_rxq = queues; + ice_vsi_close(cp); + ice_vsi_rebuild(cp, ICE_VSI_FLAG_NO_INIT); + ice_vsi_open(cp); + } else if (!change) { + /* change == 0 means that VSI wasn't open, open it here */ + ice_vsi_open(cp); } + + eswitch->qs.value += change; + ice_eswitch_remap_rings_to_vectors(eswitch); } -/** - * ice_eswitch_stop_all_tx_queues - stop Tx queues of all port representors - * @pf: pointer to PF structure - */ -void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) +int +ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf) { - struct ice_vf *vf; - unsigned int bkt; + struct ice_repr *repr; + int change = 1; + int err; - lockdep_assert_held(&pf->vfs.table_lock); + if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY) + return 0; - if (test_bit(ICE_DOWN, pf->state)) + if (xa_empty(&pf->eswitch.reprs)) { + err = ice_eswitch_enable_switchdev(pf); + if (err) + return err; + /* Control plane VSI is created with 1 queue as default */ + pf->eswitch.qs.to_reach -= 1; + change = 0; + } + + ice_eswitch_stop_reprs(pf); + + repr = ice_repr_add_vf(vf); + if (IS_ERR(repr)) { + err = PTR_ERR(repr); + goto err_create_repr; + } + + err = ice_eswitch_setup_repr(pf, repr); + if (err) + goto err_setup_repr; + + err = xa_alloc(&pf->eswitch.reprs, &repr->id, repr, + XA_LIMIT(1, INT_MAX), GFP_KERNEL); + if (err) + goto err_xa_alloc; + + vf->repr_id = repr->id; + + ice_eswitch_cp_change_queues(&pf->eswitch, change); + ice_eswitch_start_reprs(pf); + + return 0; + +err_xa_alloc: + ice_eswitch_release_repr(pf, repr); +err_setup_repr: + ice_repr_rem_vf(repr); +err_create_repr: + if (xa_empty(&pf->eswitch.reprs)) + ice_eswitch_disable_switchdev(pf); + ice_eswitch_start_reprs(pf); + + return err; +} + +void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) +{ + struct ice_repr *repr = xa_load(&pf->eswitch.reprs, vf->repr_id); + struct devlink *devlink = priv_to_devlink(pf); + + if (!repr) return; - ice_for_each_vf(pf, bkt, vf) { - if (vf->repr) - ice_repr_stop_tx_queues(vf->repr); + ice_eswitch_stop_reprs(pf); + xa_erase(&pf->eswitch.reprs, repr->id); + + if (xa_empty(&pf->eswitch.reprs)) + ice_eswitch_disable_switchdev(pf); + else + ice_eswitch_cp_change_queues(&pf->eswitch, -1); + + ice_eswitch_release_repr(pf, repr); + ice_repr_rem_vf(repr); + + if (xa_empty(&pf->eswitch.reprs)) { + /* since all port representors are destroyed, there is + * no point in keeping the nodes + */ + ice_devlink_rate_clear_tx_topology(ice_get_main_vsi(pf)); + devl_lock(devlink); + devl_rate_nodes_destroy(devlink); + devl_unlock(devlink); + } else { + ice_eswitch_start_reprs(pf); } } @@ -693,30 +740,35 @@ void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) */ int 
ice_eswitch_rebuild(struct ice_pf *pf) { - struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; - int status; - - ice_eswitch_napi_disable(pf); - ice_eswitch_napi_del(pf); - - status = ice_eswitch_setup_env(pf); - if (status) - return status; + struct ice_repr *repr; + unsigned long id; + int err; - status = ice_eswitch_setup_reprs(pf); - if (status) - return status; + if (!ice_is_switchdev_running(pf)) + return 0; - ice_eswitch_remap_rings_to_vectors(pf); + err = ice_vsi_rebuild(pf->eswitch.control_vsi, ICE_VSI_FLAG_INIT); + if (err) + return err; - ice_replay_tc_fltrs(pf); + xa_for_each(&pf->eswitch.reprs, id, repr) + ice_eswitch_detach(pf, repr->vf); - status = ice_vsi_open(ctrl_vsi); - if (status) - return status; + return 0; +} - ice_eswitch_napi_enable(pf); - ice_eswitch_start_all_tx_queues(pf); +/** + * ice_eswitch_reserve_cp_queues - reserve control plane VSI queues + * @pf: pointer to PF structure + * @change: how many more (or less) queues is needed + * + * Remember to call ice_eswitch_attach/detach() the "change" times. + */ +void ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change) +{ + if (pf->eswitch.qs.value + change < 0) + return; - return 0; + pf->eswitch.qs.to_reach = pf->eswitch.qs.value + change; + pf->eswitch.qs.is_reaching = true; } diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h index b18bf83a2f..1a288a03a7 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.h +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h @@ -7,8 +7,9 @@ #include <net/devlink.h> #ifdef CONFIG_ICE_SWITCHDEV -void ice_eswitch_release(struct ice_pf *pf); -int ice_eswitch_configure(struct ice_pf *pf); +void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf); +int +ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf); int ice_eswitch_rebuild(struct ice_pf *pf); int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode); @@ -17,7 +18,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode, struct netlink_ext_ack *extack); bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf); -void ice_eswitch_update_repr(struct ice_vsi *vsi); +void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi); void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf); @@ -25,8 +26,15 @@ void ice_eswitch_set_target_vsi(struct sk_buff *skb, struct ice_tx_offload_params *off); netdev_tx_t ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev); +void ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change); #else /* CONFIG_ICE_SWITCHDEV */ -static inline void ice_eswitch_release(struct ice_pf *pf) { } +static inline void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) { } + +static inline int +ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf) +{ + return -EOPNOTSUPP; +} static inline void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) { } @@ -34,7 +42,8 @@ static inline void ice_eswitch_set_target_vsi(struct sk_buff *skb, struct ice_tx_offload_params *off) { } -static inline void ice_eswitch_update_repr(struct ice_vsi *vsi) { } +static inline void +ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi) { } static inline int ice_eswitch_configure(struct ice_pf *pf) { @@ -68,5 +77,8 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev) { return NETDEV_TX_BUSY; } + +static inline void +ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change) { } #endif /* CONFIG_ICE_SWITCHDEV */ #endif /* _ICE_ESWITCH_H_ */ diff --git 
a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c index 6ae0269bdf..ac5beecd02 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c +++ b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c @@ -893,10 +893,14 @@ ice_eswitch_br_port_deinit(struct ice_esw_br *bridge, ice_eswitch_br_fdb_entry_delete(bridge, fdb_entry); } - if (br_port->type == ICE_ESWITCH_BR_UPLINK_PORT && vsi->back) + if (br_port->type == ICE_ESWITCH_BR_UPLINK_PORT && vsi->back) { vsi->back->br_port = NULL; - else if (vsi->vf && vsi->vf->repr) - vsi->vf->repr->br_port = NULL; + } else { + struct ice_repr *repr = ice_repr_get_by_vsi(vsi); + + if (repr) + repr->br_port = NULL; + } xa_erase(&bridge->ports, br_port->vsi_idx); ice_eswitch_br_port_vlans_flush(br_port); @@ -947,7 +951,7 @@ ice_eswitch_br_vf_repr_port_init(struct ice_esw_br *bridge, static int ice_eswitch_br_uplink_port_init(struct ice_esw_br *bridge, struct ice_pf *pf) { - struct ice_vsi *vsi = pf->switchdev.uplink_vsi; + struct ice_vsi *vsi = pf->eswitch.uplink_vsi; struct ice_esw_br_port *br_port; int err; @@ -1185,7 +1189,7 @@ ice_eswitch_br_port_event(struct notifier_block *nb, static void ice_eswitch_br_offloads_dealloc(struct ice_pf *pf) { - struct ice_esw_br_offloads *br_offloads = pf->switchdev.br_offloads; + struct ice_esw_br_offloads *br_offloads = pf->eswitch.br_offloads; ASSERT_RTNL(); @@ -1194,7 +1198,7 @@ ice_eswitch_br_offloads_dealloc(struct ice_pf *pf) ice_eswitch_br_deinit(br_offloads, br_offloads->bridge); - pf->switchdev.br_offloads = NULL; + pf->eswitch.br_offloads = NULL; kfree(br_offloads); } @@ -1205,14 +1209,14 @@ ice_eswitch_br_offloads_alloc(struct ice_pf *pf) ASSERT_RTNL(); - if (pf->switchdev.br_offloads) + if (pf->eswitch.br_offloads) return ERR_PTR(-EEXIST); br_offloads = kzalloc(sizeof(*br_offloads), GFP_KERNEL); if (!br_offloads) return ERR_PTR(-ENOMEM); - pf->switchdev.br_offloads = br_offloads; + pf->eswitch.br_offloads = br_offloads; br_offloads->pf = pf; return br_offloads; @@ -1223,7 +1227,7 @@ ice_eswitch_br_offloads_deinit(struct ice_pf *pf) { struct ice_esw_br_offloads *br_offloads; - br_offloads = pf->switchdev.br_offloads; + br_offloads = pf->eswitch.br_offloads; if (!br_offloads) return; diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index bde9bc74f9..a19b06f18e 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -129,7 +129,6 @@ static const struct ice_stats ice_gstrings_pf_stats[] = { ICE_PF_STAT("rx_oversize.nic", stats.rx_oversize), ICE_PF_STAT("rx_jabber.nic", stats.rx_jabber), ICE_PF_STAT("rx_csum_bad.nic", hw_csum_rx_error), - ICE_PF_STAT("rx_length_errors.nic", stats.rx_len_errors), ICE_PF_STAT("rx_dropped.nic", stats.eth.rx_discards), ICE_PF_STAT("rx_crc_errors.nic", stats.crc_errors), ICE_PF_STAT("illegal_bytes.nic", stats.illegal_bytes), @@ -1142,8 +1141,7 @@ __ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data, switch (stringset) { case ETH_SS_STATS: for (i = 0; i < ICE_VSI_STATS_LEN; i++) - ethtool_sprintf(&p, "%s", - ice_gstrings_vsi_stats[i].stat_string); + ethtool_puts(&p, ice_gstrings_vsi_stats[i].stat_string); if (ice_is_port_repr_netdev(netdev)) return; @@ -1162,8 +1160,7 @@ __ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data, return; for (i = 0; i < ICE_PF_STATS_LEN; i++) - ethtool_sprintf(&p, "%s", - ice_gstrings_pf_stats[i].stat_string); + ethtool_puts(&p, ice_gstrings_pf_stats[i].stat_string); for 
(i = 0; i < ICE_MAX_USER_PRIORITY; i++) { ethtool_sprintf(&p, "tx_priority_%u_xon.nic", i); @@ -1179,8 +1176,7 @@ __ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data, break; case ETH_SS_PRIV_FLAGS: for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) - ethtool_sprintf(&p, "%s", - ice_gstrings_priv_flags[i].name); + ethtool_puts(&p, ice_gstrings_priv_flags[i].name); break; default: break; @@ -2505,27 +2501,15 @@ static u32 ice_parse_hdrs(struct ethtool_rxnfc *nfc) return hdrs; } -#define ICE_FLOW_HASH_FLD_IPV4_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) -#define ICE_FLOW_HASH_FLD_IPV6_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) -#define ICE_FLOW_HASH_FLD_IPV4_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) -#define ICE_FLOW_HASH_FLD_IPV6_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) -#define ICE_FLOW_HASH_FLD_TCP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) -#define ICE_FLOW_HASH_FLD_TCP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT) -#define ICE_FLOW_HASH_FLD_UDP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) -#define ICE_FLOW_HASH_FLD_UDP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT) -#define ICE_FLOW_HASH_FLD_SCTP_SRC_PORT \ - BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT) -#define ICE_FLOW_HASH_FLD_SCTP_DST_PORT \ - BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT) - /** * ice_parse_hash_flds - parses hash fields from RSS hash input * @nfc: ethtool rxnfc command + * @symm: true if Symmetric Topelitz is set * * This function parses the rxnfc command and returns intended * hash fields for RSS configuration */ -static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc) +static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc, bool symm) { u64 hfld = ICE_HASH_INVALID; @@ -2594,9 +2578,11 @@ static int ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc) { struct ice_pf *pf = vsi->back; + struct ice_rss_hash_cfg cfg; struct device *dev; u64 hashed_flds; int status; + bool symm; u32 hdrs; dev = ice_pf_to_dev(pf); @@ -2606,7 +2592,8 @@ ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc) return -EINVAL; } - hashed_flds = ice_parse_hash_flds(nfc); + symm = !!(vsi->rss_hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ); + hashed_flds = ice_parse_hash_flds(nfc, symm); if (hashed_flds == ICE_HASH_INVALID) { dev_dbg(dev, "Invalid hash fields, vsi num = %d\n", vsi->vsi_num); @@ -2620,7 +2607,12 @@ ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc) return -EINVAL; } - status = ice_add_rss_cfg(&pf->hw, vsi->idx, hashed_flds, hdrs); + cfg.hash_flds = hashed_flds; + cfg.addl_hdrs = hdrs; + cfg.hdr_type = ICE_RSS_ANY_HEADERS; + cfg.symm = symm; + + status = ice_add_rss_cfg(&pf->hw, vsi, &cfg); if (status) { dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %d\n", vsi->vsi_num, status); @@ -2641,6 +2633,7 @@ ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc) struct ice_pf *pf = vsi->back; struct device *dev; u64 hash_flds; + bool symm; u32 hdrs; dev = ice_pf_to_dev(pf); @@ -2659,7 +2652,7 @@ ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc) return; } - hash_flds = ice_get_rss_cfg(&pf->hw, vsi->idx, hdrs); + hash_flds = ice_get_rss_cfg(&pf->hw, vsi->idx, hdrs, &symm); if (hash_flds == ICE_HASH_INVALID) { dev_dbg(dev, "No hash fields found for the given header type, vsi num = %d\n", vsi->vsi_num); @@ -3198,11 +3191,18 @@ static u32 ice_get_rxfh_indir_size(struct net_device *netdev) return np->vsi->rss_table_size; } +/** + * ice_get_rxfh - get the Rx flow hash indirection table + * @netdev: network interface device structure + * 
@rxfh: pointer to param struct (indir, key, hfunc) + * + * Reads the indirection table directly from the hardware. + */ static int -ice_get_rxfh_context(struct net_device *netdev, u32 *indir, - u8 *key, u8 *hfunc, u32 rss_context) +ice_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh) { struct ice_netdev_priv *np = netdev_priv(netdev); + u32 rss_context = rxfh->rss_context; struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; u16 qcount, offset; @@ -3233,17 +3233,18 @@ ice_get_rxfh_context(struct net_device *netdev, u32 *indir, vsi = vsi->tc_map_vsi[rss_context]; } - if (hfunc) - *hfunc = ETH_RSS_HASH_TOP; + rxfh->hfunc = ETH_RSS_HASH_TOP; + if (vsi->rss_hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ) + rxfh->input_xfrm |= RXH_XFRM_SYM_XOR; - if (!indir) + if (!rxfh->indir) return 0; lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); if (!lut) return -ENOMEM; - err = ice_get_rss_key(vsi, key); + err = ice_get_rss_key(vsi, rxfh->key); if (err) goto out; @@ -3253,12 +3254,12 @@ ice_get_rxfh_context(struct net_device *netdev, u32 *indir, if (ice_is_adq_active(pf)) { for (i = 0; i < vsi->rss_table_size; i++) - indir[i] = offset + lut[i] % qcount; + rxfh->indir[i] = offset + lut[i] % qcount; goto out; } for (i = 0; i < vsi->rss_table_size; i++) - indir[i] = lut[i]; + rxfh->indir[i] = lut[i]; out: kfree(lut); @@ -3266,42 +3267,31 @@ out: } /** - * ice_get_rxfh - get the Rx flow hash indirection table - * @netdev: network interface device structure - * @indir: indirection table - * @key: hash key - * @hfunc: hash function - * - * Reads the indirection table directly from the hardware. - */ -static int -ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) -{ - return ice_get_rxfh_context(netdev, indir, key, hfunc, 0); -} - -/** * ice_set_rxfh - set the Rx flow hash indirection table * @netdev: network interface device structure - * @indir: indirection table - * @key: hash key - * @hfunc: hash function + * @rxfh: pointer to param struct (indir, key, hfunc) + * @extack: extended ACK from the Netlink message * * Returns -EINVAL if the table specifies an invalid queue ID, otherwise * returns 0 after programming the table. 
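Below, this file is also converted to the consolidated ethtool RSS API, in which .get_rxfh()/.set_rxfh() receive a single struct ethtool_rxfh_param instead of separate indir/key/hfunc arguments; that is why .get_rxfh_context() disappears from the ops table (rss_context now travels in the same struct) and why symmetric hashing is signalled through input_xfrm rather than a new hash function. A minimal sketch of the new callback shape for a hypothetical driver; demo_is_symmetric() is an invented helper:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static bool demo_is_symmetric(struct net_device *netdev)
{
	return false;	/* stand-in for reading the device's RSS config */
}

static int demo_get_rxfh(struct net_device *netdev,
			 struct ethtool_rxfh_param *rxfh)
{
	rxfh->hfunc = ETH_RSS_HASH_TOP;		/* Toeplitz */

	/* symmetric hashing is reported via input_xfrm, not a new hfunc */
	if (demo_is_symmetric(netdev))
		rxfh->input_xfrm |= RXH_XFRM_SYM_XOR;

	if (rxfh->indir) {
		/* copy the HW LUT into rxfh->indir here */
	}
	if (rxfh->key) {
		/* copy the hash key into rxfh->key here */
	}
	return 0;
}

A driver opting into this also advertises .cap_rss_ctx_supported and .cap_rss_sym_xor_supported in its ethtool_ops, as the ice table below does.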
*/ static int -ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, - const u8 hfunc) +ice_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) { struct ice_netdev_priv *np = netdev_priv(netdev); + u8 hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ; struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; struct device *dev; int err; dev = ice_pf_to_dev(pf); - if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && + rxfh->hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + if (rxfh->rss_context) return -EOPNOTSUPP; if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { @@ -3315,7 +3305,15 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, return -EOPNOTSUPP; } - if (key) { + /* Update the VSI's hash function */ + if (rxfh->input_xfrm & RXH_XFRM_SYM_XOR) + hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ; + + err = ice_set_rss_hfunc(vsi, hfunc); + if (err) + return err; + + if (rxfh->key) { if (!vsi->rss_hkey_user) { vsi->rss_hkey_user = devm_kzalloc(dev, ICE_VSIQF_HKEY_ARRAY_SIZE, @@ -3323,7 +3321,8 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, if (!vsi->rss_hkey_user) return -ENOMEM; } - memcpy(vsi->rss_hkey_user, key, ICE_VSIQF_HKEY_ARRAY_SIZE); + memcpy(vsi->rss_hkey_user, rxfh->key, + ICE_VSIQF_HKEY_ARRAY_SIZE); err = ice_set_rss_key(vsi, vsi->rss_hkey_user); if (err) @@ -3338,11 +3337,11 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, } /* Each 32 bits pointed by 'indir' is stored with a lut entry */ - if (indir) { + if (rxfh->indir) { int i; for (i = 0; i < vsi->rss_table_size; i++) - vsi->rss_lut_user[i] = (u8)(indir[i]); + vsi->rss_lut_user[i] = (u8)(rxfh->indir[i]); } else { ice_fill_rss_lut(vsi->rss_lut_user, vsi->rss_table_size, vsi->rss_size); @@ -4220,9 +4219,11 @@ ice_get_module_eeprom(struct net_device *netdev, } static const struct ethtool_ops ice_ethtool_ops = { + .cap_rss_ctx_supported = true, .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE | ETHTOOL_COALESCE_RX_USECS_HIGH, + .cap_rss_sym_xor_supported = true, .get_link_ksettings = ice_get_link_ksettings, .set_link_ksettings = ice_set_link_ksettings, .get_drvinfo = ice_get_drvinfo, @@ -4253,7 +4254,6 @@ static const struct ethtool_ops ice_ethtool_ops = { .set_pauseparam = ice_set_pauseparam, .get_rxfh_key_size = ice_get_rxfh_key_size, .get_rxfh_indir_size = ice_get_rxfh_indir_size, - .get_rxfh_context = ice_get_rxfh_context, .get_rxfh = ice_get_rxfh, .set_rxfh = ice_set_rxfh, .get_channels = ice_get_channels, diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c index d151e5bacf..9a1a04f5f1 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c @@ -302,9 +302,7 @@ void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx) continue; for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) { - u64 prof_id; - - prof_id = flow + tun * ICE_FLTR_PTYPE_MAX; + u64 prof_id = prof->prof_id[tun]; for (i = 0; i < prof->cnt; i++) { if (prof->vsi_h[i] != vsi_idx) @@ -362,10 +360,9 @@ ice_fdir_erase_flow_from_hw(struct ice_hw *hw, enum ice_block blk, int flow) return; for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) { - u64 prof_id; + u64 prof_id = prof->prof_id[tun]; int j; - prof_id = flow + tun * ICE_FLTR_PTYPE_MAX; for (j = 0; j < prof->cnt; j++) { u16 vsi_num; @@ -439,14 +436,12 @@ void 
ice_fdir_replay_flows(struct ice_hw *hw) for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) { struct ice_flow_prof *hw_prof; struct ice_fd_hw_prof *prof; - u64 prof_id; int j; prof = hw->fdir_prof[flow]; - prof_id = flow + tun * ICE_FLTR_PTYPE_MAX; - ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, + ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof->fdir_seg[tun], TNL_SEG_CNT(tun), - &hw_prof); + false, &hw_prof); for (j = 0; j < prof->cnt; j++) { enum ice_flow_priority prio; u64 entry_h = 0; @@ -454,7 +449,7 @@ void ice_fdir_replay_flows(struct ice_hw *hw) prio = ICE_FLOW_PRIO_NORMAL; err = ice_flow_add_entry(hw, ICE_BLK_FD, - prof_id, + hw_prof->id, prof->vsi_h[0], prof->vsi_h[j], prio, prof->fdir_seg, @@ -464,6 +459,7 @@ void ice_fdir_replay_flows(struct ice_hw *hw) flow); continue; } + prof->prof_id[tun] = hw_prof->id; prof->entry_h[j][tun] = entry_h; } } @@ -507,8 +503,7 @@ ice_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp, return -EINVAL; data->flex_word = value & ICE_USERDEF_FLEX_WORD_M; - data->flex_offset = (value & ICE_USERDEF_FLEX_OFFS_M) >> - ICE_USERDEF_FLEX_OFFS_S; + data->flex_offset = FIELD_GET(ICE_USERDEF_FLEX_OFFS_M, value); if (data->flex_offset > ICE_USERDEF_FLEX_MAX_OFFS_VAL) return -EINVAL; @@ -638,7 +633,6 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg, u64 entry1_h = 0; u64 entry2_h = 0; bool del_last; - u64 prof_id; int err; int idx; @@ -668,7 +662,7 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg, * then return error. */ if (hw->fdir_fltr_cnt[flow]) { - dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n"); + dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n"); return -EINVAL; } @@ -686,23 +680,23 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg, * That is the final parameters are 1 header (segment), no * actions (NULL) and zero actions 0. 
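The conversions in this file, and the larger batch in ice_fdir.c just below, all apply one mechanical recipe from <linux/bitfield.h>: FIELD_GET(MASK, reg) replaces open-coded "(reg & MASK) >> SHIFT", and FIELD_PREP(MASK, val) replaces "((u64)val << SHIFT) & MASK", with the shift derived from the mask at compile time. A stand-alone round trip with an invented DEMO_FIELD_M mask:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_FIELD_M	GENMASK_ULL(31, 16)	/* illustrative mask only */

static u64 demo_pack(u64 qword, u16 val)
{
	/* previously: qword |= ((u64)val << 16) & DEMO_FIELD_M; */
	return qword | FIELD_PREP(DEMO_FIELD_M, val);
}

static u16 demo_unpack(u64 qword)
{
	/* previously: (qword & DEMO_FIELD_M) >> 16 */
	return FIELD_GET(DEMO_FIELD_M, qword);
}

Because the mask must be a compile-time constant, a mismatched mask/value pair fails the build instead of silently truncating, which is the main motivation for the sweep.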
*/ - prof_id = flow + tun * ICE_FLTR_PTYPE_MAX; - err = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg, - TNL_SEG_CNT(tun), &prof); + err = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, seg, + TNL_SEG_CNT(tun), false, &prof); if (err) return err; - err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx, + err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, main_vsi->idx, main_vsi->idx, ICE_FLOW_PRIO_NORMAL, seg, &entry1_h); if (err) goto err_prof; - err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx, + err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, main_vsi->idx, ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL, seg, &entry2_h); if (err) goto err_entry; hw_prof->fdir_seg[tun] = seg; + hw_prof->prof_id[tun] = prof->id; hw_prof->entry_h[0][tun] = entry1_h; hw_prof->entry_h[1][tun] = entry2_h; hw_prof->vsi_h[0] = main_vsi->idx; @@ -719,7 +713,7 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg, entry1_h = 0; vsi_h = main_vsi->tc_map_vsi[idx]->idx; - err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, + err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, main_vsi->idx, vsi_h, ICE_FLOW_PRIO_NORMAL, seg, &entry1_h); @@ -756,7 +750,7 @@ err_unroll: if (!hw_prof->entry_h[idx][tun]) continue; - ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id); + ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof->id); ice_flow_rem_entry(hw, ICE_BLK_FD, hw_prof->entry_h[idx][tun]); hw_prof->entry_h[idx][tun] = 0; if (del_last) @@ -766,11 +760,11 @@ err_unroll: hw_prof->cnt = 0; err_entry: ice_rem_prof_id_flow(hw, ICE_BLK_FD, - ice_get_hw_vsi_num(hw, main_vsi->idx), prof_id); + ice_get_hw_vsi_num(hw, main_vsi->idx), prof->id); ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h); err_prof: - ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id); - dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n"); + ice_flow_rem_prof(hw, ICE_BLK_FD, prof->id); + dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n"); return err; } @@ -1853,6 +1847,7 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd) struct ice_pf *pf; struct ice_hw *hw; int fltrs_needed; + u32 max_location; u16 tunnel_port; int ret; @@ -1884,8 +1879,10 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd) if (ret) return ret; - if (fsp->location >= ice_get_fdir_cnt_all(hw)) { - dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n"); + max_location = ice_get_fdir_cnt_all(hw); + if (fsp->location >= max_location) { + dev_err(dev, "Failed to add filter. The number of ntuple filters or provided location exceed max %d.\n", + max_location); return -ENOSPC; } @@ -1893,7 +1890,7 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd) fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port, TNL_ALL) ? 2 : 1; if (!ice_fdir_find_fltr_by_idx(hw, fsp->location) && ice_fdir_num_avail_fltr(hw, pf->vsi[vsi->idx]) < fltrs_needed) { - dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n"); + dev_err(dev, "Failed to add filter. 
The maximum number of flow director filters has been reached.\n"); return -ENOSPC; } diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c index ae089d32ee..5840c3e04a 100644 --- a/drivers/net/ethernet/intel/ice/ice_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_fdir.c @@ -604,55 +604,32 @@ ice_set_fd_desc_val(struct ice_fd_fltr_desc_ctx *ctx, u64 qword; /* prep QW0 of FD filter programming desc */ - qword = ((u64)ctx->qindex << ICE_FXD_FLTR_QW0_QINDEX_S) & - ICE_FXD_FLTR_QW0_QINDEX_M; - qword |= ((u64)ctx->comp_q << ICE_FXD_FLTR_QW0_COMP_Q_S) & - ICE_FXD_FLTR_QW0_COMP_Q_M; - qword |= ((u64)ctx->comp_report << ICE_FXD_FLTR_QW0_COMP_REPORT_S) & - ICE_FXD_FLTR_QW0_COMP_REPORT_M; - qword |= ((u64)ctx->fd_space << ICE_FXD_FLTR_QW0_FD_SPACE_S) & - ICE_FXD_FLTR_QW0_FD_SPACE_M; - qword |= ((u64)ctx->cnt_index << ICE_FXD_FLTR_QW0_STAT_CNT_S) & - ICE_FXD_FLTR_QW0_STAT_CNT_M; - qword |= ((u64)ctx->cnt_ena << ICE_FXD_FLTR_QW0_STAT_ENA_S) & - ICE_FXD_FLTR_QW0_STAT_ENA_M; - qword |= ((u64)ctx->evict_ena << ICE_FXD_FLTR_QW0_EVICT_ENA_S) & - ICE_FXD_FLTR_QW0_EVICT_ENA_M; - qword |= ((u64)ctx->toq << ICE_FXD_FLTR_QW0_TO_Q_S) & - ICE_FXD_FLTR_QW0_TO_Q_M; - qword |= ((u64)ctx->toq_prio << ICE_FXD_FLTR_QW0_TO_Q_PRI_S) & - ICE_FXD_FLTR_QW0_TO_Q_PRI_M; - qword |= ((u64)ctx->dpu_recipe << ICE_FXD_FLTR_QW0_DPU_RECIPE_S) & - ICE_FXD_FLTR_QW0_DPU_RECIPE_M; - qword |= ((u64)ctx->drop << ICE_FXD_FLTR_QW0_DROP_S) & - ICE_FXD_FLTR_QW0_DROP_M; - qword |= ((u64)ctx->flex_prio << ICE_FXD_FLTR_QW0_FLEX_PRI_S) & - ICE_FXD_FLTR_QW0_FLEX_PRI_M; - qword |= ((u64)ctx->flex_mdid << ICE_FXD_FLTR_QW0_FLEX_MDID_S) & - ICE_FXD_FLTR_QW0_FLEX_MDID_M; - qword |= ((u64)ctx->flex_val << ICE_FXD_FLTR_QW0_FLEX_VAL_S) & - ICE_FXD_FLTR_QW0_FLEX_VAL_M; + qword = FIELD_PREP(ICE_FXD_FLTR_QW0_QINDEX_M, ctx->qindex); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_COMP_Q_M, ctx->comp_q); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_COMP_REPORT_M, ctx->comp_report); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_FD_SPACE_M, ctx->fd_space); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_STAT_CNT_M, ctx->cnt_index); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_STAT_ENA_M, ctx->cnt_ena); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_EVICT_ENA_M, ctx->evict_ena); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_TO_Q_M, ctx->toq); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_TO_Q_PRI_M, ctx->toq_prio); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_DPU_RECIPE_M, ctx->dpu_recipe); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_DROP_M, ctx->drop); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_FLEX_PRI_M, ctx->flex_prio); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_FLEX_MDID_M, ctx->flex_mdid); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_FLEX_VAL_M, ctx->flex_val); fdir_desc->qidx_compq_space_stat = cpu_to_le64(qword); /* prep QW1 of FD filter programming desc */ - qword = ((u64)ctx->dtype << ICE_FXD_FLTR_QW1_DTYPE_S) & - ICE_FXD_FLTR_QW1_DTYPE_M; - qword |= ((u64)ctx->pcmd << ICE_FXD_FLTR_QW1_PCMD_S) & - ICE_FXD_FLTR_QW1_PCMD_M; - qword |= ((u64)ctx->desc_prof_prio << ICE_FXD_FLTR_QW1_PROF_PRI_S) & - ICE_FXD_FLTR_QW1_PROF_PRI_M; - qword |= ((u64)ctx->desc_prof << ICE_FXD_FLTR_QW1_PROF_S) & - ICE_FXD_FLTR_QW1_PROF_M; - qword |= ((u64)ctx->fd_vsi << ICE_FXD_FLTR_QW1_FD_VSI_S) & - ICE_FXD_FLTR_QW1_FD_VSI_M; - qword |= ((u64)ctx->swap << ICE_FXD_FLTR_QW1_SWAP_S) & - ICE_FXD_FLTR_QW1_SWAP_M; - qword |= ((u64)ctx->fdid_prio << ICE_FXD_FLTR_QW1_FDID_PRI_S) & - ICE_FXD_FLTR_QW1_FDID_PRI_M; - qword |= ((u64)ctx->fdid_mdid << ICE_FXD_FLTR_QW1_FDID_MDID_S) & - ICE_FXD_FLTR_QW1_FDID_MDID_M; - qword |= 
((u64)ctx->fdid << ICE_FXD_FLTR_QW1_FDID_S) & - ICE_FXD_FLTR_QW1_FDID_M; + qword = FIELD_PREP(ICE_FXD_FLTR_QW1_DTYPE_M, ctx->dtype); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW1_PCMD_M, ctx->pcmd); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW1_PROF_PRI_M, ctx->desc_prof_prio); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW1_PROF_M, ctx->desc_prof); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW1_FD_VSI_M, ctx->fd_vsi); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW1_SWAP_M, ctx->swap); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW1_FDID_PRI_M, ctx->fdid_prio); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW1_FDID_MDID_M, ctx->fdid_mdid); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW1_FDID_M, ctx->fdid); fdir_desc->dtype_cmd_vsi_fdid = cpu_to_le64(qword); } diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c index 5ce4139659..20d5db88c9 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c @@ -1218,11 +1218,13 @@ ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks) * @blk: HW block * @fv: field vector to search for * @masks: masks for FV + * @symm: symmetric setting for RSS flows * @prof_id: receives the profile ID */ static int ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk, - struct ice_fv_word *fv, u16 *masks, u8 *prof_id) + struct ice_fv_word *fv, u16 *masks, bool symm, + u8 *prof_id) { struct ice_es *es = &hw->blk[blk].es; u8 i; @@ -1236,6 +1238,9 @@ ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk, for (i = 0; i < (u8)es->count; i++) { u16 off = i * es->fvw; + if (blk == ICE_BLK_RSS && es->symm[i] != symm) + continue; + if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv))) continue; @@ -1409,13 +1414,13 @@ ice_write_prof_mask_reg(struct ice_hw *hw, enum ice_block blk, u16 mask_idx, switch (blk) { case ICE_BLK_RSS: offset = GLQF_HMASK(mask_idx); - val = (idx << GLQF_HMASK_MSK_INDEX_S) & GLQF_HMASK_MSK_INDEX_M; - val |= (mask << GLQF_HMASK_MASK_S) & GLQF_HMASK_MASK_M; + val = FIELD_PREP(GLQF_HMASK_MSK_INDEX_M, idx); + val |= FIELD_PREP(GLQF_HMASK_MASK_M, mask); break; case ICE_BLK_FD: offset = GLQF_FDMASK(mask_idx); - val = (idx << GLQF_FDMASK_MSK_INDEX_S) & GLQF_FDMASK_MSK_INDEX_M; - val |= (mask << GLQF_FDMASK_MASK_S) & GLQF_FDMASK_MASK_M; + val = FIELD_PREP(GLQF_FDMASK_MSK_INDEX_M, idx); + val |= FIELD_PREP(GLQF_FDMASK_MASK_M, mask); break; default: ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n", @@ -1716,15 +1721,16 @@ ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id, } /** - * ice_write_es - write an extraction sequence to hardware + * ice_write_es - write an extraction sequence and symmetric setting to hardware * @hw: pointer to the HW struct * @blk: the block in which to write the extraction sequence * @prof_id: the profile ID to write * @fv: pointer to the extraction sequence to write - NULL to clear extraction + * @symm: symmetric setting for RSS profiles */ static void ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id, - struct ice_fv_word *fv) + struct ice_fv_word *fv, bool symm) { u16 off; @@ -1737,6 +1743,9 @@ ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id, memcpy(&hw->blk[blk].es.t[off], fv, hw->blk[blk].es.fvw * sizeof(*fv)); } + + if (blk == ICE_BLK_RSS) + hw->blk[blk].es.symm[prof_id] = symm; } /** @@ -1753,7 +1762,7 @@ ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id) if (hw->blk[blk].es.ref_count[prof_id] > 0) { if (!--hw->blk[blk].es.ref_count[prof_id]) { - ice_write_es(hw, 
blk, prof_id, NULL); + ice_write_es(hw, blk, prof_id, NULL, false); ice_free_prof_masks(hw, blk, prof_id); return ice_free_prof_id(hw, blk, prof_id); } @@ -2116,8 +2125,10 @@ void ice_free_hw_tbls(struct ice_hw *hw) devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_redir.t); devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.t); devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.ref_count); + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.symm); devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written); devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.mask_ena); + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_id.id); } list_for_each_entry_safe(r, rt, &hw->rss_list_head, l_entry) { @@ -2150,6 +2161,7 @@ void ice_clear_hw_tbls(struct ice_hw *hw) for (i = 0; i < ICE_BLK_COUNT; i++) { struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir; + struct ice_prof_id *prof_id = &hw->blk[i].prof_id; struct ice_prof_tcam *prof = &hw->blk[i].prof; struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1; struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2; @@ -2178,8 +2190,11 @@ void ice_clear_hw_tbls(struct ice_hw *hw) memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw); memset(es->ref_count, 0, es->count * sizeof(*es->ref_count)); + memset(es->symm, 0, es->count * sizeof(*es->symm)); memset(es->written, 0, es->count * sizeof(*es->written)); memset(es->mask_ena, 0, es->count * sizeof(*es->mask_ena)); + + memset(prof_id->id, 0, prof_id->count * sizeof(*prof_id->id)); } } @@ -2196,6 +2211,7 @@ int ice_init_hw_tbls(struct ice_hw *hw) ice_init_all_prof_masks(hw); for (i = 0; i < ICE_BLK_COUNT; i++) { struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir; + struct ice_prof_id *prof_id = &hw->blk[i].prof_id; struct ice_prof_tcam *prof = &hw->blk[i].prof; struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1; struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2; @@ -2292,6 +2308,11 @@ int ice_init_hw_tbls(struct ice_hw *hw) if (!es->ref_count) goto err; + es->symm = devm_kcalloc(ice_hw_to_dev(hw), es->count, + sizeof(*es->symm), GFP_KERNEL); + if (!es->symm) + goto err; + es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count, sizeof(*es->written), GFP_KERNEL); if (!es->written) @@ -2301,6 +2322,12 @@ int ice_init_hw_tbls(struct ice_hw *hw) sizeof(*es->mask_ena), GFP_KERNEL); if (!es->mask_ena) goto err; + + prof_id->count = blk_sizes[i].prof_id; + prof_id->id = devm_kcalloc(ice_hw_to_dev(hw), prof_id->count, + sizeof(*prof_id->id), GFP_KERNEL); + if (!prof_id->id) + goto err; } return 0; @@ -2963,6 +2990,7 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype, * @attr_cnt: number of elements in attr array * @es: extraction sequence (length of array is determined by the block) * @masks: mask for extraction sequence + * @symm: symmetric setting for RSS profiles * * This function registers a profile, which matches a set of PTYPES with a * particular extraction sequence. 
While the hardware profile is allocated @@ -2972,7 +3000,7 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype, int ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], const struct ice_ptype_attributes *attr, u16 attr_cnt, - struct ice_fv_word *es, u16 *masks) + struct ice_fv_word *es, u16 *masks, bool symm) { u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE); DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT); @@ -2986,7 +3014,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], mutex_lock(&hw->blk[blk].es.prof_map_lock); /* search for existing profile */ - status = ice_find_prof_id_with_mask(hw, blk, es, masks, &prof_id); + status = ice_find_prof_id_with_mask(hw, blk, es, masks, symm, &prof_id); if (status) { /* allocate profile ID */ status = ice_alloc_prof_id(hw, blk, &prof_id); @@ -3009,7 +3037,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], goto err_ice_add_prof; /* and write new es */ - ice_write_es(hw, blk, prof_id, es); + ice_write_es(hw, blk, prof_id, es, symm); } ice_prof_inc_ref(hw, blk, prof_id); @@ -3097,7 +3125,7 @@ err_ice_add_prof: * This will search for a profile tracking ID which was previously added. * The profile map lock should be held before calling this function. */ -static struct ice_prof_map * +struct ice_prof_map * ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id) { struct ice_prof_map *entry = NULL; diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h index 7af7c8e9aa..b39d7cdc38 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h @@ -42,7 +42,9 @@ bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype); int ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], const struct ice_ptype_attributes *attr, u16 attr_cnt, - struct ice_fv_word *es, u16 *masks); + struct ice_fv_word *es, u16 *masks, bool symm); +struct ice_prof_map * +ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id); int ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl); int diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h index 4f42e14ed3..d427a79d00 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_type.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h @@ -146,6 +146,7 @@ struct ice_es { u32 *mask_ena; struct list_head prof_map; struct ice_fv_word *t; + u8 *symm; /* symmetric setting per profile (RSS blk)*/ struct mutex prof_map_lock; /* protect access to profiles list */ u8 *written; u8 reverse; /* set to true to reverse FV order */ @@ -304,10 +305,16 @@ struct ice_masks { struct ice_mask masks[ICE_PROF_MASK_COUNT]; }; +struct ice_prof_id { + unsigned long *id; + int count; +}; + /* Tables per block */ struct ice_blk_info { struct ice_xlt1 xlt1; struct ice_xlt2 xlt2; + struct ice_prof_id prof_id; struct ice_prof_tcam prof; struct ice_prof_redir prof_redir; struct ice_es es; diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c index fb8b925aaf..fc2b58f562 100644 --- a/drivers/net/ethernet/intel/ice/ice_flow.c +++ b/drivers/net/ethernet/intel/ice/ice_flow.c @@ -1235,6 +1235,7 @@ ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params) #define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001 #define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004 +#define 
ICE_FLOW_FIND_PROF_CHK_SYMM 0x00000008 /** * ice_flow_find_prof_conds - Find a profile matching headers and conditions @@ -1243,13 +1244,14 @@ ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params) * @dir: flow direction * @segs: array of one or more packet segments that describe the flow * @segs_cnt: number of packet segments provided + * @symm: symmetric setting for RSS profiles * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI) * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*) */ static struct ice_flow_prof * ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, struct ice_flow_seg_info *segs, - u8 segs_cnt, u16 vsi_handle, u32 conds) + u8 segs_cnt, bool symm, u16 vsi_handle, u32 conds) { struct ice_flow_prof *p, *prof = NULL; @@ -1265,6 +1267,11 @@ ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk, !test_bit(vsi_handle, p->vsis)) continue; + /* Check for symmetric settings */ + if ((conds & ICE_FLOW_FIND_PROF_CHK_SYMM) && + p->symm != symm) + continue; + /* Protocol headers must be checked. Matched fields are * checked if specified. */ @@ -1328,26 +1335,33 @@ ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk, * @hw: pointer to the HW struct * @blk: classification stage * @dir: flow direction - * @prof_id: unique ID to identify this flow profile * @segs: array of one or more packet segments that describe the flow * @segs_cnt: number of packet segments provided + * @symm: symmetric setting for RSS profiles * @prof: stores the returned flow profile added * * Assumption: the caller has acquired the lock to the profile list */ static int ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk, - enum ice_flow_dir dir, u64 prof_id, + enum ice_flow_dir dir, struct ice_flow_seg_info *segs, u8 segs_cnt, - struct ice_flow_prof **prof) + bool symm, struct ice_flow_prof **prof) { struct ice_flow_prof_params *params; + struct ice_prof_id *ids; int status; + u64 prof_id; u8 i; if (!prof) return -EINVAL; + ids = &hw->blk[blk].prof_id; + prof_id = find_first_zero_bit(ids->id, ids->count); + if (prof_id >= ids->count) + return -ENOSPC; + params = kzalloc(sizeof(*params), GFP_KERNEL); if (!params) return -ENOMEM; @@ -1369,6 +1383,7 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk, params->prof->id = prof_id; params->prof->dir = dir; params->prof->segs_cnt = segs_cnt; + params->prof->symm = symm; /* Make a copy of the segments that need to be persistent in the flow * profile instance @@ -1385,7 +1400,7 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk, /* Add a HW profile for this flow profile */ status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes, params->attr, params->attr_cnt, params->es, - params->mask); + params->mask, symm); if (status) { ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n"); goto out; @@ -1393,6 +1408,7 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk, INIT_LIST_HEAD(¶ms->prof->entries); mutex_init(¶ms->prof->entries_lock); + set_bit(prof_id, ids->id); *prof = params->prof; out: @@ -1436,6 +1452,7 @@ ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk, /* Remove all hardware profiles associated with this flow profile */ status = ice_rem_prof(hw, blk, prof->id); if (!status) { + clear_bit(prof->id, hw->blk[blk].prof_id.id); list_del(&prof->l_entry); mutex_destroy(&prof->entries_lock); devm_kfree(ice_hw_to_dev(hw), prof); @@ -1511,15 +1528,15 @@ 
ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk, * @hw: pointer to the HW struct * @blk: classification stage * @dir: flow direction - * @prof_id: unique ID to identify this flow profile * @segs: array of one or more packet segments that describe the flow * @segs_cnt: number of packet segments provided + * @symm: symmetric setting for RSS profiles * @prof: stores the returned flow profile added */ int ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, - u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt, - struct ice_flow_prof **prof) + struct ice_flow_seg_info *segs, u8 segs_cnt, + bool symm, struct ice_flow_prof **prof) { int status; @@ -1538,8 +1555,8 @@ ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, mutex_lock(&hw->fl_profs_locks[blk]); - status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt, - prof); + status = ice_flow_add_prof_sync(hw, blk, dir, segs, segs_cnt, + symm, prof); if (!status) list_add(&(*prof)->l_entry, &hw->fl_profs[blk]); @@ -1855,37 +1872,49 @@ int ice_flow_rem_vsi_prof(struct ice_hw *hw, u16 vsi_handle, u64 prof_id) /** * ice_flow_set_rss_seg_info - setup packet segments for RSS * @segs: pointer to the flow field segment(s) - * @hash_fields: fields to be hashed on for the segment(s) - * @flow_hdr: protocol header fields within a packet segment + * @seg_cnt: segment count + * @cfg: configure parameters * * Helper function to extract fields from hash bitmap and use flow * header value to set flow field segment for further use in flow * profile entry or removal. */ static int -ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields, - u32 flow_hdr) +ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt, + const struct ice_rss_hash_cfg *cfg) { + struct ice_flow_seg_info *seg; u64 val; - u8 i; + u16 i; - for_each_set_bit(i, (unsigned long *)&hash_fields, - ICE_FLOW_FIELD_IDX_MAX) - ice_flow_set_fld(segs, (enum ice_flow_field)i, + /* set inner most segment */ + seg = &segs[seg_cnt - 1]; + + for_each_set_bit(i, (const unsigned long *)&cfg->hash_flds, + (u16)ICE_FLOW_FIELD_IDX_MAX) + ice_flow_set_fld(seg, (enum ice_flow_field)i, ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false); - ICE_FLOW_SET_HDRS(segs, flow_hdr); + ICE_FLOW_SET_HDRS(seg, cfg->addl_hdrs); + + /* set outer most header */ + if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4) + segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER; + else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6) + segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER; - if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS & + if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS & ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER) return -EINVAL; - val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS); + val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS); if (val && !is_power_of_2(val)) return -EIO; - val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS); + val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS); if (val && !is_power_of_2(val)) return -EIO; @@ -1956,6 +1985,39 @@ int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle) } /** + * ice_get_rss_hdr_type - get a RSS profile's header type + * @prof: RSS flow profile + */ +static enum ice_rss_cfg_hdr_type +ice_get_rss_hdr_type(struct ice_flow_prof *prof) +{ + if (prof->segs_cnt == ICE_FLOW_SEG_SINGLE) { + return 
ICE_RSS_OUTER_HEADERS; + } else if (prof->segs_cnt == ICE_FLOW_SEG_MAX) { + const struct ice_flow_seg_info *s; + + s = &prof->segs[ICE_RSS_OUTER_HEADERS]; + if (s->hdrs == ICE_FLOW_SEG_HDR_NONE) + return ICE_RSS_INNER_HEADERS; + if (s->hdrs & ICE_FLOW_SEG_HDR_IPV4) + return ICE_RSS_INNER_HEADERS_W_OUTER_IPV4; + if (s->hdrs & ICE_FLOW_SEG_HDR_IPV6) + return ICE_RSS_INNER_HEADERS_W_OUTER_IPV6; + } + + return ICE_RSS_ANY_HEADERS; +} + +static bool +ice_rss_match_prof(struct ice_rss_cfg *r, struct ice_flow_prof *prof, + enum ice_rss_cfg_hdr_type hdr_type) +{ + return (r->hash.hdr_type == hdr_type && + r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match && + r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs); +} + +/** * ice_rem_rss_list - remove RSS configuration from list * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle @@ -1966,15 +2028,16 @@ int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle) static void ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof) { + enum ice_rss_cfg_hdr_type hdr_type; struct ice_rss_cfg *r, *tmp; /* Search for RSS hash fields associated to the VSI that match the * hash configurations associated to the flow profile. If found * remove from the RSS entry list of the VSI context and delete entry. */ + hdr_type = ice_get_rss_hdr_type(prof); list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry) - if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match && - r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) { + if (ice_rss_match_prof(r, prof, hdr_type)) { clear_bit(vsi_handle, r->vsis); if (bitmap_empty(r->vsis, ICE_MAX_VSI)) { list_del(&r->l_entry); @@ -1995,11 +2058,12 @@ ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof) static int ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof) { + enum ice_rss_cfg_hdr_type hdr_type; struct ice_rss_cfg *r, *rss_cfg; + hdr_type = ice_get_rss_hdr_type(prof); list_for_each_entry(r, &hw->rss_list_head, l_entry) - if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match && - r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) { + if (ice_rss_match_prof(r, prof, hdr_type)) { set_bit(vsi_handle, r->vsis); return 0; } @@ -2009,8 +2073,10 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof) if (!rss_cfg) return -ENOMEM; - rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match; - rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs; + rss_cfg->hash.hash_flds = prof->segs[prof->segs_cnt - 1].match; + rss_cfg->hash.addl_hdrs = prof->segs[prof->segs_cnt - 1].hdrs; + rss_cfg->hash.hdr_type = hdr_type; + rss_cfg->hash.symm = prof->symm; set_bit(vsi_handle, rss_cfg->vsis); list_add_tail(&rss_cfg->l_entry, &hw->rss_list_head); @@ -2018,65 +2084,177 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof) return 0; } -#define ICE_FLOW_PROF_HASH_S 0 -#define ICE_FLOW_PROF_HASH_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S) -#define ICE_FLOW_PROF_HDR_S 32 -#define ICE_FLOW_PROF_HDR_M (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S) -#define ICE_FLOW_PROF_ENCAP_S 63 -#define ICE_FLOW_PROF_ENCAP_M (BIT_ULL(ICE_FLOW_PROF_ENCAP_S)) +/** + * ice_rss_config_xor_word - set the HSYMM registers for one input set word + * @hw: pointer to the hardware structure + * @prof_id: RSS hardware profile id + * @src: the FV index used by the protocol's source field + * @dst: the FV index used by the protocol's destination field + * + * Write to the HSYMM register with the 
index of @src FV the value of the @dst + * FV index. This will tell the hardware to XOR HSYMM[src] with INSET[dst] + * while calculating the RSS input set. + */ +static void +ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst) +{ + u32 val, reg, bits_shift; + u8 reg_idx; + + reg_idx = src / GLQF_HSYMM_REG_SIZE; + bits_shift = ((src % GLQF_HSYMM_REG_SIZE) << 3); + val = dst | GLQF_HSYMM_ENABLE_BIT; + + reg = rd32(hw, GLQF_HSYMM(prof_id, reg_idx)); + reg = (reg & ~(0xff << bits_shift)) | (val << bits_shift); + wr32(hw, GLQF_HSYMM(prof_id, reg_idx), reg); +} -#define ICE_RSS_OUTER_HEADERS 1 -#define ICE_RSS_INNER_HEADERS 2 +/** + * ice_rss_config_xor - set the symmetric registers for a profile's protocol + * @hw: pointer to the hardware structure + * @prof_id: RSS hardware profile id + * @src: the FV index used by the protocol's source field + * @dst: the FV index used by the protocol's destination field + * @len: length of the source/destination fields in words + */ +static void +ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len) +{ + int fv_last_word = + ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1; + int i; + + for (i = 0; i < len; i++) { + ice_rss_config_xor_word(hw, prof_id, + /* Yes, field vector in GLQF_HSYMM and + * GLQF_HINSET is inversed! + */ + fv_last_word - (src + i), + fv_last_word - (dst + i)); + ice_rss_config_xor_word(hw, prof_id, + fv_last_word - (dst + i), + fv_last_word - (src + i)); + } +} -/* Flow profile ID format: - * [0:31] - Packet match fields - * [32:62] - Protocol header - * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled +/** + * ice_rss_set_symm - set the symmetric settings for an RSS profile + * @hw: pointer to the hardware structure + * @prof: pointer to flow profile + * + * The symmetric hash will result from XORing the protocol's fields with + * indexes in GLQF_HSYMM and GLQF_HINSET. This function configures the profile's + * GLQF_HSYMM registers. */ -#define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \ - ((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \ - (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \ - ((u8)((segs_cnt) - 1) ? 
ICE_FLOW_PROF_ENCAP_M : 0))) +static void ice_rss_set_symm(struct ice_hw *hw, struct ice_flow_prof *prof) +{ + struct ice_prof_map *map; + u8 prof_id, m; + + mutex_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock); + map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id); + if (map) + prof_id = map->prof_id; + mutex_unlock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock); + + if (!map) + return; + + /* clear to default */ + for (m = 0; m < GLQF_HSYMM_REG_PER_PROF; m++) + wr32(hw, GLQF_HSYMM(prof_id, m), 0); + + if (prof->symm) { + struct ice_flow_seg_xtrct *ipv4_src, *ipv4_dst; + struct ice_flow_seg_xtrct *ipv6_src, *ipv6_dst; + struct ice_flow_seg_xtrct *sctp_src, *sctp_dst; + struct ice_flow_seg_xtrct *tcp_src, *tcp_dst; + struct ice_flow_seg_xtrct *udp_src, *udp_dst; + struct ice_flow_seg_info *seg; + + seg = &prof->segs[prof->segs_cnt - 1]; + + ipv4_src = &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct; + ipv4_dst = &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct; + + ipv6_src = &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct; + ipv6_dst = &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct; + + tcp_src = &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct; + tcp_dst = &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct; + + udp_src = &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct; + udp_dst = &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct; + + sctp_src = &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct; + sctp_dst = &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct; + + /* xor IPv4 */ + if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0) + ice_rss_config_xor(hw, prof_id, + ipv4_src->idx, ipv4_dst->idx, 2); + + /* xor IPv6 */ + if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0) + ice_rss_config_xor(hw, prof_id, + ipv6_src->idx, ipv6_dst->idx, 8); + + /* xor TCP */ + if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0) + ice_rss_config_xor(hw, prof_id, + tcp_src->idx, tcp_dst->idx, 1); + + /* xor UDP */ + if (udp_src->prot_id != 0 && udp_dst->prot_id != 0) + ice_rss_config_xor(hw, prof_id, + udp_src->idx, udp_dst->idx, 1); + + /* xor SCTP */ + if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0) + ice_rss_config_xor(hw, prof_id, + sctp_src->idx, sctp_dst->idx, 1); + } +} /** * ice_add_rss_cfg_sync - add an RSS configuration * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle - * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure - * @addl_hdrs: protocol header fields - * @segs_cnt: packet segment count + * @cfg: configure parameters * * Assumption: lock has already been acquired for RSS list */ static int -ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, - u32 addl_hdrs, u8 segs_cnt) +ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, + const struct ice_rss_hash_cfg *cfg) { const enum ice_block blk = ICE_BLK_RSS; struct ice_flow_prof *prof = NULL; struct ice_flow_seg_info *segs; + u8 segs_cnt; int status; - if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX) - return -EINVAL; + segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ? + ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX; segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL); if (!segs) return -ENOMEM; /* Construct the packet segment info from the hashed fields */ - status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds, - addl_hdrs); + status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg); if (status) goto exit; - /* Search for a flow profile that has matching headers, hash fields - * and has the input VSI associated to it. 
If found, no further + /* Search for a flow profile that has matching headers, hash fields, + * symm and has the input VSI associated to it. If found, no further * operations required and exit. */ prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt, - vsi_handle, + cfg->symm, vsi_handle, ICE_FLOW_FIND_PROF_CHK_FLDS | + ICE_FLOW_FIND_PROF_CHK_SYMM | ICE_FLOW_FIND_PROF_CHK_VSI); if (prof) goto exit; @@ -2087,7 +2265,8 @@ ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, * the protocol header and new hash field configuration. */ prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt, - vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI); + cfg->symm, vsi_handle, + ICE_FLOW_FIND_PROF_CHK_VSI); if (prof) { status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle); if (!status) @@ -2103,11 +2282,12 @@ ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, } } - /* Search for a profile that has same match fields only. If this - * exists then associate the VSI to this profile. + /* Search for a profile that has the same match fields and symmetric + * setting. If this exists then associate the VSI to this profile. */ prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt, - vsi_handle, + cfg->symm, vsi_handle, + ICE_FLOW_FIND_PROF_CHK_SYMM | ICE_FLOW_FIND_PROF_CHK_FLDS); if (prof) { status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle); @@ -2116,17 +2296,14 @@ ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, goto exit; } - /* Create a new flow profile with generated profile and packet - * segment information. - */ + /* Create a new flow profile with packet segment information. */ status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX, - ICE_FLOW_GEN_PROFID(hashed_flds, - segs[segs_cnt - 1].hdrs, - segs_cnt), - segs, segs_cnt, &prof); + segs, segs_cnt, cfg->symm, &prof); if (status) goto exit; + prof->symm = cfg->symm; + ice_rss_set_symm(hw, prof); status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle); /* If association to a new flow profile failed then this profile can * be removed. @@ -2146,30 +2323,43 @@ exit: /** * ice_add_rss_cfg - add an RSS configuration with specified hashed fields * @hw: pointer to the hardware structure - * @vsi_handle: software VSI handle - * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure - * @addl_hdrs: protocol header fields + * @vsi: VSI to add the RSS configuration to + * @cfg: configure parameters * * This function will generate a flow profile based on fields associated with * the input fields to hash on, the flow type and use the VSI number to add * a flow entry to the profile. 
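 *
 * A minimal usage sketch (@hw and @vsi are assumed valid; the TCP-over-IPv4
 * field choices below are illustrative, not mandated by this patch):
 *
 *	struct ice_rss_hash_cfg hcfg = {
 *		.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4,
 *		.hash_flds = ICE_HASH_TCP_IPV4,
 *		.hdr_type = ICE_RSS_ANY_HEADERS,
 *		.symm = false,
 *	};
 *	int err = ice_add_rss_cfg(hw, vsi, &hcfg);
 *
 * With ICE_RSS_ANY_HEADERS the helper programs the outer headers first and
 * then the inner headers, as the function body below shows.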
*/ int -ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, - u32 addl_hdrs) +ice_add_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi, + const struct ice_rss_hash_cfg *cfg) { + struct ice_rss_hash_cfg local_cfg; + u16 vsi_handle; int status; - if (hashed_flds == ICE_HASH_INVALID || - !ice_is_vsi_valid(hw, vsi_handle)) + if (!vsi) + return -EINVAL; + + vsi_handle = vsi->idx; + if (!ice_is_vsi_valid(hw, vsi_handle) || + !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS || + cfg->hash_flds == ICE_HASH_INVALID) return -EINVAL; mutex_lock(&hw->rss_locks); - status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs, - ICE_RSS_OUTER_HEADERS); - if (!status) - status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, - addl_hdrs, ICE_RSS_INNER_HEADERS); + local_cfg = *cfg; + if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) { + status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg); + } else { + local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS; + status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg); + if (!status) { + local_cfg.hdr_type = ICE_RSS_INNER_HEADERS; + status = ice_add_rss_cfg_sync(hw, vsi_handle, + &local_cfg); + } + } mutex_unlock(&hw->rss_locks); return status; @@ -2179,33 +2369,33 @@ ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, * ice_rem_rss_cfg_sync - remove an existing RSS configuration * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle - * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove - * @addl_hdrs: Protocol header fields within a packet segment - * @segs_cnt: packet segment count + * @cfg: configure parameters * * Assumption: lock has already been acquired for RSS list */ static int -ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, - u32 addl_hdrs, u8 segs_cnt) +ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, + const struct ice_rss_hash_cfg *cfg) { const enum ice_block blk = ICE_BLK_RSS; struct ice_flow_seg_info *segs; struct ice_flow_prof *prof; + u8 segs_cnt; int status; + segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ? + ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX; segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL); if (!segs) return -ENOMEM; /* Construct the packet segment info from the hashed fields */ - status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds, - addl_hdrs); + status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg); if (status) goto out; prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt, - vsi_handle, + cfg->symm, vsi_handle, ICE_FLOW_FIND_PROF_CHK_FLDS); if (!prof) { status = -ENOENT; @@ -2233,31 +2423,39 @@ out: * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle - * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove - * @addl_hdrs: Protocol header fields within a packet segment + * @cfg: configure parameters * * This function will lookup the flow profile based on the input * hash field bitmap, iterate through the profile entry list of * that profile and find entry associated with input VSI to be - * removed. Calls are made to underlying flow s which will APIs + * removed. Calls are made to underlying flow apis which will in * turn build or update buffers for RSS XLT1 section. 
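 *
 * Removal mirrors addition: the caller passes the same ice_rss_hash_cfg it
 * used with ice_add_rss_cfg(), plus the VSI's software handle (a sketch;
 * hcfg here is an illustrative config, vsi->idx its software handle):
 *
 *	err = ice_rem_rss_cfg(hw, vsi->idx, &hcfg);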
*/ -int __maybe_unused -ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, - u32 addl_hdrs) +int +ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, + const struct ice_rss_hash_cfg *cfg) { + struct ice_rss_hash_cfg local_cfg; int status; - if (hashed_flds == ICE_HASH_INVALID || - !ice_is_vsi_valid(hw, vsi_handle)) + if (!ice_is_vsi_valid(hw, vsi_handle) || + !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS || + cfg->hash_flds == ICE_HASH_INVALID) return -EINVAL; mutex_lock(&hw->rss_locks); - status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs, - ICE_RSS_OUTER_HEADERS); - if (!status) - status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, - addl_hdrs, ICE_RSS_INNER_HEADERS); + local_cfg = *cfg; + if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) { + status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg); + } else { + local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS; + status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg); + if (!status) { + local_cfg.hdr_type = ICE_RSS_INNER_HEADERS; + status = ice_rem_rss_cfg_sync(hw, vsi_handle, + &local_cfg); + } + } mutex_unlock(&hw->rss_locks); return status; @@ -2298,18 +2496,24 @@ ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, /** * ice_add_avf_rss_cfg - add an RSS configuration for AVF driver * @hw: pointer to the hardware structure - * @vsi_handle: software VSI handle + * @vsi: VF's VSI * @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure * * This function will take the hash bitmap provided by the AVF driver via a * message, convert it to ICE-compatible values, and configure RSS flow * profiles. */ -int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash) +int ice_add_avf_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi, u64 avf_hash) { + struct ice_rss_hash_cfg hcfg; + u16 vsi_handle; int status = 0; u64 hash_flds; + if (!vsi) + return -EINVAL; + + vsi_handle = vsi->idx; if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID || !ice_is_vsi_valid(hw, vsi_handle)) return -EINVAL; @@ -2379,8 +2583,11 @@ int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash) if (rss_hash == ICE_HASH_INVALID) return -EIO; - status = ice_add_rss_cfg(hw, vsi_handle, rss_hash, - ICE_FLOW_SEG_HDR_NONE); + hcfg.addl_hdrs = ICE_FLOW_SEG_HDR_NONE; + hcfg.hash_flds = rss_hash; + hcfg.hdr_type = ICE_RSS_ANY_HEADERS; + hcfg.symm = false; + status = ice_add_rss_cfg(hw, vsi, &hcfg); if (status) break; } @@ -2388,6 +2595,54 @@ int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash) return status; } +static bool rss_cfg_symm_valid(u64 hfld) +{ + return !((!!(hfld & ICE_FLOW_HASH_FLD_IPV4_SA) ^ + !!(hfld & ICE_FLOW_HASH_FLD_IPV4_DA)) || + (!!(hfld & ICE_FLOW_HASH_FLD_IPV6_SA) ^ + !!(hfld & ICE_FLOW_HASH_FLD_IPV6_DA)) || + (!!(hfld & ICE_FLOW_HASH_FLD_TCP_SRC_PORT) ^ + !!(hfld & ICE_FLOW_HASH_FLD_TCP_DST_PORT)) || + (!!(hfld & ICE_FLOW_HASH_FLD_UDP_SRC_PORT) ^ + !!(hfld & ICE_FLOW_HASH_FLD_UDP_DST_PORT)) || + (!!(hfld & ICE_FLOW_HASH_FLD_SCTP_SRC_PORT) ^ + !!(hfld & ICE_FLOW_HASH_FLD_SCTP_DST_PORT))); +} + +/** + * ice_set_rss_cfg_symm - set symmtery for all VSI's RSS configurations + * @hw: pointer to the hardware structure + * @vsi: VSI to set/unset Symmetric RSS + * @symm: TRUE to set Symmetric RSS hashing + */ +int ice_set_rss_cfg_symm(struct ice_hw *hw, struct ice_vsi *vsi, bool symm) +{ + struct ice_rss_hash_cfg local; + struct ice_rss_cfg *r, *tmp; + u16 vsi_handle = vsi->idx; + int status = 0; + + if (!ice_is_vsi_valid(hw, vsi_handle)) + return -EINVAL; + + 
mutex_lock(&hw->rss_locks); + list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry) { + if (test_bit(vsi_handle, r->vsis) && r->hash.symm != symm) { + local = r->hash; + local.symm = symm; + if (symm && !rss_cfg_symm_valid(r->hash.hash_flds)) + continue; + + status = ice_add_rss_cfg_sync(hw, vsi_handle, &local); + if (status) + break; + } + } + mutex_unlock(&hw->rss_locks); + + return status; +} + /** * ice_replay_rss_cfg - replay RSS configurations associated with VSI * @hw: pointer to the hardware structure @@ -2404,16 +2659,7 @@ int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle) mutex_lock(&hw->rss_locks); list_for_each_entry(r, &hw->rss_list_head, l_entry) { if (test_bit(vsi_handle, r->vsis)) { - status = ice_add_rss_cfg_sync(hw, vsi_handle, - r->hashed_flds, - r->packet_hdr, - ICE_RSS_OUTER_HEADERS); - if (status) - break; - status = ice_add_rss_cfg_sync(hw, vsi_handle, - r->hashed_flds, - r->packet_hdr, - ICE_RSS_INNER_HEADERS); + status = ice_add_rss_cfg_sync(hw, vsi_handle, &r->hash); if (status) break; } @@ -2428,11 +2674,12 @@ int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle) * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle * @hdrs: protocol header type + * @symm: whether the RSS is symmetric (bool, output) * * This function will return the match fields of the first instance of flow * profile having the given header types and containing input VSI */ -u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs) +u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs, bool *symm) { u64 rss_hash = ICE_HASH_INVALID; struct ice_rss_cfg *r; @@ -2444,8 +2691,9 @@ u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs) mutex_lock(&hw->rss_locks); list_for_each_entry(r, &hw->rss_list_head, l_entry) if (test_bit(vsi_handle, r->vsis) && - r->packet_hdr == hdrs) { - rss_hash = r->hashed_flds; + r->hash.addl_hdrs == hdrs) { + rss_hash = r->hash.hash_flds; + *symm = r->hash.symm; break; } mutex_unlock(&hw->rss_locks); diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h index 96923ef0a5..ff82915ab4 100644 --- a/drivers/net/ethernet/intel/ice/ice_flow.h +++ b/drivers/net/ethernet/intel/ice/ice_flow.h @@ -34,6 +34,8 @@ #define ICE_HASH_TCP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_TCP_PORT) #define ICE_HASH_UDP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT) #define ICE_HASH_UDP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT) +#define ICE_HASH_SCTP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_SCTP_PORT) +#define ICE_HASH_SCTP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_SCTP_PORT) #define ICE_FLOW_HASH_GTP_TEID \ (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID)) @@ -227,6 +229,19 @@ enum ice_flow_field { ICE_FLOW_FIELD_IDX_MAX }; +#define ICE_FLOW_HASH_FLD_IPV4_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) +#define ICE_FLOW_HASH_FLD_IPV6_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) +#define ICE_FLOW_HASH_FLD_IPV4_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) +#define ICE_FLOW_HASH_FLD_IPV6_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) +#define ICE_FLOW_HASH_FLD_TCP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) +#define ICE_FLOW_HASH_FLD_TCP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT) +#define ICE_FLOW_HASH_FLD_UDP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) +#define ICE_FLOW_HASH_FLD_UDP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT) +#define ICE_FLOW_HASH_FLD_SCTP_SRC_PORT \ + BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT) +#define ICE_FLOW_HASH_FLD_SCTP_DST_PORT \ + 
BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT) + /* Flow headers and fields for AVF support */ enum ice_flow_avf_hdr_field { /* Values 0 - 28 are reserved for future use */ @@ -279,6 +294,24 @@ enum ice_flow_avf_hdr_field { BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP)) +enum ice_rss_cfg_hdr_type { + ICE_RSS_OUTER_HEADERS, /* take outer headers as inputset. */ + ICE_RSS_INNER_HEADERS, /* take inner headers as inputset. */ + /* take inner headers as inputset for packet with outer ipv4. */ + ICE_RSS_INNER_HEADERS_W_OUTER_IPV4, + /* take inner headers as inputset for packet with outer ipv6. */ + ICE_RSS_INNER_HEADERS_W_OUTER_IPV6, + /* take outer headers first then inner headers as inputset */ + ICE_RSS_ANY_HEADERS +}; + +struct ice_rss_hash_cfg { + u32 addl_hdrs; /* protocol header fields */ + u64 hash_flds; /* hash bit field (ICE_FLOW_HASH_*) to configure */ + enum ice_rss_cfg_hdr_type hdr_type; /* to specify inner or outer */ + bool symm; /* symmetric or asymmetric hash */ +}; + enum ice_flow_dir { ICE_FLOW_RX = 0x02, }; @@ -289,8 +322,10 @@ enum ice_flow_priority { ICE_FLOW_PRIO_HIGH }; +#define ICE_FLOW_SEG_SINGLE 1 #define ICE_FLOW_SEG_MAX 2 #define ICE_FLOW_SEG_RAW_FLD_MAX 2 +#define ICE_FLOW_SW_FIELD_VECTOR_MAX 48 #define ICE_FLOW_FV_EXTRACT_SZ 2 #define ICE_FLOW_SET_HDRS(seg, val) ((seg)->hdrs |= (u32)(val)) @@ -372,20 +407,21 @@ struct ice_flow_prof { /* software VSI handles referenced by this flow profile */ DECLARE_BITMAP(vsis, ICE_MAX_VSI); + + bool symm; /* Symmetric Hash for RSS */ }; struct ice_rss_cfg { struct list_head l_entry; /* bitmap of VSIs added to the RSS entry */ DECLARE_BITMAP(vsis, ICE_MAX_VSI); - u64 hashed_flds; - u32 packet_hdr; + struct ice_rss_hash_cfg hash; }; int ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, - u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt, - struct ice_flow_prof **prof); + struct ice_flow_seg_info *segs, u8 segs_cnt, + bool symm, struct ice_flow_prof **prof); int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id); int ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id, @@ -401,13 +437,13 @@ ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len, int ice_flow_rem_vsi_prof(struct ice_hw *hw, u16 vsi_handle, u64 prof_id); void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle); int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle); -int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds); +int ice_set_rss_cfg_symm(struct ice_hw *hw, struct ice_vsi *vsi, bool symm); +int ice_add_avf_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi, + u64 hashed_flds); int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle); -int -ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, - u32 addl_hdrs); -int -ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, - u32 addl_hdrs); -u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs); +int ice_add_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi, + const struct ice_rss_hash_cfg *cfg); +int ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, + const struct ice_rss_hash_cfg *cfg); +u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs, bool *symm); #endif /* _ICE_FLOW_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_fwlog.c b/drivers/net/ethernet/intel/ice/ice_fwlog.c new file mode 100644 index 0000000000..92b5dac481 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_fwlog.c @@ -0,0 +1,470 @@ 
+// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022, Intel Corporation. */ + +#include <linux/vmalloc.h> +#include "ice.h" +#include "ice_common.h" +#include "ice_fwlog.h" + +bool ice_fwlog_ring_full(struct ice_fwlog_ring *rings) +{ + u16 head, tail; + + head = rings->head; + tail = rings->tail; + + if (head < tail && (tail - head == (rings->size - 1))) + return true; + else if (head > tail && (tail == (head - 1))) + return true; + + return false; +} + +bool ice_fwlog_ring_empty(struct ice_fwlog_ring *rings) +{ + return rings->head == rings->tail; +} + +void ice_fwlog_ring_increment(u16 *item, u16 size) +{ + *item = (*item + 1) & (size - 1); +} + +static int ice_fwlog_alloc_ring_buffs(struct ice_fwlog_ring *rings) +{ + int i, nr_bytes; + u8 *mem; + + nr_bytes = rings->size * ICE_AQ_MAX_BUF_LEN; + mem = vzalloc(nr_bytes); + if (!mem) + return -ENOMEM; + + for (i = 0; i < rings->size; i++) { + struct ice_fwlog_data *ring = &rings->rings[i]; + + ring->data_size = ICE_AQ_MAX_BUF_LEN; + ring->data = mem; + mem += ICE_AQ_MAX_BUF_LEN; + } + + return 0; +} + +static void ice_fwlog_free_ring_buffs(struct ice_fwlog_ring *rings) +{ + int i; + + for (i = 0; i < rings->size; i++) { + struct ice_fwlog_data *ring = &rings->rings[i]; + + /* the first ring is the base memory for the whole range so + * free it + */ + if (!i) + vfree(ring->data); + + ring->data = NULL; + ring->data_size = 0; + } +} + +#define ICE_FWLOG_INDEX_TO_BYTES(n) ((128 * 1024) << (n)) +/** + * ice_fwlog_realloc_rings - reallocate the FW log rings + * @hw: pointer to the HW structure + * @index: the new index to use to allocate memory for the log data + * + */ +void ice_fwlog_realloc_rings(struct ice_hw *hw, int index) +{ + struct ice_fwlog_ring ring; + int status, ring_size; + + /* convert the number of bytes into a number of 4K buffers. externally + * the driver presents the interface to the FW log data as a number of + * bytes because that's easy for users to understand. internally the + * driver uses a ring of buffers because the driver doesn't know where + * the beginning and end of any line of log data is so the driver has + * to overwrite data as complete blocks. when the data is returned to + * the user the driver knows that the data is correct and the FW log + * can be correctly parsed by the tools + */ + ring_size = ICE_FWLOG_INDEX_TO_BYTES(index) / ICE_AQ_MAX_BUF_LEN; + if (ring_size == hw->fwlog_ring.size) + return; + + /* allocate space for the new rings and buffers then release the + * old rings and buffers. that way if we don't have enough + * memory then we at least have what we had before + */ + ring.rings = kcalloc(ring_size, sizeof(*ring.rings), GFP_KERNEL); + if (!ring.rings) + return; + + ring.size = ring_size; + + status = ice_fwlog_alloc_ring_buffs(&ring); + if (status) { + dev_warn(ice_hw_to_dev(hw), "Unable to allocate memory for FW log ring data buffers\n"); + ice_fwlog_free_ring_buffs(&ring); + kfree(ring.rings); + return; + } + + ice_fwlog_free_ring_buffs(&hw->fwlog_ring); + kfree(hw->fwlog_ring.rings); + + hw->fwlog_ring.rings = ring.rings; + hw->fwlog_ring.size = ring.size; + hw->fwlog_ring.index = index; + hw->fwlog_ring.head = 0; + hw->fwlog_ring.tail = 0; +} + +/** + * ice_fwlog_init - Initialize FW logging configuration + * @hw: pointer to the HW structure + * + * This function should be called on driver initialization during + * ice_init_hw(). 
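+ *
+ * Sizing note (derived from the defines in this file, and assuming the
+ * usual 4 KB ICE_AQ_MAX_BUF_LEN admin-queue buffer size): the default ring
+ * holds ICE_FWLOG_RING_SIZE_DFLT (256) buffers, which matches
+ * ICE_FWLOG_INDEX_TO_BYTES(ICE_FWLOG_RING_SIZE_INDEX_DFLT):
+ *
+ *	(128 * 1024) << 3 == 256 * 4096 == 1 MB of buffered log data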
+ */ +int ice_fwlog_init(struct ice_hw *hw) +{ + /* only support fw log commands on PF 0 */ + if (hw->bus.func) + return -EINVAL; + + ice_fwlog_set_supported(hw); + + if (ice_fwlog_supported(hw)) { + int status; + + /* read the current config from the FW and store it */ + status = ice_fwlog_get(hw, &hw->fwlog_cfg); + if (status) + return status; + + hw->fwlog_ring.rings = kcalloc(ICE_FWLOG_RING_SIZE_DFLT, + sizeof(*hw->fwlog_ring.rings), + GFP_KERNEL); + if (!hw->fwlog_ring.rings) { + dev_warn(ice_hw_to_dev(hw), "Unable to allocate memory for FW log rings\n"); + return -ENOMEM; + } + + hw->fwlog_ring.size = ICE_FWLOG_RING_SIZE_DFLT; + hw->fwlog_ring.index = ICE_FWLOG_RING_SIZE_INDEX_DFLT; + + status = ice_fwlog_alloc_ring_buffs(&hw->fwlog_ring); + if (status) { + dev_warn(ice_hw_to_dev(hw), "Unable to allocate memory for FW log ring data buffers\n"); + ice_fwlog_free_ring_buffs(&hw->fwlog_ring); + kfree(hw->fwlog_ring.rings); + return status; + } + + ice_debugfs_fwlog_init(hw->back); + } else { + dev_warn(ice_hw_to_dev(hw), "FW logging is not supported in this NVM image. Please update the NVM to get FW log support\n"); + } + + return 0; +} + +/** + * ice_fwlog_deinit - unroll FW logging configuration + * @hw: pointer to the HW structure + * + * This function should be called in ice_deinit_hw(). + */ +void ice_fwlog_deinit(struct ice_hw *hw) +{ + struct ice_pf *pf = hw->back; + int status; + + /* only support fw log commands on PF 0 */ + if (hw->bus.func) + return; + + /* make sure FW logging is disabled to not put the FW in a weird state + * for the next driver load + */ + hw->fwlog_cfg.options &= ~ICE_FWLOG_OPTION_ARQ_ENA; + status = ice_fwlog_set(hw, &hw->fwlog_cfg); + if (status) + dev_warn(ice_hw_to_dev(hw), "Unable to turn off FW logging, status: %d\n", + status); + + kfree(pf->ice_debugfs_pf_fwlog_modules); + + pf->ice_debugfs_pf_fwlog_modules = NULL; + + status = ice_fwlog_unregister(hw); + if (status) + dev_warn(ice_hw_to_dev(hw), "Unable to unregister FW logging, status: %d\n", + status); + + if (hw->fwlog_ring.rings) { + ice_fwlog_free_ring_buffs(&hw->fwlog_ring); + kfree(hw->fwlog_ring.rings); + } +} + +/** + * ice_fwlog_supported - Cached for whether FW supports FW logging or not + * @hw: pointer to the HW structure + * + * This will always return false if called before ice_init_hw(), so it must be + * called after ice_init_hw(). 
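+ *
+ * Callers in this file use it as an early-out guard:
+ *
+ *	if (!ice_fwlog_supported(hw))
+ *		return -EOPNOTSUPP;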
+ */ +bool ice_fwlog_supported(struct ice_hw *hw) +{ + return hw->fwlog_supported; +} + +/** + * ice_aq_fwlog_set - Set FW logging configuration AQ command (0xFF30) + * @hw: pointer to the HW structure + * @entries: entries to configure + * @num_entries: number of @entries + * @options: options from ice_fwlog_cfg->options structure + * @log_resolution: logging resolution + */ +static int +ice_aq_fwlog_set(struct ice_hw *hw, struct ice_fwlog_module_entry *entries, + u16 num_entries, u16 options, u16 log_resolution) +{ + struct ice_aqc_fw_log_cfg_resp *fw_modules; + struct ice_aqc_fw_log *cmd; + struct ice_aq_desc desc; + int status; + int i; + + fw_modules = kcalloc(num_entries, sizeof(*fw_modules), GFP_KERNEL); + if (!fw_modules) + return -ENOMEM; + + for (i = 0; i < num_entries; i++) { + fw_modules[i].module_identifier = + cpu_to_le16(entries[i].module_id); + fw_modules[i].log_level = entries[i].log_level; + } + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_config); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + cmd = &desc.params.fw_log; + + cmd->cmd_flags = ICE_AQC_FW_LOG_CONF_SET_VALID; + cmd->ops.cfg.log_resolution = cpu_to_le16(log_resolution); + cmd->ops.cfg.mdl_cnt = cpu_to_le16(num_entries); + + if (options & ICE_FWLOG_OPTION_ARQ_ENA) + cmd->cmd_flags |= ICE_AQC_FW_LOG_CONF_AQ_EN; + if (options & ICE_FWLOG_OPTION_UART_ENA) + cmd->cmd_flags |= ICE_AQC_FW_LOG_CONF_UART_EN; + + status = ice_aq_send_cmd(hw, &desc, fw_modules, + sizeof(*fw_modules) * num_entries, + NULL); + + kfree(fw_modules); + + return status; +} + +/** + * ice_fwlog_set - Set the firmware logging settings + * @hw: pointer to the HW structure + * @cfg: config used to set firmware logging + * + * This function should be called whenever the driver needs to set the firmware + * logging configuration. It can be called on initialization, reset, or during + * runtime. + * + * If the PF wishes to receive FW logging then it must register via + * ice_fwlog_register. Note, that ice_fwlog_register does not need to be called + * for init. 
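+ *
+ * A read-modify-write sketch that turns on reporting over the admin
+ * receive queue (error handling elided; assumes FW log support was
+ * already detected):
+ *
+ *	struct ice_fwlog_cfg cfg;
+ *
+ *	if (!ice_fwlog_get(hw, &cfg)) {
+ *		cfg.options |= ICE_FWLOG_OPTION_ARQ_ENA;
+ *		ice_fwlog_set(hw, &cfg);
+ *	}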
+ */ +int ice_fwlog_set(struct ice_hw *hw, struct ice_fwlog_cfg *cfg) +{ + if (!ice_fwlog_supported(hw)) + return -EOPNOTSUPP; + + return ice_aq_fwlog_set(hw, cfg->module_entries, + ICE_AQC_FW_LOG_ID_MAX, cfg->options, + cfg->log_resolution); +} + +/** + * ice_aq_fwlog_get - Get the current firmware logging configuration (0xFF32) + * @hw: pointer to the HW structure + * @cfg: firmware logging configuration to populate + */ +static int ice_aq_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg) +{ + struct ice_aqc_fw_log_cfg_resp *fw_modules; + struct ice_aqc_fw_log *cmd; + struct ice_aq_desc desc; + u16 module_id_cnt; + int status; + void *buf; + int i; + + memset(cfg, 0, sizeof(*cfg)); + + buf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_query); + cmd = &desc.params.fw_log; + + cmd->cmd_flags = ICE_AQC_FW_LOG_AQ_QUERY; + + status = ice_aq_send_cmd(hw, &desc, buf, ICE_AQ_MAX_BUF_LEN, NULL); + if (status) { + ice_debug(hw, ICE_DBG_FW_LOG, "Failed to get FW log configuration\n"); + goto status_out; + } + + module_id_cnt = le16_to_cpu(cmd->ops.cfg.mdl_cnt); + if (module_id_cnt < ICE_AQC_FW_LOG_ID_MAX) { + ice_debug(hw, ICE_DBG_FW_LOG, "FW returned less than the expected number of FW log module IDs\n"); + } else if (module_id_cnt > ICE_AQC_FW_LOG_ID_MAX) { + ice_debug(hw, ICE_DBG_FW_LOG, "FW returned more than expected number of FW log module IDs, setting module_id_cnt to software expected max %u\n", + ICE_AQC_FW_LOG_ID_MAX); + module_id_cnt = ICE_AQC_FW_LOG_ID_MAX; + } + + cfg->log_resolution = le16_to_cpu(cmd->ops.cfg.log_resolution); + if (cmd->cmd_flags & ICE_AQC_FW_LOG_CONF_AQ_EN) + cfg->options |= ICE_FWLOG_OPTION_ARQ_ENA; + if (cmd->cmd_flags & ICE_AQC_FW_LOG_CONF_UART_EN) + cfg->options |= ICE_FWLOG_OPTION_UART_ENA; + if (cmd->cmd_flags & ICE_AQC_FW_LOG_QUERY_REGISTERED) + cfg->options |= ICE_FWLOG_OPTION_IS_REGISTERED; + + fw_modules = (struct ice_aqc_fw_log_cfg_resp *)buf; + + for (i = 0; i < module_id_cnt; i++) { + struct ice_aqc_fw_log_cfg_resp *fw_module = &fw_modules[i]; + + cfg->module_entries[i].module_id = + le16_to_cpu(fw_module->module_identifier); + cfg->module_entries[i].log_level = fw_module->log_level; + } + +status_out: + kfree(buf); + return status; +} + +/** + * ice_fwlog_get - Get the firmware logging settings + * @hw: pointer to the HW structure + * @cfg: config to populate based on current firmware logging settings + */ +int ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg) +{ + if (!ice_fwlog_supported(hw)) + return -EOPNOTSUPP; + + return ice_aq_fwlog_get(hw, cfg); +} + +/** + * ice_aq_fwlog_register - Register PF for firmware logging events (0xFF31) + * @hw: pointer to the HW structure + * @reg: true to register and false to unregister + */ +static int ice_aq_fwlog_register(struct ice_hw *hw, bool reg) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_register); + + if (reg) + desc.params.fw_log.cmd_flags = ICE_AQC_FW_LOG_AQ_REGISTER; + + return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); +} + +/** + * ice_fwlog_register - Register the PF for firmware logging + * @hw: pointer to the HW structure + * + * After this call the PF will start to receive firmware logging based on the + * configuration set in ice_fwlog_set. 
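+ *
+ * One plausible ordering (a sketch, not a sequence mandated by this
+ * patch): configure first, then register for delivery over the ARQ:
+ *
+ *	hw->fwlog_cfg.options |= ICE_FWLOG_OPTION_ARQ_ENA;
+ *	if (!ice_fwlog_set(hw, &hw->fwlog_cfg))
+ *		ice_fwlog_register(hw);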
+ */ +int ice_fwlog_register(struct ice_hw *hw) +{ + int status; + + if (!ice_fwlog_supported(hw)) + return -EOPNOTSUPP; + + status = ice_aq_fwlog_register(hw, true); + if (status) + ice_debug(hw, ICE_DBG_FW_LOG, "Failed to register for firmware logging events over ARQ\n"); + else + hw->fwlog_cfg.options |= ICE_FWLOG_OPTION_IS_REGISTERED; + + return status; +} + +/** + * ice_fwlog_unregister - Unregister the PF from firmware logging + * @hw: pointer to the HW structure + */ +int ice_fwlog_unregister(struct ice_hw *hw) +{ + int status; + + if (!ice_fwlog_supported(hw)) + return -EOPNOTSUPP; + + status = ice_aq_fwlog_register(hw, false); + if (status) + ice_debug(hw, ICE_DBG_FW_LOG, "Failed to unregister from firmware logging events over ARQ\n"); + else + hw->fwlog_cfg.options &= ~ICE_FWLOG_OPTION_IS_REGISTERED; + + return status; +} + +/** + * ice_fwlog_set_supported - Set if FW logging is supported by FW + * @hw: pointer to the HW struct + * + * If FW returns success to the ice_aq_fwlog_get call then it supports FW + * logging, else it doesn't. Set the fwlog_supported flag accordingly. + * + * This function is only meant to be called during driver init to determine if + * the FW support FW logging. + */ +void ice_fwlog_set_supported(struct ice_hw *hw) +{ + struct ice_fwlog_cfg *cfg; + int status; + + hw->fwlog_supported = false; + + cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); + if (!cfg) + return; + + /* don't call ice_fwlog_get() because that would check to see if FW + * logging is supported which is what the driver is determining now + */ + status = ice_aq_fwlog_get(hw, cfg); + if (status) + ice_debug(hw, ICE_DBG_FW_LOG, "ice_aq_fwlog_get failed, FW logging is not supported on this version of FW, status %d\n", + status); + else + hw->fwlog_supported = true; + + kfree(cfg); +} diff --git a/drivers/net/ethernet/intel/ice/ice_fwlog.h b/drivers/net/ethernet/intel/ice/ice_fwlog.h new file mode 100644 index 0000000000..287e71fa4b --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_fwlog.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2022, Intel Corporation. */ + +#ifndef _ICE_FWLOG_H_ +#define _ICE_FWLOG_H_ +#include "ice_adminq_cmd.h" + +struct ice_hw; + +/* Only a single log level should be set and all log levels under the set value + * are enabled, e.g. 
if log level is set to ICE_FW_LOG_LEVEL_VERBOSE, then all + * other log levels are included (except ICE_FW_LOG_LEVEL_NONE) + */ +enum ice_fwlog_level { + ICE_FWLOG_LEVEL_NONE = 0, + ICE_FWLOG_LEVEL_ERROR = 1, + ICE_FWLOG_LEVEL_WARNING = 2, + ICE_FWLOG_LEVEL_NORMAL = 3, + ICE_FWLOG_LEVEL_VERBOSE = 4, + ICE_FWLOG_LEVEL_INVALID, /* all values >= this entry are invalid */ +}; + +struct ice_fwlog_module_entry { + /* module ID for the corresponding firmware logging event */ + u16 module_id; + /* verbosity level for the module_id */ + u8 log_level; +}; + +struct ice_fwlog_cfg { + /* list of modules for configuring log level */ + struct ice_fwlog_module_entry module_entries[ICE_AQC_FW_LOG_ID_MAX]; + /* options used to configure firmware logging */ + u16 options; +#define ICE_FWLOG_OPTION_ARQ_ENA BIT(0) +#define ICE_FWLOG_OPTION_UART_ENA BIT(1) + /* set before calling ice_fwlog_init() so the PF registers for firmware + * logging on initialization + */ +#define ICE_FWLOG_OPTION_REGISTER_ON_INIT BIT(2) + /* set in the ice_fwlog_get() response if the PF is registered for FW + * logging events over ARQ + */ +#define ICE_FWLOG_OPTION_IS_REGISTERED BIT(3) + + /* minimum number of log events sent per Admin Receive Queue event */ + u16 log_resolution; +}; + +struct ice_fwlog_data { + u16 data_size; + u8 *data; +}; + +struct ice_fwlog_ring { + struct ice_fwlog_data *rings; + u16 index; + u16 size; + u16 head; + u16 tail; +}; + +#define ICE_FWLOG_RING_SIZE_INDEX_DFLT 3 +#define ICE_FWLOG_RING_SIZE_DFLT 256 +#define ICE_FWLOG_RING_SIZE_MAX 512 + +bool ice_fwlog_ring_full(struct ice_fwlog_ring *rings); +bool ice_fwlog_ring_empty(struct ice_fwlog_ring *rings); +void ice_fwlog_ring_increment(u16 *item, u16 size); +void ice_fwlog_set_supported(struct ice_hw *hw); +bool ice_fwlog_supported(struct ice_hw *hw); +int ice_fwlog_init(struct ice_hw *hw); +void ice_fwlog_deinit(struct ice_hw *hw); +int ice_fwlog_set(struct ice_hw *hw, struct ice_fwlog_cfg *cfg); +int ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg); +int ice_fwlog_register(struct ice_hw *hw); +int ice_fwlog_unregister(struct ice_hw *hw); +void ice_fwlog_realloc_rings(struct ice_hw *hw, int index); +#endif /* _ICE_FWLOG_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 86936b758a..cfac1d432c 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -200,6 +200,8 @@ #define GLINT_VECT2FUNC_PF_NUM_M ICE_M(0x7, 12) #define GLINT_VECT2FUNC_IS_PF_S 16 #define GLINT_VECT2FUNC_IS_PF_M BIT(16) +#define PFINT_ALLOC 0x001D2600 +#define PFINT_ALLOC_FIRST ICE_M(0x7FF, 0) #define PFINT_FW_CTL 0x0016C800 #define PFINT_FW_CTL_MSIX_INDX_M ICE_M(0x7FF, 0) #define PFINT_FW_CTL_ITR_INDX_S 11 @@ -404,6 +406,10 @@ #define GLQF_HMASK_SEL(_i) (0x00410000 + ((_i) * 4)) #define GLQF_HMASK_SEL_MAX_INDEX 127 #define GLQF_HMASK_SEL_MASK_SEL_S 0 +#define GLQF_HSYMM(_i, _j) (0x0040F000 + ((_i) * 4 + (_j) * 512)) +#define GLQF_HSYMM_REG_SIZE 4 +#define GLQF_HSYMM_REG_PER_PROF 6 +#define GLQF_HSYMM_ENABLE_BIT BIT(7) #define E800_PFQF_FD_CNT_FD_GCNT_M GENMASK(14, 0) #define E830_PFQF_FD_CNT_FD_GCNT_M GENMASK(15, 0) #define E800_PFQF_FD_CNT_FD_BCNT_M GENMASK(30, 16) diff --git a/drivers/net/ethernet/intel/ice/ice_hwmon.c b/drivers/net/ethernet/intel/ice/ice_hwmon.c new file mode 100644 index 0000000000..e4c2c1bff6 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_hwmon.c @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 
2023, Intel Corporation. */ + +#include "ice.h" +#include "ice_hwmon.h" +#include "ice_adminq_cmd.h" + +#include <linux/hwmon.h> + +#define TEMP_FROM_REG(reg) ((reg) * 1000) + +static const struct hwmon_channel_info *ice_hwmon_info[] = { + HWMON_CHANNEL_INFO(temp, + HWMON_T_INPUT | HWMON_T_MAX | + HWMON_T_CRIT | HWMON_T_EMERGENCY), + NULL +}; + +static int ice_hwmon_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + struct ice_aqc_get_sensor_reading_resp resp; + struct ice_pf *pf = dev_get_drvdata(dev); + int ret; + + if (type != hwmon_temp) + return -EOPNOTSUPP; + + ret = ice_aq_get_sensor_reading(&pf->hw, &resp); + if (ret) { + dev_warn_ratelimited(dev, + "%s HW read failure (%d)\n", + __func__, + ret); + return ret; + } + + switch (attr) { + case hwmon_temp_input: + *val = TEMP_FROM_REG(resp.data.s0f0.temp); + break; + case hwmon_temp_max: + *val = TEMP_FROM_REG(resp.data.s0f0.temp_warning_threshold); + break; + case hwmon_temp_crit: + *val = TEMP_FROM_REG(resp.data.s0f0.temp_critical_threshold); + break; + case hwmon_temp_emergency: + *val = TEMP_FROM_REG(resp.data.s0f0.temp_fatal_threshold); + break; + default: + dev_dbg(dev, "%s unsupported attribute (%d)\n", + __func__, attr); + return -EOPNOTSUPP; + } + + return 0; +} + +static umode_t ice_hwmon_is_visible(const void *data, + enum hwmon_sensor_types type, u32 attr, + int channel) +{ + if (type != hwmon_temp) + return 0; + + switch (attr) { + case hwmon_temp_input: + case hwmon_temp_crit: + case hwmon_temp_max: + case hwmon_temp_emergency: + return 0444; + } + + return 0; +} + +static const struct hwmon_ops ice_hwmon_ops = { + .is_visible = ice_hwmon_is_visible, + .read = ice_hwmon_read +}; + +static const struct hwmon_chip_info ice_chip_info = { + .ops = &ice_hwmon_ops, + .info = ice_hwmon_info +}; + +static bool ice_is_internal_reading_supported(struct ice_pf *pf) +{ + /* Only the first PF will report temperature for a chip. + * Note that internal temp reading is not supported + * for older FW (< v4.30). + */ + if (pf->hw.pf_id) + return false; + + unsigned long sensors = pf->hw.dev_caps.supported_sensors; + + return _test_bit(ICE_SENSOR_SUPPORT_E810_INT_TEMP_BIT, &sensors); +}; + +void ice_hwmon_init(struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + struct device *hdev; + + if (!ice_is_internal_reading_supported(pf)) + return; + + hdev = hwmon_device_register_with_info(dev, "ice", pf, &ice_chip_info, + NULL); + if (IS_ERR(hdev)) { + dev_warn(dev, + "hwmon_device_register_with_info returns error (%ld)", + PTR_ERR(hdev)); + return; + } + pf->hwmon_dev = hdev; +} + +void ice_hwmon_exit(struct ice_pf *pf) +{ + if (!pf->hwmon_dev) + return; + hwmon_device_unregister(pf->hwmon_dev); +} diff --git a/drivers/net/ethernet/intel/ice/ice_hwmon.h b/drivers/net/ethernet/intel/ice/ice_hwmon.h new file mode 100644 index 0000000000..d66d40354f --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_hwmon.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2023, Intel Corporation. 
*/ + +#ifndef _ICE_HWMON_H_ +#define _ICE_HWMON_H_ + +#ifdef CONFIG_ICE_HWMON +void ice_hwmon_init(struct ice_pf *pf); +void ice_hwmon_exit(struct ice_pf *pf); +#else /* CONFIG_ICE_HWMON */ +static inline void ice_hwmon_init(struct ice_pf *pf) { } +static inline void ice_hwmon_exit(struct ice_pf *pf) { } +#endif /* CONFIG_ICE_HWMON */ + +#endif /* _ICE_HWMON_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c index b47cd43ae8..a7a3428099 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.c +++ b/drivers/net/ethernet/intel/ice/ice_lag.c @@ -152,6 +152,27 @@ ice_lag_find_hw_by_lport(struct ice_lag *lag, u8 lport) } /** + * ice_pkg_has_lport_extract - check if lport extraction supported + * @hw: HW struct + */ +static bool ice_pkg_has_lport_extract(struct ice_hw *hw) +{ + int i; + + for (i = 0; i < hw->blk[ICE_BLK_SW].es.count; i++) { + u16 offset; + u8 fv_prot; + + ice_find_prot_off(hw, ICE_BLK_SW, ICE_SW_DEFAULT_PROFILE, i, + &fv_prot, &offset); + if (fv_prot == ICE_FV_PROT_MDID && + offset == ICE_LP_EXT_BUF_OFFSET) + return true; + } + return false; +} + +/** * ice_lag_find_primary - returns pointer to primary interfaces lag struct * @lag: local interfaces lag struct */ @@ -208,8 +229,7 @@ ice_lag_cfg_fltr(struct ice_lag *lag, u32 act, u16 recipe_id, u16 *rule_idx, eth_hdr = s_rule->hdr_data; ice_fill_eth_hdr(eth_hdr); - act |= (vsi_num << ICE_SINGLE_ACT_VSI_ID_S) & - ICE_SINGLE_ACT_VSI_ID_M; + act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M, vsi_num); s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX); s_rule->recipe_id = cpu_to_le16(recipe_id); @@ -754,9 +774,7 @@ ice_lag_cfg_cp_fltr(struct ice_lag *lag, bool add) s_rule->act = cpu_to_le32(ICE_FWD_TO_VSI | ICE_SINGLE_ACT_LAN_ENABLE | ICE_SINGLE_ACT_VALID_BIT | - ((vsi->vsi_num << - ICE_SINGLE_ACT_VSI_ID_S) & - ICE_SINGLE_ACT_VSI_ID_M)); + FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M, vsi->vsi_num)); s_rule->hdr_len = cpu_to_le16(ICE_LAG_SRIOV_TRAIN_PKT_LEN); memcpy(s_rule->hdr_data, lacp_train_pkt, LACP_TRAIN_PKT_LEN); opc = ice_aqc_opc_add_sw_rules; @@ -1209,7 +1227,7 @@ static void ice_lag_del_prune_list(struct ice_lag *lag, struct ice_pf *event_pf) } /** - * ice_lag_init_feature_support_flag - Check for NVM support for LAG + * ice_lag_init_feature_support_flag - Check for package and NVM support for LAG * @pf: PF struct */ static void ice_lag_init_feature_support_flag(struct ice_pf *pf) @@ -1222,7 +1240,7 @@ static void ice_lag_init_feature_support_flag(struct ice_pf *pf) else ice_clear_feature_support(pf, ICE_F_ROCE_LAG); - if (caps->sriov_lag) + if (caps->sriov_lag && ice_pkg_has_lport_extract(&pf->hw)) ice_set_feature_support(pf, ICE_F_SRIOV_LAG); else ice_clear_feature_support(pf, ICE_F_SRIOV_LAG); @@ -2023,7 +2041,7 @@ int ice_init_lag(struct ice_pf *pf) /* associate recipes to profiles */ for (n = 0; n < ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER; n++) { err = ice_aq_get_recipe_to_profile(&pf->hw, n, - (u8 *)&recipe_bits, NULL); + &recipe_bits, NULL); if (err) continue; @@ -2031,7 +2049,7 @@ int ice_init_lag(struct ice_pf *pf) recipe_bits |= BIT(lag->pf_recipe) | BIT(lag->lport_recipe); ice_aq_map_recipe_to_profile(&pf->hw, n, - (u8 *)&recipe_bits, NULL); + recipe_bits, NULL); } } diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h index ede833dfa6..183b38792e 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.h +++ b/drivers/net/ethernet/intel/ice/ice_lag.h @@ -17,6 +17,9 @@ enum ice_lag_role { #define ICE_LAG_INVALID_PORT 0xFF #define 
ICE_LAG_RESET_RETRIES 5 +#define ICE_SW_DEFAULT_PROFILE 0 +#define ICE_FV_PROT_MDID 255 +#define ICE_LP_EXT_BUF_OFFSET 32 struct ice_pf; struct ice_vf; diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index 89f986a75c..d384ddfcb8 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -673,6 +673,212 @@ struct ice_tlan_ctx { * Use the enum ice_rx_l2_ptype to decode the packet type * ENDIF */ +#define ICE_PTYPES \ + /* L2 Packet types */ \ + ICE_PTT_UNUSED_ENTRY(0), \ + ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), \ + ICE_PTT_UNUSED_ENTRY(2), \ + ICE_PTT_UNUSED_ENTRY(3), \ + ICE_PTT_UNUSED_ENTRY(4), \ + ICE_PTT_UNUSED_ENTRY(5), \ + ICE_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \ + ICE_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \ + ICE_PTT_UNUSED_ENTRY(8), \ + ICE_PTT_UNUSED_ENTRY(9), \ + ICE_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \ + ICE_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \ + ICE_PTT_UNUSED_ENTRY(12), \ + ICE_PTT_UNUSED_ENTRY(13), \ + ICE_PTT_UNUSED_ENTRY(14), \ + ICE_PTT_UNUSED_ENTRY(15), \ + ICE_PTT_UNUSED_ENTRY(16), \ + ICE_PTT_UNUSED_ENTRY(17), \ + ICE_PTT_UNUSED_ENTRY(18), \ + ICE_PTT_UNUSED_ENTRY(19), \ + ICE_PTT_UNUSED_ENTRY(20), \ + ICE_PTT_UNUSED_ENTRY(21), \ + \ + /* Non Tunneled IPv4 */ \ + ICE_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), \ + ICE_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), \ + ICE_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(25), \ + ICE_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), \ + ICE_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), \ + ICE_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), \ + \ + /* IPv4 --> IPv4 */ \ + ICE_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), \ + ICE_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), \ + ICE_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(32), \ + ICE_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), \ + ICE_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), \ + ICE_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), \ + \ + /* IPv4 --> IPv6 */ \ + ICE_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), \ + ICE_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), \ + ICE_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(39), \ + ICE_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), \ + ICE_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), \ + ICE_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), \ + \ + /* IPv4 --> GRE/NAT */ \ + ICE_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), \ + \ + /* IPv4 --> GRE/NAT --> IPv4 */ \ + ICE_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), \ + ICE_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), \ + ICE_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(47), \ + ICE_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), \ + ICE_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), \ + ICE_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), \ + \ + /* IPv4 --> GRE/NAT --> IPv6 */ \ + ICE_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), \ + ICE_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), \ + ICE_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(54), \ + ICE_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), \ + 
ICE_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), \ + ICE_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), \ + \ + /* IPv4 --> GRE/NAT --> MAC */ \ + ICE_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), \ + \ + /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ \ + ICE_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), \ + ICE_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), \ + ICE_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(62), \ + ICE_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), \ + ICE_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), \ + ICE_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), \ + \ + /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ \ + ICE_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), \ + ICE_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), \ + ICE_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(69), \ + ICE_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), \ + ICE_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), \ + ICE_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), \ + \ + /* IPv4 --> GRE/NAT --> MAC/VLAN */ \ + ICE_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), \ + \ + /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ \ + ICE_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), \ + ICE_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), \ + ICE_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(77), \ + ICE_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), \ + ICE_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), \ + ICE_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), \ + \ + /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ \ + ICE_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), \ + ICE_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), \ + ICE_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(84), \ + ICE_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), \ + ICE_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), \ + ICE_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), \ + \ + /* Non Tunneled IPv6 */ \ + ICE_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), \ + ICE_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), \ + ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(91), \ + ICE_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), \ + ICE_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), \ + ICE_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), \ + \ + /* IPv6 --> IPv4 */ \ + ICE_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), \ + ICE_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), \ + ICE_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(98), \ + ICE_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), \ + ICE_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), \ + ICE_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), \ + \ + /* IPv6 --> IPv6 */ \ + ICE_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), \ + ICE_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), \ + ICE_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(105), \ + ICE_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, 
NOF, TCP, PAY4), \ + ICE_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), \ + ICE_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), \ + \ + /* IPv6 --> GRE/NAT */ \ + ICE_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), \ + \ + /* IPv6 --> GRE/NAT -> IPv4 */ \ + ICE_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), \ + ICE_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), \ + ICE_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(113), \ + ICE_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), \ + ICE_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), \ + ICE_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), \ + \ + /* IPv6 --> GRE/NAT -> IPv6 */ \ + ICE_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), \ + ICE_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), \ + ICE_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(120), \ + ICE_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), \ + ICE_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), \ + ICE_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), \ + \ + /* IPv6 --> GRE/NAT -> MAC */ \ + ICE_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), \ + \ + /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ \ + ICE_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), \ + ICE_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), \ + ICE_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(128), \ + ICE_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), \ + ICE_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), \ + ICE_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), \ + \ + /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ \ + ICE_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), \ + ICE_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), \ + ICE_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(135), \ + ICE_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), \ + ICE_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), \ + ICE_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), \ + \ + /* IPv6 --> GRE/NAT -> MAC/VLAN */ \ + ICE_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), \ + \ + /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ \ + ICE_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), \ + ICE_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), \ + ICE_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(143), \ + ICE_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), \ + ICE_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), \ + ICE_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), \ + \ + /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ \ + ICE_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), \ + ICE_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), \ + ICE_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(150), \ + ICE_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), \ + ICE_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), \ + ICE_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), + +#define ICE_NUM_DEFINED_PTYPES 154 /* macro to make the table 
lines short, use explicit indexing with [PTYPE] */ #define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ @@ -695,212 +901,10 @@ struct ice_tlan_ctx { /* Lookup table mapping in the 10-bit HW PTYPE to the bit field for decoding */ static const struct ice_rx_ptype_decoded ice_ptype_lkup[BIT(10)] = { - /* L2 Packet types */ - ICE_PTT_UNUSED_ENTRY(0), - ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - ICE_PTT_UNUSED_ENTRY(2), - ICE_PTT_UNUSED_ENTRY(3), - ICE_PTT_UNUSED_ENTRY(4), - ICE_PTT_UNUSED_ENTRY(5), - ICE_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), - ICE_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), - ICE_PTT_UNUSED_ENTRY(8), - ICE_PTT_UNUSED_ENTRY(9), - ICE_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), - ICE_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), - ICE_PTT_UNUSED_ENTRY(12), - ICE_PTT_UNUSED_ENTRY(13), - ICE_PTT_UNUSED_ENTRY(14), - ICE_PTT_UNUSED_ENTRY(15), - ICE_PTT_UNUSED_ENTRY(16), - ICE_PTT_UNUSED_ENTRY(17), - ICE_PTT_UNUSED_ENTRY(18), - ICE_PTT_UNUSED_ENTRY(19), - ICE_PTT_UNUSED_ENTRY(20), - ICE_PTT_UNUSED_ENTRY(21), - - /* Non Tunneled IPv4 */ - ICE_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), - ICE_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), - ICE_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(25), - ICE_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), - ICE_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), - ICE_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), - - /* IPv4 --> IPv4 */ - ICE_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), - ICE_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), - ICE_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(32), - ICE_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), - ICE_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), - ICE_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), - - /* IPv4 --> IPv6 */ - ICE_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), - ICE_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), - ICE_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(39), - ICE_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), - ICE_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), - ICE_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT */ - ICE_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), - - /* IPv4 --> GRE/NAT --> IPv4 */ - ICE_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), - ICE_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), - ICE_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(47), - ICE_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), - ICE_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), - ICE_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT --> IPv6 */ - ICE_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), - ICE_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), - ICE_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(54), - ICE_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), - ICE_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), - ICE_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT --> MAC */ - ICE_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), - - /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ - ICE_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), - ICE_PTT(60, IP, 
IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), - ICE_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(62), - ICE_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), - ICE_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), - ICE_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ - ICE_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), - ICE_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), - ICE_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(69), - ICE_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), - ICE_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), - ICE_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT --> MAC/VLAN */ - ICE_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), - - /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ - ICE_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), - ICE_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), - ICE_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(77), - ICE_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), - ICE_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), - ICE_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), - - /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ - ICE_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), - ICE_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), - ICE_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(84), - ICE_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), - ICE_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), - ICE_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), - - /* Non Tunneled IPv6 */ - ICE_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), - ICE_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), - ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(91), - ICE_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), - ICE_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), - ICE_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), - - /* IPv6 --> IPv4 */ - ICE_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), - ICE_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), - ICE_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(98), - ICE_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), - ICE_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), - ICE_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> IPv6 */ - ICE_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), - ICE_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), - ICE_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(105), - ICE_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), - ICE_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), - ICE_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT */ - ICE_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), - - /* IPv6 --> GRE/NAT -> IPv4 */ - ICE_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), - ICE_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), - ICE_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(113), - 
ICE_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), - ICE_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), - ICE_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> IPv6 */ - ICE_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), - ICE_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), - ICE_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(120), - ICE_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), - ICE_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), - ICE_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC */ - ICE_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), - - /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ - ICE_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), - ICE_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), - ICE_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(128), - ICE_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), - ICE_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), - ICE_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ - ICE_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), - ICE_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), - ICE_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(135), - ICE_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), - ICE_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), - ICE_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC/VLAN */ - ICE_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), - - /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ - ICE_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), - ICE_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), - ICE_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(143), - ICE_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), - ICE_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), - ICE_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ - ICE_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), - ICE_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), - ICE_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(150), - ICE_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), - ICE_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), - ICE_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), + ICE_PTYPES /* unused entries */ - [154 ... 1023] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 } + [ICE_NUM_DEFINED_PTYPES ... 1023] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 } }; static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype) diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index c01950de44..cfc20684f2 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -212,11 +212,18 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi) vsi->alloc_txq)); break; case ICE_VSI_SWITCHDEV_CTRL: - /* The number of queues for ctrl VSI is equal to number of VFs. 
+ /* The number of queues for ctrl VSI is equal to number of PRs * Each ring is associated to the corresponding VF_PR netdev. + * Tx and Rx rings are always equal */ - vsi->alloc_txq = ice_get_num_vfs(pf); - vsi->alloc_rxq = vsi->alloc_txq; + if (vsi->req_txq && vsi->req_rxq) { + vsi->alloc_txq = vsi->req_txq; + vsi->alloc_rxq = vsi->req_rxq; + } else { + vsi->alloc_txq = 1; + vsi->alloc_rxq = 1; + } + vsi->num_q_vectors = 1; break; case ICE_VSI_VF: @@ -519,16 +526,14 @@ static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *d { struct ice_q_vector *q_vector = (struct ice_q_vector *)data; struct ice_pf *pf = q_vector->vsi->back; - struct ice_vf *vf; - unsigned int bkt; + struct ice_repr *repr; + unsigned long id; if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring) return IRQ_HANDLED; - rcu_read_lock(); - ice_for_each_vf_rcu(pf, bkt, vf) - napi_schedule(&vf->repr->q_vector->napi); - rcu_read_unlock(); + xa_for_each(&pf->eswitch.reprs, id, repr) + napi_schedule(&repr->q_vector->napi); return IRQ_HANDLED; } @@ -969,9 +974,8 @@ static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt) /* Traffic from VSI can be sent to LAN */ ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA; /* allow all untagged/tagged packets by default on Tx */ - ctxt->info.inner_vlan_flags = ((ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL & - ICE_AQ_VSI_INNER_VLAN_TX_MODE_M) >> - ICE_AQ_VSI_INNER_VLAN_TX_MODE_S); + ctxt->info.inner_vlan_flags = FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_TX_MODE_M, + ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL); /* SVM - by default bits 3 and 4 in inner_vlan_flags are 0's which * results in legacy behavior (show VLAN, DEI, and UP) in descriptor. * @@ -982,13 +986,11 @@ static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt) FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_EMODE_M, ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING); ctxt->info.outer_vlan_flags = - (ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL << - ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) & - ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M; + FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M, + ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL); ctxt->info.outer_vlan_flags |= - (ICE_AQ_VSI_OUTER_TAG_VLAN_8100 << - ICE_AQ_VSI_OUTER_TAG_TYPE_S) & - ICE_AQ_VSI_OUTER_TAG_TYPE_M; + FIELD_PREP(ICE_AQ_VSI_OUTER_TAG_TYPE_M, + ICE_AQ_VSI_OUTER_TAG_VLAN_8100); ctxt->info.outer_vlan_flags |= FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_EMODE_M, ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING); @@ -1067,10 +1069,8 @@ static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) vsi->tc_cfg.tc_info[i].qcount_tx = num_txq_per_tc; vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++; - qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) & - ICE_AQ_VSI_TC_Q_OFFSET_M) | - ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & - ICE_AQ_VSI_TC_Q_NUM_M); + qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset); + qmap |= FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, pow); offset += num_rxq_per_tc; tx_count += num_txq_per_tc; ctxt->info.tc_mapping[i] = cpu_to_le16(qmap); @@ -1153,18 +1153,14 @@ static void ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) ctxt->info.max_fd_fltr_shared = cpu_to_le16(vsi->num_bfltr); /* default queue index within the VSI of the default FD */ - val = ((dflt_q << ICE_AQ_VSI_FD_DEF_Q_S) & - ICE_AQ_VSI_FD_DEF_Q_M); + val = FIELD_PREP(ICE_AQ_VSI_FD_DEF_Q_M, dflt_q); /* target queue or queue group to the FD filter */ - val |= ((dflt_q_group << ICE_AQ_VSI_FD_DEF_GRP_S) & - ICE_AQ_VSI_FD_DEF_GRP_M); + val |= FIELD_PREP(ICE_AQ_VSI_FD_DEF_GRP_M, dflt_q_group); ctxt->info.fd_def_q = cpu_to_le16(val); /* queue index on 
which FD filter completion is reported */ - val = ((report_q << ICE_AQ_VSI_FD_REPORT_Q_S) & - ICE_AQ_VSI_FD_REPORT_Q_M); + val = FIELD_PREP(ICE_AQ_VSI_FD_REPORT_Q_M, report_q); /* priority of the default qindex action */ - val |= ((dflt_q_prio << ICE_AQ_VSI_FD_DEF_PRIORITY_S) & - ICE_AQ_VSI_FD_DEF_PRIORITY_M); + val |= FIELD_PREP(ICE_AQ_VSI_FD_DEF_PRIORITY_M, dflt_q_prio); ctxt->info.fd_report_opt = cpu_to_le16(val); } @@ -1187,12 +1183,10 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) case ICE_VSI_PF: /* PF VSI will inherit RSS instance of PF */ lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF; - hash_type = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ; break; case ICE_VSI_VF: /* VF VSI will gets a small RSS table which is a VSI LUT type */ lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI; - hash_type = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ; break; default: dev_dbg(dev, "Unsupported VSI type %s\n", @@ -1200,9 +1194,12 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) return; } - ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) & - ICE_AQ_VSI_Q_OPT_RSS_LUT_M) | - (hash_type & ICE_AQ_VSI_Q_OPT_RSS_HASH_M); + hash_type = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ; + vsi->rss_hfunc = hash_type; + + ctxt->info.q_opt_rss = + FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_LUT_M, lut_type) | + FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hash_type); } static void @@ -1216,10 +1213,8 @@ ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) qcount = min_t(int, vsi->num_rxq, pf->num_lan_msix); pow = order_base_2(qcount); - qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) & - ICE_AQ_VSI_TC_Q_OFFSET_M) | - ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & - ICE_AQ_VSI_TC_Q_NUM_M); + qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset); + qmap |= FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, pow); ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG); @@ -1601,12 +1596,44 @@ static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi) return; } - status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, ICE_DEFAULT_RSS_HENA); + status = ice_add_avf_rss_cfg(&pf->hw, vsi, ICE_DEFAULT_RSS_HENA); if (status) dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n", vsi->vsi_num, status); } +static const struct ice_rss_hash_cfg default_rss_cfgs[] = { + /* configure RSS for IPv4 with input set IP src/dst */ + {ICE_FLOW_SEG_HDR_IPV4, ICE_FLOW_HASH_IPV4, ICE_RSS_ANY_HEADERS, false}, + /* configure RSS for IPv6 with input set IPv6 src/dst */ + {ICE_FLOW_SEG_HDR_IPV6, ICE_FLOW_HASH_IPV6, ICE_RSS_ANY_HEADERS, false}, + /* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */ + {ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4, + ICE_HASH_TCP_IPV4, ICE_RSS_ANY_HEADERS, false}, + /* configure RSS for udp4 with input set IP src/dst, UDP src/dst */ + {ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4, + ICE_HASH_UDP_IPV4, ICE_RSS_ANY_HEADERS, false}, + /* configure RSS for sctp4 with input set IP src/dst - only support + * RSS on SCTPv4 on outer headers (non-tunneled) + */ + {ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4, + ICE_HASH_SCTP_IPV4, ICE_RSS_OUTER_HEADERS, false}, + /* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */ + {ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6, + ICE_HASH_TCP_IPV6, ICE_RSS_ANY_HEADERS, false}, + /* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */ + {ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6, + ICE_HASH_UDP_IPV6, ICE_RSS_ANY_HEADERS, false}, + /* configure RSS for sctp6 with input set IPv6 
src/dst - only support + * RSS on SCTPv6 on outer headers (non-tunneled) + */ + {ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6, + ICE_HASH_SCTP_IPV6, ICE_RSS_OUTER_HEADERS, false}, + /* configure RSS for IPSEC ESP SPI with input set MAC_IPV4_SPI */ + {ICE_FLOW_SEG_HDR_ESP, + ICE_FLOW_HASH_ESP_SPI, ICE_RSS_OUTER_HEADERS, false}, +}; + /** * ice_vsi_set_rss_flow_fld - Sets RSS input set for different flows * @vsi: VSI to be configured @@ -1620,11 +1647,12 @@ static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi) */ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi) { - u16 vsi_handle = vsi->idx, vsi_num = vsi->vsi_num; + u16 vsi_num = vsi->vsi_num; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; struct device *dev; int status; + u32 i; dev = ice_pf_to_dev(pf); if (ice_is_safe_mode(pf)) { @@ -1632,67 +1660,15 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi) vsi_num); return; } - /* configure RSS for IPv4 with input set IP src/dst */ - status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4, - ICE_FLOW_SEG_HDR_IPV4); - if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %d\n", - vsi_num, status); - - /* configure RSS for IPv6 with input set IPv6 src/dst */ - status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6, - ICE_FLOW_SEG_HDR_IPV6); - if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %d\n", - vsi_num, status); - - /* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */ - status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4, - ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4); - if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %d\n", - vsi_num, status); - - /* configure RSS for udp4 with input set IP src/dst, UDP src/dst */ - status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV4, - ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4); - if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %d\n", - vsi_num, status); - - /* configure RSS for sctp4 with input set IP src/dst */ - status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4, - ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4); - if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %d\n", - vsi_num, status); - - /* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */ - status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV6, - ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6); - if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %d\n", - vsi_num, status); + for (i = 0; i < ARRAY_SIZE(default_rss_cfgs); i++) { + const struct ice_rss_hash_cfg *cfg = &default_rss_cfgs[i]; - /* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */ - status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV6, - ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6); - if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %d\n", - vsi_num, status); - - /* configure RSS for sctp6 with input set IPv6 src/dst */ - status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6, - ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6); - if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n", - vsi_num, status); - - status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_ESP_SPI, - ICE_FLOW_SEG_HDR_ESP); - if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for esp/spi flow, vsi = %d, error = %d\n", - vsi_num, status); + status = 
ice_add_rss_cfg(hw, vsi, cfg); + if (status) + dev_dbg(dev, "ice_add_rss_cfg failed, addl_hdrs = %x, hash_flds = %llx, hdr_type = %d, symm = %d\n", + cfg->addl_hdrs, cfg->hash_flds, + cfg->hdr_type, cfg->symm); + } } /** @@ -1809,11 +1785,8 @@ ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio, QRXFLXP_CNTXT_RXDID_PRIO_M | QRXFLXP_CNTXT_TS_M); - regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) & - QRXFLXP_CNTXT_RXDID_IDX_M; - - regval |= (prio << QRXFLXP_CNTXT_RXDID_PRIO_S) & - QRXFLXP_CNTXT_RXDID_PRIO_M; + regval |= FIELD_PREP(QRXFLXP_CNTXT_RXDID_IDX_M, rxdid); + regval |= FIELD_PREP(QRXFLXP_CNTXT_RXDID_PRIO_M, prio); if (ena_ts) /* Enable TimeSync on this queue */ @@ -2451,6 +2424,10 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) goto unroll_vector_base; ice_vsi_map_rings_to_vectors(vsi); + + /* Associate q_vector rings to napi */ + ice_vsi_set_napi_queues(vsi); + vsi->stat_offsets_loaded = false; if (ice_is_xdp_ena_vsi(vsi)) { @@ -2927,6 +2904,123 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi) } /** + * __ice_queue_set_napi - Set the napi instance for the queue + * @dev: device to which NAPI and queue belong + * @queue_index: Index of queue + * @type: queue type as RX or TX + * @napi: NAPI context + * @locked: is the rtnl_lock already held + * + * Set the napi instance for the queue. Caller indicates the lock status. + */ +static void +__ice_queue_set_napi(struct net_device *dev, unsigned int queue_index, + enum netdev_queue_type type, struct napi_struct *napi, + bool locked) +{ + if (!locked) + rtnl_lock(); + netif_queue_set_napi(dev, queue_index, type, napi); + if (!locked) + rtnl_unlock(); +} + +/** + * ice_queue_set_napi - Set the napi instance for the queue + * @vsi: VSI being configured + * @queue_index: Index of queue + * @type: queue type as RX or TX + * @napi: NAPI context + * + * Set the napi instance for the queue. The rtnl lock state is derived from the + * execution path. + */ +void +ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index, + enum netdev_queue_type type, struct napi_struct *napi) +{ + struct ice_pf *pf = vsi->back; + + if (!vsi->netdev) + return; + + if (current_work() == &pf->serv_task || + test_bit(ICE_PREPARED_FOR_RESET, pf->state) || + test_bit(ICE_DOWN, pf->state) || + test_bit(ICE_SUSPENDED, pf->state)) + __ice_queue_set_napi(vsi->netdev, queue_index, type, napi, + false); + else + __ice_queue_set_napi(vsi->netdev, queue_index, type, napi, + true); +} + +/** + * __ice_q_vector_set_napi_queues - Map queue[s] associated with the napi + * @q_vector: q_vector pointer + * @locked: is the rtnl_lock already held + * + * Associate the q_vector napi with all the queue[s] on the vector. + * Caller indicates the lock status. 
+ */ +void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked) +{ + struct ice_rx_ring *rx_ring; + struct ice_tx_ring *tx_ring; + + ice_for_each_rx_ring(rx_ring, q_vector->rx) + __ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index, + NETDEV_QUEUE_TYPE_RX, &q_vector->napi, + locked); + + ice_for_each_tx_ring(tx_ring, q_vector->tx) + __ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index, + NETDEV_QUEUE_TYPE_TX, &q_vector->napi, + locked); + /* Also set the interrupt number for the NAPI */ + netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq); +} + +/** + * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi + * @q_vector: q_vector pointer + * + * Associate the q_vector napi with all the queue[s] on the vector + */ +void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector) +{ + struct ice_rx_ring *rx_ring; + struct ice_tx_ring *tx_ring; + + ice_for_each_rx_ring(rx_ring, q_vector->rx) + ice_queue_set_napi(q_vector->vsi, rx_ring->q_index, + NETDEV_QUEUE_TYPE_RX, &q_vector->napi); + + ice_for_each_tx_ring(tx_ring, q_vector->tx) + ice_queue_set_napi(q_vector->vsi, tx_ring->q_index, + NETDEV_QUEUE_TYPE_TX, &q_vector->napi); + /* Also set the interrupt number for the NAPI */ + netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq); +} + +/** + * ice_vsi_set_napi_queues + * @vsi: VSI pointer + * + * Associate queue[s] with napi for all vectors + */ +void ice_vsi_set_napi_queues(struct ice_vsi *vsi) +{ + int i; + + if (!vsi->netdev) + return; + + ice_for_each_q_vector(vsi, i) + ice_q_vector_set_napi_queues(vsi->q_vectors[i]); +} + +/** * ice_vsi_release - Delete a VSI and free its resources * @vsi: the VSI being removed * @@ -3071,27 +3165,26 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi, } /** - * ice_vsi_realloc_stat_arrays - Frees unused stat structures + * ice_vsi_realloc_stat_arrays - Frees unused stat structures or alloc new ones * @vsi: VSI pointer - * @prev_txq: Number of Tx rings before ring reallocation - * @prev_rxq: Number of Rx rings before ring reallocation */ -static void -ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq) +static int +ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi) { + u16 req_txq = vsi->req_txq ? vsi->req_txq : vsi->alloc_txq; + u16 req_rxq = vsi->req_rxq ? 
vsi->req_rxq : vsi->alloc_rxq; + struct ice_ring_stats **tx_ring_stats; + struct ice_ring_stats **rx_ring_stats; struct ice_vsi_stats *vsi_stat; struct ice_pf *pf = vsi->back; + u16 prev_txq = vsi->alloc_txq; + u16 prev_rxq = vsi->alloc_rxq; int i; - if (!prev_txq || !prev_rxq) - return; - if (vsi->type == ICE_VSI_CHNL) - return; - vsi_stat = pf->vsi_stats[vsi->idx]; - if (vsi->num_txq < prev_txq) { - for (i = vsi->num_txq; i < prev_txq; i++) { + if (req_txq < prev_txq) { + for (i = req_txq; i < prev_txq; i++) { if (vsi_stat->tx_ring_stats[i]) { kfree_rcu(vsi_stat->tx_ring_stats[i], rcu); WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL); @@ -3099,14 +3192,36 @@ ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq) } } - if (vsi->num_rxq < prev_rxq) { - for (i = vsi->num_rxq; i < prev_rxq; i++) { + tx_ring_stats = vsi_stat->tx_ring_stats; + vsi_stat->tx_ring_stats = + krealloc_array(vsi_stat->tx_ring_stats, req_txq, + sizeof(*vsi_stat->tx_ring_stats), + GFP_KERNEL | __GFP_ZERO); + if (!vsi_stat->tx_ring_stats) { + vsi_stat->tx_ring_stats = tx_ring_stats; + return -ENOMEM; + } + + if (req_rxq < prev_rxq) { + for (i = req_rxq; i < prev_rxq; i++) { if (vsi_stat->rx_ring_stats[i]) { kfree_rcu(vsi_stat->rx_ring_stats[i], rcu); WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL); } } } + + rx_ring_stats = vsi_stat->rx_ring_stats; + vsi_stat->rx_ring_stats = + krealloc_array(vsi_stat->rx_ring_stats, req_rxq, + sizeof(*vsi_stat->rx_ring_stats), + GFP_KERNEL | __GFP_ZERO); + if (!vsi_stat->rx_ring_stats) { + vsi_stat->rx_ring_stats = rx_ring_stats; + return -ENOMEM; + } + + return 0; } /** @@ -3123,9 +3238,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags) { struct ice_vsi_cfg_params params = {}; struct ice_coalesce_stored *coalesce; - int ret, prev_txq, prev_rxq; - int prev_num_q_vectors = 0; + int prev_num_q_vectors; struct ice_pf *pf; + int ret; if (!vsi) return -EINVAL; @@ -3137,6 +3252,15 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags) if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf)) return -EINVAL; + ret = ice_vsi_realloc_stat_arrays(vsi); + if (ret) + goto err_vsi_cfg; + + ice_vsi_decfg(vsi); + ret = ice_vsi_cfg_def(vsi, &params); + if (ret) + goto err_vsi_cfg; + coalesce = kcalloc(vsi->num_q_vectors, sizeof(struct ice_coalesce_stored), GFP_KERNEL); if (!coalesce) @@ -3144,14 +3268,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags) prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce); - prev_txq = vsi->num_txq; - prev_rxq = vsi->num_rxq; - - ice_vsi_decfg(vsi); - ret = ice_vsi_cfg_def(vsi, &params); - if (ret) - goto err_vsi_cfg; - ret = ice_vsi_cfg_tc_lan(pf, vsi); if (ret) { if (vsi_flags & ICE_VSI_FLAG_INIT) { @@ -3163,8 +3279,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags) return ice_schedule_reset(pf, ICE_RESET_PFR); } - ice_vsi_realloc_stat_arrays(vsi, prev_txq, prev_rxq); - ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors); kfree(coalesce); @@ -3172,8 +3286,8 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags) err_vsi_cfg_tc_lan: ice_vsi_decfg(vsi); -err_vsi_cfg: kfree(coalesce); +err_vsi_cfg: return ret; } @@ -3316,9 +3430,8 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt, vsi->tc_cfg.ena_tc = ena_tc ?
ena_tc : 1; pow = order_base_2(tc0_qcount); - qmap = ((tc0_offset << ICE_AQ_VSI_TC_Q_OFFSET_S) & - ICE_AQ_VSI_TC_Q_OFFSET_M) | - ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & ICE_AQ_VSI_TC_Q_NUM_M); + qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, tc0_offset); + qmap |= FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, pow); ice_for_each_traffic_class(i) { if (!(vsi->tc_cfg.ena_tc & BIT(i))) { diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index f24f5d1e6f..bfcfc582a4 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -91,6 +91,16 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc); struct ice_vsi * ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params); +void +ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index, + enum netdev_queue_type type, struct napi_struct *napi); + +void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked); + +void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector); + +void ice_vsi_set_napi_queues(struct ice_vsi *vsi); + int ice_vsi_release(struct ice_vsi *vsi); void ice_vsi_close(struct ice_vsi *vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index dabf33cec3..6d256dbcb7 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -14,6 +14,7 @@ #include "ice_dcb_lib.h" #include "ice_dcb_nl.h" #include "ice_devlink.h" +#include "ice_hwmon.h" /* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the * ice tracepoint functions. This must be done exactly once across the * ice driver. @@ -979,7 +980,7 @@ static void ice_set_dflt_mib(struct ice_pf *pf) * Octets 13 - 20 are TSA values - leave as zeros */ buf[5] = 0x64; - len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S; + len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen); offset += len + 2; tlv = (struct ice_lldp_org_tlv *) ((char *)tlv + sizeof(tlv->typelen) + len); @@ -1013,7 +1014,7 @@ static void ice_set_dflt_mib(struct ice_pf *pf) /* Octet 1 left as all zeros - PFC disabled */ buf[0] = 0x08; - len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S; + len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen); offset += len + 2; if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL)) @@ -1252,6 +1253,32 @@ ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event) } /** + * ice_get_fwlog_data - copy the FW log data from ARQ event + * @pf: PF that the FW log event is associated with + * @event: event structure containing FW log data + */ +static void +ice_get_fwlog_data(struct ice_pf *pf, struct ice_rq_event_info *event) +{ + struct ice_fwlog_data *fwlog; + struct ice_hw *hw = &pf->hw; + + fwlog = &hw->fwlog_ring.rings[hw->fwlog_ring.tail]; + + memset(fwlog->data, 0, PAGE_SIZE); + fwlog->data_size = le16_to_cpu(event->desc.datalen); + + memcpy(fwlog->data, event->msg_buf, fwlog->data_size); + ice_fwlog_ring_increment(&hw->fwlog_ring.tail, hw->fwlog_ring.size); + + if (ice_fwlog_ring_full(&hw->fwlog_ring)) { + /* the rings are full so bump the head to create room */ + ice_fwlog_ring_increment(&hw->fwlog_ring.head, + hw->fwlog_ring.size); + } +} + +/** * ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware * @pf: pointer to the PF private structure * @task: intermediate helper storage and identifier for waiting @@ -1532,8 +1559,8 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) 
ice_vc_process_vf_msg(pf, &event, &data); break; - case ice_aqc_opc_fw_logging: - ice_output_fw_log(hw, &event.desc, event.msg_buf); + case ice_aqc_opc_fw_logs_event: + ice_get_fwlog_data(pf, &event); break; case ice_aqc_opc_lldp_set_mib_change: ice_dcb_process_lldp_set_mib_change(pf, &event); @@ -1744,14 +1771,10 @@ static void ice_handle_mdd_event(struct ice_pf *pf) /* find what triggered an MDD event */ reg = rd32(hw, GL_MDET_TX_PQM); if (reg & GL_MDET_TX_PQM_VALID_M) { - u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >> - GL_MDET_TX_PQM_PF_NUM_S; - u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >> - GL_MDET_TX_PQM_VF_NUM_S; - u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >> - GL_MDET_TX_PQM_MAL_TYPE_S; - u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >> - GL_MDET_TX_PQM_QNUM_S); + u8 pf_num = FIELD_GET(GL_MDET_TX_PQM_PF_NUM_M, reg); + u16 vf_num = FIELD_GET(GL_MDET_TX_PQM_VF_NUM_M, reg); + u8 event = FIELD_GET(GL_MDET_TX_PQM_MAL_TYPE_M, reg); + u16 queue = FIELD_GET(GL_MDET_TX_PQM_QNUM_M, reg); if (netif_msg_tx_err(pf)) dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", @@ -1761,14 +1784,10 @@ static void ice_handle_mdd_event(struct ice_pf *pf) reg = rd32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw)); if (reg & GL_MDET_TX_TCLAN_VALID_M) { - u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >> - GL_MDET_TX_TCLAN_PF_NUM_S; - u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >> - GL_MDET_TX_TCLAN_VF_NUM_S; - u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >> - GL_MDET_TX_TCLAN_MAL_TYPE_S; - u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >> - GL_MDET_TX_TCLAN_QNUM_S); + u8 pf_num = FIELD_GET(GL_MDET_TX_TCLAN_PF_NUM_M, reg); + u16 vf_num = FIELD_GET(GL_MDET_TX_TCLAN_VF_NUM_M, reg); + u8 event = FIELD_GET(GL_MDET_TX_TCLAN_MAL_TYPE_M, reg); + u16 queue = FIELD_GET(GL_MDET_TX_TCLAN_QNUM_M, reg); if (netif_msg_tx_err(pf)) dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", @@ -1778,14 +1797,10 @@ static void ice_handle_mdd_event(struct ice_pf *pf) reg = rd32(hw, GL_MDET_RX); if (reg & GL_MDET_RX_VALID_M) { - u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >> - GL_MDET_RX_PF_NUM_S; - u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >> - GL_MDET_RX_VF_NUM_S; - u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >> - GL_MDET_RX_MAL_TYPE_S; - u16 queue = ((reg & GL_MDET_RX_QNUM_M) >> - GL_MDET_RX_QNUM_S); + u8 pf_num = FIELD_GET(GL_MDET_RX_PF_NUM_M, reg); + u16 vf_num = FIELD_GET(GL_MDET_RX_VF_NUM_M, reg); + u8 event = FIELD_GET(GL_MDET_RX_MAL_TYPE_M, reg); + u16 queue = FIELD_GET(GL_MDET_RX_QNUM_M, reg); if (netif_msg_rx_err(pf)) dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n", @@ -3031,6 +3046,7 @@ static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp) static void ice_ena_misc_vector(struct ice_pf *pf) { struct ice_hw *hw = &pf->hw; + u32 pf_intr_start_offset; u32 val; /* Disable anti-spoof detection interrupt to prevent spurious event @@ -3059,6 +3075,47 @@ static void ice_ena_misc_vector(struct ice_pf *pf) /* SW_ITR_IDX = 0, but don't change INTENA */ wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index), GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M); + + if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) + return; + pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST; + wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset), + GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M); +} + +/** + * ice_ll_ts_intr - ll_ts interrupt handler + * @irq: interrupt number + * @data: pointer to a q_vector + */ +static 
irqreturn_t ice_ll_ts_intr(int __always_unused irq, void *data) +{ + struct ice_pf *pf = data; + u32 pf_intr_start_offset; + struct ice_ptp_tx *tx; + unsigned long flags; + struct ice_hw *hw; + u32 val; + u8 idx; + + hw = &pf->hw; + tx = &pf->ptp.port.tx; + spin_lock_irqsave(&tx->lock, flags); + ice_ptp_complete_tx_single_tstamp(tx); + + idx = find_next_bit_wrap(tx->in_use, tx->len, + tx->last_ll_ts_idx_read + 1); + if (idx != tx->len) + ice_ptp_req_tx_single_tstamp(tx, idx); + spin_unlock_irqrestore(&tx->lock, flags); + + val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M | + (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S); + pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST; + wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset), + val); + + return IRQ_HANDLED; } /** @@ -3069,6 +3126,7 @@ static void ice_ena_misc_vector(struct ice_pf *pf) static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) { struct ice_pf *pf = (struct ice_pf *)data; + irqreturn_t ret = IRQ_HANDLED; struct ice_hw *hw = &pf->hw; struct device *dev; u32 oicr, ena_mask; @@ -3108,8 +3166,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) /* we have a reset warning */ ena_mask &= ~PFINT_OICR_GRST_M; - reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >> - GLGEN_RSTAT_RESET_TYPE_S; + reset = FIELD_GET(GLGEN_RSTAT_RESET_TYPE_M, + rd32(hw, GLGEN_RSTAT)); if (reset == ICE_RESET_CORER) pf->corer_count++; @@ -3150,8 +3208,22 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) if (oicr & PFINT_OICR_TSYN_TX_M) { ena_mask &= ~PFINT_OICR_TSYN_TX_M; - if (!hw->reset_ongoing && ice_ptp_pf_handles_tx_interrupt(pf)) + if (ice_pf_state_is_nominal(pf) && + pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) { + struct ice_ptp_tx *tx = &pf->ptp.port.tx; + unsigned long flags; + u8 idx; + + spin_lock_irqsave(&tx->lock, flags); + idx = find_next_bit_wrap(tx->in_use, tx->len, + tx->last_ll_ts_idx_read + 1); + if (idx != tx->len) + ice_ptp_req_tx_single_tstamp(tx, idx); + spin_unlock_irqrestore(&tx->lock, flags); + } else if (ice_ptp_pf_handles_tx_interrupt(pf)) { set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread); + ret = IRQ_WAKE_THREAD; + } } if (oicr & PFINT_OICR_TSYN_EVNT_M) { @@ -3167,7 +3239,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) GLTSYN_STAT_EVENT1_M | GLTSYN_STAT_EVENT2_M); - set_bit(ICE_MISC_THREAD_EXTTS_EVENT, pf->misc_thread); + ice_ptp_extts_event(pf); } } @@ -3190,8 +3262,11 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) set_bit(ICE_PFR_REQ, pf->state); } } + ice_service_task_schedule(pf); + if (ret == IRQ_HANDLED) + ice_irq_dynamic_ena(hw, NULL, NULL); - return IRQ_WAKE_THREAD; + return ret; } /** @@ -3207,12 +3282,7 @@ static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data) hw = &pf->hw; if (ice_is_reset_in_progress(pf->state)) - return IRQ_HANDLED; - - ice_service_task_schedule(pf); - - if (test_and_clear_bit(ICE_MISC_THREAD_EXTTS_EVENT, pf->misc_thread)) - ice_ptp_extts_event(pf); + goto skip_irq; if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) { /* Process outstanding Tx timestamps. 
If there is more work, @@ -3224,6 +3294,7 @@ static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data) } } +skip_irq: ice_irq_dynamic_ena(hw, NULL, NULL); return IRQ_HANDLED; @@ -3254,6 +3325,20 @@ static void ice_dis_ctrlq_interrupts(struct ice_hw *hw) } /** + * ice_free_irq_msix_ll_ts- Unroll ll_ts vector setup + * @pf: board private structure + */ +static void ice_free_irq_msix_ll_ts(struct ice_pf *pf) +{ + int irq_num = pf->ll_ts_irq.virq; + + synchronize_irq(irq_num); + devm_free_irq(ice_pf_to_dev(pf), irq_num, pf); + + ice_free_irq(pf, pf->ll_ts_irq); +} + +/** * ice_free_irq_msix_misc - Unroll misc vector setup * @pf: board private structure */ @@ -3272,6 +3357,8 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf) devm_free_irq(ice_pf_to_dev(pf), misc_irq_num, pf); ice_free_irq(pf, pf->oicr_irq); + if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) + ice_free_irq_msix_ll_ts(pf); } /** @@ -3297,10 +3384,12 @@ static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx) PFINT_MBX_CTL_CAUSE_ENA_M); wr32(hw, PFINT_MBX_CTL, val); - /* This enables Sideband queue Interrupt causes */ - val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) | - PFINT_SB_CTL_CAUSE_ENA_M); - wr32(hw, PFINT_SB_CTL, val); + if (!hw->dev_caps.ts_dev_info.ts_ll_int_read) { + /* enable Sideband queue Interrupt causes */ + val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) | + PFINT_SB_CTL_CAUSE_ENA_M); + wr32(hw, PFINT_SB_CTL, val); + } ice_flush(hw); } @@ -3317,13 +3406,17 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf) { struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; - struct msi_map oicr_irq; + u32 pf_intr_start_offset; + struct msi_map irq; int err = 0; if (!pf->int_name[0]) snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc", dev_driver_string(dev), dev_name(dev)); + if (!pf->int_name_ll_ts[0]) + snprintf(pf->int_name_ll_ts, sizeof(pf->int_name_ll_ts) - 1, + "%s-%s:ll_ts", dev_driver_string(dev), dev_name(dev)); /* Do not request IRQ but do enable OICR interrupt since settings are * lost during reset. Note that this function is called only during * rebuild path and not while reset is in progress. 
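The recurring cleanup across these hunks replaces open-coded mask-and-shift arithmetic with the FIELD_GET()/FIELD_PREP() helpers from <linux/bitfield.h>, which derive the shift amount from the mask at compile time and make the companion *_S shift defines unnecessary. A minimal sketch of the equivalence, using a hypothetical register layout (the EXAMPLE_* names below are illustrative only, not part of the driver):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* hypothetical layout: MSI-X index in bits 10:0, ITR index in bits 12:11 */
#define EXAMPLE_MSIX_INDX_M	GENMASK(10, 0)
#define EXAMPLE_ITR_INDX_M	GENMASK(12, 11)

static u32 example_pack(u16 msix_idx, u8 itr_idx)
{
	/* replaces ((msix_idx << 0) & EXAMPLE_MSIX_INDX_M) |
	 *          ((itr_idx << 11) & EXAMPLE_ITR_INDX_M)
	 */
	return FIELD_PREP(EXAMPLE_MSIX_INDX_M, msix_idx) |
	       FIELD_PREP(EXAMPLE_ITR_INDX_M, itr_idx);
}

static u16 example_unpack_msix(u32 regval)
{
	/* replaces (regval & EXAMPLE_MSIX_INDX_M) >> 0: mask first, then shift */
	return FIELD_GET(EXAMPLE_MSIX_INDX_M, regval);
}

FIELD_PREP() of a compile-time constant that does not fit the mask also trips a BUILD_BUG_ON(), where the open-coded AND would silently truncate the value.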
@@ -3332,11 +3425,11 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf) goto skip_req_irq; /* reserve one vector in irq_tracker for misc interrupts */ - oicr_irq = ice_alloc_irq(pf, false); - if (oicr_irq.index < 0) - return oicr_irq.index; + irq = ice_alloc_irq(pf, false); + if (irq.index < 0) + return irq.index; - pf->oicr_irq = oicr_irq; + pf->oicr_irq = irq; err = devm_request_threaded_irq(dev, pf->oicr_irq.virq, ice_misc_intr, ice_misc_intr_thread_fn, 0, pf->int_name, pf); @@ -3347,10 +3440,34 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf) return err; } + /* reserve one vector in irq_tracker for ll_ts interrupt */ + if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) + goto skip_req_irq; + + irq = ice_alloc_irq(pf, false); + if (irq.index < 0) + return irq.index; + + pf->ll_ts_irq = irq; + err = devm_request_irq(dev, pf->ll_ts_irq.virq, ice_ll_ts_intr, 0, + pf->int_name_ll_ts, pf); + if (err) { + dev_err(dev, "devm_request_irq for %s failed: %d\n", + pf->int_name_ll_ts, err); + ice_free_irq(pf, pf->ll_ts_irq); + return err; + } + skip_req_irq: ice_ena_misc_vector(pf); ice_ena_ctrlq_interrupts(hw, pf->oicr_irq.index); + /* This enables LL TS interrupt */ + pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST; + if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) + wr32(hw, PFINT_SB_CTL, + ((pf->ll_ts_irq.index + pf_intr_start_offset) & + PFINT_SB_CTL_MSIX_INDX_M) | PFINT_SB_CTL_CAUSE_ENA_M); wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_irq.index), ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S); @@ -3375,9 +3492,11 @@ static void ice_napi_add(struct ice_vsi *vsi) if (!vsi->netdev) return; - ice_for_each_q_vector(vsi, v_idx) + ice_for_each_q_vector(vsi, v_idx) { netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi, ice_napi_poll); + __ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false); + } } /** @@ -3397,6 +3516,7 @@ static void ice_set_ops(struct ice_vsi *vsi) netdev->netdev_ops = &ice_netdev_ops; netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic; + netdev->xdp_metadata_ops = &ice_xdp_md_ops; ice_set_ethtool_ops(netdev); if (vsi->type != ICE_VSI_PF) @@ -4361,6 +4481,19 @@ static void ice_print_wake_reason(struct ice_pf *pf) } /** + * ice_pf_fwlog_update_module - update 1 module + * @pf: pointer to the PF struct + * @log_level: log_level to use for the @module + * @module: module to update + */ +void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module) +{ + struct ice_hw *hw = &pf->hw; + + hw->fwlog_cfg.module_entries[module].log_level = log_level; +} + +/** * ice_register_netdev - register netdev * @vsi: pointer to the VSI struct */ @@ -4685,6 +4818,8 @@ static void ice_init_features(struct ice_pf *pf) if (ice_init_lag(pf)) dev_warn(dev, "Failed to init link aggregation support\n"); + + ice_hwmon_init(pf); } static void ice_deinit_features(struct ice_pf *pf) @@ -4702,6 +4837,8 @@ static void ice_deinit_features(struct ice_pf *pf) ice_ptp_release(pf); if (test_bit(ICE_FLAG_DPLL, pf->flags)) ice_dpll_deinit(pf); + if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) + xa_destroy(&pf->eswitch.reprs); } static void ice_init_wakeup(struct ice_pf *pf) @@ -5203,11 +5340,15 @@ static void ice_remove(struct pci_dev *pdev) msleep(100); } + ice_debugfs_exit(); + if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) { set_bit(ICE_VF_RESETS_DISABLED, pf->state); ice_free_vfs(pf); } + ice_hwmon_exit(pf); + ice_service_task_stop(pf); ice_aq_cancel_waiting_tasks(pf); set_bit(ICE_DOWN, pf->state); @@ -5306,6 +5447,7 @@ static int ice_reinit_interrupt_scheme(struct 
ice_pf *pf) if (ret) goto err_reinit; ice_vsi_map_rings_to_vectors(pf->vsi[v]); + ice_vsi_set_napi_queues(pf->vsi[v]); } ret = ice_req_irq_msix_misc(pf); @@ -5672,6 +5814,8 @@ static int __init ice_module_init(void) goto err_dest_wq; } + ice_debugfs_init(); + status = pci_register_driver(&ice_driver); if (status) { pr_err("failed to register PCI driver, err %d\n", status); @@ -5682,6 +5826,7 @@ static int __init ice_module_init(void) err_dest_lag_wq: destroy_workqueue(ice_lag_wq); + ice_debugfs_exit(); err_dest_wq: destroy_workqueue(ice_wq); return status; @@ -6041,6 +6186,23 @@ ice_fix_features(struct net_device *netdev, netdev_features_t features) } /** + * ice_set_rx_rings_vlan_proto - update rings with new stripped VLAN proto + * @vsi: PF's VSI + * @vlan_ethertype: VLAN ethertype (802.1Q or 802.1ad) in network byte order + * + * Store current stripped VLAN proto in ring packet context, + * so it can be accessed more efficiently by packet processing code. + */ +static void +ice_set_rx_rings_vlan_proto(struct ice_vsi *vsi, __be16 vlan_ethertype) +{ + u16 i; + + ice_for_each_alloc_rxq(vsi, i) + vsi->rx_rings[i]->pkt_ctx.vlan_proto = vlan_ethertype; +} + +/** * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI * @vsi: PF's VSI * @features: features used to determine VLAN offload settings @@ -6082,6 +6244,9 @@ ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features) if (strip_err || insert_err) return -EIO; + ice_set_rx_rings_vlan_proto(vsi, enable_stripping ? + htons(vlan_ethertype) : 0); + return 0; } @@ -6668,13 +6833,11 @@ void ice_update_vsi_stats(struct ice_vsi *vsi) cur_ns->rx_crc_errors = pf->stats.crc_errors; cur_ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes + - pf->stats.rx_len_errors + pf->stats.rx_undersize + pf->hw_csum_rx_error + pf->stats.rx_jabber + pf->stats.rx_fragments + pf->stats.rx_oversize; - cur_ns->rx_length_errors = pf->stats.rx_len_errors; /* record drops from the port level */ cur_ns->rx_missed_errors = pf->stats.eth.rx_discards; } @@ -6814,9 +6977,6 @@ void ice_update_pf_stats(struct ice_pf *pf) &prev_ps->mac_remote_faults, &cur_ps->mac_remote_faults); - ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded, - &prev_ps->rx_len_errors, &cur_ps->rx_len_errors); - ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded, &prev_ps->rx_undersize, &cur_ps->rx_undersize); @@ -7399,9 +7559,9 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) goto err_vsi_rebuild; } - err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL); + err = ice_eswitch_rebuild(pf); if (err) { - dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err); + dev_err(dev, "Switchdev rebuild failed: %d\n", err); goto err_vsi_rebuild; } @@ -7702,6 +7862,59 @@ int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed) } /** + * ice_set_rss_hfunc - Set RSS HASH function + * @vsi: Pointer to VSI structure + * @hfunc: hash function (ICE_AQ_VSI_Q_OPT_RSS_*) + * + * Returns 0 on success, negative on failure + */ +int ice_set_rss_hfunc(struct ice_vsi *vsi, u8 hfunc) +{ + struct ice_hw *hw = &vsi->back->hw; + struct ice_vsi_ctx *ctx; + bool symm; + int err; + + if (hfunc == vsi->rss_hfunc) + return 0; + + if (hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ && + hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ) + return -EOPNOTSUPP; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID); + ctx->info.q_opt_rss = vsi->info.q_opt_rss; + 
ctx->info.q_opt_rss &= ~ICE_AQ_VSI_Q_OPT_RSS_HASH_M; + ctx->info.q_opt_rss |= + FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hfunc); + ctx->info.q_opt_tc = vsi->info.q_opt_tc; + ctx->info.q_opt_flags = vsi->info.q_opt_rss; + + err = ice_update_vsi(hw, vsi->idx, ctx, NULL); + if (err) { + dev_err(ice_pf_to_dev(vsi->back), "Failed to configure RSS hash for VSI %d, error %d\n", + vsi->vsi_num, err); + } else { + vsi->info.q_opt_rss = ctx->info.q_opt_rss; + vsi->rss_hfunc = hfunc; + netdev_info(vsi->netdev, "Hash function set to: %sToeplitz\n", + hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ ? + "Symmetric " : ""); + } + kfree(ctx); + if (err) + return err; + + /* Fix the symmetry setting for all existing RSS configurations */ + symm = !!(hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ); + return ice_set_rss_cfg_symm(hw, vsi, symm); +} + +/** * ice_bridge_getlink - Get the hardware bridge mode * @skb: skb buff * @pid: process ID @@ -7889,8 +8102,8 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue) struct ice_hw *hw = &pf->hw; u32 head, val = 0; - head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) & - QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S; + head = FIELD_GET(QTX_COMM_HEAD_HEAD_M, + rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue]))); /* Read interrupt register */ val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx)); @@ -8138,13 +8351,12 @@ static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi) for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) { enum ice_flow_priority prio; - u64 prof_id; /* add this VSI to FDir profile for this flow */ prio = ICE_FLOW_PRIO_NORMAL; prof = hw->fdir_prof[flow]; - prof_id = flow + tun * ICE_FLTR_PTYPE_MAX; - status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, + status = ice_flow_add_entry(hw, ICE_BLK_FD, + prof->prof_id[tun], prof->vsi_h[0], vsi->idx, prio, prof->fdir_seg[tun], &entry_h); diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c index f6f52a2480..d4e05d2cb3 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.c +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c @@ -571,8 +571,8 @@ ice_get_nvm_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_nv return status; } - nvm->major = (ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT; - nvm->minor = (ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT; + nvm->major = FIELD_GET(ICE_NVM_VER_HI_MASK, ver); + nvm->minor = FIELD_GET(ICE_NVM_VER_LO_MASK, ver); status = ice_read_nvm_sr_copy(hw, bank, ICE_SR_NVM_EETRACK_LO, &eetrack_lo); if (status) { @@ -706,9 +706,9 @@ ice_get_orom_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_o combo_ver = le32_to_cpu(civd.combo_ver); - orom->major = (u8)((combo_ver & ICE_OROM_VER_MASK) >> ICE_OROM_VER_SHIFT); - orom->patch = (u8)(combo_ver & ICE_OROM_VER_PATCH_MASK); - orom->build = (u16)((combo_ver & ICE_OROM_VER_BUILD_MASK) >> ICE_OROM_VER_BUILD_SHIFT); + orom->major = FIELD_GET(ICE_OROM_VER_MASK, combo_ver); + orom->patch = FIELD_GET(ICE_OROM_VER_PATCH_MASK, combo_ver); + orom->build = FIELD_GET(ICE_OROM_VER_BUILD_MASK, combo_ver); return 0; } @@ -950,7 +950,8 @@ static int ice_determine_active_flash_banks(struct ice_hw *hw) } /* Check that the control word indicates validity */ - if ((ctrl_word & ICE_SR_CTRL_WORD_1_M) >> ICE_SR_CTRL_WORD_1_S != ICE_SR_CTRL_WORD_VALID) { + if (FIELD_GET(ICE_SR_CTRL_WORD_1_M, ctrl_word) != + ICE_SR_CTRL_WORD_VALID) { ice_debug(hw, ICE_DBG_NVM, "Shadow RAM control word is invalid\n"); return -EIO; } @@ -1027,7 +1028,7 @@ int 
ice_init_nvm(struct ice_hw *hw) * as the blank mode may be used in the factory line. */ gens_stat = rd32(hw, GLNVM_GENS); - sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >> GLNVM_GENS_SR_SIZE_S; + sr_size = FIELD_GET(GLNVM_GENS_SR_SIZE_M, gens_stat); /* Switching to words (sr_size contains power of 2) */ flash->sr_words = BIT(sr_size) * ICE_SR_WORDS_IN_1KB; diff --git a/drivers/net/ethernet/intel/ice/ice_osdep.h b/drivers/net/ethernet/intel/ice/ice_osdep.h index 82bc54fec7..a2562f0426 100644 --- a/drivers/net/ethernet/intel/ice/ice_osdep.h +++ b/drivers/net/ethernet/intel/ice/ice_osdep.h @@ -24,7 +24,7 @@ #define rd64(a, reg) readq((a)->hw_addr + (reg)) #define ice_flush(a) rd32((a), GLGEN_STAT) -#define ICE_M(m, s) ((m) << (s)) +#define ICE_M(m, s) ((m ## U) << (s)) struct ice_dma_mem { void *va; diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index e6b1ce76ca..3b6605c858 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -7,7 +7,7 @@ #define E810_OUT_PROP_DELAY_NS 1 -#define UNKNOWN_INCVAL_E822 0x100000000ULL +#define UNKNOWN_INCVAL_E82X 0x100000000ULL static const struct ptp_pin_desc ice_pin_desc_e810t[] = { /* name idx func chan */ @@ -525,6 +525,119 @@ ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx) } /** + * ice_ptp_req_tx_single_tstamp - Request Tx timestamp for a port from FW + * @tx: the PTP Tx timestamp tracker + * @idx: index of the timestamp to request + */ +void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx) +{ + struct ice_ptp_port *ptp_port; + struct sk_buff *skb; + struct ice_pf *pf; + + if (!tx->init) + return; + + ptp_port = container_of(tx, struct ice_ptp_port, tx); + pf = ptp_port_to_pf(ptp_port); + + /* Drop packets which have waited for more than 2 seconds */ + if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) { + /* Count the number of Tx timestamps that timed out */ + pf->ptp.tx_hwtstamp_timeouts++; + + skb = tx->tstamps[idx].skb; + tx->tstamps[idx].skb = NULL; + clear_bit(idx, tx->in_use); + + dev_kfree_skb_any(skb); + return; + } + + ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx); + + /* Write TS index to read to the PF register so the FW can read it */ + wr32(&pf->hw, PF_SB_ATQBAL, + TS_LL_READ_TS_INTR | FIELD_PREP(TS_LL_READ_TS_IDX, idx) | + TS_LL_READ_TS); + tx->last_ll_ts_idx_read = idx; +} + +/** + * ice_ptp_complete_tx_single_tstamp - Complete Tx timestamp for a port + * @tx: the PTP Tx timestamp tracker + */ +void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx) +{ + struct skb_shared_hwtstamps shhwtstamps = {}; + u8 idx = tx->last_ll_ts_idx_read; + struct ice_ptp_port *ptp_port; + u64 raw_tstamp, tstamp; + bool drop_ts = false; + struct sk_buff *skb; + struct ice_pf *pf; + u32 val; + + if (!tx->init || tx->last_ll_ts_idx_read < 0) + return; + + ptp_port = container_of(tx, struct ice_ptp_port, tx); + pf = ptp_port_to_pf(ptp_port); + + ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx); + + val = rd32(&pf->hw, PF_SB_ATQBAL); + + /* When the bit is cleared, the TS is ready in the register */ + if (val & TS_LL_READ_TS) { + dev_err(ice_pf_to_dev(pf), "Failed to get the Tx tstamp - FW not ready"); + return; + } + + /* High 8 bit value of the TS is on the bits 16:23 */ + raw_tstamp = FIELD_GET(TS_LL_READ_TS_HIGH, val); + raw_tstamp <<= 32; + + /* Read the low 32 bit value */ + raw_tstamp |= (u64)rd32(&pf->hw, PF_SB_ATQBAH); + + /* For PHYs which don't implement a proper timestamp ready bitmap, + * verify that the 
timestamp value is different from the last cached + * timestamp. If it is not, skip this for now assuming it hasn't yet + * been captured by hardware. + */ + if (!drop_ts && tx->verify_cached && + raw_tstamp == tx->tstamps[idx].cached_tstamp) + return; + + if (tx->verify_cached && raw_tstamp) + tx->tstamps[idx].cached_tstamp = raw_tstamp; + clear_bit(idx, tx->in_use); + skb = tx->tstamps[idx].skb; + tx->tstamps[idx].skb = NULL; + if (test_and_clear_bit(idx, tx->stale)) + drop_ts = true; + + if (!skb) + return; + + if (drop_ts) { + dev_kfree_skb_any(skb); + return; + } + + /* Extend the timestamp using cached PHC time */ + tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp); + if (tstamp) { + shhwtstamps.hwtstamp = ns_to_ktime(tstamp); + ice_trace(tx_tstamp_complete, skb, idx); + } + + skb_tstamp_tx(skb, &shhwtstamps); + dev_kfree_skb_any(skb); +} + +/** * ice_ptp_process_tx_tstamp - Process Tx timestamps for a port * @tx: the PTP Tx timestamp tracker * @@ -575,6 +688,7 @@ ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx) static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx) { struct ice_ptp_port *ptp_port; + unsigned long flags; struct ice_pf *pf; struct ice_hw *hw; u64 tstamp_ready; @@ -646,7 +760,7 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx) drop_ts = true; skip_ts_read: - spin_lock(&tx->lock); + spin_lock_irqsave(&tx->lock, flags); if (tx->verify_cached && raw_tstamp) tx->tstamps[idx].cached_tstamp = raw_tstamp; clear_bit(idx, tx->in_use); @@ -654,7 +768,7 @@ skip_ts_read: tx->tstamps[idx].skb = NULL; if (test_and_clear_bit(idx, tx->stale)) drop_ts = true; - spin_unlock(&tx->lock); + spin_unlock_irqrestore(&tx->lock, flags); /* It is unlikely but possible that the SKB will have been * flushed at this point due to link change or teardown. 
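All of the tx->lock sites in the hunks above move from spin_lock() to spin_lock_irqsave(). The likely reason is visible in the ice_main.c hunks earlier in this patch: with the new low-latency timestamp support the lock is now also taken in hard-IRQ context, via ice_ptp_req_tx_single_tstamp() and ice_ptp_complete_tx_single_tstamp(), so process-context holders must disable local interrupts to rule out a self-deadlock. A minimal kernel-context sketch of the pattern (names are illustrative, not from the driver):

#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);	/* hypothetical lock shared with an ISR */
static unsigned long demo_pending;

/* Process context: the ISR below can preempt us on this CPU, so local
 * interrupts must be disabled while the lock is held. */
static void demo_request(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	demo_pending |= BIT(0);
	spin_unlock_irqrestore(&demo_lock, flags);
}

/* Hard-IRQ context: interrupts are already disabled here, so the plain
 * lock/unlock pair is sufficient. */
static irqreturn_t demo_isr(int irq, void *data)
{
	spin_lock(&demo_lock);
	demo_pending &= ~BIT(0);
	spin_unlock(&demo_lock);

	return IRQ_HANDLED;
}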
@@ -705,7 +819,9 @@ static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf) /* Read the Tx ready status first */ err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready); - if (err || tstamp_ready) + if (err) + break; + else if (tstamp_ready) return ICE_TX_TSTAMP_WORK_PENDING; } @@ -722,6 +838,7 @@ static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf) static enum ice_tx_tstamp_work ice_ptp_tx_tstamp(struct ice_ptp_tx *tx) { bool more_timestamps; + unsigned long flags; if (!tx->init) return ICE_TX_TSTAMP_WORK_DONE; @@ -730,9 +847,9 @@ static enum ice_tx_tstamp_work ice_ptp_tx_tstamp(struct ice_ptp_tx *tx) ice_ptp_process_tx_tstamp(tx); /* Check if there are outstanding Tx timestamps */ - spin_lock(&tx->lock); + spin_lock_irqsave(&tx->lock, flags); more_timestamps = tx->init && !bitmap_empty(tx->in_use, tx->len); - spin_unlock(&tx->lock); + spin_unlock_irqrestore(&tx->lock, flags); if (more_timestamps) return ICE_TX_TSTAMP_WORK_PENDING; @@ -769,6 +886,7 @@ ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx) tx->in_use = in_use; tx->stale = stale; tx->init = 1; + tx->last_ll_ts_idx_read = -1; spin_lock_init(&tx->lock); @@ -786,6 +904,7 @@ static void ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) { struct ice_hw *hw = &pf->hw; + unsigned long flags; u64 tstamp_ready; int err; u8 idx; @@ -809,12 +928,12 @@ ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) if (!hw->reset_ongoing && (tstamp_ready & BIT_ULL(phy_idx))) ice_clear_phy_tstamp(hw, tx->block, phy_idx); - spin_lock(&tx->lock); + spin_lock_irqsave(&tx->lock, flags); skb = tx->tstamps[idx].skb; tx->tstamps[idx].skb = NULL; clear_bit(idx, tx->in_use); clear_bit(idx, tx->stale); - spin_unlock(&tx->lock); + spin_unlock_irqrestore(&tx->lock, flags); /* Count the number of Tx timestamps flushed */ pf->ptp.tx_hwtstamp_flushed++; @@ -838,9 +957,11 @@ ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) static void ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx) { - spin_lock(&tx->lock); + unsigned long flags; + + spin_lock_irqsave(&tx->lock, flags); bitmap_or(tx->stale, tx->stale, tx->in_use, tx->len); - spin_unlock(&tx->lock); + spin_unlock_irqrestore(&tx->lock, flags); } /** @@ -853,9 +974,11 @@ ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx) static void ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) { - spin_lock(&tx->lock); + unsigned long flags; + + spin_lock_irqsave(&tx->lock, flags); tx->init = 0; - spin_unlock(&tx->lock); + spin_unlock_irqrestore(&tx->lock, flags); /* wait for potentially outstanding interrupt to complete */ synchronize_irq(pf->oicr_irq.virq); @@ -875,7 +998,7 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) } /** - * ice_ptp_init_tx_e822 - Initialize tracking for Tx timestamps + * ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps * @pf: Board private structure * @tx: the Tx tracking structure to initialize * @port: the port this structure tracks @@ -886,11 +1009,11 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) * registers into chunks based on the port number. 
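The kernel-doc just above describes how each E82x quad's shared bank of timestamp indices is carved into per-port chunks; the function body that follows computes tx->block and tx->offset from the port number. A standalone sketch of that arithmetic, assuming four ports per quad (ICE_PORTS_PER_QUAD's value is not shown in this diff):

#include <stdio.h>

#define ICE_PORTS_PER_QUAD	4	/* assumption: 4 ports per quad */
#define INDEX_PER_QUAD		64	/* from the patch */
#define INDEX_PER_PORT_E82X	16	/* from the patch: 64 / 4 */

int main(void)
{
	for (int port = 0; port < 8; port++) {
		int block = port / ICE_PORTS_PER_QUAD;	/* tx->block */
		int offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X; /* tx->offset */

		/* Each port owns indices [offset, offset + 16) of its quad. */
		printf("port %d -> quad %d, indices %2d..%2d\n",
		       port, block, offset, offset + INDEX_PER_PORT_E82X - 1);
	}
	return 0;
}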
*/ static int -ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port) +ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port) { tx->block = port / ICE_PORTS_PER_QUAD; - tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E822; - tx->len = INDEX_PER_PORT_E822; + tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X; + tx->len = INDEX_PER_PORT_E82X; tx->verify_cached = 0; return ice_ptp_alloc_tx_tracker(tx); @@ -1093,10 +1216,10 @@ static u64 ice_base_incval(struct ice_pf *pf) if (ice_is_e810(hw)) incval = ICE_PTP_NOMINAL_INCVAL_E810; - else if (ice_e822_time_ref(hw) < NUM_ICE_TIME_REF_FREQ) - incval = ice_e822_nominal_incval(ice_e822_time_ref(hw)); + else if (ice_e82x_time_ref(hw) < NUM_ICE_TIME_REF_FREQ) + incval = ice_e82x_nominal_incval(ice_e82x_time_ref(hw)); else - incval = UNKNOWN_INCVAL_E822; + incval = UNKNOWN_INCVAL_E82X; dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n", incval); @@ -1125,10 +1248,10 @@ static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port) /* need to read FIFO state */ if (offs == 0 || offs == 1) - err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO01_STATUS, + err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO01_STATUS, &val); else - err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO23_STATUS, + err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO23_STATUS, &val); if (err) { @@ -1138,9 +1261,9 @@ static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port) } if (offs & 0x1) - phy_sts = (val & Q_REG_FIFO13_M) >> Q_REG_FIFO13_S; + phy_sts = FIELD_GET(Q_REG_FIFO13_M, val); else - phy_sts = (val & Q_REG_FIFO02_M) >> Q_REG_FIFO02_S; + phy_sts = FIELD_GET(Q_REG_FIFO02_M, val); if (phy_sts & FIFO_EMPTY) { port->tx_fifo_busy_cnt = FIFO_OK; @@ -1156,7 +1279,7 @@ static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port) dev_dbg(ice_pf_to_dev(pf), "Port %d Tx FIFO still not empty; resetting quad %d\n", port->port_num, quad); - ice_ptp_reset_ts_memory_quad_e822(hw, quad); + ice_ptp_reset_ts_memory_quad_e82x(hw, quad); port->tx_fifo_busy_cnt = FIFO_OK; return 0; } @@ -1201,8 +1324,8 @@ static void ice_ptp_wait_for_offsets(struct kthread_work *work) tx_err = ice_ptp_check_tx_fifo(port); if (!tx_err) - tx_err = ice_phy_cfg_tx_offset_e822(hw, port->port_num); - rx_err = ice_phy_cfg_rx_offset_e822(hw, port->port_num); + tx_err = ice_phy_cfg_tx_offset_e82x(hw, port->port_num); + rx_err = ice_phy_cfg_rx_offset_e82x(hw, port->port_num); if (tx_err || rx_err) { /* Tx and/or Rx offset not yet configured, try again later */ kthread_queue_delayed_work(pf->ptp.kworker, @@ -1231,7 +1354,7 @@ ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port) kthread_cancel_delayed_work_sync(&ptp_port->ov_work); - err = ice_stop_phy_timer_e822(hw, port, true); + err = ice_stop_phy_timer_e82x(hw, port, true); if (err) dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n", port, err); @@ -1255,6 +1378,7 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port) struct ice_pf *pf = ptp_port_to_pf(ptp_port); u8 port = ptp_port->port_num; struct ice_hw *hw = &pf->hw; + unsigned long flags; int err; if (ice_is_e810(hw)) @@ -1268,20 +1392,20 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port) kthread_cancel_delayed_work_sync(&ptp_port->ov_work); /* temporarily disable Tx timestamps while calibrating PHY offset */ - spin_lock(&ptp_port->tx.lock); + spin_lock_irqsave(&ptp_port->tx.lock, flags); ptp_port->tx.calibrating = true; - spin_unlock(&ptp_port->tx.lock); + spin_unlock_irqrestore(&ptp_port->tx.lock, flags); 
ptp_port->tx_fifo_busy_cnt = 0; /* Start the PHY timer in Vernier mode */ - err = ice_start_phy_timer_e822(hw, port); + err = ice_start_phy_timer_e82x(hw, port); if (err) goto out_unlock; /* Enable Tx timestamps right away */ - spin_lock(&ptp_port->tx.lock); + spin_lock_irqsave(&ptp_port->tx.lock, flags); ptp_port->tx.calibrating = false; - spin_unlock(&ptp_port->tx.lock); + spin_unlock_irqrestore(&ptp_port->tx.lock, flags); kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work, 0); @@ -1323,7 +1447,7 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) case ICE_PHY_E810: /* Do not reconfigure E810 PHY */ return; - case ICE_PHY_E822: + case ICE_PHY_E82X: ice_ptp_port_phy_restart(ptp_port); return; default: @@ -1349,7 +1473,7 @@ static int ice_ptp_tx_ena_intr(struct ice_pf *pf, bool ena, u32 threshold) ice_ptp_reset_ts_memory(hw); for (quad = 0; quad < ICE_MAX_QUAD; quad++) { - err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, + err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val); if (err) break; @@ -1357,13 +1481,13 @@ static int ice_ptp_tx_ena_intr(struct ice_pf *pf, bool ena, u32 threshold) if (ena) { val |= Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M; val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_THR_M; - val |= ((threshold << Q_REG_TX_MEM_GBL_CFG_INTR_THR_S) & - Q_REG_TX_MEM_GBL_CFG_INTR_THR_M); + val |= FIELD_PREP(Q_REG_TX_MEM_GBL_CFG_INTR_THR_M, + threshold); } else { val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M; } - err = ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, + err = ice_write_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, val); if (err) break; @@ -1503,8 +1627,7 @@ ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin, * + num_in_channels * tmr_idx */ func = 1 + chan + (tmr_idx * 3); - gpio_reg = ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) & - GLGEN_GPIO_CTL_PIN_FUNC_M); + gpio_reg = FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M, func); pf->ptp.ext_ts_chan |= (1 << chan); } else { /* clear the values we set to reset defaults */ @@ -1601,7 +1724,7 @@ static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan, if (ice_is_e810(hw)) start_time -= E810_OUT_PROP_DELAY_NS; else - start_time -= ice_e822_pps_delay(ice_e822_time_ref(hw)); + start_time -= ice_e82x_pps_delay(ice_e82x_time_ref(hw)); /* 2. Write TARGET time */ wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start_time)); @@ -1614,7 +1737,7 @@ static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan, /* 4. write GPIO CTL reg */ func = 8 + chan + (tmr_idx * 4); val = GLGEN_GPIO_CTL_PIN_DIR_M | - ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) & GLGEN_GPIO_CTL_PIN_FUNC_M); + FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M, func); wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val); /* Store the value if requested */ @@ -1840,7 +1963,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts) ice_ptp_enable_all_clkout(pf); /* Recalibrate and re-enable timestamp blocks for E822/E823 */ - if (hw->phy_model == ICE_PHY_E822) + if (hw->phy_model == ICE_PHY_E82X) ice_ptp_restart_all_phy(pf); exit: if (err) { @@ -2127,30 +2250,26 @@ int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr) } /** - * ice_ptp_rx_hwtstamp - Check for an Rx timestamp - * @rx_ring: Ring to get the VSI info + * ice_ptp_get_rx_hwts - Get packet Rx timestamp in ns * @rx_desc: Receive descriptor - * @skb: Particular skb to send timestamp with + * @pkt_ctx: Packet context to get the cached time * * The driver receives a notification in the receive descriptor with timestamp. 
- * The timestamp is in ns, so we must convert the result first. */ -void -ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring, - union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) +u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc, + const struct ice_pkt_ctx *pkt_ctx) { - struct skb_shared_hwtstamps *hwtstamps; u64 ts_ns, cached_time; u32 ts_high; if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID)) - return; + return 0; - cached_time = READ_ONCE(rx_ring->cached_phctime); + cached_time = READ_ONCE(pkt_ctx->cached_phctime); /* Do not report a timestamp if we don't have a cached PHC time */ if (!cached_time) - return; + return 0; /* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached * PHC value, rather than accessing the PF. This also allows us to @@ -2161,9 +2280,7 @@ ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring, ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high); ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high); - hwtstamps = skb_hwtstamps(skb); - memset(hwtstamps, 0, sizeof(*hwtstamps)); - hwtstamps->hwtstamp = ns_to_ktime(ts_ns); + return ts_ns; } /** @@ -2378,18 +2495,23 @@ static long ice_ptp_create_clock(struct ice_pf *pf) */ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb) { + unsigned long flags; u8 idx; - spin_lock(&tx->lock); + spin_lock_irqsave(&tx->lock, flags); /* Check that this tracker is accepting new timestamp requests */ if (!ice_ptp_is_tx_tracker_up(tx)) { - spin_unlock(&tx->lock); + spin_unlock_irqrestore(&tx->lock, flags); return -1; } /* Find and set the first available index */ - idx = find_first_zero_bit(tx->in_use, tx->len); + idx = find_next_zero_bit(tx->in_use, tx->len, + tx->last_ll_ts_idx_read + 1); + if (idx == tx->len) + idx = find_first_zero_bit(tx->in_use, tx->len); + if (idx < tx->len) { /* We got a valid index that no other thread could have set. Store * a reference to the skb and the start time to allow discarding old @@ -2403,7 +2525,7 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb) ice_trace(tx_tstamp_request, skb, idx); } - spin_unlock(&tx->lock); + spin_unlock_irqrestore(&tx->lock, flags); /* return the appropriate PHY timestamp register index, -1 if no * indexes were available. @@ -2440,6 +2562,54 @@ enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf) } } +/** + * ice_ptp_maybe_trigger_tx_interrupt - Trigger Tx timstamp interrupt + * @pf: Board private structure + * + * The device PHY issues Tx timestamp interrupts to the driver for processing + * timestamp data from the PHY. It will not interrupt again until all + * current timestamp data is read. In rare circumstances, it is possible that + * the driver fails to read all outstanding data. + * + * To avoid getting permanently stuck, periodically check if the PHY has + * outstanding timestamp data. If so, trigger an interrupt from software to + * process this data. + */ +static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw *hw = &pf->hw; + bool trigger_oicr = false; + unsigned int i; + + if (ice_is_e810(hw)) + return; + + if (!ice_pf_src_tmr_owned(pf)) + return; + + for (i = 0; i < ICE_MAX_QUAD; i++) { + u64 tstamp_ready; + int err; + + err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready); + if (!err && tstamp_ready) { + trigger_oicr = true; + break; + } + } + + if (trigger_oicr) { + /* Trigger a software interrupt, to ensure this data + * gets processed. + */ + dev_dbg(dev, "PTP periodic task detected waiting timestamps. 
Triggering Tx timestamp interrupt now.\n"); + + wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M); + ice_flush(hw); + } +} + static void ice_ptp_periodic_work(struct kthread_work *work) { struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work); @@ -2451,6 +2621,8 @@ static void ice_ptp_periodic_work(struct kthread_work *work) err = ice_ptp_update_cached_phctime(pf); + ice_ptp_maybe_trigger_tx_interrupt(pf); + /* Run twice a second or reschedule if phc update failed */ kthread_queue_delayed_work(ptp->kworker, &ptp->work, msecs_to_jiffies(err ? 10 : 500)); @@ -2468,12 +2640,10 @@ void ice_ptp_reset(struct ice_pf *pf) int err, itr = 1; u64 time_diff; - if (test_bit(ICE_PFR_REQ, pf->state)) + if (test_bit(ICE_PFR_REQ, pf->state) || + !ice_pf_src_tmr_owned(pf)) goto pfr; - if (!ice_pf_src_tmr_owned(pf)) - goto reset_ts; - err = ice_ptp_init_phc(hw); if (err) goto err; @@ -2517,10 +2687,6 @@ void ice_ptp_reset(struct ice_pf *pf) goto err; } -reset_ts: - /* Restart the PHY timestamping block */ - ice_ptp_reset_phy_timestamping(pf); - pfr: /* Init Tx structures */ if (ice_is_e810(&pf->hw)) { @@ -2528,7 +2694,7 @@ pfr: } else { kthread_init_delayed_work(&ptp->port.ov_work, ice_ptp_wait_for_offsets); - err = ice_ptp_init_tx_e822(pf, &ptp->port.tx, + err = ice_ptp_init_tx_e82x(pf, &ptp->port.tx, ptp->port.port_num); } if (err) @@ -2536,6 +2702,11 @@ pfr: set_bit(ICE_FLAG_PTP, pf->flags); + /* Restart the PHY timestamping block */ + if (!test_bit(ICE_PFR_REQ, pf->state) && + ice_pf_src_tmr_owned(pf)) + ice_ptp_restart_all_phy(pf); + /* Start periodic work going */ kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0); @@ -2898,11 +3069,11 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port) switch (hw->phy_model) { case ICE_PHY_E810: return ice_ptp_init_tx_e810(pf, &ptp_port->tx); - case ICE_PHY_E822: + case ICE_PHY_E82X: kthread_init_delayed_work(&ptp_port->ov_work, ice_ptp_wait_for_offsets); - return ice_ptp_init_tx_e822(pf, &ptp_port->tx, + return ice_ptp_init_tx_e82x(pf, &ptp_port->tx, ptp_port->port_num); default: return -ENODEV; @@ -2991,7 +3162,7 @@ static void ice_ptp_remove_auxbus_device(struct ice_pf *pf) static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf) { switch (pf->hw.phy_model) { - case ICE_PHY_E822: + case ICE_PHY_E82X: /* E822 based PHY has the clock owner process the interrupt * for all ports. */ diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h index 06a330867f..087dd32d87 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp.h @@ -131,6 +131,7 @@ enum ice_tx_tstamp_work { * @calibrating: if true, the PHY is calibrating the Tx offset. During this * window, timestamps are temporarily disabled. 
 * @verify_cached: if true, verify new timestamp differs from last read value
+ * @last_ll_ts_idx_read: index of the last LL TS read by the FW
  */
 struct ice_ptp_tx {
 	spinlock_t lock; /* lock protecting in_use bitmap */
@@ -143,11 +144,12 @@ struct ice_ptp_tx {
 	u8 init : 1;
 	u8 calibrating : 1;
 	u8 verify_cached : 1;
+	s8 last_ll_ts_idx_read;
 };
 
 /* Quad and port information for initializing timestamp blocks */
 #define INDEX_PER_QUAD			64
-#define INDEX_PER_PORT_E822		16
+#define INDEX_PER_PORT_E82X		16
 #define INDEX_PER_PORT_E810		64
 
 /**
@@ -296,11 +298,12 @@ void ice_ptp_restore_timestamp_mode(struct ice_pf *pf);
 
 void ice_ptp_extts_event(struct ice_pf *pf);
 s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
+void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx);
+void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx);
 enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf);
-void
-ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
-		    union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb);
+u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
+			const struct ice_pkt_ctx *pkt_ctx);
 void ice_ptp_reset(struct ice_pf *pf);
 void ice_ptp_prepare_for_reset(struct ice_pf *pf);
 void ice_ptp_init(struct ice_pf *pf);
@@ -325,13 +328,23 @@ ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
 	return -1;
 }
 
+static inline void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
+{ }
+
+static inline void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx) { }
+
 static inline bool ice_ptp_process_ts(struct ice_pf *pf) { return true; }
-static inline void
-ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
-		    union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) { }
+
+static inline u64
+ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
+		    const struct ice_pkt_ctx *pkt_ctx)
+{
+	return 0;
+}
+
 static inline void ice_ptp_reset(struct ice_pf *pf) { }
 static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf) { }
 static inline void ice_ptp_init(struct ice_pf *pf) { }
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
index 4109aa3b2f..2c4dab0c48 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
@@ -9,17 +9,17 @@
  */
 
 /* Constants defined for the PTP 1588 clock hardware. */
 
-/* struct ice_time_ref_info_e822
+/* struct ice_time_ref_info_e82x
  *
  * E822 hardware can use different sources as the reference for the PTP
  * hardware clock. Each clock has different characteristics such as a slightly
  * different frequency, etc.
  *
  * This lookup table defines several constants that depend on the current time
- * reference. See the struct ice_time_ref_info_e822 for information about the
+ * reference. See the struct ice_time_ref_info_e82x for information about the
  * meaning of each constant.
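The table below is indexed by the active TIME_REF frequency. As the ice_base_incval() hunk earlier in this patch shows, lookups of this kind are guarded by a range check with a sentinel fallback (UNKNOWN_INCVAL_E82X). A standalone sketch of that guard; the enum values and increment numbers here are made up for illustration:

#include <stdio.h>
#include <stdint.h>

enum time_ref_freq { FREQ_25_000, FREQ_122_880, NUM_TIME_REF_FREQ }; /* illustrative subset */

static const uint64_t nominal_incval[NUM_TIME_REF_FREQ] = {
	0x800000000ULL,	/* made-up per-tick increment for 25 MHz */
	0x600000000ULL,	/* made-up per-tick increment for 122.88 MHz */
};

#define UNKNOWN_INCVAL 0x100000000ULL	/* sentinel, as UNKNOWN_INCVAL_E82X above */

static uint64_t base_incval(enum time_ref_freq f)
{
	/* Guard the table lookup exactly like the driver does. */
	if (f < NUM_TIME_REF_FREQ)
		return nominal_incval[f];
	return UNKNOWN_INCVAL;
}

int main(void)
{
	printf("0x%llx\n", (unsigned long long)base_incval(FREQ_25_000));
	printf("0x%llx\n", (unsigned long long)base_incval((enum time_ref_freq)7));
	return 0;
}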
*/ -const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ] = { +const struct ice_time_ref_info_e82x e822_time_ref[NUM_ICE_TIME_REF_FREQ] = { /* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */ { /* pll_freq */ @@ -81,7 +81,7 @@ const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ] = { }, }; -const struct ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = { +const struct ice_cgu_pll_params_e82x e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = { /* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */ { /* refclk_pre_div */ @@ -155,7 +155,7 @@ const struct ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = { }, }; -/* struct ice_vernier_info_e822 +/* struct ice_vernier_info_e82x * * E822 hardware calibrates the delay of the timestamp indication from the * actual packet transmission or reception during the initialization of the @@ -168,7 +168,7 @@ const struct ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = { * used by this link speed, and that the register should be cleared by writing * 0. Other values specify the clock frequency in Hz. */ -const struct ice_vernier_info_e822 e822_vernier[NUM_ICE_PTP_LNK_SPD] = { +const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD] = { /* ICE_PTP_LNK_SPD_1G */ { /* tx_par_clk */ diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c index a00b55e14a..187ce9b54e 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c @@ -284,19 +284,19 @@ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw) */ /** - * ice_fill_phy_msg_e822 - Fill message data for a PHY register access + * ice_fill_phy_msg_e82x - Fill message data for a PHY register access * @msg: the PHY message buffer to fill in * @port: the port to access * @offset: the register offset */ static void -ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset) +ice_fill_phy_msg_e82x(struct ice_sbq_msg_input *msg, u8 port, u16 offset) { int phy_port, phy, quadtype; - phy_port = port % ICE_PORTS_PER_PHY_E822; - phy = port / ICE_PORTS_PER_PHY_E822; - quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_QUADS_PER_PHY_E822; + phy_port = port % ICE_PORTS_PER_PHY_E82X; + phy = port / ICE_PORTS_PER_PHY_E82X; + quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_QUADS_PER_PHY_E82X; if (quadtype == 0) { msg->msg_addr_low = P_Q0_L(P_0_BASE + offset, phy_port); @@ -315,7 +315,7 @@ ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset) } /** - * ice_is_64b_phy_reg_e822 - Check if this is a 64bit PHY register + * ice_is_64b_phy_reg_e82x - Check if this is a 64bit PHY register * @low_addr: the low address to check * @high_addr: on return, contains the high address of the 64bit register * @@ -323,7 +323,7 @@ ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset) * represented as two 32bit registers. If it is, return the appropriate high * register offset to use. 
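Only whitelisted low-register offsets are treated as 64-bit pairs; the read helper that follows then performs two 32-bit sideband reads and stitches the halves together. A standalone sketch of the stitch with stubbed register I/O (addresses and values are fake):

#include <stdio.h>
#include <stdint.h>

/* Stub for a 32-bit sideband register read; the real driver goes
 * through the device's sideband queue. */
static uint32_t sbq_read(uint16_t addr)
{
	return addr == 0x10 ? 0xdeadbeef : 0x00000012; /* fake low/high halves */
}

static uint64_t read_64b_pair(uint16_t low_addr, uint16_t high_addr)
{
	uint32_t low = sbq_read(low_addr);
	uint32_t high = sbq_read(high_addr);

	/* Combine the two 32-bit halves into one 64-bit value. */
	return ((uint64_t)high << 32) | low;
}

int main(void)
{
	printf("0x%016llx\n", (unsigned long long)read_64b_pair(0x10, 0x14));
	return 0;
}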
*/ -static bool ice_is_64b_phy_reg_e822(u16 low_addr, u16 *high_addr) +static bool ice_is_64b_phy_reg_e82x(u16 low_addr, u16 *high_addr) { switch (low_addr) { case P_REG_PAR_PCS_TX_OFFSET_L: @@ -368,7 +368,7 @@ static bool ice_is_64b_phy_reg_e822(u16 low_addr, u16 *high_addr) } /** - * ice_is_40b_phy_reg_e822 - Check if this is a 40bit PHY register + * ice_is_40b_phy_reg_e82x - Check if this is a 40bit PHY register * @low_addr: the low address to check * @high_addr: on return, contains the high address of the 40bit value * @@ -377,7 +377,7 @@ static bool ice_is_64b_phy_reg_e822(u16 low_addr, u16 *high_addr) * upper 32 bits in the high register. If it is, return the appropriate high * register offset to use. */ -static bool ice_is_40b_phy_reg_e822(u16 low_addr, u16 *high_addr) +static bool ice_is_40b_phy_reg_e82x(u16 low_addr, u16 *high_addr) { switch (low_addr) { case P_REG_TIMETUS_L: @@ -413,7 +413,7 @@ static bool ice_is_40b_phy_reg_e822(u16 low_addr, u16 *high_addr) } /** - * ice_read_phy_reg_e822 - Read a PHY register + * ice_read_phy_reg_e82x - Read a PHY register * @hw: pointer to the HW struct * @port: PHY port to read from * @offset: PHY register offset to read @@ -422,12 +422,12 @@ static bool ice_is_40b_phy_reg_e822(u16 low_addr, u16 *high_addr) * Read a PHY register for the given port over the device sideband queue. */ static int -ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val) +ice_read_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 *val) { struct ice_sbq_msg_input msg = {0}; int err; - ice_fill_phy_msg_e822(&msg, port, offset); + ice_fill_phy_msg_e82x(&msg, port, offset); msg.opcode = ice_sbq_msg_rd; err = ice_sbq_rw_reg(hw, &msg); @@ -443,7 +443,7 @@ ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val) } /** - * ice_read_64b_phy_reg_e822 - Read a 64bit value from PHY registers + * ice_read_64b_phy_reg_e82x - Read a 64bit value from PHY registers * @hw: pointer to the HW struct * @port: PHY port to read from * @low_addr: offset of the lower register to read from @@ -455,7 +455,7 @@ ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val) * known to be two parts of a 64bit value. */ static int -ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val) +ice_read_64b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val) { u32 low, high; u16 high_addr; @@ -464,20 +464,20 @@ ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val) /* Only operate on registers known to be split into two 32bit * registers. 
*/ - if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) { + if (!ice_is_64b_phy_reg_e82x(low_addr, &high_addr)) { ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n", low_addr); return -EINVAL; } - err = ice_read_phy_reg_e822(hw, port, low_addr, &low); + err = ice_read_phy_reg_e82x(hw, port, low_addr, &low); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read from low register 0x%08x\n, err %d", low_addr, err); return err; } - err = ice_read_phy_reg_e822(hw, port, high_addr, &high); + err = ice_read_phy_reg_e82x(hw, port, high_addr, &high); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read from high register 0x%08x\n, err %d", high_addr, err); @@ -490,7 +490,7 @@ ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val) } /** - * ice_write_phy_reg_e822 - Write a PHY register + * ice_write_phy_reg_e82x - Write a PHY register * @hw: pointer to the HW struct * @port: PHY port to write to * @offset: PHY register offset to write @@ -499,12 +499,12 @@ ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val) * Write a PHY register for the given port over the device sideband queue. */ static int -ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val) +ice_write_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 val) { struct ice_sbq_msg_input msg = {0}; int err; - ice_fill_phy_msg_e822(&msg, port, offset); + ice_fill_phy_msg_e82x(&msg, port, offset); msg.opcode = ice_sbq_msg_wr; msg.data = val; @@ -519,7 +519,7 @@ ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val) } /** - * ice_write_40b_phy_reg_e822 - Write a 40b value to the PHY + * ice_write_40b_phy_reg_e82x - Write a 40b value to the PHY * @hw: pointer to the HW struct * @port: port to write to * @low_addr: offset of the low register @@ -529,7 +529,7 @@ ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val) * it up into two chunks, the lower 8 bits and the upper 32 bits. */ static int -ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) +ice_write_40b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) { u32 low, high; u16 high_addr; @@ -538,7 +538,7 @@ ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) /* Only operate on registers known to be split into a lower 8 bit * register and an upper 32 bit register. 
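For the 40-bit registers the split is asymmetric: the low register carries 8 bits and the high register the remaining 32, which is what the P_REG_40B_LOW_M mask and P_REG_40B_HIGH_S shift encode (their values are not shown in this hunk; 0xff and 8 are assumed below). A standalone round-trip check:

#include <stdio.h>
#include <stdint.h>

#define REG_40B_LOW_M	0xffULL	/* assumed: low register keeps bits 7:0 */
#define REG_40B_HIGH_S	8	/* assumed: high register keeps bits 39:8 */

int main(void)
{
	uint64_t val = 0x123456789aULL;	/* a 40-bit value */

	uint32_t low = (uint32_t)(val & REG_40B_LOW_M);
	uint32_t high = (uint32_t)(val >> REG_40B_HIGH_S);

	printf("low=0x%02x high=0x%08x\n", low, high);

	/* Recombining proves the split is lossless for 40-bit values. */
	uint64_t back = ((uint64_t)high << REG_40B_HIGH_S) | low;
	printf("roundtrip %s\n", back == val ? "ok" : "BROKEN");
	return 0;
}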
*/ - if (!ice_is_40b_phy_reg_e822(low_addr, &high_addr)) { + if (!ice_is_40b_phy_reg_e82x(low_addr, &high_addr)) { ice_debug(hw, ICE_DBG_PTP, "Invalid 40b register addr 0x%08x\n", low_addr); return -EINVAL; @@ -547,14 +547,14 @@ ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) low = (u32)(val & P_REG_40B_LOW_M); high = (u32)(val >> P_REG_40B_HIGH_S); - err = ice_write_phy_reg_e822(hw, port, low_addr, low); + err = ice_write_phy_reg_e82x(hw, port, low_addr, low); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d", low_addr, err); return err; } - err = ice_write_phy_reg_e822(hw, port, high_addr, high); + err = ice_write_phy_reg_e82x(hw, port, high_addr, high); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d", high_addr, err); @@ -565,7 +565,7 @@ ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) } /** - * ice_write_64b_phy_reg_e822 - Write a 64bit value to PHY registers + * ice_write_64b_phy_reg_e82x - Write a 64bit value to PHY registers * @hw: pointer to the HW struct * @port: PHY port to read from * @low_addr: offset of the lower register to read from @@ -577,7 +577,7 @@ ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) * a 64bit value. */ static int -ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) +ice_write_64b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) { u32 low, high; u16 high_addr; @@ -586,7 +586,7 @@ ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) /* Only operate on registers known to be split into two 32bit * registers. */ - if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) { + if (!ice_is_64b_phy_reg_e82x(low_addr, &high_addr)) { ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n", low_addr); return -EINVAL; @@ -595,14 +595,14 @@ ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) low = lower_32_bits(val); high = upper_32_bits(val); - err = ice_write_phy_reg_e822(hw, port, low_addr, low); + err = ice_write_phy_reg_e82x(hw, port, low_addr, low); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d", low_addr, err); return err; } - err = ice_write_phy_reg_e822(hw, port, high_addr, high); + err = ice_write_phy_reg_e82x(hw, port, high_addr, high); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d", high_addr, err); @@ -613,7 +613,7 @@ ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) } /** - * ice_fill_quad_msg_e822 - Fill message data for quad register access + * ice_fill_quad_msg_e82x - Fill message data for quad register access * @msg: the PHY message buffer to fill in * @quad: the quad to access * @offset: the register offset @@ -622,7 +622,7 @@ ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) * multiple PHYs. 
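Quad registers sit on the part of the PHY shared by all of its ports, so addressing only needs to know whether the quad is the first or second one on its PHY. A standalone sketch, assuming two quads per PHY (ICE_QUADS_PER_PHY_E82X) and placeholder base addresses:

#include <stdio.h>
#include <stdint.h>

#define QUADS_PER_PHY	2	/* assumed value of ICE_QUADS_PER_PHY_E82X */
#define Q_0_BASE	0x1000	/* placeholder base addresses */
#define Q_1_BASE	0x2000

static uint32_t quad_msg_addr(unsigned int quad, uint16_t offset)
{
	/* Even quads sit at the first register block on their PHY, odd
	 * quads at the second; the register offset is added on top. */
	uint32_t base = (quad % QUADS_PER_PHY) == 0 ? Q_0_BASE : Q_1_BASE;

	return base + offset;
}

int main(void)
{
	for (unsigned int quad = 0; quad < 4; quad++)
		printf("quad %u -> 0x%x\n", quad, quad_msg_addr(quad, 0x24));
	return 0;
}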
*/ static int -ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset) +ice_fill_quad_msg_e82x(struct ice_sbq_msg_input *msg, u8 quad, u16 offset) { u32 addr; @@ -631,7 +631,7 @@ ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset) msg->dest_dev = rmn_0; - if ((quad % ICE_QUADS_PER_PHY_E822) == 0) + if ((quad % ICE_QUADS_PER_PHY_E82X) == 0) addr = Q_0_BASE + offset; else addr = Q_1_BASE + offset; @@ -643,7 +643,7 @@ ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset) } /** - * ice_read_quad_reg_e822 - Read a PHY quad register + * ice_read_quad_reg_e82x - Read a PHY quad register * @hw: pointer to the HW struct * @quad: quad to read from * @offset: quad register offset to read @@ -653,12 +653,12 @@ ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset) * shared between multiple PHYs. */ int -ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val) +ice_read_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 *val) { struct ice_sbq_msg_input msg = {0}; int err; - err = ice_fill_quad_msg_e822(&msg, quad, offset); + err = ice_fill_quad_msg_e82x(&msg, quad, offset); if (err) return err; @@ -677,7 +677,7 @@ ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val) } /** - * ice_write_quad_reg_e822 - Write a PHY quad register + * ice_write_quad_reg_e82x - Write a PHY quad register * @hw: pointer to the HW struct * @quad: quad to write to * @offset: quad register offset to write @@ -687,12 +687,12 @@ ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val) * shared between multiple PHYs. */ int -ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val) +ice_write_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 val) { struct ice_sbq_msg_input msg = {0}; int err; - err = ice_fill_quad_msg_e822(&msg, quad, offset); + err = ice_fill_quad_msg_e82x(&msg, quad, offset); if (err) return err; @@ -710,7 +710,7 @@ ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val) } /** - * ice_read_phy_tstamp_e822 - Read a PHY timestamp out of the quad block + * ice_read_phy_tstamp_e82x - Read a PHY timestamp out of the quad block * @hw: pointer to the HW struct * @quad: the quad to read from * @idx: the timestamp index to read @@ -721,7 +721,7 @@ ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val) * family of devices. 
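Captured timestamps are also 40-bit quantities returned as two 32-bit reads: 32 low bits plus 8 significant high bits, the same layout the TS_LL_READ_TS_HIGH path earlier in this patch reassembles with a 32-bit shift. A minimal sketch (shift width assumed to match that path):

#include <stdio.h>
#include <stdint.h>

/* Reassemble a 40-bit PHY timestamp from its two register halves:
 * 8 significant bits in `hi`, 32 bits in `lo`. */
static uint64_t assemble_ts40(uint8_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	uint64_t ts = assemble_ts40(0x5a, 0x11223344);

	printf("raw 40-bit timestamp: 0x%010llx\n", (unsigned long long)ts);
	return 0;
}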
*/ static int -ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp) +ice_read_phy_tstamp_e82x(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp) { u16 lo_addr, hi_addr; u32 lo, hi; @@ -730,14 +730,14 @@ ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp) lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx); hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx); - err = ice_read_quad_reg_e822(hw, quad, lo_addr, &lo); + err = ice_read_quad_reg_e82x(hw, quad, lo_addr, &lo); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n", err); return err; } - err = ice_read_quad_reg_e822(hw, quad, hi_addr, &hi); + err = ice_read_quad_reg_e82x(hw, quad, hi_addr, &hi); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n", err); @@ -754,7 +754,7 @@ ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp) } /** - * ice_clear_phy_tstamp_e822 - Clear a timestamp from the quad block + * ice_clear_phy_tstamp_e82x - Clear a timestamp from the quad block * @hw: pointer to the HW struct * @quad: the quad to read from * @idx: the timestamp index to reset @@ -770,18 +770,18 @@ ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp) * * To directly clear the contents of the timestamp block entirely, discarding * all timestamp data at once, software should instead use - * ice_ptp_reset_ts_memory_quad_e822(). + * ice_ptp_reset_ts_memory_quad_e82x(). * * This function should only be called on an idx whose bit is set according to * ice_get_phy_tx_tstamp_ready(). */ static int -ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx) +ice_clear_phy_tstamp_e82x(struct ice_hw *hw, u8 quad, u8 idx) { u64 unused_tstamp; int err; - err = ice_read_phy_tstamp_e822(hw, quad, idx, &unused_tstamp); + err = ice_read_phy_tstamp_e82x(hw, quad, idx, &unused_tstamp); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read the timestamp register for quad %u, idx %u, err %d\n", quad, idx, err); @@ -792,33 +792,33 @@ ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx) } /** - * ice_ptp_reset_ts_memory_quad_e822 - Clear all timestamps from the quad block + * ice_ptp_reset_ts_memory_quad_e82x - Clear all timestamps from the quad block * @hw: pointer to the HW struct * @quad: the quad to read from * * Clear all timestamps from the PHY quad block that is shared between the * internal PHYs on the E822 devices. 
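The replacement body that follows clears a quad's timestamp memory by pulsing the Q_REG_TS_CTRL command bits: write the mask, then write its complement. The same pulse, sketched with a stubbed register (a single command bit is assumed):

#include <stdio.h>
#include <stdint.h>

#define TS_CTRL_M 0x1u	/* assumed single command bit, as Q_REG_TS_CTRL_M */

static uint32_t ts_ctrl_reg;	/* stand-in for the Q_REG_TS_CTRL register */

static void reg_write(uint32_t val)
{
	ts_ctrl_reg = val;
	printf("TS_CTRL <- 0x%08x\n", val);
}

int main(void)
{
	/* Pulse the reset command: assert the command bit, then write the
	 * complement (mirroring the driver) so the bit is deasserted and
	 * the block can capture again. */
	reg_write(TS_CTRL_M);
	reg_write(~TS_CTRL_M);
	return 0;
}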
*/ -void ice_ptp_reset_ts_memory_quad_e822(struct ice_hw *hw, u8 quad) +void ice_ptp_reset_ts_memory_quad_e82x(struct ice_hw *hw, u8 quad) { - ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, Q_REG_TS_CTRL_M); - ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, ~(u32)Q_REG_TS_CTRL_M); + ice_write_quad_reg_e82x(hw, quad, Q_REG_TS_CTRL, Q_REG_TS_CTRL_M); + ice_write_quad_reg_e82x(hw, quad, Q_REG_TS_CTRL, ~(u32)Q_REG_TS_CTRL_M); } /** - * ice_ptp_reset_ts_memory_e822 - Clear all timestamps from all quad blocks + * ice_ptp_reset_ts_memory_e82x - Clear all timestamps from all quad blocks * @hw: pointer to the HW struct */ -static void ice_ptp_reset_ts_memory_e822(struct ice_hw *hw) +static void ice_ptp_reset_ts_memory_e82x(struct ice_hw *hw) { unsigned int quad; for (quad = 0; quad < ICE_MAX_QUAD; quad++) - ice_ptp_reset_ts_memory_quad_e822(hw, quad); + ice_ptp_reset_ts_memory_quad_e82x(hw, quad); } /** - * ice_read_cgu_reg_e822 - Read a CGU register + * ice_read_cgu_reg_e82x - Read a CGU register * @hw: pointer to the HW struct * @addr: Register address to read * @val: storage for register value read @@ -827,7 +827,7 @@ static void ice_ptp_reset_ts_memory_e822(struct ice_hw *hw) * applicable to E822 devices. */ static int -ice_read_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 *val) +ice_read_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 *val) { struct ice_sbq_msg_input cgu_msg; int err; @@ -850,7 +850,7 @@ ice_read_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 *val) } /** - * ice_write_cgu_reg_e822 - Write a CGU register + * ice_write_cgu_reg_e82x - Write a CGU register * @hw: pointer to the HW struct * @addr: Register address to write * @val: value to write into the register @@ -859,7 +859,7 @@ ice_read_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 *val) * applicable to E822 devices. */ static int -ice_write_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 val) +ice_write_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 val) { struct ice_sbq_msg_input cgu_msg; int err; @@ -925,7 +925,7 @@ static const char *ice_clk_src_str(u8 clk_src) } /** - * ice_cfg_cgu_pll_e822 - Configure the Clock Generation Unit + * ice_cfg_cgu_pll_e82x - Configure the Clock Generation Unit * @hw: pointer to the HW struct * @clk_freq: Clock frequency to program * @clk_src: Clock source to select (TIME_REF, or TCX0) @@ -934,7 +934,7 @@ static const char *ice_clk_src_str(u8 clk_src) * time reference, enabling the PLL which drives the PTP hardware clock. 
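The body that follows programs the CGU in a fixed order: disable the PLL, select the reference frequency (dw9), program the feedback divider (dw19) and post divider (dw22), set the pre-divider and clock source and re-enable (dw24), then poll the lock indicator. A condensed standalone sketch of that ordering; the divider numbers are placeholders, not values from e822_cgu_params:

#include <stdio.h>
#include <stdbool.h>

static bool pll_enabled, pll_locked;
static int freq_sel, fbdiv, postdiv, clk_src;

/* Stubbed CGU access; the driver issues these writes over the device
 * sideband queue instead. */
static void cgu_set_pll_enable(bool ena)
{
	pll_enabled = ena;
	pll_locked = ena;	/* pretend the PLL locks instantly */
}

static int cfg_cgu_pll(int new_freq, int new_src)
{
	/* 1. Never retune a live PLL: disable it first. */
	cgu_set_pll_enable(false);

	/* 2. Select the new reference frequency (dw9). */
	freq_sel = new_freq;

	/* 3. Feedback divider (dw19) and post divider (dw22); real values
	 * come from a per-frequency parameter table. */
	fbdiv = 100 + new_freq;		/* placeholder */
	postdiv = 2;			/* placeholder */

	/* 4. Pre-divider, clock source and enable (dw24). */
	clk_src = new_src;
	cgu_set_pll_enable(true);

	/* 5. Poll the lock indicator before trusting the clock. */
	if (!pll_locked)
		return -1;

	printf("freq=%d fbdiv=%d postdiv=%d src=%d locked\n",
	       freq_sel, fbdiv, postdiv, clk_src);
	return 0;
}

int main(void)
{
	return cfg_cgu_pll(0, 0);
}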
*/ static int -ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq, +ice_cfg_cgu_pll_e82x(struct ice_hw *hw, enum ice_time_ref_freq clk_freq, enum ice_clk_src clk_src) { union tspll_ro_bwm_lf bwm_lf; @@ -963,15 +963,15 @@ ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq, return -EINVAL; } - err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD9, &dw9.val); + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val); if (err) return err; - err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val); + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val); if (err) return err; - err = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val); + err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val); if (err) return err; @@ -986,43 +986,43 @@ ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq, if (dw24.field.ts_pll_enable) { dw24.field.ts_pll_enable = 0; - err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val); + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val); if (err) return err; } /* Set the frequency */ dw9.field.time_ref_freq_sel = clk_freq; - err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD9, dw9.val); + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val); if (err) return err; /* Configure the TS PLL feedback divisor */ - err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD19, &dw19.val); + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD19, &dw19.val); if (err) return err; dw19.field.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div; dw19.field.tspll_ndivratio = 1; - err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD19, dw19.val); + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD19, dw19.val); if (err) return err; /* Configure the TS PLL post divisor */ - err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD22, &dw22.val); + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD22, &dw22.val); if (err) return err; dw22.field.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div; dw22.field.time1588clk_sel_div2 = 0; - err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD22, dw22.val); + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD22, dw22.val); if (err) return err; /* Configure the TS PLL pre divisor and clock source */ - err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val); + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val); if (err) return err; @@ -1030,21 +1030,21 @@ ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq, dw24.field.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div; dw24.field.time_ref_sel = clk_src; - err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val); + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val); if (err) return err; /* Finally, enable the PLL */ dw24.field.ts_pll_enable = 1; - err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val); + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val); if (err) return err; /* Wait to verify if the PLL locks */ usleep_range(1000, 5000); - err = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val); + err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val); if (err) return err; @@ -1064,18 +1064,18 @@ ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq, } /** - * ice_init_cgu_e822 - Initialize CGU with settings from firmware + * ice_init_cgu_e82x - Initialize CGU with settings from firmware * @hw: pointer to the HW structure * * Initialize the Clock Generation Unit of the E822 device. 
*/ -static int ice_init_cgu_e822(struct ice_hw *hw) +static int ice_init_cgu_e82x(struct ice_hw *hw) { struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info; union tspll_cntr_bist_settings cntr_bist; int err; - err = ice_read_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS, + err = ice_read_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS, &cntr_bist.val); if (err) return err; @@ -1084,7 +1084,7 @@ static int ice_init_cgu_e822(struct ice_hw *hw) cntr_bist.field.i_plllock_sel_0 = 0; cntr_bist.field.i_plllock_sel_1 = 0; - err = ice_write_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS, + err = ice_write_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS, cntr_bist.val); if (err) return err; @@ -1092,7 +1092,7 @@ static int ice_init_cgu_e822(struct ice_hw *hw) /* Configure the CGU PLL using the parameters from the function * capabilities. */ - err = ice_cfg_cgu_pll_e822(hw, ts_info->time_ref, + err = ice_cfg_cgu_pll_e82x(hw, ts_info->time_ref, (enum ice_clk_src)ts_info->clk_src); if (err) return err; @@ -1113,7 +1113,7 @@ static int ice_ptp_set_vernier_wl(struct ice_hw *hw) for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { int err; - err = ice_write_phy_reg_e822(hw, port, P_REG_WL, + err = ice_write_phy_reg_e82x(hw, port, P_REG_WL, PTP_VERNIER_WL); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to set vernier window length for port %u, err %d\n", @@ -1126,12 +1126,12 @@ static int ice_ptp_set_vernier_wl(struct ice_hw *hw) } /** - * ice_ptp_init_phc_e822 - Perform E822 specific PHC initialization + * ice_ptp_init_phc_e82x - Perform E822 specific PHC initialization * @hw: pointer to HW struct * * Perform PHC initialization steps specific to E822 devices. */ -static int ice_ptp_init_phc_e822(struct ice_hw *hw) +static int ice_ptp_init_phc_e82x(struct ice_hw *hw) { int err; u32 regval; @@ -1145,7 +1145,7 @@ static int ice_ptp_init_phc_e822(struct ice_hw *hw) wr32(hw, PF_SB_REM_DEV_CTL, regval); /* Initialize the Clock Generation Unit */ - err = ice_init_cgu_e822(hw); + err = ice_init_cgu_e82x(hw); if (err) return err; @@ -1154,7 +1154,7 @@ static int ice_ptp_init_phc_e822(struct ice_hw *hw) } /** - * ice_ptp_prep_phy_time_e822 - Prepare PHY port with initial time + * ice_ptp_prep_phy_time_e82x - Prepare PHY port with initial time * @hw: pointer to the HW struct * @time: Time to initialize the PHY port clocks to * @@ -1164,7 +1164,7 @@ static int ice_ptp_init_phc_e822(struct ice_hw *hw) * units of nominal nanoseconds. */ static int -ice_ptp_prep_phy_time_e822(struct ice_hw *hw, u32 time) +ice_ptp_prep_phy_time_e82x(struct ice_hw *hw, u32 time) { u64 phy_time; u8 port; @@ -1177,14 +1177,14 @@ ice_ptp_prep_phy_time_e822(struct ice_hw *hw, u32 time) for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { /* Tx case */ - err = ice_write_64b_phy_reg_e822(hw, port, + err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_TX_TIMER_INC_PRE_L, phy_time); if (err) goto exit_err; /* Rx case */ - err = ice_write_64b_phy_reg_e822(hw, port, + err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_RX_TIMER_INC_PRE_L, phy_time); if (err) @@ -1201,7 +1201,7 @@ exit_err: } /** - * ice_ptp_prep_port_adj_e822 - Prepare a single port for time adjust + * ice_ptp_prep_port_adj_e82x - Prepare a single port for time adjust * @hw: pointer to HW struct * @port: Port number to be programmed * @time: time in cycles to adjust the port Tx and Rx clocks @@ -1216,7 +1216,7 @@ exit_err: * Negative adjustments are supported using 2s complement arithmetic. 
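The 2s complement remark is why the body below needs no special sign handling: the s64 adjustment is written as two independent 32-bit halves with lower_32_bits()/upper_32_bits(), and concatenating the halves reproduces a negative value exactly. A standalone check:

#include <stdio.h>
#include <stdint.h>

/* Userspace equivalents of the kernel's lower_32_bits()/upper_32_bits(). */
#define lower_32_bits(v) ((uint32_t)((v) & 0xffffffffu))
#define upper_32_bits(v) ((uint32_t)((uint64_t)(v) >> 32))

int main(void)
{
	int64_t adj = -123456789;	/* a negative port clock adjustment */

	uint32_t l_time = lower_32_bits(adj);
	uint32_t u_time = upper_32_bits(adj);

	/* The hardware simply concatenates the halves again. */
	int64_t back = (int64_t)(((uint64_t)u_time << 32) | l_time);

	printf("split 0x%08x:0x%08x -> %lld\n", u_time, l_time, (long long)back);
	return back == adj ? 0 : 1;
}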
*/ static int -ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time) +ice_ptp_prep_port_adj_e82x(struct ice_hw *hw, u8 port, s64 time) { u32 l_time, u_time; int err; @@ -1225,23 +1225,23 @@ ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time) u_time = upper_32_bits(time); /* Tx case */ - err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TIMER_INC_PRE_L, + err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TIMER_INC_PRE_L, l_time); if (err) goto exit_err; - err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TIMER_INC_PRE_U, + err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TIMER_INC_PRE_U, u_time); if (err) goto exit_err; /* Rx case */ - err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TIMER_INC_PRE_L, + err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TIMER_INC_PRE_L, l_time); if (err) goto exit_err; - err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TIMER_INC_PRE_U, + err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TIMER_INC_PRE_U, u_time); if (err) goto exit_err; @@ -1255,7 +1255,7 @@ exit_err: } /** - * ice_ptp_prep_phy_adj_e822 - Prep PHY ports for a time adjustment + * ice_ptp_prep_phy_adj_e82x - Prep PHY ports for a time adjustment * @hw: pointer to HW struct * @adj: adjustment in nanoseconds * @@ -1264,7 +1264,7 @@ exit_err: * ICE_PTP_ADJ_TIME or ICE_PTP_ADJ_TIME_AT_TIME sync command. */ static int -ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj) +ice_ptp_prep_phy_adj_e82x(struct ice_hw *hw, s32 adj) { s64 cycles; u8 port; @@ -1281,7 +1281,7 @@ ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj) for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { int err; - err = ice_ptp_prep_port_adj_e822(hw, port, cycles); + err = ice_ptp_prep_port_adj_e82x(hw, port, cycles); if (err) return err; } @@ -1290,7 +1290,7 @@ ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj) } /** - * ice_ptp_prep_phy_incval_e822 - Prepare PHY ports for time adjustment + * ice_ptp_prep_phy_incval_e82x - Prepare PHY ports for time adjustment * @hw: pointer to HW struct * @incval: new increment value to prepare * @@ -1299,13 +1299,13 @@ ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj) * issuing an ICE_PTP_INIT_INCVAL command. 
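ice_ptp_prep_port_adj_e82x splits the signed 64-bit adjustment into two 32-bit register writes with lower_32_bits()/upper_32_bits(); because the value is 2's complement, a negative adjustment survives the round trip unchanged, which is exactly what the "Negative adjustments are supported" comment relies on. A self-contained check of that property:

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel's lower_32_bits()/upper_32_bits(). */
#define lower_32_bits(x) ((uint32_t)((uint64_t)(x) & 0xffffffffu))
#define upper_32_bits(x) ((uint32_t)((uint64_t)(x) >> 32))

int main(void)
{
    int64_t adj = -3000; /* e.g. pull the port clock back 3000 cycles */
    uint32_t l_time = lower_32_bits(adj);
    uint32_t u_time = upper_32_bits(adj);

    /* Hardware concatenates the U and L registers back into 64 bits. */
    int64_t seen = (int64_t)(((uint64_t)u_time << 32) | l_time);

    printf("l=0x%08" PRIx32 " u=0x%08" PRIx32 " seen=%" PRId64 "\n",
           l_time, u_time, seen);
    return 0;
}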
*/ static int -ice_ptp_prep_phy_incval_e822(struct ice_hw *hw, u64 incval) +ice_ptp_prep_phy_incval_e82x(struct ice_hw *hw, u64 incval) { int err; u8 port; for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { - err = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L, + err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_TIMETUS_L, incval); if (err) goto exit_err; @@ -1337,7 +1337,7 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts) int err; /* Tx case */ - err = ice_read_64b_phy_reg_e822(hw, port, P_REG_TX_CAPTURE_L, tx_ts); + err = ice_read_64b_phy_reg_e82x(hw, port, P_REG_TX_CAPTURE_L, tx_ts); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read REG_TX_CAPTURE, err %d\n", err); @@ -1348,7 +1348,7 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts) (unsigned long long)*tx_ts); /* Rx case */ - err = ice_read_64b_phy_reg_e822(hw, port, P_REG_RX_CAPTURE_L, rx_ts); + err = ice_read_64b_phy_reg_e82x(hw, port, P_REG_RX_CAPTURE_L, rx_ts); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_CAPTURE, err %d\n", err); @@ -1362,7 +1362,7 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts) } /** - * ice_ptp_write_port_cmd_e822 - Prepare a single PHY port for a timer command + * ice_ptp_write_port_cmd_e82x - Prepare a single PHY port for a timer command * @hw: pointer to HW struct * @port: Port to which cmd has to be sent * @cmd: Command to be sent to the port @@ -1372,8 +1372,8 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts) * Do not use this function directly. If you want to configure exactly one * port, use ice_ptp_one_port_cmd() instead. */ -static int -ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd) +static int ice_ptp_write_port_cmd_e82x(struct ice_hw *hw, u8 port, + enum ice_ptp_tmr_cmd cmd) { u32 cmd_val, val; u8 tmr_idx; @@ -1403,7 +1403,7 @@ ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd /* Tx case */ /* Read, modify, write */ - err = ice_read_phy_reg_e822(hw, port, P_REG_TX_TMR_CMD, &val); + err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_TMR_CMD, &val); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_TMR_CMD, err %d\n", err); @@ -1414,7 +1414,7 @@ ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd val &= ~TS_CMD_MASK; val |= cmd_val; - err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TMR_CMD, val); + err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TMR_CMD, val); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, err %d\n", err); @@ -1423,7 +1423,7 @@ ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd /* Rx case */ /* Read, modify, write */ - err = ice_read_phy_reg_e822(hw, port, P_REG_RX_TMR_CMD, &val); + err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD, &val); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_TMR_CMD, err %d\n", err); @@ -1434,7 +1434,7 @@ ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd val &= ~TS_CMD_MASK; val |= cmd_val; - err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TMR_CMD, val); + err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD, val); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, err %d\n", err); @@ -1469,7 +1469,7 @@ ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port, else cmd = ICE_PTP_NOP; - err = ice_ptp_write_port_cmd_e822(hw, port, cmd); + err = ice_ptp_write_port_cmd_e82x(hw, port, cmd); if 
(err) return err; } @@ -1478,7 +1478,7 @@ ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port, } /** - * ice_ptp_port_cmd_e822 - Prepare all ports for a timer command + * ice_ptp_port_cmd_e82x - Prepare all ports for a timer command * @hw: pointer to the HW struct * @cmd: timer command to prepare * @@ -1486,14 +1486,14 @@ ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port, * command. */ static int -ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) +ice_ptp_port_cmd_e82x(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) { u8 port; for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { int err; - err = ice_ptp_write_port_cmd_e822(hw, port, cmd); + err = ice_ptp_write_port_cmd_e82x(hw, port, cmd); if (err) return err; } @@ -1509,7 +1509,7 @@ ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) */ /** - * ice_phy_get_speed_and_fec_e822 - Get link speed and FEC based on serdes mode + * ice_phy_get_speed_and_fec_e82x - Get link speed and FEC based on serdes mode * @hw: pointer to HW struct * @port: the port to read from * @link_out: if non-NULL, holds link speed on success @@ -1519,7 +1519,7 @@ ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) * algorithm. */ static int -ice_phy_get_speed_and_fec_e822(struct ice_hw *hw, u8 port, +ice_phy_get_speed_and_fec_e82x(struct ice_hw *hw, u8 port, enum ice_ptp_link_spd *link_out, enum ice_ptp_fec_mode *fec_out) { @@ -1528,7 +1528,7 @@ ice_phy_get_speed_and_fec_e822(struct ice_hw *hw, u8 port, u32 serdes; int err; - err = ice_read_phy_reg_e822(hw, port, P_REG_LINK_SPEED, &serdes); + err = ice_read_phy_reg_e82x(hw, port, P_REG_LINK_SPEED, &serdes); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read serdes info\n"); return err; @@ -1585,18 +1585,18 @@ ice_phy_get_speed_and_fec_e822(struct ice_hw *hw, u8 port, } /** - * ice_phy_cfg_lane_e822 - Configure PHY quad for single/multi-lane timestamp + * ice_phy_cfg_lane_e82x - Configure PHY quad for single/multi-lane timestamp * @hw: pointer to HW struct * @port: to configure the quad for */ -static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port) +static void ice_phy_cfg_lane_e82x(struct ice_hw *hw, u8 port) { enum ice_ptp_link_spd link_spd; int err; u32 val; u8 quad; - err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, NULL); + err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, NULL); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to get PHY link speed, err %d\n", err); @@ -1605,7 +1605,7 @@ static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port) quad = port / ICE_PORTS_PER_QUAD; - err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val); + err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEM_GLB_CFG, err %d\n", err); @@ -1617,7 +1617,7 @@ static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port) else val |= Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M; - err = ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, val); + err = ice_write_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, val); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_MEM_GBL_CFG, err %d\n", err); @@ -1626,7 +1626,7 @@ static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port) } /** - * ice_phy_cfg_uix_e822 - Configure Serdes UI to TU conversion for E822 + * ice_phy_cfg_uix_e82x - Configure Serdes UI to TU conversion for E822 * @hw: pointer to the HW structure * @port: the port to configure * @@ -1671,12 +1671,12 @@ static void ice_phy_cfg_lane_e822(struct ice_hw 
*hw, u8 port) * a divide by 390,625,000. This does lose some precision, but avoids * miscalculation due to arithmetic overflow. */ -static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port) +static int ice_phy_cfg_uix_e82x(struct ice_hw *hw, u8 port) { u64 cur_freq, clk_incval, tu_per_sec, uix; int err; - cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw)); + cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw)); clk_incval = ice_ptp_read_src_incval(hw); /* Calculate TUs per second divided by 256 */ @@ -1688,7 +1688,7 @@ static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port) /* Program the 10Gb/40Gb conversion ratio */ uix = div_u64(tu_per_sec * LINE_UI_10G_40G, 390625000); - err = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_10G_40G_L, + err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_UIX66_10G_40G_L, uix); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_10G_40G, err %d\n", @@ -1699,7 +1699,7 @@ static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port) /* Program the 25Gb/100Gb conversion ratio */ uix = div_u64(tu_per_sec * LINE_UI_25G_100G, 390625000); - err = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_25G_100G_L, + err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_UIX66_25G_100G_L, uix); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_25G_100G, err %d\n", @@ -1711,7 +1711,7 @@ static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port) } /** - * ice_phy_cfg_parpcs_e822 - Configure TUs per PAR/PCS clock cycle + * ice_phy_cfg_parpcs_e82x - Configure TUs per PAR/PCS clock cycle * @hw: pointer to the HW struct * @port: port to configure * @@ -1753,18 +1753,18 @@ static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port) * frequency is ~29 bits, so multiplying them together should fit within the * 64 bit arithmetic. 
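The magic divisor is less arbitrary than it looks: 390,625,000 * 256 = 10^11, so the single div_u64() simultaneously undoes the earlier "TUs per second divided by 256" prescale and performs the time-unit conversion for the LINE_UI_* constant. Doing the >> 8 before the multiply is what keeps the intermediate product inside 64 bits. A sketch with made-up magnitudes (the real PLL frequency, incval and LINE_UI_* values differ):

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's div_u64(). */
static uint64_t div_u64(uint64_t dividend, uint32_t divisor)
{
    return dividend / divisor;
}

int main(void)
{
    uint64_t cur_freq = 800000000ULL;     /* PLL Hz, illustrative */
    uint64_t clk_incval = 0x100000000ULL; /* TUs per tick, illustrative */
    uint64_t line_ui = 634;               /* per-speed UI constant, illustrative */

    /* TUs per second, pre-divided by 256 so the multiply fits in 64 bits */
    uint64_t tu_per_sec = (cur_freq * clk_incval) >> 8;

    /* 390,625,000 = 10^11 / 256: folds the prescale back in */
    uint64_t uix = div_u64(tu_per_sec * line_ui, 390625000);

    printf("tu_per_sec/256 = %" PRIu64 ", uix = %" PRIu64 "\n",
           tu_per_sec, uix);
    return 0;
}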
*/ -static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port) +static int ice_phy_cfg_parpcs_e82x(struct ice_hw *hw, u8 port) { u64 cur_freq, clk_incval, tu_per_sec, phy_tus; enum ice_ptp_link_spd link_spd; enum ice_ptp_fec_mode fec_mode; int err; - err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode); + err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, &fec_mode); if (err) return err; - cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw)); + cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw)); clk_incval = ice_ptp_read_src_incval(hw); /* Calculate TUs per cycle of the PHC clock */ @@ -1784,7 +1784,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port) else phy_tus = 0; - err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PAR_TX_TUS_L, + err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PAR_TX_TUS_L, phy_tus); if (err) return err; @@ -1796,7 +1796,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port) else phy_tus = 0; - err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PAR_RX_TUS_L, + err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PAR_RX_TUS_L, phy_tus); if (err) return err; @@ -1808,7 +1808,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port) else phy_tus = 0; - err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PCS_TX_TUS_L, + err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PCS_TX_TUS_L, phy_tus); if (err) return err; @@ -1820,7 +1820,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port) else phy_tus = 0; - err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PCS_RX_TUS_L, + err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PCS_RX_TUS_L, phy_tus); if (err) return err; @@ -1832,7 +1832,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port) else phy_tus = 0; - err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PAR_TX_TUS_L, + err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PAR_TX_TUS_L, phy_tus); if (err) return err; @@ -1844,7 +1844,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port) else phy_tus = 0; - err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PAR_RX_TUS_L, + err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PAR_RX_TUS_L, phy_tus); if (err) return err; @@ -1856,7 +1856,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port) else phy_tus = 0; - err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PCS_TX_TUS_L, + err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PCS_TX_TUS_L, phy_tus); if (err) return err; @@ -1868,23 +1868,23 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port) else phy_tus = 0; - return ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PCS_RX_TUS_L, + return ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PCS_RX_TUS_L, phy_tus); } /** - * ice_calc_fixed_tx_offset_e822 - Calculated Fixed Tx offset for a port + * ice_calc_fixed_tx_offset_e82x - Calculated Fixed Tx offset for a port * @hw: pointer to the HW struct * @link_spd: the Link speed to calculate for * * Calculate the fixed offset due to known static latency data. 
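Each of the eight writes in ice_phy_cfg_parpcs_e82x follows the same recipe: divide the TUs-per-second figure by the relevant PAR or PCS clock frequency for the current link speed, giving TUs per clock cycle, and treat a zero frequency as "this clock does not exist at this speed", programming 0 instead. A simplified sketch with invented frequencies (the driver's real per-speed values live in e822_vernier[]):

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

struct vernier_clks {
    uint32_t tx_par_clk; /* Hz; 0 = clock absent at this speed */
    uint32_t rx_par_clk;
};

/* Invented per-speed table for illustration. */
static const struct vernier_clks vernier[] = {
    { 257812500, 257812500 }, /* index 0: some multi-lane speed */
    { 0,         156250000 }, /* index 1: Tx PAR clock absent */
};

static uint64_t tus_per_cycle(uint64_t tu_per_sec, uint32_t clk_hz)
{
    return clk_hz ? tu_per_sec / clk_hz : 0;
}

int main(void)
{
    uint64_t tu_per_sec = 3435973836800000ULL; /* illustrative */

    for (unsigned int i = 0; i < 2; i++)
        printf("speed %u: tx_tus=%" PRIu64 " rx_tus=%" PRIu64 "\n", i,
               tus_per_cycle(tu_per_sec, vernier[i].tx_par_clk),
               tus_per_cycle(tu_per_sec, vernier[i].rx_par_clk));
    return 0;
}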
*/ static u64 -ice_calc_fixed_tx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd) +ice_calc_fixed_tx_offset_e82x(struct ice_hw *hw, enum ice_ptp_link_spd link_spd) { u64 cur_freq, clk_incval, tu_per_sec, fixed_offset; - cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw)); + cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw)); clk_incval = ice_ptp_read_src_incval(hw); /* Calculate TUs per second */ @@ -1904,7 +1904,7 @@ ice_calc_fixed_tx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd) } /** - * ice_phy_cfg_tx_offset_e822 - Configure total Tx timestamp offset + * ice_phy_cfg_tx_offset_e82x - Configure total Tx timestamp offset * @hw: pointer to the HW struct * @port: the PHY port to configure * @@ -1926,7 +1926,7 @@ ice_calc_fixed_tx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd) * Returns zero on success, -EBUSY if the hardware vernier offset * calibration has not completed, or another error code on failure. */ -int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port) +int ice_phy_cfg_tx_offset_e82x(struct ice_hw *hw, u8 port) { enum ice_ptp_link_spd link_spd; enum ice_ptp_fec_mode fec_mode; @@ -1935,7 +1935,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port) u32 reg; /* Nothing to do if we've already programmed the offset */ - err = ice_read_phy_reg_e822(hw, port, P_REG_TX_OR, &reg); + err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_OR, &reg); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OR for port %u, err %d\n", port, err); @@ -1945,7 +1945,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port) if (reg) return 0; - err = ice_read_phy_reg_e822(hw, port, P_REG_TX_OV_STATUS, &reg); + err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_OV_STATUS, &reg); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OV_STATUS for port %u, err %d\n", port, err); @@ -1955,11 +1955,11 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port) if (!(reg & P_REG_TX_OV_STATUS_OV_M)) return -EBUSY; - err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode); + err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, &fec_mode); if (err) return err; - total_offset = ice_calc_fixed_tx_offset_e822(hw, link_spd); + total_offset = ice_calc_fixed_tx_offset_e82x(hw, link_spd); /* Read the first Vernier offset from the PHY register and add it to * the total offset. @@ -1970,7 +1970,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port) link_spd == ICE_PTP_LNK_SPD_25G_RS || link_spd == ICE_PTP_LNK_SPD_40G || link_spd == ICE_PTP_LNK_SPD_50G) { - err = ice_read_64b_phy_reg_e822(hw, port, + err = ice_read_64b_phy_reg_e82x(hw, port, P_REG_PAR_PCS_TX_OFFSET_L, &val); if (err) @@ -1985,7 +1985,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port) */ if (link_spd == ICE_PTP_LNK_SPD_50G_RS || link_spd == ICE_PTP_LNK_SPD_100G_RS) { - err = ice_read_64b_phy_reg_e822(hw, port, + err = ice_read_64b_phy_reg_e82x(hw, port, P_REG_PAR_TX_TIME_L, &val); if (err) @@ -1998,12 +1998,12 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port) * PHY and indicate that the Tx offset is ready. After this, * timestamps will be enabled.
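ice_phy_cfg_tx_offset_e82x is written to be safely re-invoked: if P_REG_TX_OR is already set the work is done and it returns 0, and if the vernier overall-status bit is not yet set it returns -EBUSY so the caller can retry later instead of programming a half-measured offset. The control flow, reduced to its skeleton in standalone C:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Stubs for the two hardware states the real code reads back. */
static bool offset_ready;        /* P_REG_TX_OR already written? */
static bool vernier_done = true; /* P_REG_TX_OV_STATUS overall bit */

static int cfg_tx_offset(void)
{
    if (offset_ready)
        return 0;       /* nothing to do, already programmed */

    if (!vernier_done)
        return -EBUSY;  /* caller should retry later */

    /* ...sum fixed + vernier offsets and write TOTAL_TX_OFFSET... */
    offset_ready = true; /* mirrors writing P_REG_TX_OR = 1 */
    return 0;
}

int main(void)
{
    printf("first call:  %d\n", cfg_tx_offset());
    printf("second call: %d\n", cfg_tx_offset()); /* idempotent */
    return 0;
}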
*/ - err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_TX_OFFSET_L, + err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_TOTAL_TX_OFFSET_L, total_offset); if (err) return err; - err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 1); + err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_OR, 1); if (err) return err; @@ -2014,7 +2014,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port) } /** - * ice_phy_calc_pmd_adj_e822 - Calculate PMD adjustment for Rx + * ice_phy_calc_pmd_adj_e82x - Calculate PMD adjustment for Rx * @hw: pointer to the HW struct * @port: the PHY port to adjust for * @link_spd: the current link speed of the PHY @@ -2026,7 +2026,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port) * various delays caused when receiving a packet. */ static int -ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port, +ice_phy_calc_pmd_adj_e82x(struct ice_hw *hw, u8 port, enum ice_ptp_link_spd link_spd, enum ice_ptp_fec_mode fec_mode, u64 *pmd_adj) { @@ -2035,7 +2035,7 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port, u32 val; int err; - err = ice_read_phy_reg_e822(hw, port, P_REG_PMD_ALIGNMENT, &val); + err = ice_read_phy_reg_e82x(hw, port, P_REG_PMD_ALIGNMENT, &val); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read PMD alignment, err %d\n", err); @@ -2044,7 +2044,7 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port, pmd_align = (u8)val; - cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw)); + cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw)); clk_incval = ice_ptp_read_src_incval(hw); /* Calculate TUs per second */ @@ -2123,7 +2123,7 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port, u64 cycle_adj; u8 rx_cycle; - err = ice_read_phy_reg_e822(hw, port, P_REG_RX_40_TO_160_CNT, + err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_40_TO_160_CNT, &val); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read 25G-RS Rx cycle count, err %d\n", @@ -2145,7 +2145,7 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port, u64 cycle_adj; u8 rx_cycle; - err = ice_read_phy_reg_e822(hw, port, P_REG_RX_80_TO_160_CNT, + err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_80_TO_160_CNT, &val); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read 50G-RS Rx cycle count, err %d\n", @@ -2172,18 +2172,18 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port, } /** - * ice_calc_fixed_rx_offset_e822 - Calculated the fixed Rx offset for a port + * ice_calc_fixed_rx_offset_e82x - Calculated the fixed Rx offset for a port * @hw: pointer to HW struct * @link_spd: The Link speed to calculate for * * Determine the fixed Rx latency for a given link speed. 
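Both fixed-offset helpers convert a known static latency into time units from the same tu_per_sec figure. The elided body is not shown in this hunk, but one overflow-safe ordering consistent with it is to divide TUs-per-second by 10^9 first and multiply by the nanosecond latency afterwards, trading sub-nanosecond precision for 64-bit headroom. A sketch with an invented latency value:

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint64_t tu_per_sec = 13743895347200000ULL; /* illustrative */
    uint64_t fixed_delay_ns = 25;               /* invented static latency */

    /* Divide first: tu_per_sec * delay could overflow 64 bits for
     * large delays, while TUs-per-nanosecond always fits. */
    uint64_t fixed_offset = (tu_per_sec / 1000000000ULL) * fixed_delay_ns;

    printf("fixed offset = %" PRIu64 " TUs\n", fixed_offset);
    return 0;
}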
*/ static u64 -ice_calc_fixed_rx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd) +ice_calc_fixed_rx_offset_e82x(struct ice_hw *hw, enum ice_ptp_link_spd link_spd) { u64 cur_freq, clk_incval, tu_per_sec, fixed_offset; - cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw)); + cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw)); clk_incval = ice_ptp_read_src_incval(hw); /* Calculate TUs per second */ @@ -2203,7 +2203,7 @@ ice_calc_fixed_rx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd) } /** - * ice_phy_cfg_rx_offset_e822 - Configure total Rx timestamp offset + * ice_phy_cfg_rx_offset_e82x - Configure total Rx timestamp offset * @hw: pointer to the HW struct * @port: the PHY port to configure * @@ -2229,7 +2229,7 @@ ice_calc_fixed_rx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd) * Returns zero on success, -EBUSY if the hardware vernier offset * calibration has not completed, or another error code on failure. */ -int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port) +int ice_phy_cfg_rx_offset_e82x(struct ice_hw *hw, u8 port) { enum ice_ptp_link_spd link_spd; enum ice_ptp_fec_mode fec_mode; @@ -2238,7 +2238,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port) u32 reg; /* Nothing to do if we've already programmed the offset */ - err = ice_read_phy_reg_e822(hw, port, P_REG_RX_OR, &reg); + err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_OR, &reg); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OR for port %u, err %d\n", port, err); @@ -2248,7 +2248,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port) if (reg) return 0; - err = ice_read_phy_reg_e822(hw, port, P_REG_RX_OV_STATUS, &reg); + err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_OV_STATUS, &reg); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OV_STATUS for port %u, err %d\n", port, err); @@ -2258,16 +2258,16 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port) if (!(reg & P_REG_RX_OV_STATUS_OV_M)) return -EBUSY; - err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode); + err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, &fec_mode); if (err) return err; - total_offset = ice_calc_fixed_rx_offset_e822(hw, link_spd); + total_offset = ice_calc_fixed_rx_offset_e82x(hw, link_spd); /* Read the first Vernier offset from the PHY register and add it to * the total offset. */ - err = ice_read_64b_phy_reg_e822(hw, port, + err = ice_read_64b_phy_reg_e82x(hw, port, P_REG_PAR_PCS_RX_OFFSET_L, &val); if (err) @@ -2282,7 +2282,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port) link_spd == ICE_PTP_LNK_SPD_50G || link_spd == ICE_PTP_LNK_SPD_50G_RS || link_spd == ICE_PTP_LNK_SPD_100G_RS) { - err = ice_read_64b_phy_reg_e822(hw, port, + err = ice_read_64b_phy_reg_e82x(hw, port, P_REG_PAR_RX_TIME_L, &val); if (err) @@ -2292,7 +2292,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port) } /* In addition, Rx must account for the PMD alignment */ - err = ice_phy_calc_pmd_adj_e822(hw, port, link_spd, fec_mode, &pmd); + err = ice_phy_calc_pmd_adj_e82x(hw, port, link_spd, fec_mode, &pmd); if (err) return err; @@ -2308,12 +2308,12 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port) * PHY and indicate that the Rx offset is ready. After this, * timestamps will be enabled.
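On the Rx side the total offset has one more ingredient than Tx: the PMD alignment adjustment computed by ice_phy_calc_pmd_adj_e82x is folded in on top of the fixed and vernier components before TOTAL_RX_OFFSET is written. How the PMD term enters depends on link speed and FEC mode, which this hunk does not show, so the sketch below simply adds it for illustration:

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    /* Invented component values, in time units (TUs). */
    uint64_t fixed = 120000;  /* static per-speed latency */
    uint64_t vernier = 4096;  /* P_REG_PAR_PCS_RX_OFFSET measurement */
    uint64_t pmd = 512;       /* PMD alignment adjustment */

    /* Speed/FEC-dependent in the driver; plain addition here only. */
    uint64_t total_offset = fixed + vernier + pmd;

    printf("TOTAL_RX_OFFSET = %" PRIu64 "\n", total_offset);
    return 0;
}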
*/ - err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_RX_OFFSET_L, + err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_TOTAL_RX_OFFSET_L, total_offset); if (err) return err; - err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 1); + err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_OR, 1); if (err) return err; @@ -2324,7 +2324,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port) } /** - * ice_read_phy_and_phc_time_e822 - Simultaneously capture PHC and PHY time + * ice_read_phy_and_phc_time_e82x - Simultaneously capture PHC and PHY time * @hw: pointer to the HW struct * @port: the PHY port to read * @phy_time: on return, the 64bit PHY timer value @@ -2334,7 +2334,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port) * and PHC timer values. */ static int -ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time, +ice_read_phy_and_phc_time_e82x(struct ice_hw *hw, u8 port, u64 *phy_time, u64 *phc_time) { u64 tx_time, rx_time; @@ -2381,7 +2381,7 @@ ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time, } /** - * ice_sync_phy_timer_e822 - Synchronize the PHY timer with PHC timer + * ice_sync_phy_timer_e82x - Synchronize the PHY timer with PHC timer * @hw: pointer to the HW struct * @port: the PHY port to synchronize * @@ -2392,7 +2392,7 @@ ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time, * to the PHY timer in order to ensure it reads the same value as the * primary PHC timer. */ -static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port) +static int ice_sync_phy_timer_e82x(struct ice_hw *hw, u8 port) { u64 phc_time, phy_time, difference; int err; @@ -2402,7 +2402,7 @@ static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port) return -EBUSY; } - err = ice_read_phy_and_phc_time_e822(hw, port, &phy_time, &phc_time); + err = ice_read_phy_and_phc_time_e82x(hw, port, &phy_time, &phc_time); if (err) goto err_unlock; @@ -2416,7 +2416,7 @@ static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port) */ difference = phc_time - phy_time; - err = ice_ptp_prep_port_adj_e822(hw, port, (s64)difference); + err = ice_ptp_prep_port_adj_e82x(hw, port, (s64)difference); if (err) goto err_unlock; @@ -2433,7 +2433,7 @@ static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port) /* Re-capture the timer values to flush the command registers and * verify that the time was properly adjusted. */ - err = ice_read_phy_and_phc_time_e822(hw, port, &phy_time, &phc_time); + err = ice_read_phy_and_phc_time_e82x(hw, port, &phy_time, &phc_time); if (err) goto err_unlock; @@ -2452,7 +2452,7 @@ err_unlock: } /** - * ice_stop_phy_timer_e822 - Stop the PHY clock timer + * ice_stop_phy_timer_e82x - Stop the PHY clock timer * @hw: pointer to the HW struct * @port: the PHY port to stop * @soft_reset: if true, hold the SOFT_RESET bit of P_REG_PS @@ -2462,36 +2462,36 @@ err_unlock: * initialized or when link speed changes. 
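ice_sync_phy_timer_e82x leans on unsigned wraparound: difference = phc_time - phy_time is computed in u64 and cast to s64 for the port-adjust helper, so a PHY timer that is ahead of the PHC produces a huge unsigned value that reinterprets as the correct negative adjustment. A standalone demonstration:

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint64_t phc_time = 1000;
    uint64_t phy_time = 1500; /* PHY is 500 TUs ahead of the PHC */

    uint64_t difference = phc_time - phy_time; /* wraps, on purpose */
    int64_t adj = (int64_t)difference;

    printf("difference = 0x%016" PRIx64 ", adj = %" PRId64 "\n",
           difference, adj); /* adj = -500 */
    return 0;
}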
*/ int -ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset) +ice_stop_phy_timer_e82x(struct ice_hw *hw, u8 port, bool soft_reset) { int err; u32 val; - err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 0); + err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_OR, 0); if (err) return err; - err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 0); + err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_OR, 0); if (err) return err; - err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val); + err = ice_read_phy_reg_e82x(hw, port, P_REG_PS, &val); if (err) return err; val &= ~P_REG_PS_START_M; - err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val); if (err) return err; val &= ~P_REG_PS_ENA_CLK_M; - err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val); if (err) return err; if (soft_reset) { val |= P_REG_PS_SFT_RESET_M; - err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val); if (err) return err; } @@ -2502,7 +2502,7 @@ ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset) } /** - * ice_start_phy_timer_e822 - Start the PHY clock timer + * ice_start_phy_timer_e82x - Start the PHY clock timer * @hw: pointer to the HW struct * @port: the PHY port to start * @@ -2512,7 +2512,7 @@ ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset) * * Hardware will take Vernier measurements on Tx or Rx of packets. */ -int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port) +int ice_start_phy_timer_e82x(struct ice_hw *hw, u8 port) { u32 lo, hi, val; u64 incval; @@ -2521,17 +2521,17 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port) tmr_idx = ice_get_ptp_src_clock_index(hw); - err = ice_stop_phy_timer_e822(hw, port, false); + err = ice_stop_phy_timer_e82x(hw, port, false); if (err) return err; - ice_phy_cfg_lane_e822(hw, port); + ice_phy_cfg_lane_e82x(hw, port); - err = ice_phy_cfg_uix_e822(hw, port); + err = ice_phy_cfg_uix_e82x(hw, port); if (err) return err; - err = ice_phy_cfg_parpcs_e822(hw, port); + err = ice_phy_cfg_parpcs_e82x(hw, port); if (err) return err; @@ -2539,7 +2539,7 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port) hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx)); incval = (u64)hi << 32 | lo; - err = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L, incval); + err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_TIMETUS_L, incval); if (err) return err; @@ -2552,22 +2552,22 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port) ice_ptp_exec_tmr_cmd(hw); - err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val); + err = ice_read_phy_reg_e82x(hw, port, P_REG_PS, &val); if (err) return err; val |= P_REG_PS_SFT_RESET_M; - err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val); if (err) return err; val |= P_REG_PS_START_M; - err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val); if (err) return err; val &= ~P_REG_PS_SFT_RESET_M; - err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val); if (err) return err; @@ -2578,18 +2578,18 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port) ice_ptp_exec_tmr_cmd(hw); val |= P_REG_PS_ENA_CLK_M; - err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val); if (err) return err; val |= P_REG_PS_LOAD_OFFSET_M; - err = 
ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val); if (err) return err; ice_ptp_exec_tmr_cmd(hw); - err = ice_sync_phy_timer_e822(hw, port); + err = ice_sync_phy_timer_e82x(hw, port); if (err) return err; @@ -2599,7 +2599,7 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port) } /** - * ice_get_phy_tx_tstamp_ready_e822 - Read Tx memory status register + * ice_get_phy_tx_tstamp_ready_e82x - Read Tx memory status register * @hw: pointer to the HW struct * @quad: the timestamp quad to read from * @tstamp_ready: contents of the Tx memory status register @@ -2609,19 +2609,19 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port) * ready to be captured from the PHY timestamp block. */ static int -ice_get_phy_tx_tstamp_ready_e822(struct ice_hw *hw, u8 quad, u64 *tstamp_ready) +ice_get_phy_tx_tstamp_ready_e82x(struct ice_hw *hw, u8 quad, u64 *tstamp_ready) { u32 hi, lo; int err; - err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEMORY_STATUS_U, &hi); + err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEMORY_STATUS_U, &hi); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS_U for quad %u, err %d\n", quad, err); return err; } - err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEMORY_STATUS_L, &lo); + err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEMORY_STATUS_L, &lo); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS_L for quad %u, err %d\n", quad, err); @@ -3307,7 +3307,7 @@ void ice_ptp_init_phy_model(struct ice_hw *hw) if (ice_is_e810(hw)) hw->phy_model = ICE_PHY_E810; else - hw->phy_model = ICE_PHY_E822; + hw->phy_model = ICE_PHY_E82X; } /** @@ -3332,8 +3332,8 @@ static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) case ICE_PHY_E810: err = ice_ptp_port_cmd_e810(hw, cmd); break; - case ICE_PHY_E822: - err = ice_ptp_port_cmd_e822(hw, cmd); + case ICE_PHY_E82X: + err = ice_ptp_port_cmd_e82x(hw, cmd); break; default: err = -EOPNOTSUPP; @@ -3384,8 +3384,8 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time) case ICE_PHY_E810: err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF); break; - case ICE_PHY_E822: - err = ice_ptp_prep_phy_time_e822(hw, time & 0xFFFFFFFF); + case ICE_PHY_E82X: + err = ice_ptp_prep_phy_time_e82x(hw, time & 0xFFFFFFFF); break; default: err = -EOPNOTSUPP; @@ -3426,8 +3426,8 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval) case ICE_PHY_E810: err = ice_ptp_prep_phy_incval_e810(hw, incval); break; - case ICE_PHY_E822: - err = ice_ptp_prep_phy_incval_e822(hw, incval); + case ICE_PHY_E82X: + err = ice_ptp_prep_phy_incval_e82x(hw, incval); break; default: err = -EOPNOTSUPP; @@ -3492,8 +3492,8 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj) case ICE_PHY_E810: err = ice_ptp_prep_phy_adj_e810(hw, adj); break; - case ICE_PHY_E822: - err = ice_ptp_prep_phy_adj_e822(hw, adj); + case ICE_PHY_E82X: + err = ice_ptp_prep_phy_adj_e82x(hw, adj); break; default: err = -EOPNOTSUPP; @@ -3521,8 +3521,8 @@ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp) switch (hw->phy_model) { case ICE_PHY_E810: return ice_read_phy_tstamp_e810(hw, block, idx, tstamp); - case ICE_PHY_E822: - return ice_read_phy_tstamp_e822(hw, block, idx, tstamp); + case ICE_PHY_E82X: + return ice_read_phy_tstamp_e82x(hw, block, idx, tstamp); default: return -EOPNOTSUPP; } @@ -3549,8 +3549,8 @@ int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx) switch (hw->phy_model) { case ICE_PHY_E810: return ice_clear_phy_tstamp_e810(hw, block, idx); - case 
ICE_PHY_E822: - return ice_clear_phy_tstamp_e822(hw, block, idx); + case ICE_PHY_E82X: + return ice_clear_phy_tstamp_e82x(hw, block, idx); default: return -EOPNOTSUPP; } @@ -3608,8 +3608,8 @@ static int ice_get_pf_c827_idx(struct ice_hw *hw, u8 *idx) void ice_ptp_reset_ts_memory(struct ice_hw *hw) { switch (hw->phy_model) { - case ICE_PHY_E822: - ice_ptp_reset_ts_memory_e822(hw); + case ICE_PHY_E82X: + ice_ptp_reset_ts_memory_e82x(hw); break; case ICE_PHY_E810: default: @@ -3636,8 +3636,8 @@ int ice_ptp_init_phc(struct ice_hw *hw) switch (hw->phy_model) { case ICE_PHY_E810: return ice_ptp_init_phc_e810(hw); - case ICE_PHY_E822: - return ice_ptp_init_phc_e822(hw); + case ICE_PHY_E82X: + return ice_ptp_init_phc_e82x(hw); default: return -EOPNOTSUPP; } @@ -3660,8 +3660,8 @@ int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready) case ICE_PHY_E810: return ice_get_phy_tx_tstamp_ready_e810(hw, block, tstamp_ready); - case ICE_PHY_E822: - return ice_get_phy_tx_tstamp_ready_e822(hw, block, + case ICE_PHY_E82X: + return ice_get_phy_tx_tstamp_ready_e82x(hw, block, tstamp_ready); break; default: @@ -3942,7 +3942,7 @@ int ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num) case ICE_DEV_ID_E823C_QSFP: case ICE_DEV_ID_E823C_SFP: case ICE_DEV_ID_E823C_SGMII: - *pin_num = ICE_E822_RCLK_PINS_NUM; + *pin_num = ICE_E82X_RCLK_PINS_NUM; ret = 0; if (hw->cgu_part_number == ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032) diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h index cf76701566..1f3e031244 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h @@ -42,7 +42,7 @@ enum ice_ptp_fec_mode { }; /** - * struct ice_time_ref_info_e822 + * struct ice_time_ref_info_e82x * @pll_freq: Frequency of PLL that drives timer ticks in Hz * @nominal_incval: increment to generate nanoseconds in GLTSYN_TIME_L * @pps_delay: propagation delay of the PPS output signal @@ -50,14 +50,14 @@ enum ice_ptp_fec_mode { * Characteristic information for the various TIME_REF sources possible in the * E822 devices */ -struct ice_time_ref_info_e822 { +struct ice_time_ref_info_e82x { u64 pll_freq; u64 nominal_incval; u8 pps_delay; }; /** - * struct ice_vernier_info_e822 + * struct ice_vernier_info_e82x * @tx_par_clk: Frequency used to calculate P_REG_PAR_TX_TUS * @rx_par_clk: Frequency used to calculate P_REG_PAR_RX_TUS * @tx_pcs_clk: Frequency used to calculate P_REG_PCS_TX_TUS @@ -80,7 +80,7 @@ struct ice_time_ref_info_e822 { * different link speeds, either the deskew marker for multi-lane link speeds * or the Reed Solomon gearbox marker for RS-FEC. */ -struct ice_vernier_info_e822 { +struct ice_vernier_info_e82x { u32 tx_par_clk; u32 rx_par_clk; u32 tx_pcs_clk; @@ -95,7 +95,7 @@ struct ice_vernier_info_e822 { }; /** - * struct ice_cgu_pll_params_e822 + * struct ice_cgu_pll_params_e82x * @refclk_pre_div: Reference clock pre-divisor * @feedback_div: Feedback divisor * @frac_n_div: Fractional divisor @@ -104,7 +104,7 @@ struct ice_vernier_info_e822 { * Clock Generation Unit parameters used to program the PLL based on the * selected TIME_REF frequency. 
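Every hw->phy_model switch in the hunks above changes the same way: the case label moves from ICE_PHY_E822 to ICE_PHY_E82X while the E810 and default arms stay put, so unknown PHY models still land on the -EOPNOTSUPP path. The dispatch shape in isolation, with invented enum and stub handlers:

#include <errno.h>
#include <stdio.h>

enum phy_model { PHY_UNSUP, PHY_E810, PHY_E82X };

static int init_phc_e810(void) { return 0; }
static int init_phc_e82x(void) { return 0; }

static int init_phc(enum phy_model model)
{
    switch (model) {
    case PHY_E810:
        return init_phc_e810();
    case PHY_E82X:
        return init_phc_e82x();
    default:
        return -EOPNOTSUPP; /* unknown PHY: refuse, don't guess */
    }
}

int main(void)
{
    printf("e82x: %d, unknown: %d\n",
           init_phc(PHY_E82X), init_phc(PHY_UNSUP));
    return 0;
}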
*/ -struct ice_cgu_pll_params_e822 { +struct ice_cgu_pll_params_e82x { u32 refclk_pre_div; u32 feedback_div; u32 frac_n_div; @@ -124,7 +124,7 @@ enum ice_phy_rclk_pins { }; #define ICE_E810_RCLK_PINS_NUM (ICE_RCLKB_PIN + 1) -#define ICE_E822_RCLK_PINS_NUM (ICE_RCLKA_PIN + 1) +#define ICE_E82X_RCLK_PINS_NUM (ICE_RCLKA_PIN + 1) #define E810T_CGU_INPUT_C827(_phy, _pin) ((_phy) * ICE_E810_RCLK_PINS_NUM + \ (_pin) + ZL_REF1P) @@ -183,16 +183,16 @@ struct ice_cgu_pin_desc { }; extern const struct -ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ]; +ice_cgu_pll_params_e82x e822_cgu_params[NUM_ICE_TIME_REF_FREQ]; #define E810C_QSFP_C827_0_HANDLE 2 #define E810C_QSFP_C827_1_HANDLE 3 /* Table of constants related to possible TIME_REF sources */ -extern const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ]; +extern const struct ice_time_ref_info_e82x e822_time_ref[NUM_ICE_TIME_REF_FREQ]; /* Table of constants for Vernier calibration on E822 */ -extern const struct ice_vernier_info_e822 e822_vernier[NUM_ICE_PTP_LNK_SPD]; +extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD]; /* Increment value to generate nanoseconds in the GLTSYN_TIME_L register for * the E810 devices. Based off of a PLL with an 812.5 MHz frequency. @@ -215,23 +215,23 @@ int ice_ptp_init_phc(struct ice_hw *hw); int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready); /* E822 family functions */ -int ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val); -int ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val); -void ice_ptp_reset_ts_memory_quad_e822(struct ice_hw *hw, u8 quad); +int ice_read_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 *val); +int ice_write_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 val); +void ice_ptp_reset_ts_memory_quad_e82x(struct ice_hw *hw, u8 quad); /** - * ice_e822_time_ref - Get the current TIME_REF from capabilities + * ice_e82x_time_ref - Get the current TIME_REF from capabilities * @hw: pointer to the HW structure * * Returns the current TIME_REF from the capabilities structure. */ -static inline enum ice_time_ref_freq ice_e822_time_ref(struct ice_hw *hw) +static inline enum ice_time_ref_freq ice_e82x_time_ref(struct ice_hw *hw) { return hw->func_caps.ts_func_info.time_ref; } /** - * ice_set_e822_time_ref - Set new TIME_REF + * ice_set_e82x_time_ref - Set new TIME_REF * @hw: pointer to the HW structure * @time_ref: new TIME_REF to set * @@ -239,31 +239,31 @@ static inline enum ice_time_ref_freq ice_e822_time_ref(struct ice_hw *hw) * change, such as an update to the CGU registers. 
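Note the asymmetry the diff leaves behind: the accessors become ice_e82x_*, but the backing tables keep their e822_ names (e822_time_ref, e822_cgu_params, e822_vernier), presumably because the constants were characterized on E822 silicon and are simply reused across the family. The lookup itself is a plain enum-indexed const table, sketched here with invented entries rather than the driver's real frequencies:

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

enum time_ref_freq { TIME_REF_25MHZ, TIME_REF_122MHZ, NUM_TIME_REF };

struct time_ref_info {
    uint64_t pll_freq;        /* Hz */
    uint64_t nominal_incval;
    uint8_t pps_delay;        /* ns */
};

/* Invented values; the real constants live in e822_time_ref[]. */
static const struct time_ref_info time_ref[NUM_TIME_REF] = {
    [TIME_REF_25MHZ]  = { 800000000ULL, 0x140000000ULL, 11 },
    [TIME_REF_122MHZ] = { 783360000ULL, 0x148000000ULL, 12 },
};

static inline uint64_t pll_freq(enum time_ref_freq f)
{
    return time_ref[f].pll_freq;
}

int main(void)
{
    printf("25MHz TIME_REF -> PLL %" PRIu64 " Hz\n",
           pll_freq(TIME_REF_25MHZ));
    return 0;
}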
*/ static inline void -ice_set_e822_time_ref(struct ice_hw *hw, enum ice_time_ref_freq time_ref) +ice_set_e82x_time_ref(struct ice_hw *hw, enum ice_time_ref_freq time_ref) { hw->func_caps.ts_func_info.time_ref = time_ref; } -static inline u64 ice_e822_pll_freq(enum ice_time_ref_freq time_ref) +static inline u64 ice_e82x_pll_freq(enum ice_time_ref_freq time_ref) { return e822_time_ref[time_ref].pll_freq; } -static inline u64 ice_e822_nominal_incval(enum ice_time_ref_freq time_ref) +static inline u64 ice_e82x_nominal_incval(enum ice_time_ref_freq time_ref) { return e822_time_ref[time_ref].nominal_incval; } -static inline u64 ice_e822_pps_delay(enum ice_time_ref_freq time_ref) +static inline u64 ice_e82x_pps_delay(enum ice_time_ref_freq time_ref) { return e822_time_ref[time_ref].pps_delay; } /* E822 Vernier calibration functions */ -int ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset); -int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port); -int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port); -int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port); +int ice_stop_phy_timer_e82x(struct ice_hw *hw, u8 port, bool soft_reset); +int ice_start_phy_timer_e82x(struct ice_hw *hw, u8 port); +int ice_phy_cfg_tx_offset_e82x(struct ice_hw *hw, u8 port); +int ice_phy_cfg_rx_offset_e82x(struct ice_hw *hw, u8 port); /* E810 family functions */ int ice_ptp_init_phy_e810(struct ice_hw *hw); @@ -509,6 +509,7 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id, #define TS_LL_READ_RETRIES 200 #define TS_LL_READ_TS_HIGH GENMASK(23, 16) #define TS_LL_READ_TS_IDX GENMASK(29, 24) +#define TS_LL_READ_TS_INTR BIT(30) #define TS_LL_READ_TS BIT(31) /* Internal PHY timestamp address */ diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c index c686ac0935..5f30fb131f 100644 --- a/drivers/net/ethernet/intel/ice/ice_repr.c +++ b/drivers/net/ethernet/intel/ice/ice_repr.c @@ -14,7 +14,7 @@ */ static int ice_repr_get_sw_port_id(struct ice_repr *repr) { - return repr->vf->pf->hw.port_info->lport; + return repr->src_vsi->back->hw.port_info->lport; } /** @@ -35,7 +35,7 @@ ice_repr_get_phys_port_name(struct net_device *netdev, char *buf, size_t len) return -EOPNOTSUPP; res = snprintf(buf, len, "pf%dvfr%d", ice_repr_get_sw_port_id(repr), - repr->vf->vf_id); + repr->id); if (res <= 0) return -EOPNOTSUPP; return 0; @@ -278,25 +278,67 @@ ice_repr_reg_netdev(struct net_device *netdev) return register_netdev(netdev); } +static void ice_repr_remove_node(struct devlink_port *devlink_port) +{ + devl_lock(devlink_port->devlink); + devl_rate_leaf_destroy(devlink_port); + devl_unlock(devlink_port->devlink); +} + /** - * ice_repr_add - add representor for VF - * @vf: pointer to VF structure + * ice_repr_rem - remove representor from VF + * @repr: pointer to representor structure */ -static int ice_repr_add(struct ice_vf *vf) +static void ice_repr_rem(struct ice_repr *repr) +{ + kfree(repr->q_vector); + free_netdev(repr->netdev); + kfree(repr); +} + +/** + * ice_repr_rem_vf - remove representor from VF + * @repr: pointer to representor structure + */ +void ice_repr_rem_vf(struct ice_repr *repr) +{ + ice_repr_remove_node(&repr->vf->devlink_port); + unregister_netdev(repr->netdev); + ice_devlink_destroy_vf_port(repr->vf); + ice_virtchnl_set_dflt_ops(repr->vf); + ice_repr_rem(repr); +} + +static void ice_repr_set_tx_topology(struct ice_pf *pf) +{ + struct devlink *devlink; + + /* only export if ADQ and DCB disabled and eswitch enabled*/ + if 
(ice_is_adq_active(pf) || ice_is_dcb_active(pf) || + !ice_is_switchdev_running(pf)) + return; + + devlink = priv_to_devlink(pf); + ice_devlink_rate_init_tx_topology(devlink, ice_get_main_vsi(pf)); +} + +/** + * ice_repr_add - add representor for generic VSI + * @pf: pointer to PF structure + * @src_vsi: pointer to VSI structure of device to represent + * @parent_mac: device MAC address + */ +static struct ice_repr * +ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac) { struct ice_q_vector *q_vector; struct ice_netdev_priv *np; struct ice_repr *repr; - struct ice_vsi *vsi; int err; - vsi = ice_get_vf_vsi(vf); - if (!vsi) - return -EINVAL; - repr = kzalloc(sizeof(*repr), GFP_KERNEL); if (!repr) - return -ENOMEM; + return ERR_PTR(-ENOMEM); repr->netdev = alloc_etherdev(sizeof(struct ice_netdev_priv)); if (!repr->netdev) { @@ -304,9 +346,7 @@ static int ice_repr_add(struct ice_vf *vf) goto err_alloc; } - repr->src_vsi = vsi; - repr->vf = vf; - vf->repr = repr; + repr->src_vsi = src_vsi; np = netdev_priv(repr->netdev); np->repr = repr; @@ -316,10 +356,40 @@ static int ice_repr_add(struct ice_vf *vf) goto err_alloc_q_vector; } repr->q_vector = q_vector; + repr->q_id = repr->id; + + ether_addr_copy(repr->parent_mac, parent_mac); + + return repr; + +err_alloc_q_vector: + free_netdev(repr->netdev); +err_alloc: + kfree(repr); + return ERR_PTR(err); +} + +struct ice_repr *ice_repr_add_vf(struct ice_vf *vf) +{ + struct ice_repr *repr; + struct ice_vsi *vsi; + int err; + + vsi = ice_get_vf_vsi(vf); + if (!vsi) + return ERR_PTR(-ENOENT); err = ice_devlink_create_vf_port(vf); if (err) - goto err_devlink; + return ERR_PTR(err); + + repr = ice_repr_add(vf->pf, vsi, vf->hw_lan_addr); + if (IS_ERR(repr)) { + err = PTR_ERR(repr); + goto err_repr_add; + } + + repr->vf = vf; repr->netdev->min_mtu = ETH_MIN_MTU; repr->netdev->max_mtu = ICE_MAX_MTU; @@ -331,100 +401,23 @@ static int ice_repr_add(struct ice_vf *vf) goto err_netdev; ice_virtchnl_set_repr_ops(vf); + ice_repr_set_tx_topology(vf->pf); - return 0; + return repr; err_netdev: + ice_repr_rem(repr); +err_repr_add: ice_devlink_destroy_vf_port(vf); -err_devlink: - kfree(repr->q_vector); - vf->repr->q_vector = NULL; -err_alloc_q_vector: - free_netdev(repr->netdev); - repr->netdev = NULL; -err_alloc: - kfree(repr); - vf->repr = NULL; - return err; -} - -/** - * ice_repr_rem - remove representor from VF - * @vf: pointer to VF structure - */ -static void ice_repr_rem(struct ice_vf *vf) -{ - if (!vf->repr) - return; - - kfree(vf->repr->q_vector); - vf->repr->q_vector = NULL; - unregister_netdev(vf->repr->netdev); - ice_devlink_destroy_vf_port(vf); - free_netdev(vf->repr->netdev); - vf->repr->netdev = NULL; - kfree(vf->repr); - vf->repr = NULL; - - ice_virtchnl_set_dflt_ops(vf); -} - -/** - * ice_repr_rem_from_all_vfs - remove port representor for all VFs - * @pf: pointer to PF structure - */ -void ice_repr_rem_from_all_vfs(struct ice_pf *pf) -{ - struct devlink *devlink; - struct ice_vf *vf; - unsigned int bkt; - - lockdep_assert_held(&pf->vfs.table_lock); - - ice_for_each_vf(pf, bkt, vf) - ice_repr_rem(vf); - - /* since all port representors are destroyed, there is - * no point in keeping the nodes - */ - devlink = priv_to_devlink(pf); - devl_lock(devlink); - devl_rate_nodes_destroy(devlink); - devl_unlock(devlink); + return ERR_PTR(err); } -/** - * ice_repr_add_for_all_vfs - add port representor for all VFs - * @pf: pointer to PF structure - */ -int ice_repr_add_for_all_vfs(struct ice_pf *pf) +struct ice_repr 
*ice_repr_get_by_vsi(struct ice_vsi *vsi) { - struct devlink *devlink; - struct ice_vf *vf; - unsigned int bkt; - int err; - - lockdep_assert_held(&pf->vfs.table_lock); - - ice_for_each_vf(pf, bkt, vf) { - err = ice_repr_add(vf); - if (err) - goto err; - } - - /* only export if ADQ and DCB disabled */ - if (ice_is_adq_active(pf) || ice_is_dcb_active(pf)) - return 0; - - devlink = priv_to_devlink(pf); - ice_devlink_rate_init_tx_topology(devlink, ice_get_main_vsi(pf)); - - return 0; - -err: - ice_repr_rem_from_all_vfs(pf); + if (!vsi->vf) + return NULL; - return err; + return xa_load(&vsi->back->eswitch.reprs, vsi->vf->repr_id); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h index e1ee2d2c1d..f9aede3157 100644 --- a/drivers/net/ethernet/intel/ice/ice_repr.h +++ b/drivers/net/ethernet/intel/ice/ice_repr.h @@ -13,14 +13,17 @@ struct ice_repr { struct net_device *netdev; struct metadata_dst *dst; struct ice_esw_br_port *br_port; + int q_id; + u32 id; + u8 parent_mac[ETH_ALEN]; #ifdef CONFIG_ICE_SWITCHDEV /* info about slow path rule */ struct ice_rule_query_data sp_rule; #endif }; -int ice_repr_add_for_all_vfs(struct ice_pf *pf); -void ice_repr_rem_from_all_vfs(struct ice_pf *pf); +struct ice_repr *ice_repr_add_vf(struct ice_vf *vf); +void ice_repr_rem_vf(struct ice_repr *repr); void ice_repr_start_tx_queues(struct ice_repr *repr); void ice_repr_stop_tx_queues(struct ice_repr *repr); @@ -29,4 +32,6 @@ void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi); struct ice_repr *ice_netdev_to_repr(struct net_device *netdev); bool ice_is_port_repr_netdev(const struct net_device *netdev); + +struct ice_repr *ice_repr_get_by_vsi(struct ice_vsi *vsi); #endif diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index 2f4a621254..d174a4eeb8 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -1387,8 +1387,7 @@ void ice_sched_get_psm_clk_freq(struct ice_hw *hw) u32 val, clk_src; val = rd32(hw, GLGEN_CLKSTAT_SRC); - clk_src = (val & GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M) >> - GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_S; + clk_src = FIELD_GET(GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M, val); #define PSM_CLK_SRC_367_MHZ 0x0 #define PSM_CLK_SRC_416_MHZ 0x1 diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index cd61928700..b0f78c2f27 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -106,10 +106,8 @@ static void ice_dis_vf_mappings(struct ice_vf *vf) for (v = first; v <= last; v++) { u32 reg; - reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) & - GLINT_VECT2FUNC_IS_PF_M) | - ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) & - GLINT_VECT2FUNC_PF_NUM_M)); + reg = FIELD_PREP(GLINT_VECT2FUNC_IS_PF_M, 1) | + FIELD_PREP(GLINT_VECT2FUNC_PF_NUM_M, hw->pf_id); wr32(hw, GLINT_VECT2FUNC(v), reg); } @@ -172,13 +170,14 @@ void ice_free_vfs(struct ice_pf *pf) else dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n"); - mutex_lock(&vfs->table_lock); + ice_eswitch_reserve_cp_queues(pf, -ice_get_num_vfs(pf)); - ice_eswitch_release(pf); + mutex_lock(&vfs->table_lock); ice_for_each_vf(pf, bkt, vf) { mutex_lock(&vf->cfg_lock); + ice_eswitch_detach(pf, vf); ice_dis_vf_qs(vf); if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { @@ -274,24 +273,20 @@ static void ice_ena_vf_msix_mappings(struct ice_vf *vf) (device_based_first_msix + vf->num_msix) - 1; device_based_vf_id = vf->vf_id + 
hw->func_caps.vf_base_id; - reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) & - VPINT_ALLOC_FIRST_M) | - ((device_based_last_msix << VPINT_ALLOC_LAST_S) & - VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M); + reg = FIELD_PREP(VPINT_ALLOC_FIRST_M, device_based_first_msix) | + FIELD_PREP(VPINT_ALLOC_LAST_M, device_based_last_msix) | + VPINT_ALLOC_VALID_M; wr32(hw, VPINT_ALLOC(vf->vf_id), reg); - reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S) - & VPINT_ALLOC_PCI_FIRST_M) | - ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) & - VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M); + reg = FIELD_PREP(VPINT_ALLOC_PCI_FIRST_M, device_based_first_msix) | + FIELD_PREP(VPINT_ALLOC_PCI_LAST_M, device_based_last_msix) | + VPINT_ALLOC_PCI_VALID_M; wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg); /* map the interrupts to its functions */ for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) { - reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) & - GLINT_VECT2FUNC_VF_NUM_M) | - ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) & - GLINT_VECT2FUNC_PF_NUM_M)); + reg = FIELD_PREP(GLINT_VECT2FUNC_VF_NUM_M, device_based_vf_id) | + FIELD_PREP(GLINT_VECT2FUNC_PF_NUM_M, hw->pf_id); wr32(hw, GLINT_VECT2FUNC(v), reg); } @@ -324,10 +319,8 @@ static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq) * VFNUMQ value should be set to (number of queues - 1). A value * of 0 means 1 queue and a value of 255 means 256 queues */ - reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) & - VPLAN_TX_QBASE_VFFIRSTQ_M) | - (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) & - VPLAN_TX_QBASE_VFNUMQ_M)); + reg = FIELD_PREP(VPLAN_TX_QBASE_VFFIRSTQ_M, vsi->txq_map[0]) | + FIELD_PREP(VPLAN_TX_QBASE_VFNUMQ_M, max_txq - 1); wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg); } else { dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n"); @@ -342,10 +335,8 @@ static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq) * VFNUMQ value should be set to (number of queues - 1). 
A value * of 0 means 1 queue and a value of 255 means 256 queues */ - reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) & - VPLAN_RX_QBASE_VFFIRSTQ_M) | - (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) & - VPLAN_RX_QBASE_VFNUMQ_M)); + reg = FIELD_PREP(VPLAN_RX_QBASE_VFFIRSTQ_M, vsi->rxq_map[0]) | + FIELD_PREP(VPLAN_RX_QBASE_VFNUMQ_M, max_rxq - 1); wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg); } else { dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n"); @@ -609,6 +600,14 @@ static int ice_start_vfs(struct ice_pf *pf) goto teardown; } + retval = ice_eswitch_attach(pf, vf); + if (retval) { + dev_err(ice_pf_to_dev(pf), "Failed to attach VF %d to eswitch, error %d", + vf->vf_id, retval); + ice_vf_vsi_release(vf); + goto teardown; + } + set_bit(ICE_VF_STATE_INIT, vf->vf_states); ice_ena_vf_mappings(vf); wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE); @@ -899,6 +898,7 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) goto err_unroll_sriov; } + ice_eswitch_reserve_cp_queues(pf, num_vfs); ret = ice_start_vfs(pf); if (ret) { dev_err(dev, "Failed to start %d VFs, err %d\n", num_vfs, ret); @@ -908,12 +908,6 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) clear_bit(ICE_VF_DIS, pf->state); - ret = ice_eswitch_configure(pf); - if (ret) { - dev_err(dev, "Failed to configure eswitch, err %d\n", ret); - goto err_unroll_sriov; - } - /* rearm global interrupts */ if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state)) ice_irq_dynamic_ena(hw, NULL, NULL); @@ -1311,8 +1305,7 @@ ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq); /* event returns device global Rx queue number */ - queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >> - GLDCB_RTCTQ_RXQNUM_S; + queue = FIELD_GET(GLDCB_RTCTQ_RXQNUM_M, gldcb_rtctq); vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue)); if (!vf) diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index ee19f3aa3d..ba0ef91e4c 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -2025,12 +2025,12 @@ error_out: * ice_aq_map_recipe_to_profile - Map recipe to packet profile * @hw: pointer to the HW struct * @profile_id: package profile ID to associate the recipe with - * @r_bitmap: Recipe bitmap filled in and need to be returned as response + * @r_assoc: Recipe bitmap filled in and need to be returned as response * @cd: pointer to command details structure or NULL * Recipe to profile association (0x0291) */ int -ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, +ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 r_assoc, struct ice_sq_cd *cd) { struct ice_aqc_recipe_to_profile *cmd; @@ -2042,7 +2042,7 @@ ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, /* Set the recipe ID bit in the bitmask to let the device know which * profile we are associating the recipe to */ - memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc)); + cmd->recipe_assoc = cpu_to_le64(r_assoc); return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } @@ -2051,12 +2051,12 @@ ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, * ice_aq_get_recipe_to_profile - Map recipe to packet profile * @hw: pointer to the HW struct * @profile_id: package profile ID to associate the recipe with - * @r_bitmap: Recipe bitmap filled in and need to be returned as response + * @r_assoc: Recipe 
bitmap filled in and need to be returned as response * @cd: pointer to command details structure or NULL * Associate profile ID with given recipe (0x0293) */ int -ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, +ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 *r_assoc, struct ice_sq_cd *cd) { struct ice_aqc_recipe_to_profile *cmd; @@ -2069,7 +2069,7 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); if (!status) - memcpy(r_bitmap, cmd->recipe_assoc, sizeof(cmd->recipe_assoc)); + *r_assoc = le64_to_cpu(cmd->recipe_assoc); return status; } @@ -2108,6 +2108,7 @@ int ice_alloc_recipe(struct ice_hw *hw, u16 *rid) static void ice_get_recp_to_prof_map(struct ice_hw *hw) { DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES); + u64 recp_assoc; u16 i; for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) { @@ -2115,8 +2116,9 @@ static void ice_get_recp_to_prof_map(struct ice_hw *hw) bitmap_zero(profile_to_recipe[i], ICE_MAX_NUM_RECIPES); bitmap_zero(r_bitmap, ICE_MAX_NUM_RECIPES); - if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL)) + if (ice_aq_get_recipe_to_profile(hw, i, &recp_assoc, NULL)) continue; + bitmap_from_arr64(r_bitmap, &recp_assoc, ICE_MAX_NUM_RECIPES); bitmap_copy(profile_to_recipe[i], r_bitmap, ICE_MAX_NUM_RECIPES); for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES) @@ -2492,25 +2494,24 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, switch (f_info->fltr_act) { case ICE_FWD_TO_VSI: - act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) & - ICE_SINGLE_ACT_VSI_ID_M; + act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M, + f_info->fwd_id.hw_vsi_id); if (f_info->lkup_type != ICE_SW_LKUP_VLAN) act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT; break; case ICE_FWD_TO_VSI_LIST: act |= ICE_SINGLE_ACT_VSI_LIST; - act |= (f_info->fwd_id.vsi_list_id << - ICE_SINGLE_ACT_VSI_LIST_ID_S) & - ICE_SINGLE_ACT_VSI_LIST_ID_M; + act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_LIST_ID_M, + f_info->fwd_id.vsi_list_id); if (f_info->lkup_type != ICE_SW_LKUP_VLAN) act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT; break; case ICE_FWD_TO_Q: act |= ICE_SINGLE_ACT_TO_Q; - act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) & - ICE_SINGLE_ACT_Q_INDEX_M; + act |= FIELD_PREP(ICE_SINGLE_ACT_Q_INDEX_M, + f_info->fwd_id.q_id); break; case ICE_DROP_PACKET: act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP | @@ -2520,10 +2521,9 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, q_rgn = f_info->qgrp_size > 0 ? 
(u8)ilog2(f_info->qgrp_size) : 0; act |= ICE_SINGLE_ACT_TO_Q; - act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) & - ICE_SINGLE_ACT_Q_INDEX_M; - act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) & - ICE_SINGLE_ACT_Q_REGION_M; + act |= FIELD_PREP(ICE_SINGLE_ACT_Q_INDEX_M, + f_info->fwd_id.q_id); + act |= FIELD_PREP(ICE_SINGLE_ACT_Q_REGION_M, q_rgn); break; default: return; @@ -2649,7 +2649,7 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, m_ent->fltr_info.fwd_id.hw_vsi_id; act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT; - act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M; + act |= FIELD_PREP(ICE_LG_ACT_VSI_LIST_ID_M, id); if (m_ent->vsi_count > 1) act |= ICE_LG_ACT_VSI_LIST; lg_act->act[0] = cpu_to_le32(act); @@ -2657,16 +2657,15 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, /* Second action descriptor type */ act = ICE_LG_ACT_GENERIC; - act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M; + act |= FIELD_PREP(ICE_LG_ACT_GENERIC_VALUE_M, 1); lg_act->act[1] = cpu_to_le32(act); - act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX << - ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M; + act = FIELD_PREP(ICE_LG_ACT_GENERIC_OFFSET_M, + ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX); /* Third action Marker value */ act |= ICE_LG_ACT_GENERIC; - act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) & - ICE_LG_ACT_GENERIC_VALUE_M; + act |= FIELD_PREP(ICE_LG_ACT_GENERIC_VALUE_M, sw_marker); lg_act->act[2] = cpu_to_le32(act); @@ -2675,9 +2674,9 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, ice_aqc_opc_update_sw_rules); /* Update the action to point to the large action ID */ - rx_tx->act = cpu_to_le32(ICE_SINGLE_ACT_PTR | - ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) & - ICE_SINGLE_ACT_PTR_VAL_M)); + act = ICE_SINGLE_ACT_PTR; + act |= FIELD_PREP(ICE_SINGLE_ACT_PTR_VAL_M, l_id); + rx_tx->act = cpu_to_le32(act); /* Use the filter rule ID of the previously created rule with single * act. 
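The conversion repeated throughout these hunks swaps the open-coded shift-and-mask idiom for the FIELD_PREP()/FIELD_GET() helpers from <linux/bitfield.h>, which derive the shift from the mask at compile time and add build-time range checking for constant values. A minimal standalone sketch of the equivalence, using a hypothetical 10-bit field rather than any real ice register layout:

#include <linux/bits.h>
#include <linux/bitfield.h>

#define EXAMPLE_FIELD_S	4			/* hypothetical field */
#define EXAMPLE_FIELD_M	GENMASK(13, 4)

/* Old idiom: shift the value into position, then mask it. */
static u32 encode_old(u32 reg, u16 val)
{
	return reg | ((val << EXAMPLE_FIELD_S) & EXAMPLE_FIELD_M);
}

/* New idiom: FIELD_PREP() infers the shift from the mask, so the
 * _S define becomes unnecessary and cannot drift out of sync. */
static u32 encode_new(u32 reg, u16 val)
{
	return reg | FIELD_PREP(EXAMPLE_FIELD_M, val);
}

/* Decoding is the mirror image: FIELD_GET() masks, then shifts down. */
static u16 decode(u32 reg)
{
	return FIELD_GET(EXAMPLE_FIELD_M, reg);
}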
Once the update happens, hardware will treat this as large @@ -4426,8 +4425,8 @@ ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, int status; buf->num_elems = cpu_to_le16(num_items); - buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) & - ICE_AQC_RES_TYPE_M) | alloc_shared); + buf->res_type = cpu_to_le16(FIELD_PREP(ICE_AQC_RES_TYPE_M, type) | + alloc_shared); status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res); if (status) @@ -4454,8 +4453,8 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, int status; buf->num_elems = cpu_to_le16(num_items); - buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) & - ICE_AQC_RES_TYPE_M) | alloc_shared); + buf->res_type = cpu_to_le16(FIELD_PREP(ICE_AQC_RES_TYPE_M, type) | + alloc_shared); buf->elem[0].e.sw_resp = cpu_to_le16(counter_id); status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_free_res); @@ -4481,18 +4480,15 @@ int ice_share_res(struct ice_hw *hw, u16 type, u8 shared, u16 res_id) { DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1); u16 buf_len = __struct_size(buf); + u16 res_type; int status; buf->num_elems = cpu_to_le16(1); + res_type = FIELD_PREP(ICE_AQC_RES_TYPE_M, type); if (shared) - buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) & - ICE_AQC_RES_TYPE_M) | - ICE_AQC_RES_TYPE_FLAG_SHARED); - else - buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) & - ICE_AQC_RES_TYPE_M) & - ~ICE_AQC_RES_TYPE_FLAG_SHARED); + res_type |= ICE_AQC_RES_TYPE_FLAG_SHARED; + buf->res_type = cpu_to_le16(res_type); buf->elem[0].e.sw_resp = cpu_to_le16(res_id); status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_share_res); @@ -5024,8 +5020,8 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, entry->chain_idx = chain_idx; content->result_indx = ICE_AQ_RECIPE_RESULT_EN | - ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) & - ICE_AQ_RECIPE_RESULT_DATA_M); + FIELD_PREP(ICE_AQ_RECIPE_RESULT_DATA_M, + chain_idx); clear_bit(chain_idx, result_idx_bm); chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS); @@ -5396,22 +5392,24 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, */ list_for_each_entry(fvit, &rm->fv_list, list_entry) { DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES); + u64 recp_assoc; u16 j; status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id, - (u8 *)r_bitmap, NULL); + &recp_assoc, NULL); if (status) goto err_unroll; + bitmap_from_arr64(r_bitmap, &recp_assoc, ICE_MAX_NUM_RECIPES); bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap, ICE_MAX_NUM_RECIPES); status = ice_acquire_change_lock(hw, ICE_RES_WRITE); if (status) goto err_unroll; + bitmap_to_arr64(&recp_assoc, r_bitmap, ICE_MAX_NUM_RECIPES); status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id, - (u8 *)r_bitmap, - NULL); + recp_assoc, NULL); ice_release_change_lock(hw); if (status) @@ -6065,6 +6063,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, rinfo->sw_act.fltr_act == ICE_FWD_TO_Q || rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP || rinfo->sw_act.fltr_act == ICE_DROP_PACKET || + rinfo->sw_act.fltr_act == ICE_MIRROR_PACKET || rinfo->sw_act.fltr_act == ICE_NOP)) { status = -EIO; goto free_pkt_profile; @@ -6077,9 +6076,11 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, } if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI || - rinfo->sw_act.fltr_act == ICE_NOP) + rinfo->sw_act.fltr_act == ICE_MIRROR_PACKET || + rinfo->sw_act.fltr_act == ICE_NOP) { rinfo->sw_act.fwd_id.hw_vsi_id = 
ice_get_hw_vsi_num(hw, vsi_handle); + } if (rinfo->src_vsi) rinfo->sw_act.src = ice_get_hw_vsi_num(hw, rinfo->src_vsi); @@ -6115,38 +6116,45 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, status = -ENOMEM; goto free_pkt_profile; } - if (!rinfo->flags_info.act_valid) { - act |= ICE_SINGLE_ACT_LAN_ENABLE; - act |= ICE_SINGLE_ACT_LB_ENABLE; - } else { - act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE | - ICE_SINGLE_ACT_LB_ENABLE); + + if (rinfo->sw_act.fltr_act != ICE_MIRROR_PACKET) { + if (!rinfo->flags_info.act_valid) { + act |= ICE_SINGLE_ACT_LAN_ENABLE; + act |= ICE_SINGLE_ACT_LB_ENABLE; + } else { + act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE | + ICE_SINGLE_ACT_LB_ENABLE); + } } switch (rinfo->sw_act.fltr_act) { case ICE_FWD_TO_VSI: - act |= (rinfo->sw_act.fwd_id.hw_vsi_id << - ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M; + act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M, + rinfo->sw_act.fwd_id.hw_vsi_id); act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT; break; case ICE_FWD_TO_Q: act |= ICE_SINGLE_ACT_TO_Q; - act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) & - ICE_SINGLE_ACT_Q_INDEX_M; + act |= FIELD_PREP(ICE_SINGLE_ACT_Q_INDEX_M, + rinfo->sw_act.fwd_id.q_id); break; case ICE_FWD_TO_QGRP: q_rgn = rinfo->sw_act.qgrp_size > 0 ? (u8)ilog2(rinfo->sw_act.qgrp_size) : 0; act |= ICE_SINGLE_ACT_TO_Q; - act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) & - ICE_SINGLE_ACT_Q_INDEX_M; - act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) & - ICE_SINGLE_ACT_Q_REGION_M; + act |= FIELD_PREP(ICE_SINGLE_ACT_Q_INDEX_M, + rinfo->sw_act.fwd_id.q_id); + act |= FIELD_PREP(ICE_SINGLE_ACT_Q_REGION_M, q_rgn); break; case ICE_DROP_PACKET: act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP | ICE_SINGLE_ACT_VALID_BIT; break; + case ICE_MIRROR_PACKET: + act |= ICE_SINGLE_ACT_OTHER_ACTS; + act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M, + rinfo->sw_act.fwd_id.hw_vsi_id); + break; case ICE_NOP: act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M, rinfo->sw_act.fwd_id.hw_vsi_id); diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h index db7e501b7e..89ffa1b51b 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.h +++ b/drivers/net/ethernet/intel/ice/ice_switch.h @@ -424,10 +424,10 @@ int ice_aq_add_recipe(struct ice_hw *hw, struct ice_aqc_recipe_data_elem *s_recipe_list, u16 num_recipes, struct ice_sq_cd *cd); int -ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, +ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 *r_assoc, struct ice_sq_cd *cd); int -ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, +ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 r_assoc, struct ice_sq_cd *cd); #endif /* _ICE_SWITCH_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c index dd03cb69ad..688ccb0615 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c @@ -28,6 +28,8 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers, * - ICE_TC_FLWR_FIELD_VLAN_TPID (present if specified) * - Tunnel flag (present if tunnel) */ + if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS) + lkups_cnt++; if (flags & ICE_TC_FLWR_FIELD_TENANT_ID) lkups_cnt++; @@ -363,6 +365,11 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags, /* Always add direction metadata */ 
ice_rule_add_direction_metadata(&list[ICE_TC_METADATA_LKUP_IDX]); + if (tc_fltr->direction == ICE_ESWITCH_FLTR_EGRESS) { + ice_rule_add_src_vsi_metadata(&list[i]); + i++; + } + rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type); if (tc_fltr->tunnel_type != TNL_LAST) { i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list, i); @@ -653,7 +660,7 @@ static int ice_tc_setup_redirect_action(struct net_device *filter_dev, ice_tc_is_dev_uplink(target_dev)) { repr = ice_netdev_to_repr(filter_dev); - fltr->dest_vsi = repr->src_vsi->back->switchdev.uplink_vsi; + fltr->dest_vsi = repr->src_vsi->back->eswitch.uplink_vsi; fltr->direction = ICE_ESWITCH_FLTR_EGRESS; } else if (ice_tc_is_dev_uplink(filter_dev) && ice_is_port_repr_netdev(target_dev)) { @@ -689,6 +696,41 @@ ice_tc_setup_drop_action(struct net_device *filter_dev, return 0; } +static int ice_tc_setup_mirror_action(struct net_device *filter_dev, + struct ice_tc_flower_fltr *fltr, + struct net_device *target_dev) +{ + struct ice_repr *repr; + + fltr->action.fltr_act = ICE_MIRROR_PACKET; + + if (ice_is_port_repr_netdev(filter_dev) && + ice_is_port_repr_netdev(target_dev)) { + repr = ice_netdev_to_repr(target_dev); + + fltr->dest_vsi = repr->src_vsi; + fltr->direction = ICE_ESWITCH_FLTR_EGRESS; + } else if (ice_is_port_repr_netdev(filter_dev) && + ice_tc_is_dev_uplink(target_dev)) { + repr = ice_netdev_to_repr(filter_dev); + + fltr->dest_vsi = repr->src_vsi->back->eswitch.uplink_vsi; + fltr->direction = ICE_ESWITCH_FLTR_EGRESS; + } else if (ice_tc_is_dev_uplink(filter_dev) && + ice_is_port_repr_netdev(target_dev)) { + repr = ice_netdev_to_repr(target_dev); + + fltr->dest_vsi = repr->src_vsi; + fltr->direction = ICE_ESWITCH_FLTR_INGRESS; + } else { + NL_SET_ERR_MSG_MOD(fltr->extack, + "Unsupported netdevice in switchdev mode"); + return -EINVAL; + } + + return 0; +} + static int ice_eswitch_tc_parse_action(struct net_device *filter_dev, struct ice_tc_flower_fltr *fltr, struct flow_action_entry *act) @@ -710,6 +752,12 @@ static int ice_eswitch_tc_parse_action(struct net_device *filter_dev, break; + case FLOW_ACTION_MIRRED: + err = ice_tc_setup_mirror_action(filter_dev, fltr, act->dev); + if (err) + return err; + break; + default: NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action in switchdev mode"); return -EINVAL; @@ -731,7 +779,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) int ret; int i; - if (!flags || (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT)) { + if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT) { NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported encap field(s)"); return -EOPNOTSUPP; } @@ -765,7 +813,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) rule_info.sw_act.src = hw->pf_id; rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE; } else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS && - fltr->dest_vsi == vsi->back->switchdev.uplink_vsi) { + fltr->dest_vsi == vsi->back->eswitch.uplink_vsi) { /* VF to Uplink */ rule_info.sw_act.flag |= ICE_FLTR_TX; rule_info.sw_act.src = vsi->idx; @@ -779,6 +827,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) /* specify the cookie as filter_rule_id */ rule_info.fltr_rule_id = fltr->cookie; + rule_info.src_vsi = vsi->idx; ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added); if (ret == -EEXIST) { @@ -1440,7 +1489,10 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi, (BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) 
| BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) | - BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS))) { + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL))) { NL_SET_ERR_MSG_MOD(fltr->extack, "Tunnel key used, but device isn't a tunnel"); return -EOPNOTSUPP; } else { diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 9170a3e8f0..97d41d6ebf 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -552,13 +552,14 @@ ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, const unsigned int size) * @xdp_prog: XDP program to run * @xdp_ring: ring to be used for XDP_TX action * @rx_buf: Rx buffer to store the XDP action + * @eop_desc: Last descriptor in packet to read metadata from * * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR} */ static void ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring, - struct ice_rx_buf *rx_buf) + struct ice_rx_buf *rx_buf, union ice_32b_rx_flex_desc *eop_desc) { unsigned int ret = ICE_XDP_PASS; u32 act; @@ -566,6 +567,8 @@ ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, if (!xdp_prog) goto exit; + ice_xdp_meta_set_desc(xdp, eop_desc); + act = bpf_prog_run_xdp(xdp_prog, xdp); switch (act) { case XDP_PASS: @@ -1176,8 +1179,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) struct sk_buff *skb; unsigned int size; u16 stat_err_bits; - u16 vlan_tag = 0; - u16 rx_ptype; + u16 vlan_tci; /* get the Rx desc from Rx ring based on 'next_to_clean' */ rx_desc = ICE_RX_DESC(rx_ring, ntc); @@ -1237,7 +1239,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) if (ice_is_non_eop(rx_ring, rx_desc)) continue; - ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf); + ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf, rx_desc); if (rx_buf->act == ICE_XDP_PASS) goto construct_skb; total_rx_bytes += xdp_get_buff_len(xdp); @@ -1275,7 +1277,7 @@ construct_skb: continue; } - vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc); + vlan_tci = ice_get_vlan_tci(rx_desc); /* pad the skb if needed, to make a valid ethernet frame */ if (eth_skb_pad(skb)) @@ -1285,14 +1287,11 @@ construct_skb: total_rx_bytes += skb->len; /* populate checksum, VLAN, and protocol */ - rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) & - ICE_RX_FLEX_DESC_PTYPE_M; - - ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); + ice_process_skb_fields(rx_ring, rx_desc, skb); ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb); /* send completed skb up the stack */ - ice_receive_skb(rx_ring, skb, vlan_tag); + ice_receive_skb(rx_ring, skb, vlan_tci); /* update budget accounting */ total_rx_pkts++; @@ -1493,9 +1492,9 @@ static void ice_set_wb_on_itr(struct ice_q_vector *q_vector) * be static in non-adaptive mode (user configured) */ wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), - ((ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) & - GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M | - GLINT_DYN_CTL_WB_ON_ITR_M); + FIELD_PREP(GLINT_DYN_CTL_ITR_INDX_M, ICE_ITR_NONE) | + FIELD_PREP(GLINT_DYN_CTL_INTENA_MSK_M, 1) | + FIELD_PREP(GLINT_DYN_CTL_WB_ON_ITR_M, 1)); q_vector->wb_on_itr = true; } diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index b28b9826bb..af955b0e5d 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ 
b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -257,6 +257,20 @@ enum ice_rx_dtype { ICE_RX_DTYPE_SPLIT_ALWAYS = 2, }; +struct ice_pkt_ctx { + u64 cached_phctime; + __be16 vlan_proto; +}; + +struct ice_xdp_buff { + struct xdp_buff xdp_buff; + const union ice_32b_rx_flex_desc *eop_desc; + const struct ice_pkt_ctx *pkt_ctx; +}; + +/* Required for compatibility with xdp_buffs from xsk_pool */ +static_assert(offsetof(struct ice_xdp_buff, xdp_buff) == 0); + /* indices into GLINT_ITR registers */ #define ICE_RX_ITR ICE_IDX_ITR0 #define ICE_TX_ITR ICE_IDX_ITR1 @@ -298,7 +312,6 @@ enum ice_dynamic_itr { /* descriptor ring, associated with a VSI */ struct ice_rx_ring { /* CL1 - 1st cacheline starts here */ - struct ice_rx_ring *next; /* pointer to next ring in q_vector */ void *desc; /* Descriptor ring memory */ struct device *dev; /* Used for DMA mapping */ struct net_device *netdev; /* netdev ring maps to */ @@ -310,13 +323,24 @@ struct ice_rx_ring { u16 count; /* Number of descriptors */ u16 reg_idx; /* HW register index of the ring */ u16 next_to_alloc; - /* CL2 - 2nd cacheline starts here */ + union { struct ice_rx_buf *rx_buf; struct xdp_buff **xdp_buf; }; - struct xdp_buff xdp; + /* CL2 - 2nd cacheline starts here */ + union { + struct ice_xdp_buff xdp_ext; + struct xdp_buff xdp; + }; /* CL3 - 3rd cacheline starts here */ + union { + struct ice_pkt_ctx pkt_ctx; + struct { + u64 cached_phctime; + __be16 vlan_proto; + }; + }; struct bpf_prog *xdp_prog; u16 rx_offset; @@ -332,10 +356,10 @@ struct ice_rx_ring { /* CL4 - 4th cacheline starts here */ struct ice_channel *ch; struct ice_tx_ring *xdp_ring; + struct ice_rx_ring *next; /* pointer to next ring in q_vector */ struct xsk_buff_pool *xsk_pool; u32 nr_frags; dma_addr_t dma; /* physical address of ring */ - u64 cached_phctime; u16 rx_buf_len; u8 dcb_tc; /* Traffic class of ring */ u8 ptp_rx; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c index 7e06373e14..839e5da24a 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c @@ -63,28 +63,42 @@ static enum pkt_hash_types ice_ptype_to_htype(u16 ptype) } /** - * ice_rx_hash - set the hash value in the skb + * ice_get_rx_hash - get RX hash value from descriptor + * @rx_desc: specific descriptor + * + * Returns hash, if present, 0 otherwise. 
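Because xdp_buff is the first member of ice_xdp_buff (the static_assert above pins it at offset 0, which keeps buffers handed out by the xsk_pool compatible), the driver can recover its private wrapper from the plain struct xdp_buff pointer that the XDP core passes back. An illustrative helper, not taken from the driver:

/* Illustrative only: container_of() is effectively a no-op cast here
 * since xdp_buff sits at offset 0 of the wrapper. */
static inline struct ice_xdp_buff *ice_xdp_buff_of(struct xdp_buff *xdp)
{
	return container_of(xdp, struct ice_xdp_buff, xdp_buff);
}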
+ */ +static u32 ice_get_rx_hash(const union ice_32b_rx_flex_desc *rx_desc) +{ + const struct ice_32b_rx_flex_desc_nic *nic_mdid; + + if (unlikely(rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)) + return 0; + + nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc; + return le32_to_cpu(nic_mdid->rss_hash); +} + +/** + * ice_rx_hash_to_skb - set the hash value in the skb * @rx_ring: descriptor ring * @rx_desc: specific descriptor * @skb: pointer to current skb * @rx_ptype: the ptype value from the descriptor */ static void -ice_rx_hash(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, - struct sk_buff *skb, u16 rx_ptype) +ice_rx_hash_to_skb(const struct ice_rx_ring *rx_ring, + const union ice_32b_rx_flex_desc *rx_desc, + struct sk_buff *skb, u16 rx_ptype) { - struct ice_32b_rx_flex_desc_nic *nic_mdid; u32 hash; if (!(rx_ring->netdev->features & NETIF_F_RXHASH)) return; - if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC) - return; - - nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc; - hash = le32_to_cpu(nic_mdid->rss_hash); - skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype)); + hash = ice_get_rx_hash(rx_desc); + if (likely(hash)) + skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype)); } /** @@ -171,11 +185,38 @@ checksum_fail: } /** + * ice_ptp_rx_hwts_to_skb - Put RX timestamp into skb + * @rx_ring: Ring to get the VSI info + * @rx_desc: Receive descriptor + * @skb: Particular skb to send timestamp with + * + * The timestamp is in ns, so we must convert the result first. + */ +static void +ice_ptp_rx_hwts_to_skb(struct ice_rx_ring *rx_ring, + const union ice_32b_rx_flex_desc *rx_desc, + struct sk_buff *skb) +{ + u64 ts_ns = ice_ptp_get_rx_hwts(rx_desc, &rx_ring->pkt_ctx); + + skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ts_ns); +} + +/** + * ice_get_ptype - Read HW packet type from the descriptor + * @rx_desc: RX descriptor + */ +static u16 ice_get_ptype(const union ice_32b_rx_flex_desc *rx_desc) +{ + return le16_to_cpu(rx_desc->wb.ptype_flex_flags0) & + ICE_RX_FLEX_DESC_PTYPE_M; +} + +/** * ice_process_skb_fields - Populate skb header fields from Rx descriptor * @rx_ring: Rx descriptor ring packet is being transacted on * @rx_desc: pointer to the EOP Rx descriptor * @skb: pointer to current skb being populated - * @ptype: the packet type decoded by hardware * * This function checks the ring, descriptor, and packet information in * order to populate the hash, checksum, VLAN, protocol, and @@ -184,9 +225,11 @@ checksum_fail: void ice_process_skb_fields(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, - struct sk_buff *skb, u16 ptype) + struct sk_buff *skb) { - ice_rx_hash(rx_ring, rx_desc, skb, ptype); + u16 ptype = ice_get_ptype(rx_desc); + + ice_rx_hash_to_skb(rx_ring, rx_desc, skb, ptype); /* modifies the skb - consumes the enet header */ skb->protocol = eth_type_trans(skb, rx_ring->netdev); @@ -194,28 +237,24 @@ ice_process_skb_fields(struct ice_rx_ring *rx_ring, ice_rx_csum(rx_ring, skb, rx_desc, ptype); if (rx_ring->ptp_rx) - ice_ptp_rx_hwtstamp(rx_ring, rx_desc, skb); + ice_ptp_rx_hwts_to_skb(rx_ring, rx_desc, skb); } /** * ice_receive_skb - Send a completed packet up the stack * @rx_ring: Rx ring in play * @skb: packet to send up - * @vlan_tag: VLAN tag for packet + * @vlan_tci: VLAN TCI for packet * * This function sends the completed packet (via. 
skb) up the stack using * gro receive functions (with/without VLAN tag) */ void -ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag) +ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tci) { - netdev_features_t features = rx_ring->netdev->features; - bool non_zero_vlan = !!(vlan_tag & VLAN_VID_MASK); - - if ((features & NETIF_F_HW_VLAN_CTAG_RX) && non_zero_vlan) - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); - else if ((features & NETIF_F_HW_VLAN_STAG_RX) && non_zero_vlan) - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag); + if ((vlan_tci & VLAN_VID_MASK) && rx_ring->vlan_proto) + __vlan_hwaccel_put_tag(skb, rx_ring->vlan_proto, + vlan_tci); napi_gro_receive(&rx_ring->q_vector->napi, skb); } @@ -464,3 +503,125 @@ void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res, spin_unlock(&xdp_ring->tx_lock); } } + +/** + * ice_xdp_rx_hw_ts - HW timestamp XDP hint handler + * @ctx: XDP buff pointer + * @ts_ns: destination address + * + * Copy HW timestamp (if available) to the destination address. + */ +static int ice_xdp_rx_hw_ts(const struct xdp_md *ctx, u64 *ts_ns) +{ + const struct ice_xdp_buff *xdp_ext = (void *)ctx; + + *ts_ns = ice_ptp_get_rx_hwts(xdp_ext->eop_desc, + xdp_ext->pkt_ctx); + if (!*ts_ns) + return -ENODATA; + + return 0; +} + +/* Define a ptype index -> XDP hash type lookup table. + * It uses the same ptype definitions as ice_decode_rx_desc_ptype[], + * avoiding possible copy-paste errors. + */ +#undef ICE_PTT +#undef ICE_PTT_UNUSED_ENTRY + +#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ + [PTYPE] = XDP_RSS_L3_##OUTER_IP_VER | XDP_RSS_L4_##I | XDP_RSS_TYPE_##PL + +#define ICE_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = 0 + +/* A few supplementary definitions for when XDP hash types do not coincide + * with what can be generated from ptype definitions + * by means of preprocessor concatenation. + */ +#define XDP_RSS_L3_NONE XDP_RSS_TYPE_NONE +#define XDP_RSS_L4_NONE XDP_RSS_TYPE_NONE +#define XDP_RSS_TYPE_PAY2 XDP_RSS_TYPE_L2 +#define XDP_RSS_TYPE_PAY3 XDP_RSS_TYPE_NONE +#define XDP_RSS_TYPE_PAY4 XDP_RSS_L4 + +static const enum xdp_rss_hash_type +ice_ptype_to_xdp_hash[ICE_NUM_DEFINED_PTYPES] = { + ICE_PTYPES +}; + +#undef XDP_RSS_L3_NONE +#undef XDP_RSS_L4_NONE +#undef XDP_RSS_TYPE_PAY2 +#undef XDP_RSS_TYPE_PAY3 +#undef XDP_RSS_TYPE_PAY4 + +#undef ICE_PTT +#undef ICE_PTT_UNUSED_ENTRY + +/** + * ice_xdp_rx_hash_type - Get XDP-specific hash type from the RX descriptor + * @eop_desc: End of Packet descriptor + */ +static enum xdp_rss_hash_type +ice_xdp_rx_hash_type(const union ice_32b_rx_flex_desc *eop_desc) +{ + u16 ptype = ice_get_ptype(eop_desc); + + if (unlikely(ptype >= ICE_NUM_DEFINED_PTYPES)) + return 0; + + return ice_ptype_to_xdp_hash[ptype]; +} + +/** + * ice_xdp_rx_hash - RX hash XDP hint handler + * @ctx: XDP buff pointer + * @hash: hash destination address + * @rss_type: XDP hash type destination address + * + * Copy RX hash (if available) and its type to the destination address. 
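These XDP hint handlers back the kernel's generic RX metadata kfuncs, so once the ops are registered an XDP program can pull hash, timestamp, and VLAN data per packet. A rough BPF-side consumer sketch (assuming a libbpf build with vmlinux.h; the kfunc names are the generic XDP metadata ones, not ice-specific):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, __u32 *hash,
				    enum xdp_rss_hash_type *rss_type) __ksym;
extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
					 __u64 *timestamp) __ksym;
extern int bpf_xdp_metadata_rx_vlan_tag(const struct xdp_md *ctx,
					__be16 *vlan_proto,
					__u16 *vlan_tci) __ksym;

SEC("xdp")
int rx_hints(struct xdp_md *ctx)
{
	enum xdp_rss_hash_type rss_type;
	__be16 vlan_proto;
	__u16 vlan_tci;
	__u64 ts;
	__u32 hash;

	if (!bpf_xdp_metadata_rx_hash(ctx, &hash, &rss_type))
		bpf_printk("hash 0x%x type %d", hash, rss_type);
	if (!bpf_xdp_metadata_rx_timestamp(ctx, &ts))
		bpf_printk("hw ts %llu ns", ts);
	if (!bpf_xdp_metadata_rx_vlan_tag(ctx, &vlan_proto, &vlan_tci))
		bpf_printk("vlan tci 0x%x", vlan_tci);
	return XDP_PASS;
}

char LICENSE[] SEC("license") = "GPL";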
+ */ +static int ice_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash, + enum xdp_rss_hash_type *rss_type) +{ + const struct ice_xdp_buff *xdp_ext = (void *)ctx; + + *hash = ice_get_rx_hash(xdp_ext->eop_desc); + *rss_type = ice_xdp_rx_hash_type(xdp_ext->eop_desc); + if (!likely(*hash)) + return -ENODATA; + + return 0; +} + +/** + * ice_xdp_rx_vlan_tag - VLAN tag XDP hint handler + * @ctx: XDP buff pointer + * @vlan_proto: destination address for VLAN protocol + * @vlan_tci: destination address for VLAN TCI + * + * Copy VLAN tag (if was stripped) and corresponding protocol + * to the destination address. + */ +static int ice_xdp_rx_vlan_tag(const struct xdp_md *ctx, __be16 *vlan_proto, + u16 *vlan_tci) +{ + const struct ice_xdp_buff *xdp_ext = (void *)ctx; + + *vlan_proto = xdp_ext->pkt_ctx->vlan_proto; + if (!*vlan_proto) + return -ENODATA; + + *vlan_tci = ice_get_vlan_tci(xdp_ext->eop_desc); + if (!*vlan_tci) + return -ENODATA; + + return 0; +} + +const struct xdp_metadata_ops ice_xdp_md_ops = { + .xmo_rx_timestamp = ice_xdp_rx_hw_ts, + .xmo_rx_hash = ice_xdp_rx_hash, + .xmo_rx_vlan_tag = ice_xdp_rx_vlan_tag, +}; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h index b0e56675f9..afcead4bae 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h @@ -97,7 +97,7 @@ ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag) } /** - * ice_get_vlan_tag_from_rx_desc - get VLAN from Rx flex descriptor + * ice_get_vlan_tci - get VLAN TCI from Rx flex descriptor * @rx_desc: Rx 32b flex descriptor with RXDID=2 * * The OS and current PF implementation only support stripping a single VLAN tag @@ -105,7 +105,7 @@ ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag) * one is found return the tag, else return 0 to mean no VLAN tag was found. 
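The vlan_tag to vlan_tci rename is deliberate: the 16-bit value carried here is the full Tag Control Information word (priority, drop-eligible bit, and VLAN ID), not just the ID. A standalone sketch of the decomposition using the generic masks from <linux/if_vlan.h>:

#include <linux/if_vlan.h>
#include <linux/printk.h>

static void decode_vlan_tci(u16 tci)
{
	u16 vid   = tci & VLAN_VID_MASK;			/* bits 11:0  */
	bool dei  = tci & VLAN_CFI_MASK;			/* bit 12     */
	u8 prio   = (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;	/* bits 15:13 */

	pr_info("vid %u dei %d prio %u\n", vid, dei, prio);
}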
*/ static inline u16 -ice_get_vlan_tag_from_rx_desc(union ice_32b_rx_flex_desc *rx_desc) +ice_get_vlan_tci(const union ice_32b_rx_flex_desc *rx_desc) { u16 stat_err_bits; @@ -161,7 +161,17 @@ void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val); void ice_process_skb_fields(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, - struct sk_buff *skb, u16 ptype); + struct sk_buff *skb); void -ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag); +ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tci); + +static inline void +ice_xdp_meta_set_desc(struct xdp_buff *xdp, + union ice_32b_rx_flex_desc *eop_desc) +{ + struct ice_xdp_buff *xdp_ext = container_of(xdp, struct ice_xdp_buff, + xdp_buff); + + xdp_ext->eop_desc = eop_desc; +} #endif /* !_ICE_TXRX_LIB_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index a18ca0ff87..a508e917ce 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -17,6 +17,7 @@ #include "ice_protocol_type.h" #include "ice_sbq_cmd.h" #include "ice_vlan_mode.h" +#include "ice_fwlog.h" static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc) { @@ -246,6 +247,7 @@ struct ice_fd_hw_prof { int cnt; u64 entry_h[ICE_MAX_FDIR_VSI_PER_FILTER][ICE_FD_HW_SEG_MAX]; u16 vsi_h[ICE_MAX_FDIR_VSI_PER_FILTER]; + u64 prof_id[ICE_FD_HW_SEG_MAX]; }; /* Common HW capabilities for SW use */ @@ -351,6 +353,7 @@ struct ice_ts_func_info { #define ICE_TS_TMR0_ENA_M BIT(25) #define ICE_TS_TMR1_ENA_M BIT(26) #define ICE_TS_LL_TX_TS_READ_M BIT(28) +#define ICE_TS_LL_TX_TS_INT_READ_M BIT(29) struct ice_ts_dev_info { /* Device specific info */ @@ -364,6 +367,7 @@ struct ice_ts_dev_info { u8 tmr0_ena; u8 tmr1_ena; u8 ts_ll_read; + u8 ts_ll_int_read; }; /* Function specific capabilities */ @@ -377,6 +381,8 @@ struct ice_hw_func_caps { struct ice_ts_func_info ts_func_info; }; +#define ICE_SENSOR_SUPPORT_E810_INT_TEMP_BIT 0 + /* Device wide capabilities */ struct ice_hw_dev_caps { struct ice_hw_common_caps common_cap; @@ -385,6 +391,11 @@ struct ice_hw_dev_caps { u32 num_flow_director_fltr; /* Number of FD filters available */ struct ice_ts_dev_info ts_dev_info; u32 num_funcs; + /* bitmap of supported sensors + * bit 0 - internal temperature sensor + * bit 31:1 - Reserved + */ + u32 supported_sensors; }; /* MAC info */ @@ -730,24 +741,6 @@ struct ice_switch_info { DECLARE_BITMAP(prof_res_bm[ICE_MAX_NUM_PROFILES], ICE_MAX_FV_WORDS); }; -/* FW logging configuration */ -struct ice_fw_log_evnt { - u8 cfg : 4; /* New event enables to configure */ - u8 cur : 4; /* Current/active event enables */ -}; - -struct ice_fw_log_cfg { - u8 cq_en : 1; /* FW logging is enabled via the control queue */ - u8 uart_en : 1; /* FW logging is enabled via UART for all PFs */ - u8 actv_evnts; /* Cumulation of currently enabled log events */ - -#define ICE_FW_LOG_EVNT_INFO (ICE_AQC_FW_LOG_INFO_EN >> ICE_AQC_FW_LOG_EN_S) -#define ICE_FW_LOG_EVNT_INIT (ICE_AQC_FW_LOG_INIT_EN >> ICE_AQC_FW_LOG_EN_S) -#define ICE_FW_LOG_EVNT_FLOW (ICE_AQC_FW_LOG_FLOW_EN >> ICE_AQC_FW_LOG_EN_S) -#define ICE_FW_LOG_EVNT_ERR (ICE_AQC_FW_LOG_ERR_EN >> ICE_AQC_FW_LOG_EN_S) - struct ice_fw_log_evnt evnts[ICE_AQC_FW_LOG_ID_MAX]; -}; - /* Enum defining the different states of the mailbox snapshot in the * PF-VF mailbox overflow detection algorithm. 
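The supported_sensors word added to the device capabilities above is a plain bitmap; with only bit 0 defined so far, a capability check reduces to a single bit test. A hedged sketch (the helper name is hypothetical, the bit define is the one introduced above):

/* Hypothetical helper; ICE_SENSOR_SUPPORT_E810_INT_TEMP_BIT comes
 * from ice_type.h above. */
static bool ice_has_int_temp_sensor(const struct ice_hw *hw)
{
	return hw->dev_caps.supported_sensors &
	       BIT(ICE_SENSOR_SUPPORT_E810_INT_TEMP_BIT);
}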
The snapshot can be in * states: @@ -827,7 +820,7 @@ struct ice_mbx_data { enum ice_phy_model { ICE_PHY_UNSUP = -1, ICE_PHY_E810 = 1, - ICE_PHY_E822, + ICE_PHY_E82X, }; /* Port hardware description */ @@ -889,7 +882,9 @@ struct ice_hw { u8 fw_patch; /* firmware patch version */ u32 fw_build; /* firmware build number */ - struct ice_fw_log_cfg fw_log; + struct ice_fwlog_cfg fwlog_cfg; + bool fwlog_supported; /* does hardware support FW logging? */ + struct ice_fwlog_ring fwlog_ring; /* Device max aggregate bandwidths corresponding to the GL_PWR_MODE_CTL * register. Used for determining the ITR/INTRL granularity during @@ -910,10 +905,9 @@ struct ice_hw { /* INTRL granularity in 1 us */ u8 intrl_gran; -#define ICE_PHY_PER_NAC_E822 1 #define ICE_MAX_QUAD 2 -#define ICE_QUADS_PER_PHY_E822 2 -#define ICE_PORTS_PER_PHY_E822 8 +#define ICE_QUADS_PER_PHY_E82X 2 +#define ICE_PORTS_PER_PHY_E82X 8 #define ICE_PORTS_PER_QUAD 4 #define ICE_PORTS_PER_PHY_E810 4 #define ICE_NUM_EXTERNAL_PORTS (ICE_MAX_QUAD * ICE_PORTS_PER_QUAD) @@ -1009,7 +1003,6 @@ struct ice_hw_port_stats { u64 error_bytes; /* errbc */ u64 mac_local_faults; /* mlfc */ u64 mac_remote_faults; /* mrfc */ - u64 rx_len_errors; /* rlec */ u64 link_xon_rx; /* lxonrxc */ u64 link_xoff_rx; /* lxoffrxc */ u64 link_xon_tx; /* lxontxc */ @@ -1048,6 +1041,7 @@ enum ice_sw_fwd_act_type { ICE_FWD_TO_Q, ICE_FWD_TO_QGRP, ICE_DROP_PACKET, + ICE_MIRROR_PACKET, ICE_NOP, ICE_INVAL_ACT }; @@ -1078,7 +1072,7 @@ struct ice_aq_get_set_rss_lut_params { #define ICE_OROM_VER_BUILD_SHIFT 8 #define ICE_OROM_VER_BUILD_MASK (0xffff << ICE_OROM_VER_BUILD_SHIFT) #define ICE_OROM_VER_SHIFT 24 -#define ICE_OROM_VER_MASK (0xff << ICE_OROM_VER_SHIFT) +#define ICE_OROM_VER_MASK (0xffU << ICE_OROM_VER_SHIFT) #define ICE_SR_PFA_PTR 0x40 #define ICE_SR_1ST_NVM_BANK_PTR 0x42 #define ICE_SR_NVM_BANK_SIZE 0x43 diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c index 88e3cd09f8..15ade19de5 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c @@ -775,6 +775,7 @@ void ice_reset_all_vfs(struct ice_pf *pf) ice_for_each_vf(pf, bkt, vf) { mutex_lock(&vf->cfg_lock); + ice_eswitch_detach(pf, vf); vf->driver_caps = 0; ice_vc_set_default_allowlist(vf); @@ -790,13 +791,11 @@ void ice_reset_all_vfs(struct ice_pf *pf) ice_vf_rebuild_vsi(vf); ice_vf_post_vsi_rebuild(vf); + ice_eswitch_attach(pf, vf); + mutex_unlock(&vf->cfg_lock); } - if (ice_is_eswitch_mode_switchdev(pf)) - if (ice_eswitch_rebuild(pf)) - dev_warn(dev, "eswitch rebuild failed\n"); - ice_flush(hw); clear_bit(ICE_VF_DIS, pf->state); @@ -864,6 +863,11 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags) return 0; } + if (flags & ICE_VF_RESET_LOCK) + mutex_lock(&vf->cfg_lock); + else + lockdep_assert_held(&vf->cfg_lock); + lag = pf->lag; mutex_lock(&pf->lag_mutex); if (lag && lag->bonded && lag->primary) { @@ -875,11 +879,6 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags) act_prt = ICE_LAG_INVALID_PORT; } - if (flags & ICE_VF_RESET_LOCK) - mutex_lock(&vf->cfg_lock); - else - lockdep_assert_held(&vf->cfg_lock); - if (ice_is_vf_disabled(vf)) { vsi = ice_get_vf_vsi(vf); if (!vsi) { @@ -958,20 +957,20 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags) goto out_unlock; } - ice_eswitch_update_repr(vsi); + ice_eswitch_update_repr(vf->repr_id, vsi); /* if the VF has been reset allow it to come up again */ ice_mbx_clear_malvf(&vf->mbx_info); out_unlock: - if (flags & ICE_VF_RESET_LOCK) - mutex_unlock(&vf->cfg_lock); - if (lag && 
lag->bonded && lag->primary && act_prt != ICE_LAG_INVALID_PORT) ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt); mutex_unlock(&pf->lag_mutex); + if (flags & ICE_VF_RESET_LOCK) + mutex_unlock(&vf->cfg_lock); + return err; } diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h index 6b41e0f3d3..0cc9034065 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h @@ -129,7 +129,7 @@ struct ice_vf { struct ice_mdd_vf_events mdd_tx_events; DECLARE_BITMAP(opcodes_allowlist, VIRTCHNL_OP_MAX); - struct ice_repr *repr; + unsigned long repr_id; const struct ice_virtchnl_ops *virtchnl_ops; const struct ice_vf_ops *vf_ops; diff --git a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c index 80dc4bcdd3..b3e1bdcb80 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c @@ -26,24 +26,22 @@ static void ice_port_vlan_on(struct ice_vsi *vsi) struct ice_vsi_vlan_ops *vlan_ops; struct ice_pf *pf = vsi->back; - if (ice_is_dvm_ena(&pf->hw)) { - vlan_ops = &vsi->outer_vlan_ops; - - /* setup outer VLAN ops */ - vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan; - vlan_ops->clear_port_vlan = ice_vsi_clear_outer_port_vlan; + /* setup inner VLAN ops */ + vlan_ops = &vsi->inner_vlan_ops; - /* setup inner VLAN ops */ - vlan_ops = &vsi->inner_vlan_ops; + if (ice_is_dvm_ena(&pf->hw)) { vlan_ops->add_vlan = noop_vlan_arg; vlan_ops->del_vlan = noop_vlan_arg; vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping; vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping; vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion; vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion; - } else { - vlan_ops = &vsi->inner_vlan_ops; + /* setup outer VLAN ops */ + vlan_ops = &vsi->outer_vlan_ops; + vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan; + vlan_ops->clear_port_vlan = ice_vsi_clear_outer_port_vlan; + } else { vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan; vlan_ops->clear_port_vlan = ice_vsi_clear_inner_port_vlan; } diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index d6348f2082..7b550d7d96 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -499,7 +499,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) vfres->rss_lut_size = ICE_LUT_VSI_SIZE; vfres->max_mtu = ice_vc_get_max_frame_size(vf); - vfres->vsi_res[0].vsi_id = vf->lan_vsi_num; + vfres->vsi_res[0].vsi_id = ICE_VF_VSI_ID; vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV; vfres->vsi_res[0].num_queue_pairs = vsi->num_txq; ether_addr_copy(vfres->vsi_res[0].default_mac_addr, @@ -545,12 +545,7 @@ static void ice_vc_reset_vf_msg(struct ice_vf *vf) */ bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id) { - struct ice_pf *pf = vf->pf; - struct ice_vsi *vsi; - - vsi = ice_find_vsi(pf, vsi_id); - - return (vsi && (vsi->vf == vf)); + return vsi_id == ICE_VF_VSI_ID; } /** @@ -682,9 +677,7 @@ out: * a specific virtchnl RSS cfg * @hw: pointer to the hardware * @rss_cfg: pointer to the virtchnl RSS cfg - * @addl_hdrs: pointer to the protocol header fields (ICE_FLOW_SEG_HDR_*) - * to configure - * @hash_flds: pointer to the hash bit fields (ICE_FLOW_HASH_*) to configure + * @hash_cfg: pointer to the HW hash configuration * * Return true if all the protocol header and hash fields in the RSS cfg could * be 
parsed, else return false @@ -692,13 +685,23 @@ out: * This function parses the virtchnl RSS cfg to be the intended * hash fields and the intended header for RSS configuration */ -static bool -ice_vc_parse_rss_cfg(struct ice_hw *hw, struct virtchnl_rss_cfg *rss_cfg, - u32 *addl_hdrs, u64 *hash_flds) +static bool ice_vc_parse_rss_cfg(struct ice_hw *hw, + struct virtchnl_rss_cfg *rss_cfg, + struct ice_rss_hash_cfg *hash_cfg) { const struct ice_vc_hash_field_match_type *hf_list; const struct ice_vc_hdr_match_type *hdr_list; int i, hf_list_len, hdr_list_len; + u32 *addl_hdrs = &hash_cfg->addl_hdrs; + u64 *hash_flds = &hash_cfg->hash_flds; + + /* set outer layer RSS as default */ + hash_cfg->hdr_type = ICE_RSS_OUTER_HEADERS; + + if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC) + hash_cfg->symm = true; + else + hash_cfg->symm = false; hf_list = ice_vc_hash_field_list; hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list); @@ -849,18 +852,24 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add) kfree(ctx); } else { - u32 addl_hdrs = ICE_FLOW_SEG_HDR_NONE; - u64 hash_flds = ICE_HASH_INVALID; + struct ice_rss_hash_cfg cfg; + + /* Only check for none raw pattern case */ + if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + cfg.addl_hdrs = ICE_FLOW_SEG_HDR_NONE; + cfg.hash_flds = ICE_HASH_INVALID; + cfg.hdr_type = ICE_RSS_ANY_HEADERS; - if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &addl_hdrs, - &hash_flds)) { + if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &cfg)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } if (add) { - if (ice_add_rss_cfg(hw, vsi->idx, hash_flds, - addl_hdrs)) { + if (ice_add_rss_cfg(hw, vsi, &cfg)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n", vsi->vsi_num, v_ret); @@ -868,8 +877,7 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add) } else { int status; - status = ice_rem_rss_cfg(hw, vsi->idx, hash_flds, - addl_hdrs); + status = ice_rem_rss_cfg(hw, vsi->idx, &cfg); /* We just ignore -ENOENT, because if two configurations * share the same profile remove one of them actually * removes both, since the profile is deleted. 
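With the parse path now filling a complete struct ice_rss_hash_cfg, the header selection, hash fields, inner/outer header choice, and the symmetric-Toeplitz flag all travel together instead of as loose arguments. A hedged sketch of a populated configuration (field names follow the diff above; the flow constants are the usual ice_flow.h ones) for symmetric hashing over the outer IPv4/TCP 4-tuple:

/* Sketch: a fully-populated configuration for symmetric Toeplitz
 * over the outer IPv4/TCP 4-tuple. */
struct ice_rss_hash_cfg cfg = {
	.addl_hdrs	= ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP,
	.hash_flds	= ICE_HASH_TCP_IPV4,
	.hdr_type	= ICE_RSS_OUTER_HEADERS,
	.symm		= true,
};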
@@ -980,6 +988,51 @@ error_param: } /** + * ice_vc_config_rss_hfunc + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * Configure the VF's RSS Hash function + */ +static int ice_vc_config_rss_hfunc(struct ice_vf *vf, u8 *msg) +{ + struct virtchnl_rss_hfunc *vrh = (struct virtchnl_rss_hfunc *)msg; + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + u8 hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ; + struct ice_vsi *vsi; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + if (!ice_vc_isvalid_vsi_id(vf, vrh->vsi_id)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + vsi = ice_get_vf_vsi(vf); + if (!vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + if (vrh->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC) + hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ; + + if (ice_set_rss_hfunc(vsi, hfunc)) + v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; +error_param: + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_HFUNC, v_ret, + NULL, 0); +} + +/** * ice_vc_cfg_promiscuous_mode_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer @@ -2625,7 +2678,7 @@ static int ice_vc_set_rss_hena(struct ice_vf *vf, u8 *msg) } if (vrh->hena) { - status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, vrh->hena); + status = ice_add_avf_rss_cfg(&pf->hw, vsi, vrh->hena); v_ret = ice_err_to_virt_err(status); } @@ -3037,7 +3090,7 @@ static struct ice_vlan ice_vc_to_vlan(struct virtchnl_vlan *vc_vlan) { struct ice_vlan vlan = { 0 }; - vlan.prio = (vc_vlan->tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; + vlan.prio = FIELD_GET(VLAN_PRIO_MASK, vc_vlan->tci); vlan.vid = vc_vlan->tci & VLAN_VID_MASK; vlan.tpid = vc_vlan->tpid; @@ -3746,6 +3799,7 @@ static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = { .cfg_irq_map_msg = ice_vc_cfg_irq_map_msg, .config_rss_key = ice_vc_config_rss_key, .config_rss_lut = ice_vc_config_rss_lut, + .config_rss_hfunc = ice_vc_config_rss_hfunc, .get_stats_msg = ice_vc_get_stats_msg, .cfg_promiscuous_mode_msg = ice_vc_cfg_promiscuous_mode_msg, .add_vlan_msg = ice_vc_add_vlan_msg, @@ -3875,6 +3929,7 @@ static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = { .cfg_irq_map_msg = ice_vc_cfg_irq_map_msg, .config_rss_key = ice_vc_config_rss_key, .config_rss_lut = ice_vc_config_rss_lut, + .config_rss_hfunc = ice_vc_config_rss_hfunc, .get_stats_msg = ice_vc_get_stats_msg, .cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode, .add_vlan_msg = ice_vc_add_vlan_msg, @@ -4057,6 +4112,9 @@ error_handler: case VIRTCHNL_OP_CONFIG_RSS_LUT: err = ops->config_rss_lut(vf, msg); break; + case VIRTCHNL_OP_CONFIG_RSS_HFUNC: + err = ops->config_rss_hfunc(vf, msg); + break; case VIRTCHNL_OP_GET_STATS: err = ops->get_stats_msg(vf, msg); break; diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/ice_virtchnl.h index cd747718de..3a41158691 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.h @@ -19,6 +19,15 @@ #define ICE_MAX_MACADDR_PER_VF 18 #define ICE_FLEX_DESC_RXDID_MAX_NUM 64 +/* VFs only get a single VSI. For ice hardware, the VF does not need to know + * its VSI index. However, the virtchnl interface requires a VSI number, + * mainly due to legacy hardware. 
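For reference, the request that ice_vc_config_rss_hfunc() services originates on the VF as a VIRTCHNL_OP_CONFIG_RSS_HFUNC message. A hedged sketch of the sending side (struct vf_adapter and vf_send_msg() are hypothetical stand-ins for the VF driver's context and mailbox transport; the struct and opcode follow the code above):

/* Hedged VF-side sketch; vf_send_msg() is a hypothetical stand-in
 * for the VF driver's mailbox transport. */
static int vf_request_symmetric_rss(struct vf_adapter *adapter, u16 vsi_id)
{
	struct virtchnl_rss_hfunc vrh = {
		.vsi_id = vsi_id,	/* the VF's single VSI */
		.rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC,
	};

	return vf_send_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_HFUNC,
			   (u8 *)&vrh, sizeof(vrh));
}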
+ * + * Since the VF doesn't need this information, report a static value to the VF + * instead of leaking any information about the PF or hardware setup. + */ +#define ICE_VF_VSI_ID 1 + struct ice_virtchnl_ops { int (*get_ver_msg)(struct ice_vf *vf, u8 *msg); int (*get_vf_res_msg)(struct ice_vf *vf, u8 *msg); @@ -32,6 +41,7 @@ struct ice_virtchnl_ops { int (*cfg_irq_map_msg)(struct ice_vf *vf, u8 *msg); int (*config_rss_key)(struct ice_vf *vf, u8 *msg); int (*config_rss_lut)(struct ice_vf *vf, u8 *msg); + int (*config_rss_hfunc)(struct ice_vf *vf, u8 *msg); int (*get_stats_msg)(struct ice_vf *vf, u8 *msg); int (*cfg_promiscuous_mode_msg)(struct ice_vf *vf, u8 *msg); int (*add_vlan_msg)(struct ice_vf *vf, u8 *msg); diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c index 588b77f1a4..d796dbd2a4 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c @@ -66,6 +66,7 @@ static const u32 vlan_v2_allowlist_opcodes[] = { static const u32 rss_pf_allowlist_opcodes[] = { VIRTCHNL_OP_CONFIG_RSS_KEY, VIRTCHNL_OP_CONFIG_RSS_LUT, VIRTCHNL_OP_GET_RSS_HENA_CAPS, VIRTCHNL_OP_SET_RSS_HENA, + VIRTCHNL_OP_CONFIG_RSS_HFUNC, }; /* VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC */ diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c index 24b23b7ef0..f001553e1a 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c @@ -10,19 +10,6 @@ #define to_fltr_conf_from_desc(p) \ container_of(p, struct virtchnl_fdir_fltr_conf, input) -#define ICE_FLOW_PROF_TYPE_S 0 -#define ICE_FLOW_PROF_TYPE_M (0xFFFFFFFFULL << ICE_FLOW_PROF_TYPE_S) -#define ICE_FLOW_PROF_VSI_S 32 -#define ICE_FLOW_PROF_VSI_M (0xFFFFFFFFULL << ICE_FLOW_PROF_VSI_S) - -/* Flow profile ID format: - * [0:31] - flow type, flow + tun_offs - * [32:63] - VSI index - */ -#define ICE_FLOW_PROF_FD(vsi, flow, tun_offs) \ - ((u64)(((((flow) + (tun_offs)) & ICE_FLOW_PROF_TYPE_M)) | \ - (((u64)(vsi) << ICE_FLOW_PROF_VSI_S) & ICE_FLOW_PROF_VSI_M))) - #define GTPU_TEID_OFFSET 4 #define GTPU_EH_QFI_OFFSET 1 #define GTPU_EH_QFI_MASK 0x3F @@ -493,6 +480,7 @@ ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun) return; vf_prof = fdir->fdir_prof[flow]; + prof_id = vf_prof->prof_id[tun]; vf_vsi = ice_get_vf_vsi(vf); if (!vf_vsi) { @@ -503,9 +491,6 @@ ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun) if (!fdir->prof_entry_cnt[flow][tun]) return; - prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, - flow, tun ? ICE_FLTR_PTYPE_MAX : 0); - for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++) if (vf_prof->entry_h[i][tun]) { u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]); @@ -647,7 +632,6 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, struct ice_hw *hw; u64 entry1_h = 0; u64 entry2_h = 0; - u64 prof_id; int ret; pf = vf->pf; @@ -681,18 +665,15 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, ice_vc_fdir_rem_prof(vf, flow, tun); } - prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, flow, - tun ? 
ICE_FLTR_PTYPE_MAX : 0); - - ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg, - tun + 1, &prof); + ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, seg, + tun + 1, false, &prof); if (ret) { dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n", flow, vf->vf_id); goto err_exit; } - ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx, + ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx, vf_vsi->idx, ICE_FLOW_PRIO_NORMAL, seg, &entry1_h); if (ret) { @@ -701,7 +682,7 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, goto err_prof; } - ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx, + ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx, ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL, seg, &entry2_h); if (ret) { @@ -725,14 +706,16 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, vf_prof->cnt++; fdir->prof_entry_cnt[flow][tun]++; + vf_prof->prof_id[tun] = prof->id; + return 0; err_entry_1: ice_rem_prof_id_flow(hw, ICE_BLK_FD, - ice_get_hw_vsi_num(hw, vf_vsi->idx), prof_id); + ice_get_hw_vsi_num(hw, vf_vsi->idx), prof->id); ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h); err_prof: - ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id); + ice_flow_rem_prof(hw, ICE_BLK_FD, prof->id); err_exit: return ret; } @@ -1480,16 +1463,15 @@ ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx, int ret; stat_err = le16_to_cpu(ctx->rx_desc.wb.status_error0); - if (((stat_err & ICE_FXD_FLTR_WB_QW1_DD_M) >> - ICE_FXD_FLTR_WB_QW1_DD_S) != ICE_FXD_FLTR_WB_QW1_DD_YES) { + if (FIELD_GET(ICE_FXD_FLTR_WB_QW1_DD_M, stat_err) != + ICE_FXD_FLTR_WB_QW1_DD_YES) { *status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE; dev_err(dev, "VF %d: Desc Done not set\n", vf->vf_id); ret = -EINVAL; goto err_exit; } - prog_id = (stat_err & ICE_FXD_FLTR_WB_QW1_PROG_ID_M) >> - ICE_FXD_FLTR_WB_QW1_PROG_ID_S; + prog_id = FIELD_GET(ICE_FXD_FLTR_WB_QW1_PROG_ID_M, stat_err); if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD && ctx->v_opcode != VIRTCHNL_OP_ADD_FDIR_FILTER) { dev_err(dev, "VF %d: Desc show add, but ctx not", @@ -1508,8 +1490,7 @@ ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx, goto err_exit; } - error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_M) >> - ICE_FXD_FLTR_WB_QW1_FAIL_S; + error = FIELD_GET(ICE_FXD_FLTR_WB_QW1_FAIL_M, stat_err); if (error == ICE_FXD_FLTR_WB_QW1_FAIL_YES) { if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD) { dev_err(dev, "VF %d, Failed to add FDIR rule due to no space in the table", @@ -1524,8 +1505,7 @@ ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx, goto err_exit; } - error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M) >> - ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S; + error = FIELD_GET(ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M, stat_err); if (error == ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES) { dev_err(dev, "VF %d: Profile matching error", vf->vf_id); *status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE; diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c index 8307902115..2e9ad27cb9 100644 --- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c @@ -487,10 +487,11 @@ int ice_vsi_ena_outer_stripping(struct ice_vsi *vsi, u16 tpid) ctxt->info.outer_vlan_flags = vsi->info.outer_vlan_flags & ~(ICE_AQ_VSI_OUTER_VLAN_EMODE_M | ICE_AQ_VSI_OUTER_TAG_TYPE_M); ctxt->info.outer_vlan_flags |= - ((ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW_BOTH << - ICE_AQ_VSI_OUTER_VLAN_EMODE_S) | - ((tag_type << 
ICE_AQ_VSI_OUTER_TAG_TYPE_S) & - ICE_AQ_VSI_OUTER_TAG_TYPE_M)); + /* we want EMODE_SHOW_BOTH, but that value is zero, so the line + * above clears it well enough that we don't need to try to set + * zero here, so just do the tag type + */ + FIELD_PREP(ICE_AQ_VSI_OUTER_TAG_TYPE_M, tag_type); err = ice_update_vsi(hw, vsi->idx, ctxt, NULL); if (err) @@ -595,11 +596,9 @@ int ice_vsi_ena_outer_insertion(struct ice_vsi *vsi, u16 tpid) ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M | ICE_AQ_VSI_OUTER_TAG_TYPE_M); ctxt->info.outer_vlan_flags |= - ((ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL << - ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) & - ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M) | - ((tag_type << ICE_AQ_VSI_OUTER_TAG_TYPE_S) & - ICE_AQ_VSI_OUTER_TAG_TYPE_M); + FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M, + ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL) | + FIELD_PREP(ICE_AQ_VSI_OUTER_TAG_TYPE_M, tag_type); err = ice_update_vsi(hw, vsi->idx, ctxt, NULL); if (err) @@ -648,9 +647,8 @@ int ice_vsi_dis_outer_insertion(struct ice_vsi *vsi) ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M); ctxt->info.outer_vlan_flags |= ICE_AQ_VSI_OUTER_VLAN_BLOCK_TX_DESC | - ((ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL << - ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) & - ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M); + FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M, + ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL); err = ice_update_vsi(hw, vsi->idx, ctxt, NULL); if (err) @@ -708,8 +706,7 @@ __ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, u16 vlan_info, u16 tpid) ctxt->info.outer_vlan_flags = (ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW << ICE_AQ_VSI_OUTER_VLAN_EMODE_S) | - ((tag_type << ICE_AQ_VSI_OUTER_TAG_TYPE_S) & - ICE_AQ_VSI_OUTER_TAG_TYPE_M) | + FIELD_PREP(ICE_AQ_VSI_OUTER_TAG_TYPE_M, tag_type) | ICE_AQ_VSI_OUTER_VLAN_BLOCK_TX_DESC | (ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ACCEPTUNTAGGED << ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) | diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 0fd5551b10..2eecd0f39a 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -459,6 +459,11 @@ static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp, rx_desc->read.pkt_addr = cpu_to_le64(dma); rx_desc->wb.status_error0 = 0; + /* Put private info that changes on a per-packet basis + * into xdp_buff_xsk->cb. 
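For zero-copy, the same ice_xdp_buff wrapper rides in the cb[] scratch area of xdp_buff_xsk, so the per-descriptor pointer set here survives until the hint callbacks run. The core provides a build-time guard for exactly this; a sketch of how a driver would invoke it (the enclosing function is illustrative, the macro is the real one from <net/xdp_sock_drv.h>):

#include <net/xdp_sock_drv.h>

static void ice_assert_xsk_priv_fits(void)
{
	/* Fails the build if ice_xdp_buff outgrows xdp_buff_xsk::cb. */
	XSK_CHECK_PRIV_TYPE(struct ice_xdp_buff);
}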
+ */ + ice_xdp_meta_set_desc(*xdp, rx_desc); + rx_desc++; xdp++; } @@ -865,8 +870,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget) struct xdp_buff *xdp; struct sk_buff *skb; u16 stat_err_bits; - u16 vlan_tag = 0; - u16 rx_ptype; + u16 vlan_tci; rx_desc = ICE_RX_DESC(rx_ring, ntc); @@ -943,13 +947,10 @@ construct_skb: total_rx_bytes += skb->len; total_rx_packets++; - vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc); - - rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) & - ICE_RX_FLEX_DESC_PTYPE_M; + vlan_tci = ice_get_vlan_tci(rx_desc); - ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); - ice_receive_skb(rx_ring, skb, vlan_tag); + ice_process_skb_fields(rx_ring, rx_desc, skb); + ice_receive_skb(rx_ring, skb, vlan_tci); } rx_ring->next_to_clean = ntc; diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h index bee73353b5..0acc125dec 100644 --- a/drivers/net/ethernet/intel/idpf/idpf.h +++ b/drivers/net/ethernet/intel/idpf/idpf.h @@ -15,7 +15,7 @@ struct idpf_vport_max_q; #include <linux/pci.h> #include <linux/bitfield.h> #include <linux/sctp.h> -#include <linux/ethtool.h> +#include <linux/ethtool_netlink.h> #include <net/gro.h> #include <linux/dim.h> @@ -418,11 +418,13 @@ struct idpf_vport { /** * enum idpf_user_flags + * @__IDPF_USER_FLAG_HSPLIT: header split state * @__IDPF_PROMISC_UC: Unicast promiscuous mode * @__IDPF_PROMISC_MC: Multicast promiscuous mode * @__IDPF_USER_FLAGS_NBITS: Must be last */ enum idpf_user_flags { + __IDPF_USER_FLAG_HSPLIT = 0U, __IDPF_PROMISC_UC = 32, __IDPF_PROMISC_MC, @@ -965,4 +967,7 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map); int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs); int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs); +u8 idpf_vport_get_hsplit(const struct idpf_vport *vport); +bool idpf_vport_set_hsplit(const struct idpf_vport *vport, u8 val); + #endif /* !_IDPF_H_ */ diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c index 52ea38669f..986d429d11 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c +++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c @@ -75,14 +75,12 @@ static u32 idpf_get_rxfh_indir_size(struct net_device *netdev) /** * idpf_get_rxfh - get the rx flow hash indirection table * @netdev: network interface device structure - * @indir: indirection table - * @key: hash key - * @hfunc: hash function in use + * @rxfh: pointer to param struct (indir, key, hfunc) * * Reads the indirection table directly from the hardware. Always returns 0. 
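This file also adopts the consolidated RSS ethtool interface: instead of separate indir/key/hfunc arguments, both operations take a single parameter block, and the set path gains an extack for error reporting. Paraphrasing the 6.8 struct ethtool_ops member declarations from <linux/ethtool.h>:

/* Paraphrased from include/linux/ethtool.h (kernel 6.8). */
int	(*get_rxfh)(struct net_device *dev,
		    struct ethtool_rxfh_param *rxfh);
int	(*set_rxfh)(struct net_device *dev,
		    struct ethtool_rxfh_param *rxfh,
		    struct netlink_ext_ack *extack);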
*/ -static int idpf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, - u8 *hfunc) +static int idpf_get_rxfh(struct net_device *netdev, + struct ethtool_rxfh_param *rxfh) { struct idpf_netdev_priv *np = netdev_priv(netdev); struct idpf_rss_data *rss_data; @@ -103,15 +101,14 @@ static int idpf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, if (np->state != __IDPF_VPORT_UP) goto unlock_mutex; - if (hfunc) - *hfunc = ETH_RSS_HASH_TOP; + rxfh->hfunc = ETH_RSS_HASH_TOP; - if (key) - memcpy(key, rss_data->rss_key, rss_data->rss_key_size); + if (rxfh->key) + memcpy(rxfh->key, rss_data->rss_key, rss_data->rss_key_size); - if (indir) { + if (rxfh->indir) { for (i = 0; i < rss_data->rss_lut_size; i++) - indir[i] = rss_data->rss_lut[i]; + rxfh->indir[i] = rss_data->rss_lut[i]; } unlock_mutex: @@ -123,15 +120,15 @@ unlock_mutex: /** * idpf_set_rxfh - set the rx flow hash indirection table * @netdev: network interface device structure - * @indir: indirection table - * @key: hash key - * @hfunc: hash function to use + * @rxfh: pointer to param struct (indir, key, hfunc) + * @extack: extended ACK from the Netlink message * * Returns -EINVAL if the table specifies an invalid queue id, otherwise * returns 0 after programming the table. */ -static int idpf_set_rxfh(struct net_device *netdev, const u32 *indir, - const u8 *key, const u8 hfunc) +static int idpf_set_rxfh(struct net_device *netdev, + struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) { struct idpf_netdev_priv *np = netdev_priv(netdev); struct idpf_rss_data *rss_data; @@ -154,17 +151,18 @@ static int idpf_set_rxfh(struct net_device *netdev, const u32 *indir, if (np->state != __IDPF_VPORT_UP) goto unlock_mutex; - if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) { + if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && + rxfh->hfunc != ETH_RSS_HASH_TOP) { err = -EOPNOTSUPP; goto unlock_mutex; } - if (key) - memcpy(rss_data->rss_key, key, rss_data->rss_key_size); + if (rxfh->key) + memcpy(rss_data->rss_key, rxfh->key, rss_data->rss_key_size); - if (indir) { + if (rxfh->indir) { for (lut = 0; lut < rss_data->rss_lut_size; lut++) - rss_data->rss_lut[lut] = indir[lut]; + rss_data->rss_lut[lut] = rxfh->indir[lut]; } err = idpf_config_rss(vport); @@ -320,6 +318,8 @@ static void idpf_get_ringparam(struct net_device *netdev, ring->rx_pending = vport->rxq_desc_count; ring->tx_pending = vport->txq_desc_count; + kring->tcp_data_split = idpf_vport_get_hsplit(vport); + idpf_vport_ctrl_unlock(netdev); } @@ -379,6 +379,14 @@ static int idpf_set_ringparam(struct net_device *netdev, new_rx_count == vport->rxq_desc_count) goto unlock_mutex; + if (!idpf_vport_set_hsplit(vport, kring->tcp_data_split)) { + NL_SET_ERR_MSG_MOD(ext_ack, + "setting TCP data split is not supported"); + err = -EOPNOTSUPP; + + goto unlock_mutex; + } + config_data = &vport->adapter->vport_config[idx]->user_config; config_data->num_req_txq_desc = new_tx_count; config_data->num_req_rxq_desc = new_rx_count; @@ -532,7 +540,7 @@ static void idpf_add_stat_strings(u8 **p, const struct idpf_stats *stats, unsigned int i; for (i = 0; i < size; i++) - ethtool_sprintf(p, "%s", stats[i].stat_string); + ethtool_puts(p, stats[i].stat_string); } /** @@ -1334,6 +1342,7 @@ static int idpf_get_link_ksettings(struct net_device *netdev, static const struct ethtool_ops idpf_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE, + .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT, .get_msglevel = idpf_get_msglevel, 
.set_msglevel = idpf_set_msglevel, .get_link = ethtool_op_get_link, diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c index 0241e498cc..58179bd733 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lib.c +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -1060,6 +1060,71 @@ static void idpf_vport_dealloc(struct idpf_vport *vport) } /** + * idpf_is_hsplit_supported - check whether the header split is supported + * @vport: virtual port to check the capability for + * + * Return: true if it's supported by the HW/FW, false if not. + */ +static bool idpf_is_hsplit_supported(const struct idpf_vport *vport) +{ + return idpf_is_queue_model_split(vport->rxq_model) && + idpf_is_cap_ena_all(vport->adapter, IDPF_HSPLIT_CAPS, + IDPF_CAP_HSPLIT); +} + +/** + * idpf_vport_get_hsplit - get the current header split feature state + * @vport: virtual port to query the state for + * + * Return: ``ETHTOOL_TCP_DATA_SPLIT_UNKNOWN`` if not supported, + * ``ETHTOOL_TCP_DATA_SPLIT_DISABLED`` if disabled, + * ``ETHTOOL_TCP_DATA_SPLIT_ENABLED`` if active. + */ +u8 idpf_vport_get_hsplit(const struct idpf_vport *vport) +{ + const struct idpf_vport_user_config_data *config; + + if (!idpf_is_hsplit_supported(vport)) + return ETHTOOL_TCP_DATA_SPLIT_UNKNOWN; + + config = &vport->adapter->vport_config[vport->idx]->user_config; + + return test_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags) ? + ETHTOOL_TCP_DATA_SPLIT_ENABLED : + ETHTOOL_TCP_DATA_SPLIT_DISABLED; +} + +/** + * idpf_vport_set_hsplit - enable or disable header split on a given vport + * @vport: virtual port to configure + * @val: Ethtool flag controlling the header split state + * + * Return: true on success, false if not supported by the HW. + */ +bool idpf_vport_set_hsplit(const struct idpf_vport *vport, u8 val) +{ + struct idpf_vport_user_config_data *config; + + if (!idpf_is_hsplit_supported(vport)) + return val == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN; + + config = &vport->adapter->vport_config[vport->idx]->user_config; + + switch (val) { + case ETHTOOL_TCP_DATA_SPLIT_UNKNOWN: + /* Default is to enable */ + case ETHTOOL_TCP_DATA_SPLIT_ENABLED: + __set_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags); + return true; + case ETHTOOL_TCP_DATA_SPLIT_DISABLED: + __clear_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags); + return true; + default: + return false; + } +} + +/** * idpf_vport_alloc - Allocates the next available struct vport in the adapter * @adapter: board private structure * @max_q: vport max queue info diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c index 20c4b3a647..27b93592c4 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c @@ -328,10 +328,9 @@ static void idpf_tx_singleq_build_ctx_desc(struct idpf_queue *txq, if (offload->tso_segs) { qw1 |= IDPF_TX_CTX_DESC_TSO << IDPF_TXD_CTX_QW1_CMD_S; - qw1 |= ((u64)offload->tso_len << IDPF_TXD_CTX_QW1_TSO_LEN_S) & - IDPF_TXD_CTX_QW1_TSO_LEN_M; - qw1 |= ((u64)offload->mss << IDPF_TXD_CTX_QW1_MSS_S) & - IDPF_TXD_CTX_QW1_MSS_M; + qw1 |= FIELD_PREP(IDPF_TXD_CTX_QW1_TSO_LEN_M, + offload->tso_len); + qw1 |= FIELD_PREP(IDPF_TXD_CTX_QW1_MSS_M, offload->mss); u64_stats_update_begin(&txq->stats_sync); u64_stats_inc(&txq->q_stats.tx.lso_pkts); diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index 9e942e5baf..017a081d85 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ 
b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -505,9 +505,9 @@ static void idpf_rx_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id) /* store the buffer ID and the SW maintained GEN bit to the refillq */ refillq->ring[nta] = - ((buf_id << IDPF_RX_BI_BUFID_S) & IDPF_RX_BI_BUFID_M) | - (!!(test_bit(__IDPF_Q_GEN_CHK, refillq->flags)) << - IDPF_RX_BI_GEN_S); + FIELD_PREP(IDPF_RX_BI_BUFID_M, buf_id) | + FIELD_PREP(IDPF_RX_BI_GEN_M, + test_bit(__IDPF_Q_GEN_CHK, refillq->flags)); if (unlikely(++nta == refillq->desc_count)) { nta = 0; @@ -1240,12 +1240,15 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq) struct idpf_adapter *adapter = vport->adapter; struct idpf_queue *q; int i, k, err = 0; + bool hs; vport->rxq_grps = kcalloc(vport->num_rxq_grp, sizeof(struct idpf_rxq_group), GFP_KERNEL); if (!vport->rxq_grps) return -ENOMEM; + hs = idpf_vport_get_hsplit(vport) == ETHTOOL_TCP_DATA_SPLIT_ENABLED; + for (i = 0; i < vport->num_rxq_grp; i++) { struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; int j; @@ -1298,9 +1301,8 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq) q->rx_buf_size = vport->bufq_size[j]; q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK; q->rx_buf_stride = IDPF_RX_BUF_STRIDE; - if (idpf_is_cap_ena_all(adapter, IDPF_HSPLIT_CAPS, - IDPF_CAP_HSPLIT) && - idpf_is_queue_model_split(vport->rxq_model)) { + + if (hs) { q->rx_hsplit_en = true; q->rx_hbuf_size = IDPF_HDR_BUF_SIZE; } @@ -1344,9 +1346,7 @@ skip_splitq_rx_init: rx_qgrp->splitq.rxq_sets[j]->refillq1 = &rx_qgrp->splitq.bufq_sets[1].refillqs[j]; - if (idpf_is_cap_ena_all(adapter, IDPF_HSPLIT_CAPS, - IDPF_CAP_HSPLIT) && - idpf_is_queue_model_split(vport->rxq_model)) { + if (hs) { q->rx_hsplit_en = true; q->rx_hbuf_size = IDPF_HDR_BUF_SIZE; } @@ -1825,14 +1825,14 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget, u16 gen; /* if the descriptor isn't done, no work yet to do */ - gen = (le16_to_cpu(tx_desc->qid_comptype_gen) & - IDPF_TXD_COMPLQ_GEN_M) >> IDPF_TXD_COMPLQ_GEN_S; + gen = le16_get_bits(tx_desc->qid_comptype_gen, + IDPF_TXD_COMPLQ_GEN_M); if (test_bit(__IDPF_Q_GEN_CHK, complq->flags) != gen) break; /* Find necessary info of TX queue to clean buffers */ - rel_tx_qid = (le16_to_cpu(tx_desc->qid_comptype_gen) & - IDPF_TXD_COMPLQ_QID_M) >> IDPF_TXD_COMPLQ_QID_S; + rel_tx_qid = le16_get_bits(tx_desc->qid_comptype_gen, + IDPF_TXD_COMPLQ_QID_M); if (rel_tx_qid >= complq->txq_grp->num_txq || !complq->txq_grp->txqs[rel_tx_qid]) { dev_err(&complq->vport->adapter->pdev->dev, @@ -1842,9 +1842,8 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget, tx_q = complq->txq_grp->txqs[rel_tx_qid]; /* Determine completion type */ - ctype = (le16_to_cpu(tx_desc->qid_comptype_gen) & - IDPF_TXD_COMPLQ_COMPL_TYPE_M) >> - IDPF_TXD_COMPLQ_COMPL_TYPE_S; + ctype = le16_get_bits(tx_desc->qid_comptype_gen, + IDPF_TXD_COMPLQ_COMPL_TYPE_M); switch (ctype) { case IDPF_TXD_COMPLT_RE: hw_head = le16_to_cpu(tx_desc->q_head_compl_tag.q_head); @@ -1945,11 +1944,10 @@ void idpf_tx_splitq_build_ctb(union idpf_tx_flex_desc *desc, u16 td_cmd, u16 size) { desc->q.qw1.cmd_dtype = - cpu_to_le16(params->dtype & IDPF_FLEX_TXD_QW1_DTYPE_M); + le16_encode_bits(params->dtype, IDPF_FLEX_TXD_QW1_DTYPE_M); desc->q.qw1.cmd_dtype |= - cpu_to_le16((td_cmd << IDPF_FLEX_TXD_QW1_CMD_S) & - IDPF_FLEX_TXD_QW1_CMD_M); - desc->q.qw1.buf_size = cpu_to_le16((u16)size); + le16_encode_bits(td_cmd, IDPF_FLEX_TXD_QW1_CMD_M); + desc->q.qw1.buf_size = cpu_to_le16(size); 
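The conversions in the surrounding idpf hunks all apply one pattern from <linux/bitfield.h>: FIELD_GET()/FIELD_PREP() derive the shift amount from the mask at compile time, and the le16_get_bits()/le16_encode_bits() variants fold the little-endian conversion in as well. A minimal standalone sketch of the equivalence — DEMO_QID_M and the demo_* helpers are invented for illustration and belong to no driver here:

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>

#define DEMO_QID_M	GENMASK(14, 5)	/* hypothetical 10-bit field at bits 14:5 */

/* Old style: open-coded mask and shift, duplicated at every call site. */
static u16 demo_extract_old(__le16 raw)
{
	return (le16_to_cpu(raw) & DEMO_QID_M) >> 5;
}

/* New style: the helper derives the shift (5) from DEMO_QID_M itself and
 * does the le16-to-CPU conversion internally.
 */
static u16 demo_extract_new(__le16 raw)
{
	return le16_get_bits(raw, DEMO_QID_M);
}

/* Packing direction: FIELD_PREP()/le16_encode_bits() are the mirror image. */
static __le16 demo_pack(u16 qid)
{
	return le16_encode_bits(qid, DEMO_QID_M);
}

Because the helpers reject non-constant or non-contiguous masks at compile time, a mismatched mask/shift pair — easy to miss in the open-coded form — becomes a build error rather than a silent corruption.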
desc->q.qw1.l2tags.l2tag1 = cpu_to_le16(params->td_tag); } @@ -2843,8 +2841,9 @@ static void idpf_rx_splitq_extract_csum_bits(struct virtchnl2_rx_flex_desc_adv_n qword1); csum->ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M, qword0); - csum->raw_csum_inv = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M, - le16_to_cpu(rx_desc->ptype_err_fflags0)); + csum->raw_csum_inv = + le16_get_bits(rx_desc->ptype_err_fflags0, + VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M); csum->raw_csum = le16_to_cpu(rx_desc->misc.raw_cs); } @@ -2938,8 +2937,10 @@ static int idpf_rx_process_skb_fields(struct idpf_queue *rxq, struct idpf_rx_ptype_decoded decoded; u16 rx_ptype; - rx_ptype = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M, - le16_to_cpu(rx_desc->ptype_err_fflags0)); + rx_ptype = le16_get_bits(rx_desc->ptype_err_fflags0, + VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M); + + skb->protocol = eth_type_trans(skb, rxq->vport->netdev); decoded = rxq->vport->rx_ptype_lkup[rx_ptype]; /* If we don't know the ptype we can't do anything else with it. Just @@ -2951,10 +2952,8 @@ static int idpf_rx_process_skb_fields(struct idpf_queue *rxq, /* process RSS/hash */ idpf_rx_hash(rxq, skb, rx_desc, &decoded); - skb->protocol = eth_type_trans(skb, rxq->vport->netdev); - - if (FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M, - le16_to_cpu(rx_desc->hdrlen_flags))) + if (le16_get_bits(rx_desc->hdrlen_flags, + VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M)) return idpf_rx_rsc(rxq, skb, rx_desc, &decoded); idpf_rx_splitq_extract_csum_bits(rx_desc, &csum_bits); @@ -3148,8 +3147,8 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget) dma_rmb(); /* if the descriptor isn't done, no work yet to do */ - gen_id = le16_to_cpu(rx_desc->pktlen_gen_bufq_id); - gen_id = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M, gen_id); + gen_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id, + VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M); if (test_bit(__IDPF_Q_GEN_CHK, rxq->flags) != gen_id) break; @@ -3164,9 +3163,8 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget) continue; } - pkt_len = le16_to_cpu(rx_desc->pktlen_gen_bufq_id); - pkt_len = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M, - pkt_len); + pkt_len = le16_get_bits(rx_desc->pktlen_gen_bufq_id, + VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M); hbo = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_M, rx_desc->status_err0_qw1); @@ -3183,14 +3181,12 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget) goto bypass_hsplit; } - hdr_len = le16_to_cpu(rx_desc->hdrlen_flags); - hdr_len = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M, - hdr_len); + hdr_len = le16_get_bits(rx_desc->hdrlen_flags, + VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M); bypass_hsplit: - bufq_id = le16_to_cpu(rx_desc->pktlen_gen_bufq_id); - bufq_id = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M, - bufq_id); + bufq_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id, + VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M); rxq_set = container_of(rxq, struct idpf_rxq_set, rxq); if (!bufq_id) diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c index b0c52f1784..390977a76d 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c @@ -3287,6 +3287,8 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q) memcpy(vport->rx_itr_profile, rx_itr, IDPF_DIM_PROFILE_SLOTS); memcpy(vport->tx_itr_profile, tx_itr, IDPF_DIM_PROFILE_SLOTS); + idpf_vport_set_hsplit(vport, ETHTOOL_TCP_DATA_SPLIT_ENABLED); + 
idpf_vport_init_num_qs(vport, vport_msg); idpf_vport_calc_num_q_desc(vport); idpf_vport_calc_num_q_groups(vport); diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index 8d6e44ee18..64dfc362d1 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -222,8 +222,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw) } /* set lan id */ - hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >> - E1000_STATUS_FUNC_SHIFT; + hw->bus.func = FIELD_GET(E1000_STATUS_FUNC_MASK, rd32(E1000_STATUS)); /* Set phy->phy_addr and phy->id. */ ret_val = igb_get_phy_id_82575(hw); @@ -262,8 +261,8 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw) if (ret_val) goto out; - data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >> - E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT; + data = FIELD_GET(E1000_M88E1112_MAC_CTRL_1_MODE_MASK, + data); if (data == E1000_M88E1112_AUTO_COPPER_SGMII || data == E1000_M88E1112_AUTO_COPPER_BASEX) hw->mac.ops.check_for_link = @@ -330,8 +329,7 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw) u32 eecd = rd32(E1000_EECD); u16 size; - size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> - E1000_EECD_SIZE_EX_SHIFT); + size = FIELD_GET(E1000_EECD_SIZE_EX_MASK, eecd); /* Added to a constant, "size" becomes the left-shift value * for setting word_size. @@ -2798,7 +2796,7 @@ static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw) return 0; hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg); - if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) + if (FIELD_GET(NVM_ETS_TYPE_MASK, ets_cfg) != NVM_ETS_TYPE_EMC) return E1000_NOT_IMPLEMENTED; @@ -2808,10 +2806,8 @@ static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw) for (i = 1; i < num_sensors; i++) { hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor); - sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >> - NVM_ETS_DATA_INDEX_SHIFT); - sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >> - NVM_ETS_DATA_LOC_SHIFT); + sensor_index = FIELD_GET(NVM_ETS_DATA_INDEX_MASK, ets_sensor); + sensor_location = FIELD_GET(NVM_ETS_DATA_LOC_MASK, ets_sensor); if (sensor_location != 0) hw->phy.ops.read_i2c_byte(hw, @@ -2859,20 +2855,17 @@ static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw) return 0; hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg); - if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) + if (FIELD_GET(NVM_ETS_TYPE_MASK, ets_cfg) != NVM_ETS_TYPE_EMC) return E1000_NOT_IMPLEMENTED; - low_thresh_delta = ((ets_cfg & NVM_ETS_LTHRES_DELTA_MASK) >> - NVM_ETS_LTHRES_DELTA_SHIFT); + low_thresh_delta = FIELD_GET(NVM_ETS_LTHRES_DELTA_MASK, ets_cfg); num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK); for (i = 1; i <= num_sensors; i++) { hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor); - sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >> - NVM_ETS_DATA_INDEX_SHIFT); - sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >> - NVM_ETS_DATA_LOC_SHIFT); + sensor_index = FIELD_GET(NVM_ETS_DATA_INDEX_MASK, ets_sensor); + sensor_location = FIELD_GET(NVM_ETS_DATA_LOC_MASK, ets_sensor); therm_limit = ets_sensor & NVM_ETS_DATA_HTHRESH_MASK; hw->phy.ops.write_i2c_byte(hw, diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c index 53b396fd19..503b239868 100644 --- a/drivers/net/ethernet/intel/igb/e1000_i210.c +++ b/drivers/net/ethernet/intel/igb/e1000_i210.c @@ -473,7 +473,7 @@ s32 igb_read_invm_version(struct 
e1000_hw *hw, /* Check if we have second version location used */ else if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) { - version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3; + version = FIELD_GET(E1000_INVM_VER_FIELD_ONE, *record); status = 0; break; } @@ -483,8 +483,8 @@ s32 igb_read_invm_version(struct e1000_hw *hw, else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) && ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) && (i != 1))) { - version = (*next_record & E1000_INVM_VER_FIELD_TWO) - >> 13; + version = FIELD_GET(E1000_INVM_VER_FIELD_TWO, + *next_record); status = 0; break; } @@ -493,15 +493,15 @@ s32 igb_read_invm_version(struct e1000_hw *hw, */ else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) && ((*record & 0x3) == 0)) { - version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3; + version = FIELD_GET(E1000_INVM_VER_FIELD_ONE, *record); status = 0; break; } } if (!status) { - invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK) - >> E1000_INVM_MAJOR_SHIFT; + invm_ver->invm_major = FIELD_GET(E1000_INVM_MAJOR_MASK, + version); invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK; } /* Read Image Type */ @@ -520,7 +520,8 @@ s32 igb_read_invm_version(struct e1000_hw *hw, ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) || ((((*record & 0x3) != 0) && (i != 1)))) { invm_ver->invm_img_type = - (*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23; + FIELD_GET(E1000_INVM_IMGTYPE_FIELD, + *next_record); status = 0; break; } diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c index caf91c6f52..fa3dfafd2b 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.c +++ b/drivers/net/ethernet/intel/igb/e1000_mac.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ +#include <linux/bitfield.h> #include <linux/if_ether.h> #include <linux/delay.h> #include <linux/pci.h> @@ -50,13 +51,12 @@ s32 igb_get_bus_info_pcie(struct e1000_hw *hw) break; } - bus->width = (enum e1000_bus_width)((pcie_link_status & - PCI_EXP_LNKSTA_NLW) >> - PCI_EXP_LNKSTA_NLW_SHIFT); + bus->width = (enum e1000_bus_width)FIELD_GET(PCI_EXP_LNKSTA_NLW, + pcie_link_status); } reg = rd32(E1000_STATUS); - bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; + bus->func = FIELD_GET(E1000_STATUS_FUNC_MASK, reg); return 0; } diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c index 0da57e8959..2dcd64d6de 100644 --- a/drivers/net/ethernet/intel/igb/e1000_nvm.c +++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c @@ -708,10 +708,10 @@ void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers) */ if ((etrack_test & NVM_MAJOR_MASK) != NVM_ETRACK_VALID) { hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version); - fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) - >> NVM_MAJOR_SHIFT; - fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK) - >> NVM_MINOR_SHIFT; + fw_vers->eep_major = FIELD_GET(NVM_MAJOR_MASK, + fw_version); + fw_vers->eep_minor = FIELD_GET(NVM_MINOR_MASK, + fw_version); fw_vers->eep_build = (fw_version & NVM_IMAGE_ID_MASK); goto etrack_id; } @@ -753,15 +753,13 @@ void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers) return; } hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version); - fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) - >> NVM_MAJOR_SHIFT; + fw_vers->eep_major = FIELD_GET(NVM_MAJOR_MASK, fw_version); /* check for old style version format in newer images*/ if ((fw_version & NVM_NEW_DEC_MASK) == 0x0) { eeprom_verl = (fw_version & NVM_COMB_VER_MASK); } else { - eeprom_verl = (fw_version & NVM_MINOR_MASK) - >> NVM_MINOR_SHIFT; + eeprom_verl = FIELD_GET(NVM_MINOR_MASK, fw_version); } /* Convert minor value to hex before assigning to output struct * Val to be converted will not be higher than 99, per tool output diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c index 3c1b562a32..cd65008c7e 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c @@ -255,7 +255,7 @@ s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data) } /* Need to byte-swap the 16-bit value. */ - *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00); + *data = ((i2ccmd >> 8) & 0x00FF) | FIELD_PREP(0xFF00, i2ccmd); return 0; } @@ -282,7 +282,7 @@ s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data) } /* Swap the data bytes for the I2C interface */ - phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00); + phy_data_swapped = ((data >> 8) & 0x00FF) | FIELD_PREP(0xFF00, data); /* Set up Op-code, Phy Address, and register address in the I2CCMD * register. 
The MAC will take care of interfacing with the @@ -1682,8 +1682,7 @@ s32 igb_get_cable_length_m88(struct e1000_hw *hw) if (ret_val) goto out; - index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> - M88E1000_PSSR_CABLE_LENGTH_SHIFT; + index = FIELD_GET(M88E1000_PSSR_CABLE_LENGTH, phy_data); if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) { ret_val = -E1000_ERR_PHY; goto out; @@ -1796,8 +1795,7 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw) if (ret_val) goto out; - index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> - M88E1000_PSSR_CABLE_LENGTH_SHIFT; + index = FIELD_GET(M88E1000_PSSR_CABLE_LENGTH, phy_data); if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) { ret_val = -E1000_ERR_PHY; goto out; @@ -2578,8 +2576,7 @@ s32 igb_get_cable_length_82580(struct e1000_hw *hw) if (ret_val) goto out; - length = (phy_data & I82580_DSTATUS_CABLE_LENGTH) >> - I82580_DSTATUS_CABLE_LENGTH_SHIFT; + length = FIELD_GET(I82580_DSTATUS_CABLE_LENGTH, phy_data); if (length == E1000_CABLE_LENGTH_UNDEFINED) ret_val = -E1000_ERR_PHY; diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index a2b759531c..3c2dc7bdeb 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -637,7 +637,7 @@ struct igb_adapter { struct timespec64 period; } perout[IGB_N_PEROUT]; - char fw_version[32]; + char fw_version[48]; #ifdef CONFIG_IGB_HWMON struct hwmon_buff *igb_hwmon_buff; bool ets; diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 16d2a55d5e..b66199c9bb 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2356,11 +2356,9 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) break; case ETH_SS_STATS: for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) - ethtool_sprintf(&p, "%s", - igb_gstrings_stats[i].stat_string); + ethtool_puts(&p, igb_gstrings_stats[i].stat_string); for (i = 0; i < IGB_NETDEV_STATS_LEN; i++) - ethtool_sprintf(&p, "%s", - igb_gstrings_net_stats[i].stat_string); + ethtool_puts(&p, igb_gstrings_net_stats[i].stat_string); for (i = 0; i < adapter->num_tx_queues; i++) { ethtool_sprintf(&p, "tx_queue_%u_packets", i); ethtool_sprintf(&p, "tx_queue_%u_bytes", i); @@ -2434,7 +2432,7 @@ static int igb_get_ts_info(struct net_device *dev, } } -#define ETHER_TYPE_FULL_MASK ((__force __be16)~0) +#define ETHER_TYPE_FULL_MASK cpu_to_be16(FIELD_MAX(U16_MAX)) static int igb_get_ethtool_nfc_entry(struct igb_adapter *adapter, struct ethtool_rxnfc *cmd) { @@ -2713,8 +2711,7 @@ static int igb_rxnfc_write_etype_filter(struct igb_adapter *adapter, etqf |= (etype & E1000_ETQF_ETYPE_MASK); etqf &= ~E1000_ETQF_QUEUE_MASK; - etqf |= ((input->action << E1000_ETQF_QUEUE_SHIFT) - & E1000_ETQF_QUEUE_MASK); + etqf |= FIELD_PREP(E1000_ETQF_QUEUE_MASK, input->action); etqf |= E1000_ETQF_QUEUE_ENABLE; wr32(E1000_ETQF(i), etqf); @@ -2733,8 +2730,8 @@ static int igb_rxnfc_write_vlan_prio_filter(struct igb_adapter *adapter, u32 vlapqf; vlapqf = rd32(E1000_VLAPQF); - vlan_priority = (ntohs(input->filter.vlan_tci) & VLAN_PRIO_MASK) - >> VLAN_PRIO_SHIFT; + vlan_priority = FIELD_GET(VLAN_PRIO_MASK, + ntohs(input->filter.vlan_tci)); queue_index = (vlapqf >> (vlan_priority * 4)) & E1000_VLAPQF_QUEUE_MASK; /* check whether this vlan prio is already set */ @@ -2817,7 +2814,7 @@ static void igb_clear_vlan_prio_filter(struct igb_adapter *adapter, u8 vlan_priority; u32 vlapqf; - vlan_priority = (vlan_tci & 
VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; + vlan_priority = FIELD_GET(VLAN_PRIO_MASK, vlan_tci); vlapqf = rd32(E1000_VLAPQF); vlapqf &= ~E1000_VLAPQF_P_VALID(vlan_priority); @@ -3282,18 +3279,17 @@ static u32 igb_get_rxfh_indir_size(struct net_device *netdev) return IGB_RETA_SIZE; } -static int igb_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, - u8 *hfunc) +static int igb_get_rxfh(struct net_device *netdev, + struct ethtool_rxfh_param *rxfh) { struct igb_adapter *adapter = netdev_priv(netdev); int i; - if (hfunc) - *hfunc = ETH_RSS_HASH_TOP; - if (!indir) + rxfh->hfunc = ETH_RSS_HASH_TOP; + if (!rxfh->indir) return 0; for (i = 0; i < IGB_RETA_SIZE; i++) - indir[i] = adapter->rss_indir_tbl[i]; + rxfh->indir[i] = adapter->rss_indir_tbl[i]; return 0; } @@ -3333,8 +3329,9 @@ void igb_write_rss_indir_tbl(struct igb_adapter *adapter) } } -static int igb_set_rxfh(struct net_device *netdev, const u32 *indir, - const u8 *key, const u8 hfunc) +static int igb_set_rxfh(struct net_device *netdev, + struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -3342,10 +3339,11 @@ static int igb_set_rxfh(struct net_device *netdev, const u32 *indir, u32 num_queues; /* We do not allow change in unsupported parameters */ - if (key || - (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + if (rxfh->key || + (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && + rxfh->hfunc != ETH_RSS_HASH_TOP)) return -EOPNOTSUPP; - if (!indir) + if (!rxfh->indir) return 0; num_queues = adapter->rss_queues; @@ -3362,12 +3360,12 @@ static int igb_set_rxfh(struct net_device *netdev, const u32 *indir, /* Verify user input. */ for (i = 0; i < IGB_RETA_SIZE; i++) - if (indir[i] >= num_queues) + if (rxfh->indir[i] >= num_queues) return -EINVAL; for (i = 0; i < IGB_RETA_SIZE; i++) - adapter->rss_indir_tbl[i] = indir[i]; + adapter->rss_indir_tbl[i] = rxfh->indir[i]; igb_write_rss_indir_tbl(adapter); diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index ada42ba635..7662c42e35 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -3069,7 +3069,6 @@ void igb_set_fw_version(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct e1000_fw_version fw; - char *lbuf; igb_get_fw_version(hw, &fw); @@ -3077,34 +3076,36 @@ void igb_set_fw_version(struct igb_adapter *adapter) case e1000_i210: case e1000_i211: if (!(igb_get_flash_presence_i210(hw))) { - lbuf = kasprintf(GFP_KERNEL, "%2d.%2d-%d", - fw.invm_major, fw.invm_minor, - fw.invm_img_type); + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%2d.%2d-%d", + fw.invm_major, fw.invm_minor, + fw.invm_img_type); break; } fallthrough; default: /* if option rom is valid, display its version too */ if (fw.or_valid) { - lbuf = kasprintf(GFP_KERNEL, "%d.%d, 0x%08x, %d.%d.%d", - fw.eep_major, fw.eep_minor, - fw.etrack_id, fw.or_major, fw.or_build, - fw.or_patch); + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%d.%d, 0x%08x, %d.%d.%d", + fw.eep_major, fw.eep_minor, fw.etrack_id, + fw.or_major, fw.or_build, fw.or_patch); /* no option rom */ } else if (fw.etrack_id != 0X0000) { - lbuf = kasprintf(GFP_KERNEL, "%d.%d, 0x%08x", - fw.eep_major, fw.eep_minor, - fw.etrack_id); + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%d.%d, 0x%08x", + fw.eep_major, fw.eep_minor, fw.etrack_id); } else { - lbuf = kasprintf(GFP_KERNEL, "%d.%d.%d", 
fw.eep_major, - fw.eep_minor, fw.eep_build); + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%d.%d.%d", + fw.eep_major, fw.eep_minor, fw.eep_build); } break; } - - /* the truncate happens here if it doesn't fit */ - strscpy(adapter->fw_version, lbuf, sizeof(adapter->fw_version)); - kfree(lbuf); } /** @@ -7282,7 +7283,7 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) static int igb_set_vf_multicasts(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) { - int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT; + int n = FIELD_GET(E1000_VT_MSGINFO_MASK, msgbuf[0]); u16 *hash_list = (u16 *)&msgbuf[1]; struct vf_data_storage *vf_data = &adapter->vf_data[vf]; int i; @@ -7542,7 +7543,7 @@ static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf, static int igb_set_vf_vlan_msg(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) { - int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT; + int add = FIELD_GET(E1000_VT_MSGINFO_MASK, msgbuf[0]); int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK); int ret; @@ -9797,8 +9798,7 @@ static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate, tx_rate; bcnrc_val = E1000_RTTBCNRC_RS_ENA; - bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) & - E1000_RTTBCNRC_RF_INT_MASK); + bcnrc_val |= FIELD_PREP(E1000_RTTBCNRC_RF_INT_MASK, rf_int); bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK); } else { bcnrc_val = 0; @@ -9987,8 +9987,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) hwm = 64 * (pba - 6); reg = rd32(E1000_FCRTC); reg &= ~E1000_FCRTC_RTH_COAL_MASK; - reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT) - & E1000_FCRTC_RTH_COAL_MASK); + reg |= FIELD_PREP(E1000_FCRTC_RTH_COAL_MASK, hwm); wr32(E1000_FCRTC, reg); /* Set the DMA Coalescing Rx threshold to PBA - 2 * max @@ -9997,8 +9996,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) dmac_thr = pba - 10; reg = rd32(E1000_DMACR); reg &= ~E1000_DMACR_DMACTHR_MASK; - reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT) - & E1000_DMACR_DMACTHR_MASK); + reg |= FIELD_PREP(E1000_DMACR_DMACTHR_MASK, dmac_thr); /* transition to L0x or L1 if available..*/ reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK); diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c index a3cd7ac48d..d15282ee5e 100644 --- a/drivers/net/ethernet/intel/igbvf/mbx.c +++ b/drivers/net/ethernet/intel/igbvf/mbx.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2009 - 2018 Intel Corporation. */ +#include <linux/bitfield.h> #include "mbx.h" /** diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index e6c1fbee04..a4d4f00e6a 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -273,9 +273,8 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, * that case, it fills the header buffer and spills the rest * into the page. 
*/ - hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) - & E1000_RXDADV_HDRBUFLEN_MASK) >> - E1000_RXDADV_HDRBUFLEN_SHIFT; + hlen = le16_get_bits(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info, + E1000_RXDADV_HDRBUFLEN_MASK); if (hlen > adapter->rx_ps_hdr_size) hlen = adapter->rx_ps_hdr_size; diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 85cc163965..45430e246e 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -81,6 +81,21 @@ struct igc_tx_timestamp_request { u32 flags; /* flags that should be added to the tx_buffer */ }; +struct igc_inline_rx_tstamps { + /* Timestamps are saved in little endian at the beginning of the packet + * buffer following the layout: + * + * DWORD: | 0 | 1 | 2 | 3 | + * Field: | Timer1 SYSTIML | Timer1 SYSTIMH | Timer0 SYSTIML | Timer0 SYSTIMH | + * + * SYSTIML holds the nanoseconds part while SYSTIMH holds the seconds + * part of the timestamp. + * + */ + __le32 timer1[2]; + __le32 timer0[2]; +}; + struct igc_ring_container { struct igc_ring *ring; /* pointer to linked list of rings */ unsigned int total_bytes; /* total bytes processed this int */ @@ -261,6 +276,8 @@ struct igc_adapter { unsigned int ptp_flags; /* System time value lock */ spinlock_t tmreg_lock; + /* Free-running timer lock */ + spinlock_t free_timer_lock; struct cyclecounter cc; struct timecounter tc; struct timespec64 prev_ptp_time; /* Pre-reset PTP clock */ @@ -469,6 +486,8 @@ enum igc_tx_flags { IGC_TX_FLAGS_TSTAMP_1 = 0x100, IGC_TX_FLAGS_TSTAMP_2 = 0x200, IGC_TX_FLAGS_TSTAMP_3 = 0x400, + + IGC_TX_FLAGS_TSTAMP_TIMER_1 = 0x800, }; enum igc_boards { @@ -531,7 +550,7 @@ struct igc_rx_buffer { struct igc_xdp_buff { struct xdp_buff xdp; union igc_adv_rx_desc *rx_desc; - ktime_t rx_ts; /* data indication bit IGC_RXDADV_STAT_TSIP */ + struct igc_inline_rx_tstamps *rx_ts; /* data indication bit IGC_RXDADV_STAT_TSIP */ }; struct igc_q_vector { diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c index a1d815af50..9fae8bdec2 100644 --- a/drivers/net/ethernet/intel/igc/igc_base.c +++ b/drivers/net/ethernet/intel/igc/igc_base.c @@ -68,8 +68,7 @@ static s32 igc_init_nvm_params_base(struct igc_hw *hw) u32 eecd = rd32(IGC_EECD); u16 size; - size = (u16)((eecd & IGC_EECD_SIZE_EX_MASK) >> - IGC_EECD_SIZE_EX_SHIFT); + size = FIELD_GET(IGC_EECD_SIZE_EX_MASK, eecd); /* Added to a constant, "size" becomes the left-shift value * for setting word_size. @@ -162,8 +161,7 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw) phy->reset_delay_us = 100; /* set lan id */ - hw->bus.func = (rd32(IGC_STATUS) & IGC_STATUS_FUNC_MASK) >> - IGC_STATUS_FUNC_SHIFT; + hw->bus.func = FIELD_GET(IGC_STATUS_FUNC_MASK, rd32(IGC_STATUS)); /* Make sure the PHY is in a good state. 
Several people have reported * firmware leaving the PHY's page select register set to something diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h index f7d6491d4c..bf8cdfbba9 100644 --- a/drivers/net/ethernet/intel/igc/igc_base.h +++ b/drivers/net/ethernet/intel/igc/igc_base.h @@ -37,6 +37,10 @@ struct igc_adv_tx_context_desc { #define IGC_ADVTXD_TSTAMP_REG_1 0x00010000 /* Select register 1 for timestamp */ #define IGC_ADVTXD_TSTAMP_REG_2 0x00020000 /* Select register 2 for timestamp */ #define IGC_ADVTXD_TSTAMP_REG_3 0x00030000 /* Select register 3 for timestamp */ +#define IGC_ADVTXD_TSTAMP_TIMER_1 0x00010000 /* Select timer 1 for timestamp */ +#define IGC_ADVTXD_TSTAMP_TIMER_2 0x00020000 /* Select timer 2 for timestamp */ +#define IGC_ADVTXD_TSTAMP_TIMER_3 0x00030000 /* Select timer 3 for timestamp */ + #define IGC_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ #define IGC_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ #define IGC_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index b3037016f3..5f92b3c7c3 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -317,6 +317,8 @@ #define IGC_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ #define IGC_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */ +#define IGC_TXD_PTP2_TIMER_1 0x00000020 + /* IPSec Encrypt Enable */ #define IGC_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ #define IGC_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index 859b2636f3..b95d2c86e8 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -773,11 +773,9 @@ static void igc_ethtool_get_strings(struct net_device *netdev, u32 stringset, break; case ETH_SS_STATS: for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) - ethtool_sprintf(&p, "%s", - igc_gstrings_stats[i].stat_string); + ethtool_puts(&p, igc_gstrings_stats[i].stat_string); for (i = 0; i < IGC_NETDEV_STATS_LEN; i++) - ethtool_sprintf(&p, "%s", - igc_gstrings_net_stats[i].stat_string); + ethtool_puts(&p, igc_gstrings_net_stats[i].stat_string); for (i = 0; i < adapter->num_tx_queues; i++) { ethtool_sprintf(&p, "tx_queue_%u_packets", i); ethtool_sprintf(&p, "tx_queue_%u_bytes", i); @@ -1464,45 +1462,46 @@ static u32 igc_ethtool_get_rxfh_indir_size(struct net_device *netdev) return IGC_RETA_SIZE; } -static int igc_ethtool_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, - u8 *hfunc) +static int igc_ethtool_get_rxfh(struct net_device *netdev, + struct ethtool_rxfh_param *rxfh) { struct igc_adapter *adapter = netdev_priv(netdev); int i; - if (hfunc) - *hfunc = ETH_RSS_HASH_TOP; - if (!indir) + rxfh->hfunc = ETH_RSS_HASH_TOP; + if (!rxfh->indir) return 0; for (i = 0; i < IGC_RETA_SIZE; i++) - indir[i] = adapter->rss_indir_tbl[i]; + rxfh->indir[i] = adapter->rss_indir_tbl[i]; return 0; } -static int igc_ethtool_set_rxfh(struct net_device *netdev, const u32 *indir, - const u8 *key, const u8 hfunc) +static int igc_ethtool_set_rxfh(struct net_device *netdev, + struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) { struct igc_adapter *adapter = netdev_priv(netdev); u32 num_queues; int i; /* We do not allow change in unsupported parameters */ - if (key || - (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != 
ETH_RSS_HASH_TOP)) + if (rxfh->key || + (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && + rxfh->hfunc != ETH_RSS_HASH_TOP)) return -EOPNOTSUPP; - if (!indir) + if (!rxfh->indir) return 0; num_queues = adapter->rss_queues; /* Verify user input. */ for (i = 0; i < IGC_RETA_SIZE; i++) - if (indir[i] >= num_queues) + if (rxfh->indir[i] >= num_queues) return -EINVAL; for (i = 0; i < IGC_RETA_SIZE; i++) - adapter->rss_indir_tbl[i] = indir[i]; + adapter->rss_indir_tbl[i] = rxfh->indir[i]; igc_write_rss_indir_tbl(adapter); diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c index d2562c8e80..0dd61719f1 100644 --- a/drivers/net/ethernet/intel/igc/igc_i225.c +++ b/drivers/net/ethernet/intel/igc/igc_i225.c @@ -579,9 +579,8 @@ s32 igc_set_ltr_i225(struct igc_hw *hw, bool link) /* Calculate tw_system (nsec). */ if (speed == SPEED_100) { - tw_system = ((rd32(IGC_EEE_SU) & - IGC_TW_SYSTEM_100_MASK) >> - IGC_TW_SYSTEM_100_SHIFT) * 500; + tw_system = FIELD_GET(IGC_TW_SYSTEM_100_MASK, + rd32(IGC_EEE_SU)) * 500; } else { tw_system = (rd32(IGC_EEE_SU) & IGC_TW_SYSTEM_1000_MASK) * 500; diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 45716d271d..23bed58a9d 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -1299,14 +1299,16 @@ static void igc_tx_olinfo_status(struct igc_ring *tx_ring, u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT; /* insert L4 checksum */ - olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) * - ((IGC_TXD_POPTS_TXSM << 8) / - IGC_TX_FLAGS_CSUM); + olinfo_status |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_CSUM, + (IGC_TXD_POPTS_TXSM << 8)); /* insert IPv4 checksum */ - olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) * - (((IGC_TXD_POPTS_IXSM << 8)) / - IGC_TX_FLAGS_IPV4); + olinfo_status |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_IPV4, + (IGC_TXD_POPTS_IXSM << 8)); + + /* Use the second timer (free running, in general) for the timestamp */ + olinfo_status |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_TIMER_1, + IGC_TXD_PTP2_TIMER_1); tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); } @@ -1640,10 +1642,6 @@ done: if (unlikely(test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags) && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { - /* FIXME: add support for retrieving timestamps from - * the other timer registers before skipping the - * timestamping request. 
- */ unsigned long flags; u32 tstamp_flags; @@ -1651,6 +1649,8 @@ done: if (igc_request_tx_tstamp(adapter, skb, &tstamp_flags)) { skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; tx_flags |= IGC_TX_FLAGS_TSTAMP | tstamp_flags; + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP_USE_CYCLES) + tx_flags |= IGC_TX_FLAGS_TSTAMP_TIMER_1; } else { adapter->tx_hwtstamp_skipped++; } @@ -1963,9 +1963,9 @@ static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring, static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring, struct igc_rx_buffer *rx_buffer, - struct xdp_buff *xdp, - ktime_t timestamp) + struct igc_xdp_buff *ctx) { + struct xdp_buff *xdp = &ctx->xdp; unsigned int metasize = xdp->data - xdp->data_meta; unsigned int size = xdp->data_end - xdp->data; unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size); @@ -1982,8 +1982,10 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring, if (unlikely(!skb)) return NULL; - if (timestamp) - skb_hwtstamps(skb)->hwtstamp = timestamp; + if (ctx->rx_ts) { + skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP_NETDEV; + skb_hwtstamps(skb)->netdev_data = ctx->rx_ts; + } /* Determine available headroom for copy */ headlen = size; @@ -2583,11 +2585,10 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) int xdp_status = 0, rx_buffer_pgcnt; while (likely(total_packets < budget)) { - union igc_adv_rx_desc *rx_desc; + struct igc_xdp_buff ctx = { .rx_ts = NULL }; struct igc_rx_buffer *rx_buffer; + union igc_adv_rx_desc *rx_desc; unsigned int size, truesize; - struct igc_xdp_buff ctx; - ktime_t timestamp = 0; int pkt_offset = 0; void *pktbuf; @@ -2614,9 +2615,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset; if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) { - timestamp = igc_ptp_rx_pktstamp(q_vector->adapter, - pktbuf); - ctx.rx_ts = timestamp; + ctx.rx_ts = pktbuf; pkt_offset = IGC_TS_HDR_LEN; size -= IGC_TS_HDR_LEN; } @@ -2653,8 +2652,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) else if (ring_uses_build_skb(rx_ring)) skb = igc_build_skb(rx_ring, rx_buffer, &ctx.xdp); else - skb = igc_construct_skb(rx_ring, rx_buffer, &ctx.xdp, - timestamp); + skb = igc_construct_skb(rx_ring, rx_buffer, &ctx); /* exit if we failed to retrieve a buffer */ if (!skb) { @@ -2803,9 +2801,7 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget) ctx->rx_desc = desc; if (igc_test_staterr(desc, IGC_RXDADV_STAT_TSIP)) { - timestamp = igc_ptp_rx_pktstamp(q_vector->adapter, - bi->xdp->data); - ctx->rx_ts = timestamp; + ctx->rx_ts = bi->xdp->data; bi->xdp->data += IGC_TS_HDR_LEN; @@ -3452,8 +3448,8 @@ static int igc_write_flex_filter_ll(struct igc_adapter *adapter, /* Configure filter */ queuing = input->length & IGC_FHFT_LENGTH_MASK; - queuing |= (input->rx_queue << IGC_FHFT_QUEUE_SHIFT) & IGC_FHFT_QUEUE_MASK; - queuing |= (input->prio << IGC_FHFT_PRIO_SHIFT) & IGC_FHFT_PRIO_MASK; + queuing |= FIELD_PREP(IGC_FHFT_QUEUE_MASK, input->rx_queue); + queuing |= FIELD_PREP(IGC_FHFT_PRIO_MASK, input->prio); if (input->immediate_irq) queuing |= IGC_FHFT_IMM_INT; @@ -3712,8 +3708,7 @@ static int igc_enable_nfc_rule(struct igc_adapter *adapter, } if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { - int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >> - VLAN_PRIO_SHIFT; + int prio = FIELD_GET(VLAN_PRIO_MASK, rule->filter.vlan_tci); err = igc_add_vlan_prio_filter(adapter, prio, 
rule->action); if (err) @@ -3735,8 +3730,7 @@ static void igc_disable_nfc_rule(struct igc_adapter *adapter, igc_del_etype_filter(adapter, rule->filter.etype); if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { - int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >> - VLAN_PRIO_SHIFT; + int prio = FIELD_GET(VLAN_PRIO_MASK, rule->filter.vlan_tci); igc_del_vlan_prio_filter(adapter, prio); } @@ -6551,6 +6545,24 @@ int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) return 0; } +static ktime_t igc_get_tstamp(struct net_device *dev, + const struct skb_shared_hwtstamps *hwtstamps, + bool cycles) +{ + struct igc_adapter *adapter = netdev_priv(dev); + struct igc_inline_rx_tstamps *tstamp; + ktime_t timestamp; + + tstamp = hwtstamps->netdev_data; + + if (cycles) + timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer1); + else + timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer0); + + return timestamp; +} + static const struct net_device_ops igc_netdev_ops = { .ndo_open = igc_open, .ndo_stop = igc_close, @@ -6568,6 +6580,7 @@ static const struct net_device_ops igc_netdev_ops = { .ndo_bpf = igc_bpf, .ndo_xdp_xmit = igc_xdp_xmit, .ndo_xsk_wakeup = igc_xsk_wakeup, + .ndo_get_tstamp = igc_get_tstamp, }; /* PCIe configuration access */ @@ -6671,9 +6684,11 @@ static int igc_xdp_rx_hash(const struct xdp_md *_ctx, u32 *hash, static int igc_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp) { const struct igc_xdp_buff *ctx = (void *)_ctx; + struct igc_adapter *adapter = netdev_priv(ctx->xdp.rxq->dev); + struct igc_inline_rx_tstamps *tstamp = ctx->rx_ts; if (igc_test_staterr(ctx->rx_desc, IGC_RXDADV_STAT_TSIP)) { - *timestamp = ctx->rx_ts; + *timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer0); return 0; } diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c index d0d9e71701..861f370768 100644 --- a/drivers/net/ethernet/intel/igc/igc_phy.c +++ b/drivers/net/ethernet/intel/igc/igc_phy.c @@ -130,11 +130,7 @@ void igc_power_down_phy_copper(struct igc_hw *hw) /* The PHY will retain its settings across a power down/up cycle */ hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); mii_reg |= MII_CR_POWER_DOWN; - - /* Temporary workaround - should be removed when PHY will implement - * IEEE registers as properly - */ - /* hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);*/ + hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); usleep_range(1000, 2000); } @@ -727,7 +723,7 @@ static s32 igc_write_xmdio_reg(struct igc_hw *hw, u16 addr, */ s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data) { - u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT; + u8 dev_addr = FIELD_GET(GPY_MMD_MASK, offset); s32 ret_val; offset = offset & GPY_REG_MASK; @@ -758,7 +754,7 @@ s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data) */ s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data) { - u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT; + u8 dev_addr = FIELD_GET(GPY_MMD_MASK, offset); s32 ret_val; offset = offset & GPY_REG_MASK; diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index 928f387922..885faaa7b9 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -459,12 +459,10 @@ static int igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter, /** * igc_ptp_rx_pktstamp - Retrieve timestamp from Rx packet buffer * @adapter: Pointer to adapter the packet buffer belongs to - * @buf: Pointer to packet buffer + * @buf: 
Pointer to start of timestamp in HW format (2 32-bit words) * - * This function retrieves the timestamp saved in the beginning of packet - * buffer. While two timestamps are available, one in timer0 reference and the - * other in timer1 reference, this function considers only the timestamp in - * timer0 reference. + * This function retrieves and converts the timestamp stored at @buf + * to ktime_t, adjusting for hardware latencies. * * Returns timestamp value. */ @@ -474,17 +472,8 @@ ktime_t igc_ptp_rx_pktstamp(struct igc_adapter *adapter, __le32 *buf) u32 secs, nsecs; int adjust; - /* Timestamps are saved in little endian at the beginning of the packet - * buffer following the layout: - * - * DWORD: | 0 | 1 | 2 | 3 | - * Field: | Timer1 SYSTIML | Timer1 SYSTIMH | Timer0 SYSTIML | Timer0 SYSTIMH | - * - * SYSTIML holds the nanoseconds part while SYSTIMH holds the seconds - * part of the timestamp. - */ - nsecs = le32_to_cpu(buf[2]); - secs = le32_to_cpu(buf[3]); + nsecs = le32_to_cpu(buf[0]); + secs = le32_to_cpu(buf[1]); timestamp = ktime_set(secs, nsecs); @@ -542,10 +531,11 @@ static void igc_ptp_enable_rx_timestamp(struct igc_adapter *adapter) for (i = 0; i < adapter->num_rx_queues; i++) { val = rd32(IGC_SRRCTL(i)); - /* FIXME: For now, only support retrieving RX timestamps from - * timer 0. + /* Enable retrieving timestamps from timer 0, the + * "adjustable clock" and timer 1 the "free running + * clock". */ - val |= IGC_SRRCTL_TIMER1SEL(0) | IGC_SRRCTL_TIMER0SEL(0) | + val |= IGC_SRRCTL_TIMER1SEL(1) | IGC_SRRCTL_TIMER0SEL(0) | IGC_SRRCTL_TIMESTAMP; wr32(IGC_SRRCTL(i), val); } @@ -1035,6 +1025,26 @@ static int igc_ptp_getcrosststamp(struct ptp_clock_info *ptp, adapter, &adapter->snapshot, cts); } +static int igc_ptp_getcyclesx64(struct ptp_clock_info *ptp, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct igc_adapter *igc = container_of(ptp, struct igc_adapter, ptp_caps); + struct igc_hw *hw = &igc->hw; + unsigned long flags; + + spin_lock_irqsave(&igc->free_timer_lock, flags); + + ptp_read_system_prets(sts); + ts->tv_nsec = rd32(IGC_SYSTIML_1); + ts->tv_sec = rd32(IGC_SYSTIMH_1); + ptp_read_system_postts(sts); + + spin_unlock_irqrestore(&igc->free_timer_lock, flags); + + return 0; +} + /** * igc_ptp_init - Initialize PTP functionality * @adapter: Board private structure @@ -1088,6 +1098,7 @@ void igc_ptp_init(struct igc_adapter *adapter) adapter->ptp_caps.adjfine = igc_ptp_adjfine_i225; adapter->ptp_caps.adjtime = igc_ptp_adjtime_i225; adapter->ptp_caps.gettimex64 = igc_ptp_gettimex64_i225; + adapter->ptp_caps.getcyclesx64 = igc_ptp_getcyclesx64; adapter->ptp_caps.settime64 = igc_ptp_settime_i225; adapter->ptp_caps.enable = igc_ptp_feature_enable_i225; adapter->ptp_caps.pps = 1; @@ -1108,6 +1119,7 @@ void igc_ptp_init(struct igc_adapter *adapter) } spin_lock_init(&adapter->ptp_tx_lock); + spin_lock_init(&adapter->free_timer_lock); spin_lock_init(&adapter->tmreg_lock); adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h index 20e17f5fbc..d38c87d7e5 100644 --- a/drivers/net/ethernet/intel/igc/igc_regs.h +++ b/drivers/net/ethernet/intel/igc/igc_regs.h @@ -243,6 +243,11 @@ #define IGC_SYSTIMR 0x0B6F8 /* System time register Residue */ #define IGC_TIMINCA 0x0B608 /* Increment attributes register - RW */ +#define IGC_SYSTIML_1 0x0B688 /* System time register Low - RO (timer 1) */ +#define IGC_SYSTIMH_1 0x0B68C /* System time register High - RO (timer 1) */ +#define 
IGC_SYSTIMR_1 0x0B684 /* System time register Residue (timer 1) */ +#define IGC_TIMINCA_1 0x0B690 /* Increment attributes register - RW (timer 1) */ + /* TX Timestamp Low */ #define IGC_TXSTMPL_0 0x0B618 #define IGC_TXSTMPL_1 0x0B698 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c index 3d56481e16..6835d5f187 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c @@ -794,7 +794,7 @@ static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); rar_high &= ~IXGBE_RAH_VIND_MASK; - rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK); + rar_high |= FIELD_PREP(IXGBE_RAH_VIND_MASK, vmdq); IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); return 0; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index b2a0f2aaa0..2e6e036515 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -684,7 +684,7 @@ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw) u32 reg; reg = IXGBE_READ_REG(hw, IXGBE_STATUS); - bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT; + bus->func = FIELD_GET(IXGBE_STATUS_LAN_ID, reg); bus->lan_id = bus->func; /* check for a port swap */ @@ -695,8 +695,8 @@ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw) /* Get MAC instance from EEPROM for configuring CS4227 */ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) { hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4); - bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >> - IXGBE_EE_CTRL_4_INST_ID_SHIFT; + bus->instance_id = FIELD_GET(IXGBE_EE_CTRL_4_INST_ID, + ee_ctrl_4); } } @@ -870,10 +870,9 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) * SPI EEPROM is assumed here. This code would need to * change if a future EEPROM is not SPI. 
*/ - eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> - IXGBE_EEC_SIZE_SHIFT); + eeprom_size = FIELD_GET(IXGBE_EEC_SIZE, eec); eeprom->word_size = BIT(eeprom_size + - IXGBE_EEPROM_WORD_SIZE_SHIFT); + IXGBE_EEPROM_WORD_SIZE_SHIFT); } if (eec & IXGBE_EEC_ADDR_SIZE) @@ -3935,10 +3934,10 @@ s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw) if (status) return status; - sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >> - IXGBE_ETS_DATA_INDEX_SHIFT); - sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >> - IXGBE_ETS_DATA_LOC_SHIFT); + sensor_index = FIELD_GET(IXGBE_ETS_DATA_INDEX_MASK, + ets_sensor); + sensor_location = FIELD_GET(IXGBE_ETS_DATA_LOC_MASK, + ets_sensor); if (sensor_location != 0) { status = hw->phy.ops.read_i2c_byte(hw, @@ -3982,8 +3981,7 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw) if (status) return status; - low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >> - IXGBE_ETS_LTHRES_DELTA_SHIFT); + low_thresh_delta = FIELD_GET(IXGBE_ETS_LTHRES_DELTA_MASK, ets_cfg); num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK); if (num_sensors > IXGBE_MAX_SENSORS) num_sensors = IXGBE_MAX_SENSORS; @@ -3997,10 +3995,10 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw) ets_offset + 1 + i); continue; } - sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >> - IXGBE_ETS_DATA_INDEX_SHIFT); - sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >> - IXGBE_ETS_DATA_LOC_SHIFT); + sensor_index = FIELD_GET(IXGBE_ETS_DATA_INDEX_MASK, + ets_sensor); + sensor_location = FIELD_GET(IXGBE_ETS_DATA_LOC_MASK, + ets_sensor); therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK; hw->phy.ops.write_i2c_byte(hw, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index e47461f3ea..9a63457712 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1413,12 +1413,11 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, switch (stringset) { case ETH_SS_TEST: for (i = 0; i < IXGBE_TEST_LEN; i++) - ethtool_sprintf(&p, "%s", ixgbe_gstrings_test[i]); + ethtool_puts(&p, ixgbe_gstrings_test[i]); break; case ETH_SS_STATS: for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) - ethtool_sprintf(&p, "%s", - ixgbe_gstrings_stats[i].stat_string); + ethtool_puts(&p, ixgbe_gstrings_stats[i].stat_string); for (i = 0; i < netdev->num_tx_queues; i++) { ethtool_sprintf(&p, "tx_queue_%u_packets", i); ethtool_sprintf(&p, "tx_queue_%u_bytes", i); @@ -3108,35 +3107,37 @@ static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir) indir[i] = adapter->rss_indir_tbl[i] & rss_m; } -static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, - u8 *hfunc) +static int ixgbe_get_rxfh(struct net_device *netdev, + struct ethtool_rxfh_param *rxfh) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - if (hfunc) - *hfunc = ETH_RSS_HASH_TOP; + rxfh->hfunc = ETH_RSS_HASH_TOP; - if (indir) - ixgbe_get_reta(adapter, indir); + if (rxfh->indir) + ixgbe_get_reta(adapter, rxfh->indir); - if (key) - memcpy(key, adapter->rss_key, ixgbe_get_rxfh_key_size(netdev)); + if (rxfh->key) + memcpy(rxfh->key, adapter->rss_key, + ixgbe_get_rxfh_key_size(netdev)); return 0; } -static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir, - const u8 *key, const u8 hfunc) +static int ixgbe_set_rxfh(struct net_device *netdev, + struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) { struct 
ixgbe_adapter *adapter = netdev_priv(netdev); int i; u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter); - if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && + rxfh->hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; /* Fill out the redirection table */ - if (indir) { + if (rxfh->indir) { int max_queues = min_t(int, adapter->num_rx_queues, ixgbe_rss_indir_tbl_max(adapter)); @@ -3147,18 +3148,19 @@ static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir, /* Verify user input. */ for (i = 0; i < reta_entries; i++) - if (indir[i] >= max_queues) + if (rxfh->indir[i] >= max_queues) return -EINVAL; for (i = 0; i < reta_entries; i++) - adapter->rss_indir_tbl[i] = indir[i]; + adapter->rss_indir_tbl[i] = rxfh->indir[i]; ixgbe_store_reta(adapter); } /* Fill out the rss hash key */ - if (key) { - memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev)); + if (rxfh->key) { + memcpy(adapter->rss_key, rxfh->key, + ixgbe_get_rxfh_key_size(netdev)); ixgbe_store_key(adapter); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index 7311bd545a..18d63c8c2f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -670,8 +670,8 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) int fcoe_i_h = fcoe->offset + ((i + fcreta_size) % fcoe->indices); fcoe_q_h = adapter->rx_ring[fcoe_i_h]->reg_idx; - fcoe_q_h = (fcoe_q_h << IXGBE_FCRETA_ENTRY_HIGH_SHIFT) & - IXGBE_FCRETA_ENTRY_HIGH_MASK; + fcoe_q_h = FIELD_PREP(IXGBE_FCRETA_ENTRY_HIGH_MASK, + fcoe_q_h); } fcoe_i = fcoe->offset + (i % fcoe->indices); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c index 13a6fca310..866024f2b9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c @@ -914,7 +914,13 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) goto err_out; } - xs = kzalloc(sizeof(*xs), GFP_KERNEL); + algo = xfrm_aead_get_byname(aes_gcm_name, IXGBE_IPSEC_AUTH_BITS, 1); + if (unlikely(!algo)) { + err = -ENOENT; + goto err_out; + } + + xs = kzalloc(sizeof(*xs), GFP_ATOMIC); if (unlikely(!xs)) { err = -ENOMEM; goto err_out; @@ -930,14 +936,8 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) memcpy(&xs->id.daddr.a4, sam->addr, sizeof(xs->id.daddr.a4)); xs->xso.dev = adapter->netdev; - algo = xfrm_aead_get_byname(aes_gcm_name, IXGBE_IPSEC_AUTH_BITS, 1); - if (unlikely(!algo)) { - err = -ENOENT; - goto err_xs; - } - aead_len = sizeof(*xs->aead) + IXGBE_IPSEC_KEY_BITS / 8; - xs->aead = kzalloc(aead_len, GFP_KERNEL); + xs->aead = kzalloc(aead_len, GFP_ATOMIC); if (unlikely(!xs->aead)) { err = -ENOMEM; goto err_xs; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index ce234e76ea..99876b765b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -11409,7 +11409,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, if ((pf_func & 1) == (pdev->devfn & 1)) { unsigned int device_id; - vf = (req_id & 0x7F) >> 1; + vf = FIELD_GET(0x7F, req_id); e_dev_err("VF %d has caused a PCIe error\n", vf); e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: " "%8.8x\tdw3: %8.8x\n", diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 930dc50719..f28140a05f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -276,9 +276,8 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
			return 0;

		if (hw->phy.nw_mng_if_sel) {
-			phy_addr = (hw->phy.nw_mng_if_sel &
-				    IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
-				   IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
+			phy_addr = FIELD_GET(IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD,
+					     hw->phy.nw_mng_if_sel);
			if (ixgbe_probe_phy(hw, phy_addr))
				return 0;
			else
@@ -1447,8 +1446,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
		ret_val = hw->eeprom.ops.read(hw, data_offset, &eword);
		if (ret_val)
			goto err_eeprom;
-		control = (eword & IXGBE_CONTROL_MASK_NL) >>
-			  IXGBE_CONTROL_SHIFT_NL;
+		control = FIELD_GET(IXGBE_CONTROL_MASK_NL, eword);
		edata = eword & IXGBE_DATA_MASK_NL;
		switch (control) {
		case IXGBE_DELAY_NL:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 9379069c55..7299a830f6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -363,8 +363,7 @@ int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
				   u32 *msgbuf, u32 vf)
{
-	int entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
-		      >> IXGBE_VT_MSGINFO_SHIFT;
+	int entries = FIELD_GET(IXGBE_VT_MSGINFO_MASK, msgbuf[0]);
	u16 *hash_list = (u16 *)&msgbuf[1];
	struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
	struct ixgbe_hw *hw = &adapter->hw;
@@ -969,7 +968,7 @@ static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
				 u32 *msgbuf, u32 vf)
{
-	u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
+	u32 add = FIELD_GET(IXGBE_VT_MSGINFO_MASK, msgbuf[0]);
	u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
	u8 tcs = adapter->hw_tcs;
@@ -992,8 +991,7 @@ static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
				    u32 *msgbuf, u32 vf)
{
	u8 *new_mac = ((u8 *)(&msgbuf[1]));
-	int index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
-		    IXGBE_VT_MSGINFO_SHIFT;
+	int index = FIELD_GET(IXGBE_VT_MSGINFO_MASK, msgbuf[0]);
	int err;

	if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted &&
@@ -1849,5 +1847,6 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev,
	ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled;
	ivi->rss_query_en = adapter->vfinfo[vf].rss_query_enabled;
	ivi->trusted = adapter->vfinfo[vf].trusted;
+	ivi->linkstate = adapter->vfinfo[vf].link_state;
	return 0;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 15325c549d..57a912e465 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -187,16 +187,16 @@ s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
-	u32 eec;
-	u16 eeprom_size;

	if (eeprom->type == ixgbe_eeprom_uninitialized) {
+		u16 eeprom_size;
+		u32 eec;
+
		eeprom->semaphore_delay = 10;
		eeprom->type = ixgbe_flash;

		eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
-		eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
-				    IXGBE_EEC_SIZE_SHIFT);
+		eeprom_size = FIELD_GET(IXGBE_EEC_SIZE, eec);
		eeprom->word_size = BIT(eeprom_size +
					IXGBE_EEPROM_WORD_SIZE_SHIFT);
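The ixgbe_sriov.c hunks all decode the same PF-VF mailbox word: the low bits of msgbuf[0] carry the message type, and the IXGBE_VT_MSGINFO field carries a small per-message argument (a multicast count, an add/remove flag, a filter index). A sketch of that decoding pattern; the mask values and names below are illustrative assumptions, not copied from the ixgbe headers:

/* Sketch of PF-VF mailbox word decoding as in ixgbe_set_vf_*(). */
#include <stdint.h>
#include <stdio.h>

#define VT_MSGTYPE_MASK  0x0000FFFFu	/* assumed: low half = opcode */
#define VT_MSGINFO_MASK  0x00FF0000u	/* assumed: bits 23:16 = argument */

static uint32_t field_get(uint32_t mask, uint32_t reg)
{
	return (reg & mask) >> (unsigned int)__builtin_ctz(mask);
}

int main(void)
{
	/* A hypothetical "set multicast list" message carrying 3 entries. */
	uint32_t msgbuf0 = (3u << 16) | 0x000Cu;

	printf("opcode  = 0x%x\n", field_get(VT_MSGTYPE_MASK, msgbuf0));
	printf("entries = %u\n", field_get(VT_MSGINFO_MASK, msgbuf0));
	return 0;
}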
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index cdc912bba8..c1adc94a5a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -630,16 +630,16 @@ static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
-	u32 eec;
-	u16 eeprom_size;

	if (eeprom->type == ixgbe_eeprom_uninitialized) {
+		u16 eeprom_size;
+		u32 eec;
+
		eeprom->semaphore_delay = 10;
		eeprom->type = ixgbe_flash;

		eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
-		eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
-				    IXGBE_EEC_SIZE_SHIFT);
+		eeprom_size = FIELD_GET(IXGBE_EEC_SIZE, eec);
		eeprom->word_size = BIT(eeprom_size +
					IXGBE_EEPROM_WORD_SIZE_SHIFT);
@@ -714,8 +714,7 @@ static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
	ret = ixgbe_iosf_wait(hw, &command);

	if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
-		error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
-			IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
+		error = FIELD_GET(IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK, command);
		hw_dbg(hw, "Failed to read, error %x\n", error);
		ret = -EIO;
		goto out;
@@ -1415,8 +1414,7 @@ static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
	ret = ixgbe_iosf_wait(hw, &command);

	if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
-		error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
-			IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
+		error = FIELD_GET(IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK, command);
		hw_dbg(hw, "Failed to write, error %x\n", error);
		return -EIO;
	}
@@ -3229,9 +3227,8 @@ static void ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
	 */
	if (hw->mac.type == ixgbe_mac_x550em_a &&
	    hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) {
-		hw->phy.mdio.prtad = (hw->phy.nw_mng_if_sel &
-				      IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
-				     IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
+		hw->phy.mdio.prtad = FIELD_GET(IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD,
					       hw->phy.nw_mng_if_sel);
	}
}
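Both IOSF sideband hunks follow the same wait-then-decode shape: poll until a busy bit clears, then, if the response status field is non-zero, extract the completion error sub-field for the debug message. A self-contained sketch of that pattern; the register layout, mask values, and the fake read_reg() stand-in are assumptions for illustration:

/* Sketch of the pattern in ixgbe_{read,write}_iosf_sb_reg_x550(). */
#include <stdint.h>
#include <stdio.h>

#define CTRL_BUSY            0x80000000u	/* assumed bit positions */
#define CTRL_RESP_STAT_MASK  0x00000300u	/* non-zero => command failed */
#define CTRL_CMPL_ERR_MASK   0x00FF0000u	/* completion error code */

static uint32_t field_get(uint32_t mask, uint32_t reg)
{
	return (reg & mask) >> (unsigned int)__builtin_ctz(mask);
}

/* Stand-in for the MMIO read; reports an immediate failed completion. */
static uint32_t read_reg(void)
{
	return CTRL_RESP_STAT_MASK | (0x42u << 16);
}

static int iosf_cmd(void)
{
	uint32_t command = 0;
	int tries;

	/* Bounded poll, analogous to ixgbe_iosf_wait(). */
	for (tries = 0; tries < 100; tries++) {
		command = read_reg();
		if (!(command & CTRL_BUSY))
			break;
	}

	if (command & CTRL_RESP_STAT_MASK) {
		fprintf(stderr, "failed, error %x\n",
			field_get(CTRL_CMPL_ERR_MASK, command));
		return -5;	/* -EIO in the kernel */
	}
	return 0;
}

int main(void)
{
	return iosf_cmd() ? 1 : 0;
}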
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 296915414a..7ac53171b0 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -897,40 +897,41 @@ static u32 ixgbevf_get_rxfh_key_size(struct net_device *netdev)
	return IXGBEVF_RSS_HASH_KEY_SIZE;
}

-static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
-			    u8 *hfunc)
+static int ixgbevf_get_rxfh(struct net_device *netdev,
+			    struct ethtool_rxfh_param *rxfh)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	int err = 0;

-	if (hfunc)
-		*hfunc = ETH_RSS_HASH_TOP;
+	rxfh->hfunc = ETH_RSS_HASH_TOP;

	if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) {
-		if (key)
-			memcpy(key, adapter->rss_key,
+		if (rxfh->key)
+			memcpy(rxfh->key, adapter->rss_key,
			       ixgbevf_get_rxfh_key_size(netdev));

-		if (indir) {
+		if (rxfh->indir) {
			int i;

			for (i = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++)
-				indir[i] = adapter->rss_indir_tbl[i];
+				rxfh->indir[i] = adapter->rss_indir_tbl[i];
		}
	} else {
		/* If neither indirection table nor hash key was requested
		 *  - just return a success avoiding taking any locks.
		 */
-		if (!indir && !key)
+		if (!rxfh->indir && !rxfh->key)
			return 0;

		spin_lock_bh(&adapter->mbx_lock);
-		if (indir)
-			err = ixgbevf_get_reta_locked(&adapter->hw, indir,
+		if (rxfh->indir)
+			err = ixgbevf_get_reta_locked(&adapter->hw,
+						      rxfh->indir,
						      adapter->num_rx_queues);

-		if (!err && key)
-			err = ixgbevf_get_rss_key_locked(&adapter->hw, key);
+		if (!err && rxfh->key)
+			err = ixgbevf_get_rss_key_locked(&adapter->hw,
+							 rxfh->key);

		spin_unlock_bh(&adapter->mbx_lock);
	}
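The ixgbevf conversion mirrors the core ethtool change in this kernel series that collapsed the separate indir/key/hfunc arguments of get_rxfh()/set_rxfh() into a single struct ethtool_rxfh_param. A minimal kernel-style sketch of a driver on the new signature; the driver name and private state are hypothetical, and only the rxfh->hfunc/key/indir usage is taken from the patch above (this is a sketch, not compilable outside a kernel tree):

/* Hypothetical driver "foo" using the single-struct get_rxfh() signature. */
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/kernel.h>

struct foo_adapter {			/* hypothetical private state */
	u8  rss_key[40];
	u32 rss_indir_tbl[64];
};

static int foo_get_rxfh(struct net_device *netdev,
			struct ethtool_rxfh_param *rxfh)
{
	struct foo_adapter *priv = netdev_priv(netdev);
	int i;

	rxfh->hfunc = ETH_RSS_HASH_TOP;	/* report Toeplitz hashing */

	if (rxfh->key)			/* NULL means "not requested" */
		memcpy(rxfh->key, priv->rss_key, sizeof(priv->rss_key));

	if (rxfh->indir)
		for (i = 0; i < ARRAY_SIZE(priv->rss_indir_tbl); i++)
			rxfh->indir[i] = priv->rss_indir_tbl[i];

	return 0;
}

static const struct ethtool_ops foo_ethtool_ops = {
	.get_rxfh = foo_get_rxfh,
};

In the same API change, set_rxfh() additionally gained a struct netlink_ext_ack * argument for extended error reporting, which is why the ixgbe/ixgbevf set paths above only needed their parameter lists rewritten, not their validation logic.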