author	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-08-07 13:17:46 +0000
committer	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-08-07 13:17:46 +0000
commit	7f3a4257159dea8e7ef66d1a539dc6df708b8ed3 (patch)
tree	bcc69b5f4609f348fac49e2f59e210b29eaea783 /drivers/net/ethernet/intel/ice
parent	Adding upstream version 6.9.12. (diff)
Adding upstream version 6.10.3. (tag: upstream/6.10.3)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/intel/ice')
55 files changed, 2176 insertions, 1694 deletions
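
Note: the headline user-visible change in this update is the new driver-specific devlink parameter "tx_scheduling_layers", which selects between the 5-layer and 9-layer Tx scheduler topology and is stored permanently in the NVM PFA TLV. The sketch below is condensed from the patch itself for readability, not a substitute for the full diff: it shows only the registration pattern, while the get/set callbacks (which read and write the TLV under ice_acquire_nvm()) appear in full in devlink/devlink.c below. From userspace the parameter would be driven with, e.g., `devlink dev param set pci/0000:01:00.0 name tx_scheduling_layers value 5 cmode permanent` (PCI address hypothetical), followed by a PCI slot powercycle for the new topology to take effect.

	/* Condensed sketch of the registration pattern used by this patch.
	 * ice_devlink_tx_sched_layers_{get,set,validate} are the callbacks
	 * defined in devlink/devlink.c in the diff below.
	 */
	enum ice_param_id {
		ICE_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
		ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS,
	};

	static const struct devlink_param ice_dvl_sched_params[] = {
		DEVLINK_PARAM_DRIVER(ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS,
				     "tx_scheduling_layers", DEVLINK_PARAM_TYPE_U8,
				     BIT(DEVLINK_PARAM_CMODE_PERMANENT),
				     ice_devlink_tx_sched_layers_get,
				     ice_devlink_tx_sched_layers_set,
				     ice_devlink_tx_sched_layers_validate),
	};

	int ice_devlink_register_params(struct ice_pf *pf)
	{
		struct devlink *devlink = priv_to_devlink(pf);

		/* Register only when FW advertises Tx scheduler topology
		 * compatibility mode (ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE).
		 */
		if (pf->hw.func_caps.common_cap.tx_sched_topo_comp_mode_en)
			return devl_params_register(devlink, ice_dvl_sched_params,
						    ARRAY_SIZE(ice_dvl_sched_params));
		return 0;
	}
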
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile index cddd82d4ca..03500e28ac 100644 --- a/drivers/net/ethernet/intel/ice/Makefile +++ b/drivers/net/ethernet/intel/ice/Makefile @@ -5,6 +5,7 @@ # Makefile for the Intel(R) Ethernet Connection E800 Series Linux Driver # +subdir-ccflags-y += -I$(src) obj-$(CONFIG_ICE) += ice.o ice-y := ice_main.o \ @@ -28,7 +29,8 @@ ice-y := ice_main.o \ ice_flex_pipe.o \ ice_flow.o \ ice_idc.o \ - ice_devlink.o \ + devlink/devlink.o \ + devlink/devlink_port.o \ ice_ddp.o \ ice_fw_update.o \ ice_lag.o \ @@ -36,7 +38,8 @@ ice-y := ice_main.o \ ice_repr.o \ ice_tc_lib.o \ ice_fwlog.o \ - ice_debugfs.o + ice_debugfs.o \ + ice_adapter.o ice-$(CONFIG_PCI_IOV) += \ ice_sriov.o \ ice_virtchnl.o \ diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c index b516e42b41..704e9ad514 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.c +++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c @@ -5,13 +5,11 @@ #include "ice.h" #include "ice_lib.h" -#include "ice_devlink.h" +#include "devlink.h" #include "ice_eswitch.h" #include "ice_fw_update.h" #include "ice_dcb_lib.h" -static int ice_active_port_option = -1; - /* context for devlink info version reporting */ struct ice_info_ctx { char buf[128]; @@ -478,17 +476,17 @@ ice_devlink_reload_down(struct devlink *devlink, bool netns_change, case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: if (ice_is_eswitch_mode_switchdev(pf)) { NL_SET_ERR_MSG_MOD(extack, - "Go to legacy mode before doing reinit\n"); + "Go to legacy mode before doing reinit"); return -EOPNOTSUPP; } if (ice_is_adq_active(pf)) { NL_SET_ERR_MSG_MOD(extack, - "Turn off ADQ before doing reinit\n"); + "Turn off ADQ before doing reinit"); return -EOPNOTSUPP; } if (ice_has_vfs(pf)) { NL_SET_ERR_MSG_MOD(extack, - "Remove all VFs before doing reinit\n"); + "Remove all VFs before doing reinit"); return -EOPNOTSUPP; } ice_devlink_reinit_down(pf); @@ -526,248 +524,153 @@ ice_devlink_reload_empr_finish(struct ice_pf *pf, } /** - * ice_devlink_port_opt_speed_str - convert speed to a string - * @speed: speed value - */ -static const char *ice_devlink_port_opt_speed_str(u8 speed) -{ - switch (speed & ICE_AQC_PORT_OPT_MAX_LANE_M) { - case ICE_AQC_PORT_OPT_MAX_LANE_100M: - return "0.1"; - case ICE_AQC_PORT_OPT_MAX_LANE_1G: - return "1"; - case ICE_AQC_PORT_OPT_MAX_LANE_2500M: - return "2.5"; - case ICE_AQC_PORT_OPT_MAX_LANE_5G: - return "5"; - case ICE_AQC_PORT_OPT_MAX_LANE_10G: - return "10"; - case ICE_AQC_PORT_OPT_MAX_LANE_25G: - return "25"; - case ICE_AQC_PORT_OPT_MAX_LANE_50G: - return "50"; - case ICE_AQC_PORT_OPT_MAX_LANE_100G: - return "100"; - } - - return "-"; -} - -#define ICE_PORT_OPT_DESC_LEN 50 -/** - * ice_devlink_port_options_print - Print available port split options - * @pf: the PF to print split port options + * ice_get_tx_topo_user_sel - Read user's choice from flash + * @pf: pointer to pf structure + * @layers: value read from flash will be saved here * - * Prints a table with available port split options and max port speeds + * Reads user's preference for Tx Scheduler Topology Tree from PFA TLV. + * + * Return: zero when read was successful, negative values otherwise. 
*/ -static void ice_devlink_port_options_print(struct ice_pf *pf) +static int ice_get_tx_topo_user_sel(struct ice_pf *pf, uint8_t *layers) { - u8 i, j, options_count, cnt, speed, pending_idx, active_idx; - struct ice_aqc_get_port_options_elem *options, *opt; - struct device *dev = ice_pf_to_dev(pf); - bool active_valid, pending_valid; - char desc[ICE_PORT_OPT_DESC_LEN]; - const char *str; - int status; + struct ice_aqc_nvm_tx_topo_user_sel usr_sel = {}; + struct ice_hw *hw = &pf->hw; + int err; - options = kcalloc(ICE_AQC_PORT_OPT_MAX * ICE_MAX_PORT_PER_PCI_DEV, - sizeof(*options), GFP_KERNEL); - if (!options) - return; + err = ice_acquire_nvm(hw, ICE_RES_READ); + if (err) + return err; - for (i = 0; i < ICE_MAX_PORT_PER_PCI_DEV; i++) { - opt = options + i * ICE_AQC_PORT_OPT_MAX; - options_count = ICE_AQC_PORT_OPT_MAX; - active_valid = 0; + err = ice_aq_read_nvm(hw, ICE_AQC_NVM_TX_TOPO_MOD_ID, 0, + sizeof(usr_sel), &usr_sel, true, true, NULL); + if (err) + goto exit_release_res; - status = ice_aq_get_port_options(&pf->hw, opt, &options_count, - i, true, &active_idx, - &active_valid, &pending_idx, - &pending_valid); - if (status) { - dev_dbg(dev, "Couldn't read port option for port %d, err %d\n", - i, status); - goto err; - } - } + if (usr_sel.data & ICE_AQC_NVM_TX_TOPO_USER_SEL) + *layers = ICE_SCHED_5_LAYERS; + else + *layers = ICE_SCHED_9_LAYERS; - dev_dbg(dev, "Available port split options and max port speeds (Gbps):\n"); - dev_dbg(dev, "Status Split Quad 0 Quad 1\n"); - dev_dbg(dev, " count L0 L1 L2 L3 L4 L5 L6 L7\n"); +exit_release_res: + ice_release_nvm(hw); - for (i = 0; i < options_count; i++) { - cnt = 0; + return err; +} - if (i == ice_active_port_option) - str = "Active"; - else if ((i == pending_idx) && pending_valid) - str = "Pending"; - else - str = ""; +/** + * ice_update_tx_topo_user_sel - Save user's preference in flash + * @pf: pointer to pf structure + * @layers: value to be saved in flash + * + * Variable "layers" defines user's preference about number of layers in Tx + * Scheduler Topology Tree. This choice should be stored in PFA TLV field + * and be picked up by driver, next time during init. + * + * Return: zero when save was successful, negative values otherwise. 
+ */ +static int ice_update_tx_topo_user_sel(struct ice_pf *pf, int layers) +{ + struct ice_aqc_nvm_tx_topo_user_sel usr_sel = {}; + struct ice_hw *hw = &pf->hw; + int err; - cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt, - "%-8s", str); + err = ice_acquire_nvm(hw, ICE_RES_WRITE); + if (err) + return err; - cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt, - "%-6u", options[i].pmd); + err = ice_aq_read_nvm(hw, ICE_AQC_NVM_TX_TOPO_MOD_ID, 0, + sizeof(usr_sel), &usr_sel, true, true, NULL); + if (err) + goto exit_release_res; - for (j = 0; j < ICE_MAX_PORT_PER_PCI_DEV; ++j) { - speed = options[i + j * ICE_AQC_PORT_OPT_MAX].max_lane_speed; - str = ice_devlink_port_opt_speed_str(speed); - cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt, - "%3s ", str); - } + if (layers == ICE_SCHED_5_LAYERS) + usr_sel.data |= ICE_AQC_NVM_TX_TOPO_USER_SEL; + else + usr_sel.data &= ~ICE_AQC_NVM_TX_TOPO_USER_SEL; - dev_dbg(dev, "%s\n", desc); - } + err = ice_write_one_nvm_block(pf, ICE_AQC_NVM_TX_TOPO_MOD_ID, 2, + sizeof(usr_sel.data), &usr_sel.data, + true, NULL, NULL); +exit_release_res: + ice_release_nvm(hw); -err: - kfree(options); + return err; } /** - * ice_devlink_aq_set_port_option - Send set port option admin queue command - * @pf: the PF to print split port options - * @option_idx: selected port option - * @extack: extended netdev ack structure + * ice_devlink_tx_sched_layers_get - Get tx_scheduling_layers parameter + * @devlink: pointer to the devlink instance + * @id: the parameter ID to set + * @ctx: context to store the parameter value * - * Sends set port option admin queue command with selected port option and - * calls NVM write activate. + * Return: zero on success and negative value on failure. */ -static int -ice_devlink_aq_set_port_option(struct ice_pf *pf, u8 option_idx, - struct netlink_ext_ack *extack) +static int ice_devlink_tx_sched_layers_get(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) { - struct device *dev = ice_pf_to_dev(pf); - int status; - - status = ice_aq_set_port_option(&pf->hw, 0, true, option_idx); - if (status) { - dev_dbg(dev, "ice_aq_set_port_option, err %d aq_err %d\n", - status, pf->hw.adminq.sq_last_status); - NL_SET_ERR_MSG_MOD(extack, "Port split request failed"); - return -EIO; - } - - status = ice_acquire_nvm(&pf->hw, ICE_RES_WRITE); - if (status) { - dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n", - status, pf->hw.adminq.sq_last_status); - NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore"); - return -EIO; - } - - status = ice_nvm_write_activate(&pf->hw, ICE_AQC_NVM_ACTIV_REQ_EMPR, NULL); - if (status) { - dev_dbg(dev, "ice_nvm_write_activate failed, err %d aq_err %d\n", - status, pf->hw.adminq.sq_last_status); - NL_SET_ERR_MSG_MOD(extack, "Port split request failed to save data"); - ice_release_nvm(&pf->hw); - return -EIO; - } + struct ice_pf *pf = devlink_priv(devlink); + int err; - ice_release_nvm(&pf->hw); + err = ice_get_tx_topo_user_sel(pf, &ctx->val.vu8); + if (err) + return err; - NL_SET_ERR_MSG_MOD(extack, "Reboot required to finish port split"); return 0; } /** - * ice_devlink_port_split - .port_split devlink handler - * @devlink: devlink instance structure - * @port: devlink port structure - * @count: number of ports to split to - * @extack: extended netdev ack structure - * - * Callback for the devlink .port_split operation. 
- * - * Unfortunately, the devlink expression of available options is limited - * to just a number, so search for an FW port option which supports - * the specified number. As there could be multiple FW port options with - * the same port split count, allow switching between them. When the same - * port split count request is issued again, switch to the next FW port - * option with the same port split count. + * ice_devlink_tx_sched_layers_set - Set tx_scheduling_layers parameter + * @devlink: pointer to the devlink instance + * @id: the parameter ID to set + * @ctx: context to get the parameter value + * @extack: netlink extended ACK structure * - * Return: zero on success or an error code on failure. + * Return: zero on success and negative value on failure. */ -static int -ice_devlink_port_split(struct devlink *devlink, struct devlink_port *port, - unsigned int count, struct netlink_ext_ack *extack) +static int ice_devlink_tx_sched_layers_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { - struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX]; - u8 i, j, active_idx, pending_idx, new_option; struct ice_pf *pf = devlink_priv(devlink); - u8 option_count = ICE_AQC_PORT_OPT_MAX; - struct device *dev = ice_pf_to_dev(pf); - bool active_valid, pending_valid; - int status; - - status = ice_aq_get_port_options(&pf->hw, options, &option_count, - 0, true, &active_idx, &active_valid, - &pending_idx, &pending_valid); - if (status) { - dev_dbg(dev, "Couldn't read port split options, err = %d\n", - status); - NL_SET_ERR_MSG_MOD(extack, "Failed to get available port split options"); - return -EIO; - } - - new_option = ICE_AQC_PORT_OPT_MAX; - active_idx = pending_valid ? pending_idx : active_idx; - for (i = 1; i <= option_count; i++) { - /* In order to allow switching between FW port options with - * the same port split count, search for a new option starting - * from the active/pending option (with array wrap around). - */ - j = (active_idx + i) % option_count; - - if (count == options[j].pmd) { - new_option = j; - break; - } - } - - if (new_option == active_idx) { - dev_dbg(dev, "request to split: count: %u is already set and there are no other options\n", - count); - NL_SET_ERR_MSG_MOD(extack, "Requested split count is already set"); - ice_devlink_port_options_print(pf); - return -EINVAL; - } - - if (new_option == ICE_AQC_PORT_OPT_MAX) { - dev_dbg(dev, "request to split: count: %u not found\n", count); - NL_SET_ERR_MSG_MOD(extack, "Port split requested unsupported port config"); - ice_devlink_port_options_print(pf); - return -EINVAL; - } + int err; - status = ice_devlink_aq_set_port_option(pf, new_option, extack); - if (status) - return status; + err = ice_update_tx_topo_user_sel(pf, ctx->val.vu8); + if (err) + return err; - ice_devlink_port_options_print(pf); + NL_SET_ERR_MSG_MOD(extack, + "Tx scheduling layers have been changed on this device. You must do the PCI slot powercycle for the change to take effect."); return 0; } /** - * ice_devlink_port_unsplit - .port_unsplit devlink handler - * @devlink: devlink instance structure - * @port: devlink port structure - * @extack: extended netdev ack structure + * ice_devlink_tx_sched_layers_validate - Validate passed tx_scheduling_layers + * parameter value + * @devlink: unused pointer to devlink instance + * @id: the parameter ID to validate + * @val: value to validate + * @extack: netlink extended ACK structure * - * Callback for the devlink .port_unsplit operation. 
- * Calls ice_devlink_port_split with split count set to 1. - * There could be no FW option available with split count 1. + * Supported values are: + * - 5 - five layers Tx Scheduler Topology Tree + * - 9 - nine layers Tx Scheduler Topology Tree * - * Return: zero on success or an error code on failure. + * Return: zero when passed parameter value is supported. Negative value on + * error. */ -static int -ice_devlink_port_unsplit(struct devlink *devlink, struct devlink_port *port, - struct netlink_ext_ack *extack) +static int ice_devlink_tx_sched_layers_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) { - return ice_devlink_port_split(devlink, port, 1, extack); + if (val.vu8 != ICE_SCHED_5_LAYERS && val.vu8 != ICE_SCHED_9_LAYERS) { + NL_SET_ERR_MSG_MOD(extack, + "Wrong number of tx scheduler layers provided."); + return -EINVAL; + } + + return 0; } /** @@ -1290,18 +1193,16 @@ static int ice_devlink_set_parent(struct devlink_rate *devlink_rate, static int ice_devlink_reinit_up(struct ice_pf *pf) { struct ice_vsi *vsi = ice_get_main_vsi(pf); - struct ice_vsi_cfg_params params; int err; err = ice_init_dev(pf); if (err) return err; - params = ice_vsi_to_params(vsi); - params.flags = ICE_VSI_FLAG_INIT; + vsi->flags = ICE_VSI_FLAG_INIT; rtnl_lock(); - err = ice_vsi_cfg(vsi, ¶ms); + err = ice_vsi_cfg(vsi); rtnl_unlock(); if (err) goto err_vsi_cfg; @@ -1391,9 +1292,9 @@ ice_devlink_enable_roce_get(struct devlink *devlink, u32 id, return 0; } -static int -ice_devlink_enable_roce_set(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) +static int ice_devlink_enable_roce_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct ice_pf *pf = devlink_priv(devlink); bool roce_ena = ctx->val.vbool; @@ -1442,9 +1343,9 @@ ice_devlink_enable_iw_get(struct devlink *devlink, u32 id, return 0; } -static int -ice_devlink_enable_iw_set(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) +static int ice_devlink_enable_iw_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct ice_pf *pf = devlink_priv(devlink); bool iw_ena = ctx->val.vbool; @@ -1482,7 +1383,12 @@ ice_devlink_enable_iw_validate(struct devlink *devlink, u32 id, return 0; } -static const struct devlink_param ice_devlink_params[] = { +enum ice_param_id { + ICE_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, + ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS, +}; + +static const struct devlink_param ice_dvl_rdma_params[] = { DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_RUNTIME), ice_devlink_enable_roce_get, ice_devlink_enable_roce_set, @@ -1491,7 +1397,16 @@ static const struct devlink_param ice_devlink_params[] = { ice_devlink_enable_iw_get, ice_devlink_enable_iw_set, ice_devlink_enable_iw_validate), +}; +static const struct devlink_param ice_dvl_sched_params[] = { + DEVLINK_PARAM_DRIVER(ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS, + "tx_scheduling_layers", + DEVLINK_PARAM_TYPE_U8, + BIT(DEVLINK_PARAM_CMODE_PERMANENT), + ice_devlink_tx_sched_layers_get, + ice_devlink_tx_sched_layers_set, + ice_devlink_tx_sched_layers_validate), }; static void ice_devlink_free(void *devlink_ptr) @@ -1534,7 +1449,7 @@ void ice_devlink_register(struct ice_pf *pf) { struct devlink *devlink = priv_to_devlink(pf); - devlink_register(devlink); + devl_register(devlink); } /** @@ -1545,197 +1460,38 @@ void ice_devlink_register(struct ice_pf *pf) */ void 
ice_devlink_unregister(struct ice_pf *pf) { - devlink_unregister(priv_to_devlink(pf)); -} - -/** - * ice_devlink_set_switch_id - Set unique switch id based on pci dsn - * @pf: the PF to create a devlink port for - * @ppid: struct with switch id information - */ -static void -ice_devlink_set_switch_id(struct ice_pf *pf, struct netdev_phys_item_id *ppid) -{ - struct pci_dev *pdev = pf->pdev; - u64 id; - - id = pci_get_dsn(pdev); - - ppid->id_len = sizeof(id); - put_unaligned_be64(id, &ppid->id); + devl_unregister(priv_to_devlink(pf)); } int ice_devlink_register_params(struct ice_pf *pf) { struct devlink *devlink = priv_to_devlink(pf); - - return devlink_params_register(devlink, ice_devlink_params, - ARRAY_SIZE(ice_devlink_params)); -} - -void ice_devlink_unregister_params(struct ice_pf *pf) -{ - devlink_params_unregister(priv_to_devlink(pf), ice_devlink_params, - ARRAY_SIZE(ice_devlink_params)); -} - -/** - * ice_devlink_set_port_split_options - Set port split options - * @pf: the PF to set port split options - * @attrs: devlink attributes - * - * Sets devlink port split options based on available FW port options - */ -static void -ice_devlink_set_port_split_options(struct ice_pf *pf, - struct devlink_port_attrs *attrs) -{ - struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX]; - u8 i, active_idx, pending_idx, option_count = ICE_AQC_PORT_OPT_MAX; - bool active_valid, pending_valid; + struct ice_hw *hw = &pf->hw; int status; - status = ice_aq_get_port_options(&pf->hw, options, &option_count, - 0, true, &active_idx, &active_valid, - &pending_idx, &pending_valid); - if (status) { - dev_dbg(ice_pf_to_dev(pf), "Couldn't read port split options, err = %d\n", - status); - return; - } - - /* find the biggest available port split count */ - for (i = 0; i < option_count; i++) - attrs->lanes = max_t(int, attrs->lanes, options[i].pmd); - - attrs->splittable = attrs->lanes ? 1 : 0; - ice_active_port_option = active_idx; -} - -static const struct devlink_port_ops ice_devlink_port_ops = { - .port_split = ice_devlink_port_split, - .port_unsplit = ice_devlink_port_unsplit, -}; - -/** - * ice_devlink_create_pf_port - Create a devlink port for this PF - * @pf: the PF to create a devlink port for - * - * Create and register a devlink_port for this PF. - * This function has to be called under devl_lock. - * - * Return: zero on success or an error code on failure. - */ -int ice_devlink_create_pf_port(struct ice_pf *pf) -{ - struct devlink_port_attrs attrs = {}; - struct devlink_port *devlink_port; - struct devlink *devlink; - struct ice_vsi *vsi; - struct device *dev; - int err; - - devlink = priv_to_devlink(pf); - - dev = ice_pf_to_dev(pf); - - devlink_port = &pf->devlink_port; - - vsi = ice_get_main_vsi(pf); - if (!vsi) - return -EIO; - - attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; - attrs.phys.port_number = pf->hw.bus.func; - - /* As FW supports only port split options for whole device, - * set port split options only for first PF. 
- */ - if (pf->hw.pf_id == 0) - ice_devlink_set_port_split_options(pf, &attrs); - - ice_devlink_set_switch_id(pf, &attrs.switch_id); - - devlink_port_attrs_set(devlink_port, &attrs); - - err = devl_port_register_with_ops(devlink, devlink_port, vsi->idx, - &ice_devlink_port_ops); - if (err) { - dev_err(dev, "Failed to create devlink port for PF %d, error %d\n", - pf->hw.pf_id, err); - return err; - } + status = devl_params_register(devlink, ice_dvl_rdma_params, + ARRAY_SIZE(ice_dvl_rdma_params)); + if (status) + return status; - return 0; -} + if (hw->func_caps.common_cap.tx_sched_topo_comp_mode_en) + status = devl_params_register(devlink, ice_dvl_sched_params, + ARRAY_SIZE(ice_dvl_sched_params)); -/** - * ice_devlink_destroy_pf_port - Destroy the devlink_port for this PF - * @pf: the PF to cleanup - * - * Unregisters the devlink_port structure associated with this PF. - * This function has to be called under devl_lock. - */ -void ice_devlink_destroy_pf_port(struct ice_pf *pf) -{ - devl_port_unregister(&pf->devlink_port); + return status; } -/** - * ice_devlink_create_vf_port - Create a devlink port for this VF - * @vf: the VF to create a port for - * - * Create and register a devlink_port for this VF. - * - * Return: zero on success or an error code on failure. - */ -int ice_devlink_create_vf_port(struct ice_vf *vf) +void ice_devlink_unregister_params(struct ice_pf *pf) { - struct devlink_port_attrs attrs = {}; - struct devlink_port *devlink_port; - struct devlink *devlink; - struct ice_vsi *vsi; - struct device *dev; - struct ice_pf *pf; - int err; - - pf = vf->pf; - dev = ice_pf_to_dev(pf); - devlink_port = &vf->devlink_port; - - vsi = ice_get_vf_vsi(vf); - if (!vsi) - return -EINVAL; - - attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF; - attrs.pci_vf.pf = pf->hw.bus.func; - attrs.pci_vf.vf = vf->vf_id; - - ice_devlink_set_switch_id(pf, &attrs.switch_id); - - devlink_port_attrs_set(devlink_port, &attrs); - devlink = priv_to_devlink(pf); - - err = devlink_port_register(devlink, devlink_port, vsi->idx); - if (err) { - dev_err(dev, "Failed to create devlink port for VF %d, error %d\n", - vf->vf_id, err); - return err; - } + struct devlink *devlink = priv_to_devlink(pf); + struct ice_hw *hw = &pf->hw; - return 0; -} + devl_params_unregister(devlink, ice_dvl_rdma_params, + ARRAY_SIZE(ice_dvl_rdma_params)); -/** - * ice_devlink_destroy_vf_port - Destroy the devlink_port for this VF - * @vf: the VF to cleanup - * - * Unregisters the devlink_port structure associated with this VF. 
- */ -void ice_devlink_destroy_vf_port(struct ice_vf *vf) -{ - devl_rate_leaf_destroy(&vf->devlink_port); - devlink_port_unregister(&vf->devlink_port); + if (hw->func_caps.common_cap.tx_sched_topo_comp_mode_en) + devl_params_unregister(devlink, ice_dvl_sched_params, + ARRAY_SIZE(ice_dvl_sched_params)); } #define ICE_DEVLINK_READ_BLK_SIZE (1024 * 1024) @@ -1976,8 +1732,8 @@ void ice_devlink_init_regions(struct ice_pf *pf) u64 nvm_size, sram_size; nvm_size = pf->hw.flash.flash_size; - pf->nvm_region = devlink_region_create(devlink, &ice_nvm_region_ops, 1, - nvm_size); + pf->nvm_region = devl_region_create(devlink, &ice_nvm_region_ops, 1, + nvm_size); if (IS_ERR(pf->nvm_region)) { dev_err(dev, "failed to create NVM devlink region, err %ld\n", PTR_ERR(pf->nvm_region)); @@ -1985,17 +1741,17 @@ void ice_devlink_init_regions(struct ice_pf *pf) } sram_size = pf->hw.flash.sr_words * 2u; - pf->sram_region = devlink_region_create(devlink, &ice_sram_region_ops, - 1, sram_size); + pf->sram_region = devl_region_create(devlink, &ice_sram_region_ops, + 1, sram_size); if (IS_ERR(pf->sram_region)) { dev_err(dev, "failed to create shadow-ram devlink region, err %ld\n", PTR_ERR(pf->sram_region)); pf->sram_region = NULL; } - pf->devcaps_region = devlink_region_create(devlink, - &ice_devcaps_region_ops, 10, - ICE_AQ_MAX_BUF_LEN); + pf->devcaps_region = devl_region_create(devlink, + &ice_devcaps_region_ops, 10, + ICE_AQ_MAX_BUF_LEN); if (IS_ERR(pf->devcaps_region)) { dev_err(dev, "failed to create device-caps devlink region, err %ld\n", PTR_ERR(pf->devcaps_region)); @@ -2012,11 +1768,11 @@ void ice_devlink_init_regions(struct ice_pf *pf) void ice_devlink_destroy_regions(struct ice_pf *pf) { if (pf->nvm_region) - devlink_region_destroy(pf->nvm_region); + devl_region_destroy(pf->nvm_region); if (pf->sram_region) - devlink_region_destroy(pf->sram_region); + devl_region_destroy(pf->sram_region); if (pf->devcaps_region) - devlink_region_destroy(pf->devcaps_region); + devl_region_destroy(pf->devcaps_region); } diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.h b/drivers/net/ethernet/intel/ice/devlink/devlink.h index d291c0e2e1..d291c0e2e1 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.h +++ b/drivers/net/ethernet/intel/ice/devlink/devlink.h diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c b/drivers/net/ethernet/intel/ice/devlink/devlink_port.c new file mode 100644 index 0000000000..13e6790d3c --- /dev/null +++ b/drivers/net/ethernet/intel/ice/devlink/devlink_port.c @@ -0,0 +1,430 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024, Intel Corporation. 
*/ + +#include <linux/vmalloc.h> + +#include "ice.h" +#include "devlink.h" + +static int ice_active_port_option = -1; + +/** + * ice_devlink_port_opt_speed_str - convert speed to a string + * @speed: speed value + */ +static const char *ice_devlink_port_opt_speed_str(u8 speed) +{ + switch (speed & ICE_AQC_PORT_OPT_MAX_LANE_M) { + case ICE_AQC_PORT_OPT_MAX_LANE_100M: + return "0.1"; + case ICE_AQC_PORT_OPT_MAX_LANE_1G: + return "1"; + case ICE_AQC_PORT_OPT_MAX_LANE_2500M: + return "2.5"; + case ICE_AQC_PORT_OPT_MAX_LANE_5G: + return "5"; + case ICE_AQC_PORT_OPT_MAX_LANE_10G: + return "10"; + case ICE_AQC_PORT_OPT_MAX_LANE_25G: + return "25"; + case ICE_AQC_PORT_OPT_MAX_LANE_50G: + return "50"; + case ICE_AQC_PORT_OPT_MAX_LANE_100G: + return "100"; + } + + return "-"; +} + +#define ICE_PORT_OPT_DESC_LEN 50 +/** + * ice_devlink_port_options_print - Print available port split options + * @pf: the PF to print split port options + * + * Prints a table with available port split options and max port speeds + */ +static void ice_devlink_port_options_print(struct ice_pf *pf) +{ + u8 i, j, options_count, cnt, speed, pending_idx, active_idx; + struct ice_aqc_get_port_options_elem *options, *opt; + struct device *dev = ice_pf_to_dev(pf); + bool active_valid, pending_valid; + char desc[ICE_PORT_OPT_DESC_LEN]; + const char *str; + int status; + + options = kcalloc(ICE_AQC_PORT_OPT_MAX * ICE_MAX_PORT_PER_PCI_DEV, + sizeof(*options), GFP_KERNEL); + if (!options) + return; + + for (i = 0; i < ICE_MAX_PORT_PER_PCI_DEV; i++) { + opt = options + i * ICE_AQC_PORT_OPT_MAX; + options_count = ICE_AQC_PORT_OPT_MAX; + active_valid = 0; + + status = ice_aq_get_port_options(&pf->hw, opt, &options_count, + i, true, &active_idx, + &active_valid, &pending_idx, + &pending_valid); + if (status) { + dev_dbg(dev, "Couldn't read port option for port %d, err %d\n", + i, status); + goto err; + } + } + + dev_dbg(dev, "Available port split options and max port speeds (Gbps):\n"); + dev_dbg(dev, "Status Split Quad 0 Quad 1\n"); + dev_dbg(dev, " count L0 L1 L2 L3 L4 L5 L6 L7\n"); + + for (i = 0; i < options_count; i++) { + cnt = 0; + + if (i == ice_active_port_option) + str = "Active"; + else if ((i == pending_idx) && pending_valid) + str = "Pending"; + else + str = ""; + + cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt, + "%-8s", str); + + cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt, + "%-6u", options[i].pmd); + + for (j = 0; j < ICE_MAX_PORT_PER_PCI_DEV; ++j) { + speed = options[i + j * ICE_AQC_PORT_OPT_MAX].max_lane_speed; + str = ice_devlink_port_opt_speed_str(speed); + cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt, + "%3s ", str); + } + + dev_dbg(dev, "%s\n", desc); + } + +err: + kfree(options); +} + +/** + * ice_devlink_aq_set_port_option - Send set port option admin queue command + * @pf: the PF to print split port options + * @option_idx: selected port option + * @extack: extended netdev ack structure + * + * Sends set port option admin queue command with selected port option and + * calls NVM write activate. 
+ */ +static int +ice_devlink_aq_set_port_option(struct ice_pf *pf, u8 option_idx, + struct netlink_ext_ack *extack) +{ + struct device *dev = ice_pf_to_dev(pf); + int status; + + status = ice_aq_set_port_option(&pf->hw, 0, true, option_idx); + if (status) { + dev_dbg(dev, "ice_aq_set_port_option, err %d aq_err %d\n", + status, pf->hw.adminq.sq_last_status); + NL_SET_ERR_MSG_MOD(extack, "Port split request failed"); + return -EIO; + } + + status = ice_acquire_nvm(&pf->hw, ICE_RES_WRITE); + if (status) { + dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n", + status, pf->hw.adminq.sq_last_status); + NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore"); + return -EIO; + } + + status = ice_nvm_write_activate(&pf->hw, ICE_AQC_NVM_ACTIV_REQ_EMPR, NULL); + if (status) { + dev_dbg(dev, "ice_nvm_write_activate failed, err %d aq_err %d\n", + status, pf->hw.adminq.sq_last_status); + NL_SET_ERR_MSG_MOD(extack, "Port split request failed to save data"); + ice_release_nvm(&pf->hw); + return -EIO; + } + + ice_release_nvm(&pf->hw); + + NL_SET_ERR_MSG_MOD(extack, "Reboot required to finish port split"); + return 0; +} + +/** + * ice_devlink_port_split - .port_split devlink handler + * @devlink: devlink instance structure + * @port: devlink port structure + * @count: number of ports to split to + * @extack: extended netdev ack structure + * + * Callback for the devlink .port_split operation. + * + * Unfortunately, the devlink expression of available options is limited + * to just a number, so search for an FW port option which supports + * the specified number. As there could be multiple FW port options with + * the same port split count, allow switching between them. When the same + * port split count request is issued again, switch to the next FW port + * option with the same port split count. + * + * Return: zero on success or an error code on failure. + */ +static int +ice_devlink_port_split(struct devlink *devlink, struct devlink_port *port, + unsigned int count, struct netlink_ext_ack *extack) +{ + struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX]; + u8 i, j, active_idx, pending_idx, new_option; + struct ice_pf *pf = devlink_priv(devlink); + u8 option_count = ICE_AQC_PORT_OPT_MAX; + struct device *dev = ice_pf_to_dev(pf); + bool active_valid, pending_valid; + int status; + + status = ice_aq_get_port_options(&pf->hw, options, &option_count, + 0, true, &active_idx, &active_valid, + &pending_idx, &pending_valid); + if (status) { + dev_dbg(dev, "Couldn't read port split options, err = %d\n", + status); + NL_SET_ERR_MSG_MOD(extack, "Failed to get available port split options"); + return -EIO; + } + + new_option = ICE_AQC_PORT_OPT_MAX; + active_idx = pending_valid ? pending_idx : active_idx; + for (i = 1; i <= option_count; i++) { + /* In order to allow switching between FW port options with + * the same port split count, search for a new option starting + * from the active/pending option (with array wrap around). 
+ */ + j = (active_idx + i) % option_count; + + if (count == options[j].pmd) { + new_option = j; + break; + } + } + + if (new_option == active_idx) { + dev_dbg(dev, "request to split: count: %u is already set and there are no other options\n", + count); + NL_SET_ERR_MSG_MOD(extack, "Requested split count is already set"); + ice_devlink_port_options_print(pf); + return -EINVAL; + } + + if (new_option == ICE_AQC_PORT_OPT_MAX) { + dev_dbg(dev, "request to split: count: %u not found\n", count); + NL_SET_ERR_MSG_MOD(extack, "Port split requested unsupported port config"); + ice_devlink_port_options_print(pf); + return -EINVAL; + } + + status = ice_devlink_aq_set_port_option(pf, new_option, extack); + if (status) + return status; + + ice_devlink_port_options_print(pf); + + return 0; +} + +/** + * ice_devlink_port_unsplit - .port_unsplit devlink handler + * @devlink: devlink instance structure + * @port: devlink port structure + * @extack: extended netdev ack structure + * + * Callback for the devlink .port_unsplit operation. + * Calls ice_devlink_port_split with split count set to 1. + * There could be no FW option available with split count 1. + * + * Return: zero on success or an error code on failure. + */ +static int +ice_devlink_port_unsplit(struct devlink *devlink, struct devlink_port *port, + struct netlink_ext_ack *extack) +{ + return ice_devlink_port_split(devlink, port, 1, extack); +} + +/** + * ice_devlink_set_port_split_options - Set port split options + * @pf: the PF to set port split options + * @attrs: devlink attributes + * + * Sets devlink port split options based on available FW port options + */ +static void +ice_devlink_set_port_split_options(struct ice_pf *pf, + struct devlink_port_attrs *attrs) +{ + struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX]; + u8 i, active_idx, pending_idx, option_count = ICE_AQC_PORT_OPT_MAX; + bool active_valid, pending_valid; + int status; + + status = ice_aq_get_port_options(&pf->hw, options, &option_count, + 0, true, &active_idx, &active_valid, + &pending_idx, &pending_valid); + if (status) { + dev_dbg(ice_pf_to_dev(pf), "Couldn't read port split options, err = %d\n", + status); + return; + } + + /* find the biggest available port split count */ + for (i = 0; i < option_count; i++) + attrs->lanes = max_t(int, attrs->lanes, options[i].pmd); + + attrs->splittable = attrs->lanes ? 1 : 0; + ice_active_port_option = active_idx; +} + +static const struct devlink_port_ops ice_devlink_port_ops = { + .port_split = ice_devlink_port_split, + .port_unsplit = ice_devlink_port_unsplit, +}; + +/** + * ice_devlink_set_switch_id - Set unique switch id based on pci dsn + * @pf: the PF to create a devlink port for + * @ppid: struct with switch id information + */ +static void +ice_devlink_set_switch_id(struct ice_pf *pf, struct netdev_phys_item_id *ppid) +{ + struct pci_dev *pdev = pf->pdev; + u64 id; + + id = pci_get_dsn(pdev); + + ppid->id_len = sizeof(id); + put_unaligned_be64(id, &ppid->id); +} + +/** + * ice_devlink_create_pf_port - Create a devlink port for this PF + * @pf: the PF to create a devlink port for + * + * Create and register a devlink_port for this PF. + * This function has to be called under devl_lock. + * + * Return: zero on success or an error code on failure. 
+ */ +int ice_devlink_create_pf_port(struct ice_pf *pf) +{ + struct devlink_port_attrs attrs = {}; + struct devlink_port *devlink_port; + struct devlink *devlink; + struct ice_vsi *vsi; + struct device *dev; + int err; + + devlink = priv_to_devlink(pf); + + dev = ice_pf_to_dev(pf); + + devlink_port = &pf->devlink_port; + + vsi = ice_get_main_vsi(pf); + if (!vsi) + return -EIO; + + attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; + attrs.phys.port_number = pf->hw.bus.func; + + /* As FW supports only port split options for whole device, + * set port split options only for first PF. + */ + if (pf->hw.pf_id == 0) + ice_devlink_set_port_split_options(pf, &attrs); + + ice_devlink_set_switch_id(pf, &attrs.switch_id); + + devlink_port_attrs_set(devlink_port, &attrs); + + err = devl_port_register_with_ops(devlink, devlink_port, vsi->idx, + &ice_devlink_port_ops); + if (err) { + dev_err(dev, "Failed to create devlink port for PF %d, error %d\n", + pf->hw.pf_id, err); + return err; + } + + return 0; +} + +/** + * ice_devlink_destroy_pf_port - Destroy the devlink_port for this PF + * @pf: the PF to cleanup + * + * Unregisters the devlink_port structure associated with this PF. + * This function has to be called under devl_lock. + */ +void ice_devlink_destroy_pf_port(struct ice_pf *pf) +{ + devl_port_unregister(&pf->devlink_port); +} + +/** + * ice_devlink_create_vf_port - Create a devlink port for this VF + * @vf: the VF to create a port for + * + * Create and register a devlink_port for this VF. + * + * Return: zero on success or an error code on failure. + */ +int ice_devlink_create_vf_port(struct ice_vf *vf) +{ + struct devlink_port_attrs attrs = {}; + struct devlink_port *devlink_port; + struct devlink *devlink; + struct ice_vsi *vsi; + struct device *dev; + struct ice_pf *pf; + int err; + + pf = vf->pf; + dev = ice_pf_to_dev(pf); + devlink_port = &vf->devlink_port; + + vsi = ice_get_vf_vsi(vf); + if (!vsi) + return -EINVAL; + + attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF; + attrs.pci_vf.pf = pf->hw.bus.func; + attrs.pci_vf.vf = vf->vf_id; + + ice_devlink_set_switch_id(pf, &attrs.switch_id); + + devlink_port_attrs_set(devlink_port, &attrs); + devlink = priv_to_devlink(pf); + + err = devlink_port_register(devlink, devlink_port, vsi->idx); + if (err) { + dev_err(dev, "Failed to create devlink port for VF %d, error %d\n", + vf->vf_id, err); + return err; + } + + return 0; +} + +/** + * ice_devlink_destroy_vf_port - Destroy the devlink_port for this VF + * @vf: the VF to cleanup + * + * Unregisters the devlink_port structure associated with this VF. + */ +void ice_devlink_destroy_vf_port(struct ice_vf *vf) +{ + devl_rate_leaf_destroy(&vf->devlink_port); + devlink_port_unregister(&vf->devlink_port); +} diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.h b/drivers/net/ethernet/intel/ice/devlink/devlink_port.h new file mode 100644 index 0000000000..9223bcdb64 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/devlink/devlink_port.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2024, Intel Corporation. 
*/ + +#ifndef _DEVLINK_PORT_H_ +#define _DEVLINK_PORT_H_ + +int ice_devlink_create_pf_port(struct ice_pf *pf); +void ice_devlink_destroy_pf_port(struct ice_pf *pf); +int ice_devlink_create_vf_port(struct ice_vf *vf); +void ice_devlink_destroy_vf_port(struct ice_vf *vf); + +#endif /* _DEVLINK_PORT_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 8e40f26aa5..99a75a5907 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -77,6 +77,7 @@ #include "ice_gnss.h" #include "ice_irq.h" #include "ice_dpll.h" +#include "ice_adapter.h" #define ICE_BAR0 0 #define ICE_REQ_DESC_MULTIPLE 32 @@ -330,7 +331,6 @@ struct ice_vsi { struct net_device *netdev; struct ice_sw *vsw; /* switch this VSI is on */ struct ice_pf *back; /* back pointer to PF */ - struct ice_port_info *port_info; /* back pointer to port_info */ struct ice_rx_ring **rx_rings; /* Rx ring array */ struct ice_tx_ring **tx_rings; /* Tx ring array */ struct ice_q_vector **q_vectors; /* q_vector array */ @@ -348,12 +348,9 @@ struct ice_vsi { /* tell if only dynamic irq allocation is allowed */ bool irq_dyn_alloc; - enum ice_vsi_type type; u16 vsi_num; /* HW (absolute) index of this VSI */ u16 idx; /* software index in pf->vsi[] */ - struct ice_vf *vf; /* VF associated with this VSI */ - u16 num_gfltr; u16 num_bfltr; @@ -444,12 +441,18 @@ struct ice_vsi { u8 old_numtc; u16 old_ena_tc; - struct ice_channel *ch; - /* setup back reference, to which aggregator node this VSI * corresponds to */ struct ice_agg_node *agg_node; + + struct_group_tagged(ice_vsi_cfg_params, params, + struct ice_port_info *port_info; /* back pointer to port_info */ + struct ice_channel *ch; /* VSI's channel structure, may be NULL */ + struct ice_vf *vf; /* VF associated with this VSI, may be NULL */ + u32 flags; /* VSI flags used for rebuild and configuration */ + enum ice_vsi_type type; /* the type of the VSI */ + ); } ____cacheline_internodealigned_in_smp; /* struct that defines an interrupt vector */ @@ -457,7 +460,7 @@ struct ice_q_vector { struct ice_vsi *vsi; u16 v_idx; /* index in the vsi->q_vector array. 
*/ - u16 reg_idx; + u16 reg_idx; /* PF relative register index */ u8 num_ring_rx; /* total number of Rx rings in vector */ u8 num_ring_tx; /* total number of Tx rings in vector */ u8 wb_on_itr:1; /* if true, WB on ITR is enabled */ @@ -479,6 +482,7 @@ struct ice_q_vector { char name[ICE_INT_NAME_STR_LEN]; u16 total_events; /* net_dim(): number of interrupts processed */ + u16 vf_reg_idx; /* VF relative register index */ struct msi_map irq; } ____cacheline_internodealigned_in_smp; @@ -521,17 +525,10 @@ enum ice_misc_thread_tasks { }; struct ice_eswitch { - struct ice_vsi *control_vsi; struct ice_vsi *uplink_vsi; struct ice_esw_br_offloads *br_offloads; struct xarray reprs; bool is_running; - /* struct to allow cp queues management optimization */ - struct { - int to_reach; - int value; - bool is_reaching; - } qs; }; struct ice_agg_node { @@ -543,6 +540,7 @@ struct ice_agg_node { struct ice_pf { struct pci_dev *pdev; + struct ice_adapter *adapter; struct devlink_region *nvm_region; struct devlink_region *sram_region; diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.c b/drivers/net/ethernet/intel/ice/ice_adapter.c new file mode 100644 index 0000000000..52d15ef7f4 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_adapter.c @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: GPL-2.0-only +// SPDX-FileCopyrightText: Copyright Red Hat + +#include <linux/bitfield.h> +#include <linux/cleanup.h> +#include <linux/mutex.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/xarray.h> +#include "ice_adapter.h" + +static DEFINE_XARRAY(ice_adapters); + +/* PCI bus number is 8 bits. Slot is 5 bits. Domain can have the rest. */ +#define INDEX_FIELD_DOMAIN GENMASK(BITS_PER_LONG - 1, 13) +#define INDEX_FIELD_BUS GENMASK(12, 5) +#define INDEX_FIELD_SLOT GENMASK(4, 0) + +static unsigned long ice_adapter_index(const struct pci_dev *pdev) +{ + unsigned int domain = pci_domain_nr(pdev->bus); + + WARN_ON(domain > FIELD_MAX(INDEX_FIELD_DOMAIN)); + + return FIELD_PREP(INDEX_FIELD_DOMAIN, domain) | + FIELD_PREP(INDEX_FIELD_BUS, pdev->bus->number) | + FIELD_PREP(INDEX_FIELD_SLOT, PCI_SLOT(pdev->devfn)); +} + +static struct ice_adapter *ice_adapter_new(void) +{ + struct ice_adapter *adapter; + + adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); + if (!adapter) + return NULL; + + spin_lock_init(&adapter->ptp_gltsyn_time_lock); + refcount_set(&adapter->refcount, 1); + + return adapter; +} + +static void ice_adapter_free(struct ice_adapter *adapter) +{ + kfree(adapter); +} + +DEFINE_FREE(ice_adapter_free, struct ice_adapter*, if (_T) ice_adapter_free(_T)) + +/** + * ice_adapter_get - Get a shared ice_adapter structure. + * @pdev: Pointer to the pci_dev whose driver is getting the ice_adapter. + * + * Gets a pointer to a shared ice_adapter structure. Physical functions (PFs) + * of the same multi-function PCI device share one ice_adapter structure. + * The ice_adapter is reference-counted. The PF driver must use ice_adapter_put + * to release its reference. + * + * Context: Process, may sleep. + * Return: Pointer to ice_adapter on success. + * ERR_PTR() on error. -ENOMEM is the only possible error. 
+ */ +struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev) +{ + struct ice_adapter *ret, __free(ice_adapter_free) *adapter = NULL; + unsigned long index = ice_adapter_index(pdev); + + adapter = ice_adapter_new(); + if (!adapter) + return ERR_PTR(-ENOMEM); + + xa_lock(&ice_adapters); + ret = __xa_cmpxchg(&ice_adapters, index, NULL, adapter, GFP_KERNEL); + if (xa_is_err(ret)) { + ret = ERR_PTR(xa_err(ret)); + goto unlock; + } + if (ret) { + refcount_inc(&ret->refcount); + goto unlock; + } + ret = no_free_ptr(adapter); +unlock: + xa_unlock(&ice_adapters); + return ret; +} + +/** + * ice_adapter_put - Release a reference to the shared ice_adapter structure. + * @pdev: Pointer to the pci_dev whose driver is releasing the ice_adapter. + * + * Releases the reference to ice_adapter previously obtained with + * ice_adapter_get. + * + * Context: Any. + */ +void ice_adapter_put(const struct pci_dev *pdev) +{ + unsigned long index = ice_adapter_index(pdev); + struct ice_adapter *adapter; + + xa_lock(&ice_adapters); + adapter = xa_load(&ice_adapters, index); + if (WARN_ON(!adapter)) + goto unlock; + + if (!refcount_dec_and_test(&adapter->refcount)) + goto unlock; + + WARN_ON(__xa_erase(&ice_adapters, index) != adapter); + ice_adapter_free(adapter); +unlock: + xa_unlock(&ice_adapters); +} diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.h b/drivers/net/ethernet/intel/ice/ice_adapter.h new file mode 100644 index 0000000000..9d11014ec0 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_adapter.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* SPDX-FileCopyrightText: Copyright Red Hat */ + +#ifndef _ICE_ADAPTER_H_ +#define _ICE_ADAPTER_H_ + +#include <linux/spinlock_types.h> +#include <linux/refcount_types.h> + +struct pci_dev; + +/** + * struct ice_adapter - PCI adapter resources shared across PFs + * @ptp_gltsyn_time_lock: Spinlock protecting access to the GLTSYN_TIME + * register of the PTP clock. + * @refcount: Reference count. struct ice_pf objects hold the references. 
+ */ +struct ice_adapter { + /* For access to the GLTSYN_TIME register */ + spinlock_t ptp_gltsyn_time_lock; + + refcount_t refcount; +}; + +struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev); +void ice_adapter_put(const struct pci_dev *pdev); + +#endif /* _ICE_ADAPTER_H */ diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 1f3e7a6903..e76c388b99 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -121,6 +121,7 @@ struct ice_aqc_list_caps_elem { #define ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE 0x0076 #define ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077 #define ICE_AQC_CAPS_NVM_MGMT 0x0080 +#define ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE 0x0085 #define ICE_AQC_CAPS_FW_LAG_SUPPORT 0x0092 #define ICE_AQC_BIT_ROCEV2_LAG 0x01 #define ICE_AQC_BIT_SRIOV_LAG 0x02 @@ -264,6 +265,8 @@ struct ice_aqc_set_port_params { #define ICE_AQC_RES_TYPE_FLAG_SHARED BIT(7) #define ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM BIT(12) #define ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX BIT(13) +#define ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_SHARED BIT(14) +#define ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_CTL BIT(15) #define ICE_AQC_RES_TYPE_FLAG_DEDICATED 0x00 @@ -808,6 +811,23 @@ struct ice_aqc_get_topo { __le32 addr_low; }; +/* Get/Set Tx Topology (indirect 0x0418/0x0417) */ +struct ice_aqc_get_set_tx_topo { + u8 set_flags; +#define ICE_AQC_TX_TOPO_FLAGS_CORRER BIT(0) +#define ICE_AQC_TX_TOPO_FLAGS_SRC_RAM BIT(1) +#define ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW BIT(4) +#define ICE_AQC_TX_TOPO_FLAGS_ISSUED BIT(5) + + u8 get_flags; +#define ICE_AQC_TX_TOPO_GET_RAM 2 + + __le16 reserved1; + __le32 reserved2; + __le32 addr_high; + __le32 addr_low; +}; + /* Update TSE (indirect 0x0403) * Get TSE (indirect 0x0404) * Add TSE (indirect 0x0401) @@ -1664,6 +1684,15 @@ struct ice_aqc_nvm { #define ICE_AQC_NVM_START_POINT 0 +#define ICE_AQC_NVM_TX_TOPO_MOD_ID 0x14B + +struct ice_aqc_nvm_tx_topo_user_sel { + __le16 length; + u8 data; +#define ICE_AQC_NVM_TX_TOPO_USER_SEL BIT(4) + u8 reserved; +}; + /* NVM Checksum Command (direct, 0x0706) */ struct ice_aqc_nvm_checksum { u8 flags; @@ -2536,6 +2565,7 @@ struct ice_aq_desc { struct ice_aqc_get_link_topo get_link_topo; struct ice_aqc_i2c read_write_i2c; struct ice_aqc_read_i2c_resp read_i2c_resp; + struct ice_aqc_get_set_tx_topo get_set_tx_topo; } params; }; @@ -2642,6 +2672,10 @@ enum ice_adminq_opc { ice_aqc_opc_query_sched_res = 0x0412, ice_aqc_opc_remove_rl_profiles = 0x0415, + /* tx topology commands */ + ice_aqc_opc_set_tx_topo = 0x0417, + ice_aqc_opc_get_tx_topo = 0x0418, + /* PHY commands */ ice_aqc_opc_get_phy_caps = 0x0600, ice_aqc_opc_set_phy_cfg = 0x0601, diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index 9d23a436d2..5d396c1a77 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -121,7 +121,7 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx) q_vector->irq.index = -ENOENT; if (vsi->type == ICE_VSI_VF) { - q_vector->reg_idx = ice_calc_vf_reg_idx(vsi->vf, q_vector); + ice_calc_vf_reg_idx(vsi->vf, q_vector); goto out; } else if (vsi->type == ICE_VSI_CTRL && vsi->vf) { struct ice_vsi *ctrl_vsi = ice_get_vf_ctrl_vsi(pf, vsi); @@ -145,6 +145,7 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx) skip_alloc: q_vector->reg_idx = q_vector->irq.index; + q_vector->vf_reg_idx = q_vector->irq.index; /* only set affinity_mask if the CPU is online */ 
if (cpu_online(v_idx)) @@ -264,30 +265,6 @@ static u16 ice_calc_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8 } /** - * ice_eswitch_calc_txq_handle - * @ring: pointer to ring which unique index is needed - * - * To correctly work with many netdevs ring->q_index of Tx rings on switchdev - * VSI can repeat. Hardware ring setup requires unique q_index. Calculate it - * here by finding index in vsi->tx_rings of this ring. - * - * Return ICE_INVAL_Q_INDEX when index wasn't found. Should never happen, - * because VSI is get from ring->vsi, so it has to be present in this VSI. - */ -static u16 ice_eswitch_calc_txq_handle(struct ice_tx_ring *ring) -{ - const struct ice_vsi *vsi = ring->vsi; - int i; - - ice_for_each_txq(vsi, i) { - if (vsi->tx_rings[i] == ring) - return i; - } - - return ICE_INVAL_Q_INDEX; -} - -/** * ice_cfg_xps_tx_ring - Configure XPS for a Tx ring * @ring: The Tx ring to configure * @@ -353,9 +330,6 @@ ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id; tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF; break; - case ICE_VSI_SWITCHDEV_CTRL: - tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ; - break; default: return; } @@ -479,6 +453,14 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring) /* Rx queue threshold in units of 64 */ rlan_ctx.lrxqthresh = 1; + /* PF acts as uplink for switchdev; set flex descriptor with src_vsi + * metadata and flags to allow redirecting to PR netdev + */ + if (ice_is_eswitch_mode_switchdev(vsi->back)) { + ring->flags |= ICE_RX_FLAGS_MULTIDEV; + rxdid = ICE_RXDID_FLEX_NIC_2; + } + /* Enable Flexible Descriptors in the queue context which * allows this driver to select a specific receive descriptor format * increasing context priority to pick up profile ID; default is 0x01; @@ -922,14 +904,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring, /* Add unique software queue handle of the Tx queue per * TC into the VSI Tx ring */ - if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) { - ring->q_handle = ice_eswitch_calc_txq_handle(ring); - - if (ring->q_handle == ICE_INVAL_Q_INDEX) - return -ENODEV; - } else { - ring->q_handle = ice_calc_txq_handle(vsi, ring, tc); - } + ring->q_handle = ice_calc_txq_handle(vsi, ring, tc); if (ch) status = ice_ena_vsi_txq(vsi->port_info, ch->ch_vsi->idx, 0, diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index e7d28432ba..24716a3b49 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -160,10 +160,16 @@ static int ice_set_mac_type(struct ice_hw *hw) case ICE_DEV_ID_E825C_SGMII: hw->mac_type = ICE_MAC_GENERIC_3K_E825; break; - case ICE_DEV_ID_E830_BACKPLANE: - case ICE_DEV_ID_E830_QSFP56: - case ICE_DEV_ID_E830_SFP: - case ICE_DEV_ID_E830_SFP_DD: + case ICE_DEV_ID_E830CC_BACKPLANE: + case ICE_DEV_ID_E830CC_QSFP56: + case ICE_DEV_ID_E830CC_SFP: + case ICE_DEV_ID_E830CC_SFP_DD: + case ICE_DEV_ID_E830C_BACKPLANE: + case ICE_DEV_ID_E830_XXV_BACKPLANE: + case ICE_DEV_ID_E830C_QSFP: + case ICE_DEV_ID_E830_XXV_QSFP: + case ICE_DEV_ID_E830C_SFP: + case ICE_DEV_ID_E830_XXV_SFP: hw->mac_type = ICE_MAC_E830; break; default: @@ -1142,6 +1148,8 @@ int ice_init_hw(struct ice_hw *hw) if (status) goto err_unroll_fltr_mgmt_struct; mutex_init(&hw->tnl_lock); + ice_init_chk_recipe_reuse_support(hw); + return 0; err_unroll_fltr_mgmt_struct: @@ -1615,6 +1623,8 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, 
void *buf, case ice_aqc_opc_set_port_params: case ice_aqc_opc_get_vlan_mode_parameters: case ice_aqc_opc_set_vlan_mode_parameters: + case ice_aqc_opc_set_tx_topo: + case ice_aqc_opc_get_tx_topo: case ice_aqc_opc_add_recipe: case ice_aqc_opc_recipe_to_profile: case ice_aqc_opc_get_recipe: @@ -2171,6 +2181,9 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, ice_debug(hw, ICE_DBG_INIT, "%s: sriov_lag = %u\n", prefix, caps->sriov_lag); break; + case ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE: + caps->tx_sched_topo_comp_mode_en = (number == 1); + break; default: /* Not one of the recognized common capabilities */ found = false; diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index 6e20ee6100..a94e7072b5 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -3,7 +3,7 @@ #include "ice_dcb_lib.h" #include "ice_dcb_nl.h" -#include "ice_devlink.h" +#include "devlink/devlink.h" /** * ice_dcb_get_ena_tc - return bitmap of enabled TCs @@ -291,7 +291,6 @@ static void ice_dcb_ena_dis_vsi(struct ice_pf *pf, bool ena, bool locked) switch (vsi->type) { case ICE_VSI_CHNL: - case ICE_VSI_SWITCHDEV_CTRL: case ICE_VSI_PF: if (ena) ice_ena_vsi(vsi, locked); @@ -776,8 +775,7 @@ void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked) /* no need to proceed with remaining cfg if it is CHNL * or switchdev VSI */ - if (vsi->type == ICE_VSI_CHNL || - vsi->type == ICE_VSI_SWITCHDEV_CTRL) + if (vsi->type == ICE_VSI_CHNL) continue; ice_vsi_map_rings_to_vectors(vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c index 6e7d58243c..f182179529 100644 --- a/drivers/net/ethernet/intel/ice/ice_ddp.c +++ b/drivers/net/ethernet/intel/ice/ice_ddp.c @@ -4,6 +4,7 @@ #include "ice_common.h" #include "ice.h" #include "ice_ddp.h" +#include "ice_sched.h" /* For supporting double VLAN mode, it is necessary to enable or disable certain * boost tcam entries. The metadata labels names that match the following @@ -721,6 +722,12 @@ static bool ice_is_gtp_c_profile(u16 prof_idx) } } +static bool ice_is_pfcp_profile(u16 prof_idx) +{ + return prof_idx >= ICE_PROFID_IPV4_PFCP_NODE && + prof_idx <= ICE_PROFID_IPV6_PFCP_SESSION; +} + /** * ice_get_sw_prof_type - determine switch profile type * @hw: pointer to the HW structure @@ -738,6 +745,9 @@ static enum ice_prof_type ice_get_sw_prof_type(struct ice_hw *hw, if (ice_is_gtp_u_profile(prof_idx)) return ICE_PROF_TUN_GTPU; + if (ice_is_pfcp_profile(prof_idx)) + return ICE_PROF_TUN_PFCP; + for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) { /* UDP tunnel will have UDP_OF protocol ID and VNI offset */ if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF && @@ -2282,3 +2292,211 @@ enum ice_ddp_state ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, return state; } + +/** + * ice_get_set_tx_topo - get or set Tx topology + * @hw: pointer to the HW struct + * @buf: pointer to Tx topology buffer + * @buf_size: buffer size + * @cd: pointer to command details structure or NULL + * @flags: pointer to descriptor flags + * @set: 0-get, 1-set topology + * + * The function will get or set Tx topology + * + * Return: zero when set was successful, negative values otherwise. 
+ */ +static int +ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size, + struct ice_sq_cd *cd, u8 *flags, bool set) +{ + struct ice_aqc_get_set_tx_topo *cmd; + struct ice_aq_desc desc; + int status; + + cmd = &desc.params.get_set_tx_topo; + if (set) { + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_tx_topo); + cmd->set_flags = ICE_AQC_TX_TOPO_FLAGS_ISSUED; + /* requested to update a new topology, not a default topology */ + if (buf) + cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM | + ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW; + + if (ice_is_e825c(hw)) + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + } else { + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo); + cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM; + } + + if (!ice_is_e825c(hw)) + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); + if (status) + return status; + /* read the return flag values (first byte) for get operation */ + if (!set && flags) + *flags = desc.params.get_set_tx_topo.set_flags; + + return 0; +} + +/** + * ice_cfg_tx_topo - Initialize new Tx topology if available + * @hw: pointer to the HW struct + * @buf: pointer to Tx topology buffer + * @len: buffer size + * + * The function will apply the new Tx topology from the package buffer + * if available. + * + * Return: zero when update was successful, negative values otherwise. + */ +int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len) +{ + u8 *current_topo, *new_topo = NULL; + struct ice_run_time_cfg_seg *seg; + struct ice_buf_hdr *section; + struct ice_pkg_hdr *pkg_hdr; + enum ice_ddp_state state; + u16 offset, size = 0; + u32 reg = 0; + int status; + u8 flags; + + if (!buf || !len) + return -EINVAL; + + /* Does FW support new Tx topology mode ? */ + if (!hw->func_caps.common_cap.tx_sched_topo_comp_mode_en) { + ice_debug(hw, ICE_DBG_INIT, "FW doesn't support compatibility mode\n"); + return -EOPNOTSUPP; + } + + current_topo = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); + if (!current_topo) + return -ENOMEM; + + /* Get the current Tx topology */ + status = ice_get_set_tx_topo(hw, current_topo, ICE_AQ_MAX_BUF_LEN, NULL, + &flags, false); + + kfree(current_topo); + + if (status) { + ice_debug(hw, ICE_DBG_INIT, "Get current topology is failed\n"); + return status; + } + + /* Is default topology already applied ? */ + if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) && + hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS) { + ice_debug(hw, ICE_DBG_INIT, "Default topology already applied\n"); + return -EEXIST; + } + + /* Is new topology already applied ? */ + if ((flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) && + hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS) { + ice_debug(hw, ICE_DBG_INIT, "New topology already applied\n"); + return -EEXIST; + } + + /* Setting topology already issued? 
+	/* Setting topology already issued? */
+	if (flags & ICE_AQC_TX_TOPO_FLAGS_ISSUED) {
+		ice_debug(hw, ICE_DBG_INIT, "Tx topology update already issued by another PF\n");
+		/* Add a small delay before exiting */
+		msleep(2000);
+		return -EEXIST;
+	}
+
+	/* Change the topology from new to default (5 to 9) */
+	if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
+	    hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS) {
+		ice_debug(hw, ICE_DBG_INIT, "Change topology from 5 to 9 layers\n");
+		goto update_topo;
+	}
+
+	pkg_hdr = (struct ice_pkg_hdr *)buf;
+	state = ice_verify_pkg(pkg_hdr, len);
+	if (state) {
+		ice_debug(hw, ICE_DBG_INIT, "Failed to verify pkg (err: %d)\n",
+			  state);
+		return -EIO;
+	}
+
+	/* Find runtime configuration segment */
+	seg = (struct ice_run_time_cfg_seg *)
+	      ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE_RUN_TIME_CFG, pkg_hdr);
+	if (!seg) {
+		ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment is missing\n");
+		return -EIO;
+	}
+
+	if (le32_to_cpu(seg->buf_table.buf_count) < ICE_MIN_S_COUNT) {
+		ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment count (%d) is wrong\n",
+			  seg->buf_table.buf_count);
+		return -EIO;
+	}
+
+	section = ice_pkg_val_buf(seg->buf_table.buf_array);
+	if (!section || le32_to_cpu(section->section_entry[0].type) !=
+	    ICE_SID_TX_5_LAYER_TOPO) {
+		ice_debug(hw, ICE_DBG_INIT, "5 layer topology section type is wrong\n");
+		return -EIO;
+	}
+
+	size = le16_to_cpu(section->section_entry[0].size);
+	offset = le16_to_cpu(section->section_entry[0].offset);
+	if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ) {
+		ice_debug(hw, ICE_DBG_INIT, "5 layer topology section size is wrong\n");
+		return -EIO;
+	}
+
+	/* Make sure the section fits in the buffer */
+	if (offset + size > ICE_PKG_BUF_SIZE) {
+		ice_debug(hw, ICE_DBG_INIT, "5 layer topology buffer > 4K\n");
+		return -EIO;
+	}
+
+	/* Get the new topology buffer */
+	new_topo = ((u8 *)section) + offset;
+
+update_topo:
+	/* Acquire the global lock to make sure the set-topology command is
+	 * issued by only one PF.
+	 */
+	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, ICE_RES_WRITE,
+				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);
+	if (status) {
+		ice_debug(hw, ICE_DBG_INIT, "Failed to acquire global lock\n");
+		return status;
+	}
+
+	/* Check if reset was triggered already. */
+	reg = rd32(hw, GLGEN_RSTAT);
+	if (reg & GLGEN_RSTAT_DEVSTATE_M) {
+		/* Reset is in progress, re-init the HW again */
+		ice_debug(hw, ICE_DBG_INIT, "Reset is in progress. The layer topology might already be applied\n");
+		ice_check_reset(hw);
+		return 0;
+	}
+
+	/* Set new topology */
+	status = ice_get_set_tx_topo(hw, new_topo, size, NULL, NULL, true);
+	if (status) {
+		ice_debug(hw, ICE_DBG_INIT, "Failed setting Tx topology\n");
+		return status;
+	}
+
+	/* New topology is updated, delay 1 second before issuing the CORER */
+	msleep(1000);
+	ice_reset(hw, ICE_RESET_CORER);
+	/* CORER will clear the global lock, so no explicit call
+	 * is required for release.
+ */ + + return 0; +} diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.h b/drivers/net/ethernet/intel/ice/ice_ddp.h index ff66c2ffb1..622543f08b 100644 --- a/drivers/net/ethernet/intel/ice/ice_ddp.h +++ b/drivers/net/ethernet/intel/ice/ice_ddp.h @@ -454,4 +454,6 @@ u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld); void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state, u32 sect_type); +int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len); + #endif diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h index 9dfae9bce7..34fd604132 100644 --- a/drivers/net/ethernet/intel/ice/ice_devids.h +++ b/drivers/net/ethernet/intel/ice/ice_devids.h @@ -16,14 +16,26 @@ #define ICE_DEV_ID_E823L_1GBE 0x124F /* Intel(R) Ethernet Connection E823-L for QSFP */ #define ICE_DEV_ID_E823L_QSFP 0x151D +/* Intel(R) Ethernet Controller E830-CC for backplane */ +#define ICE_DEV_ID_E830CC_BACKPLANE 0x12D1 +/* Intel(R) Ethernet Controller E830-CC for QSFP */ +#define ICE_DEV_ID_E830CC_QSFP56 0x12D2 +/* Intel(R) Ethernet Controller E830-CC for SFP */ +#define ICE_DEV_ID_E830CC_SFP 0x12D3 +/* Intel(R) Ethernet Controller E830-CC for SFP-DD */ +#define ICE_DEV_ID_E830CC_SFP_DD 0x12D4 /* Intel(R) Ethernet Controller E830-C for backplane */ -#define ICE_DEV_ID_E830_BACKPLANE 0x12D1 +#define ICE_DEV_ID_E830C_BACKPLANE 0x12D5 /* Intel(R) Ethernet Controller E830-C for QSFP */ -#define ICE_DEV_ID_E830_QSFP56 0x12D2 +#define ICE_DEV_ID_E830C_QSFP 0x12D8 /* Intel(R) Ethernet Controller E830-C for SFP */ -#define ICE_DEV_ID_E830_SFP 0x12D3 -/* Intel(R) Ethernet Controller E830-C for SFP-DD */ -#define ICE_DEV_ID_E830_SFP_DD 0x12D4 +#define ICE_DEV_ID_E830C_SFP 0x12DA +/* Intel(R) Ethernet Controller E830-XXV for backplane */ +#define ICE_DEV_ID_E830_XXV_BACKPLANE 0x12DC +/* Intel(R) Ethernet Controller E830-XXV for QSFP */ +#define ICE_DEV_ID_E830_XXV_QSFP 0x12DD +/* Intel(R) Ethernet Controller E830-XXV for SFP */ +#define ICE_DEV_ID_E830_XXV_SFP 0x12DE /* Intel(R) Ethernet Controller E810-C for backplane */ #define ICE_DEV_ID_E810C_BACKPLANE 0x1591 /* Intel(R) Ethernet Controller E810-C for QSFP */ diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c index 9069725c71..b102db8b82 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.c +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c @@ -7,89 +7,10 @@ #include "ice_eswitch_br.h" #include "ice_fltr.h" #include "ice_repr.h" -#include "ice_devlink.h" +#include "devlink/devlink.h" #include "ice_tc_lib.h" /** - * ice_eswitch_del_sp_rules - delete adv rules added on PRs - * @pf: pointer to the PF struct - * - * Delete all advanced rules that were used to forward packets with the - * device's VSI index to the corresponding eswitch ctrl VSI queue. - */ -static void ice_eswitch_del_sp_rules(struct ice_pf *pf) -{ - struct ice_repr *repr; - unsigned long id; - - xa_for_each(&pf->eswitch.reprs, id, repr) { - if (repr->sp_rule.rid) - ice_rem_adv_rule_by_id(&pf->hw, &repr->sp_rule); - } -} - -/** - * ice_eswitch_add_sp_rule - add adv rule with device's VSI index - * @pf: pointer to PF struct - * @repr: pointer to the repr struct - * - * This function adds advanced rule that forwards packets with - * device's VSI index to the corresponding eswitch ctrl VSI queue. 
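On the device-ID changes above: the IDs formerly named ICE_DEV_ID_E830_* (0x12D1-0x12D4) now denote the E830-CC variants, and fresh IDs are added for the E830-C and E830-XXV parts. New IDs only take effect once wired into the driver's PCI ID table (ice_pci_tbl in ice_main.c, outside this hunk); a sketch of what such entries would look like — the table below is an assumption for illustration, not part of the patch:

	/* sketch: PCI ID table entries for the new E830 variants */
	static const struct pci_device_id e830_ids_sketch[] = {
		{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_BACKPLANE) },
		{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_QSFP56) },
		{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_SFP) },
		{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_SFP_DD) },
		{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_BACKPLANE) },
		{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_QSFP) },
		{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_SFP) },
		{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_BACKPLANE) },
		{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_QSFP) },
		{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_SFP) },
		{ /* end of table */ }
	};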
- */ -static int ice_eswitch_add_sp_rule(struct ice_pf *pf, struct ice_repr *repr) -{ - struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi; - struct ice_adv_rule_info rule_info = { 0 }; - struct ice_adv_lkup_elem *list; - struct ice_hw *hw = &pf->hw; - const u16 lkups_cnt = 1; - int err; - - list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC); - if (!list) - return -ENOMEM; - - ice_rule_add_src_vsi_metadata(list); - - rule_info.sw_act.flag = ICE_FLTR_TX; - rule_info.sw_act.vsi_handle = ctrl_vsi->idx; - rule_info.sw_act.fltr_act = ICE_FWD_TO_Q; - rule_info.sw_act.fwd_id.q_id = hw->func_caps.common_cap.rxq_first_id + - ctrl_vsi->rxq_map[repr->q_id]; - rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE; - rule_info.flags_info.act_valid = true; - rule_info.tun_type = ICE_SW_TUN_AND_NON_TUN; - rule_info.src_vsi = repr->src_vsi->idx; - - err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, - &repr->sp_rule); - if (err) - dev_err(ice_pf_to_dev(pf), "Unable to add slow-path rule for eswitch for PR %d", - repr->id); - - kfree(list); - return err; -} - -static int -ice_eswitch_add_sp_rules(struct ice_pf *pf) -{ - struct ice_repr *repr; - unsigned long id; - int err; - - xa_for_each(&pf->eswitch.reprs, id, repr) { - err = ice_eswitch_add_sp_rule(pf, repr); - if (err) { - ice_eswitch_del_sp_rules(pf); - return err; - } - } - - return 0; -} - -/** * ice_eswitch_setup_env - configure eswitch HW filters * @pf: pointer to PF struct * @@ -99,10 +20,13 @@ ice_eswitch_add_sp_rules(struct ice_pf *pf) static int ice_eswitch_setup_env(struct ice_pf *pf) { struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi; - struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi; struct net_device *netdev = uplink_vsi->netdev; + bool if_running = netif_running(netdev); struct ice_vsi_vlan_ops *vlan_ops; - bool rule_added = false; + + if (if_running && !test_and_set_bit(ICE_VSI_DOWN, uplink_vsi->state)) + if (ice_down(uplink_vsi)) + return -ENODEV; ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx); @@ -112,98 +36,53 @@ static int ice_eswitch_setup_env(struct ice_pf *pf) netif_addr_unlock_bh(netdev); if (ice_vsi_add_vlan_zero(uplink_vsi)) + goto err_vlan_zero; + + if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true, + ICE_FLTR_RX)) goto err_def_rx; - if (!ice_is_dflt_vsi_in_use(uplink_vsi->port_info)) { - if (ice_set_dflt_vsi(uplink_vsi)) - goto err_def_rx; - rule_added = true; - } + if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true, + ICE_FLTR_TX)) + goto err_def_tx; vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi); if (vlan_ops->dis_rx_filtering(uplink_vsi)) - goto err_dis_rx; + goto err_vlan_filtering; if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override)) goto err_override_uplink; - if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override)) - goto err_override_control; - if (ice_vsi_update_local_lb(uplink_vsi, true)) goto err_override_local_lb; + if (if_running && ice_up(uplink_vsi)) + goto err_up; + return 0; +err_up: + ice_vsi_update_local_lb(uplink_vsi, false); err_override_local_lb: - ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override); -err_override_control: ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override); err_override_uplink: vlan_ops->ena_rx_filtering(uplink_vsi); -err_dis_rx: - if (rule_added) - ice_clear_dflt_vsi(uplink_vsi); +err_vlan_filtering: + ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false, + ICE_FLTR_TX); +err_def_tx: + ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false, + ICE_FLTR_RX); 
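The rewritten ice_eswitch_setup_env() above brackets the uplink reconfiguration with ice_down()/ice_up() when the interface was running, and its error ladder unwinds in exact reverse order of setup. Reduced to a sketch — apply_config() and revert_config() are hypothetical placeholders, not driver functions:

	static int reconfig_bracketed(struct ice_vsi *vsi)
	{
		bool was_running = netif_running(vsi->netdev);

		if (was_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state))
			if (ice_down(vsi))
				return -ENODEV;

		if (apply_config(vsi))		/* hypothetical step */
			goto err_cfg;

		if (was_running && ice_up(vsi))
			goto err_up;

		return 0;

	err_up:
		revert_config(vsi);		/* undo in reverse order */
	err_cfg:
		if (was_running)
			ice_up(vsi);		/* best-effort restore */
		return -ENODEV;
	}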
err_def_rx: + ice_vsi_del_vlan_zero(uplink_vsi); +err_vlan_zero: ice_fltr_add_mac_and_broadcast(uplink_vsi, uplink_vsi->port_info->mac.perm_addr, ICE_FWD_TO_VSI); - return -ENODEV; -} - -/** - * ice_eswitch_remap_rings_to_vectors - reconfigure rings of eswitch ctrl VSI - * @eswitch: pointer to eswitch struct - * - * In eswitch number of allocated Tx/Rx rings is equal. - * - * This function fills q_vectors structures associated with representor and - * move each ring pairs to port representor netdevs. Each port representor - * will have dedicated 1 Tx/Rx ring pair, so number of rings pair is equal to - * number of VFs. - */ -static void ice_eswitch_remap_rings_to_vectors(struct ice_eswitch *eswitch) -{ - struct ice_vsi *vsi = eswitch->control_vsi; - unsigned long repr_id = 0; - int q_id; - - ice_for_each_txq(vsi, q_id) { - struct ice_q_vector *q_vector; - struct ice_tx_ring *tx_ring; - struct ice_rx_ring *rx_ring; - struct ice_repr *repr; - - repr = xa_find(&eswitch->reprs, &repr_id, U32_MAX, - XA_PRESENT); - if (!repr) - break; - - repr_id += 1; - repr->q_id = q_id; - q_vector = repr->q_vector; - tx_ring = vsi->tx_rings[q_id]; - rx_ring = vsi->rx_rings[q_id]; - - q_vector->vsi = vsi; - q_vector->reg_idx = vsi->q_vectors[0]->reg_idx; - - q_vector->num_ring_tx = 1; - q_vector->tx.tx_ring = tx_ring; - tx_ring->q_vector = q_vector; - tx_ring->next = NULL; - tx_ring->netdev = repr->netdev; - /* In switchdev mode, from OS stack perspective, there is only - * one queue for given netdev, so it needs to be indexed as 0. - */ - tx_ring->q_index = 0; + if (if_running) + ice_up(uplink_vsi); - q_vector->num_ring_rx = 1; - q_vector->rx.rx_ring = rx_ring; - rx_ring->q_vector = q_vector; - rx_ring->next = NULL; - rx_ring->netdev = repr->netdev; - } + return -ENODEV; } /** @@ -225,8 +104,6 @@ ice_eswitch_release_repr(struct ice_pf *pf, struct ice_repr *repr) repr->dst = NULL; ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac, ICE_FWD_TO_VSI); - - netif_napi_del(&repr->q_vector->napi); } /** @@ -236,7 +113,7 @@ ice_eswitch_release_repr(struct ice_pf *pf, struct ice_repr *repr) */ static int ice_eswitch_setup_repr(struct ice_pf *pf, struct ice_repr *repr) { - struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi; + struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi; struct ice_vsi *vsi = repr->src_vsi; struct metadata_dst *dst; @@ -252,15 +129,11 @@ static int ice_eswitch_setup_repr(struct ice_pf *pf, struct ice_repr *repr) if (ice_vsi_add_vlan_zero(vsi)) goto err_update_security; - netif_napi_add(repr->netdev, &repr->q_vector->napi, - ice_napi_poll); - - netif_keep_dst(repr->netdev); + netif_keep_dst(uplink_vsi->netdev); dst = repr->dst; dst->u.port_info.port_id = vsi->vsi_num; - dst->u.port_info.lower_dev = repr->netdev; - ice_repr_set_traffic_vsi(repr, ctrl_vsi); + dst->u.port_info.lower_dev = uplink_vsi->netdev; return 0; @@ -318,27 +191,19 @@ void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi) netdev_tx_t ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev) { - struct ice_netdev_priv *np; - struct ice_repr *repr; - struct ice_vsi *vsi; - - np = netdev_priv(netdev); - vsi = np->vsi; - - if (!vsi || !ice_is_switchdev_running(vsi->back)) - return NETDEV_TX_BUSY; - - if (ice_is_reset_in_progress(vsi->back->state) || - test_bit(ICE_VF_DIS, vsi->back->state)) - return NETDEV_TX_BUSY; + struct ice_repr *repr = ice_netdev_to_repr(netdev); + unsigned int len = skb->len; + int ret; - repr = ice_netdev_to_repr(netdev); skb_dst_drop(skb); dst_hold((struct dst_entry 
*)repr->dst); skb_dst_set(skb, (struct dst_entry *)repr->dst); - skb->queue_mapping = repr->q_id; + skb->dev = repr->dst->u.port_info.lower_dev; + + ret = dev_queue_xmit(skb); + ice_repr_inc_tx_stats(repr, len, ret); - return ice_start_xmit(skb, netdev); + return ret; } /** @@ -374,71 +239,29 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb, static void ice_eswitch_release_env(struct ice_pf *pf) { struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi; - struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi; struct ice_vsi_vlan_ops *vlan_ops; vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi); ice_vsi_update_local_lb(uplink_vsi, false); - ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override); ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override); vlan_ops->ena_rx_filtering(uplink_vsi); - ice_clear_dflt_vsi(uplink_vsi); + ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false, + ICE_FLTR_TX); + ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false, + ICE_FLTR_RX); ice_fltr_add_mac_and_broadcast(uplink_vsi, uplink_vsi->port_info->mac.perm_addr, ICE_FWD_TO_VSI); } /** - * ice_eswitch_vsi_setup - configure eswitch control VSI - * @pf: pointer to PF structure - * @pi: pointer to port_info structure - */ -static struct ice_vsi * -ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) -{ - struct ice_vsi_cfg_params params = {}; - - params.type = ICE_VSI_SWITCHDEV_CTRL; - params.pi = pi; - params.flags = ICE_VSI_FLAG_INIT; - - return ice_vsi_setup(pf, ¶ms); -} - -/** - * ice_eswitch_napi_enable - enable NAPI for all port representors - * @reprs: xarray of reprs - */ -static void ice_eswitch_napi_enable(struct xarray *reprs) -{ - struct ice_repr *repr; - unsigned long id; - - xa_for_each(reprs, id, repr) - napi_enable(&repr->q_vector->napi); -} - -/** - * ice_eswitch_napi_disable - disable NAPI for all port representors - * @reprs: xarray of reprs - */ -static void ice_eswitch_napi_disable(struct xarray *reprs) -{ - struct ice_repr *repr; - unsigned long id; - - xa_for_each(reprs, id, repr) - napi_disable(&repr->q_vector->napi); -} - -/** * ice_eswitch_enable_switchdev - configure eswitch in switchdev mode * @pf: pointer to PF structure */ static int ice_eswitch_enable_switchdev(struct ice_pf *pf) { - struct ice_vsi *ctrl_vsi, *uplink_vsi; + struct ice_vsi *uplink_vsi; uplink_vsi = ice_get_main_vsi(pf); if (!uplink_vsi) @@ -450,17 +273,10 @@ static int ice_eswitch_enable_switchdev(struct ice_pf *pf) return -EINVAL; } - pf->eswitch.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info); - if (!pf->eswitch.control_vsi) - return -ENODEV; - - ctrl_vsi = pf->eswitch.control_vsi; - /* cp VSI is createad with 1 queue as default */ - pf->eswitch.qs.value = 1; pf->eswitch.uplink_vsi = uplink_vsi; if (ice_eswitch_setup_env(pf)) - goto err_vsi; + return -ENODEV; if (ice_eswitch_br_offloads_init(pf)) goto err_br_offloads; @@ -471,8 +287,6 @@ static int ice_eswitch_enable_switchdev(struct ice_pf *pf) err_br_offloads: ice_eswitch_release_env(pf); -err_vsi: - ice_vsi_release(ctrl_vsi); return -ENODEV; } @@ -482,14 +296,10 @@ err_vsi: */ static void ice_eswitch_disable_switchdev(struct ice_pf *pf) { - struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi; - ice_eswitch_br_offloads_deinit(pf); ice_eswitch_release_env(pf); - ice_vsi_release(ctrl_vsi); pf->eswitch.is_running = false; - pf->eswitch.qs.is_reaching = false; } /** @@ -530,7 +340,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode, dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to 
switchdev", pf->hw.pf_id); - xa_init_flags(&pf->eswitch.reprs, XA_FLAGS_ALLOC); + xa_init(&pf->eswitch.reprs); NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev"); break; } @@ -602,56 +412,18 @@ void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) static void ice_eswitch_stop_reprs(struct ice_pf *pf) { - ice_eswitch_del_sp_rules(pf); ice_eswitch_stop_all_tx_queues(pf); - ice_eswitch_napi_disable(&pf->eswitch.reprs); } static void ice_eswitch_start_reprs(struct ice_pf *pf) { - ice_eswitch_napi_enable(&pf->eswitch.reprs); ice_eswitch_start_all_tx_queues(pf); - ice_eswitch_add_sp_rules(pf); -} - -static void -ice_eswitch_cp_change_queues(struct ice_eswitch *eswitch, int change) -{ - struct ice_vsi *cp = eswitch->control_vsi; - int queues = 0; - - if (eswitch->qs.is_reaching) { - if (eswitch->qs.to_reach >= eswitch->qs.value + change) { - queues = eswitch->qs.to_reach; - eswitch->qs.is_reaching = false; - } else { - queues = 0; - } - } else if ((change > 0 && cp->alloc_txq <= eswitch->qs.value) || - change < 0) { - queues = cp->alloc_txq + change; - } - - if (queues) { - cp->req_txq = queues; - cp->req_rxq = queues; - ice_vsi_close(cp); - ice_vsi_rebuild(cp, ICE_VSI_FLAG_NO_INIT); - ice_vsi_open(cp); - } else if (!change) { - /* change == 0 means that VSI wasn't open, open it here */ - ice_vsi_open(cp); - } - - eswitch->qs.value += change; - ice_eswitch_remap_rings_to_vectors(eswitch); } int ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf) { struct ice_repr *repr; - int change = 1; int err; if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY) @@ -661,9 +433,6 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf) err = ice_eswitch_enable_switchdev(pf); if (err) return err; - /* Control plane VSI is created with 1 queue as default */ - pf->eswitch.qs.to_reach -= 1; - change = 0; } ice_eswitch_stop_reprs(pf); @@ -678,14 +447,12 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf) if (err) goto err_setup_repr; - err = xa_alloc(&pf->eswitch.reprs, &repr->id, repr, - XA_LIMIT(1, INT_MAX), GFP_KERNEL); + err = xa_insert(&pf->eswitch.reprs, repr->id, repr, GFP_KERNEL); if (err) goto err_xa_alloc; vf->repr_id = repr->id; - ice_eswitch_cp_change_queues(&pf->eswitch, change); ice_eswitch_start_reprs(pf); return 0; @@ -715,8 +482,6 @@ void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) if (xa_empty(&pf->eswitch.reprs)) ice_eswitch_disable_switchdev(pf); - else - ice_eswitch_cp_change_queues(&pf->eswitch, -1); ice_eswitch_release_repr(pf, repr); ice_repr_rem_vf(repr); @@ -738,37 +503,37 @@ void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) * ice_eswitch_rebuild - rebuild eswitch * @pf: pointer to PF structure */ -int ice_eswitch_rebuild(struct ice_pf *pf) +void ice_eswitch_rebuild(struct ice_pf *pf) { struct ice_repr *repr; unsigned long id; - int err; if (!ice_is_switchdev_running(pf)) - return 0; - - err = ice_vsi_rebuild(pf->eswitch.control_vsi, ICE_VSI_FLAG_INIT); - if (err) - return err; + return; xa_for_each(&pf->eswitch.reprs, id, repr) ice_eswitch_detach(pf, repr->vf); - - return 0; } /** - * ice_eswitch_reserve_cp_queues - reserve control plane VSI queues - * @pf: pointer to PF structure - * @change: how many more (or less) queues is needed + * ice_eswitch_get_target - get netdev based on src_vsi from descriptor + * @rx_ring: ring used to receive the packet + * @rx_desc: descriptor used to get src_vsi value * - * Remember to call ice_eswitch_attach/detach() the "change" times. 
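One consequence of dropping the control-plane VSI shows in the xarray handling just above: representors are now stored at a caller-chosen index with xa_insert() rather than an xa_alloc()-assigned one, so the Rx path can translate a descriptor's src_vsi field straight into a representor, as ice_eswitch_get_target() below does. A minimal sketch of that keyed-lookup pattern (helper names are illustrative):

	#include <linux/xarray.h>

	static int repr_register(struct xarray *reprs, struct ice_repr *repr)
	{
		/* returns -EBUSY if the index is already occupied */
		return xa_insert(reprs, repr->id, repr, GFP_KERNEL);
	}

	static struct ice_repr *repr_lookup(struct xarray *reprs, u16 src_vsi)
	{
		return xa_load(reprs, src_vsi);	/* lock-free RCU read */
	}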
+ * Get src_vsi value from descriptor and load correct representor. If it isn't + * found return rx_ring->netdev. */ -void ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change) +struct net_device *ice_eswitch_get_target(struct ice_rx_ring *rx_ring, + union ice_32b_rx_flex_desc *rx_desc) { - if (pf->eswitch.qs.value + change < 0) - return; + struct ice_eswitch *eswitch = &rx_ring->vsi->back->eswitch; + struct ice_32b_rx_flex_desc_nic_2 *desc; + struct ice_repr *repr; + + desc = (struct ice_32b_rx_flex_desc_nic_2 *)rx_desc; + repr = xa_load(&eswitch->reprs, le16_to_cpu(desc->src_vsi)); + if (!repr) + return rx_ring->netdev; - pf->eswitch.qs.to_reach = pf->eswitch.qs.value + change; - pf->eswitch.qs.is_reaching = true; + return repr->netdev; } diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h index 1a288a03a7..e2e5c0c75e 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.h +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h @@ -10,7 +10,7 @@ void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf); int ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf); -int ice_eswitch_rebuild(struct ice_pf *pf); +void ice_eswitch_rebuild(struct ice_pf *pf); int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode); int @@ -26,7 +26,8 @@ void ice_eswitch_set_target_vsi(struct sk_buff *skb, struct ice_tx_offload_params *off); netdev_tx_t ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev); -void ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change); +struct net_device *ice_eswitch_get_target(struct ice_rx_ring *rx_ring, + union ice_32b_rx_flex_desc *rx_desc); #else /* CONFIG_ICE_SWITCHDEV */ static inline void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) { } @@ -78,7 +79,11 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev) return NETDEV_TX_BUSY; } -static inline void -ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change) { } +static inline struct net_device * +ice_eswitch_get_target(struct ice_rx_ring *rx_ring, + union ice_32b_rx_flex_desc *rx_desc) +{ + return rx_ring->netdev; +} #endif /* CONFIG_ICE_SWITCHDEV */ #endif /* _ICE_ESWITCH_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c index 9a1a04f5f1..5412eff8ef 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c @@ -41,6 +41,8 @@ static struct in6_addr zero_ipv6_addr_mask = { static int ice_fltr_to_ethtool_flow(enum ice_fltr_ptype flow) { switch (flow) { + case ICE_FLTR_PTYPE_NONF_ETH: + return ETHER_FLOW; case ICE_FLTR_PTYPE_NONF_IPV4_TCP: return TCP_V4_FLOW; case ICE_FLTR_PTYPE_NONF_IPV4_UDP: @@ -72,6 +74,8 @@ static int ice_fltr_to_ethtool_flow(enum ice_fltr_ptype flow) static enum ice_fltr_ptype ice_ethtool_flow_to_fltr(int eth) { switch (eth) { + case ETHER_FLOW: + return ICE_FLTR_PTYPE_NONF_ETH; case TCP_V4_FLOW: return ICE_FLTR_PTYPE_NONF_IPV4_TCP; case UDP_V4_FLOW: @@ -137,6 +141,10 @@ int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd) memset(&fsp->m_ext, 0, sizeof(fsp->m_ext)); switch (fsp->flow_type) { + case ETHER_FLOW: + fsp->h_u.ether_spec = rule->eth; + fsp->m_u.ether_spec = rule->eth_mask; + break; case IPV4_USER_FLOW: fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; fsp->h_u.usr_ip4_spec.proto = 0; @@ -526,7 +534,7 @@ ice_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp, * * Returns the number of available flow 
director filters to this VSI
 */
-static int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi)
+int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi)
 {
 	u16 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
 	u16 num_guar;
@@ -1194,6 +1202,122 @@ ice_set_fdir_ip6_usr_seg(struct ice_flow_seg_info *seg,
 }
 
 /**
+ * ice_fdir_vlan_valid - validate VLAN data for Flow Director rule
+ * @dev: network interface device structure
+ * @fsp: pointer to ethtool Rx flow specification
+ *
+ * Return: true if the VLAN data is valid, false otherwise
+ */
+static bool ice_fdir_vlan_valid(struct device *dev,
+				struct ethtool_rx_flow_spec *fsp)
+{
+	if (fsp->m_ext.vlan_etype && !eth_type_vlan(fsp->h_ext.vlan_etype))
+		return false;
+
+	if (fsp->m_ext.vlan_tci && ntohs(fsp->h_ext.vlan_tci) >= VLAN_N_VID)
+		return false;
+
+	/* proto and vlan must have vlan-etype defined */
+	if (fsp->m_u.ether_spec.h_proto && fsp->m_ext.vlan_tci &&
+	    !fsp->m_ext.vlan_etype) {
+		dev_warn(dev, "Filters matching on proto and vlan also require vlan-etype");
+		return false;
+	}
+
+	return true;
+}
+
+/**
+ * ice_set_ether_flow_seg - set address and protocol segments for ether flow
+ * @dev: network interface device structure
+ * @seg: flow segment for programming
+ * @eth_spec: mask data from ethtool
+ *
+ * Return: 0 on success and a negative errno in case of error.
+ */
+static int ice_set_ether_flow_seg(struct device *dev,
+				  struct ice_flow_seg_info *seg,
+				  struct ethhdr *eth_spec)
+{
+	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH);
+
+	/* empty rules are not valid */
+	if (is_zero_ether_addr(eth_spec->h_source) &&
+	    is_zero_ether_addr(eth_spec->h_dest) &&
+	    !eth_spec->h_proto)
+		return -EINVAL;
+
+	/* Ethertype */
+	if (eth_spec->h_proto == htons(0xFFFF)) {
+		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_TYPE,
+				 ICE_FLOW_FLD_OFF_INVAL,
+				 ICE_FLOW_FLD_OFF_INVAL,
+				 ICE_FLOW_FLD_OFF_INVAL, false);
+	} else if (eth_spec->h_proto) {
+		dev_warn(dev, "Only 0x0000 or 0xffff proto mask is allowed for flow-type ether");
+		return -EOPNOTSUPP;
+	}
+
+	/* Source MAC address */
+	if (is_broadcast_ether_addr(eth_spec->h_source))
+		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_SA,
+				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+				 ICE_FLOW_FLD_OFF_INVAL, false);
+	else if (!is_zero_ether_addr(eth_spec->h_source))
+		goto err_mask;
+
+	/* Destination MAC address */
+	if (is_broadcast_ether_addr(eth_spec->h_dest))
+		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_DA,
+				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+				 ICE_FLOW_FLD_OFF_INVAL, false);
+	else if (!is_zero_ether_addr(eth_spec->h_dest))
+		goto err_mask;
+
+	return 0;
+
+err_mask:
+	dev_warn(dev, "Only 00:00:00:00:00:00 or ff:ff:ff:ff:ff:ff MAC address mask is allowed for flow-type ether");
+	return -EOPNOTSUPP;
+}
+
+/**
+ * ice_set_fdir_vlan_seg - set vlan segments for ether flow
+ * @seg: flow segment for programming
+ * @ext_masks: masks for additional RX flow fields
+ *
+ * Return: 0 on success and a negative errno in case of error.
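For orientation, a user-side flow spec that the validators above accept would combine a full-mask EtherType with an exact (all-ones mask) MAC match. A sketch using the standard ethtool UAPI types — not driver code, and the EtherType value is only an example:

	#include <linux/ethtool.h>
	#include <linux/etherdevice.h>
	#include <linux/if_ether.h>

	static void fill_ether_flow_sketch(struct ethtool_rx_flow_spec *fsp,
					   const u8 *dst_mac)
	{
		fsp->flow_type = ETHER_FLOW;
		fsp->h_u.ether_spec.h_proto = htons(ETH_P_LLDP);  /* example */
		fsp->m_u.ether_spec.h_proto = htons(0xFFFF);	  /* full mask */
		ether_addr_copy(fsp->h_u.ether_spec.h_dest, dst_mac);
		eth_broadcast_addr(fsp->m_u.ether_spec.h_dest);	  /* exact DA */
	}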
+ */ +static int +ice_set_fdir_vlan_seg(struct ice_flow_seg_info *seg, + struct ethtool_flow_ext *ext_masks) +{ + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_VLAN); + + if (ext_masks->vlan_etype) { + if (ext_masks->vlan_etype != htons(0xFFFF)) + return -EOPNOTSUPP; + + ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_S_VLAN, + ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, false); + } + + if (ext_masks->vlan_tci) { + if (ext_masks->vlan_tci != htons(0xFFFF)) + return -EOPNOTSUPP; + + ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_C_VLAN, + ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, false); + } + + return 0; +} + +/** * ice_cfg_fdir_xtrct_seq - Configure extraction sequence for the given filter * @pf: PF structure * @fsp: pointer to ethtool Rx flow specification @@ -1209,7 +1333,7 @@ ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp, struct device *dev = ice_pf_to_dev(pf); enum ice_fltr_ptype fltr_idx; struct ice_hw *hw = &pf->hw; - bool perfect_filter; + bool perfect_filter = false; int ret; seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL); @@ -1262,6 +1386,16 @@ ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp, ret = ice_set_fdir_ip6_usr_seg(seg, &fsp->m_u.usr_ip6_spec, &perfect_filter); break; + case ETHER_FLOW: + ret = ice_set_ether_flow_seg(dev, seg, &fsp->m_u.ether_spec); + if (!ret && (fsp->m_ext.vlan_etype || fsp->m_ext.vlan_tci)) { + if (!ice_fdir_vlan_valid(dev, fsp)) { + ret = -EINVAL; + break; + } + ret = ice_set_fdir_vlan_seg(seg, &fsp->m_ext); + } + break; default: ret = -EINVAL; } @@ -1823,6 +1957,10 @@ ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp, input->mask.v6.tc = fsp->m_u.usr_ip6_spec.tclass; input->mask.v6.proto = fsp->m_u.usr_ip6_spec.l4_proto; break; + case ETHER_FLOW: + input->eth = fsp->h_u.ether_spec; + input->eth_mask = fsp->m_u.ether_spec; + break; default: /* not doing un-parsed flow types */ return -EINVAL; diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c index 5840c3e04a..26b357c0ae 100644 --- a/drivers/net/ethernet/intel/ice/ice_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_fdir.c @@ -4,6 +4,8 @@ #include "ice_common.h" /* These are training packet headers used to program flow director filters. */ +static const u8 ice_fdir_eth_pkt[22]; + static const u8 ice_fdir_tcpv4_pkt[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, @@ -417,6 +419,11 @@ static const u8 ice_fdir_ip6_tun_pkt[] = { /* Flow Director no-op training packet table */ static const struct ice_fdir_base_pkt ice_fdir_pkt[] = { { + ICE_FLTR_PTYPE_NONF_ETH, + sizeof(ice_fdir_eth_pkt), ice_fdir_eth_pkt, + sizeof(ice_fdir_eth_pkt), ice_fdir_eth_pkt, + }, + { ICE_FLTR_PTYPE_NONF_IPV4_TCP, sizeof(ice_fdir_tcpv4_pkt), ice_fdir_tcpv4_pkt, sizeof(ice_fdir_tcp4_tun_pkt), ice_fdir_tcp4_tun_pkt, @@ -914,6 +921,21 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, * perspective. The input from user is from Rx filter perspective. 
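A note on the three new offsets used below (ICE_ETH_TYPE_F_OFFSET = 12, ICE_ETH_VLAN_TCI_OFFSET = 14, ICE_ETH_TYPE_VLAN_OFFSET = 16): they are byte positions within the 22-byte Ethernet training packet, matching the usual frame layout:

	/* untagged:	dst MAC [0..5] | src MAC [6..11] | EtherType [12..13]
	 * VLAN-tagged:	dst MAC [0..5] | src MAC [6..11] | TPID [12..13] |
	 *		TCI [14..15] | EtherType [16..17]
	 */

Hence the ETHER_FLOW case writes the VLAN protocol at offset 12, the tag at 14 and the real EtherType at 16 when a tag is present, and the EtherType at offset 12 otherwise.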
*/ switch (flow) { + case ICE_FLTR_PTYPE_NONF_ETH: + ice_pkt_insert_mac_addr(loc, input->eth.h_dest); + ice_pkt_insert_mac_addr(loc + ETH_ALEN, input->eth.h_source); + if (input->ext_data.vlan_tag || input->ext_data.vlan_type) { + ice_pkt_insert_u16(loc, ICE_ETH_TYPE_F_OFFSET, + input->ext_data.vlan_type); + ice_pkt_insert_u16(loc, ICE_ETH_VLAN_TCI_OFFSET, + input->ext_data.vlan_tag); + ice_pkt_insert_u16(loc, ICE_ETH_TYPE_VLAN_OFFSET, + input->eth.h_proto); + } else { + ice_pkt_insert_u16(loc, ICE_ETH_TYPE_F_OFFSET, + input->eth.h_proto); + } + break; case ICE_FLTR_PTYPE_NONF_IPV4_TCP: ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET, input->ip.v4.src_ip); @@ -1189,52 +1211,58 @@ static int ice_cmp_ipv6_addr(__be32 *a, __be32 *b) * ice_fdir_comp_rules - compare 2 filters * @a: a Flow Director filter data structure * @b: a Flow Director filter data structure - * @v6: bool true if v6 filter * * Returns true if the filters match */ static bool -ice_fdir_comp_rules(struct ice_fdir_fltr *a, struct ice_fdir_fltr *b, bool v6) +ice_fdir_comp_rules(struct ice_fdir_fltr *a, struct ice_fdir_fltr *b) { enum ice_fltr_ptype flow_type = a->flow_type; /* The calling function already checks that the two filters have the * same flow_type. */ - if (!v6) { - if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP || - flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP || - flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP) { - if (a->ip.v4.dst_ip == b->ip.v4.dst_ip && - a->ip.v4.src_ip == b->ip.v4.src_ip && - a->ip.v4.dst_port == b->ip.v4.dst_port && - a->ip.v4.src_port == b->ip.v4.src_port) - return true; - } else if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) { - if (a->ip.v4.dst_ip == b->ip.v4.dst_ip && - a->ip.v4.src_ip == b->ip.v4.src_ip && - a->ip.v4.l4_header == b->ip.v4.l4_header && - a->ip.v4.proto == b->ip.v4.proto && - a->ip.v4.ip_ver == b->ip.v4.ip_ver && - a->ip.v4.tos == b->ip.v4.tos) - return true; - } - } else { - if (flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP || - flow_type == ICE_FLTR_PTYPE_NONF_IPV6_TCP || - flow_type == ICE_FLTR_PTYPE_NONF_IPV6_SCTP) { - if (a->ip.v6.dst_port == b->ip.v6.dst_port && - a->ip.v6.src_port == b->ip.v6.src_port && - !ice_cmp_ipv6_addr(a->ip.v6.dst_ip, - b->ip.v6.dst_ip) && - !ice_cmp_ipv6_addr(a->ip.v6.src_ip, - b->ip.v6.src_ip)) - return true; - } else if (flow_type == ICE_FLTR_PTYPE_NONF_IPV6_OTHER) { - if (a->ip.v6.dst_port == b->ip.v6.dst_port && - a->ip.v6.src_port == b->ip.v6.src_port) - return true; - } + switch (flow_type) { + case ICE_FLTR_PTYPE_NONF_ETH: + if (!memcmp(&a->eth, &b->eth, sizeof(a->eth))) + return true; + break; + case ICE_FLTR_PTYPE_NONF_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_SCTP: + if (a->ip.v4.dst_ip == b->ip.v4.dst_ip && + a->ip.v4.src_ip == b->ip.v4.src_ip && + a->ip.v4.dst_port == b->ip.v4.dst_port && + a->ip.v4.src_port == b->ip.v4.src_port) + return true; + break; + case ICE_FLTR_PTYPE_NONF_IPV4_OTHER: + if (a->ip.v4.dst_ip == b->ip.v4.dst_ip && + a->ip.v4.src_ip == b->ip.v4.src_ip && + a->ip.v4.l4_header == b->ip.v4.l4_header && + a->ip.v4.proto == b->ip.v4.proto && + a->ip.v4.ip_ver == b->ip.v4.ip_ver && + a->ip.v4.tos == b->ip.v4.tos) + return true; + break; + case ICE_FLTR_PTYPE_NONF_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_SCTP: + if (a->ip.v6.dst_port == b->ip.v6.dst_port && + a->ip.v6.src_port == b->ip.v6.src_port && + !ice_cmp_ipv6_addr(a->ip.v6.dst_ip, + b->ip.v6.dst_ip) && + !ice_cmp_ipv6_addr(a->ip.v6.src_ip, + b->ip.v6.src_ip)) + return true; + break; + case 
ICE_FLTR_PTYPE_NONF_IPV6_OTHER: + if (a->ip.v6.dst_port == b->ip.v6.dst_port && + a->ip.v6.src_port == b->ip.v6.src_port) + return true; + break; + default: + break; } return false; @@ -1253,19 +1281,10 @@ bool ice_fdir_is_dup_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input) bool ret = false; list_for_each_entry(rule, &hw->fdir_list_head, fltr_node) { - enum ice_fltr_ptype flow_type; - if (rule->flow_type != input->flow_type) continue; - flow_type = input->flow_type; - if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP || - flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP || - flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP || - flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) - ret = ice_fdir_comp_rules(rule, input, false); - else - ret = ice_fdir_comp_rules(rule, input, true); + ret = ice_fdir_comp_rules(rule, input); if (ret) { if (rule->fltr_id == input->fltr_id && rule->q_index != input->q_index) diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h index 1b9b844906..ab5b118daa 100644 --- a/drivers/net/ethernet/intel/ice/ice_fdir.h +++ b/drivers/net/ethernet/intel/ice/ice_fdir.h @@ -8,6 +8,9 @@ #define ICE_FDIR_MAX_RAW_PKT_SIZE (512 + ICE_FDIR_TUN_PKT_OFF) /* macros for offsets into packets for flow director programming */ +#define ICE_ETH_TYPE_F_OFFSET 12 +#define ICE_ETH_VLAN_TCI_OFFSET 14 +#define ICE_ETH_TYPE_VLAN_OFFSET 16 #define ICE_IPV4_SRC_ADDR_OFFSET 26 #define ICE_IPV4_DST_ADDR_OFFSET 30 #define ICE_IPV4_TCP_SRC_PORT_OFFSET 34 @@ -159,6 +162,8 @@ struct ice_fdir_fltr { struct list_head fltr_node; enum ice_fltr_ptype flow_type; + struct ethhdr eth, eth_mask; + union { struct ice_fdir_v4 v4; struct ice_fdir_v6 v6; @@ -202,6 +207,8 @@ struct ice_fdir_base_pkt { const u8 *tun_pkt; }; +struct ice_vsi; + int ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id); int ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id); int ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr); @@ -213,6 +220,7 @@ int ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, u8 *pkt, bool frag, bool tun); int ice_get_fdir_cnt_all(struct ice_hw *hw); +int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi); bool ice_fdir_is_dup_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input); bool ice_fdir_has_frag(enum ice_fltr_ptype flow); struct ice_fdir_fltr * diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h index d427a79d00..817beca591 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_type.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h @@ -93,6 +93,7 @@ enum ice_tunnel_type { TNL_GRETAP, TNL_GTPC, TNL_GTPU, + TNL_PFCP, __TNL_TYPE_CNT, TNL_LAST = 0xFF, TNL_ALL = 0xFF, @@ -358,7 +359,8 @@ enum ice_prof_type { ICE_PROF_TUN_GRE = 0x4, ICE_PROF_TUN_GTPU = 0x8, ICE_PROF_TUN_GTPC = 0x10, - ICE_PROF_TUN_ALL = 0x1E, + ICE_PROF_TUN_PFCP = 0x20, + ICE_PROF_TUN_ALL = 0x3E, ICE_PROF_ALL = 0xFF, }; diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c index 319a2d6fe2..f81db6c107 100644 --- a/drivers/net/ethernet/intel/ice/ice_fw_update.c +++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c @@ -286,10 +286,9 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon * * Returns: zero on success, or a negative error code on failure. 
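A quick consistency check on the mask arithmetic above: adding ICE_PROF_TUN_PFCP (0x20) widens ICE_PROF_TUN_ALL from 0x1E to 0x3E, i.e. 0x02 | 0x04 | 0x08 | 0x10 | 0x20. The 0x02 bit is not visible in this hunk and is assumed here to be the UDP-tunnel profile; in code form (sketch only):

	#include <linux/build_bug.h>

	/* 0x02: assumed UDP-tunnel profile bit, defined outside this hunk */
	static_assert((0x02 | ICE_PROF_TUN_GRE | ICE_PROF_TUN_GTPU |
		       ICE_PROF_TUN_GTPC | ICE_PROF_TUN_PFCP) ==
		      ICE_PROF_TUN_ALL);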
*/ -static int -ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset, - u16 block_size, u8 *block, bool last_cmd, - u8 *reset_level, struct netlink_ext_ack *extack) +int ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset, + u16 block_size, u8 *block, bool last_cmd, + u8 *reset_level, struct netlink_ext_ack *extack) { u16 completion_module, completion_retval; struct device *dev = ice_pf_to_dev(pf); diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.h b/drivers/net/ethernet/intel/ice/ice_fw_update.h index 7505748857..04b2004627 100644 --- a/drivers/net/ethernet/intel/ice/ice_fw_update.h +++ b/drivers/net/ethernet/intel/ice/ice_fw_update.h @@ -9,5 +9,8 @@ int ice_devlink_flash_update(struct devlink *devlink, struct netlink_ext_ack *extack); int ice_get_pending_updates(struct ice_pf *pf, u8 *pending, struct netlink_ext_ack *extack); +int ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset, + u16 block_size, u8 *block, bool last_cmd, + u8 *reset_level, struct netlink_ext_ack *extack); #endif diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c index f0e76f0a6d..1ccb572ce2 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.c +++ b/drivers/net/ethernet/intel/ice/ice_lag.c @@ -202,11 +202,12 @@ static struct ice_lag *ice_lag_find_primary(struct ice_lag *lag) * @act: rule action * @recipe_id: recipe id for the new rule * @rule_idx: pointer to rule index + * @direction: ICE_FLTR_RX or ICE_FLTR_TX * @add: boolean on whether we are adding filters */ static int ice_lag_cfg_fltr(struct ice_lag *lag, u32 act, u16 recipe_id, u16 *rule_idx, - bool add) + u8 direction, bool add) { struct ice_sw_rule_lkup_rx_tx *s_rule; u16 s_rule_sz, vsi_num; @@ -231,9 +232,16 @@ ice_lag_cfg_fltr(struct ice_lag *lag, u32 act, u16 recipe_id, u16 *rule_idx, act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M, vsi_num); - s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX); s_rule->recipe_id = cpu_to_le16(recipe_id); - s_rule->src = cpu_to_le16(hw->port_info->lport); + if (direction == ICE_FLTR_RX) { + s_rule->hdr.type = + cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX); + s_rule->src = cpu_to_le16(hw->port_info->lport); + } else { + s_rule->hdr.type = + cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX); + s_rule->src = cpu_to_le16(vsi_num); + } s_rule->act = cpu_to_le32(act); s_rule->hdr_len = cpu_to_le16(DUMMY_ETH_HDR_LEN); opc = ice_aqc_opc_add_sw_rules; @@ -266,9 +274,27 @@ ice_lag_cfg_dflt_fltr(struct ice_lag *lag, bool add) { u32 act = ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT | ICE_SINGLE_ACT_LAN_ENABLE; + int err; + + err = ice_lag_cfg_fltr(lag, act, lag->pf_recipe, &lag->pf_rx_rule_id, + ICE_FLTR_RX, add); + if (err) + goto err_rx; - return ice_lag_cfg_fltr(lag, act, lag->pf_recipe, - &lag->pf_rule_id, add); + act = ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT | + ICE_SINGLE_ACT_LB_ENABLE; + err = ice_lag_cfg_fltr(lag, act, lag->pf_recipe, &lag->pf_tx_rule_id, + ICE_FLTR_TX, add); + if (err) + goto err_tx; + + return 0; + +err_tx: + ice_lag_cfg_fltr(lag, act, lag->pf_recipe, &lag->pf_rx_rule_id, + ICE_FLTR_RX, !add); +err_rx: + return err; } /** @@ -284,7 +310,7 @@ ice_lag_cfg_drop_fltr(struct ice_lag *lag, bool add) ICE_SINGLE_ACT_DROP; return ice_lag_cfg_fltr(lag, act, lag->lport_recipe, - &lag->lport_rule_idx, add); + &lag->lport_rule_idx, ICE_FLTR_RX, add); } /** @@ -310,7 +336,7 @@ ice_lag_cfg_pf_fltrs(struct ice_lag *lag, void *ptr) dev = ice_pf_to_dev(lag->pf); /* interface not active - remove old default VSI 
rule */ - if (bonding_info->slave.state && lag->pf_rule_id) { + if (bonding_info->slave.state && lag->pf_rx_rule_id) { if (ice_lag_cfg_dflt_fltr(lag, false)) dev_err(dev, "Error removing old default VSI filter\n"); if (ice_lag_cfg_drop_fltr(lag, true)) @@ -319,7 +345,7 @@ ice_lag_cfg_pf_fltrs(struct ice_lag *lag, void *ptr) } /* interface becoming active - add new default VSI rule */ - if (!bonding_info->slave.state && !lag->pf_rule_id) { + if (!bonding_info->slave.state && !lag->pf_rx_rule_id) { if (ice_lag_cfg_dflt_fltr(lag, true)) dev_err(dev, "Error adding new default VSI filter\n"); if (lag->lport_rule_idx && ice_lag_cfg_drop_fltr(lag, false)) @@ -714,8 +740,7 @@ static void ice_lag_move_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport) pf = lag->pf; ice_for_each_vsi(pf, i) - if (pf->vsi[i] && (pf->vsi[i]->type == ICE_VSI_VF || - pf->vsi[i]->type == ICE_VSI_SWITCHDEV_CTRL)) + if (pf->vsi[i] && pf->vsi[i]->type == ICE_VSI_VF) ice_lag_move_single_vf_nodes(lag, oldport, newport, i); } @@ -953,8 +978,7 @@ ice_lag_reclaim_vf_nodes(struct ice_lag *lag, struct ice_hw *src_hw) pf = lag->pf; ice_for_each_vsi(pf, i) - if (pf->vsi[i] && (pf->vsi[i]->type == ICE_VSI_VF || - pf->vsi[i]->type == ICE_VSI_SWITCHDEV_CTRL)) + if (pf->vsi[i] && pf->vsi[i]->type == ICE_VSI_VF) ice_for_each_traffic_class(tc) ice_lag_reclaim_vf_tc(lag, src_hw, i, tc); } @@ -1976,8 +2000,7 @@ ice_lag_move_vf_nodes_sync(struct ice_lag *lag, struct ice_hw *dest_hw) pf = lag->pf; ice_for_each_vsi(pf, i) - if (pf->vsi[i] && (pf->vsi[i]->type == ICE_VSI_VF || - pf->vsi[i]->type == ICE_VSI_SWITCHDEV_CTRL)) + if (pf->vsi[i] && pf->vsi[i]->type == ICE_VSI_VF) ice_for_each_traffic_class(tc) ice_lag_move_vf_nodes_tc_sync(lag, dest_hw, i, tc); @@ -2149,7 +2172,7 @@ void ice_lag_rebuild(struct ice_pf *pf) ice_lag_cfg_cp_fltr(lag, true); - if (lag->pf_rule_id) + if (lag->pf_rx_rule_id) if (ice_lag_cfg_dflt_fltr(lag, true)) dev_err(ice_pf_to_dev(pf), "Error adding default VSI rule in rebuild\n"); diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h index 183b38792e..bab2c83142 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.h +++ b/drivers/net/ethernet/intel/ice/ice_lag.h @@ -43,7 +43,8 @@ struct ice_lag { u8 primary:1; /* this is primary */ u16 pf_recipe; u16 lport_recipe; - u16 pf_rule_id; + u16 pf_rx_rule_id; + u16 pf_tx_rule_id; u16 cp_rule_idx; u16 lport_rule_idx; u8 role; diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index d384ddfcb8..611577ebc2 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -160,64 +160,6 @@ struct ice_fltr_desc { (0x1ULL << ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S) #define ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES 0x1ULL -struct ice_rx_ptype_decoded { - u32 known:1; - u32 outer_ip:1; - u32 outer_ip_ver:2; - u32 outer_frag:1; - u32 tunnel_type:3; - u32 tunnel_end_prot:2; - u32 tunnel_end_frag:1; - u32 inner_prot:4; - u32 payload_layer:3; -}; - -enum ice_rx_ptype_outer_ip { - ICE_RX_PTYPE_OUTER_L2 = 0, - ICE_RX_PTYPE_OUTER_IP = 1, -}; - -enum ice_rx_ptype_outer_ip_ver { - ICE_RX_PTYPE_OUTER_NONE = 0, - ICE_RX_PTYPE_OUTER_IPV4 = 1, - ICE_RX_PTYPE_OUTER_IPV6 = 2, -}; - -enum ice_rx_ptype_outer_fragmented { - ICE_RX_PTYPE_NOT_FRAG = 0, - ICE_RX_PTYPE_FRAG = 1, -}; - -enum ice_rx_ptype_tunnel_type { - ICE_RX_PTYPE_TUNNEL_NONE = 0, - ICE_RX_PTYPE_TUNNEL_IP_IP = 1, - ICE_RX_PTYPE_TUNNEL_IP_GRENAT = 2, - ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3, - 
ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4, -}; - -enum ice_rx_ptype_tunnel_end_prot { - ICE_RX_PTYPE_TUNNEL_END_NONE = 0, - ICE_RX_PTYPE_TUNNEL_END_IPV4 = 1, - ICE_RX_PTYPE_TUNNEL_END_IPV6 = 2, -}; - -enum ice_rx_ptype_inner_prot { - ICE_RX_PTYPE_INNER_PROT_NONE = 0, - ICE_RX_PTYPE_INNER_PROT_UDP = 1, - ICE_RX_PTYPE_INNER_PROT_TCP = 2, - ICE_RX_PTYPE_INNER_PROT_SCTP = 3, - ICE_RX_PTYPE_INNER_PROT_ICMP = 4, - ICE_RX_PTYPE_INNER_PROT_TIMESYNC = 5, -}; - -enum ice_rx_ptype_payload_layer { - ICE_RX_PTYPE_PAYLOAD_LAYER_NONE = 0, - ICE_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1, - ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2, - ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3, -}; - /* Rx Flex Descriptor * This descriptor is used instead of the legacy version descriptor when * ice_rlan_ctx.adv_desc is set @@ -651,266 +593,4 @@ struct ice_tlan_ctx { u8 int_q_state; /* width not needed - internal - DO NOT WRITE!!! */ }; -/* The ice_ptype_lkup table is used to convert from the 10-bit ptype in the - * hardware to a bit-field that can be used by SW to more easily determine the - * packet type. - * - * Macros are used to shorten the table lines and make this table human - * readable. - * - * We store the PTYPE in the top byte of the bit field - this is just so that - * we can check that the table doesn't have a row missing, as the index into - * the table should be the PTYPE. - * - * Typical work flow: - * - * IF NOT ice_ptype_lkup[ptype].known - * THEN - * Packet is unknown - * ELSE IF ice_ptype_lkup[ptype].outer_ip == ICE_RX_PTYPE_OUTER_IP - * Use the rest of the fields to look at the tunnels, inner protocols, etc - * ELSE - * Use the enum ice_rx_l2_ptype to decode the packet type - * ENDIF - */ -#define ICE_PTYPES \ - /* L2 Packet types */ \ - ICE_PTT_UNUSED_ENTRY(0), \ - ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), \ - ICE_PTT_UNUSED_ENTRY(2), \ - ICE_PTT_UNUSED_ENTRY(3), \ - ICE_PTT_UNUSED_ENTRY(4), \ - ICE_PTT_UNUSED_ENTRY(5), \ - ICE_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \ - ICE_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \ - ICE_PTT_UNUSED_ENTRY(8), \ - ICE_PTT_UNUSED_ENTRY(9), \ - ICE_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \ - ICE_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \ - ICE_PTT_UNUSED_ENTRY(12), \ - ICE_PTT_UNUSED_ENTRY(13), \ - ICE_PTT_UNUSED_ENTRY(14), \ - ICE_PTT_UNUSED_ENTRY(15), \ - ICE_PTT_UNUSED_ENTRY(16), \ - ICE_PTT_UNUSED_ENTRY(17), \ - ICE_PTT_UNUSED_ENTRY(18), \ - ICE_PTT_UNUSED_ENTRY(19), \ - ICE_PTT_UNUSED_ENTRY(20), \ - ICE_PTT_UNUSED_ENTRY(21), \ - \ - /* Non Tunneled IPv4 */ \ - ICE_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), \ - ICE_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), \ - ICE_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(25), \ - ICE_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), \ - ICE_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), \ - ICE_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), \ - \ - /* IPv4 --> IPv4 */ \ - ICE_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), \ - ICE_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), \ - ICE_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(32), \ - ICE_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), \ - ICE_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), \ - ICE_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), \ - \ - /* IPv4 --> IPv6 */ \ - ICE_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), \ - ICE_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), \ - 
ICE_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(39), \ - ICE_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), \ - ICE_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), \ - ICE_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), \ - \ - /* IPv4 --> GRE/NAT */ \ - ICE_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), \ - \ - /* IPv4 --> GRE/NAT --> IPv4 */ \ - ICE_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), \ - ICE_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), \ - ICE_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(47), \ - ICE_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), \ - ICE_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), \ - ICE_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), \ - \ - /* IPv4 --> GRE/NAT --> IPv6 */ \ - ICE_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), \ - ICE_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), \ - ICE_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(54), \ - ICE_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), \ - ICE_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), \ - ICE_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), \ - \ - /* IPv4 --> GRE/NAT --> MAC */ \ - ICE_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), \ - \ - /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ \ - ICE_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), \ - ICE_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), \ - ICE_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(62), \ - ICE_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), \ - ICE_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), \ - ICE_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), \ - \ - /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ \ - ICE_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), \ - ICE_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), \ - ICE_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(69), \ - ICE_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), \ - ICE_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), \ - ICE_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), \ - \ - /* IPv4 --> GRE/NAT --> MAC/VLAN */ \ - ICE_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), \ - \ - /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ \ - ICE_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), \ - ICE_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), \ - ICE_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(77), \ - ICE_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), \ - ICE_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), \ - ICE_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), \ - \ - /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ \ - ICE_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), \ - ICE_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), \ - ICE_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(84), \ - ICE_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), \ - ICE_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), \ - ICE_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, 
IPV6, NOF, ICMP, PAY4), \ - \ - /* Non Tunneled IPv6 */ \ - ICE_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), \ - ICE_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), \ - ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(91), \ - ICE_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), \ - ICE_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), \ - ICE_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), \ - \ - /* IPv6 --> IPv4 */ \ - ICE_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), \ - ICE_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), \ - ICE_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(98), \ - ICE_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), \ - ICE_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), \ - ICE_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), \ - \ - /* IPv6 --> IPv6 */ \ - ICE_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), \ - ICE_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), \ - ICE_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(105), \ - ICE_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), \ - ICE_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), \ - ICE_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), \ - \ - /* IPv6 --> GRE/NAT */ \ - ICE_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), \ - \ - /* IPv6 --> GRE/NAT -> IPv4 */ \ - ICE_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), \ - ICE_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), \ - ICE_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(113), \ - ICE_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), \ - ICE_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), \ - ICE_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), \ - \ - /* IPv6 --> GRE/NAT -> IPv6 */ \ - ICE_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), \ - ICE_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), \ - ICE_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(120), \ - ICE_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), \ - ICE_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), \ - ICE_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), \ - \ - /* IPv6 --> GRE/NAT -> MAC */ \ - ICE_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), \ - \ - /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ \ - ICE_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), \ - ICE_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), \ - ICE_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(128), \ - ICE_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), \ - ICE_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), \ - ICE_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), \ - \ - /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ \ - ICE_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), \ - ICE_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), \ - ICE_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(135), \ - ICE_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), \ - ICE_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), \ - ICE_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), \ - \ - /* IPv6 --> GRE/NAT -> MAC/VLAN */ \ - ICE_PTT(139, IP, IPV6, NOF, 
IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), \ - \ - /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ \ - ICE_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), \ - ICE_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), \ - ICE_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(143), \ - ICE_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), \ - ICE_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), \ - ICE_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), \ - \ - /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ \ - ICE_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), \ - ICE_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), \ - ICE_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(150), \ - ICE_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), \ - ICE_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), \ - ICE_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), - -#define ICE_NUM_DEFINED_PTYPES 154 - -/* macro to make the table lines short, use explicit indexing with [PTYPE] */ -#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ - [PTYPE] = { \ - 1, \ - ICE_RX_PTYPE_OUTER_##OUTER_IP, \ - ICE_RX_PTYPE_OUTER_##OUTER_IP_VER, \ - ICE_RX_PTYPE_##OUTER_FRAG, \ - ICE_RX_PTYPE_TUNNEL_##T, \ - ICE_RX_PTYPE_TUNNEL_END_##TE, \ - ICE_RX_PTYPE_##TEF, \ - ICE_RX_PTYPE_INNER_PROT_##I, \ - ICE_RX_PTYPE_PAYLOAD_LAYER_##PL } - -#define ICE_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 } - -/* shorter macros makes the table fit but are terse */ -#define ICE_RX_PTYPE_NOF ICE_RX_PTYPE_NOT_FRAG -#define ICE_RX_PTYPE_FRG ICE_RX_PTYPE_FRAG - -/* Lookup table mapping in the 10-bit HW PTYPE to the bit field for decoding */ -static const struct ice_rx_ptype_decoded ice_ptype_lkup[BIT(10)] = { - ICE_PTYPES - - /* unused entries */ - [ICE_NUM_DEFINED_PTYPES ... 1023] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 } -}; - -static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype) -{ - return ice_ptype_lkup[ptype]; -} - - #endif /* _ICE_LAN_TX_RX_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index acf732ce04..7629b01905 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -7,7 +7,6 @@ #include "ice_lib.h" #include "ice_fltr.h" #include "ice_dcb_lib.h" -#include "ice_devlink.h" #include "ice_vsi_vlan_ops.h" /** @@ -27,8 +26,6 @@ const char *ice_vsi_type_str(enum ice_vsi_type vsi_type) return "ICE_VSI_CHNL"; case ICE_VSI_LB: return "ICE_VSI_LB"; - case ICE_VSI_SWITCHDEV_CTRL: - return "ICE_VSI_SWITCHDEV_CTRL"; default: return "unknown"; } @@ -138,7 +135,6 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi) { switch (vsi->type) { case ICE_VSI_PF: - case ICE_VSI_SWITCHDEV_CTRL: case ICE_VSI_CTRL: case ICE_VSI_LB: /* a user could change the values of num_[tr]x_desc using @@ -205,21 +201,6 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi) max_t(int, vsi->alloc_rxq, vsi->alloc_txq)); break; - case ICE_VSI_SWITCHDEV_CTRL: - /* The number of queues for ctrl VSI is equal to number of PRs - * Each ring is associated to the corresponding VF_PR netdev. 
- * Tx and Rx rings are always equal - */ - if (vsi->req_txq && vsi->req_rxq) { - vsi->alloc_txq = vsi->req_txq; - vsi->alloc_rxq = vsi->req_rxq; - } else { - vsi->alloc_txq = 1; - vsi->alloc_rxq = 1; - } - - vsi->num_q_vectors = 1; - break; case ICE_VSI_VF: if (vf->num_req_qs) vf->num_vf_qs = vf->num_req_qs; @@ -514,22 +495,6 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data) return IRQ_HANDLED; } -static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *data) -{ - struct ice_q_vector *q_vector = (struct ice_q_vector *)data; - struct ice_pf *pf = q_vector->vsi->back; - struct ice_repr *repr; - unsigned long id; - - if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring) - return IRQ_HANDLED; - - xa_for_each(&pf->eswitch.reprs, id, repr) - napi_schedule(&repr->q_vector->napi); - - return IRQ_HANDLED; -} - /** * ice_vsi_alloc_stat_arrays - Allocate statistics arrays * @vsi: VSI pointer @@ -592,10 +557,6 @@ ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch) } switch (vsi->type) { - case ICE_VSI_SWITCHDEV_CTRL: - /* Setup eswitch MSIX irq handler for VSI */ - vsi->irq_handler = ice_eswitch_msix_clean_rings; - break; case ICE_VSI_PF: /* Setup default MSIX irq handler for VSI */ vsi->irq_handler = ice_msix_clean_rings; @@ -925,11 +886,6 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi) max_rss_size); vsi->rss_lut_type = ICE_LUT_PF; break; - case ICE_VSI_SWITCHDEV_CTRL: - vsi->rss_table_size = ICE_LUT_VSI_SIZE; - vsi->rss_size = min_t(u16, num_online_cpus(), max_rss_size); - vsi->rss_lut_type = ICE_LUT_VSI; - break; case ICE_VSI_VF: /* VF VSI will get a small RSS table. * For VSI_LUT, LUT size should be set to 64 bytes. @@ -1255,7 +1211,6 @@ static int ice_vsi_init(struct ice_vsi *vsi, u32 vsi_flags) case ICE_VSI_PF: ctxt->flags = ICE_AQ_VSI_TYPE_PF; break; - case ICE_VSI_SWITCHDEV_CTRL: case ICE_VSI_CHNL: ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2; break; @@ -2137,7 +2092,6 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi) case ICE_VSI_CHNL: case ICE_VSI_LB: case ICE_VSI_PF: - case ICE_VSI_SWITCHDEV_CTRL: max_agg_nodes = ICE_MAX_PF_AGG_NODES; agg_node_id_start = ICE_PF_AGG_NODE_ID_START; agg_node_iter = &pf->pf_agg_node[0]; @@ -2265,10 +2219,8 @@ static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct ice_vsi *vsi) /** * ice_vsi_cfg_def - configure default VSI based on the type * @vsi: pointer to VSI - * @params: the parameters to configure this VSI with */ -static int -ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) +static int ice_vsi_cfg_def(struct ice_vsi *vsi) { struct device *dev = ice_pf_to_dev(vsi->back); struct ice_pf *pf = vsi->back; @@ -2276,7 +2228,7 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) vsi->vsw = pf->first_sw; - ret = ice_vsi_alloc_def(vsi, params->ch); + ret = ice_vsi_alloc_def(vsi, vsi->ch); if (ret) return ret; @@ -2301,7 +2253,7 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) ice_vsi_set_tc_cfg(vsi); /* create the VSI */ - ret = ice_vsi_init(vsi, params->flags); + ret = ice_vsi_init(vsi, vsi->flags); if (ret) goto unroll_get_qs; @@ -2309,7 +2261,6 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) switch (vsi->type) { case ICE_VSI_CTRL: - case ICE_VSI_SWITCHDEV_CTRL: case ICE_VSI_PF: ret = ice_vsi_alloc_q_vectors(vsi); if (ret) @@ -2423,23 +2374,16 @@ unroll_vsi_alloc: /** * ice_vsi_cfg - configure a previously allocated VSI * @vsi: pointer to VSI - * @params: parameters used to configure this VSI */ -int 
ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) +int ice_vsi_cfg(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; int ret; - if (WARN_ON(params->type == ICE_VSI_VF && !params->vf)) + if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf)) return -EINVAL; - vsi->type = params->type; - vsi->port_info = params->pi; - - /* For VSIs which don't have a connected VF, this will be NULL */ - vsi->vf = params->vf; - - ret = ice_vsi_cfg_def(vsi, params); + ret = ice_vsi_cfg_def(vsi); if (ret) return ret; @@ -2525,7 +2469,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params) * a port_info structure for it. */ if (WARN_ON(!(params->flags & ICE_VSI_FLAG_INIT)) || - WARN_ON(!params->pi)) + WARN_ON(!params->port_info)) return NULL; vsi = ice_vsi_alloc(pf); @@ -2534,7 +2478,8 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params) return NULL; } - ret = ice_vsi_cfg(vsi, params); + vsi->params = *params; + ret = ice_vsi_cfg(vsi); if (ret) goto err_vsi_cfg; @@ -2743,8 +2688,7 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked) } else { ice_vsi_close(vsi); } - } else if (vsi->type == ICE_VSI_CTRL || - vsi->type == ICE_VSI_SWITCHDEV_CTRL) { + } else if (vsi->type == ICE_VSI_CTRL) { ice_vsi_close(vsi); } } @@ -3082,7 +3026,6 @@ ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi) */ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags) { - struct ice_vsi_cfg_params params = {}; struct ice_coalesce_stored *coalesce; int prev_num_q_vectors; struct ice_pf *pf; @@ -3091,9 +3034,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags) if (!vsi) return -EINVAL; - params = ice_vsi_to_params(vsi); - params.flags = vsi_flags; - + vsi->flags = vsi_flags; pf = vsi->back; if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf)) return -EINVAL; @@ -3103,7 +3044,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags) goto err_vsi_cfg; ice_vsi_decfg(vsi); - ret = ice_vsi_cfg_def(vsi, ¶ms); + ret = ice_vsi_cfg_def(vsi); if (ret) goto err_vsi_cfg; diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index 9cd23afe5f..94ce8964dd 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -11,43 +11,6 @@ #define ICE_VSI_FLAG_INIT BIT(0) #define ICE_VSI_FLAG_NO_INIT 0 -/** - * struct ice_vsi_cfg_params - VSI configuration parameters - * @pi: pointer to the port_info instance for the VSI - * @ch: pointer to the channel structure for the VSI, may be NULL - * @vf: pointer to the VF associated with this VSI, may be NULL - * @type: the type of VSI to configure - * @flags: VSI flags used for rebuild and configuration - * - * Parameter structure used when configuring a new VSI. - */ -struct ice_vsi_cfg_params { - struct ice_port_info *pi; - struct ice_channel *ch; - struct ice_vf *vf; - enum ice_vsi_type type; - u32 flags; -}; - -/** - * ice_vsi_to_params - Get parameters for an existing VSI - * @vsi: the VSI to get parameters for - * - * Fill a parameter structure for reconfiguring a VSI with its current - * parameters, such as during a rebuild operation. 
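
The ice_vsi_cfg_params plumbing is being folded into the VSI itself: ice_vsi_setup() caches the caller's parameters (vsi->params = *params), ice_vsi_cfg()/ice_vsi_cfg_def() read vsi->type, vsi->port_info, vsi->flags and vsi->ch directly, and ice_vsi_rebuild() no longer reconstructs a params struct via ice_vsi_to_params(). A reduced sketch of the shape of this refactor; the types here are simplified stand-ins, not the driver's:

	struct vsi_cfg { int type; unsigned int flags; };

	struct vsi {
		struct vsi_cfg params;	/* cached once at setup time */
		unsigned int flags;	/* per-operation flags, e.g. rebuild */
	};

	static int vsi_cfg_def(struct vsi *v)
	{
		/* configuration reads the cached state directly */
		return v->params.type ? 0 : -1;
	}

	static int vsi_rebuild(struct vsi *v, unsigned int flags)
	{
		v->flags = flags;	/* no params round-trip needed */
		return vsi_cfg_def(v);
	}
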
- */ -static inline struct ice_vsi_cfg_params ice_vsi_to_params(struct ice_vsi *vsi) -{ - struct ice_vsi_cfg_params params = {}; - - params.pi = vsi->port_info; - params.ch = vsi->ch; - params.vf = vsi->vf; - params.type = vsi->type; - - return params; -} - const char *ice_vsi_type_str(enum ice_vsi_type vsi_type); bool ice_pf_state_is_nominal(struct ice_pf *pf); @@ -101,7 +64,7 @@ void ice_vsi_decfg(struct ice_vsi *vsi); void ice_dis_vsi(struct ice_vsi *vsi, bool locked); int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags); -int ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params); +int ice_vsi_cfg(struct ice_vsi *vsi); bool ice_is_reset_in_progress(unsigned long *state); int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 88d4675cc3..55a42aad92 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -13,7 +13,8 @@ #include "ice_fltr.h" #include "ice_dcb_lib.h" #include "ice_dcb_nl.h" -#include "ice_devlink.h" +#include "devlink/devlink.h" +#include "devlink/devlink_port.h" #include "ice_hwmon.h" /* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the * ice tracepoint functions. This must be done exactly once across the @@ -36,6 +37,7 @@ static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation."; MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_DESCRIPTION(DRV_SUMMARY); +MODULE_IMPORT_NS(LIBIE); MODULE_LICENSE("GPL v2"); MODULE_FIRMWARE(ICE_DDP_PKG_FILE); @@ -1748,6 +1750,39 @@ static void ice_service_timer(struct timer_list *t) } /** + * ice_mdd_maybe_reset_vf - reset VF after MDD event + * @pf: pointer to the PF structure + * @vf: pointer to the VF structure + * @reset_vf_tx: whether Tx MDD has occurred + * @reset_vf_rx: whether Rx MDD has occurred + * + * Since the queue can get stuck on VF MDD events, the PF can be configured to + * automatically reset the VF by enabling the private ethtool flag + * mdd-auto-reset-vf. + */ +static void ice_mdd_maybe_reset_vf(struct ice_pf *pf, struct ice_vf *vf, + bool reset_vf_tx, bool reset_vf_rx) +{ + struct device *dev = ice_pf_to_dev(pf); + + if (!test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) + return; + + /* VF MDD event counters will be cleared by reset, so print the event + * prior to reset. 
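
Relative to the old code, the Tx MDD paths (TX_PQM, TX_TCLAN, TX_TDPU) now participate in auto-reset as well: the per-register checks only accumulate reset_vf_tx/reset_vf_rx, and a single ice_mdd_maybe_reset_vf() call per VF issues at most one reset per scan instead of one per triggering register. The knob is the driver's existing private ethtool flag named in the comment above; assuming the standard ethtool interface, it is toggled with `ethtool --set-priv-flags <ifname> mdd-auto-reset-vf on` and inspected with `ethtool --show-priv-flags <ifname>`.
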
+ */ + if (reset_vf_tx) + ice_print_vf_tx_mdd_event(vf); + + if (reset_vf_rx) + ice_print_vf_rx_mdd_event(vf); + + dev_info(dev, "PF-to-VF reset on PF %d VF %d due to MDD event\n", + pf->hw.pf_id, vf->vf_id); + ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK); +} + +/** * ice_handle_mdd_event - handle malicious driver detect event * @pf: pointer to the PF structure * @@ -1840,6 +1875,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf) */ mutex_lock(&pf->vfs.table_lock); ice_for_each_vf(pf, bkt, vf) { + bool reset_vf_tx = false, reset_vf_rx = false; + reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id)); if (reg & VP_MDET_TX_PQM_VALID_M) { wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF); @@ -1848,6 +1885,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf) if (netif_msg_tx_err(pf)) dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n", vf->vf_id); + + reset_vf_tx = true; } reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id)); @@ -1858,6 +1897,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf) if (netif_msg_tx_err(pf)) dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n", vf->vf_id); + + reset_vf_tx = true; } reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id)); @@ -1868,6 +1909,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf) if (netif_msg_tx_err(pf)) dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n", vf->vf_id); + + reset_vf_tx = true; } reg = rd32(hw, VP_MDET_RX(vf->vf_id)); @@ -1879,18 +1922,12 @@ static void ice_handle_mdd_event(struct ice_pf *pf) dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n", vf->vf_id); - /* Since the queue is disabled on VF Rx MDD events, the - * PF can be configured to reset the VF through ethtool - * private flag mdd-auto-reset-vf. - */ - if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) { - /* VF MDD event counters will be cleared by - * reset, so print the event prior to reset. - */ - ice_print_vf_rx_mdd_event(vf); - ice_reset_vf(vf, ICE_VF_RESET_LOCK); - } + reset_vf_rx = true; } + + if (reset_vf_tx || reset_vf_rx) + ice_mdd_maybe_reset_vf(pf, vf, reset_vf_tx, + reset_vf_rx); } mutex_unlock(&pf->vfs.table_lock); @@ -3671,7 +3708,7 @@ ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) struct ice_vsi_cfg_params params = {}; params.type = ICE_VSI_PF; - params.pi = pi; + params.port_info = pi; params.flags = ICE_VSI_FLAG_INIT; return ice_vsi_setup(pf, ¶ms); @@ -3684,7 +3721,7 @@ ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, struct ice_vsi_cfg_params params = {}; params.type = ICE_VSI_CHNL; - params.pi = pi; + params.port_info = pi; params.ch = ch; params.flags = ICE_VSI_FLAG_INIT; @@ -3705,7 +3742,7 @@ ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) struct ice_vsi_cfg_params params = {}; params.type = ICE_VSI_CTRL; - params.pi = pi; + params.port_info = pi; params.flags = ICE_VSI_FLAG_INIT; return ice_vsi_setup(pf, ¶ms); @@ -3725,7 +3762,7 @@ ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) struct ice_vsi_cfg_params params = {}; params.type = ICE_VSI_LB; - params.pi = pi; + params.port_info = pi; params.flags = ICE_VSI_FLAG_INIT; return ice_vsi_setup(pf, ¶ms); @@ -4448,11 +4485,13 @@ static char *ice_get_opt_fw_name(struct ice_pf *pf) /** * ice_request_fw - Device initialization routine * @pf: pointer to the PF instance + * @firmware: double pointer to firmware struct + * + * Return: zero when successful, negative values otherwise. 
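
ice_request_fw now follows the common "try optional, fall back to default" firmware pattern and leaves lifetime management to the caller. A distilled, driver-agnostic sketch of that pattern (names and error strings here are illustrative, not the driver's):

	#include <linux/device.h>
	#include <linux/firmware.h>

	/* Try an optional, board-specific image first without logging
	 * noise, then fall back to the generic one. On success the
	 * caller owns *fw and must release_firmware() it.
	 */
	static int request_pkg(struct device *dev, const char *opt_name,
			       const char *dflt_name,
			       const struct firmware **fw)
	{
		int err;

		if (opt_name) {
			err = firmware_request_nowarn(fw, opt_name, dev);
			if (!err)
				return 0;	/* optional image found */
		}

		err = request_firmware(fw, dflt_name, dev);
		if (err)
			dev_err(dev, "default image %s not found: %d\n",
				dflt_name, err);
		return err;
	}
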
*/ -static void ice_request_fw(struct ice_pf *pf) +static int ice_request_fw(struct ice_pf *pf, const struct firmware **firmware) { char *opt_fw_filename = ice_get_opt_fw_name(pf); - const struct firmware *firmware = NULL; struct device *dev = ice_pf_to_dev(pf); int err = 0; @@ -4461,29 +4500,95 @@ static void ice_request_fw(struct ice_pf *pf) * and warning messages for other errors. */ if (opt_fw_filename) { - err = firmware_request_nowarn(&firmware, opt_fw_filename, dev); - if (err) { - kfree(opt_fw_filename); - goto dflt_pkg_load; - } - - /* request for firmware was successful. Download to device */ - ice_load_pkg(firmware, pf); + err = firmware_request_nowarn(firmware, opt_fw_filename, dev); kfree(opt_fw_filename); - release_firmware(firmware); - return; + if (!err) + return err; + } + err = request_firmware(firmware, ICE_DDP_PKG_FILE, dev); + if (err) + dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n"); + + return err; +} + +/** + * ice_init_tx_topology - performs Tx topology initialization + * @hw: pointer to the hardware structure + * @firmware: pointer to firmware structure + * + * Return: zero when init was successful, negative values otherwise. + */ +static int +ice_init_tx_topology(struct ice_hw *hw, const struct firmware *firmware) +{ + u8 num_tx_sched_layers = hw->num_tx_sched_layers; + struct ice_pf *pf = hw->back; + struct device *dev; + u8 *buf_copy; + int err; + + dev = ice_pf_to_dev(pf); + /* ice_cfg_tx_topo buf argument is not a constant, + * so we have to make a copy + */ + buf_copy = kmemdup(firmware->data, firmware->size, GFP_KERNEL); + + err = ice_cfg_tx_topo(hw, buf_copy, firmware->size); + if (!err) { + if (hw->num_tx_sched_layers > num_tx_sched_layers) + dev_info(dev, "Tx scheduling layers switching feature disabled\n"); + else + dev_info(dev, "Tx scheduling layers switching feature enabled\n"); + /* if there was a change in topology ice_cfg_tx_topo triggered + * a CORER and we need to re-init hw + */ + ice_deinit_hw(hw); + err = ice_init_hw(hw); + + return err; + } else if (err == -EIO) { + dev_info(dev, "DDP package does not support Tx scheduling layers switching feature - please update to the latest DDP package and try again\n"); } -dflt_pkg_load: - err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev); + return 0; +} + +/** + * ice_init_ddp_config - DDP related configuration + * @hw: pointer to the hardware structure + * @pf: pointer to pf structure + * + * This function loads DDP file from the disk, then initializes Tx + * topology. At the end DDP package is loaded on the card. + * + * Return: zero when init was successful, negative values otherwise. + */ +static int ice_init_ddp_config(struct ice_hw *hw, struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + const struct firmware *firmware = NULL; + int err; + + err = ice_request_fw(pf, &firmware); if (err) { - dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n"); - return; + dev_err(dev, "Fail during requesting FW: %d\n", err); + return err; } - /* request for firmware was successful. 
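
Worth flagging in ice_init_tx_topology() as added here: kmemdup() can return NULL, yet buf_copy is passed to ice_cfg_tx_topo() unchecked, and it is never freed on any path. A hedged correction, assuming ice_cfg_tx_topo() does not retain the buffer (nothing in this hunk suggests it does):

	buf_copy = kmemdup(firmware->data, firmware->size, GFP_KERNEL);
	if (!buf_copy)
		return -ENOMEM;

	err = ice_cfg_tx_topo(hw, buf_copy, firmware->size);
	kfree(buf_copy);	/* assumption: hw keeps no reference */
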
Download to device */ + err = ice_init_tx_topology(hw, firmware); + if (err) { + dev_err(dev, "Fail during initialization of Tx topology: %d\n", + err); + release_firmware(firmware); + return err; + } + + /* Download firmware to device */ ice_load_pkg(firmware, pf); release_firmware(firmware); + + return 0; } /** @@ -4656,9 +4761,11 @@ int ice_init_dev(struct ice_pf *pf) ice_init_feature_support(pf); - ice_request_fw(pf); + err = ice_init_ddp_config(hw, pf); + if (err) + return err; - /* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be + /* if ice_init_ddp_config fails, ICE_FLAG_ADV_FEATURES bit won't be * set in pf->state, which will cause ice_is_safe_mode to return * true */ @@ -5124,6 +5231,7 @@ static int ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) { struct device *dev = &pdev->dev; + struct ice_adapter *adapter; struct ice_pf *pf; struct ice_hw *hw; int err; @@ -5176,7 +5284,12 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) pci_set_master(pdev); + adapter = ice_adapter_get(pdev); + if (IS_ERR(adapter)) + return PTR_ERR(adapter); + pf->pdev = pdev; + pf->adapter = adapter; pci_set_drvdata(pdev, pf); set_bit(ICE_DOWN, pf->state); /* Disable service task until DOWN bit is cleared */ @@ -5210,23 +5323,23 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) devl_lock(priv_to_devlink(pf)); err = ice_load(pf); - devl_unlock(priv_to_devlink(pf)); if (err) goto err_load; err = ice_init_devlink(pf); if (err) goto err_init_devlink; + devl_unlock(priv_to_devlink(pf)); return 0; err_init_devlink: - devl_lock(priv_to_devlink(pf)); ice_unload(pf); - devl_unlock(priv_to_devlink(pf)); err_load: + devl_unlock(priv_to_devlink(pf)); ice_deinit(pf); err_init: + ice_adapter_put(pdev); pci_disable_device(pdev); return err; } @@ -5321,9 +5434,9 @@ static void ice_remove(struct pci_dev *pdev) if (!ice_is_safe_mode(pf)) ice_remove_arfs(pf); + devl_lock(priv_to_devlink(pf)); ice_deinit_devlink(pf); - devl_lock(priv_to_devlink(pf)); ice_unload(pf); devl_unlock(priv_to_devlink(pf)); @@ -5333,6 +5446,7 @@ static void ice_remove(struct pci_dev *pdev) ice_setup_mc_magic_wake(pf); ice_set_wake(pf); + ice_adapter_put(pdev); pci_disable_device(pdev); } @@ -5352,7 +5466,6 @@ static void ice_shutdown(struct pci_dev *pdev) } } -#ifdef CONFIG_PM /** * ice_prepare_for_shutdown - prep for PCI shutdown * @pf: board private structure @@ -5441,7 +5554,7 @@ err_reinit: * Power Management callback to quiesce the device and prepare * for D3 transition. 
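
Dropping the #ifdef CONFIG_PM scaffolding around suspend/resume in favor of DEFINE_SIMPLE_DEV_PM_OPS() and pm_sleep_ptr() is the current kernel idiom: the callbacks always compile (catching bitrot in !PM configs) and the dead code is discarded by the compiler when CONFIG_PM_SLEEP is off. A minimal sketch of the pattern with placeholder names:

	#include <linux/pci.h>
	#include <linux/pm.h>

	static int my_suspend(struct device *dev) { return 0; }
	static int my_resume(struct device *dev)  { return 0; }

	/* No __maybe_unused, no #ifdef: pm_sleep_ptr() evaluates to NULL
	 * and the ops plus callbacks are dropped when !CONFIG_PM_SLEEP.
	 */
	static DEFINE_SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);

	static struct pci_driver my_driver = {
		.name = "my_drv",
		.driver.pm = pm_sleep_ptr(&my_pm_ops),
	};
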
*/ -static int __maybe_unused ice_suspend(struct device *dev) +static int ice_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct ice_pf *pf; @@ -5508,7 +5621,7 @@ static int __maybe_unused ice_suspend(struct device *dev) * ice_resume - PM callback for waking up from D3 * @dev: generic device information structure */ -static int __maybe_unused ice_resume(struct device *dev) +static int ice_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); enum ice_reset_req reset_type; @@ -5564,7 +5677,6 @@ static int __maybe_unused ice_resume(struct device *dev) return 0; } -#endif /* CONFIG_PM */ /** * ice_pci_err_detected - warning that PCI error has been detected @@ -5729,16 +5841,22 @@ static const struct pci_device_id ice_pci_tbl[] = { { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_QSFP), }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SFP), }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SGMII), }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_BACKPLANE) }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_QSFP56) }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_SFP) }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_SFP_DD) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_BACKPLANE) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_QSFP56) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_SFP) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_SFP_DD) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_BACKPLANE), }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_BACKPLANE), }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_QSFP), }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_QSFP), }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_SFP), }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_SFP), }, /* required last entry */ {} }; MODULE_DEVICE_TABLE(pci, ice_pci_tbl); -static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume); +static DEFINE_SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume); static const struct pci_error_handlers ice_pci_err_handler = { .error_detected = ice_pci_err_detected, @@ -5753,9 +5871,7 @@ static struct pci_driver ice_driver = { .id_table = ice_pci_tbl, .probe = ice_probe, .remove = ice_remove, -#ifdef CONFIG_PM - .driver.pm = &ice_pm_ops, -#endif /* CONFIG_PM */ + .driver.pm = pm_sleep_ptr(&ice_pm_ops), .shutdown = ice_shutdown, .sriov_configure = ice_sriov_configure, .sriov_get_vf_total_msix = ice_sriov_get_vf_total_msix, @@ -7091,13 +7207,11 @@ int ice_down(struct ice_vsi *vsi) WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state)); - if (vsi->netdev && vsi->type == ICE_VSI_PF) { + if (vsi->netdev) { vlan_err = ice_vsi_del_vlan_zero(vsi); ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false); netif_carrier_off(vsi->netdev); netif_tx_disable(vsi->netdev); - } else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) { - ice_eswitch_stop_all_tx_queues(vsi->back); } ice_vsi_dis_irq(vsi); @@ -7580,11 +7694,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) goto err_vsi_rebuild; } - err = ice_eswitch_rebuild(pf); - if (err) { - dev_err(dev, "Switchdev rebuild failed: %d\n", err); - goto err_vsi_rebuild; - } + ice_eswitch_rebuild(pf); if (reset_type == ICE_RESET_PFR) { err = ice_rebuild_channels(pf); @@ -7702,7 +7812,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) return -EBUSY; } - netdev->mtu = (unsigned int)new_mtu; + WRITE_ONCE(netdev->mtu, (unsigned int)new_mtu); err = ice_down_up(vsi); if (err) return err; @@ -8035,12 +8145,9 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, if (!br_spec) return -EINVAL; - nla_for_each_nested(attr, br_spec, rem) { - __u16 mode; + 
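
The bridge-setlink hunk that follows replaces the hand-rolled nla_type() filter with nla_for_each_nested_type(), the <net/netlink.h> helper that yields only attributes of the requested type. The before/after shape, as a fragment with surrounding declarations omitted:

	/* before: iterate everything, skip non-matching types */
	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;
		mode = nla_get_u16(attr);
		/* handle mode */
	}

	/* after: the macro performs the type filtering */
	nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
		__u16 mode = nla_get_u16(attr);
		/* handle mode */
	}
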
nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) { + __u16 mode = nla_get_u16(attr); - if (nla_type(attr) != IFLA_BRIDGE_MODE) - continue; - mode = nla_get_u16(attr); if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB) return -EINVAL; /* Continue if bridge mode is not being flipped */ diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c index 8510a02afe..59e8879ac0 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.c +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c @@ -18,10 +18,9 @@ * * Read the NVM using the admin queue commands (0x0701) */ -static int -ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length, - void *data, bool last_command, bool read_shadow_ram, - struct ice_sq_cd *cd) +int ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, + u16 length, void *data, bool last_command, + bool read_shadow_ram, struct ice_sq_cd *cd) { struct ice_aq_desc desc; struct ice_aqc_nvm *cmd; diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.h b/drivers/net/ethernet/intel/ice/ice_nvm.h index 774c231796..63cdc6bdac 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.h +++ b/drivers/net/ethernet/intel/ice/ice_nvm.h @@ -14,6 +14,9 @@ struct ice_orom_civd_info { int ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access); void ice_release_nvm(struct ice_hw *hw); +int ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, + u16 length, void *data, bool last_command, + bool read_shadow_ram, struct ice_sq_cd *cd); int ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, bool read_shadow_ram); diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h index f6f27361c3..755a9c5526 100644 --- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h +++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h @@ -43,6 +43,7 @@ enum ice_protocol_type { ICE_NVGRE, ICE_GTP, ICE_GTP_NO_PAY, + ICE_PFCP, ICE_PPPOE, ICE_L2TPV3, ICE_VLAN_EX, @@ -61,6 +62,7 @@ enum ice_sw_tunnel_type { ICE_SW_TUN_NVGRE, ICE_SW_TUN_GTPU, ICE_SW_TUN_GTPC, + ICE_SW_TUN_PFCP, ICE_ALL_TUNNELS /* All tunnel types including NVGRE */ }; @@ -202,6 +204,15 @@ struct ice_udp_gtp_hdr { u8 rsvrd; }; +struct ice_pfcp_hdr { + u8 flags; + u8 msg_type; + __be16 length; + __be64 seid; + __be32 seq; + u8 spare; +} __packed __aligned(__alignof__(u16)); + struct ice_pppoe_hdr { u8 rsrvd_ver_type; u8 rsrvd_code; @@ -418,6 +429,7 @@ union ice_prot_hdr { struct ice_udp_tnl_hdr tnl_hdr; struct ice_nvgre_hdr nvgre_hdr; struct ice_udp_gtp_hdr gtp_hdr; + struct ice_pfcp_hdr pfcp_hdr; struct ice_pppoe_hdr pppoe_hdr; struct ice_l2tpv3_sess_hdr l2tpv3_sess_hdr; struct ice_hw_metadata metadata; diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index f46d879c62..fefaf52fd6 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -374,6 +374,7 @@ ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts) u8 tmr_idx; tmr_idx = ice_get_ptp_src_clock_index(hw); + guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock); /* Read the system timestamp pre PHC read */ ptp_read_system_prets(sts); @@ -1166,26 +1167,6 @@ static void ice_ptp_reset_cached_phctime(struct ice_pf *pf) } /** - * ice_ptp_read_time - Read the time from the device - * @pf: Board private structure - * @ts: timespec structure to hold the current time value - * @sts: Optional parameter for holding a pair of 
system timestamps from - * the system clock. Will be ignored if NULL is given. - * - * This function reads the source clock registers and stores them in a timespec. - * However, since the registers are 64 bits of nanoseconds, we must convert the - * result to a timespec before we can return. - */ -static void -ice_ptp_read_time(struct ice_pf *pf, struct timespec64 *ts, - struct ptp_system_timestamp *sts) -{ - u64 time_ns = ice_ptp_read_src_clk_reg(pf, sts); - - *ts = ns_to_timespec64(time_ns); -} - -/** * ice_ptp_write_init - Set PHC time to provided value * @pf: Board private structure * @ts: timespec structure that holds the new time value @@ -1990,16 +1971,10 @@ ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts, struct ptp_system_timestamp *sts) { struct ice_pf *pf = ptp_info_to_pf(info); - struct ice_hw *hw = &pf->hw; - - if (!ice_ptp_lock(hw)) { - dev_err(ice_pf_to_dev(pf), "PTP failed to get time\n"); - return -EBUSY; - } - - ice_ptp_read_time(pf, ts, sts); - ice_ptp_unlock(hw); + u64 time_ns; + time_ns = ice_ptp_read_src_clk_reg(pf, sts); + *ts = ns_to_timespec64(time_ns); return 0; } diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c index 187ce9b54e..2b9423a173 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c @@ -274,6 +274,9 @@ void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) */ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw) { + struct ice_pf *pf = container_of(hw, struct ice_pf, hw); + + guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock); wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD); ice_flush(hw); } diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c index 5f30fb131f..d367f4c66d 100644 --- a/drivers/net/ethernet/intel/ice/ice_repr.c +++ b/drivers/net/ethernet/intel/ice/ice_repr.c @@ -3,42 +3,51 @@ #include "ice.h" #include "ice_eswitch.h" -#include "ice_devlink.h" +#include "devlink/devlink.h" +#include "devlink/devlink_port.h" #include "ice_sriov.h" #include "ice_tc_lib.h" #include "ice_dcb_lib.h" /** - * ice_repr_get_sw_port_id - get port ID associated with representor - * @repr: pointer to port representor + * ice_repr_inc_tx_stats - increment Tx statistic by one packet + * @repr: repr to increment stats on + * @len: length of the packet + * @xmit_status: value returned by xmit function */ -static int ice_repr_get_sw_port_id(struct ice_repr *repr) +void ice_repr_inc_tx_stats(struct ice_repr *repr, unsigned int len, + int xmit_status) { - return repr->src_vsi->back->hw.port_info->lport; + struct ice_repr_pcpu_stats *stats; + + if (unlikely(xmit_status != NET_XMIT_SUCCESS && + xmit_status != NET_XMIT_CN)) { + this_cpu_inc(repr->stats->tx_drops); + return; + } + + stats = this_cpu_ptr(repr->stats); + u64_stats_update_begin(&stats->syncp); + stats->tx_packets++; + stats->tx_bytes += len; + u64_stats_update_end(&stats->syncp); } /** - * ice_repr_get_phys_port_name - get phys port name - * @netdev: pointer to port representor netdev - * @buf: write here port name - * @len: max length of buf + * ice_repr_inc_rx_stats - increment Rx statistic by one packet + * @netdev: repr netdev to increment stats on + * @len: length of the packet */ -static int -ice_repr_get_phys_port_name(struct net_device *netdev, char *buf, size_t len) +void ice_repr_inc_rx_stats(struct net_device *netdev, unsigned int len) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_repr *repr = np->repr; - int 
res; - - /* Devlink port is registered and devlink core is taking care of name formatting. */ - if (repr->vf->devlink_port.devlink) - return -EOPNOTSUPP; + struct ice_repr *repr = ice_netdev_to_repr(netdev); + struct ice_repr_pcpu_stats *stats; - res = snprintf(buf, len, "pf%dvfr%d", ice_repr_get_sw_port_id(repr), - repr->id); - if (res <= 0) - return -EOPNOTSUPP; - return 0; + stats = this_cpu_ptr(repr->stats); + u64_stats_update_begin(&stats->syncp); + stats->rx_packets++; + stats->rx_bytes += len; + u64_stats_update_end(&stats->syncp); } /** @@ -76,7 +85,7 @@ ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) * ice_netdev_to_repr - Get port representor for given netdevice * @netdev: pointer to port representor netdev */ -struct ice_repr *ice_netdev_to_repr(struct net_device *netdev) +struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev) { struct ice_netdev_priv *np = netdev_priv(netdev); @@ -139,38 +148,35 @@ static int ice_repr_stop(struct net_device *netdev) * ice_repr_sp_stats64 - get slow path stats for port representor * @dev: network interface device structure * @stats: netlink stats structure - * - * RX/TX stats are being swapped here to be consistent with VF stats. In slow - * path, port representor receives data when the corresponding VF is sending it - * (and vice versa), TX and RX bytes/packets are effectively swapped on port - * representor. */ static int ice_repr_sp_stats64(const struct net_device *dev, struct rtnl_link_stats64 *stats) { - struct ice_netdev_priv *np = netdev_priv(dev); - int vf_id = np->repr->vf->vf_id; - struct ice_tx_ring *tx_ring; - struct ice_rx_ring *rx_ring; - u64 pkts, bytes; - - tx_ring = np->vsi->tx_rings[vf_id]; - ice_fetch_u64_stats_per_ring(&tx_ring->ring_stats->syncp, - tx_ring->ring_stats->stats, - &pkts, &bytes); - stats->rx_packets = pkts; - stats->rx_bytes = bytes; - - rx_ring = np->vsi->rx_rings[vf_id]; - ice_fetch_u64_stats_per_ring(&rx_ring->ring_stats->syncp, - rx_ring->ring_stats->stats, - &pkts, &bytes); - stats->tx_packets = pkts; - stats->tx_bytes = bytes; - stats->tx_dropped = rx_ring->ring_stats->rx_stats.alloc_page_failed + - rx_ring->ring_stats->rx_stats.alloc_buf_failed; - + struct ice_repr *repr = ice_netdev_to_repr(dev); + int i; + + for_each_possible_cpu(i) { + u64 tbytes, tpkts, tdrops, rbytes, rpkts; + struct ice_repr_pcpu_stats *repr_stats; + unsigned int start; + + repr_stats = per_cpu_ptr(repr->stats, i); + do { + start = u64_stats_fetch_begin(&repr_stats->syncp); + tbytes = repr_stats->tx_bytes; + tpkts = repr_stats->tx_packets; + tdrops = repr_stats->tx_drops; + rbytes = repr_stats->rx_bytes; + rpkts = repr_stats->rx_packets; + } while (u64_stats_fetch_retry(&repr_stats->syncp, start)); + + stats->tx_bytes += tbytes; + stats->tx_packets += tpkts; + stats->tx_dropped += tdrops; + stats->rx_bytes += rbytes; + stats->rx_packets += rpkts; + } return 0; } @@ -240,7 +246,6 @@ ice_repr_setup_tc(struct net_device *netdev, enum tc_setup_type type, } static const struct net_device_ops ice_repr_netdev_ops = { - .ndo_get_phys_port_name = ice_repr_get_phys_port_name, .ndo_get_stats64 = ice_repr_get_stats64, .ndo_open = ice_repr_open, .ndo_stop = ice_repr_stop, @@ -291,7 +296,7 @@ static void ice_repr_remove_node(struct devlink_port *devlink_port) */ static void ice_repr_rem(struct ice_repr *repr) { - kfree(repr->q_vector); + free_percpu(repr->stats); free_netdev(repr->netdev); kfree(repr); } @@ -331,7 +336,6 @@ static void ice_repr_set_tx_topology(struct ice_pf *pf) static struct 
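
The representor statistics move from ring-derived counters to the standard per-CPU u64_stats pattern: each writer touches only its own CPU's counters inside u64_stats_update_begin()/end(), and readers sum all CPUs under the fetch_begin/fetch_retry seqcount so 64-bit values cannot be read torn on 32-bit architectures. A self-contained sketch of the pattern with illustrative names (allocation would mirror the driver's netdev_alloc_pcpu_stats()):

	#include <linux/percpu.h>
	#include <linux/u64_stats_sync.h>

	struct pcpu_cnt {
		struct u64_stats_sync syncp;
		u64 pkts;
	};

	static void cnt_inc(struct pcpu_cnt __percpu *stats, u64 n)
	{
		struct pcpu_cnt *s = this_cpu_ptr(stats);

		u64_stats_update_begin(&s->syncp);
		s->pkts += n;		/* lockless on the hot path */
		u64_stats_update_end(&s->syncp);
	}

	static u64 cnt_read(struct pcpu_cnt __percpu *stats)
	{
		u64 total = 0;
		int cpu;

		for_each_possible_cpu(cpu) {
			struct pcpu_cnt *s = per_cpu_ptr(stats, cpu);
			unsigned int start;
			u64 v;

			do {
				start = u64_stats_fetch_begin(&s->syncp);
				v = s->pkts;
			} while (u64_stats_fetch_retry(&s->syncp, start));
			total += v;
		}
		return total;
	}
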
ice_repr * ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac) { - struct ice_q_vector *q_vector; struct ice_netdev_priv *np; struct ice_repr *repr; int err; @@ -346,23 +350,22 @@ ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac) goto err_alloc; } + repr->stats = netdev_alloc_pcpu_stats(struct ice_repr_pcpu_stats); + if (!repr->stats) { + err = -ENOMEM; + goto err_stats; + } + repr->src_vsi = src_vsi; + repr->id = src_vsi->vsi_num; np = netdev_priv(repr->netdev); np->repr = repr; - q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL); - if (!q_vector) { - err = -ENOMEM; - goto err_alloc_q_vector; - } - repr->q_vector = q_vector; - repr->q_id = repr->id; - ether_addr_copy(repr->parent_mac, parent_mac); return repr; -err_alloc_q_vector: +err_stats: free_netdev(repr->netdev); err_alloc: kfree(repr); @@ -439,15 +442,3 @@ void ice_repr_stop_tx_queues(struct ice_repr *repr) netif_carrier_off(repr->netdev); netif_tx_stop_all_queues(repr->netdev); } - -/** - * ice_repr_set_traffic_vsi - set traffic VSI for port representor - * @repr: repr on with VSI will be set - * @vsi: pointer to VSI that will be used by port representor to pass traffic - */ -void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi) -{ - struct ice_netdev_priv *np = netdev_priv(repr->netdev); - - np->vsi = vsi; -} diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h index f9aede3157..cff730b15c 100644 --- a/drivers/net/ethernet/intel/ice/ice_repr.h +++ b/drivers/net/ethernet/intel/ice/ice_repr.h @@ -6,20 +6,24 @@ #include <net/dst_metadata.h> +struct ice_repr_pcpu_stats { + struct u64_stats_sync syncp; + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + u64 tx_drops; +}; + struct ice_repr { struct ice_vsi *src_vsi; struct ice_vf *vf; - struct ice_q_vector *q_vector; struct net_device *netdev; struct metadata_dst *dst; struct ice_esw_br_port *br_port; - int q_id; + struct ice_repr_pcpu_stats __percpu *stats; u32 id; u8 parent_mac[ETH_ALEN]; -#ifdef CONFIG_ICE_SWITCHDEV - /* info about slow path rule */ - struct ice_rule_query_data sp_rule; -#endif }; struct ice_repr *ice_repr_add_vf(struct ice_vf *vf); @@ -28,10 +32,12 @@ void ice_repr_rem_vf(struct ice_repr *repr); void ice_repr_start_tx_queues(struct ice_repr *repr); void ice_repr_stop_tx_queues(struct ice_repr *repr); -void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi); - -struct ice_repr *ice_netdev_to_repr(struct net_device *netdev); +struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev); bool ice_is_port_repr_netdev(const struct net_device *netdev); struct ice_repr *ice_repr_get_by_vsi(struct ice_vsi *vsi); + +void ice_repr_inc_tx_stats(struct ice_repr *repr, unsigned int len, + int xmit_status); +void ice_repr_inc_rx_stats(struct net_device *netdev, unsigned int len); #endif diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index a1525992d1..ecf8f5d602 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -1128,12 +1128,11 @@ u8 ice_sched_get_vsi_layer(struct ice_hw *hw) * 5 or less sw_entry_point_layer */ /* calculate the VSI layer based on number of layers. 
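
The rewritten ice_sched_get_vsi_layer()/ice_sched_get_agg_layer() below trade the old open-ended arithmetic for the two topologies the hardware actually exposes: with 9 layers the VSI and aggregator sit at fixed offsets from the leaves, and with 5 layers the VSI collapses onto the queue-group layer, which is why ice_sched_get_free_qparent() can then return the VSI node directly. An illustrative helper with the offsets passed in rather than hard-coded, since their exact values live in ice_sched.h:

	#define SCHED_5_LAYERS	5
	#define SCHED_9_LAYERS	9

	static unsigned int vsi_layer(unsigned int num_layers,
				      unsigned int vsi_off,
				      unsigned int qgrp_off,
				      unsigned int entry_point)
	{
		if (num_layers == SCHED_9_LAYERS)
			return num_layers - vsi_off;	/* dedicated layer */
		if (num_layers == SCHED_5_LAYERS)
			return num_layers - qgrp_off;	/* VSI == qgroup */
		return entry_point;			/* fallback */
	}
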
*/ - if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) { - u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET; - - if (layer > hw->sw_entry_point_layer) - return layer; - } + if (hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS) + return hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET; + else if (hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS) + /* qgroup and VSI layers are same */ + return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET; return hw->sw_entry_point_layer; } @@ -1150,13 +1149,10 @@ u8 ice_sched_get_agg_layer(struct ice_hw *hw) * 7 or less sw_entry_point_layer */ /* calculate the aggregator layer based on number of layers. */ - if (hw->num_tx_sched_layers > ICE_AGG_LAYER_OFFSET + 1) { - u8 layer = hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET; - - if (layer > hw->sw_entry_point_layer) - return layer; - } - return hw->sw_entry_point_layer; + if (hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS) + return hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET; + else + return hw->sw_entry_point_layer; } /** @@ -1510,10 +1506,11 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc, { struct ice_sched_node *vsi_node, *qgrp_node; struct ice_vsi_ctx *vsi_ctx; + u8 qgrp_layer, vsi_layer; u16 max_children; - u8 qgrp_layer; qgrp_layer = ice_sched_get_qgrp_layer(pi->hw); + vsi_layer = ice_sched_get_vsi_layer(pi->hw); max_children = pi->hw->max_children[qgrp_layer]; vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); @@ -1524,6 +1521,12 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc, if (!vsi_node) return NULL; + /* If the queue group and VSI layer are same then queues + * are all attached directly to VSI + */ + if (qgrp_layer == vsi_layer) + return vsi_node; + /* get the first queue group node from VSI sub-tree */ qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer); while (qgrp_node) { @@ -3199,7 +3202,7 @@ ice_sched_add_rl_profile(struct ice_port_info *pi, u8 profile_type; int status; - if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM) + if (!pi || layer_num >= pi->hw->num_tx_sched_layers) return NULL; switch (rl_type) { case ICE_MIN_BW: @@ -3215,8 +3218,6 @@ ice_sched_add_rl_profile(struct ice_port_info *pi, return NULL; } - if (!pi) - return NULL; hw = pi->hw; list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num], list_entry) @@ -3446,7 +3447,7 @@ ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type, struct ice_aqc_rl_profile_info *rl_prof_elem; int status = 0; - if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM) + if (layer_num >= pi->hw->num_tx_sched_layers) return -EINVAL; /* Check the existing list for RL profile */ list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num], diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h index 1aef05ea5a..7b668083be 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.h +++ b/drivers/net/ethernet/intel/ice/ice_sched.h @@ -6,6 +6,17 @@ #include "ice_common.h" +/** + * DOC: ice_sched.h + * + * This header file stores everything that is needed for broadly understood + * scheduler. It consists of defines related to layers, structures related to + * aggregator, functions declarations and others. 
+ */ + +#define ICE_SCHED_5_LAYERS 5 +#define ICE_SCHED_9_LAYERS 9 + #define SCHED_NODE_NAME_MAX_LEN 32 #define ICE_QGRP_LAYER_OFFSET 2 diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index a958fcf3e6..067712f492 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -170,8 +170,6 @@ void ice_free_vfs(struct ice_pf *pf) else dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n"); - ice_eswitch_reserve_cp_queues(pf, -ice_get_num_vfs(pf)); - mutex_lock(&vfs->table_lock); ice_for_each_vf(pf, bkt, vf) { @@ -227,7 +225,7 @@ static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf) struct ice_vsi *vsi; params.type = ICE_VSI_VF; - params.pi = ice_vf_get_port_info(vf); + params.port_info = ice_vf_get_port_info(vf); params.vf = vf; params.flags = ICE_VSI_FLAG_INIT; @@ -362,13 +360,14 @@ static void ice_ena_vf_mappings(struct ice_vf *vf) * @vf: VF to calculate the register index for * @q_vector: a q_vector associated to the VF */ -int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector) +void ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector) { if (!vf || !q_vector) - return -EINVAL; + return; /* always add one to account for the OICR being the first MSIX */ - return vf->first_vector_idx + q_vector->v_idx + 1; + q_vector->vf_reg_idx = q_vector->v_idx + ICE_NONQ_VECS_VF; + q_vector->reg_idx = vf->first_vector_idx + q_vector->vf_reg_idx; } /** @@ -833,11 +832,6 @@ static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs) pci_dev_get(vfdev); - /* set default number of MSI-X */ - vf->num_msix = pf->vfs.num_msix_per; - vf->num_vf_qs = pf->vfs.num_qps_per; - ice_vc_set_default_allowlist(vf); - hash_add_rcu(vfs->table, &vf->entry, vf_id); } @@ -897,7 +891,6 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) goto err_unroll_sriov; } - ice_eswitch_reserve_cp_queues(pf, num_vfs); ret = ice_start_vfs(pf); if (ret) { dev_err(dev, "Failed to start %d VFs, err %d\n", num_vfs, ret); @@ -1869,6 +1862,24 @@ void ice_print_vf_rx_mdd_event(struct ice_vf *vf) } /** + * ice_print_vf_tx_mdd_event - print VF Tx malicious driver detect event + * @vf: pointer to the VF structure + */ +void ice_print_vf_tx_mdd_event(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + struct device *dev; + + dev = ice_pf_to_dev(pf); + + dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n", + vf->mdd_tx_events.count, pf->hw.pf_id, vf->vf_id, + vf->dev_lan_addr, + test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags) + ? 
"on" : "off"); +} + +/** * ice_print_vfs_mdd_events - print VFs malicious driver detect event * @pf: pointer to the PF structure * @@ -1876,8 +1887,6 @@ void ice_print_vf_rx_mdd_event(struct ice_vf *vf) */ void ice_print_vfs_mdd_events(struct ice_pf *pf) { - struct device *dev = ice_pf_to_dev(pf); - struct ice_hw *hw = &pf->hw; struct ice_vf *vf; unsigned int bkt; @@ -1904,10 +1913,7 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf) if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) { vf->mdd_tx_events.last_printed = vf->mdd_tx_events.count; - - dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n", - vf->mdd_tx_events.count, hw->pf_id, vf->vf_id, - vf->dev_lan_addr); + ice_print_vf_tx_mdd_event(vf); } } mutex_unlock(&pf->vfs.table_lock); diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h index 8488df38b5..8f22313474 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.h +++ b/drivers/net/ethernet/intel/ice/ice_sriov.h @@ -49,7 +49,7 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state); int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena); -int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector); +void ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector); int ice_get_vf_stats(struct net_device *netdev, int vf_id, @@ -58,6 +58,7 @@ void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event); void ice_print_vfs_mdd_events(struct ice_pf *pf); void ice_print_vf_rx_mdd_event(struct ice_vf *vf); +void ice_print_vf_tx_mdd_event(struct ice_vf *vf); bool ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto); u32 ice_sriov_get_vf_total_msix(struct pci_dev *pdev); @@ -69,6 +70,7 @@ static inline void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) { } static inline void ice_print_vfs_mdd_events(struct ice_pf *pf) { } static inline void ice_print_vf_rx_mdd_event(struct ice_vf *vf) { } +static inline void ice_print_vf_tx_mdd_event(struct ice_vf *vf) { } static inline void ice_restore_all_vfs_msi_state(struct ice_pf *pf) { } static inline int @@ -130,11 +132,10 @@ ice_set_vf_bw(struct net_device __always_unused *netdev, return -EOPNOTSUPP; } -static inline int +static inline void ice_calc_vf_reg_idx(struct ice_vf __always_unused *vf, struct ice_q_vector __always_unused *q_vector) { - return 0; } static inline int diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 1472385eb6..ffd6c42bda 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -42,6 +42,7 @@ enum { ICE_PKT_KMALLOC = BIT(9), ICE_PKT_PPPOE = BIT(10), ICE_PKT_L2TPV3 = BIT(11), + ICE_PKT_PFCP = BIT(12), }; struct ice_dummy_pkt_offsets { @@ -1110,6 +1111,77 @@ ICE_DECLARE_PKT_TEMPLATE(ipv6_gtp) = { 0x00, 0x00, }; +ICE_DECLARE_PKT_OFFSETS(pfcp_session_ipv4) = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_UDP_ILOS, 34 }, + { ICE_PFCP, 42 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +ICE_DECLARE_PKT_TEMPLATE(pfcp_session_ipv4) = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x08, 0x00, /* ICE_ETYPE_OL 12 */ + + 0x45, 0x00, 0x00, 0x2c, /* ICE_IPV4_OFOS 14 */ + 0x00, 0x01, 0x00, 0x00, + 0x00, 0x11, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x22, 0x65, /* 
ICE_UDP_ILOS 34 */ + 0x00, 0x18, 0x00, 0x00, + + 0x21, 0x01, 0x00, 0x0c, /* ICE_PFCP 42 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 byte alignment */ +}; + +ICE_DECLARE_PKT_OFFSETS(pfcp_session_ipv6) = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV6_OFOS, 14 }, + { ICE_UDP_ILOS, 54 }, + { ICE_PFCP, 62 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +ICE_DECLARE_PKT_TEMPLATE(pfcp_session_ipv6) = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x86, 0xdd, /* ICE_ETYPE_OL 12 */ + + 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */ + 0x00, 0x10, 0x11, 0x00, /* Next header UDP */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x22, 0x65, /* ICE_UDP_ILOS 54 */ + 0x00, 0x18, 0x00, 0x00, + + 0x21, 0x01, 0x00, 0x0c, /* ICE_PFCP 62 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 byte alignment */ +}; + ICE_DECLARE_PKT_OFFSETS(pppoe_ipv4_tcp) = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, @@ -1343,6 +1415,8 @@ static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = { ICE_PKT_PROFILE(ipv4_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU), ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPC | ICE_PKT_OUTER_IPV6), ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPC), + ICE_PKT_PROFILE(pfcp_session_ipv6, ICE_PKT_PFCP | ICE_PKT_OUTER_IPV6), + ICE_PKT_PROFILE(pfcp_session_ipv4, ICE_PKT_PFCP), ICE_PKT_PROFILE(pppoe_ipv6_udp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP), ICE_PKT_PROFILE(pppoe_ipv6_tcp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6), @@ -2076,6 +2150,18 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 *r_assoc, } /** + * ice_init_chk_recipe_reuse_support - check if recipe reuse is supported + * @hw: pointer to the hardware structure + */ +void ice_init_chk_recipe_reuse_support(struct ice_hw *hw) +{ + struct ice_nvm_info *nvm = &hw->flash.nvm; + + hw->recp_reuse = (nvm->major == 0x4 && nvm->minor >= 0x30) || + nvm->major > 0x4; +} + +/** * ice_alloc_recipe - add recipe resource * @hw: pointer to the hardware structure * @rid: recipe ID returned as response to AQ call @@ -2084,12 +2170,16 @@ int ice_alloc_recipe(struct ice_hw *hw, u16 *rid) { DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1); u16 buf_len = __struct_size(sw_buf); + u16 res_type; int status; sw_buf->num_elems = cpu_to_le16(1); - sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE << - ICE_AQC_RES_TYPE_S) | - ICE_AQC_RES_TYPE_FLAG_SHARED); + res_type = FIELD_PREP(ICE_AQC_RES_TYPE_M, ICE_AQC_RES_TYPE_RECIPE); + if (hw->recp_reuse) + res_type |= ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_SHARED; + else + res_type |= ICE_AQC_RES_TYPE_FLAG_SHARED; + sw_buf->res_type = cpu_to_le16(res_type); status = ice_aq_alloc_free_res(hw, sw_buf, buf_len, ice_aqc_opc_alloc_res); if (!status) @@ -2099,6 +2189,70 @@ int ice_alloc_recipe(struct ice_hw *hw, u16 *rid) } /** + * ice_free_recipe_res - free recipe resource + * @hw: pointer to the hardware structure + * @rid: recipe ID to free + * + * Return: 0 on success, and others on error + */ +static int ice_free_recipe_res(struct ice_hw *hw, u16 rid) +{ + return ice_free_hw_res(hw, ICE_AQC_RES_TYPE_RECIPE, 1, &rid); +} + +/** + * ice_release_recipe_res - disassociate and free recipe resource + * @hw: pointer to the 
hardware structure + * @recp: the recipe struct resource to unassociate and free + * + * Return: 0 on success, and others on error + */ +static int ice_release_recipe_res(struct ice_hw *hw, + struct ice_sw_recipe *recp) +{ + DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES); + struct ice_switch_info *sw = hw->switch_info; + u64 recp_assoc; + u32 rid, prof; + int status; + + for_each_set_bit(rid, recp->r_bitmap, ICE_MAX_NUM_RECIPES) { + for_each_set_bit(prof, recipe_to_profile[rid], + ICE_MAX_NUM_PROFILES) { + status = ice_aq_get_recipe_to_profile(hw, prof, + &recp_assoc, + NULL); + if (status) + return status; + + bitmap_from_arr64(r_bitmap, &recp_assoc, + ICE_MAX_NUM_RECIPES); + bitmap_andnot(r_bitmap, r_bitmap, recp->r_bitmap, + ICE_MAX_NUM_RECIPES); + bitmap_to_arr64(&recp_assoc, r_bitmap, + ICE_MAX_NUM_RECIPES); + ice_aq_map_recipe_to_profile(hw, prof, + recp_assoc, NULL); + + clear_bit(rid, profile_to_recipe[prof]); + clear_bit(prof, recipe_to_profile[rid]); + } + + status = ice_free_recipe_res(hw, rid); + if (status) + return status; + + sw->recp_list[rid].recp_created = false; + sw->recp_list[rid].adv_rule = false; + memset(&sw->recp_list[rid].lkup_exts, 0, + sizeof(sw->recp_list[rid].lkup_exts)); + clear_bit(rid, recp->r_bitmap); + } + + return 0; +} + +/** * ice_get_recp_to_prof_map - updates recipe to profile mapping * @hw: pointer to hardware structure * @@ -2147,6 +2301,7 @@ ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf, * @recps: struct that we need to populate * @rid: recipe ID that we are populating * @refresh_required: true if we should get recipe to profile mapping from FW + * @is_add: flag of adding recipe * * This function is used to populate all the necessary entries into our * bookkeeping so that we have a current list of all the recipes that are @@ -2154,7 +2309,7 @@ ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf, */ static int ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, - bool *refresh_required) + bool *refresh_required, bool is_add) { DECLARE_BITMAP(result_bm, ICE_MAX_FV_WORDS); struct ice_aqc_recipe_data_elem *tmp; @@ -2258,10 +2413,10 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, /* Propagate some data to the recipe database */ recps[idx].is_root = !!is_root; recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority; - recps[idx].need_pass_l2 = root_bufs.content.act_ctrl & - ICE_AQ_RECIPE_ACT_NEED_PASS_L2; - recps[idx].allow_pass_l2 = root_bufs.content.act_ctrl & - ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2; + recps[idx].need_pass_l2 = !!(root_bufs.content.act_ctrl & + ICE_AQ_RECIPE_ACT_NEED_PASS_L2); + recps[idx].allow_pass_l2 = !!(root_bufs.content.act_ctrl & + ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2); bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS); if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) { recps[idx].chain_idx = root_bufs.content.result_indx & @@ -2271,8 +2426,12 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, recps[idx].chain_idx = ICE_INVAL_CHAIN_IND; } - if (!is_root) + if (!is_root) { + if (hw->recp_reuse && is_add) + recps[idx].recp_created = true; + continue; + } /* Only do the following for root recipes entries */ memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap, @@ -2296,7 +2455,8 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, /* Copy result indexes */ bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS); - recps[rid].recp_created = true; + if (is_add) + recps[rid].recp_created = 
true; err_unroll: kfree(tmp); @@ -2447,6 +2607,9 @@ static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi) fi->lan_en = true; } } + + if (fi->flag & ICE_FLTR_TX_ONLY) + fi->lan_en = false; } /** @@ -3823,6 +3986,7 @@ ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set, } else if (f_info.flag & ICE_FLTR_TX) { f_info.src_id = ICE_SRC_ID_VSI; f_info.src = hw_vsi_id; + f_info.flag |= ICE_FLTR_TX_ONLY; } f_list_entry.fltr_info = f_info; @@ -4530,6 +4694,7 @@ static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = { ICE_PROTOCOL_ENTRY(ICE_NVGRE, 0, 2, 4, 6), ICE_PROTOCOL_ENTRY(ICE_GTP, 8, 10, 12, 14, 16, 18, 20, 22), ICE_PROTOCOL_ENTRY(ICE_GTP_NO_PAY, 8, 10, 12, 14), + ICE_PROTOCOL_ENTRY(ICE_PFCP, 8, 10, 12, 14, 16, 18, 20, 22), ICE_PROTOCOL_ENTRY(ICE_PPPOE, 0, 2, 4, 6), ICE_PROTOCOL_ENTRY(ICE_L2TPV3, 0, 2, 4, 6, 8, 10), ICE_PROTOCOL_ENTRY(ICE_VLAN_EX, 2, 0), @@ -4563,6 +4728,7 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = { { ICE_NVGRE, ICE_GRE_OF_HW }, { ICE_GTP, ICE_UDP_OF_HW }, { ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW }, + { ICE_PFCP, ICE_UDP_ILOS_HW }, { ICE_PPPOE, ICE_PPPOE_HW }, { ICE_L2TPV3, ICE_L2TPV3_HW }, { ICE_VLAN_EX, ICE_VLAN_OF_HW }, @@ -4575,12 +4741,13 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = { * @hw: pointer to the hardware structure * @lkup_exts: extension sequence to match * @rinfo: information regarding the rule e.g. priority and action info + * @is_add: flag of adding recipe * * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found. */ static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts, - const struct ice_adv_rule_info *rinfo) + const struct ice_adv_rule_info *rinfo, bool is_add) { bool refresh_required = true; struct ice_sw_recipe *recp; @@ -4594,11 +4761,12 @@ ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts, * entry update it in our SW bookkeeping and continue with the * matching. 
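
ice_find_recp() now refreshes its bookkeeping from firmware only when hw->recp_reuse is set, because under recipe reuse a recipe can outlive the entity that created it. The lifecycle the subscribe-shared flags implement reduces to reference counting: the first user allocates with ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_SHARED, later users call ice_subscribe_recipe(), and removing the last advanced rule triggers ice_release_recipe_res(). A refcount-style sketch of that lifecycle; the driver expresses it through AQ alloc/free calls rather than an explicit counter:

	struct recipe_res { int refs; bool created; };

	static void recipe_get(struct recipe_res *r)
	{
		if (!r->created) {	/* first user: alloc_res */
			r->created = true;
			r->refs = 1;
			return;
		}
		r->refs++;		/* later users: subscribe */
	}

	static void recipe_put(struct recipe_res *r)
	{
		if (r->created && --r->refs == 0)
			r->created = false;	/* last rule gone: free_res */
	}
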
*/ - if (!recp[i].recp_created) + if (hw->recp_reuse) { if (ice_get_recp_frm_fw(hw, hw->switch_info->recp_list, i, - &refresh_required)) + &refresh_required, is_add)) continue; + } /* Skip inverse action recipes */ if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl & @@ -5270,6 +5438,9 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo, case ICE_SW_TUN_GTPC: prof_type = ICE_PROF_TUN_GTPC; break; + case ICE_SW_TUN_PFCP: + prof_type = ICE_PROF_TUN_PFCP; + break; case ICE_SW_TUN_AND_NON_TUN: default: prof_type = ICE_PROF_ALL; @@ -5280,6 +5451,49 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo, } /** + * ice_subscribe_recipe - subscribe to an existing recipe + * @hw: pointer to the hardware structure + * @rid: recipe ID to subscribe to + * + * Return: 0 on success, and others on error + */ +static int ice_subscribe_recipe(struct ice_hw *hw, u16 rid) +{ + DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1); + u16 buf_len = __struct_size(sw_buf); + u16 res_type; + int status; + + /* Prepare buffer to allocate resource */ + sw_buf->num_elems = cpu_to_le16(1); + res_type = FIELD_PREP(ICE_AQC_RES_TYPE_M, ICE_AQC_RES_TYPE_RECIPE) | + ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_SHARED | + ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_CTL; + sw_buf->res_type = cpu_to_le16(res_type); + + sw_buf->elem[0].e.sw_resp = cpu_to_le16(rid); + + status = ice_aq_alloc_free_res(hw, sw_buf, buf_len, + ice_aqc_opc_alloc_res); + + return status; +} + +/** + * ice_subscribable_recp_shared - share an existing subscribable recipe + * @hw: pointer to the hardware structure + * @rid: recipe ID to subscribe to + */ +static void ice_subscribable_recp_shared(struct ice_hw *hw, u16 rid) +{ + struct ice_sw_recipe *recps = hw->switch_info->recp_list; + u16 sub_rid; + + for_each_set_bit(sub_rid, recps[rid].r_bitmap, ICE_MAX_NUM_RECIPES) + ice_subscribe_recipe(hw, sub_rid); +} + +/** * ice_add_adv_recipe - Add an advanced recipe that is not part of the default * @hw: pointer to hardware structure * @lkups: lookup elements or match criteria for the advanced recipe, one @@ -5301,6 +5515,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, struct ice_sw_fv_list_entry *tmp; struct ice_sw_recipe *rm; int status = 0; + u16 rid_tmp; u8 i; if (!lkups_cnt) @@ -5378,10 +5593,14 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, } /* Look for a recipe which matches our requested fv / mask list */ - *rid = ice_find_recp(hw, lkup_exts, rinfo); - if (*rid < ICE_MAX_NUM_RECIPES) + *rid = ice_find_recp(hw, lkup_exts, rinfo, true); + if (*rid < ICE_MAX_NUM_RECIPES) { /* Success if found a recipe that match the existing criteria */ + if (hw->recp_reuse) + ice_subscribable_recp_shared(hw, *rid); + goto err_unroll; + } rm->tun_type = rinfo->tun_type; /* Recipe we need does not exist, add a recipe */ @@ -5400,14 +5619,14 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id, &recp_assoc, NULL); if (status) - goto err_unroll; + goto err_free_recipe; bitmap_from_arr64(r_bitmap, &recp_assoc, ICE_MAX_NUM_RECIPES); bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap, ICE_MAX_NUM_RECIPES); status = ice_acquire_change_lock(hw, ICE_RES_WRITE); if (status) - goto err_unroll; + goto err_free_recipe; bitmap_to_arr64(&recp_assoc, r_bitmap, ICE_MAX_NUM_RECIPES); status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id, @@ -5415,7 +5634,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct 
@@ -5400,14 +5619,14 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
 		status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
 						      &recp_assoc, NULL);
 		if (status)
-			goto err_unroll;
+			goto err_free_recipe;
 
 		bitmap_from_arr64(r_bitmap, &recp_assoc, ICE_MAX_NUM_RECIPES);
 		bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap,
 			  ICE_MAX_NUM_RECIPES);
 		status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
 		if (status)
-			goto err_unroll;
+			goto err_free_recipe;
 
 		bitmap_to_arr64(&recp_assoc, r_bitmap, ICE_MAX_NUM_RECIPES);
 		status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
@@ -5415,7 +5634,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
 		ice_release_change_lock(hw);
 
 		if (status)
-			goto err_unroll;
+			goto err_free_recipe;
 
 		/* Update profile to recipe bitmap array */
 		bitmap_copy(profile_to_recipe[fvit->profile_id], r_bitmap,
@@ -5429,6 +5648,16 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
 	*rid = rm->root_rid;
 	memcpy(&hw->switch_info->recp_list[*rid].lkup_exts, lkup_exts,
 	       sizeof(*lkup_exts));
+	goto err_unroll;
+
+err_free_recipe:
+	if (hw->recp_reuse) {
+		for_each_set_bit(rid_tmp, rm->r_bitmap, ICE_MAX_NUM_RECIPES) {
+			if (!ice_free_recipe_res(hw, rid_tmp))
+				clear_bit(rid_tmp, rm->r_bitmap);
+		}
+	}
+
 err_unroll:
 	list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) {
 		list_del(&r_entry->l_entry);
@@ -5554,6 +5783,9 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
 	case ICE_SW_TUN_VXLAN:
 		match |= ICE_PKT_TUN_UDP;
 		break;
+	case ICE_SW_TUN_PFCP:
+		match |= ICE_PKT_PFCP;
+		break;
 	default:
 		break;
 	}
@@ -5694,6 +5926,9 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
 	case ICE_GTP:
 		len = sizeof(struct ice_udp_gtp_hdr);
 		break;
+	case ICE_PFCP:
+		len = sizeof(struct ice_pfcp_hdr);
+		break;
 	case ICE_PPPOE:
 		len = sizeof(struct ice_pppoe_hdr);
 		break;
@@ -6442,7 +6677,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
 		return -EIO;
 	}
 
-	rid = ice_find_recp(hw, &lkup_exts, rinfo);
+	rid = ice_find_recp(hw, &lkup_exts, rinfo, false);
 	/* If did not find a recipe that match the existing criteria */
 	if (rid == ICE_MAX_NUM_RECIPES)
 		return -EINVAL;
@@ -6486,14 +6721,21 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
 					  ice_aqc_opc_remove_sw_rules, NULL);
 	if (!status || status == -ENOENT) {
 		struct ice_switch_info *sw = hw->switch_info;
+		struct ice_sw_recipe *r_list = sw->recp_list;
 
 		mutex_lock(rule_lock);
 		list_del(&list_elem->list_entry);
 		devm_kfree(ice_hw_to_dev(hw), list_elem->lkups);
 		devm_kfree(ice_hw_to_dev(hw), list_elem);
 		mutex_unlock(rule_lock);
-		if (list_empty(&sw->recp_list[rid].filt_rules))
-			sw->recp_list[rid].adv_rule = false;
+		if (list_empty(&r_list[rid].filt_rules)) {
+			r_list[rid].adv_rule = false;
+
+			/* All rules for this recipe are now removed */
+			if (hw->recp_reuse)
+				ice_release_recipe_res(hw,
+						       &r_list[rid]);
+		}
 	}
 	kfree(s_rule);
 }
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index 89ffa1b51b..ad98e98c81 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -8,8 +8,9 @@
 #define ICE_SW_CFG_MAX_BUF_LEN 2048
 #define ICE_DFLT_VSI_INVAL 0xff
-#define ICE_FLTR_RX BIT(0)
-#define ICE_FLTR_TX BIT(1)
+#define ICE_FLTR_RX		BIT(0)
+#define ICE_FLTR_TX		BIT(1)
+#define ICE_FLTR_TX_ONLY	BIT(2)
 #define ICE_VSI_INVAL_ID 0xffff
 #define ICE_INVAL_Q_HANDLE 0xFFFF
@@ -21,6 +22,8 @@
 #define ICE_PROFID_IPV6_GTPC_NO_TEID		45
 #define ICE_PROFID_IPV6_GTPU_TEID		46
 #define ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER	70
+#define ICE_PROFID_IPV4_PFCP_NODE		79
+#define ICE_PROFID_IPV6_PFCP_SESSION		82
 
 #define ICE_SW_RULE_VSI_LIST_SIZE(s, n) struct_size((s), vsi, (n))
 #define ICE_SW_RULE_RX_TX_HDR_SIZE(s, l) struct_size((s), hdr_data, (l))
@@ -429,5 +432,6 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 *r_assoc,
 int ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id,
 				 u64 r_assoc, struct ice_sq_cd *cd);
+void ice_init_chk_recipe_reuse_support(struct ice_hw *hw);
 
 #endif /* _ICE_SWITCH_H_ */
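The new err_free_recipe label above sits in front of err_unroll, so a failure after recipes were allocated first releases the per-recipe resources and then falls through to the generic unwind. A compact standalone sketch of that layered-goto idiom, simplified and not the driver code:

#include <stdio.h>

static int alloc_a(void) { return 0; }
static int alloc_b(void) { return -1; }	/* simulate a failure */
static void free_a(void) { puts("free a"); }

static int setup(void)
{
	int err;

	err = alloc_a();
	if (err)
		goto err_unroll;	/* nothing extra to free yet */

	err = alloc_b();
	if (err)
		goto err_free_a;	/* undo only what succeeded */

	return 0;

err_free_a:
	free_a();	/* deepest cleanup first... */
err_unroll:
	/* ...then fall through to the shared unwind */
	return err;
}

int main(void)
{
	return setup() ? 1 : 0;
}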
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index 688ccb0615..8bd24b33f3 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -37,7 +37,10 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
 	if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC)
 		lkups_cnt++;
 
-	if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS)
+	if (flags & ICE_TC_FLWR_FIELD_GTP_OPTS)
+		lkups_cnt++;
+
+	if (flags & ICE_TC_FLWR_FIELD_PFCP_OPTS)
 		lkups_cnt++;
 
 	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
@@ -140,6 +143,8 @@ ice_proto_type_from_tunnel(enum ice_tunnel_type type)
 		return ICE_GTP;
 	case TNL_GTPC:
 		return ICE_GTP_NO_PAY;
+	case TNL_PFCP:
+		return ICE_PFCP;
 	default:
 		return 0;
 	}
@@ -159,6 +164,8 @@ ice_sw_type_from_tunnel(enum ice_tunnel_type type)
 		return ICE_SW_TUN_GTPU;
 	case TNL_GTPC:
 		return ICE_SW_TUN_GTPC;
+	case TNL_PFCP:
+		return ICE_SW_TUN_PFCP;
 	default:
 		return ICE_NON_TUN;
 	}
@@ -221,8 +228,7 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
 		i++;
 	}
 
-	if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS &&
-	    (fltr->tunnel_type == TNL_GTPU || fltr->tunnel_type == TNL_GTPC)) {
+	if (flags & ICE_TC_FLWR_FIELD_GTP_OPTS) {
 		list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type);
 
 		if (fltr->gtp_pdu_info_masks.pdu_type) {
@@ -239,6 +245,22 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
 		i++;
 	}
 
+	if (flags & ICE_TC_FLWR_FIELD_PFCP_OPTS) {
+		struct ice_pfcp_hdr *hdr_h, *hdr_m;
+
+		hdr_h = &list[i].h_u.pfcp_hdr;
+		hdr_m = &list[i].m_u.pfcp_hdr;
+		list[i].type = ICE_PFCP;
+
+		hdr_h->flags = fltr->pfcp_meta_keys.type;
+		hdr_m->flags = fltr->pfcp_meta_masks.type & 0x01;
+
+		hdr_h->seid = fltr->pfcp_meta_keys.seid;
+		hdr_m->seid = fltr->pfcp_meta_masks.seid;
+
+		i++;
+	}
+
 	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
 		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV4)) {
 		list[i].type = ice_proto_type_from_ipv4(false);
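In the ICE_TC_FLWR_FIELD_PFCP_OPTS block above, each lookup element carries a header-shaped key (h_u) and mask (m_u), and masking the PFCP type with 0x01 confines the match to the S (SEID-present) bit of the first flags byte. A trimmed-down model of filling such a key/mask pair; the struct layout here is illustrative only, not the real ice_pfcp_hdr:

#include <stdint.h>

struct pfcp_hdr_model {
	uint8_t  flags;	/* low bit = S: SEID present */
	uint64_t seid;
};

struct lkup_elem_model {
	struct pfcp_hdr_model key;	/* values to match (h_u) */
	struct pfcp_hdr_model mask;	/* which bits count (m_u) */
};

static void fill_pfcp_lkup(struct lkup_elem_model *e,
			   uint8_t type, uint64_t seid, uint64_t seid_mask)
{
	e->key.flags  = type;
	e->mask.flags = 0x01;		/* match only the S bit */
	e->key.seid   = seid;
	e->mask.seid  = seid_mask;	/* all-zero mask disables the SEID match */
}

int main(void)
{
	struct lkup_elem_model e;

	fill_pfcp_lkup(&e, 0x21, 0x1122334455667788ULL, ~0ULL);
	return e.mask.flags == 0x01 ? 0 : 1;
}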
@@ -374,8 +396,11 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
 	if (tc_fltr->tunnel_type != TNL_LAST) {
 		i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list, i);
 
-		headers = &tc_fltr->inner_headers;
-		inner = true;
+		/* PFCP is considered non-tunneled - don't swap headers. */
+		if (tc_fltr->tunnel_type != TNL_PFCP) {
+			headers = &tc_fltr->inner_headers;
+			inner = true;
+		}
 	}
 
 	if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) {
@@ -629,6 +654,8 @@ static int ice_tc_tun_get_type(struct net_device *tunnel_dev)
 	 */
 	if (netif_is_gtp(tunnel_dev))
 		return TNL_GTPU;
+	if (netif_is_pfcp(tunnel_dev))
+		return TNL_PFCP;
 	return TNL_LAST;
 }
@@ -642,13 +669,19 @@ static bool ice_tc_is_dev_uplink(struct net_device *dev)
 	return netif_is_ice(dev) || ice_is_tunnel_supported(dev);
 }
 
-static int ice_tc_setup_redirect_action(struct net_device *filter_dev,
-					struct ice_tc_flower_fltr *fltr,
-					struct net_device *target_dev)
+static int ice_tc_setup_action(struct net_device *filter_dev,
+			       struct ice_tc_flower_fltr *fltr,
+			       struct net_device *target_dev,
+			       enum ice_sw_fwd_act_type action)
 {
 	struct ice_repr *repr;
 
-	fltr->action.fltr_act = ICE_FWD_TO_VSI;
+	if (action != ICE_FWD_TO_VSI && action != ICE_MIRROR_PACKET) {
+		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action to setup provided");
+		return -EINVAL;
+	}
+
+	fltr->action.fltr_act = action;
 
 	if (ice_is_port_repr_netdev(filter_dev) &&
 	    ice_is_port_repr_netdev(target_dev)) {
@@ -696,41 +729,6 @@ ice_tc_setup_drop_action(struct net_device *filter_dev,
 	return 0;
 }
 
-static int ice_tc_setup_mirror_action(struct net_device *filter_dev,
-				      struct ice_tc_flower_fltr *fltr,
-				      struct net_device *target_dev)
-{
-	struct ice_repr *repr;
-
-	fltr->action.fltr_act = ICE_MIRROR_PACKET;
-
-	if (ice_is_port_repr_netdev(filter_dev) &&
-	    ice_is_port_repr_netdev(target_dev)) {
-		repr = ice_netdev_to_repr(target_dev);
-
-		fltr->dest_vsi = repr->src_vsi;
-		fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
-	} else if (ice_is_port_repr_netdev(filter_dev) &&
-		   ice_tc_is_dev_uplink(target_dev)) {
-		repr = ice_netdev_to_repr(filter_dev);
-
-		fltr->dest_vsi = repr->src_vsi->back->eswitch.uplink_vsi;
-		fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
-	} else if (ice_tc_is_dev_uplink(filter_dev) &&
-		   ice_is_port_repr_netdev(target_dev)) {
-		repr = ice_netdev_to_repr(target_dev);
-
-		fltr->dest_vsi = repr->src_vsi;
-		fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
-	} else {
-		NL_SET_ERR_MSG_MOD(fltr->extack,
-				   "Unsupported netdevice in switchdev mode");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
 static int
 ice_eswitch_tc_parse_action(struct net_device *filter_dev,
 			    struct ice_tc_flower_fltr *fltr,
 			    struct flow_action_entry *act)
@@ -746,16 +744,19 @@ static int ice_eswitch_tc_parse_action(struct net_device *filter_dev,
 		break;
 
 	case FLOW_ACTION_REDIRECT:
-		err = ice_tc_setup_redirect_action(filter_dev, fltr, act->dev);
+		err = ice_tc_setup_action(filter_dev, fltr,
+					  act->dev, ICE_FWD_TO_VSI);
 		if (err)
 			return err;
 
 		break;
 
 	case FLOW_ACTION_MIRRED:
-		err = ice_tc_setup_mirror_action(filter_dev, fltr, act->dev);
+		err = ice_tc_setup_action(filter_dev, fltr,
+					  act->dev, ICE_MIRROR_PACKET);
 		if (err)
 			return err;
+
 		break;
 
 	default:
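The refactor above folds ice_tc_setup_redirect_action() and ice_tc_setup_mirror_action(), whose VSI and direction plumbing was identical, into one ice_tc_setup_action() that takes the forwarding action as a parameter and rejects anything it was not written for. A minimal sketch of the pattern, with an illustrative enum and names:

#include <stdio.h>

enum fwd_act { FWD_TO_VSI, MIRROR_PACKET, DROP_PACKET };

struct fltr { enum fwd_act act; };

/* One setup path for both redirect and mirror; the shared target/VSI
 * resolution that used to be duplicated would follow the check.
 */
static int setup_action(struct fltr *f, enum fwd_act action)
{
	if (action != FWD_TO_VSI && action != MIRROR_PACKET) {
		fprintf(stderr, "unsupported action\n");
		return -1;
	}

	f->act = action;
	/* ...resolve target VSI and traffic direction here... */
	return 0;
}

int main(void)
{
	struct fltr f;

	return setup_action(&f, MIRROR_PACKET);	/* returns 0 */
}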
@@ -1409,7 +1410,8 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
 		}
 	}
 
-	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS) &&
+	    (fltr->tunnel_type == TNL_GTPU || fltr->tunnel_type == TNL_GTPC)) {
 		struct flow_match_enc_opts match;
 
 		flow_rule_match_enc_opts(rule, &match);
@@ -1420,7 +1422,21 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
 		memcpy(&fltr->gtp_pdu_info_masks, &match.mask->data[0],
 		       sizeof(struct gtp_pdu_session_info));
 
-		fltr->flags |= ICE_TC_FLWR_FIELD_ENC_OPTS;
+		fltr->flags |= ICE_TC_FLWR_FIELD_GTP_OPTS;
+	}
+
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS) &&
+	    fltr->tunnel_type == TNL_PFCP) {
+		struct flow_match_enc_opts match;
+
+		flow_rule_match_enc_opts(rule, &match);
+
+		memcpy(&fltr->pfcp_meta_keys, match.key->data,
+		       sizeof(struct pfcp_metadata));
+		memcpy(&fltr->pfcp_meta_masks, match.mask->data,
+		       sizeof(struct pfcp_metadata));
+
+		fltr->flags |= ICE_TC_FLWR_FIELD_PFCP_OPTS;
 	}
 
 	return 0;
@@ -1481,10 +1497,14 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
 			return err;
 		}
 
-		/* header pointers should point to the inner headers, outer
-		 * header were already set by ice_parse_tunnel_attr
-		 */
-		headers = &fltr->inner_headers;
+		/* PFCP is considered non-tunneled - don't swap headers. */
+		if (fltr->tunnel_type != TNL_PFCP) {
+			/* Header pointers should point to the inner headers,
+			 * outer header were already set by
+			 * ice_parse_tunnel_attr().
+			 */
+			headers = &fltr->inner_headers;
+		}
 	} else if (dissector->used_keys &
 		   (BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
 		    BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
@@ -1638,6 +1658,10 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
 		flow_rule_match_control(rule, &match);
 
 		addr_type = match.key->addr_type;
+
+		if (flow_rule_has_control_flags(match.mask->flags,
+						fltr->extack))
+			return -EOPNOTSUPP;
 	}
 
 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
index 65d387163a..d84f153517 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
@@ -4,6 +4,9 @@
 #ifndef _ICE_TC_LIB_H_
 #define _ICE_TC_LIB_H_
 
+#include <linux/bits.h>
+#include <net/pfcp.h>
+
 #define ICE_TC_FLWR_FIELD_DST_MAC		BIT(0)
 #define ICE_TC_FLWR_FIELD_SRC_MAC		BIT(1)
 #define ICE_TC_FLWR_FIELD_VLAN			BIT(2)
@@ -22,7 +25,7 @@
 #define ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT	BIT(15)
 #define ICE_TC_FLWR_FIELD_ENC_DST_MAC		BIT(16)
 #define ICE_TC_FLWR_FIELD_ETH_TYPE_ID		BIT(17)
-#define ICE_TC_FLWR_FIELD_ENC_OPTS		BIT(18)
+#define ICE_TC_FLWR_FIELD_GTP_OPTS		BIT(18)
 #define ICE_TC_FLWR_FIELD_CVLAN			BIT(19)
 #define ICE_TC_FLWR_FIELD_PPPOE_SESSID		BIT(20)
 #define ICE_TC_FLWR_FIELD_PPP_PROTO		BIT(21)
@@ -34,6 +37,7 @@
 #define ICE_TC_FLWR_FIELD_VLAN_PRIO		BIT(27)
 #define ICE_TC_FLWR_FIELD_CVLAN_PRIO		BIT(28)
 #define ICE_TC_FLWR_FIELD_VLAN_TPID		BIT(29)
+#define ICE_TC_FLWR_FIELD_PFCP_OPTS		BIT(30)
 
 #define ICE_TC_FLOWER_MASK_32   0xFFFFFFFF
@@ -161,6 +165,8 @@ struct ice_tc_flower_fltr {
 	__be32 tenant_id;
 	struct gtp_pdu_session_info gtp_pdu_info_keys;
 	struct gtp_pdu_session_info gtp_pdu_info_masks;
+	struct pfcp_metadata pfcp_meta_keys;
+	struct pfcp_metadata pfcp_meta_masks;
 	u32 flags;
 	u8 tunnel_type;
 	struct ice_tc_flower_action action;
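Both enc-opts branches above copy the classifier's key and mask verbatim; at match time a field participates only where the mask has bits set. The semantics boil down to a masked compare, sketched here standalone:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* A packet field matches a (key, mask) pair iff the masked bits agree;
 * mask == 0 means "don't care", as with an all-zero SEID mask above.
 */
static bool masked_match(uint64_t pkt, uint64_t key, uint64_t mask)
{
	return (pkt & mask) == (key & mask);
}

int main(void)
{
	printf("%d\n", masked_match(0xabcd, 0xab00, 0xff00));	/* 1 */
	printf("%d\n", masked_match(0xabcd, 0x0000, 0x0000));	/* 1: wildcard */
	printf("%d\n", masked_match(0xabcd, 0xab00, 0xffff));	/* 0 */
	return 0;
}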
diff --git a/drivers/net/ethernet/intel/ice/ice_trace.h b/drivers/net/ethernet/intel/ice/ice_trace.h
index b2f5c9fe01..244cddd2a9 100644
--- a/drivers/net/ethernet/intel/ice/ice_trace.h
+++ b/drivers/net/ethernet/intel/ice/ice_trace.h
@@ -69,7 +69,7 @@ DECLARE_EVENT_CLASS(ice_rx_dim_template,
 	TP_fast_assign(__entry->q_vector = q_vector;
 		       __entry->dim = dim;
-		       __assign_str(devname, q_vector->rx.rx_ring->netdev->name);),
+		       __assign_str(devname);),
 
 	TP_printk("netdev: %s Rx-Q: %d dim-state: %d dim-profile: %d dim-tune: %d dim-st-right: %d dim-st-left: %d dim-tired: %d",
 		  __get_str(devname),
@@ -96,7 +96,7 @@ DECLARE_EVENT_CLASS(ice_tx_dim_template,
 	TP_fast_assign(__entry->q_vector = q_vector;
 		       __entry->dim = dim;
-		       __assign_str(devname, q_vector->tx.tx_ring->netdev->name);),
+		       __assign_str(devname);),
 
 	TP_printk("netdev: %s Tx-Q: %d dim-state: %d dim-profile: %d dim-tune: %d dim-st-right: %d dim-st-left: %d dim-tired: %d",
 		  __get_str(devname),
@@ -128,7 +128,7 @@ DECLARE_EVENT_CLASS(ice_tx_template,
 	TP_fast_assign(__entry->ring = ring;
 		       __entry->desc = desc;
 		       __entry->buf = buf;
-		       __assign_str(devname, ring->netdev->name);),
+		       __assign_str(devname);),
 
 	TP_printk("netdev: %s ring: %pK desc: %pK buf %pK", __get_str(devname),
 		  __entry->ring, __entry->desc, __entry->buf)
@@ -156,7 +156,7 @@ DECLARE_EVENT_CLASS(ice_rx_template,
 	TP_fast_assign(__entry->ring = ring;
 		       __entry->desc = desc;
-		       __assign_str(devname, ring->netdev->name);),
+		       __assign_str(devname);),
 
 	TP_printk("netdev: %s ring: %pK desc: %pK", __get_str(devname),
 		  __entry->ring, __entry->desc)
@@ -180,7 +180,7 @@ DECLARE_EVENT_CLASS(ice_rx_indicate_template,
 	TP_fast_assign(__entry->ring = ring;
 		       __entry->desc = desc;
 		       __entry->skb = skb;
-		       __assign_str(devname, ring->netdev->name);),
+		       __assign_str(devname);),
 
 	TP_printk("netdev: %s ring: %pK desc: %pK skb %pK", __get_str(devname),
 		  __entry->ring, __entry->desc, __entry->skb)
@@ -203,7 +203,7 @@ DECLARE_EVENT_CLASS(ice_xmit_template,
 	TP_fast_assign(__entry->ring = ring;
 		       __entry->skb = skb;
-		       __assign_str(devname, ring->netdev->name);),
+		       __assign_str(devname);),
 
 	TP_printk("netdev: %s skb: %pK ring: %pK", __get_str(devname),
 		  __entry->skb, __entry->ring)
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 97d41d6ebf..8bb743f78f 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -1051,8 +1051,7 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
 	}
 
 	/* allocate a skb to store the frags */
-	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
-			       GFP_ATOMIC | __GFP_NOWARN);
+	skb = napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE);
 	if (unlikely(!skb))
 		return NULL;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index af955b0e5d..feba314a3f 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -365,6 +365,7 @@ struct ice_rx_ring {
 	u8 ptp_rx;
 #define ICE_RX_FLAGS_RING_BUILD_SKB	BIT(1)
 #define ICE_RX_FLAGS_CRC_STRIP_DIS	BIT(2)
+#define ICE_RX_FLAGS_MULTIDEV		BIT(3)
 	u8 flags;
 	/* CL5 - 5th cacheline starts here */
 	struct xdp_rxq_info xdp_rxq;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index f8f1d2bdc1..2719f0e209 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -2,6 +2,7 @@
 /* Copyright (c) 2019, Intel Corporation. */
 
 #include <linux/filter.h>
+#include <linux/net/intel/libie/rx.h>
 
 #include "ice_txrx_lib.h"
 #include "ice_eswitch.h"
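The ice_txrx.c hunk above drops the explicit GFP flags because, as an assumption about the upstream helper at the time of this release, napi_alloc_skb() is a thin wrapper that already supplies GFP_ATOMIC (with allocation-failure warnings suppressed further down the allocation path), roughly:

/* Paraphrased sketch of the include/linux/skbuff.h inline; verify
 * against the tree you build with, this is not quoted verbatim.
 */
static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
					     unsigned int length)
{
	return __napi_alloc_skb(napi, length, GFP_ATOMIC);
}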
@@ -39,30 +40,6 @@ void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val)
 }
 
 /**
- * ice_ptype_to_htype - get a hash type
- * @ptype: the ptype value from the descriptor
- *
- * Returns appropriate hash type (such as PKT_HASH_TYPE_L2/L3/L4) to be used by
- * skb_set_hash based on PTYPE as parsed by HW Rx pipeline and is part of
- * Rx desc.
- */
-static enum pkt_hash_types ice_ptype_to_htype(u16 ptype)
-{
-	struct ice_rx_ptype_decoded decoded = ice_decode_rx_desc_ptype(ptype);
-
-	if (!decoded.known)
-		return PKT_HASH_TYPE_NONE;
-	if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4)
-		return PKT_HASH_TYPE_L4;
-	if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3)
-		return PKT_HASH_TYPE_L3;
-	if (decoded.outer_ip == ICE_RX_PTYPE_OUTER_L2)
-		return PKT_HASH_TYPE_L2;
-
-	return PKT_HASH_TYPE_NONE;
-}
-
-/**
  * ice_get_rx_hash - get RX hash value from descriptor
  * @rx_desc: specific descriptor
  *
@@ -91,14 +68,16 @@ ice_rx_hash_to_skb(const struct ice_rx_ring *rx_ring,
 		   const union ice_32b_rx_flex_desc *rx_desc,
 		   struct sk_buff *skb, u16 rx_ptype)
 {
+	struct libeth_rx_pt decoded;
 	u32 hash;
 
-	if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
+	decoded = libie_rx_pt_parse(rx_ptype);
+	if (!libeth_rx_pt_has_hash(rx_ring->netdev, decoded))
 		return;
 
 	hash = ice_get_rx_hash(rx_desc);
 	if (likely(hash))
-		skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
+		libeth_rx_pt_set_hash(skb, hash, decoded);
 }
 
 /**
@@ -114,34 +93,26 @@ static void
 ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
 	    union ice_32b_rx_flex_desc *rx_desc, u16 ptype)
 {
-	struct ice_rx_ptype_decoded decoded;
+	struct libeth_rx_pt decoded;
 	u16 rx_status0, rx_status1;
 	bool ipv4, ipv6;
 
-	rx_status0 = le16_to_cpu(rx_desc->wb.status_error0);
-	rx_status1 = le16_to_cpu(rx_desc->wb.status_error1);
-
-	decoded = ice_decode_rx_desc_ptype(ptype);
-
 	/* Start with CHECKSUM_NONE and by default csum_level = 0 */
 	skb->ip_summed = CHECKSUM_NONE;
-	skb_checksum_none_assert(skb);
 
-	/* check if Rx checksum is enabled */
-	if (!(ring->netdev->features & NETIF_F_RXCSUM))
+	decoded = libie_rx_pt_parse(ptype);
+	if (!libeth_rx_pt_has_checksum(ring->netdev, decoded))
 		return;
 
+	rx_status0 = le16_to_cpu(rx_desc->wb.status_error0);
+	rx_status1 = le16_to_cpu(rx_desc->wb.status_error1);
+
 	/* check if HW has decoded the packet and checksum */
 	if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
 		return;
 
-	if (!(decoded.known && decoded.outer_ip))
-		return;
-
-	ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
-	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
-	ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
-	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);
+	ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
+	ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
 
 	if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))) {
 		ring->vsi->back->hw_rx_eipe_error++;
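The libeth conversion above decodes the packet type once and then asks feature-gating helpers whether hashing or checksum offload applies, instead of each path re-deriving everything from the raw ptype. A standalone model of that decode-once, gate-later flow; the field and helper names here are made up for illustration, not the libeth API:

#include <stdbool.h>
#include <stdint.h>

struct rx_pt_model {
	uint8_t ip_ver;	/* 0 = non-IP, 4 or 6 */
	bool    has_l4;	/* L4 checksum coverage possible */
};

/* Decode the hardware ptype once (the real code uses a generated table) */
static struct rx_pt_model parse_pt(uint16_t ptype)
{
	switch (ptype) {
	case 1:  return (struct rx_pt_model){ .ip_ver = 4, .has_l4 = true };
	case 2:  return (struct rx_pt_model){ .ip_ver = 6, .has_l4 = true };
	default: return (struct rx_pt_model){ 0 };
	}
}

/* ...then every consumer combines the decoded type with netdev features */
static bool pt_has_checksum(uint32_t features, struct rx_pt_model pt)
{
	const uint32_t F_RXCSUM = 1u << 0;	/* illustrative feature bit */

	return (features & F_RXCSUM) && pt.has_l4;
}

int main(void)
{
	return pt_has_checksum(1u, parse_pt(1)) ? 0 : 1;
}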
*/ - if (decoded.tunnel_type >= ICE_RX_PTYPE_TUNNEL_IP_GRENAT) + if (decoded.tunnel_type >= LIBETH_RX_PT_TUNNEL_IP_GRENAT) skb->csum_level = 1; - /* Only report checksum unnecessary for TCP, UDP, or SCTP */ - switch (decoded.inner_prot) { - case ICE_RX_PTYPE_INNER_PROT_TCP: - case ICE_RX_PTYPE_INNER_PROT_UDP: - case ICE_RX_PTYPE_INNER_PROT_SCTP: - skb->ip_summed = CHECKSUM_UNNECESSARY; - break; - default: - break; - } + skb->ip_summed = CHECKSUM_UNNECESSARY; return; checksum_fail: @@ -236,7 +198,16 @@ ice_process_skb_fields(struct ice_rx_ring *rx_ring, ice_rx_hash_to_skb(rx_ring, rx_desc, skb, ptype); /* modifies the skb - consumes the enet header */ - skb->protocol = eth_type_trans(skb, rx_ring->netdev); + if (unlikely(rx_ring->flags & ICE_RX_FLAGS_MULTIDEV)) { + struct net_device *netdev = ice_eswitch_get_target(rx_ring, + rx_desc); + + if (ice_is_port_repr_netdev(netdev)) + ice_repr_inc_rx_stats(netdev, skb->len); + skb->protocol = eth_type_trans(skb, netdev); + } else { + skb->protocol = eth_type_trans(skb, rx_ring->netdev); + } ice_rx_csum(rx_ring, skb, rx_desc, ptype); @@ -527,42 +498,6 @@ static int ice_xdp_rx_hw_ts(const struct xdp_md *ctx, u64 *ts_ns) return 0; } -/* Define a ptype index -> XDP hash type lookup table. - * It uses the same ptype definitions as ice_decode_rx_desc_ptype[], - * avoiding possible copy-paste errors. - */ -#undef ICE_PTT -#undef ICE_PTT_UNUSED_ENTRY - -#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ - [PTYPE] = XDP_RSS_L3_##OUTER_IP_VER | XDP_RSS_L4_##I | XDP_RSS_TYPE_##PL - -#define ICE_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = 0 - -/* A few supplementary definitions for when XDP hash types do not coincide - * with what can be generated from ptype definitions - * by means of preprocessor concatenation. 
@@ -527,42 +498,6 @@ static int ice_xdp_rx_hw_ts(const struct xdp_md *ctx, u64 *ts_ns)
 	return 0;
 }
 
-/* Define a ptype index -> XDP hash type lookup table.
- * It uses the same ptype definitions as ice_decode_rx_desc_ptype[],
- * avoiding possible copy-paste errors.
- */
-#undef ICE_PTT
-#undef ICE_PTT_UNUSED_ENTRY
-
-#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
-	[PTYPE] = XDP_RSS_L3_##OUTER_IP_VER | XDP_RSS_L4_##I | XDP_RSS_TYPE_##PL
-
-#define ICE_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = 0
-
-/* A few supplementary definitions for when XDP hash types do not coincide
- * with what can be generated from ptype definitions
- * by means of preprocessor concatenation.
- */
-#define XDP_RSS_L3_NONE		XDP_RSS_TYPE_NONE
-#define XDP_RSS_L4_NONE		XDP_RSS_TYPE_NONE
-#define XDP_RSS_TYPE_PAY2	XDP_RSS_TYPE_L2
-#define XDP_RSS_TYPE_PAY3	XDP_RSS_TYPE_NONE
-#define XDP_RSS_TYPE_PAY4	XDP_RSS_L4
-
-static const enum xdp_rss_hash_type
-ice_ptype_to_xdp_hash[ICE_NUM_DEFINED_PTYPES] = {
-	ICE_PTYPES
-};
-
-#undef XDP_RSS_L3_NONE
-#undef XDP_RSS_L4_NONE
-#undef XDP_RSS_TYPE_PAY2
-#undef XDP_RSS_TYPE_PAY3
-#undef XDP_RSS_TYPE_PAY4
-
-#undef ICE_PTT
-#undef ICE_PTT_UNUSED_ENTRY
-
 /**
  * ice_xdp_rx_hash_type - Get XDP-specific hash type from the RX descriptor
  * @eop_desc: End of Packet descriptor
@@ -570,12 +505,7 @@ ice_ptype_to_xdp_hash[ICE_NUM_DEFINED_PTYPES] = {
 static enum xdp_rss_hash_type
 ice_xdp_rx_hash_type(const union ice_32b_rx_flex_desc *eop_desc)
 {
-	u16 ptype = ice_get_ptype(eop_desc);
-
-	if (unlikely(ptype >= ICE_NUM_DEFINED_PTYPES))
-		return 0;
-
-	return ice_ptype_to_xdp_hash[ptype];
+	return libie_rx_pt_parse(ice_get_ptype(eop_desc)).hash_type;
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 0aacd0d050..eef397e5ba 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -150,7 +150,6 @@ enum ice_vsi_type {
 	ICE_VSI_CTRL = 3,	/* equates to ICE_VSI_PF with 1 queue pair */
 	ICE_VSI_CHNL = 4,
 	ICE_VSI_LB = 6,
-	ICE_VSI_SWITCHDEV_CTRL = 7,
 };
 
 struct ice_link_status {
@@ -204,6 +203,7 @@ struct ice_phy_info {
 enum ice_fltr_ptype {
 	/* NONE - used for undef/error */
 	ICE_FLTR_PTYPE_NONF_NONE = 0,
+	ICE_FLTR_PTYPE_NONF_ETH,
 	ICE_FLTR_PTYPE_NONF_IPV4_UDP,
 	ICE_FLTR_PTYPE_NONF_IPV4_TCP,
 	ICE_FLTR_PTYPE_NONF_IPV4_SCTP,
@@ -296,6 +296,7 @@ struct ice_hw_common_caps {
 	bool pcie_reset_avoidance;
 	/* Post update reset restriction */
 	bool reset_restrict_support;
+	bool tx_sched_topo_comp_mode_en;
 };
 
 /* IEEE 1588 TIME_SYNC specific info */
@@ -851,6 +852,8 @@ struct ice_hw {
 
 	u16 max_burst_size;	/* driver sets this value */
 
+	u8 recp_reuse:1;	/* indicates whether FW supports recipe reuse */
+
 	/* Tx Scheduler values */
 	u8 num_tx_sched_layers;
 	u8 num_tx_sched_phys_layers;
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index d10a4be965..48a8d462d7 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -259,20 +259,18 @@ static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
 int ice_vf_reconfig_vsi(struct ice_vf *vf)
 {
 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
-	struct ice_vsi_cfg_params params = {};
 	struct ice_pf *pf = vf->pf;
 	int err;
 
 	if (WARN_ON(!vsi))
 		return -EINVAL;
 
-	params = ice_vsi_to_params(vsi);
-	params.flags = ICE_VSI_FLAG_NO_INIT;
+	vsi->flags = ICE_VSI_FLAG_NO_INIT;
 
 	ice_vsi_decfg(vsi);
 	ice_fltr_remove_all(vsi);
 
-	err = ice_vsi_cfg(vsi, &params);
+	err = ice_vsi_cfg(vsi);
 	if (err) {
 		dev_err(ice_pf_to_dev(pf), "Failed to reconfigure the VF%u's VSI, error %d\n",
@@ -992,10 +990,13 @@ void ice_initialize_vf_entry(struct ice_vf *vf)
 
 	/* assign default capabilities */
 	vf->spoofchk = true;
-	vf->num_vf_qs = vfs->num_qps_per;
 	ice_vc_set_default_allowlist(vf);
 	ice_virtchnl_set_dflt_ops(vf);
 
+	/* set default number of MSI-X */
+	vf->num_msix = vfs->num_msix_per;
+	vf->num_vf_qs = vfs->num_qps_per;
+
 	/* ctrl_vsi_idx will be set to a valid value only when iAVF
 	 * creates its first fdir rule.
 	 */
@@ -1240,7 +1241,7 @@ struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
 	struct ice_vsi *vsi;
 
 	params.type = ICE_VSI_CTRL;
-	params.pi = ice_vf_get_port_info(vf);
+	params.port_info = ice_vf_get_port_info(vf);
 	params.vf = vf;
 	params.flags = ICE_VSI_FLAG_INIT;
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index 1ff9818b4c..1c6ce0c4ed 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -1505,13 +1505,12 @@ error_param:
  * ice_cfg_interrupt
  * @vf: pointer to the VF info
  * @vsi: the VSI being configured
- * @vector_id: vector ID
  * @map: vector map for mapping vectors to queues
  * @q_vector: structure for interrupt vector
  * configure the IRQ to queue map
  */
-static int
-ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
+static enum virtchnl_status_code
+ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi,
 		  struct virtchnl_vector_map *map,
 		  struct ice_q_vector *q_vector)
 {
@@ -1531,7 +1530,8 @@ ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
 			q_vector->num_ring_rx++;
 			q_vector->rx.itr_idx = map->rxitr_idx;
 			vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
-			ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
+			ice_cfg_rxq_interrupt(vsi, vsi_q_id,
+					      q_vector->vf_reg_idx,
 					      q_vector->rx.itr_idx);
 		}
@@ -1545,7 +1545,8 @@ ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
 			q_vector->num_ring_tx++;
 			q_vector->tx.itr_idx = map->txitr_idx;
 			vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
-			ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
+			ice_cfg_txq_interrupt(vsi, vsi_q_id,
+					      q_vector->vf_reg_idx,
 					      q_vector->tx.itr_idx);
 		}
@@ -1619,8 +1620,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
 		}
 
 		/* lookout for the invalid queue index */
-		v_ret = (enum virtchnl_status_code)
-			ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector);
+		v_ret = ice_cfg_interrupt(vf, vsi, map, q_vector);
 		if (v_ret)
 			goto error_param;
 	}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
index 8e4ff3af86..b4feb09276 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -536,6 +536,8 @@ static void ice_vc_fdir_reset_cnt_all(struct ice_vf_fdir *fdir)
 		fdir->fdir_fltr_cnt[flow][0] = 0;
 		fdir->fdir_fltr_cnt[flow][1] = 0;
 	}
+
+	fdir->fdir_fltr_cnt_total = 0;
 }
 
 /**
@@ -1560,6 +1562,7 @@ ice_vc_add_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
 	resp->status = status;
 	resp->flow_id = conf->flow_id;
 	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++;
+	vf->fdir.fdir_fltr_cnt_total++;
 
 	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
 				    (u8 *)resp, len);
@@ -1624,6 +1627,7 @@ ice_vc_del_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
 	resp->status = status;
 	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
 	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--;
+	vf->fdir.fdir_fltr_cnt_total--;
 
 	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
 				    (u8 *)resp, len);
@@ -1790,6 +1794,7 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
 	struct virtchnl_fdir_add *stat = NULL;
 	struct virtchnl_fdir_fltr_conf *conf;
 	enum virtchnl_status_code v_ret;
+	struct ice_vsi *vf_vsi;
 	struct device *dev;
 	struct ice_pf *pf;
 	int is_tun = 0;
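Dropping the vector_id parameter above removes a value the callee can read off the q_vector it is already given (vf_reg_idx), and the virtchnl status is now returned directly instead of being produced as an int and cast by the caller. The shape of that cleanup in miniature, with illustrative names:

enum status { ST_OK = 0, ST_ERR_PARAM = 1 };

struct q_vector { unsigned short vf_reg_idx; };

/* Before: cfg(vf, vsi, vector_id, map, qv) duplicated state already
 * reachable as qv->vf_reg_idx and returned a plain int.
 * After: one source of truth and a typed return.
 */
static enum status cfg_interrupt(struct q_vector *qv)
{
	unsigned short reg_idx = qv->vf_reg_idx;	/* derived, not passed */

	(void)reg_idx;	/* the HW register would be programmed here */
	return ST_OK;
}

int main(void)
{
	struct q_vector qv = { .vf_reg_idx = 5 };

	return cfg_interrupt(&qv) == ST_OK ? 0 : 1;
}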
@@ -1798,6 +1803,17 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
 	pf = vf->pf;
 	dev = ice_pf_to_dev(pf);
+	vf_vsi = ice_get_vf_vsi(vf);
+
+#define ICE_VF_MAX_FDIR_FILTERS	128
+	if (!ice_fdir_num_avail_fltr(&pf->hw, vf_vsi) ||
+	    vf->fdir.fdir_fltr_cnt_total >= ICE_VF_MAX_FDIR_FILTERS) {
+		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+		dev_err(dev, "Max number of FDIR filters for VF %d is reached\n",
+			vf->vf_id);
+		goto err_exit;
+	}
+
 	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
 	if (ret) {
 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
index c5bcc8d748..ac6dcab454 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
@@ -29,6 +29,7 @@ struct ice_vf_fdir_ctx {
 struct ice_vf_fdir {
 	u16 fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
 	int prof_entry_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
+	u16 fdir_fltr_cnt_total;
 
 	struct ice_fd_hw_prof **fdir_prof;
 	struct idr fdir_rule_idr;
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c
index 4a6c850d83..7aae7fdcfc 100644
--- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c
@@ -72,7 +72,6 @@ void ice_vsi_init_vlan_ops(struct ice_vsi *vsi)
 
 	switch (vsi->type) {
 	case ICE_VSI_PF:
-	case ICE_VSI_SWITCHDEV_CTRL:
 		ice_pf_vsi_init_vlan_ops(vsi);
 		break;
 	case ICE_VSI_VF:
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 86a865788e..a65955eb23 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -554,8 +554,7 @@ ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
 	}
 
 	net_prefetch(xdp->data_meta);
 
-	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
-			       GFP_ATOMIC | __GFP_NOWARN);
+	skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize);
 	if (unlikely(!skb))
 		return NULL;
@@ -878,7 +877,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
 			       ICE_RX_FLX_DESC_PKT_LEN_M;
 
 		xsk_buff_set_size(xdp, size);
-		xsk_buff_dma_sync_for_cpu(xdp, xsk_pool);
+		xsk_buff_dma_sync_for_cpu(xdp);
 
 		if (!first) {
 			first = xdp;
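The ICE_VF_MAX_FDIR_FILTERS check above pairs a per-VF soft cap with the hardware's remaining-space query, and relies on the new fdir_fltr_cnt_total counter being bumped on every successful add, dropped on every delete, and zeroed on reset. A standalone model of that bookkeeping:

#include <stdbool.h>
#include <stdio.h>

#define VF_MAX_FDIR_FILTERS 128	/* models ICE_VF_MAX_FDIR_FILTERS */

struct vf_fdir { unsigned int total; };

static bool hw_has_space(void) { return true; }	/* stub for the HW query */

static int add_filter(struct vf_fdir *f)
{
	if (!hw_has_space() || f->total >= VF_MAX_FDIR_FILTERS)
		return -1;	/* reject before touching hardware */
	f->total++;		/* mirrors fdir_fltr_cnt_total++ on success */
	return 0;
}

static void del_filter(struct vf_fdir *f)
{
	f->total--;		/* mirrors fdir_fltr_cnt_total-- */
}

int main(void)
{
	struct vf_fdir f = { 0 };

	while (!add_filter(&f))
		;
	printf("stopped at %u filters\n", f.total);	/* prints 128 */
	del_filter(&f);
	return 0;
}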