Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5')
52 files changed, 760 insertions, 412 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/crdump.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/crdump.c index 28d02749d3..7659ad21e6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/crdump.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/crdump.c @@ -55,7 +55,10 @@ int mlx5_crdump_collect(struct mlx5_core_dev *dev, u32 *cr_data) ret = mlx5_vsc_sem_set_space(dev, MLX5_SEMAPHORE_SW_RESET, MLX5_VSC_LOCK); if (ret) { - mlx5_core_warn(dev, "Failed to lock SW reset semaphore\n"); + if (ret == -EBUSY) + mlx5_core_info(dev, "SW reset semaphore is already in use\n"); + else + mlx5_core_warn(dev, "Failed to lock SW reset semaphore\n"); goto unlock_gw; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c index 8ce5c8bcda..d74a5aaf42 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c @@ -36,11 +36,17 @@ static int mlx5_dpll_clock_id_get(struct mlx5_core_dev *mdev, u64 *clock_id) return 0; } +struct mlx5_dpll_synce_status { + enum mlx5_msees_admin_status admin_status; + enum mlx5_msees_oper_status oper_status; + bool ho_acq; + bool oper_freq_measure; + s32 frequency_diff; +}; + static int mlx5_dpll_synce_status_get(struct mlx5_core_dev *mdev, - enum mlx5_msees_admin_status *admin_status, - enum mlx5_msees_oper_status *oper_status, - bool *ho_acq) + struct mlx5_dpll_synce_status *synce_status) { u32 out[MLX5_ST_SZ_DW(msees_reg)] = {}; u32 in[MLX5_ST_SZ_DW(msees_reg)] = {}; @@ -50,11 +56,11 @@ mlx5_dpll_synce_status_get(struct mlx5_core_dev *mdev, MLX5_REG_MSEES, 0, 0); if (err) return err; - if (admin_status) - *admin_status = MLX5_GET(msees_reg, out, admin_status); - *oper_status = MLX5_GET(msees_reg, out, oper_status); - if (ho_acq) - *ho_acq = MLX5_GET(msees_reg, out, ho_acq); + synce_status->admin_status = MLX5_GET(msees_reg, out, admin_status); + synce_status->oper_status = MLX5_GET(msees_reg, out, oper_status); + synce_status->ho_acq = MLX5_GET(msees_reg, out, ho_acq); + synce_status->oper_freq_measure = MLX5_GET(msees_reg, out, oper_freq_measure); + synce_status->frequency_diff = MLX5_GET(msees_reg, out, frequency_diff); return 0; } @@ -67,21 +73,23 @@ mlx5_dpll_synce_status_set(struct mlx5_core_dev *mdev, MLX5_SET(msees_reg, in, field_select, MLX5_MSEES_FIELD_SELECT_ENABLE | + MLX5_MSEES_FIELD_SELECT_ADMIN_FREQ_MEASURE | MLX5_MSEES_FIELD_SELECT_ADMIN_STATUS); MLX5_SET(msees_reg, in, admin_status, admin_status); + MLX5_SET(msees_reg, in, admin_freq_measure, true); return mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out), MLX5_REG_MSEES, 0, 1); } static enum dpll_lock_status -mlx5_dpll_lock_status_get(enum mlx5_msees_oper_status oper_status, bool ho_acq) +mlx5_dpll_lock_status_get(struct mlx5_dpll_synce_status *synce_status) { - switch (oper_status) { + switch (synce_status->oper_status) { case MLX5_MSEES_OPER_STATUS_SELF_TRACK: fallthrough; case MLX5_MSEES_OPER_STATUS_OTHER_TRACK: - return ho_acq ? DPLL_LOCK_STATUS_LOCKED_HO_ACQ : - DPLL_LOCK_STATUS_LOCKED; + return synce_status->ho_acq ? 
DPLL_LOCK_STATUS_LOCKED_HO_ACQ : + DPLL_LOCK_STATUS_LOCKED; case MLX5_MSEES_OPER_STATUS_HOLDOVER: fallthrough; case MLX5_MSEES_OPER_STATUS_FAIL_HOLDOVER: @@ -92,31 +100,37 @@ mlx5_dpll_lock_status_get(enum mlx5_msees_oper_status oper_status, bool ho_acq) } static enum dpll_pin_state -mlx5_dpll_pin_state_get(enum mlx5_msees_admin_status admin_status, - enum mlx5_msees_oper_status oper_status) +mlx5_dpll_pin_state_get(struct mlx5_dpll_synce_status *synce_status) { - return (admin_status == MLX5_MSEES_ADMIN_STATUS_TRACK && - (oper_status == MLX5_MSEES_OPER_STATUS_SELF_TRACK || - oper_status == MLX5_MSEES_OPER_STATUS_OTHER_TRACK)) ? + return (synce_status->admin_status == MLX5_MSEES_ADMIN_STATUS_TRACK && + (synce_status->oper_status == MLX5_MSEES_OPER_STATUS_SELF_TRACK || + synce_status->oper_status == MLX5_MSEES_OPER_STATUS_OTHER_TRACK)) ? DPLL_PIN_STATE_CONNECTED : DPLL_PIN_STATE_DISCONNECTED; } +static int +mlx5_dpll_pin_ffo_get(struct mlx5_dpll_synce_status *synce_status, + s64 *ffo) +{ + if (!synce_status->oper_freq_measure) + return -ENODATA; + *ffo = synce_status->frequency_diff; + return 0; +} + static int mlx5_dpll_device_lock_status_get(const struct dpll_device *dpll, void *priv, enum dpll_lock_status *status, struct netlink_ext_ack *extack) { - enum mlx5_msees_oper_status oper_status; + struct mlx5_dpll_synce_status synce_status; struct mlx5_dpll *mdpll = priv; - bool ho_acq; int err; - err = mlx5_dpll_synce_status_get(mdpll->mdev, NULL, - &oper_status, &ho_acq); + err = mlx5_dpll_synce_status_get(mdpll->mdev, &synce_status); if (err) return err; - - *status = mlx5_dpll_lock_status_get(oper_status, ho_acq); + *status = mlx5_dpll_lock_status_get(&synce_status); return 0; } @@ -128,18 +142,9 @@ static int mlx5_dpll_device_mode_get(const struct dpll_device *dpll, return 0; } -static bool mlx5_dpll_device_mode_supported(const struct dpll_device *dpll, - void *priv, - enum dpll_mode mode, - struct netlink_ext_ack *extack) -{ - return mode == DPLL_MODE_MANUAL; -} - static const struct dpll_device_ops mlx5_dpll_device_ops = { .lock_status_get = mlx5_dpll_device_lock_status_get, .mode_get = mlx5_dpll_device_mode_get, - .mode_supported = mlx5_dpll_device_mode_supported, }; static int mlx5_dpll_pin_direction_get(const struct dpll_pin *pin, @@ -160,16 +165,14 @@ static int mlx5_dpll_state_on_dpll_get(const struct dpll_pin *pin, enum dpll_pin_state *state, struct netlink_ext_ack *extack) { - enum mlx5_msees_admin_status admin_status; - enum mlx5_msees_oper_status oper_status; + struct mlx5_dpll_synce_status synce_status; struct mlx5_dpll *mdpll = pin_priv; int err; - err = mlx5_dpll_synce_status_get(mdpll->mdev, &admin_status, - &oper_status, NULL); + err = mlx5_dpll_synce_status_get(mdpll->mdev, &synce_status); if (err) return err; - *state = mlx5_dpll_pin_state_get(admin_status, oper_status); + *state = mlx5_dpll_pin_state_get(&synce_status); return 0; } @@ -188,10 +191,25 @@ static int mlx5_dpll_state_on_dpll_set(const struct dpll_pin *pin, MLX5_MSEES_ADMIN_STATUS_FREE_RUNNING); } +static int mlx5_dpll_ffo_get(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + s64 *ffo, struct netlink_ext_ack *extack) +{ + struct mlx5_dpll_synce_status synce_status; + struct mlx5_dpll *mdpll = pin_priv; + int err; + + err = mlx5_dpll_synce_status_get(mdpll->mdev, &synce_status); + if (err) + return err; + return mlx5_dpll_pin_ffo_get(&synce_status, ffo); +} + static const struct dpll_pin_ops mlx5_dpll_pins_ops = { .direction_get = mlx5_dpll_pin_direction_get, 
.state_on_dpll_get = mlx5_dpll_state_on_dpll_get, .state_on_dpll_set = mlx5_dpll_state_on_dpll_set, + .ffo_get = mlx5_dpll_ffo_get, }; static const struct dpll_pin_properties mlx5_dpll_pin_properties = { @@ -211,19 +229,16 @@ static void mlx5_dpll_periodic_work(struct work_struct *work) { struct mlx5_dpll *mdpll = container_of(work, struct mlx5_dpll, work.work); - enum mlx5_msees_admin_status admin_status; - enum mlx5_msees_oper_status oper_status; + struct mlx5_dpll_synce_status synce_status; enum dpll_lock_status lock_status; enum dpll_pin_state pin_state; - bool ho_acq; int err; - err = mlx5_dpll_synce_status_get(mdpll->mdev, &admin_status, - &oper_status, &ho_acq); + err = mlx5_dpll_synce_status_get(mdpll->mdev, &synce_status); if (err) goto err_out; - lock_status = mlx5_dpll_lock_status_get(oper_status, ho_acq); - pin_state = mlx5_dpll_pin_state_get(admin_status, oper_status); + lock_status = mlx5_dpll_lock_status_get(&synce_status); + pin_state = mlx5_dpll_pin_state_get(&synce_status); if (!mdpll->last.valid) goto invalid_out; @@ -246,7 +261,7 @@ static void mlx5_dpll_netdev_dpll_pin_set(struct mlx5_dpll *mdpll, { if (mdpll->tracking_netdev) return; - netdev_dpll_pin_set(netdev, mdpll->dpll_pin); + dpll_netdev_pin_set(netdev, mdpll->dpll_pin); mdpll->tracking_netdev = netdev; } @@ -254,7 +269,7 @@ static void mlx5_dpll_netdev_dpll_pin_clear(struct mlx5_dpll *mdpll) { if (!mdpll->tracking_netdev) return; - netdev_dpll_pin_clear(mdpll->tracking_netdev); + dpll_netdev_pin_clear(mdpll->tracking_netdev); mdpll->tracking_netdev = NULL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 729a11b5fb..55c6ace0ac 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -72,7 +72,6 @@ struct page_pool; #define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu)) #define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu)) -#define MLX5E_MAX_NUM_TC 8 #define MLX5E_MAX_NUM_MQPRIO_CH_TC TC_QOPT_MAX_QUEUE #define MLX5_RX_HEADROOM NET_SKB_PAD @@ -364,7 +363,7 @@ struct mlx5e_cq { /* control */ struct net_device *netdev; struct mlx5_core_dev *mdev; - struct mlx5e_priv *priv; + struct workqueue_struct *workqueue; struct mlx5_wq_ctrl wq_ctrl; } ____cacheline_aligned_in_smp; @@ -484,10 +483,12 @@ struct mlx5e_xdp_info_fifo { struct mlx5e_xdpsq; struct mlx5e_xmit_data; +struct xsk_tx_metadata; typedef int (*mlx5e_fp_xmit_xdp_frame_check)(struct mlx5e_xdpsq *); typedef bool (*mlx5e_fp_xmit_xdp_frame)(struct mlx5e_xdpsq *, struct mlx5e_xmit_data *, - int); + int, + struct xsk_tx_metadata *); struct mlx5e_xdpsq { /* data path */ @@ -756,7 +757,7 @@ struct mlx5e_channel { /* data path */ struct mlx5e_rq rq; struct mlx5e_xdpsq rq_xdpsq; - struct mlx5e_txqsq sq[MLX5E_MAX_NUM_TC]; + struct mlx5e_txqsq sq[MLX5_MAX_NUM_TC]; struct mlx5e_icosq icosq; /* internal control operations */ struct mlx5e_txqsq __rcu * __rcu *qos_sqs; bool xdp; @@ -806,7 +807,7 @@ struct mlx5e_channels { struct mlx5e_channel_stats { struct mlx5e_ch_stats ch; - struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC]; + struct mlx5e_sq_stats sq[MLX5_MAX_NUM_TC]; struct mlx5e_rq_stats rq; struct mlx5e_rq_stats xskrq; struct mlx5e_xdpsq_stats rq_xdpsq; @@ -816,8 +817,8 @@ struct mlx5e_channel_stats { struct mlx5e_ptp_stats { struct mlx5e_ch_stats ch; - struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC]; - struct mlx5e_ptp_cq_stats cq[MLX5E_MAX_NUM_TC]; + struct mlx5e_sq_stats sq[MLX5_MAX_NUM_TC]; + struct mlx5e_ptp_cq_stats 
cq[MLX5_MAX_NUM_TC]; struct mlx5e_rq_stats rq; } ____cacheline_aligned_in_smp; @@ -885,7 +886,6 @@ struct mlx5e_priv { struct mlx5e_rq drop_rq; struct mlx5e_channels channels; - u32 tisn[MLX5_MAX_PORTS][MLX5E_MAX_NUM_TC]; struct mlx5e_rx_res *rx_res; u32 *tx_rates; @@ -983,6 +983,8 @@ struct mlx5e_profile { void (*update_stats)(struct mlx5e_priv *priv); void (*update_carrier)(struct mlx5e_priv *priv); int (*max_nch_limit)(struct mlx5_core_dev *mdev); + u32 (*get_tisn)(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv, + u8 lag_port, u8 tc); unsigned int (*stats_grps_num)(struct mlx5e_priv *priv); mlx5e_stats_grp_t *stats_grps; const struct mlx5e_rx_handlers *rx_handlers; @@ -990,6 +992,11 @@ struct mlx5e_profile { u32 features; }; +u32 mlx5e_profile_get_tisn(struct mlx5_core_dev *mdev, + struct mlx5e_priv *priv, + const struct mlx5e_profile *profile, + u8 lag_port, u8 tc); + #define mlx5e_profile_feature_cap(profile, feature) \ ((profile)->features & BIT(MLX5E_PROFILE_FEATURE_##feature)) @@ -1037,6 +1044,8 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params, void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq); struct mlx5e_create_cq_param { + struct net_device *netdev; + struct workqueue_struct *wq; struct napi_struct *napi; struct mlx5e_ch_stats *ch_stats; int node; @@ -1044,7 +1053,7 @@ struct mlx5e_create_cq_param { }; struct mlx5e_cq_param; -int mlx5e_open_cq(struct mlx5e_priv *priv, struct dim_cq_moder moder, +int mlx5e_open_cq(struct mlx5_core_dev *mdev, struct dim_cq_moder moder, struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp, struct mlx5e_cq *cq); void mlx5e_close_cq(struct mlx5e_cq *cq); @@ -1115,7 +1124,7 @@ static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev) extern const struct ethtool_ops mlx5e_ethtool_ops; int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey); -int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev); +int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises); void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev); int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb, bool enable_mc_lb); @@ -1131,8 +1140,6 @@ void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq); int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn); void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn); -int mlx5e_create_tises(struct mlx5e_priv *priv); -void mlx5e_destroy_tises(struct mlx5e_priv *priv); int mlx5e_update_nic_rx(struct mlx5e_priv *priv); void mlx5e_update_carrier(struct mlx5e_priv *priv); int mlx5e_close(struct net_device *netdev); @@ -1174,9 +1181,9 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, struct ethtool_link_ksettings *link_ksettings); int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, const struct ethtool_link_ksettings *link_ksettings); -int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc); -int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, - const u8 hfunc); +int mlx5e_get_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh); +int mlx5e_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack); u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv); u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv); int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c 
index 254c847390..40c8df1117 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c @@ -36,7 +36,7 @@ int mlx5e_monitor_counter_supported(struct mlx5e_priv *priv) return true; } -void mlx5e_monitor_counter_arm(struct mlx5e_priv *priv) +static void mlx5e_monitor_counter_arm(struct mlx5e_priv *priv) { u32 in[MLX5_ST_SZ_DW(arm_monitor_counter_in)] = {}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.h index e1ac4b3d22..6beba7f075 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.h @@ -7,6 +7,5 @@ int mlx5e_monitor_counter_supported(struct mlx5e_priv *priv); void mlx5e_monitor_counter_init(struct mlx5e_priv *priv); void mlx5e_monitor_counter_cleanup(struct mlx5e_priv *priv); -void mlx5e_monitor_counter_arm(struct mlx5e_priv *priv); #endif /* __MLX5_MONITOR_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index 30507b7c2f..5d213a9886 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -669,6 +669,8 @@ void mlx5e_build_rq_params(struct mlx5_core_dev *mdev, void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c) { *ccp = (struct mlx5e_create_cq_param) { + .netdev = c->netdev, + .wq = c->priv->wq, .napi = &c->napi, .ch_stats = c->stats, .node = cpu_to_node(c->cpu), diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c index 15d97c685a..ca05b3252a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c @@ -518,9 +518,11 @@ static int mlx5e_ptp_open_txqsqs(struct mlx5e_ptp *c, for (tc = 0; tc < num_tc; tc++) { int txq_ix = ix_base + tc; + u32 tisn; - err = mlx5e_ptp_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix, - cparams, tc, &c->ptpsq[tc]); + tisn = mlx5e_profile_get_tisn(c->mdev, c->priv, c->priv->profile, + c->lag_port, tc); + err = mlx5e_ptp_open_txqsq(c, tisn, txq_ix, cparams, tc, &c->ptpsq[tc]); if (err) goto close_txqsq; } @@ -555,6 +557,8 @@ static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c, num_tc = mlx5e_get_dcb_num_tc(params); + ccp.netdev = c->netdev; + ccp.wq = c->priv->wq; ccp.node = dev_to_node(mlx5_core_dma_dev(c->mdev)); ccp.ch_stats = c->stats; ccp.napi = &c->napi; @@ -565,7 +569,7 @@ static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c, for (tc = 0; tc < num_tc; tc++) { struct mlx5e_cq *cq = &c->ptpsq[tc].txqsq.cq; - err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq); + err = mlx5e_open_cq(c->mdev, ptp_moder, cq_param, &ccp, cq); if (err) goto out_err_txqsq_cq; } @@ -574,7 +578,7 @@ static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c, struct mlx5e_cq *cq = &c->ptpsq[tc].ts_cq; struct mlx5e_ptpsq *ptpsq = &c->ptpsq[tc]; - err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq); + err = mlx5e_open_cq(c->mdev, ptp_moder, cq_param, &ccp, cq); if (err) goto out_err_ts_cq; @@ -602,6 +606,8 @@ static int mlx5e_ptp_open_rx_cq(struct mlx5e_ptp *c, struct mlx5e_cq_param *cq_param; struct mlx5e_cq *cq = &c->rq.cq; + ccp.netdev = c->netdev; + ccp.wq = c->priv->wq; ccp.node = dev_to_node(mlx5_core_dma_dev(c->mdev)); ccp.ch_stats = c->stats; ccp.napi = &c->napi; @@ -609,7 +615,7 @@ static int mlx5e_ptp_open_rx_cq(struct 
mlx5e_ptp *c, cq_param = &cparams->rq_param.cqp; - return mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq); + return mlx5e_open_cq(c->mdev, ptp_moder, cq_param, &ccp, cq); } static void mlx5e_ptp_close_tx_cqs(struct mlx5e_ptp *c) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h index 7b700d0f95..883c044852 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h @@ -49,7 +49,7 @@ enum { struct mlx5e_ptp { /* data path */ - struct mlx5e_ptpsq ptpsq[MLX5E_MAX_NUM_TC]; + struct mlx5e_ptpsq ptpsq[MLX5_MAX_NUM_TC]; struct mlx5e_rq rq; struct napi_struct napi; struct device *pdev; @@ -95,9 +95,15 @@ static inline void mlx5e_ptp_metadata_fifo_push(struct mlx5e_ptp_metadata_fifo * } static inline u8 +mlx5e_ptp_metadata_fifo_peek(struct mlx5e_ptp_metadata_fifo *fifo) +{ + return fifo->data[fifo->mask & fifo->cc]; +} + +static inline void mlx5e_ptp_metadata_fifo_pop(struct mlx5e_ptp_metadata_fifo *fifo) { - return fifo->data[fifo->mask & fifo->cc++]; + fifo->cc++; } static inline void diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c index 244bc15a42..922bc5b7c1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c @@ -77,29 +77,31 @@ int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs, struct mlx5e_params *params; struct mlx5e_channel *c; struct mlx5e_txqsq *sq; + u32 tisn; params = &chs->params; txq_ix = mlx5e_qid_from_qos(chs, node_qid); - WARN_ON(node_qid > priv->htb_max_qos_sqs); - if (node_qid == priv->htb_max_qos_sqs) { - struct mlx5e_sq_stats *stats, **stats_list = NULL; + WARN_ON(node_qid >= mlx5e_htb_cur_leaf_nodes(priv->htb)); + if (!priv->htb_qos_sq_stats) { + struct mlx5e_sq_stats **stats_list; + + stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev), + sizeof(*stats_list), GFP_KERNEL); + if (!stats_list) + return -ENOMEM; + + WRITE_ONCE(priv->htb_qos_sq_stats, stats_list); + } + + if (!priv->htb_qos_sq_stats[node_qid]) { + struct mlx5e_sq_stats *stats; - if (priv->htb_max_qos_sqs == 0) { - stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev), - sizeof(*stats_list), - GFP_KERNEL); - if (!stats_list) - return -ENOMEM; - } stats = kzalloc(sizeof(*stats), GFP_KERNEL); - if (!stats) { - kvfree(stats_list); + if (!stats) return -ENOMEM; - } - if (stats_list) - WRITE_ONCE(priv->htb_qos_sq_stats, stats_list); + WRITE_ONCE(priv->htb_qos_sq_stats[node_qid], stats); /* Order htb_max_qos_sqs increment after writing the array pointer. * Pairs with smp_load_acquire in en_stats.c. 
@@ -123,11 +125,13 @@ int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs, memset(&param_cq, 0, sizeof(param_cq)); mlx5e_build_sq_param(priv->mdev, params, &param_sq); mlx5e_build_tx_cq_param(priv->mdev, params, &param_cq); - err = mlx5e_open_cq(priv, params->tx_cq_moderation, &param_cq, &ccp, &sq->cq); + err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &param_cq, &ccp, &sq->cq); if (err) goto err_free_sq; - err = mlx5e_open_txqsq(c, priv->tisn[c->lag_port][0], txq_ix, params, - &param_sq, sq, 0, hw_id, + + tisn = mlx5e_profile_get_tisn(c->mdev, c->priv, c->priv->profile, + c->lag_port, 0); + err = mlx5e_open_txqsq(c, tisn, txq_ix, params, &param_sq, sq, 0, hw_id, priv->htb_qos_sq_stats[node_qid]); if (err) goto err_close_cq; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c index b12fe3c5a2..a55452c69f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c @@ -147,6 +147,20 @@ mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv, } } +static void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv, + struct tc_cls_matchall_offload *ma) +{ + struct mlx5e_rep_priv *rpriv = priv->ppriv; + u64 dbytes; + u64 dpkts; + + dpkts = priv->stats.rep_stats.vport_rx_packets - rpriv->prev_vf_vport_stats.rx_packets; + dbytes = priv->stats.rep_stats.vport_rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes; + mlx5e_stats_copy_rep_stats(&rpriv->prev_vf_vport_stats, &priv->stats.rep_stats); + flow_stats_update(&ma->stats, dbytes, dpkts, 0, jiffies, + FLOW_ACTION_HW_STATS_DELAYED); +} + static int mlx5e_rep_setup_tc_cls_matchall(struct mlx5e_priv *priv, struct tc_cls_matchall_offload *ma) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c index f675b19263..f66bbc8464 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c @@ -57,6 +57,7 @@ int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock) void mlx5e_selq_cleanup(struct mlx5e_selq *selq) { + mutex_lock(selq->state_lock); WARN_ON_ONCE(selq->is_prepared); kvfree(selq->standby); @@ -67,6 +68,7 @@ void mlx5e_selq_cleanup(struct mlx5e_selq *selq) kvfree(selq->standby); selq->standby = NULL; + mutex_unlock(selq->state_lock); } void mlx5e_selq_prepare_params(struct mlx5e_selq *selq, struct mlx5e_params *params) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c index 368a95fa77..b14cd62edf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c @@ -48,7 +48,8 @@ mlx5e_tc_act_pedit_parse_action(struct mlx5e_priv *priv, struct pedit_headers_action *hdrs, struct netlink_ext_ack *extack) { - u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1; + u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 
TCA_PEDIT_KEY_EX_CMD_SET : + TCA_PEDIT_KEY_EX_CMD_ADD; u8 htype = act->mangle.htype; int err = -EOPNOTSUPP; u32 mask, val, offset; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c index 5620d9f975..ac458a8d10 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c @@ -68,11 +68,13 @@ static int mlx5e_open_trap_rq(struct mlx5e_priv *priv, struct mlx5e_trap *t) node = dev_to_node(mdev->device); + ccp.netdev = priv->netdev; + ccp.wq = priv->wq; ccp.node = node; ccp.ch_stats = t->stats; ccp.napi = &t->napi; ccp.ix = 0; - err = mlx5e_open_cq(priv, trap_moder, &rq_param->cqp, &ccp, &rq->cq); + err = mlx5e_open_cq(priv->mdev, trap_moder, &rq_param->cqp, &ccp, &rq->cq); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index 13c7ed1bb3..82b5ca1be4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -103,7 +103,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq, xdptxd->dma_addr = dma_addr; if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe, - mlx5e_xmit_xdp_frame, sq, xdptxd, 0))) + mlx5e_xmit_xdp_frame, sq, xdptxd, 0, NULL))) return false; /* xmit_mode == MLX5E_XDP_XMIT_MODE_FRAME */ @@ -145,7 +145,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq, xdptxd->dma_addr = dma_addr; if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe, - mlx5e_xmit_xdp_frame, sq, xdptxd, 0))) + mlx5e_xmit_xdp_frame, sq, xdptxd, 0, NULL))) return false; /* xmit_mode == MLX5E_XDP_XMIT_MODE_PAGE */ @@ -256,9 +256,55 @@ static int mlx5e_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash, return 0; } +static int mlx5e_xdp_rx_vlan_tag(const struct xdp_md *ctx, __be16 *vlan_proto, + u16 *vlan_tci) +{ + const struct mlx5e_xdp_buff *_ctx = (void *)ctx; + const struct mlx5_cqe64 *cqe = _ctx->cqe; + + if (!cqe_has_vlan(cqe)) + return -ENODATA; + + *vlan_proto = htons(ETH_P_8021Q); + *vlan_tci = be16_to_cpu(cqe->vlan_info); + return 0; +} + const struct xdp_metadata_ops mlx5e_xdp_metadata_ops = { .xmo_rx_timestamp = mlx5e_xdp_rx_timestamp, .xmo_rx_hash = mlx5e_xdp_rx_hash, + .xmo_rx_vlan_tag = mlx5e_xdp_rx_vlan_tag, +}; + +struct mlx5e_xsk_tx_complete { + struct mlx5_cqe64 *cqe; + struct mlx5e_cq *cq; +}; + +static u64 mlx5e_xsk_fill_timestamp(void *_priv) +{ + struct mlx5e_xsk_tx_complete *priv = _priv; + u64 ts; + + ts = get_cqe_ts(priv->cqe); + + if (mlx5_is_real_time_rq(priv->cq->mdev) || mlx5_is_real_time_sq(priv->cq->mdev)) + return mlx5_real_time_cyc2time(&priv->cq->mdev->clock, ts); + + return mlx5_timecounter_cyc2time(&priv->cq->mdev->clock, ts); +} + +static void mlx5e_xsk_request_checksum(u16 csum_start, u16 csum_offset, void *priv) +{ + struct mlx5_wqe_eth_seg *eseg = priv; + + /* HW/FW is doing parsing, so offsets are largely ignored. 
*/ + eseg->cs_flags |= MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM; +} + +const struct xsk_tx_metadata_ops mlx5e_xsk_tx_metadata_ops = { + .tmo_fill_timestamp = mlx5e_xsk_fill_timestamp, + .tmo_request_checksum = mlx5e_xsk_request_checksum, }; /* returns true if packet was consumed by xdp */ @@ -398,11 +444,11 @@ INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq INDIRECT_CALLABLE_SCOPE bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd, - int check_result); + int check_result, struct xsk_tx_metadata *meta); INDIRECT_CALLABLE_SCOPE bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd, - int check_result) + int check_result, struct xsk_tx_metadata *meta) { struct mlx5e_tx_mpwqe *session = &sq->mpwqe; struct mlx5e_xdpsq_stats *stats = sq->stats; @@ -420,7 +466,7 @@ mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptx */ if (unlikely(sq->mpwqe.wqe)) mlx5e_xdp_mpwqe_complete(sq); - return mlx5e_xmit_xdp_frame(sq, xdptxd, 0); + return mlx5e_xmit_xdp_frame(sq, xdptxd, 0, meta); } if (!xdptxd->len) { skb_frag_t *frag = &xdptxdf->sinfo->frags[0]; @@ -450,6 +496,7 @@ mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptx * and it's safe to complete it at any time. */ mlx5e_xdp_mpwqe_session_start(sq); + xsk_tx_metadata_request(meta, &mlx5e_xsk_tx_metadata_ops, &session->wqe->eth); } mlx5e_xdp_mpwqe_add_dseg(sq, p, stats); @@ -480,7 +527,7 @@ INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq) INDIRECT_CALLABLE_SCOPE bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd, - int check_result) + int check_result, struct xsk_tx_metadata *meta) { struct mlx5e_xmit_data_frags *xdptxdf = container_of(xdptxd, struct mlx5e_xmit_data_frags, xd); @@ -601,6 +648,8 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd, sq->pc++; } + xsk_tx_metadata_request(meta, &mlx5e_xsk_tx_metadata_ops, eseg); + sq->doorbell_cseg = cseg; stats->xmit++; @@ -610,7 +659,9 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd, static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_wqe_info *wi, u32 *xsk_frames, - struct xdp_frame_bulk *bq) + struct xdp_frame_bulk *bq, + struct mlx5e_cq *cq, + struct mlx5_cqe64 *cqe) { struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo; u16 i; @@ -670,10 +721,24 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq, break; } - case MLX5E_XDP_XMIT_MODE_XSK: + case MLX5E_XDP_XMIT_MODE_XSK: { /* AF_XDP send */ + struct xsk_tx_metadata_compl *compl = NULL; + struct mlx5e_xsk_tx_complete priv = { + .cqe = cqe, + .cq = cq, + }; + + if (xp_tx_metadata_enabled(sq->xsk_pool)) { + xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo); + compl = &xdpi.xsk_meta; + + xsk_tx_metadata_complete(compl, &mlx5e_xsk_tx_metadata_ops, &priv); + } + (*xsk_frames)++; break; + } default: WARN_ON_ONCE(true); } @@ -722,7 +787,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq) sqcc += wi->num_wqebbs; - mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, &bq); + mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, &bq, cq, cqe); } while (!last_wqe); if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) { @@ -769,7 +834,7 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq) sq->cc += wi->num_wqebbs; - mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, &bq); + mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, &bq, NULL, NULL); } xdp_flush_frame_bulk(&bq); @@ -842,7 +907,7 @@ int 
mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, } ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe, - mlx5e_xmit_xdp_frame, sq, xdptxd, 0); + mlx5e_xmit_xdp_frame, sq, xdptxd, 0, NULL); if (unlikely(!ret)) { int j; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h index ecfe93a479..e054db1e10 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h @@ -33,6 +33,7 @@ #define __MLX5_EN_XDP_H__ #include <linux/indirect_call_wrapper.h> +#include <net/xdp_sock.h> #include "en.h" #include "en/txrx.h" @@ -82,7 +83,7 @@ enum mlx5e_xdp_xmit_mode { * num, page_1, page_2, ... , page_num. * * MLX5E_XDP_XMIT_MODE_XSK: - * none. + * frame.xsk_meta. */ #define MLX5E_XDP_FIFO_ENTRIES2DS_MAX_RATIO 4 @@ -97,6 +98,7 @@ union mlx5e_xdp_info { u8 num; struct page *page; } page; + struct xsk_tx_metadata_compl xsk_meta; }; struct mlx5e_xsk_param; @@ -112,13 +114,16 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags); extern const struct xdp_metadata_ops mlx5e_xdp_metadata_ops; +extern const struct xsk_tx_metadata_ops mlx5e_xsk_tx_metadata_ops; INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd, - int check_result)); + int check_result, + struct xsk_tx_metadata *meta)); INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd, - int check_result)); + int check_result, + struct xsk_tx_metadata *meta)); INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq)); INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c index 36826b5824..82e6abbc17 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c @@ -127,7 +127,7 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params, mlx5e_build_xsk_cparam(priv->mdev, params, xsk, priv->q_counter, cparam); - err = mlx5e_open_cq(c->priv, params->rx_cq_moderation, &cparam->rq.cqp, &ccp, + err = mlx5e_open_cq(c->mdev, params->rx_cq_moderation, &cparam->rq.cqp, &ccp, &c->xskrq.cq); if (unlikely(err)) goto err_free_cparam; @@ -136,7 +136,7 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params, if (unlikely(err)) goto err_close_rx_cq; - err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp, + err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp, &c->xsksq.cq); if (unlikely(err)) goto err_close_rq; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c index 597f319d47..a59199ed59 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c @@ -55,12 +55,16 @@ static void mlx5e_xsk_tx_post_err(struct mlx5e_xdpsq *sq, nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc); mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, *xdpi); + if (xp_tx_metadata_enabled(sq->xsk_pool)) + mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, + (union mlx5e_xdp_info) { .xsk_meta = {} }); sq->doorbell_cseg = &nopwqe->ctrl; } bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget) { struct xsk_buff_pool *pool = sq->xsk_pool; + struct 
xsk_tx_metadata *meta = NULL; union mlx5e_xdp_info xdpi; bool work_done = true; bool flush = false; @@ -93,12 +97,13 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget) xdptxd.dma_addr = xsk_buff_raw_get_dma(pool, desc.addr); xdptxd.data = xsk_buff_raw_get_data(pool, desc.addr); xdptxd.len = desc.len; + meta = xsk_buff_get_metadata(pool, desc.addr); xsk_buff_raw_dma_sync_for_device(pool, xdptxd.dma_addr, xdptxd.len); ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe, mlx5e_xmit_xdp_frame, sq, &xdptxd, - check_result); + check_result, meta); if (unlikely(!ret)) { if (sq->mpwqe.wqe) mlx5e_xdp_mpwqe_complete(sq); @@ -106,6 +111,16 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget) mlx5e_xsk_tx_post_err(sq, &xdpi); } else { mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi); + if (xp_tx_metadata_enabled(sq->xsk_pool)) { + struct xsk_tx_metadata_compl compl; + + xsk_tx_metadata_to_compl(meta, &compl); + XSK_TX_COMPL_FITS(void *); + + mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, + (union mlx5e_xdp_info) + { .xsk_meta = compl }); + } } flush = true; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c index b2cabd6ab8..cc9bcc4200 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c @@ -1640,6 +1640,7 @@ static const struct macsec_ops macsec_offload_ops = { .mdo_add_secy = mlx5e_macsec_add_secy, .mdo_upd_secy = mlx5e_macsec_upd_secy, .mdo_del_secy = mlx5e_macsec_del_secy, + .rx_uses_md_dst = true, }; bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c index e66f486faa..415fec7763 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c @@ -45,6 +45,10 @@ struct arfs_table { struct hlist_head rules_hash[ARFS_HASH_SIZE]; }; +enum { + MLX5E_ARFS_STATE_ENABLED, +}; + enum arfs_type { ARFS_IPV4_TCP, ARFS_IPV6_TCP, @@ -59,6 +63,7 @@ struct mlx5e_arfs_tables { spinlock_t arfs_lock; int last_filter_id; struct workqueue_struct *wq; + unsigned long state; }; struct arfs_tuple { @@ -169,6 +174,8 @@ int mlx5e_arfs_enable(struct mlx5e_flow_steering *fs) return err; } } + set_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state); + return 0; } @@ -454,6 +461,8 @@ static void arfs_del_rules(struct mlx5e_flow_steering *fs) int i; int j; + clear_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state); + spin_lock_bh(&arfs->arfs_lock); mlx5e_for_each_arfs_rule(rule, htmp, arfs->arfs_tables, i, j) { hlist_del_init(&rule->hlist); @@ -626,17 +635,8 @@ static void arfs_handle_work(struct work_struct *work) struct mlx5_flow_handle *rule; arfs = mlx5e_fs_get_arfs(priv->fs); - mutex_lock(&priv->state_lock); - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { - spin_lock_bh(&arfs->arfs_lock); - hlist_del(&arfs_rule->hlist); - spin_unlock_bh(&arfs->arfs_lock); - - mutex_unlock(&priv->state_lock); - kfree(arfs_rule); - goto out; - } - mutex_unlock(&priv->state_lock); + if (!test_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state)) + return; if (!arfs_rule->rule) { rule = arfs_add_rule(priv, arfs_rule); @@ -752,6 +752,11 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, return -EPROTONOSUPPORT; spin_lock_bh(&arfs->arfs_lock); + if (!test_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state)) { + spin_unlock_bh(&arfs->arfs_lock); + return 
-EPERM; + } + arfs_rule = arfs_find_rule(arfs_t, &fk); if (arfs_rule) { if (arfs_rule->rxq == rxq_index || work_busy(&arfs_rule->arfs_work)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c index 41c396e764..6ed3a32b7e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c @@ -74,7 +74,73 @@ int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey) return err; } -int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev) +int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn) +{ + void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); + + MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn); + + if (mlx5_lag_is_lacp_owner(mdev)) + MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1); + + return mlx5_core_create_tis(mdev, in, tisn); +} + +void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn) +{ + mlx5_core_destroy_tis(mdev, tisn); +} + +static void mlx5e_destroy_tises(struct mlx5_core_dev *mdev, u32 tisn[MLX5_MAX_PORTS][MLX5_MAX_NUM_TC]) +{ + int tc, i; + + for (i = 0; i < mlx5e_get_num_lag_ports(mdev); i++) + for (tc = 0; tc < MLX5_MAX_NUM_TC; tc++) + mlx5e_destroy_tis(mdev, tisn[i][tc]); +} + +static bool mlx5_lag_should_assign_affinity(struct mlx5_core_dev *mdev) +{ + return MLX5_CAP_GEN(mdev, lag_tx_port_affinity) && mlx5e_get_num_lag_ports(mdev) > 1; +} + +static int mlx5e_create_tises(struct mlx5_core_dev *mdev, u32 tisn[MLX5_MAX_PORTS][MLX5_MAX_NUM_TC]) +{ + int tc, i; + int err; + + for (i = 0; i < mlx5e_get_num_lag_ports(mdev); i++) { + for (tc = 0; tc < MLX5_MAX_NUM_TC; tc++) { + u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {}; + void *tisc; + + tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); + + MLX5_SET(tisc, tisc, prio, tc << 1); + + if (mlx5_lag_should_assign_affinity(mdev)) + MLX5_SET(tisc, tisc, lag_tx_port_affinity, i + 1); + + err = mlx5e_create_tis(mdev, in, &tisn[i][tc]); + if (err) + goto err_close_tises; + } + } + + return 0; + +err_close_tises: + for (; i >= 0; i--) { + for (tc--; tc >= 0; tc--) + mlx5e_destroy_tis(mdev, tisn[i][tc]); + tc = MLX5_MAX_NUM_TC; + } + + return err; +} + +int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises) { struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs; int err; @@ -103,6 +169,15 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev) goto err_destroy_mkey; } + if (create_tises) { + err = mlx5e_create_tises(mdev, res->tisn); + if (err) { + mlx5_core_err(mdev, "alloc tises failed, %d\n", err); + goto err_destroy_bfreg; + } + res->tisn_valid = true; + } + INIT_LIST_HEAD(&res->td.tirs_list); mutex_init(&res->td.list_lock); @@ -115,6 +190,8 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev) return 0; +err_destroy_bfreg: + mlx5_free_bfreg(mdev, &res->bfreg); err_destroy_mkey: mlx5_core_destroy_mkey(mdev, res->mkey); err_dealloc_transport_domain: @@ -130,6 +207,8 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev) mlx5_crypto_dek_cleanup(mdev->mlx5e_res.dek_priv); mdev->mlx5e_res.dek_priv = NULL; + if (res->tisn_valid) + mlx5e_destroy_tises(mdev, res->tisn); mlx5_free_bfreg(mdev, &res->bfreg); mlx5_core_destroy_mkey(mdev, res->mkey); mlx5_core_dealloc_transport_domain(mdev, res->td.tdn); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index c7c1b667b1..93461b0c57 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -451,6 +451,23 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, mutex_lock(&priv->state_lock); + /* If RXFH is configured, changing the channels number is allowed only if + * it does not require resizing the RSS table. This is because the previous + * configuration may no longer be compatible with the new RSS table. + */ + if (netif_is_rxfh_configured(priv->netdev)) { + int cur_rqt_size = mlx5e_rqt_size(priv->mdev, cur_params->num_channels); + int new_rqt_size = mlx5e_rqt_size(priv->mdev, count); + + if (new_rqt_size != cur_rqt_size) { + err = -EINVAL; + netdev_err(priv->netdev, + "%s: RXFH is configured, block changing channels number that affects RSS table size (new: %d, current: %d)\n", + __func__, new_rqt_size, cur_rqt_size); + goto out; + } + } + /* Don't allow changing the number of channels if HTB offload is active, * because the numeration of the QoS SQs will change, while per-queue * qdiscs are attached. @@ -1262,27 +1279,29 @@ static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev) return mlx5e_ethtool_get_rxfh_indir_size(priv); } -static int mlx5e_get_rxfh_context(struct net_device *dev, u32 *indir, - u8 *key, u8 *hfunc, u32 rss_context) +int mlx5e_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh) { - struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5e_priv *priv = netdev_priv(netdev); + u32 rss_context = rxfh->rss_context; int err; mutex_lock(&priv->state_lock); - err = mlx5e_rx_res_rss_get_rxfh(priv->rx_res, rss_context, indir, key, hfunc); + err = mlx5e_rx_res_rss_get_rxfh(priv->rx_res, rss_context, + rxfh->indir, rxfh->key, &rxfh->hfunc); mutex_unlock(&priv->state_lock); return err; } -static int mlx5e_set_rxfh_context(struct net_device *dev, const u32 *indir, - const u8 *key, const u8 hfunc, - u32 *rss_context, bool delete) +int mlx5e_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) { struct mlx5e_priv *priv = netdev_priv(dev); + u32 *rss_context = &rxfh->rss_context; + u8 hfunc = rxfh->hfunc; int err; mutex_lock(&priv->state_lock); - if (delete) { + if (*rss_context && rxfh->rss_delete) { err = mlx5e_rx_res_rss_destroy(priv->rx_res, *rss_context); goto unlock; } @@ -1295,7 +1314,8 @@ static int mlx5e_set_rxfh_context(struct net_device *dev, const u32 *indir, goto unlock; } - err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, *rss_context, indir, key, + err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, *rss_context, + rxfh->indir, rxfh->key, hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc); unlock: @@ -1303,25 +1323,6 @@ unlock: return err; } -int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, - u8 *hfunc) -{ - return mlx5e_get_rxfh_context(netdev, indir, key, hfunc, 0); -} - -int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, - const u8 *key, const u8 hfunc) -{ - struct mlx5e_priv *priv = netdev_priv(dev); - int err; - - mutex_lock(&priv->state_lock); - err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, 0, indir, key, - hfunc == ETH_RSS_HASH_NO_CHANGE ? 
NULL : &hfunc); - mutex_unlock(&priv->state_lock); - return err; -} - #define MLX5E_PFC_PREVEN_AUTO_TOUT_MSEC 100 #define MLX5E_PFC_PREVEN_TOUT_MAX_MSEC 8000 #define MLX5E_PFC_PREVEN_MINOR_PRECENT 85 @@ -2398,6 +2399,7 @@ static void mlx5e_get_rmon_stats(struct net_device *netdev, } const struct ethtool_ops mlx5e_ethtool_ops = { + .cap_rss_ctx_supported = true, .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES | ETHTOOL_COALESCE_USE_ADAPTIVE | @@ -2420,8 +2422,6 @@ const struct ethtool_ops mlx5e_ethtool_ops = { .get_rxfh_indir_size = mlx5e_get_rxfh_indir_size, .get_rxfh = mlx5e_get_rxfh, .set_rxfh = mlx5e_set_rxfh, - .get_rxfh_context = mlx5e_get_rxfh_context, - .set_rxfh_context = mlx5e_set_rxfh_context, .get_rxnfc = mlx5e_get_rxnfc, .set_rxnfc = mlx5e_set_rxnfc, .get_tunable = mlx5e_get_tunable, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 0c87ddb8a7..952f1f9813 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -902,6 +902,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params, pp_params.nid = node; pp_params.dev = rq->pdev; pp_params.napi = rq->cq.napi; + pp_params.netdev = rq->netdev; pp_params.dma_dir = rq->buff.map_dir; pp_params.max_len = PAGE_SIZE; @@ -1351,6 +1352,17 @@ void mlx5e_close_rq(struct mlx5e_rq *rq) mlx5e_free_rq(rq); } +u32 mlx5e_profile_get_tisn(struct mlx5_core_dev *mdev, + struct mlx5e_priv *priv, + const struct mlx5e_profile *profile, + u8 lag_port, u8 tc) +{ + if (profile->get_tisn) + return profile->get_tisn(mdev, priv, lag_port, tc); + + return mdev->mlx5e_res.hw_objs.tisn[lag_port][tc]; +} + static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq) { kvfree(sq->db.xdpi_fifo.xi); @@ -1919,7 +1931,8 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params, return err; csp.tis_lst_sz = 1; - csp.tisn = c->priv->tisn[c->lag_port][0]; /* tc = 0 */ + csp.tisn = mlx5e_profile_get_tisn(c->mdev, c->priv, c->priv->profile, + c->lag_port, 0); /* tc = 0 */ csp.cqn = sq->cq.mcq.cqn; csp.wq_ctrl = &sq->wq_ctrl; csp.min_inline_mode = sq->min_inline_mode; @@ -1981,11 +1994,12 @@ void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq) mlx5e_free_xdpsq(sq); } -static int mlx5e_alloc_cq_common(struct mlx5e_priv *priv, +static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev, + struct net_device *netdev, + struct workqueue_struct *workqueue, struct mlx5e_cq_param *param, struct mlx5e_cq *cq) { - struct mlx5_core_dev *mdev = priv->mdev; struct mlx5_core_cq *mcq = &cq->mcq; int err; u32 i; @@ -2012,13 +2026,13 @@ static int mlx5e_alloc_cq_common(struct mlx5e_priv *priv, } cq->mdev = mdev; - cq->netdev = priv->netdev; - cq->priv = priv; + cq->netdev = netdev; + cq->workqueue = workqueue; return 0; } -static int mlx5e_alloc_cq(struct mlx5e_priv *priv, +static int mlx5e_alloc_cq(struct mlx5_core_dev *mdev, struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp, struct mlx5e_cq *cq) @@ -2029,7 +2043,7 @@ static int mlx5e_alloc_cq(struct mlx5e_priv *priv, param->wq.db_numa_node = ccp->node; param->eq_ix = ccp->ix; - err = mlx5e_alloc_cq_common(priv, param, cq); + err = mlx5e_alloc_cq_common(mdev, ccp->netdev, ccp->wq, param, cq); cq->napi = ccp->napi; cq->ch_stats = ccp->ch_stats; @@ -2095,14 +2109,13 @@ static void mlx5e_destroy_cq(struct mlx5e_cq *cq) mlx5_core_destroy_cq(cq->mdev, &cq->mcq); } -int mlx5e_open_cq(struct mlx5e_priv *priv, struct dim_cq_moder moder, +int 
mlx5e_open_cq(struct mlx5_core_dev *mdev, struct dim_cq_moder moder, struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp, struct mlx5e_cq *cq) { - struct mlx5_core_dev *mdev = priv->mdev; int err; - err = mlx5e_alloc_cq(priv, param, ccp, cq); + err = mlx5e_alloc_cq(mdev, param, ccp, cq); if (err) return err; @@ -2135,7 +2148,7 @@ static int mlx5e_open_tx_cqs(struct mlx5e_channel *c, int tc; for (tc = 0; tc < c->num_tc; tc++) { - err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->txq_sq.cqp, + err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &cparam->txq_sq.cqp, ccp, &c->sq[tc].cq); if (err) goto err_close_tx_cqs; @@ -2203,12 +2216,15 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c, for (tc = 0; tc < mlx5e_get_dcb_num_tc(params); tc++) { int txq_ix = c->ix + tc * params->num_channels; u32 qos_queue_group_id; + u32 tisn; + tisn = mlx5e_profile_get_tisn(c->mdev, c->priv, c->priv->profile, + c->lag_port, tc); err = mlx5e_txq_get_qos_node_hw_id(params, txq_ix, &qos_queue_group_id); if (err) goto err_close_sqs; - err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix, + err = mlx5e_open_txqsq(c, tisn, txq_ix, params, &cparam->txq_sq, &c->sq[tc], tc, qos_queue_group_id, &c->priv->channel_stats[c->ix]->sq[tc]); @@ -2336,12 +2352,12 @@ static int mlx5e_open_queues(struct mlx5e_channel *c, mlx5e_build_create_cq_param(&ccp, c); - err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->async_icosq.cqp, &ccp, + err = mlx5e_open_cq(c->mdev, icocq_moder, &cparam->async_icosq.cqp, &ccp, &c->async_icosq.cq); if (err) return err; - err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->icosq.cqp, &ccp, + err = mlx5e_open_cq(c->mdev, icocq_moder, &cparam->icosq.cqp, &ccp, &c->icosq.cq); if (err) goto err_close_async_icosq_cq; @@ -2350,17 +2366,17 @@ static int mlx5e_open_queues(struct mlx5e_channel *c, if (err) goto err_close_icosq_cq; - err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp, + err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp, &c->xdpsq.cq); if (err) goto err_close_tx_cqs; - err = mlx5e_open_cq(c->priv, params->rx_cq_moderation, &cparam->rq.cqp, &ccp, + err = mlx5e_open_cq(c->mdev, params->rx_cq_moderation, &cparam->rq.cqp, &ccp, &c->rq.cq); if (err) goto err_close_xdp_tx_cqs; - err = c->xdp ? mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp, + err = c->xdp ? 
mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp, &c->rq_xdpsq.cq) : 0; if (err) goto err_close_rx_cq; @@ -3307,7 +3323,7 @@ static int mlx5e_alloc_drop_cq(struct mlx5e_priv *priv, param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev)); param->wq.db_numa_node = dev_to_node(mlx5_core_dma_dev(mdev)); - return mlx5e_alloc_cq_common(priv, param, cq); + return mlx5e_alloc_cq_common(priv->mdev, priv->netdev, priv->wq, param, cq); } int mlx5e_open_drop_rq(struct mlx5e_priv *priv, @@ -3363,75 +3379,6 @@ void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq) mlx5e_free_cq(&drop_rq->cq); } -int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn) -{ - void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); - - MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn); - - if (MLX5_GET(tisc, tisc, tls_en)) - MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.hw_objs.pdn); - - if (mlx5_lag_is_lacp_owner(mdev)) - MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1); - - return mlx5_core_create_tis(mdev, in, tisn); -} - -void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn) -{ - mlx5_core_destroy_tis(mdev, tisn); -} - -void mlx5e_destroy_tises(struct mlx5e_priv *priv) -{ - int tc, i; - - for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++) - for (tc = 0; tc < priv->profile->max_tc; tc++) - mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]); -} - -static bool mlx5e_lag_should_assign_affinity(struct mlx5_core_dev *mdev) -{ - return MLX5_CAP_GEN(mdev, lag_tx_port_affinity) && mlx5e_get_num_lag_ports(mdev) > 1; -} - -int mlx5e_create_tises(struct mlx5e_priv *priv) -{ - int tc, i; - int err; - - for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++) { - for (tc = 0; tc < priv->profile->max_tc; tc++) { - u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {}; - void *tisc; - - tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); - - MLX5_SET(tisc, tisc, prio, tc << 1); - - if (mlx5e_lag_should_assign_affinity(priv->mdev)) - MLX5_SET(tisc, tisc, lag_tx_port_affinity, i + 1); - - err = mlx5e_create_tis(priv->mdev, in, &priv->tisn[i][tc]); - if (err) - goto err_close_tises; - } - } - - return 0; - -err_close_tises: - for (; i >= 0; i--) { - for (tc--; tc >= 0; tc--) - mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]); - tc = priv->profile->max_tc; - } - - return err; -} - static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv) { if (priv->mqprio_rl) { @@ -3440,7 +3387,6 @@ static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv) priv->mqprio_rl = NULL; } mlx5e_accel_cleanup_tx(priv); - mlx5e_destroy_tises(priv); } static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd) @@ -3542,7 +3488,7 @@ static int mlx5e_setup_tc_mqprio_dcb(struct mlx5e_priv *priv, mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; - if (tc && tc != MLX5E_MAX_NUM_TC) + if (tc && tc != MLX5_MAX_NUM_TC) return -EINVAL; new_params = priv->channels.params; @@ -5185,6 +5131,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->netdev_ops = &mlx5e_netdev_ops; netdev->xdp_metadata_ops = &mlx5e_xdp_metadata_ops; + netdev->xsk_tx_metadata_ops = &mlx5e_xsk_tx_metadata_ops; mlx5e_dcbnl_build_netdev(netdev); @@ -5265,7 +5212,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->gso_partial_features |= NETIF_F_GSO_UDP_L4; netdev->hw_features |= NETIF_F_GSO_UDP_L4; - netdev->features |= NETIF_F_GSO_UDP_L4; mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled); @@ -5505,23 +5451,13 @@ static int mlx5e_init_nic_tx(struct mlx5e_priv *priv) { int err; - err = 
mlx5e_create_tises(priv); - if (err) { - mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err); - return err; - } - err = mlx5e_accel_init_tx(priv); if (err) - goto err_destroy_tises; + return err; mlx5e_set_mqprio_rl(priv); mlx5e_dcbnl_initialize(priv); return 0; - -err_destroy_tises: - mlx5e_destroy_tises(priv); - return err; } static void mlx5e_nic_enable(struct mlx5e_priv *priv) @@ -5616,7 +5552,7 @@ static const struct mlx5e_profile mlx5e_nic_profile = { .update_stats = mlx5e_stats_update_ndo_stats, .update_carrier = mlx5e_update_carrier, .rx_handlers = &mlx5e_rx_handlers_nic, - .max_tc = MLX5E_MAX_NUM_TC, + .max_tc = MLX5_MAX_NUM_TC, .stats_grps = mlx5e_nic_stats_grps, .stats_grps_num = mlx5e_nic_stats_grps_num, .features = BIT(MLX5E_PROFILE_FEATURE_PTP_RX) | @@ -5759,9 +5695,7 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv) kfree(priv->tx_rates); kfree(priv->txq2sq); destroy_workqueue(priv->wq); - mutex_lock(&priv->state_lock); mlx5e_selq_cleanup(&priv->selq); - mutex_unlock(&priv->state_lock); free_cpumask_var(priv->scratchpad.cpumask); for (i = 0; i < priv->htb_max_qos_sqs; i++) @@ -6056,7 +5990,7 @@ static int mlx5e_resume(struct auxiliary_device *adev) if (netif_device_present(netdev)) return 0; - err = mlx5e_create_mdev_resources(mdev); + err = mlx5e_create_mdev_resources(mdev, true); if (err) return err; @@ -6069,7 +6003,7 @@ static int mlx5e_resume(struct auxiliary_device *adev) return 0; } -static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state) +static int _mlx5e_suspend(struct auxiliary_device *adev) { struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev); struct mlx5e_priv *priv = mlx5e_dev->priv; @@ -6087,15 +6021,18 @@ static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state) return 0; } -static int mlx5e_probe(struct auxiliary_device *adev, - const struct auxiliary_device_id *id) +static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state) +{ + return _mlx5e_suspend(adev); +} + +static int _mlx5e_probe(struct auxiliary_device *adev) { struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev); const struct mlx5e_profile *profile = &mlx5e_nic_profile; struct mlx5_core_dev *mdev = edev->mdev; struct mlx5e_dev *mlx5e_dev; struct net_device *netdev; - pm_message_t state = {}; struct mlx5e_priv *priv; int err; @@ -6150,7 +6087,7 @@ static int mlx5e_probe(struct auxiliary_device *adev, return 0; err_resume: - mlx5e_suspend(adev, state); + _mlx5e_suspend(adev); err_profile_cleanup: profile->cleanup(priv); err_destroy_netdev: @@ -6162,16 +6099,21 @@ err_devlink_unregister: return err; } +static int mlx5e_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) +{ + return _mlx5e_probe(adev); +} + static void mlx5e_remove(struct auxiliary_device *adev) { struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev); struct mlx5e_priv *priv = mlx5e_dev->priv; - pm_message_t state = {}; mlx5_core_uplink_netdev_set(priv->mdev, NULL); mlx5e_dcbnl_delete_app(priv); unregister_netdev(priv->netdev); - mlx5e_suspend(adev, state); + _mlx5e_suspend(adev); priv->profile->cleanup(priv); mlx5e_destroy_netdev(priv); mlx5e_devlink_port_unregister(mlx5e_dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index e92d4f8359..05527418fa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -112,8 +112,18 @@ static const struct counter_desc vport_rep_stats_desc[] = { 
tx_vport_rdma_multicast_bytes) }, }; +static const struct counter_desc vport_rep_loopback_stats_desc[] = { + { MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, + vport_loopback_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, + vport_loopback_bytes) }, +}; + #define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc) #define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc) +#define NUM_VPORT_REP_LOOPBACK_COUNTERS(dev) \ + (MLX5_CAP_GEN(dev, vport_counter_local_loopback) ? \ + ARRAY_SIZE(vport_rep_loopback_stats_desc) : 0) static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw_rep) { @@ -157,7 +167,8 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw_rep) static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport_rep) { - return NUM_VPORT_REP_HW_COUNTERS; + return NUM_VPORT_REP_HW_COUNTERS + + NUM_VPORT_REP_LOOPBACK_COUNTERS(priv->mdev); } static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport_rep) @@ -166,6 +177,9 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport_rep) for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++) strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_rep_stats_desc[i].format); + for (i = 0; i < NUM_VPORT_REP_LOOPBACK_COUNTERS(priv->mdev); i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + vport_rep_loopback_stats_desc[i].format); return idx; } @@ -176,6 +190,9 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport_rep) for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++) data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.rep_stats, vport_rep_stats_desc, i); + for (i = 0; i < NUM_VPORT_REP_LOOPBACK_COUNTERS(priv->mdev); i++) + data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.rep_stats, + vport_rep_loopback_stats_desc, i); return idx; } @@ -247,6 +264,13 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep) rep_stats->tx_vport_rdma_multicast_bytes = MLX5_GET_CTR(out, received_ib_multicast.octets); + if (MLX5_CAP_GEN(priv->mdev, vport_counter_local_loopback)) { + rep_stats->vport_loopback_packets = + MLX5_GET_CTR(out, local_loopback.packets); + rep_stats->vport_loopback_bytes = + MLX5_GET_CTR(out, local_loopback.octets); + } + out: kvfree(out); } @@ -1156,12 +1180,6 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv) struct mlx5e_rep_priv *rpriv = priv->ppriv; int err; - err = mlx5e_create_tises(priv); - if (err) { - mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err); - return err; - } - err = mlx5e_rep_neigh_init(rpriv); if (err) goto err_neigh_init; @@ -1184,7 +1202,6 @@ err_ht_init: err_init_tx: mlx5e_rep_neigh_cleanup(rpriv); err_neigh_init: - mlx5e_destroy_tises(priv); return err; } @@ -1198,7 +1215,6 @@ static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv) mlx5e_cleanup_uplink_rep_tx(rpriv); mlx5e_rep_neigh_cleanup(rpriv); - mlx5e_destroy_tises(priv); } static void mlx5e_rep_enable(struct mlx5e_priv *priv) @@ -1428,7 +1444,7 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = { .update_stats = mlx5e_stats_update_ndo_stats, .update_carrier = mlx5e_update_carrier, .rx_handlers = &mlx5e_rx_handlers_rep, - .max_tc = MLX5E_MAX_NUM_TC, + .max_tc = MLX5_MAX_NUM_TC, .stats_grps = mlx5e_ul_rep_stats_grps, .stats_grps_num = mlx5e_ul_rep_stats_grps_num, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 8d9743a5e4..d601b5faae 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -298,8 +298,8 @@ static void mlx5e_page_release_fragmented(struct mlx5e_rq *rq, u16 drain_count = MLX5E_PAGECNT_BIAS_MAX - frag_page->frags; struct page 
*page = frag_page->page; - if (page_pool_defrag_page(page, drain_count) == 0) - page_pool_put_defragged_page(rq->page_pool, page, -1, true); + if (page_pool_unref_page(page, drain_count) == 0) + page_pool_put_unrefed_page(rq->page_pool, page, -1, true); } static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq, @@ -1039,7 +1039,7 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq) (struct mlx5_err_cqe *)cqe); mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs); if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) - queue_work(cq->priv->wq, &sq->recover_work); + queue_work(cq->workqueue, &sq->recover_work); break; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 477c547dcc..12b3607afe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -476,6 +476,8 @@ struct mlx5e_rep_stats { u64 tx_vport_rdma_multicast_packets; u64 rx_vport_rdma_multicast_bytes; u64 tx_vport_rdma_multicast_bytes; + u64 vport_loopback_packets; + u64 vport_loopback_bytes; }; struct mlx5e_stats { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 404dd1d9b2..9fb2c057bd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -3210,10 +3210,10 @@ static int offload_pedit_fields(struct mlx5e_priv *priv, headers_c = mlx5e_get_match_headers_criteria(*action_flags, &parse_attr->spec); headers_v = mlx5e_get_match_headers_value(*action_flags, &parse_attr->spec); - set_masks = &hdrs[0].masks; - add_masks = &hdrs[1].masks; - set_vals = &hdrs[0].vals; - add_vals = &hdrs[1].vals; + set_masks = &hdrs[TCA_PEDIT_KEY_EX_CMD_SET].masks; + add_masks = &hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].masks; + set_vals = &hdrs[TCA_PEDIT_KEY_EX_CMD_SET].vals; + add_vals = &hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].vals; for (i = 0; i < ARRAY_SIZE(fields); i++) { bool skip; @@ -5031,22 +5031,6 @@ int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv, return apply_police_params(priv, 0, extack); } -void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv, - struct tc_cls_matchall_offload *ma) -{ - struct mlx5e_rep_priv *rpriv = priv->ppriv; - struct rtnl_link_stats64 cur_stats; - u64 dbytes; - u64 dpkts; - - mlx5e_stats_copy_rep_stats(&cur_stats, &priv->stats.rep_stats); - dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets; - dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes; - rpriv->prev_vf_vport_stats = cur_stats; - flow_stats_update(&ma->stats, dbytes, dpkts, 0, jiffies, - FLOW_ACTION_HW_STATS_DELAYED); -} - static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index adb39e30f9..c24bda56b2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -203,8 +203,6 @@ int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv, struct tc_cls_matchall_offload *f); int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv, struct tc_cls_matchall_offload *f); -void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv, - struct tc_cls_matchall_offload *ma); struct mlx5e_encap_entry; void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 1ead69c5f5..e21a3b4128 
100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -398,6 +398,8 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb, (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) { u8 metadata_index = be32_to_cpu(eseg->flow_table_metadata); + mlx5e_ptp_metadata_fifo_pop(&sq->ptpsq->metadata_freelist); + mlx5e_skb_cb_hwtstamp_init(skb); mlx5e_ptp_metadata_map_put(&sq->ptpsq->metadata_map, skb, metadata_index); @@ -496,9 +498,6 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb, err_drop: stats->dropped++; - if (unlikely(sq->ptpsq && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) - mlx5e_ptp_metadata_fifo_push(&sq->ptpsq->metadata_freelist, - be32_to_cpu(eseg->flow_table_metadata)); dev_kfree_skb_any(skb); mlx5e_tx_flush(sq); } @@ -657,7 +656,7 @@ static void mlx5e_cqe_ts_id_eseg(struct mlx5e_ptpsq *ptpsq, struct sk_buff *skb, { if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) eseg->flow_table_metadata = - cpu_to_be32(mlx5e_ptp_metadata_fifo_pop(&ptpsq->metadata_freelist)); + cpu_to_be32(mlx5e_ptp_metadata_fifo_peek(&ptpsq->metadata_freelist)); } static void mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq, @@ -863,7 +862,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) mlx5e_dump_error_cqe(&sq->cq, sq->sqn, (struct mlx5_err_cqe *)cqe); mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs); - queue_work(cq->priv->wq, &sq->recover_work); + queue_work(cq->workqueue, &sq->recover_work); } stats->cqe_err++; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 3047d7015c..1789800faa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1868,6 +1868,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) if (err) goto abort; + dev->priv.eswitch = esw; err = esw_offloads_init(esw); if (err) goto reps_err; @@ -1892,11 +1893,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC; else esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE; - if (MLX5_ESWITCH_MANAGER(dev) && - mlx5_esw_vport_match_metadata_supported(esw)) - esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA; - - dev->priv.eswitch = esw; BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head); esw_info(dev, @@ -1908,6 +1904,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) reps_err: mlx5_esw_vports_cleanup(esw); + dev->priv.eswitch = NULL; abort: if (esw->work_queue) destroy_workqueue(esw->work_queue); @@ -1926,7 +1923,6 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) esw_info(esw->dev, "cleanup\n"); - esw->dev->priv.eswitch = NULL; destroy_workqueue(esw->work_queue); WARN_ON(refcount_read(&esw->qos.refcnt)); mutex_destroy(&esw->state_lock); @@ -1937,6 +1933,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) mutex_destroy(&esw->offloads.encap_tbl_lock); mutex_destroy(&esw->offloads.decap_tbl_lock); esw_offloads_cleanup(esw); + esw->dev->priv.eswitch = NULL; mlx5_esw_vports_cleanup(esw); debugfs_remove_recursive(esw->debugfs_root); devl_params_unregister(priv_to_devlink(esw->dev), mlx5_eswitch_params, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index b4eb17141e..349e28a6dd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -618,13 +618,6 @@ static inline bool mlx5_esw_allowed(const 
struct mlx5_eswitch *esw) return esw && MLX5_ESWITCH_MANAGER(esw->dev); } -/* The returned number is valid only when the dev is eswitch manager. */ -static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev) -{ - return mlx5_core_is_ecpf_esw_manager(dev) ? - MLX5_VPORT_ECPF : MLX5_VPORT_PF; -} - static inline bool mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index baaae628b0..e3cce110e5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -2476,6 +2476,10 @@ int esw_offloads_init(struct mlx5_eswitch *esw) if (err) return err; + if (MLX5_ESWITCH_MANAGER(esw->dev) && + mlx5_esw_vport_match_metadata_supported(esw)) + esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA; + err = devl_params_register(priv_to_devlink(esw->dev), esw_devlink_params, ARRAY_SIZE(esw_devlink_params)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index b29299c49a..9b8599c200 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -1146,3 +1146,37 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type typ return mlx5_fs_cmd_get_stub_cmds(); } } + +int mlx5_fs_cmd_set_l2table_entry_silent(struct mlx5_core_dev *dev, u8 silent_mode) +{ + u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)] = {}; + + if (silent_mode && !MLX5_CAP_GEN(dev, silent_mode)) + return -EOPNOTSUPP; + + MLX5_SET(set_l2_table_entry_in, in, opcode, MLX5_CMD_OP_SET_L2_TABLE_ENTRY); + MLX5_SET(set_l2_table_entry_in, in, silent_mode_valid, 1); + MLX5_SET(set_l2_table_entry_in, in, silent_mode, silent_mode); + + return mlx5_cmd_exec_in(dev, set_l2_table_entry, in); +} + +int mlx5_fs_cmd_set_tx_flow_table_root(struct mlx5_core_dev *dev, u32 ft_id, bool disconnect) +{ + u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {}; + u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {}; + + if (disconnect && MLX5_CAP_FLOWTABLE_NIC_TX(dev, reset_root_to_default)) + return -EOPNOTSUPP; + + MLX5_SET(set_flow_table_root_in, in, opcode, + MLX5_CMD_OP_SET_FLOW_TABLE_ROOT); + MLX5_SET(set_flow_table_root_in, in, table_type, + FS_FT_NIC_TX); + if (disconnect) + MLX5_SET(set_flow_table_root_in, in, op_mod, 1); + else + MLX5_SET(set_flow_table_root_in, in, table_id, ft_id); + + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h index 7790ae5531..53e0e5137d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h @@ -122,4 +122,6 @@ int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len, const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type); const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void); +int mlx5_fs_cmd_set_l2table_entry_silent(struct mlx5_core_dev *dev, u8 silent_mode); +int mlx5_fs_cmd_set_tx_flow_table_root(struct mlx5_core_dev *dev, u32 ft_id, bool disconnect); #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index e6bfa7e4f1..cf085a478e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c 
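A hypothetical caller sketch, not part of this patch: it shows one way the two helpers declared in fs_cmd.h just above (mlx5_fs_cmd_set_l2table_entry_silent() and mlx5_fs_cmd_set_tx_flow_table_root()) might be combined inside the driver. The function name, the enable/rollback ordering, and the error policy are illustrative assumptions; the snippet builds only in-tree against the driver's headers.

```c
/* Illustrative only -- not upstream code. Assumes the driver's fs_cmd.h. */
static int example_set_silent_tx_root(struct mlx5_core_dev *dev, u32 tx_ft_id)
{
	int err;

	/* Returns -EOPNOTSUPP when the silent_mode capability is absent. */
	err = mlx5_fs_cmd_set_l2table_entry_silent(dev, 1);
	if (err)
		return err;

	/* disconnect == false: point the NIC TX root at the given table id. */
	err = mlx5_fs_cmd_set_tx_flow_table_root(dev, tx_ft_id, false);
	if (err)
		/* Hypothetical rollback: clear silent mode again on failure. */
		mlx5_fs_cmd_set_l2table_entry_silent(dev, 0);

	return err;
}
```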
@@ -1664,6 +1664,16 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft, return err; } +static bool mlx5_pkt_reformat_cmp(struct mlx5_pkt_reformat *p1, + struct mlx5_pkt_reformat *p2) +{ + return p1->owner == p2->owner && + (p1->owner == MLX5_FLOW_RESOURCE_OWNER_FW ? + p1->id == p2->id : + mlx5_fs_dr_action_get_pkt_reformat_id(p1) == + mlx5_fs_dr_action_get_pkt_reformat_id(p2)); +} + static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1, struct mlx5_flow_destination *d2) { @@ -1675,8 +1685,8 @@ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1, ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ? (d1->vport.vhca_id == d2->vport.vhca_id) : true) && ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ? - (d1->vport.pkt_reformat->id == - d2->vport.pkt_reformat->id) : true)) || + mlx5_pkt_reformat_cmp(d1->vport.pkt_reformat, + d2->vport.pkt_reformat) : true)) || (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE && d1->ft == d2->ft) || (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR && @@ -1808,8 +1818,9 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg, } trace_mlx5_fs_set_fte(fte, false); + /* Link newly added rules into the tree. */ for (i = 0; i < handle->num_rules; i++) { - if (refcount_read(&handle->rule[i]->node.refcount) == 1) { + if (!handle->rule[i]->node.parent) { tree_add_node(&handle->rule[i]->node, &fte->node); trace_mlx5_fs_add_rule(handle->rule[i]); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 4aed1768b8..78eb6b7097 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -181,7 +181,7 @@ struct mlx5_flow_rule { struct mlx5_flow_handle { int num_rules; - struct mlx5_flow_rule *rule[]; + struct mlx5_flow_rule *rule[] __counted_by(num_rules); }; /* Type of children is mlx5_flow_group */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c index 17fe30a4c0..0c26d707ee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c @@ -539,7 +539,7 @@ struct mlx5_fc_bulk { u32 base_id; int bulk_len; unsigned long *bitmask; - struct mlx5_fc fcs[]; + struct mlx5_fc fcs[] __counted_by(bulk_len); }; static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c index 3a9cdf7940..2911aa34a5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c @@ -348,6 +348,25 @@ static int mlx5_check_hotplug_interrupt(struct mlx5_core_dev *dev) } #endif +static const struct pci_device_id mgt_ifc_device_ids[] = { + { PCI_VDEVICE(MELLANOX, 0xc2d2) }, /* BlueField1 MGT interface device ID */ + { PCI_VDEVICE(MELLANOX, 0xc2d3) }, /* BlueField2 MGT interface device ID */ + { PCI_VDEVICE(MELLANOX, 0xc2d4) }, /* BlueField3-Lx MGT interface device ID */ + { PCI_VDEVICE(MELLANOX, 0xc2d5) }, /* BlueField3 MGT interface device ID */ + { PCI_VDEVICE(MELLANOX, 0xc2d6) }, /* BlueField4 MGT interface device ID */ +}; + +static bool mlx5_is_mgt_ifc_pci_device(struct mlx5_core_dev *dev, u16 dev_id) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(mgt_ifc_device_ids); ++i) + if (mgt_ifc_device_ids[i].device == dev_id) + return true; + + return false; +} + static int mlx5_check_dev_ids(struct 
mlx5_core_dev *dev, u16 dev_id) { struct pci_bus *bridge_bus = dev->pdev->bus; @@ -362,10 +381,15 @@ static int mlx5_check_dev_ids(struct mlx5_core_dev *dev, u16 dev_id) err = pci_read_config_word(sdev, PCI_DEVICE_ID, &sdev_id); if (err) return pcibios_err_to_errno(err); - if (sdev_id != dev_id) { - mlx5_core_warn(dev, "unrecognized dev_id (0x%x)\n", sdev_id); - return -EPERM; - } + + if (sdev_id == dev_id) + continue; + + if (mlx5_is_mgt_ifc_pci_device(dev, sdev_id)) + continue; + + mlx5_core_warn(dev, "unrecognized dev_id (0x%x)\n", sdev_id); + return -EPERM; } return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 2bf77a5251..d77be1b4dd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -339,7 +339,7 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv) return err; } - err = mlx5i_create_tis(priv->mdev, ipriv->qpn, &priv->tisn[0][0]); + err = mlx5i_create_tis(priv->mdev, ipriv->qpn, &ipriv->tisn); if (err) { mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err); goto err_destroy_underlay_qp; @@ -356,7 +356,7 @@ static void mlx5i_cleanup_tx(struct mlx5e_priv *priv) { struct mlx5i_priv *ipriv = priv->ppriv; - mlx5e_destroy_tis(priv->mdev, priv->tisn[0][0]); + mlx5e_destroy_tis(priv->mdev, ipriv->tisn); mlx5i_destroy_underlay_qp(priv->mdev, ipriv->qpn); } @@ -483,6 +483,18 @@ static unsigned int mlx5i_stats_grps_num(struct mlx5e_priv *priv) return ARRAY_SIZE(mlx5i_stats_grps); } +u32 mlx5i_get_tisn(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv, u8 lag_port, u8 tc) +{ + struct mlx5i_priv *ipriv = priv->ppriv; + + if (WARN(lag_port || tc, + "IPoIB unexpected non-zero value: lag_port (%u), tc (%u)\n", + lag_port, tc)) + return 0; + + return ipriv->tisn; +} + static const struct mlx5e_profile mlx5i_nic_profile = { .init = mlx5i_init, .cleanup = mlx5i_cleanup, @@ -499,6 +511,7 @@ static const struct mlx5e_profile mlx5i_nic_profile = { .max_tc = MLX5I_MAX_NUM_TC, .stats_grps = mlx5i_stats_grps, .stats_grps_num = mlx5i_stats_grps_num, + .get_tisn = mlx5i_get_tisn, }; /* mlx5i netdev NDos */ @@ -770,7 +783,7 @@ static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u32 port_num, } /* This should only be called once per mdev */ - err = mlx5e_create_mdev_resources(mdev); + err = mlx5e_create_mdev_resources(mdev, false); if (err) goto destroy_ht; } @@ -829,7 +842,7 @@ int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev, *params = (struct rdma_netdev_alloc_params){ .sizeof_priv = sizeof(struct mlx5i_priv) + sizeof(struct mlx5e_priv), - .txqs = nch * MLX5E_MAX_NUM_TC, + .txqs = nch * MLX5_MAX_NUM_TC, .rxqs = nch, .param = mdev, .initialize_rdma_netdev = mlx5_rdma_setup_rn, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h index f3f2af9720..2ab6437a1c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h @@ -53,6 +53,7 @@ extern const struct mlx5e_rx_handlers mlx5i_rx_handlers; struct mlx5i_priv { struct rdma_netdev rn; /* keep this first */ u32 qpn; + u32 tisn; bool sub_interface; u32 num_sub_interfaces; u32 qkey; @@ -63,6 +64,7 @@ struct mlx5i_priv { }; int mlx5i_create_tis(struct mlx5_core_dev *mdev, u32 underlay_qpn, u32 *tisn); +u32 mlx5i_get_tisn(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv, u8 lag_port, u8 tc); /* Underlay QP create/destroy functions */ int 
mlx5i_create_underlay_qp(struct mlx5e_priv *priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c index 03e6812979..f87471306f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c @@ -218,7 +218,7 @@ static int mlx5i_pkey_open(struct net_device *netdev) goto err_unint_underlay_qp; } - err = mlx5i_create_tis(mdev, ipriv->qpn, &epriv->tisn[0][0]); + err = mlx5i_create_tis(mdev, ipriv->qpn, &ipriv->tisn); if (err) { mlx5_core_warn(mdev, "create child tis failed, %d\n", err); goto err_remove_rx_uderlay_qp; @@ -240,7 +240,7 @@ static int mlx5i_pkey_open(struct net_device *netdev) err_close_channels: mlx5e_close_channels(&epriv->channels); err_clear_state_opened_flag: - mlx5e_destroy_tis(mdev, epriv->tisn[0][0]); + mlx5e_destroy_tis(mdev, ipriv->tisn); err_remove_rx_uderlay_qp: mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn); err_unint_underlay_qp: @@ -269,7 +269,7 @@ static int mlx5i_pkey_close(struct net_device *netdev) mlx5i_uninit_underlay_qp(priv); mlx5e_deactivate_priv_channels(priv); mlx5e_close_channels(&priv->channels); - mlx5e_destroy_tis(mdev, priv->tisn[0][0]); + mlx5e_destroy_tis(mdev, ipriv->tisn); unlock: mutex_unlock(&priv->state_lock); return 0; @@ -361,6 +361,7 @@ static const struct mlx5e_profile mlx5i_pkey_nic_profile = { .update_stats = NULL, .rx_handlers = &mlx5i_rx_handlers, .max_tc = MLX5I_MAX_NUM_TC, + .get_tisn = mlx5i_get_tisn, }; const struct mlx5e_profile *mlx5i_pkey_get_profile(void) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index d14459e5c0..69d482f7c5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -703,8 +703,10 @@ int mlx5_deactivate_lag(struct mlx5_lag *ldev) return err; } - if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) + if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) { mlx5_lag_port_sel_destroy(ldev); + ldev->buckets = 1; + } if (mlx5_lag_has_drop_rule(ldev)) mlx5_lag_drop_rule_cleanup(ldev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index 0c83ef1742..0361741632 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -266,9 +266,6 @@ static int mlx5_ptp_settime_real_time(struct mlx5_core_dev *mdev, { u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {}; - if (!mlx5_modify_mtutc_allowed(mdev)) - return 0; - if (ts->tv_sec < 0 || ts->tv_sec > U32_MAX || ts->tv_nsec < 0 || ts->tv_nsec > NSEC_PER_SEC) return -EINVAL; @@ -286,12 +283,15 @@ static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 struct mlx5_timer *timer = &clock->timer; struct mlx5_core_dev *mdev; unsigned long flags; - int err; mdev = container_of(clock, struct mlx5_core_dev, clock); - err = mlx5_ptp_settime_real_time(mdev, ts); - if (err) - return err; + + if (mlx5_modify_mtutc_allowed(mdev)) { + int err = mlx5_ptp_settime_real_time(mdev, ts); + + if (err) + return err; + } write_seqlock_irqsave(&clock->lock, flags); timecounter_init(&timer->tc, &timer->cycles, timespec64_to_ns(ts)); @@ -341,9 +341,6 @@ static int mlx5_ptp_adjtime_real_time(struct mlx5_core_dev *mdev, s64 delta) { u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {}; - if (!mlx5_modify_mtutc_allowed(mdev)) - return 0; - /* HW time adjustment range is checked. 
If out of range, settime instead */ if (!mlx5_is_mtutc_time_adj_cap(mdev, delta)) { struct timespec64 ts; @@ -367,13 +364,16 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) struct mlx5_timer *timer = &clock->timer; struct mlx5_core_dev *mdev; unsigned long flags; - int err; mdev = container_of(clock, struct mlx5_core_dev, clock); - err = mlx5_ptp_adjtime_real_time(mdev, delta); - if (err) - return err; + if (mlx5_modify_mtutc_allowed(mdev)) { + int err = mlx5_ptp_adjtime_real_time(mdev, delta); + + if (err) + return err; + } + write_seqlock_irqsave(&clock->lock, flags); timecounter_adjtime(&timer->tc, delta); mlx5_update_clock_info_page(mdev); @@ -396,15 +396,14 @@ static int mlx5_ptp_freq_adj_real_time(struct mlx5_core_dev *mdev, long scaled_p { u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {}; - if (!mlx5_modify_mtutc_allowed(mdev)) - return 0; - MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_FREQ_UTC); - if (MLX5_CAP_MCAM_FEATURE(mdev, mtutc_freq_adj_units)) { + if (MLX5_CAP_MCAM_FEATURE(mdev, mtutc_freq_adj_units) && + scaled_ppm <= S32_MAX && scaled_ppm >= S32_MIN) { + /* HW scaled_ppm support on mlx5 devices only supports a 32-bit value */ MLX5_SET(mtutc_reg, in, freq_adj_units, MLX5_MTUTC_FREQ_ADJ_UNITS_SCALED_PPM); - MLX5_SET(mtutc_reg, in, freq_adjustment, scaled_ppm); + MLX5_SET(mtutc_reg, in, freq_adjustment, (s32)scaled_ppm); } else { MLX5_SET(mtutc_reg, in, freq_adj_units, MLX5_MTUTC_FREQ_ADJ_UNITS_PPB); MLX5_SET(mtutc_reg, in, freq_adjustment, scaled_ppm_to_ppb(scaled_ppm)); @@ -420,13 +419,15 @@ static int mlx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) struct mlx5_core_dev *mdev; unsigned long flags; u32 mult; - int err; mdev = container_of(clock, struct mlx5_core_dev, clock); - err = mlx5_ptp_freq_adj_real_time(mdev, scaled_ppm); - if (err) - return err; + if (mlx5_modify_mtutc_allowed(mdev)) { + int err = mlx5_ptp_freq_adj_real_time(mdev, scaled_ppm); + + if (err) + return err; + } mult = (u32)adjust_by_scaled_ppm(timer->nominal_c_mult, scaled_ppm); @@ -1004,14 +1005,38 @@ static void mlx5_init_clock_info(struct mlx5_core_dev *mdev) info->frac = timer->tc.frac; } +static void mlx5_init_timer_max_freq_adjustment(struct mlx5_core_dev *mdev) +{ + struct mlx5_clock *clock = &mdev->clock; + u32 out[MLX5_ST_SZ_DW(mtutc_reg)] = {}; + u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {}; + u8 log_max_freq_adjustment = 0; + int err; + + err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out), + MLX5_REG_MTUTC, 0, 0); + if (!err) + log_max_freq_adjustment = + MLX5_GET(mtutc_reg, out, log_max_freq_adjustment); + + if (log_max_freq_adjustment) + clock->ptp_info.max_adj = + min(S32_MAX, 1 << log_max_freq_adjustment); +} + static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev) { struct mlx5_clock *clock = &mdev->clock; + /* Configure the PHC */ + clock->ptp_info = mlx5_ptp_clock_info; + + if (MLX5_CAP_MCAM_REG(mdev, mtutc)) + mlx5_init_timer_max_freq_adjustment(mdev); + mlx5_timecounter_init(mdev); mlx5_init_clock_info(mdev); mlx5_init_overflow_period(clock); - clock->ptp_info = mlx5_ptp_clock_info; if (mlx5_real_time_mode(mdev)) { struct timespec64 ts; @@ -1042,11 +1067,10 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev) } seqlock_init(&clock->lock); - mlx5_init_timer_clock(mdev); INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out); - /* Configure the PHC */ - clock->ptp_info = mlx5_ptp_clock_info; + /* Initialize the device clock */ + mlx5_init_timer_clock(mdev); /* Initialize 1PPS data structures */ mlx5_init_pps(mdev); diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c index e8e50563e9..e7d59cfa87 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c @@ -256,6 +256,13 @@ void mlx5_devcom_unregister_component(struct mlx5_devcom_comp_dev *devcom) devcom_free_comp_dev(devcom); } +int mlx5_devcom_comp_get_size(struct mlx5_devcom_comp_dev *devcom) +{ + struct mlx5_devcom_comp *comp = devcom->comp; + + return kref_read(&comp->ref); +} + int mlx5_devcom_send_event(struct mlx5_devcom_comp_dev *devcom, int event, int rollback_event, void *event_data) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h index fc23bbef87..ec32b686f5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h @@ -31,6 +31,7 @@ void mlx5_devcom_unregister_component(struct mlx5_devcom_comp_dev *devcom); int mlx5_devcom_send_event(struct mlx5_devcom_comp_dev *devcom, int event, int rollback_event, void *event_data); +int mlx5_devcom_comp_get_size(struct mlx5_devcom_comp_dev *devcom); void mlx5_devcom_comp_set_ready(struct mlx5_devcom_comp_dev *devcom, bool ready); bool mlx5_devcom_comp_is_ready(struct mlx5_devcom_comp_dev *devcom); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c index 9482e51ac8..7c5516b0a8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c @@ -13,11 +13,13 @@ struct mlx5_dm { unsigned long *steering_sw_icm_alloc_blocks; unsigned long *header_modify_sw_icm_alloc_blocks; unsigned long *header_modify_pattern_sw_icm_alloc_blocks; + unsigned long *header_encap_sw_icm_alloc_blocks; }; struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev) { u64 header_modify_pattern_icm_blocks = 0; + u64 header_sw_encap_icm_blocks = 0; u64 header_modify_icm_blocks = 0; u64 steering_icm_blocks = 0; struct mlx5_dm *dm; @@ -54,6 +56,17 @@ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev) goto err_modify_hdr; } + if (MLX5_CAP_DEV_MEM(dev, log_indirect_encap_sw_icm_size)) { + header_sw_encap_icm_blocks = + BIT(MLX5_CAP_DEV_MEM(dev, log_indirect_encap_sw_icm_size) - + MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)); + + dm->header_encap_sw_icm_alloc_blocks = + bitmap_zalloc(header_sw_encap_icm_blocks, GFP_KERNEL); + if (!dm->header_encap_sw_icm_alloc_blocks) + goto err_pattern; + } + support_v2 = MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner_v2) && MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner_v2) && MLX5_CAP64_DEV_MEM(dev, header_modify_pattern_sw_icm_start_address); @@ -66,11 +79,14 @@ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev) dm->header_modify_pattern_sw_icm_alloc_blocks = bitmap_zalloc(header_modify_pattern_icm_blocks, GFP_KERNEL); if (!dm->header_modify_pattern_sw_icm_alloc_blocks) - goto err_pattern; + goto err_sw_encap; } return dm; +err_sw_encap: + bitmap_free(dm->header_encap_sw_icm_alloc_blocks); + err_pattern: bitmap_free(dm->header_modify_sw_icm_alloc_blocks); @@ -105,6 +121,14 @@ void mlx5_dm_cleanup(struct mlx5_core_dev *dev) bitmap_free(dm->header_modify_sw_icm_alloc_blocks); } + if (dm->header_encap_sw_icm_alloc_blocks) { + WARN_ON(!bitmap_empty(dm->header_encap_sw_icm_alloc_blocks, + BIT(MLX5_CAP_DEV_MEM(dev, + log_indirect_encap_sw_icm_size) - + MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)))); + bitmap_free(dm->header_encap_sw_icm_alloc_blocks); + } + if 
(dm->header_modify_pattern_sw_icm_alloc_blocks) { WARN_ON(!bitmap_empty(dm->header_modify_pattern_sw_icm_alloc_blocks, BIT(MLX5_CAP_DEV_MEM(dev, @@ -164,6 +188,13 @@ int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, log_header_modify_pattern_sw_icm_size); block_map = dm->header_modify_pattern_sw_icm_alloc_blocks; break; + case MLX5_SW_ICM_TYPE_SW_ENCAP: + icm_start_addr = MLX5_CAP64_DEV_MEM(dev, + indirect_encap_sw_icm_start_address); + log_icm_size = MLX5_CAP_DEV_MEM(dev, + log_indirect_encap_sw_icm_size); + block_map = dm->header_encap_sw_icm_alloc_blocks; + break; default: return -EINVAL; } @@ -242,6 +273,11 @@ int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type header_modify_pattern_sw_icm_start_address); block_map = dm->header_modify_pattern_sw_icm_alloc_blocks; break; + case MLX5_SW_ICM_TYPE_SW_ENCAP: + icm_start_addr = MLX5_CAP64_DEV_MEM(dev, + indirect_encap_sw_icm_start_address); + block_map = dm->header_encap_sw_icm_alloc_blocks; + break; default: return -EINVAL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index a17152c1cb..e285823bd0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -219,7 +219,6 @@ static void mlx5_set_driver_version(struct mlx5_core_dev *dev) int driver_ver_sz = MLX5_FLD_SZ_BYTES(set_driver_version_in, driver_version); u8 in[MLX5_ST_SZ_BYTES(set_driver_version_in)] = {}; - int remaining_size = driver_ver_sz; char *string; if (!MLX5_CAP_GEN(dev, driver_version)) @@ -227,22 +226,9 @@ static void mlx5_set_driver_version(struct mlx5_core_dev *dev) string = MLX5_ADDR_OF(set_driver_version_in, in, driver_version); - strncpy(string, "Linux", remaining_size); - - remaining_size = max_t(int, 0, driver_ver_sz - strlen(string)); - strncat(string, ",", remaining_size); - - remaining_size = max_t(int, 0, driver_ver_sz - strlen(string)); - strncat(string, KBUILD_MODNAME, remaining_size); - - remaining_size = max_t(int, 0, driver_ver_sz - strlen(string)); - strncat(string, ",", remaining_size); - - remaining_size = max_t(int, 0, driver_ver_sz - strlen(string)); - - snprintf(string + strlen(string), remaining_size, "%u.%u.%u", - LINUX_VERSION_MAJOR, LINUX_VERSION_PATCHLEVEL, - LINUX_VERSION_SUBLEVEL); + snprintf(string, driver_ver_sz, "Linux,%s,%u.%u.%u", + KBUILD_MODNAME, LINUX_VERSION_MAJOR, + LINUX_VERSION_PATCHLEVEL, LINUX_VERSION_SUBLEVEL); /*Send the command*/ MLX5_SET(set_driver_version_in, in, opcode, @@ -1494,6 +1480,14 @@ int mlx5_init_one_devl_locked(struct mlx5_core_dev *dev) if (err) goto err_register; + err = mlx5_crdump_enable(dev); + if (err) + mlx5_core_err(dev, "mlx5_crdump_enable failed with error code %d\n", err); + + err = mlx5_hwmon_dev_register(dev); + if (err) + mlx5_core_err(dev, "mlx5_hwmon_dev_register failed with error code %d\n", err); + mutex_unlock(&dev->intf_state_mutex); return 0; @@ -1519,7 +1513,10 @@ int mlx5_init_one(struct mlx5_core_dev *dev) int err; devl_lock(devlink); + devl_register(devlink); err = mlx5_init_one_devl_locked(dev); + if (err) + devl_unregister(devlink); devl_unlock(devlink); return err; } @@ -1531,6 +1528,8 @@ void mlx5_uninit_one(struct mlx5_core_dev *dev) devl_lock(devlink); mutex_lock(&dev->intf_state_mutex); + mlx5_hwmon_dev_unregister(dev); + mlx5_crdump_disable(dev); mlx5_unregister_device(dev); if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) { @@ -1548,6 +1547,7 @@ void mlx5_uninit_one(struct mlx5_core_dev *dev) 
mlx5_function_teardown(dev, true); out: mutex_unlock(&dev->intf_state_mutex); + devl_unregister(devlink); devl_unlock(devlink); } @@ -1694,16 +1694,23 @@ int mlx5_init_one_light(struct mlx5_core_dev *dev) } devl_lock(devlink); + devl_register(devlink); + err = mlx5_devlink_params_register(priv_to_devlink(dev)); - devl_unlock(devlink); if (err) { mlx5_core_warn(dev, "mlx5_devlink_param_reg err = %d\n", err); - goto query_hca_caps_err; + goto params_reg_err; } + devl_unlock(devlink); return 0; +params_reg_err: + devl_unregister(devlink); + devl_unlock(devlink); query_hca_caps_err: + devl_unregister(devlink); + devl_unlock(devlink); mlx5_function_disable(dev, true); out: dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; @@ -1716,6 +1723,7 @@ void mlx5_uninit_one_light(struct mlx5_core_dev *dev) devl_lock(devlink); mlx5_devlink_params_unregister(priv_to_devlink(dev)); + devl_unregister(devlink); devl_unlock(devlink); if (dev->state != MLX5_DEVICE_STATE_UP) return; @@ -1957,16 +1965,7 @@ static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id) goto err_init_one; } - err = mlx5_crdump_enable(dev); - if (err) - dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err); - - err = mlx5_hwmon_dev_register(dev); - if (err) - mlx5_core_err(dev, "mlx5_hwmon_dev_register failed with error code %d\n", err); - pci_save_state(pdev); - devlink_register(devlink); return 0; err_init_one: @@ -1987,16 +1986,9 @@ static void remove_one(struct pci_dev *pdev) struct devlink *devlink = priv_to_devlink(dev); set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state); - /* mlx5_drain_fw_reset() and mlx5_drain_health_wq() are using - * devlink notify APIs. - * Hence, we must drain them before unregistering the devlink. - */ mlx5_drain_fw_reset(dev); mlx5_drain_health_wq(dev); - devlink_unregister(devlink); mlx5_sriov_disable(pdev, false); - mlx5_hwmon_dev_unregister(dev); - mlx5_crdump_disable(dev); mlx5_uninit_one(dev); mlx5_pci_close(dev); mlx5_mdev_uninit(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 6b14e347d9..a79b795936 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -243,6 +243,7 @@ int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcap, u8 feature_group, u8 access_reg_group); int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam, u8 feature_group, u8 access_reg_group); +int mlx5_query_mpir_reg(struct mlx5_core_dev *dev, u32 *mpir); void mlx5_lag_add_netdev(struct mlx5_core_dev *dev, struct net_device *netdev); void mlx5_lag_remove_netdev(struct mlx5_core_dev *dev, struct net_device *netdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c index 4dcf995cb1..6bac8ad70b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c @@ -19,6 +19,7 @@ #define MLX5_IRQ_CTRL_SF_MAX 8 /* min num of vectors for SFs to be enabled */ #define MLX5_IRQ_VEC_COMP_BASE_SF 2 +#define MLX5_IRQ_VEC_COMP_BASE 1 #define MLX5_EQ_SHARE_IRQ_MAX_COMP (8) #define MLX5_EQ_SHARE_IRQ_MAX_CTRL (UINT_MAX) @@ -246,6 +247,7 @@ static void irq_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx) return; } + vecidx -= MLX5_IRQ_VEC_COMP_BASE; snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", vecidx); } @@ -585,7 +587,7 @@ struct mlx5_irq *mlx5_irq_request_vector(struct mlx5_core_dev *dev, u16 cpu, struct mlx5_irq_table *table 
= mlx5_irq_table_get(dev); struct mlx5_irq_pool *pool = table->pcif_pool; struct irq_affinity_desc af_desc; - int offset = 1; + int offset = MLX5_IRQ_VEC_COMP_BASE; if (!pool->xa_num_irqs.max) offset = 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 7d8c732818..7fba1c46e2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -1206,3 +1206,13 @@ int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) *speed = max_speed; return 0; } + +int mlx5_query_mpir_reg(struct mlx5_core_dev *dev, u32 *mpir) +{ + u32 in[MLX5_ST_SZ_DW(mpir_reg)] = {}; + int sz = MLX5_ST_SZ_BYTES(mpir_reg); + + MLX5_SET(mpir_reg, in, local_port, 1); + + return mlx5_core_access_reg(dev, in, sz, mpir, sz, MLX5_REG_MPIR, 0, 0); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c index 169c2c68ed..7ebe712808 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c @@ -75,7 +75,6 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia goto peer_devlink_set_err; } - devlink_register(devlink); return 0; peer_devlink_set_err: @@ -95,24 +94,28 @@ mdev_err: static void mlx5_sf_dev_remove(struct auxiliary_device *adev) { struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev); - struct devlink *devlink = priv_to_devlink(sf_dev->mdev); + struct mlx5_core_dev *mdev = sf_dev->mdev; + struct devlink *devlink; - mlx5_drain_health_wq(sf_dev->mdev); - devlink_unregister(devlink); - if (mlx5_dev_is_lightweight(sf_dev->mdev)) - mlx5_uninit_one_light(sf_dev->mdev); + devlink = priv_to_devlink(mdev); + set_bit(MLX5_BREAK_FW_WAIT, &mdev->intf_state); + mlx5_drain_health_wq(mdev); + if (mlx5_dev_is_lightweight(mdev)) + mlx5_uninit_one_light(mdev); else - mlx5_uninit_one(sf_dev->mdev); - iounmap(sf_dev->mdev->iseg); - mlx5_mdev_uninit(sf_dev->mdev); + mlx5_uninit_one(mdev); + iounmap(mdev->iseg); + mlx5_mdev_uninit(mdev); mlx5_devlink_free(devlink); } static void mlx5_sf_dev_shutdown(struct auxiliary_device *adev) { struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev); + struct mlx5_core_dev *mdev = sf_dev->mdev; - mlx5_unload_one(sf_dev->mdev, false); + set_bit(MLX5_BREAK_FW_WAIT, &mdev->intf_state); + mlx5_unload_one(mdev, false); } static const struct auxiliary_device_id mlx5_sf_dev_id_table[] = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c index d2b65a0ce4..2ebb61ef3e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c @@ -1177,7 +1177,6 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn, bool ignore_flow_level, u32 flow_source) { - struct mlx5dr_cmd_flow_destination_hw_info tmp_hw_dest; struct mlx5dr_cmd_flow_destination_hw_info *hw_dests; struct mlx5dr_action **ref_actions; struct mlx5dr_action *action; @@ -1256,11 +1255,8 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn, * one that done in the TX. * So, if one of the ft target is wire, put it at the end of the dest list. 
*/ - if (is_ft_wire && num_dst_ft > 1) { - tmp_hw_dest = hw_dests[last_dest]; - hw_dests[last_dest] = hw_dests[num_of_dests - 1]; - hw_dests[num_of_dests - 1] = tmp_hw_dest; - } + if (is_ft_wire && num_dst_ft > 1) + swap(hw_dests[last_dest], hw_dests[num_of_dests - 1]); action = dr_action_create_generic(DR_ACTION_TYP_FT); if (!action) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index 21753f3278..1005bb6935 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -440,6 +440,27 @@ out: } EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid); +int mlx5_query_nic_vport_sd_group(struct mlx5_core_dev *mdev, u8 *sd_group) +{ + int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out); + u32 *out; + int err; + + out = kvzalloc(outlen, GFP_KERNEL); + if (!out) + return -ENOMEM; + + err = mlx5_query_nic_vport_context(mdev, 0, out); + if (err) + goto out; + + *sd_group = MLX5_GET(query_nic_vport_context_out, out, + nic_vport_context.sd_group); +out: + kvfree(out); + return err; +} + int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid) { u32 *out;
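The fs_core.h and fs_counters.c hunks above annotate their flexible arrays with __counted_by(). Below is a minimal standalone sketch of the same pattern, assuming a no-op fallback macro for toolchains without the attribute (in-tree code gets the real definition from the kernel's compiler-attribute headers); the struct and helper names are illustrative, not the driver's.

```c
#include <stdlib.h>

/* Fallback assumed for compilers without the attribute; the kernel
 * defines the real __counted_by() when the compiler supports it. */
#ifndef __counted_by
#define __counted_by(member)
#endif

/* Mirrors the mlx5_flow_handle / mlx5_fc_bulk pattern above: the flexible
 * array's bound is tied to a member set at allocation time, so
 * bounds-checking tooling knows how many elements are valid. */
struct flow_handle {
	int num_rules;
	void *rule[] __counted_by(num_rules);
};

static struct flow_handle *flow_handle_alloc(int num_rules)
{
	struct flow_handle *h;

	h = calloc(1, sizeof(*h) + num_rules * sizeof(h->rule[0]));
	if (!h)
		return NULL;
	/* Initialize the counter before the array is used. */
	h->num_rules = num_rules;
	return h;
}

int main(void)
{
	struct flow_handle *h = flow_handle_alloc(4);

	if (!h)
		return 1;
	h->rule[0] = h;	/* access stays within the declared bound */
	free(h);
	return 0;
}
```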