Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/channels.c          10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/channels.h           6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c     48
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/params.c            33
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/params.h             3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c               17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/qos.c                8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c        4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c       10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c              130
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h               10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rss.c               17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rss.h                4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c            62
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h             1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/trap.c              11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c           6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c         14
18 files changed, 278 insertions, 116 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
index 48581ea3a..874a10166 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
@@ -23,20 +23,26 @@ bool mlx5e_channels_is_xsk(struct mlx5e_channels *chs, unsigned int ix)
return test_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
}
-void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn)
+void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn,
+ u32 *vhca_id)
{
struct mlx5e_channel *c = mlx5e_channels_get(chs, ix);
*rqn = c->rq.rqn;
+ if (vhca_id)
+ *vhca_id = MLX5_CAP_GEN(c->mdev, vhca_id);
}
-void mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn)
+void mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn,
+ u32 *vhca_id)
{
struct mlx5e_channel *c = mlx5e_channels_get(chs, ix);
WARN_ON_ONCE(!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state));
*rqn = c->xskrq.rqn;
+ if (vhca_id)
+ *vhca_id = MLX5_CAP_GEN(c->mdev, vhca_id);
}
bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
index 637ca90da..6715aa938 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
@@ -10,8 +10,10 @@ struct mlx5e_channels;
unsigned int mlx5e_channels_get_num(struct mlx5e_channels *chs);
bool mlx5e_channels_is_xsk(struct mlx5e_channels *chs, unsigned int ix);
-void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn);
-void mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn);
+void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn,
+ u32 *vhca_id);
+void mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn,
+ u32 *vhca_id);
bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn);
#endif /* __MLX5_EN_CHANNELS_H__ */
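
[Editor's note] A minimal standalone sketch of the call pattern the new out-parameter enables: a caller walks the channels and records, per channel, both the RQ number and the VHCA ID of the device that owns it, passing NULL when the VHCA ID is not needed. The mock types and mock_* names below are hypothetical, not the kernel API; the rx_res.c hunks further down use the real getters in exactly this way.

#include <stdio.h>

struct mock_channel {
	unsigned int rqn;      /* RQ number on the owning device */
	unsigned int vhca_id;  /* VHCA ID of the owning device */
};

/* Mirrors the shape of mlx5e_channels_get_regular_rqn(): the vhca_id
 * out-parameter is optional and only filled when the caller passes it. */
static void mock_get_regular_rqn(const struct mock_channel *c,
				 unsigned int *rqn, unsigned int *vhca_id)
{
	*rqn = c->rqn;
	if (vhca_id)
		*vhca_id = c->vhca_id;
}

int main(void)
{
	struct mock_channel chs[2] = { { .rqn = 0x10, .vhca_id = 0 },
				       { .rqn = 0x11, .vhca_id = 1 } };
	unsigned int rqns[2], vhca_ids[2];

	for (int ix = 0; ix < 2; ix++)
		mock_get_regular_rqn(&chs[ix], &rqns[ix], &vhca_ids[ix]);

	for (int ix = 0; ix < 2; ix++)
		printf("channel %d: rqn 0x%x vhca_id %u\n", ix, rqns[ix], vhca_ids[ix]);
	return 0;
}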
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
index 40c8df111..e2d8d2754 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
@@ -20,10 +20,8 @@
#define NUM_REQ_PPCNT_COUNTER_S1 MLX5_CMD_SET_MONITOR_NUM_PPCNT_COUNTER_SET1
#define NUM_REQ_Q_COUNTERS_S1 MLX5_CMD_SET_MONITOR_NUM_Q_COUNTERS_SET1
-int mlx5e_monitor_counter_supported(struct mlx5e_priv *priv)
+static int mlx5e_monitor_counter_cap(struct mlx5_core_dev *mdev)
{
- struct mlx5_core_dev *mdev = priv->mdev;
-
if (!MLX5_CAP_GEN(mdev, max_num_of_monitor_counters))
return false;
if (MLX5_CAP_PCAM_REG(mdev, ppcnt) &&
@@ -36,24 +34,38 @@ int mlx5e_monitor_counter_supported(struct mlx5e_priv *priv)
return true;
}
-static void mlx5e_monitor_counter_arm(struct mlx5e_priv *priv)
+int mlx5e_monitor_counter_supported(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *pos;
+ int i;
+
+ mlx5_sd_for_each_dev(i, priv->mdev, pos)
+ if (!mlx5e_monitor_counter_cap(pos))
+ return false;
+ return true;
+}
+
+static void mlx5e_monitor_counter_arm(struct mlx5_core_dev *mdev)
{
u32 in[MLX5_ST_SZ_DW(arm_monitor_counter_in)] = {};
MLX5_SET(arm_monitor_counter_in, in, opcode,
MLX5_CMD_OP_ARM_MONITOR_COUNTER);
- mlx5_cmd_exec_in(priv->mdev, arm_monitor_counter, in);
+ mlx5_cmd_exec_in(mdev, arm_monitor_counter, in);
}
static void mlx5e_monitor_counters_work(struct work_struct *work)
{
struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
monitor_counters_work);
+ struct mlx5_core_dev *pos;
+ int i;
mutex_lock(&priv->state_lock);
mlx5e_stats_update_ndo_stats(priv);
mutex_unlock(&priv->state_lock);
- mlx5e_monitor_counter_arm(priv);
+ mlx5_sd_for_each_dev(i, priv->mdev, pos)
+ mlx5e_monitor_counter_arm(pos);
}
static int mlx5e_monitor_event_handler(struct notifier_block *nb,
@@ -97,15 +109,13 @@ static int fill_monitor_counter_q_counter_set1(int cnt, int q_counter, u32 *in)
}
/* check if mlx5e_monitor_counter_supported before calling this function*/
-static void mlx5e_set_monitor_counter(struct mlx5e_priv *priv)
+static void mlx5e_set_monitor_counter(struct mlx5_core_dev *mdev, int q_counter)
{
- struct mlx5_core_dev *mdev = priv->mdev;
int max_num_of_counters = MLX5_CAP_GEN(mdev, max_num_of_monitor_counters);
int num_q_counters = MLX5_CAP_GEN(mdev, num_q_monitor_counters);
int num_ppcnt_counters = !MLX5_CAP_PCAM_REG(mdev, ppcnt) ? 0 :
MLX5_CAP_GEN(mdev, num_ppcnt_monitor_counters);
u32 in[MLX5_ST_SZ_DW(set_monitor_counter_in)] = {};
- int q_counter = priv->q_counter;
int cnt = 0;
if (num_ppcnt_counters >= NUM_REQ_PPCNT_COUNTER_S1 &&
@@ -127,13 +137,17 @@ static void mlx5e_set_monitor_counter(struct mlx5e_priv *priv)
/* check if mlx5e_monitor_counter_supported before calling this function*/
void mlx5e_monitor_counter_init(struct mlx5e_priv *priv)
{
+ struct mlx5_core_dev *pos;
+ int i;
+
INIT_WORK(&priv->monitor_counters_work, mlx5e_monitor_counters_work);
MLX5_NB_INIT(&priv->monitor_counters_nb, mlx5e_monitor_event_handler,
MONITOR_COUNTER);
- mlx5_eq_notifier_register(priv->mdev, &priv->monitor_counters_nb);
-
- mlx5e_set_monitor_counter(priv);
- mlx5e_monitor_counter_arm(priv);
+ mlx5_sd_for_each_dev(i, priv->mdev, pos) {
+ mlx5_eq_notifier_register(pos, &priv->monitor_counters_nb);
+ mlx5e_set_monitor_counter(pos, priv->q_counter[i]);
+ mlx5e_monitor_counter_arm(pos);
+ }
queue_work(priv->wq, &priv->update_stats_work);
}
@@ -141,11 +155,15 @@ void mlx5e_monitor_counter_init(struct mlx5e_priv *priv)
void mlx5e_monitor_counter_cleanup(struct mlx5e_priv *priv)
{
u32 in[MLX5_ST_SZ_DW(set_monitor_counter_in)] = {};
+ struct mlx5_core_dev *pos;
+ int i;
MLX5_SET(set_monitor_counter_in, in, opcode,
MLX5_CMD_OP_SET_MONITOR_COUNTER);
- mlx5_cmd_exec_in(priv->mdev, set_monitor_counter, in);
- mlx5_eq_notifier_unregister(priv->mdev, &priv->monitor_counters_nb);
+ mlx5_sd_for_each_dev(i, priv->mdev, pos) {
+ mlx5_cmd_exec_in(pos, set_monitor_counter, in);
+ mlx5_eq_notifier_unregister(pos, &priv->monitor_counters_nb);
+ }
cancel_work_sync(&priv->monitor_counters_work);
}
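
[Editor's note] A self-contained sketch of the Socket-Direct fan-out pattern used above: per-device commands (set/arm monitor counter, notifier registration) are applied to every device in the group. mlx5_sd_for_each_dev() is the real driver macro; everything prefixed mock_ (the array-backed macro, the devices, the per-device Q counters) is an illustrative assumption so the loop shape can be shown in isolation.

#include <stdio.h>

struct mock_dev { int id; };

#define MOCK_SD_GROUP_SIZE 2
/* Array-backed stand-in for mlx5_sd_for_each_dev(i, mdev, pos). */
#define mock_sd_for_each_dev(i, devs, pos) \
	for ((i) = 0; (i) < MOCK_SD_GROUP_SIZE && ((pos) = &(devs)[i]); (i)++)

static void mock_set_monitor_counter(struct mock_dev *dev, int q_counter)
{
	printf("dev %d: SET_MONITOR_COUNTER with q_counter %d\n", dev->id, q_counter);
}

static void mock_arm_monitor_counter(struct mock_dev *dev)
{
	printf("dev %d: ARM_MONITOR_COUNTER\n", dev->id);
}

int main(void)
{
	struct mock_dev devs[MOCK_SD_GROUP_SIZE] = { { .id = 0 }, { .id = 1 } };
	int q_counter[MOCK_SD_GROUP_SIZE] = { 7, 8 }; /* one Q counter per device */
	struct mock_dev *pos;
	int i;

	/* Same shape as mlx5e_monitor_counter_init(): configure and arm on
	 * every device of the group, using that device's own Q counter. */
	mock_sd_for_each_dev(i, devs, pos) {
		mock_set_monitor_counter(pos, q_counter[i]);
		mock_arm_monitor_counter(pos);
	}
	return 0;
}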
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 5d213a988..a3f31d9d5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -240,11 +240,14 @@ static u32 mlx5e_rx_get_linear_sz_xsk(struct mlx5e_params *params,
return xsk->headroom + hw_mtu;
}
-static u32 mlx5e_rx_get_linear_sz_skb(struct mlx5e_params *params, bool xsk)
+static u32 mlx5e_rx_get_linear_sz_skb(struct mlx5e_params *params, bool no_head_tail_room)
{
- /* SKBs built on XDP_PASS on XSK RQs don't have headroom. */
- u16 headroom = xsk ? 0 : mlx5e_get_linear_rq_headroom(params, NULL);
u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ u16 headroom;
+
+ if (no_head_tail_room)
+ return SKB_DATA_ALIGN(hw_mtu);
+ headroom = mlx5e_get_linear_rq_headroom(params, NULL);
return MLX5_SKB_FRAG_SZ(headroom + hw_mtu);
}
@@ -254,6 +257,7 @@ static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5_core_dev *mdev,
struct mlx5e_xsk_param *xsk,
bool mpwqe)
{
+ bool no_head_tail_room;
u32 sz;
/* XSK frames are mapped as individual pages, because frames may come in
@@ -262,7 +266,13 @@ static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5_core_dev *mdev,
if (xsk)
return mpwqe ? 1 << mlx5e_mpwrq_page_shift(mdev, xsk) : PAGE_SIZE;
- sz = roundup_pow_of_two(mlx5e_rx_get_linear_sz_skb(params, false));
+ no_head_tail_room = params->xdp_prog && mpwqe && !mlx5e_rx_is_linear_skb(mdev, params, xsk);
+
+ /* When no_head_tail_room is set, headroom and tailroom are excluded from skb calculations.
+ * no_head_tail_room should be set in the case of XDP with Striding RQ
+ * when SKB is not linear. This is because another page is allocated for the linear part.
+ */
+ sz = roundup_pow_of_two(mlx5e_rx_get_linear_sz_skb(params, no_head_tail_room));
/* XDP in mlx5e doesn't support multiple packets per page.
* Do not assume sz <= PAGE_SIZE if params->xdp_prog is set.
@@ -289,7 +299,11 @@ bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE)
return false;
- /* Both XSK and non-XSK cases allocate an SKB on XDP_PASS. Packet data
+ /* Call mlx5e_rx_get_linear_sz_skb with the no_head_tail_room parameter set
+ * to exclude headroom and tailroom from calculations.
+ * no_head_tail_room is true when SKB is built on XDP_PASS on XSK RQs
+ * since packet data buffers don't have headroom and tailroom reserved for the SKB.
+ * Both XSK and non-XSK cases allocate an SKB on XDP_PASS. Packet data
* must fit into a CPU page.
*/
if (mlx5e_rx_get_linear_sz_skb(params, xsk) > PAGE_SIZE)
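
[Editor's note] A worked, standalone illustration of the two branches of mlx5e_rx_get_linear_sz_skb(). All constants below (64-byte alignment, 256-byte headroom, 320 bytes of skb_shared_info tailroom, the MTU) are illustrative assumptions rather than the driver's actual values; the point is only that the no_head_tail_room case counts the aligned MTU alone, while the regular case also accounts for headroom and tailroom.

#include <stdio.h>

#define MOCK_ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))
#define MOCK_CACHE_LINE      64u
#define MOCK_HEADROOM        256u
#define MOCK_SHARED_INFO_SZ  320u

static unsigned int mock_linear_sz_skb(unsigned int hw_mtu, int no_head_tail_room)
{
	if (no_head_tail_room)
		/* XDP with striding RQ, non-linear SKB: only the aligned MTU counts. */
		return MOCK_ALIGN(hw_mtu, MOCK_CACHE_LINE);

	/* Regular case: headroom in front, skb_shared_info tailroom behind. */
	return MOCK_ALIGN(MOCK_HEADROOM + hw_mtu, MOCK_CACHE_LINE) + MOCK_SHARED_INFO_SZ;
}

int main(void)
{
	unsigned int hw_mtu = 1522; /* illustrative HW MTU */

	printf("with head/tail room:    %u bytes\n", mock_linear_sz_skb(hw_mtu, 0));
	printf("without head/tail room: %u bytes\n", mock_linear_sz_skb(hw_mtu, 1));
	return 0;
}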
@@ -674,7 +688,7 @@ void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e
.napi = &c->napi,
.ch_stats = c->stats,
.node = cpu_to_node(c->cpu),
- .ix = c->ix,
+ .ix = c->vec_ix,
};
}
@@ -945,7 +959,6 @@ static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *param
int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
- u16 q_counter,
struct mlx5e_rq_param *param)
{
void *rqc = param->rqc;
@@ -1007,7 +1020,6 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
MLX5_SET(wq, wq, log_wq_stride,
mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);
- MLX5_SET(rqc, rqc, counter_set_id, q_counter);
MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);
@@ -1018,7 +1030,6 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
}
void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
- u16 q_counter,
struct mlx5e_rq_param *param)
{
void *rqc = param->rqc;
@@ -1027,7 +1038,6 @@ void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
MLX5_SET(wq, wq, log_wq_stride,
mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
- MLX5_SET(rqc, rqc, counter_set_id, q_counter);
param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
}
@@ -1292,13 +1302,12 @@ void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- u16 q_counter,
struct mlx5e_channel_param *cparam)
{
u8 icosq_log_wq_sz, async_icosq_log_wq_sz;
int err;
- err = mlx5e_build_rq_param(mdev, params, NULL, q_counter, &cparam->rq);
+ err = mlx5e_build_rq_param(mdev, params, NULL, &cparam->rq);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index 6800949da..9a781f18b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -130,10 +130,8 @@ void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e
int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
- u16 q_counter,
struct mlx5e_rq_param *param);
void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
- u16 q_counter,
struct mlx5e_rq_param *param);
void mlx5e_build_sq_param_common(struct mlx5_core_dev *mdev,
struct mlx5e_sq_param *param);
@@ -149,7 +147,6 @@ void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
struct mlx5e_sq_param *param);
int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- u16 q_counter,
struct mlx5e_channel_param *cparam);
u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index ca05b3252..d0af7271d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -646,7 +646,6 @@ static void mlx5e_ptp_build_sq_param(struct mlx5_core_dev *mdev,
static void mlx5e_ptp_build_rq_param(struct mlx5_core_dev *mdev,
struct net_device *netdev,
- u16 q_counter,
struct mlx5e_ptp_params *ptp_params)
{
struct mlx5e_rq_param *rq_params = &ptp_params->rq_param;
@@ -655,7 +654,7 @@ static void mlx5e_ptp_build_rq_param(struct mlx5_core_dev *mdev,
params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC;
mlx5e_init_rq_type_params(mdev, params);
params->sw_mtu = netdev->max_mtu;
- mlx5e_build_rq_param(mdev, params, NULL, q_counter, rq_params);
+ mlx5e_build_rq_param(mdev, params, NULL, rq_params);
}
static void mlx5e_ptp_build_params(struct mlx5e_ptp *c,
@@ -681,7 +680,7 @@ static void mlx5e_ptp_build_params(struct mlx5e_ptp *c,
/* RQ */
if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
params->vlan_strip_disable = orig->vlan_strip_disable;
- mlx5e_ptp_build_rq_param(c->mdev, c->netdev, c->priv->q_counter, cparams);
+ mlx5e_ptp_build_rq_param(c->mdev, c->netdev, cparams);
}
}
@@ -714,13 +713,16 @@ static int mlx5e_ptp_open_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
struct mlx5e_rq_param *rq_param)
{
int node = dev_to_node(c->mdev->device);
- int err;
+ int err, sd_ix;
+ u16 q_counter;
err = mlx5e_init_ptp_rq(c, params, &c->rq);
if (err)
return err;
- return mlx5e_open_rq(params, rq_param, NULL, node, &c->rq);
+ sd_ix = mlx5_sd_ch_ix_get_dev_ix(c->mdev, MLX5E_PTP_CHANNEL_IX);
+ q_counter = c->priv->q_counter[sd_ix];
+ return mlx5e_open_rq(params, rq_param, NULL, node, q_counter, &c->rq);
}
static int mlx5e_ptp_open_queues(struct mlx5e_ptp *c,
@@ -935,6 +937,7 @@ void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c)
if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
mlx5e_ptp_rx_set_fs(c->priv);
mlx5e_activate_rq(&c->rq);
+ netif_queue_set_napi(c->netdev, c->rq.ix, NETDEV_QUEUE_TYPE_RX, &c->napi);
}
mlx5e_trigger_napi_sched(&c->napi);
}
@@ -943,8 +946,10 @@ void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c)
{
int tc;
- if (test_bit(MLX5E_PTP_STATE_RX, c->state))
+ if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
+ netif_queue_set_napi(c->netdev, c->rq.ix, NETDEV_QUEUE_TYPE_RX, NULL);
mlx5e_deactivate_rq(&c->rq);
+ }
if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
for (tc = 0; tc < c->num_tc; tc++)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index 922bc5b7c..6743806b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -123,8 +123,8 @@ int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
memset(&param_sq, 0, sizeof(param_sq));
memset(&param_cq, 0, sizeof(param_cq));
- mlx5e_build_sq_param(priv->mdev, params, &param_sq);
- mlx5e_build_tx_cq_param(priv->mdev, params, &param_cq);
+ mlx5e_build_sq_param(c->mdev, params, &param_sq);
+ mlx5e_build_tx_cq_param(c->mdev, params, &param_cq);
err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &param_cq, &ccp, &sq->cq);
if (err)
goto err_free_sq;
@@ -177,7 +177,7 @@ int mlx5e_activate_qos_sq(void *data, u16 node_qid, u32 hw_id)
*/
smp_wmb();
- qos_dbg(priv->mdev, "Activate QoS SQ qid %u\n", node_qid);
+ qos_dbg(sq->mdev, "Activate QoS SQ qid %u\n", node_qid);
mlx5e_activate_txqsq(sq);
return 0;
@@ -191,7 +191,7 @@ void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid)
if (!sq) /* Handle the case when the SQ failed to open. */
return;
- qos_dbg(priv->mdev, "Deactivate QoS SQ qid %u\n", qid);
+ qos_dbg(sq->mdev, "Deactivate QoS SQ qid %u\n", qid);
mlx5e_deactivate_txqsq(sq);
priv->txq2sq[mlx5e_qid_from_qos(&priv->channels, qid)] = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index 4358798d6..25d751eba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -294,8 +294,8 @@ static void mlx5e_rx_reporter_diagnose_generic_rq(struct mlx5e_rq *rq,
params = &priv->channels.params;
rq_sz = mlx5e_rqwq_get_size(rq);
- real_time = mlx5_is_real_time_rq(priv->mdev);
- rq_stride = BIT(mlx5e_mpwqe_get_log_stride_size(priv->mdev, params, NULL));
+ real_time = mlx5_is_real_time_rq(rq->mdev);
+ rq_stride = BIT(mlx5e_mpwqe_get_log_stride_size(rq->mdev, params, NULL));
mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RQ");
devlink_fmsg_u8_pair_put(fmsg, "type", params->rq_wq_type);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 6b44ddce1..22918b2ef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -108,7 +108,10 @@ static int mlx5e_tx_reporter_err_cqe_recover(void *ctx)
mlx5e_reset_txqsq_cc_pc(sq);
sq->stats->recover++;
clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
+ rtnl_lock();
mlx5e_activate_txqsq(sq);
+ rtnl_unlock();
+
if (sq->channel)
mlx5e_trigger_napi_icosq(sq->channel);
else
@@ -179,12 +182,16 @@ static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx)
carrier_ok = netif_carrier_ok(netdev);
netif_carrier_off(netdev);
+ rtnl_lock();
mlx5e_deactivate_priv_channels(priv);
+ rtnl_unlock();
mlx5e_ptp_close(chs->ptp);
err = mlx5e_ptp_open(priv, &chs->params, chs->c[0]->lag_port, &chs->ptp);
+ rtnl_lock();
mlx5e_activate_priv_channels(priv);
+ rtnl_unlock();
/* return carrier back if needed */
if (carrier_ok)
@@ -219,7 +226,6 @@ mlx5e_tx_reporter_build_diagnose_output_sq_common(struct devlink_fmsg *fmsg,
struct mlx5e_txqsq *sq, int tc)
{
bool stopped = netif_xmit_stopped(sq->txq);
- struct mlx5e_priv *priv = sq->priv;
u8 state;
int err;
@@ -227,7 +233,7 @@ mlx5e_tx_reporter_build_diagnose_output_sq_common(struct devlink_fmsg *fmsg,
devlink_fmsg_u32_pair_put(fmsg, "txq ix", sq->txq_ix);
devlink_fmsg_u32_pair_put(fmsg, "sqn", sq->sqn);
- err = mlx5_core_query_sq_state(priv->mdev, sq->sqn, &state);
+ err = mlx5_core_query_sq_state(sq->mdev, sq->sqn, &state);
if (!err)
devlink_fmsg_u8_pair_put(fmsg, "HW state", state);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
index 7b8ff7a71..8d9a3b5ec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
@@ -4,6 +4,33 @@
#include "rqt.h"
#include <linux/mlx5/transobj.h>
+static bool verify_num_vhca_ids(struct mlx5_core_dev *mdev, u32 *vhca_ids,
+ unsigned int size)
+{
+ unsigned int max_num_vhca_id = MLX5_CAP_GEN_2(mdev, max_rqt_vhca_id);
+ int i;
+
+ /* Verify that all vhca_ids are in range [0, max_num_vhca_ids - 1] */
+ for (i = 0; i < size; i++)
+ if (vhca_ids[i] >= max_num_vhca_id)
+ return false;
+ return true;
+}
+
+static bool rqt_verify_vhca_ids(struct mlx5_core_dev *mdev, u32 *vhca_ids,
+ unsigned int size)
+{
+ if (!vhca_ids)
+ return true;
+
+ if (!MLX5_CAP_GEN(mdev, cross_vhca_rqt))
+ return false;
+ if (!verify_num_vhca_ids(mdev, vhca_ids, size))
+ return false;
+
+ return true;
+}
+
void mlx5e_rss_params_indir_init_uniform(struct mlx5e_rss_params_indir *indir,
unsigned int num_channels)
{
@@ -13,19 +40,38 @@ void mlx5e_rss_params_indir_init_uniform(struct mlx5e_rss_params_indir *indir,
indir->table[i] = i % num_channels;
}
+static void fill_rqn_list(void *rqtc, u32 *rqns, u32 *vhca_ids, unsigned int size)
+{
+ unsigned int i;
+
+ if (vhca_ids) {
+ MLX5_SET(rqtc, rqtc, rq_vhca_id_format, 1);
+ for (i = 0; i < size; i++) {
+ MLX5_SET(rqtc, rqtc, rq_vhca[i].rq_num, rqns[i]);
+ MLX5_SET(rqtc, rqtc, rq_vhca[i].rq_vhca_id, vhca_ids[i]);
+ }
+ } else {
+ for (i = 0; i < size; i++)
+ MLX5_SET(rqtc, rqtc, rq_num[i], rqns[i]);
+ }
+}
static int mlx5e_rqt_init(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
- u16 max_size, u32 *init_rqns, u16 init_size)
+ u16 max_size, u32 *init_rqns, u32 *init_vhca_ids, u16 init_size)
{
+ int entry_sz;
void *rqtc;
int inlen;
int err;
u32 *in;
- int i;
+
+ if (!rqt_verify_vhca_ids(mdev, init_vhca_ids, init_size))
+ return -EOPNOTSUPP;
rqt->mdev = mdev;
rqt->size = max_size;
- inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * init_size;
+ entry_sz = init_vhca_ids ? MLX5_ST_SZ_BYTES(rq_vhca) : MLX5_ST_SZ_BYTES(rq_num);
+ inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + entry_sz * init_size;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -33,10 +79,9 @@ static int mlx5e_rqt_init(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
MLX5_SET(rqtc, rqtc, rqt_max_size, rqt->size);
-
MLX5_SET(rqtc, rqtc, rqt_actual_size, init_size);
- for (i = 0; i < init_size; i++)
- MLX5_SET(rqtc, rqtc, rq_num[i], init_rqns[i]);
+
+ fill_rqn_list(rqtc, init_rqns, init_vhca_ids, init_size);
err = mlx5_core_create_rqt(rqt->mdev, in, inlen, &rqt->rqtn);
@@ -49,7 +94,7 @@ int mlx5e_rqt_init_direct(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
{
u16 max_size = indir_enabled ? indir_table_size : 1;
- return mlx5e_rqt_init(rqt, mdev, max_size, &init_rqn, 1);
+ return mlx5e_rqt_init(rqt, mdev, max_size, &init_rqn, NULL, 1);
}
static int mlx5e_bits_invert(unsigned long a, int size)
@@ -63,7 +108,8 @@ static int mlx5e_bits_invert(unsigned long a, int size)
return inv;
}
-static int mlx5e_calc_indir_rqns(u32 *rss_rqns, u32 *rqns, unsigned int num_rqns,
+static int mlx5e_calc_indir_rqns(u32 *rss_rqns, u32 *rqns, u32 *rss_vhca_ids, u32 *vhca_ids,
+ unsigned int num_rqns,
u8 hfunc, struct mlx5e_rss_params_indir *indir)
{
unsigned int i;
@@ -82,30 +128,42 @@ static int mlx5e_calc_indir_rqns(u32 *rss_rqns, u32 *rqns, unsigned int num_rqns
*/
return -EINVAL;
rss_rqns[i] = rqns[ix];
+ if (vhca_ids)
+ rss_vhca_ids[i] = vhca_ids[ix];
}
return 0;
}
int mlx5e_rqt_init_indir(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
- u32 *rqns, unsigned int num_rqns,
+ u32 *rqns, u32 *vhca_ids, unsigned int num_rqns,
u8 hfunc, struct mlx5e_rss_params_indir *indir)
{
- u32 *rss_rqns;
+ u32 *rss_rqns, *rss_vhca_ids = NULL;
int err;
rss_rqns = kvmalloc_array(indir->actual_table_size, sizeof(*rss_rqns), GFP_KERNEL);
if (!rss_rqns)
return -ENOMEM;
- err = mlx5e_calc_indir_rqns(rss_rqns, rqns, num_rqns, hfunc, indir);
+ if (vhca_ids) {
+ rss_vhca_ids = kvmalloc_array(indir->actual_table_size, sizeof(*rss_vhca_ids),
+ GFP_KERNEL);
+ if (!rss_vhca_ids) {
+ kvfree(rss_rqns);
+ return -ENOMEM;
+ }
+ }
+
+ err = mlx5e_calc_indir_rqns(rss_rqns, rqns, rss_vhca_ids, vhca_ids, num_rqns, hfunc, indir);
if (err)
goto out;
- err = mlx5e_rqt_init(rqt, mdev, indir->max_table_size, rss_rqns,
+ err = mlx5e_rqt_init(rqt, mdev, indir->max_table_size, rss_rqns, rss_vhca_ids,
indir->actual_table_size);
out:
+ kvfree(rss_vhca_ids);
kvfree(rss_rqns);
return err;
}
@@ -121,20 +179,32 @@ u32 mlx5e_rqt_size(struct mlx5_core_dev *mdev, unsigned int num_channels)
return min_t(u32, rqt_size, max_cap_rqt_size);
}
+#define MLX5E_MAX_RQT_SIZE_ALLOWED_WITH_XOR8_HASH 256
+
+unsigned int mlx5e_rqt_max_num_channels_allowed_for_xor8(void)
+{
+ return MLX5E_MAX_RQT_SIZE_ALLOWED_WITH_XOR8_HASH / MLX5E_UNIFORM_SPREAD_RQT_FACTOR;
+}
+
void mlx5e_rqt_destroy(struct mlx5e_rqt *rqt)
{
mlx5_core_destroy_rqt(rqt->mdev, rqt->rqtn);
}
-static int mlx5e_rqt_redirect(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int size)
+static int mlx5e_rqt_redirect(struct mlx5e_rqt *rqt, u32 *rqns, u32 *vhca_ids,
+ unsigned int size)
{
- unsigned int i;
+ int entry_sz;
void *rqtc;
int inlen;
u32 *in;
int err;
- inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * size;
+ if (!rqt_verify_vhca_ids(rqt->mdev, vhca_ids, size))
+ return -EINVAL;
+
+ entry_sz = vhca_ids ? MLX5_ST_SZ_BYTES(rq_vhca) : MLX5_ST_SZ_BYTES(rq_num);
+ inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + entry_sz * size;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -143,8 +213,8 @@ static int mlx5e_rqt_redirect(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int siz
MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
MLX5_SET(rqtc, rqtc, rqt_actual_size, size);
- for (i = 0; i < size; i++)
- MLX5_SET(rqtc, rqtc, rq_num[i], rqns[i]);
+
+ fill_rqn_list(rqtc, rqns, vhca_ids, size);
err = mlx5_core_modify_rqt(rqt->mdev, rqt->rqtn, in, inlen);
@@ -152,17 +222,21 @@ static int mlx5e_rqt_redirect(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int siz
return err;
}
-int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn)
+int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn, u32 *vhca_id)
{
- return mlx5e_rqt_redirect(rqt, &rqn, 1);
+ return mlx5e_rqt_redirect(rqt, &rqn, vhca_id, 1);
}
-int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int num_rqns,
+int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, u32 *vhca_ids,
+ unsigned int num_rqns,
u8 hfunc, struct mlx5e_rss_params_indir *indir)
{
- u32 *rss_rqns;
+ u32 *rss_rqns, *rss_vhca_ids = NULL;
int err;
+ if (!rqt_verify_vhca_ids(rqt->mdev, vhca_ids, num_rqns))
+ return -EINVAL;
+
if (WARN_ON(rqt->size != indir->max_table_size))
return -EINVAL;
@@ -170,13 +244,23 @@ int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int num_
if (!rss_rqns)
return -ENOMEM;
- err = mlx5e_calc_indir_rqns(rss_rqns, rqns, num_rqns, hfunc, indir);
+ if (vhca_ids) {
+ rss_vhca_ids = kvmalloc_array(indir->actual_table_size, sizeof(*rss_vhca_ids),
+ GFP_KERNEL);
+ if (!rss_vhca_ids) {
+ kvfree(rss_rqns);
+ return -ENOMEM;
+ }
+ }
+
+ err = mlx5e_calc_indir_rqns(rss_rqns, rqns, rss_vhca_ids, vhca_ids, num_rqns, hfunc, indir);
if (err)
goto out;
- err = mlx5e_rqt_redirect(rqt, rss_rqns, indir->actual_table_size);
+ err = mlx5e_rqt_redirect(rqt, rss_rqns, rss_vhca_ids, indir->actual_table_size);
out:
+ kvfree(rss_vhca_ids);
kvfree(rss_rqns);
return err;
}
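
[Editor's note] A self-contained sketch of the two RQT entry layouts that fill_rqn_list() selects between. The mock_ structures are hypothetical stand-ins for the firmware command layout: when VHCA IDs are supplied, each entry becomes a {rq_num, rq_vhca_id} pair and the rq_vhca_id_format flag is set; otherwise the plain rq_num list is used.

#include <stdio.h>
#include <stdbool.h>

struct mock_rq_vhca {
	unsigned int rq_num;
	unsigned int rq_vhca_id;
};

struct mock_rqt_ctx {
	bool rq_vhca_id_format;          /* selects which array is valid */
	unsigned int rq_num[4];          /* plain format */
	struct mock_rq_vhca rq_vhca[4];  /* cross-VHCA format */
};

static void mock_fill_rqn_list(struct mock_rqt_ctx *rqtc, const unsigned int *rqns,
			       const unsigned int *vhca_ids, unsigned int size)
{
	unsigned int i;

	if (vhca_ids) {
		rqtc->rq_vhca_id_format = true;
		for (i = 0; i < size; i++) {
			rqtc->rq_vhca[i].rq_num = rqns[i];
			rqtc->rq_vhca[i].rq_vhca_id = vhca_ids[i];
		}
	} else {
		for (i = 0; i < size; i++)
			rqtc->rq_num[i] = rqns[i];
	}
}

int main(void)
{
	unsigned int rqns[2] = { 0x20, 0x21 };
	unsigned int vhca_ids[2] = { 0, 1 };
	struct mock_rqt_ctx ctx = { 0 };

	mock_fill_rqn_list(&ctx, rqns, vhca_ids, 2);
	printf("format=%d entry0={rqn 0x%x, vhca %u}\n",
	       ctx.rq_vhca_id_format, ctx.rq_vhca[0].rq_num, ctx.rq_vhca[0].rq_vhca_id);
	return 0;
}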
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h
index 77fba3ebd..2f9e04a84 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h
@@ -20,7 +20,7 @@ void mlx5e_rss_params_indir_init_uniform(struct mlx5e_rss_params_indir *indir,
unsigned int num_channels);
struct mlx5e_rqt {
- struct mlx5_core_dev *mdev;
+ struct mlx5_core_dev *mdev; /* primary */
u32 rqtn;
u16 size;
};
@@ -28,7 +28,7 @@ struct mlx5e_rqt {
int mlx5e_rqt_init_direct(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
bool indir_enabled, u32 init_rqn, u32 indir_table_size);
int mlx5e_rqt_init_indir(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
- u32 *rqns, unsigned int num_rqns,
+ u32 *rqns, u32 *vhca_ids, unsigned int num_rqns,
u8 hfunc, struct mlx5e_rss_params_indir *indir);
void mlx5e_rqt_destroy(struct mlx5e_rqt *rqt);
@@ -38,8 +38,10 @@ static inline u32 mlx5e_rqt_get_rqtn(struct mlx5e_rqt *rqt)
}
u32 mlx5e_rqt_size(struct mlx5_core_dev *mdev, unsigned int num_channels);
-int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn);
-int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int num_rqns,
+unsigned int mlx5e_rqt_max_num_channels_allowed_for_xor8(void);
+int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn, u32 *vhca_id);
+int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, u32 *vhca_ids,
+ unsigned int num_rqns,
u8 hfunc, struct mlx5e_rss_params_indir *indir);
#endif /* __MLX5_EN_RQT_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
index c1545a2e8..5f742f896 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
@@ -74,7 +74,7 @@ struct mlx5e_rss {
struct mlx5e_tir *tir[MLX5E_NUM_INDIR_TIRS];
struct mlx5e_tir *inner_tir[MLX5E_NUM_INDIR_TIRS];
struct mlx5e_rqt rqt;
- struct mlx5_core_dev *mdev;
+ struct mlx5_core_dev *mdev; /* primary */
u32 drop_rqn;
bool inner_ft_support;
bool enabled;
@@ -473,21 +473,22 @@ int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
return 0;
}
-static int mlx5e_rss_apply(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns)
+static int mlx5e_rss_apply(struct mlx5e_rss *rss, u32 *rqns, u32 *vhca_ids, unsigned int num_rqns)
{
int err;
- err = mlx5e_rqt_redirect_indir(&rss->rqt, rqns, num_rqns, rss->hash.hfunc, &rss->indir);
+ err = mlx5e_rqt_redirect_indir(&rss->rqt, rqns, vhca_ids, num_rqns, rss->hash.hfunc,
+ &rss->indir);
if (err)
mlx5e_rss_warn(rss->mdev, "Failed to redirect RQT %#x to channels: err = %d\n",
mlx5e_rqt_get_rqtn(&rss->rqt), err);
return err;
}
-void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns)
+void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, u32 *vhca_ids, unsigned int num_rqns)
{
rss->enabled = true;
- mlx5e_rss_apply(rss, rqns, num_rqns);
+ mlx5e_rss_apply(rss, rqns, vhca_ids, num_rqns);
}
void mlx5e_rss_disable(struct mlx5e_rss *rss)
@@ -495,7 +496,7 @@ void mlx5e_rss_disable(struct mlx5e_rss *rss)
int err;
rss->enabled = false;
- err = mlx5e_rqt_redirect_direct(&rss->rqt, rss->drop_rqn);
+ err = mlx5e_rqt_redirect_direct(&rss->rqt, rss->drop_rqn, NULL);
if (err)
mlx5e_rss_warn(rss->mdev, "Failed to redirect RQT %#x to drop RQ %#x: err = %d\n",
mlx5e_rqt_get_rqtn(&rss->rqt), rss->drop_rqn, err);
@@ -568,7 +569,7 @@ int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc)
int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
const u8 *key, const u8 *hfunc,
- u32 *rqns, unsigned int num_rqns)
+ u32 *rqns, u32 *vhca_ids, unsigned int num_rqns)
{
bool changed_indir = false;
bool changed_hash = false;
@@ -608,7 +609,7 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
}
if (changed_indir && rss->enabled) {
- err = mlx5e_rss_apply(rss, rqns, num_rqns);
+ err = mlx5e_rss_apply(rss, rqns, vhca_ids, num_rqns);
if (err) {
mlx5e_rss_copy(rss, old_rss);
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
index d1d0bc350..d0df98963 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
@@ -39,7 +39,7 @@ int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
const struct mlx5e_packet_merge_param *init_pkt_merge_param,
bool inner, u32 *tirn);
-void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns);
+void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, u32 *vhca_ids, unsigned int num_rqns);
void mlx5e_rss_disable(struct mlx5e_rss *rss);
int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,
@@ -47,7 +47,7 @@ int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,
int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc);
int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
const u8 *key, const u8 *hfunc,
- u32 *rqns, unsigned int num_rqns);
+ u32 *rqns, u32 *vhca_ids, unsigned int num_rqns);
struct mlx5e_rss_params_hash mlx5e_rss_get_hash(struct mlx5e_rss *rss);
u8 mlx5e_rss_get_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt);
int mlx5e_rss_set_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
index b23e224e3..a86eade9a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
@@ -8,7 +8,7 @@
#define MLX5E_MAX_NUM_RSS 16
struct mlx5e_rx_res {
- struct mlx5_core_dev *mdev;
+ struct mlx5_core_dev *mdev; /* primary */
enum mlx5e_rx_res_features features;
unsigned int max_nch;
u32 drop_rqn;
@@ -19,6 +19,7 @@ struct mlx5e_rx_res {
struct mlx5e_rss *rss[MLX5E_MAX_NUM_RSS];
bool rss_active;
u32 *rss_rqns;
+ u32 *rss_vhca_ids;
unsigned int rss_nch;
struct {
@@ -34,6 +35,13 @@ struct mlx5e_rx_res {
/* API for rx_res_rss_* */
+static u32 *get_vhca_ids(struct mlx5e_rx_res *res, int offset)
+{
+ bool multi_vhca = res->features & MLX5E_RX_RES_FEATURE_MULTI_VHCA;
+
+ return multi_vhca ? res->rss_vhca_ids + offset : NULL;
+}
+
void mlx5e_rx_res_rss_update_num_channels(struct mlx5e_rx_res *res, u32 nch)
{
int i;
@@ -85,8 +93,11 @@ int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int i
return PTR_ERR(rss);
mlx5e_rss_set_indir_uniform(rss, init_nch);
- if (res->rss_active)
- mlx5e_rss_enable(rss, res->rss_rqns, res->rss_nch);
+ if (res->rss_active) {
+ u32 *vhca_ids = get_vhca_ids(res, 0);
+
+ mlx5e_rss_enable(rss, res->rss_rqns, vhca_ids, res->rss_nch);
+ }
res->rss[i] = rss;
*rss_idx = i;
@@ -153,10 +164,12 @@ static void mlx5e_rx_res_rss_enable(struct mlx5e_rx_res *res)
for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) {
struct mlx5e_rss *rss = res->rss[i];
+ u32 *vhca_ids;
if (!rss)
continue;
- mlx5e_rss_enable(rss, res->rss_rqns, res->rss_nch);
+ vhca_ids = get_vhca_ids(res, 0);
+ mlx5e_rss_enable(rss, res->rss_rqns, vhca_ids, res->rss_nch);
}
}
@@ -200,6 +213,7 @@ int mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
const u32 *indir, const u8 *key, const u8 *hfunc)
{
+ u32 *vhca_ids = get_vhca_ids(res, 0);
struct mlx5e_rss *rss;
if (rss_idx >= MLX5E_MAX_NUM_RSS)
@@ -209,7 +223,8 @@ int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
if (!rss)
return -ENOENT;
- return mlx5e_rss_set_rxfh(rss, indir, key, hfunc, res->rss_rqns, res->rss_nch);
+ return mlx5e_rss_set_rxfh(rss, indir, key, hfunc, res->rss_rqns, vhca_ids,
+ res->rss_nch);
}
int mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, u32 rss_idx,
@@ -280,11 +295,13 @@ struct mlx5e_rss *mlx5e_rx_res_rss_get(struct mlx5e_rx_res *res, u32 rss_idx)
static void mlx5e_rx_res_free(struct mlx5e_rx_res *res)
{
+ kvfree(res->rss_vhca_ids);
kvfree(res->rss_rqns);
kvfree(res);
}
-static struct mlx5e_rx_res *mlx5e_rx_res_alloc(struct mlx5_core_dev *mdev, unsigned int max_nch)
+static struct mlx5e_rx_res *mlx5e_rx_res_alloc(struct mlx5_core_dev *mdev, unsigned int max_nch,
+ bool multi_vhca)
{
struct mlx5e_rx_res *rx_res;
@@ -298,6 +315,15 @@ static struct mlx5e_rx_res *mlx5e_rx_res_alloc(struct mlx5_core_dev *mdev, unsig
return NULL;
}
+ if (multi_vhca) {
+ rx_res->rss_vhca_ids = kvcalloc(max_nch, sizeof(*rx_res->rss_vhca_ids), GFP_KERNEL);
+ if (!rx_res->rss_vhca_ids) {
+ kvfree(rx_res->rss_rqns);
+ kvfree(rx_res);
+ return NULL;
+ }
+ }
+
return rx_res;
}
@@ -424,10 +450,11 @@ mlx5e_rx_res_create(struct mlx5_core_dev *mdev, enum mlx5e_rx_res_features featu
const struct mlx5e_packet_merge_param *init_pkt_merge_param,
unsigned int init_nch)
{
+ bool multi_vhca = features & MLX5E_RX_RES_FEATURE_MULTI_VHCA;
struct mlx5e_rx_res *res;
int err;
- res = mlx5e_rx_res_alloc(mdev, max_nch);
+ res = mlx5e_rx_res_alloc(mdev, max_nch, multi_vhca);
if (!res)
return ERR_PTR(-ENOMEM);
@@ -504,10 +531,11 @@ static void mlx5e_rx_res_channel_activate_direct(struct mlx5e_rx_res *res,
struct mlx5e_channels *chs,
unsigned int ix)
{
+ u32 *vhca_id = get_vhca_ids(res, ix);
u32 rqn = res->rss_rqns[ix];
int err;
- err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, rqn);
+ err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, rqn, vhca_id);
if (err)
mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to RQ %#x (channel %u): err = %d\n",
mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
@@ -519,7 +547,7 @@ static void mlx5e_rx_res_channel_deactivate_direct(struct mlx5e_rx_res *res,
{
int err;
- err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, res->drop_rqn);
+ err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, res->drop_rqn, NULL);
if (err)
mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (channel %u): err = %d\n",
mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
@@ -534,10 +562,12 @@ void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_chann
nch = mlx5e_channels_get_num(chs);
for (ix = 0; ix < chs->num; ix++) {
+ u32 *vhca_id = get_vhca_ids(res, ix);
+
if (mlx5e_channels_is_xsk(chs, ix))
- mlx5e_channels_get_xsk_rqn(chs, ix, &res->rss_rqns[ix]);
+ mlx5e_channels_get_xsk_rqn(chs, ix, &res->rss_rqns[ix], vhca_id);
else
- mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix]);
+ mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix], vhca_id);
}
res->rss_nch = chs->num;
@@ -554,7 +584,7 @@ void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_chann
if (!mlx5e_channels_get_ptp_rqn(chs, &rqn))
rqn = res->drop_rqn;
- err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, rqn);
+ err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, rqn, NULL);
if (err)
mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to RQ %#x (PTP): err = %d\n",
mlx5e_rqt_get_rqtn(&res->ptp.rqt),
@@ -573,7 +603,7 @@ void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res)
mlx5e_rx_res_channel_deactivate_direct(res, ix);
if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
- err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, res->drop_rqn);
+ err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, res->drop_rqn, NULL);
if (err)
mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (PTP): err = %d\n",
mlx5e_rqt_get_rqtn(&res->ptp.rqt),
@@ -584,10 +614,12 @@ void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res)
void mlx5e_rx_res_xsk_update(struct mlx5e_rx_res *res, struct mlx5e_channels *chs,
unsigned int ix, bool xsk)
{
+ u32 *vhca_id = get_vhca_ids(res, ix);
+
if (xsk)
- mlx5e_channels_get_xsk_rqn(chs, ix, &res->rss_rqns[ix]);
+ mlx5e_channels_get_xsk_rqn(chs, ix, &res->rss_rqns[ix], vhca_id);
else
- mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix]);
+ mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix], vhca_id);
mlx5e_rx_res_rss_enable(res);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
index 82aaba8a8..7b1a9f0f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
@@ -18,6 +18,7 @@ struct mlx5e_rss_params_hash;
enum mlx5e_rx_res_features {
MLX5E_RX_RES_FEATURE_INNER_FT = BIT(0),
MLX5E_RX_RES_FEATURE_PTP = BIT(1),
+ MLX5E_RX_RES_FEATURE_MULTI_VHCA = BIT(2),
};
/* Setup */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
index ac458a8d1..53ca16cb9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
@@ -63,10 +63,12 @@ static int mlx5e_open_trap_rq(struct mlx5e_priv *priv, struct mlx5e_trap *t)
struct mlx5e_create_cq_param ccp = {};
struct dim_cq_moder trap_moder = {};
struct mlx5e_rq *rq = &t->rq;
+ u16 q_counter;
int node;
int err;
node = dev_to_node(mdev->device);
+ q_counter = priv->q_counter[0];
ccp.netdev = priv->netdev;
ccp.wq = priv->wq;
@@ -79,7 +81,7 @@ static int mlx5e_open_trap_rq(struct mlx5e_priv *priv, struct mlx5e_trap *t)
return err;
mlx5e_init_trap_rq(t, &t->params, rq);
- err = mlx5e_open_rq(&t->params, rq_param, NULL, node, rq);
+ err = mlx5e_open_rq(&t->params, rq_param, NULL, node, q_counter, rq);
if (err)
goto err_destroy_cq;
@@ -116,15 +118,14 @@ static int mlx5e_create_trap_direct_rq_tir(struct mlx5_core_dev *mdev, struct ml
}
static void mlx5e_build_trap_params(struct mlx5_core_dev *mdev,
- int max_mtu, u16 q_counter,
- struct mlx5e_trap *t)
+ int max_mtu, struct mlx5e_trap *t)
{
struct mlx5e_params *params = &t->params;
params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC;
mlx5e_init_rq_type_params(mdev, params);
params->sw_mtu = max_mtu;
- mlx5e_build_rq_param(mdev, params, NULL, q_counter, &t->rq_param);
+ mlx5e_build_rq_param(mdev, params, NULL, &t->rq_param);
}
static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
@@ -138,7 +139,7 @@ static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
if (!t)
return ERR_PTR(-ENOMEM);
- mlx5e_build_trap_params(priv->mdev, netdev->max_mtu, priv->q_counter, t);
+ mlx5e_build_trap_params(priv->mdev, netdev->max_mtu, t);
t->priv = priv;
t->mdev = priv->mdev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
index ebada0c5a..db776e515 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
@@ -6,10 +6,10 @@
#include "setup.h"
#include "en/params.h"
-static int mlx5e_xsk_map_pool(struct mlx5e_priv *priv,
+static int mlx5e_xsk_map_pool(struct mlx5_core_dev *mdev,
struct xsk_buff_pool *pool)
{
- struct device *dev = mlx5_core_dma_dev(priv->mdev);
+ struct device *dev = mlx5_core_dma_dev(mdev);
return xsk_pool_dma_map(pool, dev, DMA_ATTR_SKIP_CPU_SYNC);
}
@@ -89,7 +89,7 @@ static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
if (unlikely(!mlx5e_xsk_is_pool_sane(pool)))
return -EINVAL;
- err = mlx5e_xsk_map_pool(priv, pool);
+ err = mlx5e_xsk_map_pool(mlx5_sd_ch_ix_get_dev(priv->mdev, ix), pool);
if (unlikely(err))
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index c969b8e70..06592b9f0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -28,10 +28,8 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
struct mlx5_core_dev *mdev)
{
- /* AF_XDP doesn't support frames larger than PAGE_SIZE,
- * and xsk->chunk_size is limited to 65535 bytes.
- */
- if ((size_t)xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE) {
+ /* AF_XDP doesn't support frames larger than PAGE_SIZE. */
+ if (xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE) {
mlx5_core_err(mdev, "XSK chunk size %u out of bounds [%u, %lu]\n", xsk->chunk_size,
MLX5E_MIN_XSK_CHUNK_SIZE, PAGE_SIZE);
return false;
@@ -51,10 +49,9 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
static void mlx5e_build_xsk_cparam(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
- u16 q_counter,
struct mlx5e_channel_param *cparam)
{
- mlx5e_build_rq_param(mdev, params, xsk, q_counter, &cparam->rq);
+ mlx5e_build_rq_param(mdev, params, xsk, &cparam->rq);
mlx5e_build_xdpsq_param(mdev, params, xsk, &cparam->xdp_sq);
}
@@ -95,6 +92,7 @@ static int mlx5e_open_xsk_rq(struct mlx5e_channel *c, struct mlx5e_params *param
struct mlx5e_rq_param *rq_params, struct xsk_buff_pool *pool,
struct mlx5e_xsk_param *xsk)
{
+ u16 q_counter = c->priv->q_counter[c->sd_ix];
struct mlx5e_rq *xskrq = &c->xskrq;
int err;
@@ -102,7 +100,7 @@ static int mlx5e_open_xsk_rq(struct mlx5e_channel *c, struct mlx5e_params *param
if (err)
return err;
- err = mlx5e_open_rq(params, rq_params, xsk, cpu_to_node(c->cpu), xskrq);
+ err = mlx5e_open_rq(params, rq_params, xsk, cpu_to_node(c->cpu), q_counter, xskrq);
if (err)
return err;
@@ -127,7 +125,7 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
if (!cparam)
return -ENOMEM;
- mlx5e_build_xsk_cparam(priv->mdev, params, xsk, priv->q_counter, cparam);
+ mlx5e_build_xsk_cparam(priv->mdev, params, xsk, cparam);
err = mlx5e_open_cq(c->mdev, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
&c->xskrq.cq);