path: root/drivers/net/ethernet/mellanox/mlx5
author      Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:40:19 +0000
committer   Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:40:19 +0000
commit      9f0fc191371843c4fc000a226b0a26b6c059aacd (patch)
tree        35f8be3ef04506ac891ad001e8c41e535ae8d01d /drivers/net/ethernet/mellanox/mlx5
parent      Releasing progress-linux version 6.6.15-2~progress7.99u1. (diff)
download    linux-9f0fc191371843c4fc000a226b0a26b6c059aacd.tar.xz
            linux-9f0fc191371843c4fc000a226b0a26b6c059aacd.zip
Merging upstream version 6.7.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Kconfig |    8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile |    3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c |   70
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/dev.c |  122
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/devlink.c |   11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c |   49
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c |  118
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.h |    6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/dpll.c |  432
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h |   13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c |    8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/fs.h |    1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/health.c |  187
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/health.h |   14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c |  422
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c |  346
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c |   32
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h |    9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rss.c |  144
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rss.h |   20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c |  105
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h |   12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c |   20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c |    2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c |   26
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h |   47
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c |  393
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c |    3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h |    1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c |    2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs.c |    8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c |   97
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c |   32
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c |   16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c |    8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c |   96
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/events.c |    5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c |   10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/health.c |  129
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c |   24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c |   47
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h |    1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c |   27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c |   10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c |   25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h |    6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h |    1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c |  542
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h |   14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c |   27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h |   36
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/port.c |    3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c |  101
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h |    6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c |   26
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c |  242
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c |   69
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h |    3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c |   36
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h |    5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c |    9
61 files changed, 2700 insertions(+), 1587 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index c4f4de82e2..685335832a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -189,3 +189,11 @@ config MLX5_SF_MANAGER
port is managed through devlink. A subfunction supports RDMA, netdevice
and vdpa device. It is similar to a SRIOV VF but it doesn't require
SRIOV support.
+
+config MLX5_DPLL
+ tristate "Mellanox 5th generation network adapters (ConnectX series) DPLL support"
+ depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
+ select DPLL
+ help
+ DPLL support in Mellanox Technologies ConnectX NICs.
+
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 7e94caca48..c44870b175 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -128,3 +128,6 @@ mlx5_core-$(CONFIG_MLX5_SF) += sf/vhca_event.o sf/dev/dev.o sf/dev/driver.o irq_
# SF manager
#
mlx5_core-$(CONFIG_MLX5_SF_MANAGER) += sf/cmd.o sf/hw_table.o sf/devlink.o
+
+obj-$(CONFIG_MLX5_DPLL) += mlx5_dpll.o
+mlx5_dpll-y := dpll.o
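
The two hunks above wire the new DPLL driver into kbuild: MLX5_DPLL is its own tristate, so the code builds as a separate mlx5_dpll.ko module (from dpll.o) instead of growing mlx5_core. A minimal config fragment to enable it, assuming the usual mlx5 prerequisites are already satisfied:

    CONFIG_MLX5_CORE=m
    CONFIG_MLX5_DPLL=m
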
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 55efb932ab..4957412ff1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -528,6 +528,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_SAVE_VHCA_STATE:
case MLX5_CMD_OP_LOAD_VHCA_STATE:
case MLX5_CMD_OP_SYNC_CRYPTO:
+ case MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS:
*status = MLX5_DRIVER_STATUS_ABORTED;
*synd = MLX5_DRIVER_SYND;
return -ENOLINK;
@@ -731,6 +732,7 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(SAVE_VHCA_STATE);
MLX5_COMMAND_STR_CASE(LOAD_VHCA_STATE);
MLX5_COMMAND_STR_CASE(SYNC_CRYPTO);
+ MLX5_COMMAND_STR_CASE(ALLOW_OTHER_VHCA_ACCESS);
default: return "unknown command opcode";
}
}
@@ -2093,6 +2095,74 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);
+int mlx5_cmd_allow_other_vhca_access(struct mlx5_core_dev *dev,
+ struct mlx5_cmd_allow_other_vhca_access_attr *attr)
+{
+ u32 out[MLX5_ST_SZ_DW(allow_other_vhca_access_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(allow_other_vhca_access_in)] = {};
+ void *key;
+
+ MLX5_SET(allow_other_vhca_access_in,
+ in, opcode, MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS);
+ MLX5_SET(allow_other_vhca_access_in,
+ in, object_type_to_be_accessed, attr->obj_type);
+ MLX5_SET(allow_other_vhca_access_in,
+ in, object_id_to_be_accessed, attr->obj_id);
+
+ key = MLX5_ADDR_OF(allow_other_vhca_access_in, in, access_key);
+ memcpy(key, attr->access_key, sizeof(attr->access_key));
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_cmd_alias_obj_create(struct mlx5_core_dev *dev,
+ struct mlx5_cmd_alias_obj_create_attr *alias_attr,
+ u32 *obj_id)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
+ u32 in[MLX5_ST_SZ_DW(create_alias_obj_in)] = {};
+ void *param;
+ void *attr;
+ void *key;
+ int ret;
+
+ attr = MLX5_ADDR_OF(create_alias_obj_in, in, hdr);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, obj_type, alias_attr->obj_type);
+ param = MLX5_ADDR_OF(general_obj_in_cmd_hdr, in, op_param);
+ MLX5_SET(general_obj_create_param, param, alias_object, 1);
+
+ attr = MLX5_ADDR_OF(create_alias_obj_in, in, alias_ctx);
+ MLX5_SET(alias_context, attr, vhca_id_to_be_accessed, alias_attr->vhca_id);
+ MLX5_SET(alias_context, attr, object_id_to_be_accessed, alias_attr->obj_id);
+
+ key = MLX5_ADDR_OF(alias_context, attr, access_key);
+ memcpy(key, alias_attr->access_key, sizeof(alias_attr->access_key));
+
+ ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (ret)
+ return ret;
+
+ *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+
+ return 0;
+}
+
+int mlx5_cmd_alias_obj_destroy(struct mlx5_core_dev *dev, u32 obj_id,
+ u16 obj_type)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, obj_type);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id);
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
struct cmd_msg_cache *ch;
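
The three helpers added above form a grant/use/cleanup triple: the owning function first allows cross-vHCA access to one of its objects under a shared access key, the peer then creates an alias object pointing at it, and the alias is later destroyed as a general object. A hypothetical caller sketch, assuming the attr structs this commit declares (presumably in mlx5_core.h, which the diffstat shows changing) and with the key generation purely illustrative:

static int example_share_obj(struct mlx5_core_dev *owner,
			     struct mlx5_core_dev *peer,
			     u16 obj_type, u32 obj_id, u16 owner_vhca_id,
			     u32 *alias_obj_id)
{
	struct mlx5_cmd_allow_other_vhca_access_attr allow = {
		.obj_type = obj_type,
		.obj_id = obj_id,
	};
	struct mlx5_cmd_alias_obj_create_attr alias = {
		.obj_type = obj_type,
		.obj_id = obj_id,
		.vhca_id = owner_vhca_id,
	};
	int err;

	/* Both sides must present the same opaque access key. */
	get_random_bytes(allow.access_key, sizeof(allow.access_key));
	memcpy(alias.access_key, allow.access_key, sizeof(alias.access_key));

	/* Owner grants access first ... */
	err = mlx5_cmd_allow_other_vhca_access(owner, &allow);
	if (err)
		return err;

	/* ... then the peer creates an alias referring to the object. */
	return mlx5_cmd_alias_obj_create(peer, &alias, alias_obj_id);
}
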
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index 7909f378dc..cf0477f53d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -38,8 +38,6 @@
#include "devlink.h"
#include "lag/lag.h"
-/* intf dev list mutex */
-static DEFINE_MUTEX(mlx5_intf_mutex);
static DEFINE_IDA(mlx5_adev_ida);
static bool is_eth_rep_supported(struct mlx5_core_dev *dev)
@@ -206,6 +204,19 @@ static bool is_ib_enabled(struct mlx5_core_dev *dev)
return err ? false : val.vbool;
}
+static bool is_dpll_supported(struct mlx5_core_dev *dev)
+{
+ if (!IS_ENABLED(CONFIG_MLX5_DPLL))
+ return false;
+
+ if (!MLX5_CAP_MCAM_REG2(dev, synce_registers)) {
+ mlx5_core_warn(dev, "Missing SyncE capability\n");
+ return false;
+ }
+
+ return true;
+}
+
enum {
MLX5_INTERFACE_PROTOCOL_ETH,
MLX5_INTERFACE_PROTOCOL_ETH_REP,
@@ -215,6 +226,8 @@ enum {
MLX5_INTERFACE_PROTOCOL_MPIB,
MLX5_INTERFACE_PROTOCOL_VNET,
+
+ MLX5_INTERFACE_PROTOCOL_DPLL,
};
static const struct mlx5_adev_device {
@@ -237,6 +250,8 @@ static const struct mlx5_adev_device {
.is_supported = &is_ib_rep_supported },
[MLX5_INTERFACE_PROTOCOL_MPIB] = { .suffix = "multiport",
.is_supported = &is_mp_supported },
+ [MLX5_INTERFACE_PROTOCOL_DPLL] = { .suffix = "dpll",
+ .is_supported = &is_dpll_supported },
};
int mlx5_adev_idx_alloc(void)
@@ -320,9 +335,9 @@ static void del_adev(struct auxiliary_device *adev)
void mlx5_dev_set_lightweight(struct mlx5_core_dev *dev)
{
- mutex_lock(&mlx5_intf_mutex);
+ mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
- mutex_unlock(&mlx5_intf_mutex);
+ mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
}
bool mlx5_dev_is_lightweight(struct mlx5_core_dev *dev)
@@ -338,7 +353,7 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
int ret = 0, i;
devl_assert_locked(priv_to_devlink(dev));
- mutex_lock(&mlx5_intf_mutex);
+ mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
priv->flags &= ~MLX5_PRIV_FLAGS_DETACH;
for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
if (!priv->adev[i]) {
@@ -383,7 +398,7 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
break;
}
}
- mutex_unlock(&mlx5_intf_mutex);
+ mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
return ret;
}
@@ -396,7 +411,7 @@ void mlx5_detach_device(struct mlx5_core_dev *dev, bool suspend)
int i;
devl_assert_locked(priv_to_devlink(dev));
- mutex_lock(&mlx5_intf_mutex);
+ mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) {
if (!priv->adev[i])
continue;
@@ -426,7 +441,7 @@ skip_suspend:
priv->adev[i] = NULL;
}
priv->flags |= MLX5_PRIV_FLAGS_DETACH;
- mutex_unlock(&mlx5_intf_mutex);
+ mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
}
int mlx5_register_device(struct mlx5_core_dev *dev)
@@ -434,10 +449,10 @@ int mlx5_register_device(struct mlx5_core_dev *dev)
int ret;
devl_assert_locked(priv_to_devlink(dev));
- mutex_lock(&mlx5_intf_mutex);
+ mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
ret = mlx5_rescan_drivers_locked(dev);
- mutex_unlock(&mlx5_intf_mutex);
+ mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
if (ret)
mlx5_unregister_device(dev);
@@ -447,10 +462,10 @@ int mlx5_register_device(struct mlx5_core_dev *dev)
void mlx5_unregister_device(struct mlx5_core_dev *dev)
{
devl_assert_locked(priv_to_devlink(dev));
- mutex_lock(&mlx5_intf_mutex);
+ mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
dev->priv.flags = MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
mlx5_rescan_drivers_locked(dev);
- mutex_unlock(&mlx5_intf_mutex);
+ mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
}
static int add_drivers(struct mlx5_core_dev *dev)
@@ -528,7 +543,6 @@ int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
{
struct mlx5_priv *priv = &dev->priv;
- lockdep_assert_held(&mlx5_intf_mutex);
if (priv->flags & MLX5_PRIV_FLAGS_DETACH)
return 0;
@@ -548,85 +562,3 @@ bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev
return (fsystem_guid && psystem_guid && fsystem_guid == psystem_guid);
}
-
-static u32 mlx5_gen_pci_id(const struct mlx5_core_dev *dev)
-{
- return (u32)((pci_domain_nr(dev->pdev->bus) << 16) |
- (dev->pdev->bus->number << 8) |
- PCI_SLOT(dev->pdev->devfn));
-}
-
-static int _next_phys_dev(struct mlx5_core_dev *mdev,
- const struct mlx5_core_dev *curr)
-{
- if (!mlx5_core_is_pf(mdev))
- return 0;
-
- if (mdev == curr)
- return 0;
-
- if (!mlx5_same_hw_devs(mdev, (struct mlx5_core_dev *)curr) &&
- mlx5_gen_pci_id(mdev) != mlx5_gen_pci_id(curr))
- return 0;
-
- return 1;
-}
-
-static void *pci_get_other_drvdata(struct device *this, struct device *other)
-{
- if (this->driver != other->driver)
- return NULL;
-
- return pci_get_drvdata(to_pci_dev(other));
-}
-
-static int next_phys_dev_lag(struct device *dev, const void *data)
-{
- struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data;
-
- mdev = pci_get_other_drvdata(this->device, dev);
- if (!mdev)
- return 0;
-
- if (!mlx5_lag_is_supported(mdev))
- return 0;
-
- return _next_phys_dev(mdev, data);
-}
-
-static struct mlx5_core_dev *mlx5_get_next_dev(struct mlx5_core_dev *dev,
- int (*match)(struct device *dev, const void *data))
-{
- struct device *next;
-
- if (!mlx5_core_is_pf(dev))
- return NULL;
-
- next = bus_find_device(&pci_bus_type, NULL, dev, match);
- if (!next)
- return NULL;
-
- put_device(next);
- return pci_get_drvdata(to_pci_dev(next));
-}
-
-/* Must be called with intf_mutex held */
-struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev)
-{
- lockdep_assert_held(&mlx5_intf_mutex);
- return mlx5_get_next_dev(dev, &next_phys_dev_lag);
-}
-
-void mlx5_dev_list_lock(void)
-{
- mutex_lock(&mlx5_intf_mutex);
-}
-void mlx5_dev_list_unlock(void)
-{
- mutex_unlock(&mlx5_intf_mutex);
-}
-
-int mlx5_dev_list_trylock(void)
-{
- return mutex_trylock(&mlx5_intf_mutex);
-}
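
Net effect of this file's changes: the driver-global mlx5_intf_mutex is gone. Auxiliary-device attach/detach/rescan now serializes on the per-HCA-group devcom component, and the PCI-bus walk used for LAG peer lookup (mlx5_get_next_phys_dev_lag and friends) moves out of this file. A sketch of the resulting locking idiom, with a hypothetical wrapper name and assuming dev->priv.hca_devcom_comp was initialized earlier in probe:

static int example_rescan(struct mlx5_core_dev *dev)
{
	int err;

	mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
	err = mlx5_rescan_drivers_locked(dev);
	mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
	return err;
}
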
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index af8460bb25..3e064234f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -138,7 +138,6 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct pci_dev *pdev = dev->pdev;
- bool sf_dev_allocated;
int ret = 0;
if (mlx5_dev_is_lightweight(dev)) {
@@ -148,16 +147,6 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
return 0;
}
- sf_dev_allocated = mlx5_sf_dev_allocated(dev);
- if (sf_dev_allocated) {
- /* Reload results in deleting SF device which further results in
- * unregistering devlink instance while holding devlink_mutext.
- * Hence, do not support reload.
- */
- NL_SET_ERR_MSG_MOD(extack, "reload is unsupported when SFs are allocated");
- return -EOPNOTSUPP;
- }
-
if (mlx5_lag_is_active(dev)) {
NL_SET_ERR_MSG_MOD(extack, "reload is unsupported in Lag mode");
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index 85d3bfa078..080e7eab52 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -889,36 +889,16 @@ int mlx5_fw_tracer_trigger_core_dump_general(struct mlx5_core_dev *dev)
return 0;
}
-static int
+static void
mlx5_devlink_fmsg_fill_trace(struct devlink_fmsg *fmsg,
struct mlx5_fw_trace_data *trace_data)
{
- int err;
-
- err = devlink_fmsg_obj_nest_start(fmsg);
- if (err)
- return err;
-
- err = devlink_fmsg_u64_pair_put(fmsg, "timestamp", trace_data->timestamp);
- if (err)
- return err;
-
- err = devlink_fmsg_bool_pair_put(fmsg, "lost", trace_data->lost);
- if (err)
- return err;
-
- err = devlink_fmsg_u8_pair_put(fmsg, "event_id", trace_data->event_id);
- if (err)
- return err;
-
- err = devlink_fmsg_string_pair_put(fmsg, "msg", trace_data->msg);
- if (err)
- return err;
-
- err = devlink_fmsg_obj_nest_end(fmsg);
- if (err)
- return err;
- return 0;
+ devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_u64_pair_put(fmsg, "timestamp", trace_data->timestamp);
+ devlink_fmsg_bool_pair_put(fmsg, "lost", trace_data->lost);
+ devlink_fmsg_u8_pair_put(fmsg, "event_id", trace_data->event_id);
+ devlink_fmsg_string_pair_put(fmsg, "msg", trace_data->msg);
+ devlink_fmsg_obj_nest_end(fmsg);
}
int mlx5_fw_tracer_get_saved_traces_objects(struct mlx5_fw_tracer *tracer,
@@ -927,7 +907,6 @@ int mlx5_fw_tracer_get_saved_traces_objects(struct mlx5_fw_tracer *tracer,
struct mlx5_fw_trace_data *straces = tracer->st_arr.straces;
u32 index, start_index, end_index;
u32 saved_traces_index;
- int err;
if (!straces[0].timestamp)
return -ENOMSG;
@@ -940,22 +919,18 @@ int mlx5_fw_tracer_get_saved_traces_objects(struct mlx5_fw_tracer *tracer,
start_index = 0;
end_index = (saved_traces_index - 1) & (SAVED_TRACES_NUM - 1);
- err = devlink_fmsg_arr_pair_nest_start(fmsg, "dump fw traces");
- if (err)
- goto unlock;
+ devlink_fmsg_arr_pair_nest_start(fmsg, "dump fw traces");
index = start_index;
while (index != end_index) {
- err = mlx5_devlink_fmsg_fill_trace(fmsg, &straces[index]);
- if (err)
- goto unlock;
+ mlx5_devlink_fmsg_fill_trace(fmsg, &straces[index]);
index = (index + 1) & (SAVED_TRACES_NUM - 1);
}
- err = devlink_fmsg_arr_pair_nest_end(fmsg);
-unlock:
+ devlink_fmsg_arr_pair_nest_end(fmsg);
mutex_unlock(&tracer->st_arr.lock);
- return err;
+
+ return 0;
}
static void mlx5_fw_tracer_update_db(struct work_struct *work)
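
This hunk shows the pattern behind the whole fmsg conversion in this series: the devlink_fmsg_* formatting calls now return void, with the devlink core recording the first internal failure inside the fmsg and reporting it when the message is finalized (the premise of the conversion), so dump and diagnose paths lose their per-call error plumbing. A minimal diagnose callback in the new style, with illustrative names:

static int example_diagnose(struct devlink_health_reporter *reporter,
			    struct devlink_fmsg *fmsg,
			    struct netlink_ext_ack *extack)
{
	devlink_fmsg_obj_nest_start(fmsg);
	devlink_fmsg_string_pair_put(fmsg, "state", "healthy");
	devlink_fmsg_u32_pair_put(fmsg, "restarts", 0);
	devlink_fmsg_obj_nest_end(fmsg);
	return 0;
}
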
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c
index e869c65d8e..c7216e84ef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c
@@ -13,106 +13,55 @@ struct mlx5_vnic_diag_stats {
__be64 query_vnic_env_out[MLX5_ST_SZ_QW(query_vnic_env_out)];
};
-int mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev,
- struct devlink_fmsg *fmsg,
- u16 vport_num, bool other_vport)
+void mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev,
+ struct devlink_fmsg *fmsg,
+ u16 vport_num, bool other_vport)
{
u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
struct mlx5_vnic_diag_stats vnic;
- int err;
MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
MLX5_SET(query_vnic_env_in, in, vport_number, vport_num);
MLX5_SET(query_vnic_env_in, in, other_vport, !!other_vport);
- err = mlx5_cmd_exec_inout(dev, query_vnic_env, in, &vnic.query_vnic_env_out);
- if (err)
- return err;
+ mlx5_cmd_exec_inout(dev, query_vnic_env, in, &vnic.query_vnic_env_out);
- err = devlink_fmsg_pair_nest_start(fmsg, "vNIC env counters");
- if (err)
- return err;
-
- err = devlink_fmsg_obj_nest_start(fmsg);
- if (err)
- return err;
+ devlink_fmsg_pair_nest_start(fmsg, "vNIC env counters");
+ devlink_fmsg_obj_nest_start(fmsg);
if (MLX5_CAP_GEN(dev, vnic_env_queue_counters)) {
- err = devlink_fmsg_u32_pair_put(fmsg, "total_error_queues",
- VNIC_ENV_GET(&vnic, total_error_queues));
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "send_queue_priority_update_flow",
- VNIC_ENV_GET(&vnic,
- send_queue_priority_update_flow));
- if (err)
- return err;
+ devlink_fmsg_u32_pair_put(fmsg, "total_error_queues",
+ VNIC_ENV_GET(&vnic, total_error_queues));
+ devlink_fmsg_u32_pair_put(fmsg, "send_queue_priority_update_flow",
+ VNIC_ENV_GET(&vnic, send_queue_priority_update_flow));
}
-
if (MLX5_CAP_GEN(dev, eq_overrun_count)) {
- err = devlink_fmsg_u32_pair_put(fmsg, "comp_eq_overrun",
- VNIC_ENV_GET(&vnic, comp_eq_overrun));
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "async_eq_overrun",
- VNIC_ENV_GET(&vnic, async_eq_overrun));
- if (err)
- return err;
- }
-
- if (MLX5_CAP_GEN(dev, vnic_env_cq_overrun)) {
- err = devlink_fmsg_u32_pair_put(fmsg, "cq_overrun",
- VNIC_ENV_GET(&vnic, cq_overrun));
- if (err)
- return err;
- }
-
- if (MLX5_CAP_GEN(dev, invalid_command_count)) {
- err = devlink_fmsg_u32_pair_put(fmsg, "invalid_command",
- VNIC_ENV_GET(&vnic, invalid_command));
- if (err)
- return err;
- }
-
- if (MLX5_CAP_GEN(dev, quota_exceeded_count)) {
- err = devlink_fmsg_u32_pair_put(fmsg, "quota_exceeded_command",
- VNIC_ENV_GET(&vnic, quota_exceeded_command));
- if (err)
- return err;
+ devlink_fmsg_u32_pair_put(fmsg, "comp_eq_overrun",
+ VNIC_ENV_GET(&vnic, comp_eq_overrun));
+ devlink_fmsg_u32_pair_put(fmsg, "async_eq_overrun",
+ VNIC_ENV_GET(&vnic, async_eq_overrun));
}
-
- if (MLX5_CAP_GEN(dev, nic_receive_steering_discard)) {
- err = devlink_fmsg_u64_pair_put(fmsg, "nic_receive_steering_discard",
- VNIC_ENV_GET64(&vnic,
- nic_receive_steering_discard));
- if (err)
- return err;
- }
-
+ if (MLX5_CAP_GEN(dev, vnic_env_cq_overrun))
+ devlink_fmsg_u32_pair_put(fmsg, "cq_overrun",
+ VNIC_ENV_GET(&vnic, cq_overrun));
+ if (MLX5_CAP_GEN(dev, invalid_command_count))
+ devlink_fmsg_u32_pair_put(fmsg, "invalid_command",
+ VNIC_ENV_GET(&vnic, invalid_command));
+ if (MLX5_CAP_GEN(dev, quota_exceeded_count))
+ devlink_fmsg_u32_pair_put(fmsg, "quota_exceeded_command",
+ VNIC_ENV_GET(&vnic, quota_exceeded_command));
+ if (MLX5_CAP_GEN(dev, nic_receive_steering_discard))
+ devlink_fmsg_u64_pair_put(fmsg, "nic_receive_steering_discard",
+ VNIC_ENV_GET64(&vnic, nic_receive_steering_discard));
if (MLX5_CAP_GEN(dev, vnic_env_cnt_steering_fail)) {
- err = devlink_fmsg_u64_pair_put(fmsg, "generated_pkt_steering_fail",
- VNIC_ENV_GET64(&vnic,
- generated_pkt_steering_fail));
- if (err)
- return err;
-
- err = devlink_fmsg_u64_pair_put(fmsg, "handled_pkt_steering_fail",
- VNIC_ENV_GET64(&vnic, handled_pkt_steering_fail));
- if (err)
- return err;
+ devlink_fmsg_u64_pair_put(fmsg, "generated_pkt_steering_fail",
+ VNIC_ENV_GET64(&vnic, generated_pkt_steering_fail));
+ devlink_fmsg_u64_pair_put(fmsg, "handled_pkt_steering_fail",
+ VNIC_ENV_GET64(&vnic, handled_pkt_steering_fail));
}
- err = devlink_fmsg_obj_nest_end(fmsg);
- if (err)
- return err;
-
- err = devlink_fmsg_pair_nest_end(fmsg);
- if (err)
- return err;
-
- return 0;
+ devlink_fmsg_obj_nest_end(fmsg);
+ devlink_fmsg_pair_nest_end(fmsg);
}
static int mlx5_reporter_vnic_diagnose(struct devlink_health_reporter *reporter,
@@ -121,7 +70,8 @@ static int mlx5_reporter_vnic_diagnose(struct devlink_health_reporter *reporter,
{
struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);
- return mlx5_reporter_vnic_diagnose_counters(dev, fmsg, 0, false);
+ mlx5_reporter_vnic_diagnose_counters(dev, fmsg, 0, false);
+ return 0;
}
static const struct devlink_health_reporter_ops mlx5_reporter_vnic_ops = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.h
index eba87a39e9..fbc31256f7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.h
@@ -9,8 +9,8 @@
void mlx5_reporter_vnic_create(struct mlx5_core_dev *dev);
void mlx5_reporter_vnic_destroy(struct mlx5_core_dev *dev);
-int mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev,
- struct devlink_fmsg *fmsg,
- u16 vport_num, bool other_vport);
+void mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev,
+ struct devlink_fmsg *fmsg,
+ u16 vport_num, bool other_vport);
#endif /* __MLX5_REPORTER_VNIC_H */
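
Beyond dropping the error plumbing, the reworked vNIC counter dump keeps its capability gating: each counter pair is emitted only when the firmware capability backing that field is present, so the dump shape legitimately varies per device. The idiom reduced to one gate (real gates and field names are in the hunk above; the function name here is hypothetical):

static void example_put_eq_overruns(struct mlx5_core_dev *dev,
				    struct devlink_fmsg *fmsg,
				    struct mlx5_vnic_diag_stats *vnic)
{
	if (!MLX5_CAP_GEN(dev, eq_overrun_count))
		return;
	devlink_fmsg_u32_pair_put(fmsg, "comp_eq_overrun",
				  VNIC_ENV_GET(vnic, comp_eq_overrun));
	devlink_fmsg_u32_pair_put(fmsg, "async_eq_overrun",
				  VNIC_ENV_GET(vnic, async_eq_overrun));
}
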
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
new file mode 100644
index 0000000000..8ce5c8bcda
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
@@ -0,0 +1,432 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <linux/dpll.h>
+#include <linux/mlx5/driver.h>
+
+/* This structure represents a reference to DPLL, one is created
+ * per mdev instance.
+ */
+struct mlx5_dpll {
+ struct dpll_device *dpll;
+ struct dpll_pin *dpll_pin;
+ struct mlx5_core_dev *mdev;
+ struct workqueue_struct *wq;
+ struct delayed_work work;
+ struct {
+ bool valid;
+ enum dpll_lock_status lock_status;
+ enum dpll_pin_state pin_state;
+ } last;
+ struct notifier_block mdev_nb;
+ struct net_device *tracking_netdev;
+};
+
+static int mlx5_dpll_clock_id_get(struct mlx5_core_dev *mdev, u64 *clock_id)
+{
+ u32 out[MLX5_ST_SZ_DW(msecq_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(msecq_reg)] = {};
+ int err;
+
+ err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_MSECQ, 0, 0);
+ if (err)
+ return err;
+ *clock_id = MLX5_GET64(msecq_reg, out, local_clock_identity);
+ return 0;
+}
+
+static int
+mlx5_dpll_synce_status_get(struct mlx5_core_dev *mdev,
+ enum mlx5_msees_admin_status *admin_status,
+ enum mlx5_msees_oper_status *oper_status,
+ bool *ho_acq)
+{
+ u32 out[MLX5_ST_SZ_DW(msees_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(msees_reg)] = {};
+ int err;
+
+ err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_MSEES, 0, 0);
+ if (err)
+ return err;
+ if (admin_status)
+ *admin_status = MLX5_GET(msees_reg, out, admin_status);
+ *oper_status = MLX5_GET(msees_reg, out, oper_status);
+ if (ho_acq)
+ *ho_acq = MLX5_GET(msees_reg, out, ho_acq);
+ return 0;
+}
+
+static int
+mlx5_dpll_synce_status_set(struct mlx5_core_dev *mdev,
+ enum mlx5_msees_admin_status admin_status)
+{
+ u32 out[MLX5_ST_SZ_DW(msees_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(msees_reg)] = {};
+
+ MLX5_SET(msees_reg, in, field_select,
+ MLX5_MSEES_FIELD_SELECT_ENABLE |
+ MLX5_MSEES_FIELD_SELECT_ADMIN_STATUS);
+ MLX5_SET(msees_reg, in, admin_status, admin_status);
+ return mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_MSEES, 0, 1);
+}
+
+static enum dpll_lock_status
+mlx5_dpll_lock_status_get(enum mlx5_msees_oper_status oper_status, bool ho_acq)
+{
+ switch (oper_status) {
+ case MLX5_MSEES_OPER_STATUS_SELF_TRACK:
+ fallthrough;
+ case MLX5_MSEES_OPER_STATUS_OTHER_TRACK:
+ return ho_acq ? DPLL_LOCK_STATUS_LOCKED_HO_ACQ :
+ DPLL_LOCK_STATUS_LOCKED;
+ case MLX5_MSEES_OPER_STATUS_HOLDOVER:
+ fallthrough;
+ case MLX5_MSEES_OPER_STATUS_FAIL_HOLDOVER:
+ return DPLL_LOCK_STATUS_HOLDOVER;
+ default:
+ return DPLL_LOCK_STATUS_UNLOCKED;
+ }
+}
+
+static enum dpll_pin_state
+mlx5_dpll_pin_state_get(enum mlx5_msees_admin_status admin_status,
+ enum mlx5_msees_oper_status oper_status)
+{
+ return (admin_status == MLX5_MSEES_ADMIN_STATUS_TRACK &&
+ (oper_status == MLX5_MSEES_OPER_STATUS_SELF_TRACK ||
+ oper_status == MLX5_MSEES_OPER_STATUS_OTHER_TRACK)) ?
+ DPLL_PIN_STATE_CONNECTED : DPLL_PIN_STATE_DISCONNECTED;
+}
+
+static int mlx5_dpll_device_lock_status_get(const struct dpll_device *dpll,
+ void *priv,
+ enum dpll_lock_status *status,
+ struct netlink_ext_ack *extack)
+{
+ enum mlx5_msees_oper_status oper_status;
+ struct mlx5_dpll *mdpll = priv;
+ bool ho_acq;
+ int err;
+
+ err = mlx5_dpll_synce_status_get(mdpll->mdev, NULL,
+ &oper_status, &ho_acq);
+ if (err)
+ return err;
+
+ *status = mlx5_dpll_lock_status_get(oper_status, ho_acq);
+ return 0;
+}
+
+static int mlx5_dpll_device_mode_get(const struct dpll_device *dpll,
+ void *priv, enum dpll_mode *mode,
+ struct netlink_ext_ack *extack)
+{
+ *mode = DPLL_MODE_MANUAL;
+ return 0;
+}
+
+static bool mlx5_dpll_device_mode_supported(const struct dpll_device *dpll,
+ void *priv,
+ enum dpll_mode mode,
+ struct netlink_ext_ack *extack)
+{
+ return mode == DPLL_MODE_MANUAL;
+}
+
+static const struct dpll_device_ops mlx5_dpll_device_ops = {
+ .lock_status_get = mlx5_dpll_device_lock_status_get,
+ .mode_get = mlx5_dpll_device_mode_get,
+ .mode_supported = mlx5_dpll_device_mode_supported,
+};
+
+static int mlx5_dpll_pin_direction_get(const struct dpll_pin *pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_pin_direction *direction,
+ struct netlink_ext_ack *extack)
+{
+ *direction = DPLL_PIN_DIRECTION_INPUT;
+ return 0;
+}
+
+static int mlx5_dpll_state_on_dpll_get(const struct dpll_pin *pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack)
+{
+ enum mlx5_msees_admin_status admin_status;
+ enum mlx5_msees_oper_status oper_status;
+ struct mlx5_dpll *mdpll = pin_priv;
+ int err;
+
+ err = mlx5_dpll_synce_status_get(mdpll->mdev, &admin_status,
+ &oper_status, NULL);
+ if (err)
+ return err;
+ *state = mlx5_dpll_pin_state_get(admin_status, oper_status);
+ return 0;
+}
+
+static int mlx5_dpll_state_on_dpll_set(const struct dpll_pin *pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_pin_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_dpll *mdpll = pin_priv;
+
+ return mlx5_dpll_synce_status_set(mdpll->mdev,
+ state == DPLL_PIN_STATE_CONNECTED ?
+ MLX5_MSEES_ADMIN_STATUS_TRACK :
+ MLX5_MSEES_ADMIN_STATUS_FREE_RUNNING);
+}
+
+static const struct dpll_pin_ops mlx5_dpll_pins_ops = {
+ .direction_get = mlx5_dpll_pin_direction_get,
+ .state_on_dpll_get = mlx5_dpll_state_on_dpll_get,
+ .state_on_dpll_set = mlx5_dpll_state_on_dpll_set,
+};
+
+static const struct dpll_pin_properties mlx5_dpll_pin_properties = {
+ .type = DPLL_PIN_TYPE_SYNCE_ETH_PORT,
+ .capabilities = DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE,
+};
+
+#define MLX5_DPLL_PERIODIC_WORK_INTERVAL 500 /* ms */
+
+static void mlx5_dpll_periodic_work_queue(struct mlx5_dpll *mdpll)
+{
+ queue_delayed_work(mdpll->wq, &mdpll->work,
+ msecs_to_jiffies(MLX5_DPLL_PERIODIC_WORK_INTERVAL));
+}
+
+static void mlx5_dpll_periodic_work(struct work_struct *work)
+{
+ struct mlx5_dpll *mdpll = container_of(work, struct mlx5_dpll,
+ work.work);
+ enum mlx5_msees_admin_status admin_status;
+ enum mlx5_msees_oper_status oper_status;
+ enum dpll_lock_status lock_status;
+ enum dpll_pin_state pin_state;
+ bool ho_acq;
+ int err;
+
+ err = mlx5_dpll_synce_status_get(mdpll->mdev, &admin_status,
+ &oper_status, &ho_acq);
+ if (err)
+ goto err_out;
+ lock_status = mlx5_dpll_lock_status_get(oper_status, ho_acq);
+ pin_state = mlx5_dpll_pin_state_get(admin_status, oper_status);
+
+ if (!mdpll->last.valid)
+ goto invalid_out;
+
+ if (mdpll->last.lock_status != lock_status)
+ dpll_device_change_ntf(mdpll->dpll);
+ if (mdpll->last.pin_state != pin_state)
+ dpll_pin_change_ntf(mdpll->dpll_pin);
+
+invalid_out:
+ mdpll->last.lock_status = lock_status;
+ mdpll->last.pin_state = pin_state;
+ mdpll->last.valid = true;
+err_out:
+ mlx5_dpll_periodic_work_queue(mdpll);
+}
+
+static void mlx5_dpll_netdev_dpll_pin_set(struct mlx5_dpll *mdpll,
+ struct net_device *netdev)
+{
+ if (mdpll->tracking_netdev)
+ return;
+ netdev_dpll_pin_set(netdev, mdpll->dpll_pin);
+ mdpll->tracking_netdev = netdev;
+}
+
+static void mlx5_dpll_netdev_dpll_pin_clear(struct mlx5_dpll *mdpll)
+{
+ if (!mdpll->tracking_netdev)
+ return;
+ netdev_dpll_pin_clear(mdpll->tracking_netdev);
+ mdpll->tracking_netdev = NULL;
+}
+
+static int mlx5_dpll_mdev_notifier_event(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct mlx5_dpll *mdpll = container_of(nb, struct mlx5_dpll, mdev_nb);
+ struct net_device *netdev = data;
+
+ switch (event) {
+ case MLX5_DRIVER_EVENT_UPLINK_NETDEV:
+ if (netdev)
+ mlx5_dpll_netdev_dpll_pin_set(mdpll, netdev);
+ else
+ mlx5_dpll_netdev_dpll_pin_clear(mdpll);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+static void mlx5_dpll_mdev_netdev_track(struct mlx5_dpll *mdpll,
+ struct mlx5_core_dev *mdev)
+{
+ mdpll->mdev_nb.notifier_call = mlx5_dpll_mdev_notifier_event;
+ mlx5_blocking_notifier_register(mdev, &mdpll->mdev_nb);
+ mlx5_core_uplink_netdev_event_replay(mdev);
+}
+
+static void mlx5_dpll_mdev_netdev_untrack(struct mlx5_dpll *mdpll,
+ struct mlx5_core_dev *mdev)
+{
+ mlx5_blocking_notifier_unregister(mdev, &mdpll->mdev_nb);
+ mlx5_dpll_netdev_dpll_pin_clear(mdpll);
+}
+
+static int mlx5_dpll_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
+ struct mlx5_core_dev *mdev = edev->mdev;
+ struct mlx5_dpll *mdpll;
+ u64 clock_id;
+ int err;
+
+ err = mlx5_dpll_synce_status_set(mdev,
+ MLX5_MSEES_ADMIN_STATUS_FREE_RUNNING);
+ if (err)
+ return err;
+
+ err = mlx5_dpll_clock_id_get(mdev, &clock_id);
+ if (err)
+ return err;
+
+ mdpll = kzalloc(sizeof(*mdpll), GFP_KERNEL);
+ if (!mdpll)
+ return -ENOMEM;
+ mdpll->mdev = mdev;
+ auxiliary_set_drvdata(adev, mdpll);
+
+ /* Multiple mdev instances might share one DPLL device. */
+ mdpll->dpll = dpll_device_get(clock_id, 0, THIS_MODULE);
+ if (IS_ERR(mdpll->dpll)) {
+ err = PTR_ERR(mdpll->dpll);
+ goto err_free_mdpll;
+ }
+
+ err = dpll_device_register(mdpll->dpll, DPLL_TYPE_EEC,
+ &mlx5_dpll_device_ops, mdpll);
+ if (err)
+ goto err_put_dpll_device;
+
+ /* Multiple mdev instances might share one DPLL pin. */
+ mdpll->dpll_pin = dpll_pin_get(clock_id, mlx5_get_dev_index(mdev),
+ THIS_MODULE, &mlx5_dpll_pin_properties);
+ if (IS_ERR(mdpll->dpll_pin)) {
+ err = PTR_ERR(mdpll->dpll_pin);
+ goto err_unregister_dpll_device;
+ }
+
+ err = dpll_pin_register(mdpll->dpll, mdpll->dpll_pin,
+ &mlx5_dpll_pins_ops, mdpll);
+ if (err)
+ goto err_put_dpll_pin;
+
+ mdpll->wq = create_singlethread_workqueue("mlx5_dpll");
+ if (!mdpll->wq) {
+ err = -ENOMEM;
+ goto err_unregister_dpll_pin;
+ }
+
+ mlx5_dpll_mdev_netdev_track(mdpll, mdev);
+
+ INIT_DELAYED_WORK(&mdpll->work, &mlx5_dpll_periodic_work);
+ mlx5_dpll_periodic_work_queue(mdpll);
+
+ return 0;
+
+err_unregister_dpll_pin:
+ dpll_pin_unregister(mdpll->dpll, mdpll->dpll_pin,
+ &mlx5_dpll_pins_ops, mdpll);
+err_put_dpll_pin:
+ dpll_pin_put(mdpll->dpll_pin);
+err_unregister_dpll_device:
+ dpll_device_unregister(mdpll->dpll, &mlx5_dpll_device_ops, mdpll);
+err_put_dpll_device:
+ dpll_device_put(mdpll->dpll);
+err_free_mdpll:
+ kfree(mdpll);
+ return err;
+}
+
+static void mlx5_dpll_remove(struct auxiliary_device *adev)
+{
+ struct mlx5_dpll *mdpll = auxiliary_get_drvdata(adev);
+ struct mlx5_core_dev *mdev = mdpll->mdev;
+
+ cancel_delayed_work_sync(&mdpll->work);
+ mlx5_dpll_mdev_netdev_untrack(mdpll, mdev);
+ destroy_workqueue(mdpll->wq);
+ dpll_pin_unregister(mdpll->dpll, mdpll->dpll_pin,
+ &mlx5_dpll_pins_ops, mdpll);
+ dpll_pin_put(mdpll->dpll_pin);
+ dpll_device_unregister(mdpll->dpll, &mlx5_dpll_device_ops, mdpll);
+ dpll_device_put(mdpll->dpll);
+ kfree(mdpll);
+
+ mlx5_dpll_synce_status_set(mdev,
+ MLX5_MSEES_ADMIN_STATUS_FREE_RUNNING);
+}
+
+static int mlx5_dpll_suspend(struct auxiliary_device *adev, pm_message_t state)
+{
+ return 0;
+}
+
+static int mlx5_dpll_resume(struct auxiliary_device *adev)
+{
+ return 0;
+}
+
+static const struct auxiliary_device_id mlx5_dpll_id_table[] = {
+ { .name = MLX5_ADEV_NAME ".dpll", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, mlx5_dpll_id_table);
+
+static struct auxiliary_driver mlx5_dpll_driver = {
+ .name = "dpll",
+ .probe = mlx5_dpll_probe,
+ .remove = mlx5_dpll_remove,
+ .suspend = mlx5_dpll_suspend,
+ .resume = mlx5_dpll_resume,
+ .id_table = mlx5_dpll_id_table,
+};
+
+static int __init mlx5_dpll_init(void)
+{
+ return auxiliary_driver_register(&mlx5_dpll_driver);
+}
+
+static void __exit mlx5_dpll_exit(void)
+{
+ auxiliary_driver_unregister(&mlx5_dpll_driver);
+}
+
+module_init(mlx5_dpll_init);
+module_exit(mlx5_dpll_exit);
+
+MODULE_AUTHOR("Jiri Pirko <jiri@nvidia.com>");
+MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) DPLL driver");
+MODULE_LICENSE("Dual BSD/GPL");
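
Two design points in this new driver are worth pulling out. First, both dpll_device_get() and dpll_pin_get() are keyed by the MSECQ clock identity, so multiple mdev instances on the same adapter resolve to one shared DPLL device and pin, as the comments in probe note. Second, the driver polls SyncE state every 500 ms (MLX5_DPLL_PERIODIC_WORK_INTERVAL) and turns polled state into netlink notifications only on transitions; the goto-based flow in mlx5_dpll_periodic_work() is equivalent to this compare-and-notify sketch:

static void example_notify_on_change(struct mlx5_dpll *mdpll,
				     enum dpll_lock_status lock_status,
				     enum dpll_pin_state pin_state)
{
	if (mdpll->last.valid) {
		if (mdpll->last.lock_status != lock_status)
			dpll_device_change_ntf(mdpll->dpll);
		if (mdpll->last.pin_state != pin_state)
			dpll_pin_change_ntf(mdpll->dpll_pin);
	}
	/* Cache the snapshot so the next poll only reports transitions. */
	mdpll->last.lock_status = lock_status;
	mdpll->last.pin_state = pin_state;
	mdpll->last.valid = true;
}
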
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 20a6bc1a23..729a11b5fb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -141,7 +141,7 @@ struct page_pool;
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW 0x2
#define MLX5E_MIN_NUM_CHANNELS 0x1
-#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE / 2)
+#define MLX5E_MAX_NUM_CHANNELS 256
#define MLX5E_TX_CQ_POLL_BUDGET 128
#define MLX5E_TX_XSK_POLL_BUDGET 64
#define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */
@@ -168,6 +168,13 @@ struct page_pool;
#define mlx5e_state_dereference(priv, p) \
rcu_dereference_protected((p), lockdep_is_held(&(priv)->state_lock))
+enum mlx5e_devcom_events {
+ MPV_DEVCOM_MASTER_UP,
+ MPV_DEVCOM_MASTER_DOWN,
+ MPV_DEVCOM_IPSEC_MASTER_UP,
+ MPV_DEVCOM_IPSEC_MASTER_DOWN,
+};
+
static inline u8 mlx5e_get_num_lag_ports(struct mlx5_core_dev *mdev)
{
if (mlx5_lag_is_lacp_owner(mdev))
@@ -193,7 +200,8 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
{
return is_kdump_kernel() ?
MLX5E_MIN_NUM_CHANNELS :
- min_t(int, mlx5_comp_vectors_max(mdev), MLX5E_MAX_NUM_CHANNELS);
+ min3(mlx5_comp_vectors_max(mdev), (u32)MLX5E_MAX_NUM_CHANNELS,
+ (u32)(1 << MLX5_CAP_GEN(mdev, log_max_rqt_size)));
}
/* The maximum WQE size can be retrieved by max_wqe_sz_sq in
@@ -937,6 +945,7 @@ struct mlx5e_priv {
struct mlx5e_htb *htb;
struct mlx5e_mqprio_rl *mqprio_rl;
struct dentry *dfs_root;
+ struct mlx5_devcom_comp_dev *devcom;
};
struct mlx5e_dev {
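
The channel-count rework above decouples MLX5E_MAX_NUM_CHANNELS from the fixed indirection-table size and caps it at 256, while honoring the device's RQT size capability at runtime. For example, a device exposing 63 completion vectors with log_max_rqt_size = 8 gets min3(63, 256, 1 << 8) = 63 channels, while a platform with 320 vectors and log_max_rqt_size = 9 gets min3(320, 256, 512) = 256 — double the old MLX5E_INDIR_RQT_SIZE / 2 ceiling (historically 128).
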
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
index c6b6e290fd..0b1ac6e5c8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
@@ -12,11 +12,19 @@ struct mlx5e_dev *mlx5e_create_devlink(struct device *dev,
{
struct mlx5e_dev *mlx5e_dev;
struct devlink *devlink;
+ int err;
devlink = devlink_alloc_ns(&mlx5e_devlink_ops, sizeof(*mlx5e_dev),
devlink_net(priv_to_devlink(mdev)), dev);
if (!devlink)
return ERR_PTR(-ENOMEM);
+
+ err = devl_nested_devlink_set(priv_to_devlink(mdev), devlink);
+ if (err) {
+ devlink_free(devlink);
+ return ERR_PTR(err);
+ }
+
devlink_register(devlink);
return devlink_priv(devlink);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index e5a44b0b96..4d6225e0ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -150,7 +150,6 @@ struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,
struct dentry *dfs_root);
void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs);
struct mlx5e_vlan_table *mlx5e_fs_get_vlan(struct mlx5e_flow_steering *fs);
-void mlx5e_fs_set_tc(struct mlx5e_flow_steering *fs, struct mlx5e_tc_table *tc);
struct mlx5e_tc_table *mlx5e_fs_get_tc(struct mlx5e_flow_steering *fs);
struct mlx5e_l2_table *mlx5e_fs_get_l2(struct mlx5e_flow_steering *fs);
struct mlx5_flow_namespace *mlx5e_fs_get_ns(struct mlx5e_flow_steering *fs, bool egress);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
index 6f4e6c34b2..81523825fa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
@@ -5,134 +5,59 @@
#include "lib/eq.h"
#include "lib/mlx5.h"
-int mlx5e_health_fmsg_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name)
+void mlx5e_health_fmsg_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name)
{
- int err;
-
- err = devlink_fmsg_pair_nest_start(fmsg, name);
- if (err)
- return err;
-
- err = devlink_fmsg_obj_nest_start(fmsg);
- if (err)
- return err;
-
- return 0;
+ devlink_fmsg_pair_nest_start(fmsg, name);
+ devlink_fmsg_obj_nest_start(fmsg);
}
-int mlx5e_health_fmsg_named_obj_nest_end(struct devlink_fmsg *fmsg)
+void mlx5e_health_fmsg_named_obj_nest_end(struct devlink_fmsg *fmsg)
{
- int err;
-
- err = devlink_fmsg_obj_nest_end(fmsg);
- if (err)
- return err;
-
- err = devlink_fmsg_pair_nest_end(fmsg);
- if (err)
- return err;
-
- return 0;
+ devlink_fmsg_obj_nest_end(fmsg);
+ devlink_fmsg_pair_nest_end(fmsg);
}
-int mlx5e_health_cq_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg)
+void mlx5e_health_cq_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg)
{
u32 out[MLX5_ST_SZ_DW(query_cq_out)] = {};
u8 hw_status;
void *cqc;
- int err;
-
- err = mlx5_core_query_cq(cq->mdev, &cq->mcq, out);
- if (err)
- return err;
+ mlx5_core_query_cq(cq->mdev, &cq->mcq, out);
cqc = MLX5_ADDR_OF(query_cq_out, out, cq_context);
hw_status = MLX5_GET(cqc, cqc, status);
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "CQ");
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "cqn", cq->mcq.cqn);
- if (err)
- return err;
-
- err = devlink_fmsg_u8_pair_put(fmsg, "HW status", hw_status);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "ci", mlx5_cqwq_get_ci(&cq->wq));
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "size", mlx5_cqwq_get_size(&cq->wq));
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- if (err)
- return err;
-
- return 0;
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "CQ");
+ devlink_fmsg_u32_pair_put(fmsg, "cqn", cq->mcq.cqn);
+ devlink_fmsg_u8_pair_put(fmsg, "HW status", hw_status);
+ devlink_fmsg_u32_pair_put(fmsg, "ci", mlx5_cqwq_get_ci(&cq->wq));
+ devlink_fmsg_u32_pair_put(fmsg, "size", mlx5_cqwq_get_size(&cq->wq));
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
-int mlx5e_health_cq_common_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg)
+void mlx5e_health_cq_common_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg)
{
u8 cq_log_stride;
u32 cq_sz;
- int err;
cq_sz = mlx5_cqwq_get_size(&cq->wq);
cq_log_stride = mlx5_cqwq_get_log_stride_size(&cq->wq);
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "CQ");
- if (err)
- return err;
-
- err = devlink_fmsg_u64_pair_put(fmsg, "stride size", BIT(cq_log_stride));
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "size", cq_sz);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- if (err)
- return err;
-
- return 0;
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "CQ");
+ devlink_fmsg_u64_pair_put(fmsg, "stride size", BIT(cq_log_stride));
+ devlink_fmsg_u32_pair_put(fmsg, "size", cq_sz);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
-int mlx5e_health_eq_diag_fmsg(struct mlx5_eq_comp *eq, struct devlink_fmsg *fmsg)
+void mlx5e_health_eq_diag_fmsg(struct mlx5_eq_comp *eq, struct devlink_fmsg *fmsg)
{
- int err;
-
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "EQ");
- if (err)
- return err;
-
- err = devlink_fmsg_u8_pair_put(fmsg, "eqn", eq->core.eqn);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "irqn", eq->core.irqn);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "vecidx", eq->core.vecidx);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "ci", eq->core.cons_index);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "size", eq_get_size(&eq->core));
- if (err)
- return err;
-
- return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "EQ");
+ devlink_fmsg_u8_pair_put(fmsg, "eqn", eq->core.eqn);
+ devlink_fmsg_u32_pair_put(fmsg, "irqn", eq->core.irqn);
+ devlink_fmsg_u32_pair_put(fmsg, "vecidx", eq->core.vecidx);
+ devlink_fmsg_u32_pair_put(fmsg, "ci", eq->core.cons_index);
+ devlink_fmsg_u32_pair_put(fmsg, "size", eq_get_size(&eq->core));
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
void mlx5e_health_create_reporters(struct mlx5e_priv *priv)
@@ -235,23 +160,19 @@ int mlx5e_health_report(struct mlx5e_priv *priv,
}
#define MLX5_HEALTH_DEVLINK_MAX_SIZE 1024
-static int mlx5e_health_rsc_fmsg_binary(struct devlink_fmsg *fmsg,
- const void *value, u32 value_len)
+static void mlx5e_health_rsc_fmsg_binary(struct devlink_fmsg *fmsg,
+ const void *value, u32 value_len)
{
u32 data_size;
- int err = 0;
u32 offset;
for (offset = 0; offset < value_len; offset += data_size) {
data_size = value_len - offset;
if (data_size > MLX5_HEALTH_DEVLINK_MAX_SIZE)
data_size = MLX5_HEALTH_DEVLINK_MAX_SIZE;
- err = devlink_fmsg_binary_put(fmsg, value + offset, data_size);
- if (err)
- break;
+ devlink_fmsg_binary_put(fmsg, value + offset, data_size);
}
- return err;
}
int mlx5e_health_rsc_fmsg_dump(struct mlx5e_priv *priv, struct mlx5_rsc_key *key,
@@ -259,9 +180,8 @@ int mlx5e_health_rsc_fmsg_dump(struct mlx5e_priv *priv, struct mlx5_rsc_key *key
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_rsc_dump_cmd *cmd;
+ int cmd_err, err = 0;
struct page *page;
- int cmd_err, err;
- int end_err;
int size;
if (IS_ERR_OR_NULL(mdev->rsc_dump))
@@ -271,9 +191,7 @@ int mlx5e_health_rsc_fmsg_dump(struct mlx5e_priv *priv, struct mlx5_rsc_key *key
if (!page)
return -ENOMEM;
- err = devlink_fmsg_binary_pair_nest_start(fmsg, "data");
- if (err)
- goto free_page;
+ devlink_fmsg_binary_pair_nest_start(fmsg, "data");
cmd = mlx5_rsc_dump_cmd_create(mdev, key);
if (IS_ERR(cmd)) {
@@ -288,52 +206,31 @@ int mlx5e_health_rsc_fmsg_dump(struct mlx5e_priv *priv, struct mlx5_rsc_key *key
goto destroy_cmd;
}
- err = mlx5e_health_rsc_fmsg_binary(fmsg, page_address(page), size);
- if (err)
- goto destroy_cmd;
-
+ mlx5e_health_rsc_fmsg_binary(fmsg, page_address(page), size);
} while (cmd_err > 0);
destroy_cmd:
mlx5_rsc_dump_cmd_destroy(cmd);
- end_err = devlink_fmsg_binary_pair_nest_end(fmsg);
- if (end_err)
- err = end_err;
+ devlink_fmsg_binary_pair_nest_end(fmsg);
free_page:
__free_page(page);
return err;
}
-int mlx5e_health_queue_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
- int queue_idx, char *lbl)
+void mlx5e_health_queue_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
+ int queue_idx, char *lbl)
{
struct mlx5_rsc_key key = {};
- int err;
key.rsc = MLX5_SGMT_TYPE_FULL_QPC;
key.index1 = queue_idx;
key.size = PAGE_SIZE;
key.num_of_obj1 = 1;
- err = devlink_fmsg_obj_nest_start(fmsg);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, lbl);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "index", queue_idx);
- if (err)
- return err;
-
- err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- if (err)
- return err;
-
- return devlink_fmsg_obj_nest_end(fmsg);
+ devlink_fmsg_obj_nest_start(fmsg);
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, lbl);
+ devlink_fmsg_u32_pair_put(fmsg, "index", queue_idx);
+ mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ devlink_fmsg_obj_nest_end(fmsg);
}
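
One detail that survives the void conversion in this file: mlx5e_health_rsc_fmsg_binary() still slices resource dumps into MLX5_HEALTH_DEVLINK_MAX_SIZE (1024-byte) chunks, so no single fmsg binary attribute outgrows what a netlink message comfortably carries. The idiom in isolation, as a restatement rather than new code:

static void example_fmsg_put_chunked(struct devlink_fmsg *fmsg,
				     const void *buf, u32 len)
{
	u32 off, chunk;

	for (off = 0; off < len; off += chunk) {
		chunk = min_t(u32, len - off, MLX5_HEALTH_DEVLINK_MAX_SIZE);
		devlink_fmsg_binary_put(fmsg, buf + off, chunk);
	}
}
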
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
index 415840c3ef..84be3dd6f7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
@@ -20,11 +20,11 @@ void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq);
int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq);
void mlx5e_reporter_tx_ptpsq_unhealthy(struct mlx5e_ptpsq *ptpsq);
-int mlx5e_health_cq_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg);
-int mlx5e_health_cq_common_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg);
-int mlx5e_health_eq_diag_fmsg(struct mlx5_eq_comp *eq, struct devlink_fmsg *fmsg);
-int mlx5e_health_fmsg_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name);
-int mlx5e_health_fmsg_named_obj_nest_end(struct devlink_fmsg *fmsg);
+void mlx5e_health_cq_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg);
+void mlx5e_health_cq_common_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg);
+void mlx5e_health_eq_diag_fmsg(struct mlx5_eq_comp *eq, struct devlink_fmsg *fmsg);
+void mlx5e_health_fmsg_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name);
+void mlx5e_health_fmsg_named_obj_nest_end(struct devlink_fmsg *fmsg);
void mlx5e_reporter_rx_create(struct mlx5e_priv *priv);
void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv);
@@ -54,6 +54,6 @@ void mlx5e_health_destroy_reporters(struct mlx5e_priv *priv);
void mlx5e_health_channels_update(struct mlx5e_priv *priv);
int mlx5e_health_rsc_fmsg_dump(struct mlx5e_priv *priv, struct mlx5_rsc_key *key,
struct devlink_fmsg *fmsg);
-int mlx5e_health_queue_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
- int queue_idx, char *lbl);
+void mlx5e_health_queue_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
+ int queue_idx, char *lbl);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index 03b119a434..4358798d6c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -199,78 +199,38 @@ static int mlx5e_rx_reporter_recover(struct devlink_health_reporter *reporter,
mlx5e_health_recover_channels(priv);
}
-static int mlx5e_reporter_icosq_diagnose(struct mlx5e_icosq *icosq, u8 hw_state,
- struct devlink_fmsg *fmsg)
+static void mlx5e_reporter_icosq_diagnose(struct mlx5e_icosq *icosq, u8 hw_state,
+ struct devlink_fmsg *fmsg)
{
- int err;
-
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "ICOSQ");
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "sqn", icosq->sqn);
- if (err)
- return err;
-
- err = devlink_fmsg_u8_pair_put(fmsg, "HW state", hw_state);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "cc", icosq->cc);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "pc", icosq->pc);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "WQE size",
- mlx5_wq_cyc_get_size(&icosq->wq));
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "CQ");
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "cqn", icosq->cq.mcq.cqn);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "cc", icosq->cq.wq.cc);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "size", mlx5_cqwq_get_size(&icosq->cq.wq));
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- if (err)
- return err;
-
- return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "ICOSQ");
+ devlink_fmsg_u32_pair_put(fmsg, "sqn", icosq->sqn);
+ devlink_fmsg_u8_pair_put(fmsg, "HW state", hw_state);
+ devlink_fmsg_u32_pair_put(fmsg, "cc", icosq->cc);
+ devlink_fmsg_u32_pair_put(fmsg, "pc", icosq->pc);
+ devlink_fmsg_u32_pair_put(fmsg, "WQE size", mlx5_wq_cyc_get_size(&icosq->wq));
+
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "CQ");
+ devlink_fmsg_u32_pair_put(fmsg, "cqn", icosq->cq.mcq.cqn);
+ devlink_fmsg_u32_pair_put(fmsg, "cc", icosq->cq.wq.cc);
+ devlink_fmsg_u32_pair_put(fmsg, "size", mlx5_cqwq_get_size(&icosq->cq.wq));
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
-static int mlx5e_health_rq_put_sw_state(struct devlink_fmsg *fmsg, struct mlx5e_rq *rq)
+static void mlx5e_health_rq_put_sw_state(struct devlink_fmsg *fmsg, struct mlx5e_rq *rq)
{
- int err;
int i;
BUILD_BUG_ON_MSG(ARRAY_SIZE(rq_sw_state_type_name) != MLX5E_NUM_RQ_STATES,
"rq_sw_state_type_name string array must be consistent with MLX5E_RQ_STATE_* enum in en.h");
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SW State");
- if (err)
- return err;
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SW State");
- for (i = 0; i < ARRAY_SIZE(rq_sw_state_type_name); ++i) {
- err = devlink_fmsg_u32_pair_put(fmsg, rq_sw_state_type_name[i],
- test_bit(i, &rq->state));
- if (err)
- return err;
- }
+ for (i = 0; i < ARRAY_SIZE(rq_sw_state_type_name); ++i)
+ devlink_fmsg_u32_pair_put(fmsg, rq_sw_state_type_name[i],
+ test_bit(i, &rq->state));
- return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
static int
@@ -291,184 +251,93 @@ mlx5e_rx_reporter_build_diagnose_output_rq_common(struct mlx5e_rq *rq,
wq_head = mlx5e_rqwq_get_head(rq);
wqe_counter = mlx5e_rqwq_get_wqe_counter(rq);
- err = devlink_fmsg_u32_pair_put(fmsg, "rqn", rq->rqn);
- if (err)
- return err;
-
- err = devlink_fmsg_u8_pair_put(fmsg, "HW state", hw_state);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "WQE counter", wqe_counter);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "posted WQEs", wqes_sz);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "cc", wq_head);
- if (err)
- return err;
-
- err = mlx5e_health_rq_put_sw_state(fmsg, rq);
- if (err)
- return err;
-
- err = mlx5e_health_cq_diag_fmsg(&rq->cq, fmsg);
- if (err)
- return err;
-
- err = mlx5e_health_eq_diag_fmsg(rq->cq.mcq.eq, fmsg);
- if (err)
- return err;
+ devlink_fmsg_u32_pair_put(fmsg, "rqn", rq->rqn);
+ devlink_fmsg_u8_pair_put(fmsg, "HW state", hw_state);
+ devlink_fmsg_u32_pair_put(fmsg, "WQE counter", wqe_counter);
+ devlink_fmsg_u32_pair_put(fmsg, "posted WQEs", wqes_sz);
+ devlink_fmsg_u32_pair_put(fmsg, "cc", wq_head);
+ mlx5e_health_rq_put_sw_state(fmsg, rq);
+ mlx5e_health_cq_diag_fmsg(&rq->cq, fmsg);
+ mlx5e_health_eq_diag_fmsg(rq->cq.mcq.eq, fmsg);
if (rq->icosq) {
struct mlx5e_icosq *icosq = rq->icosq;
u8 icosq_hw_state;
+ int err;
err = mlx5_core_query_sq_state(rq->mdev, icosq->sqn, &icosq_hw_state);
if (err)
return err;
- err = mlx5e_reporter_icosq_diagnose(icosq, icosq_hw_state, fmsg);
- if (err)
- return err;
+ mlx5e_reporter_icosq_diagnose(icosq, icosq_hw_state, fmsg);
}
return 0;
}
-static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq,
- struct devlink_fmsg *fmsg)
+static void mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq,
+ struct devlink_fmsg *fmsg)
{
- int err;
-
- err = devlink_fmsg_obj_nest_start(fmsg);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", rq->ix);
- if (err)
- return err;
-
- err = mlx5e_rx_reporter_build_diagnose_output_rq_common(rq, fmsg);
- if (err)
- return err;
-
- return devlink_fmsg_obj_nest_end(fmsg);
+ devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_u32_pair_put(fmsg, "channel ix", rq->ix);
+ mlx5e_rx_reporter_build_diagnose_output_rq_common(rq, fmsg);
+ devlink_fmsg_obj_nest_end(fmsg);
}
-static int mlx5e_rx_reporter_diagnose_generic_rq(struct mlx5e_rq *rq,
- struct devlink_fmsg *fmsg)
+static void mlx5e_rx_reporter_diagnose_generic_rq(struct mlx5e_rq *rq,
+ struct devlink_fmsg *fmsg)
{
struct mlx5e_priv *priv = rq->priv;
struct mlx5e_params *params;
u32 rq_stride, rq_sz;
bool real_time;
- int err;
params = &priv->channels.params;
rq_sz = mlx5e_rqwq_get_size(rq);
real_time = mlx5_is_real_time_rq(priv->mdev);
rq_stride = BIT(mlx5e_mpwqe_get_log_stride_size(priv->mdev, params, NULL));
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RQ");
- if (err)
- return err;
-
- err = devlink_fmsg_u8_pair_put(fmsg, "type", params->rq_wq_type);
- if (err)
- return err;
-
- err = devlink_fmsg_u64_pair_put(fmsg, "stride size", rq_stride);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "size", rq_sz);
- if (err)
- return err;
-
- err = devlink_fmsg_string_pair_put(fmsg, "ts_format", real_time ? "RT" : "FRC");
- if (err)
- return err;
-
- err = mlx5e_health_cq_common_diag_fmsg(&rq->cq, fmsg);
- if (err)
- return err;
-
- return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RQ");
+ devlink_fmsg_u8_pair_put(fmsg, "type", params->rq_wq_type);
+ devlink_fmsg_u64_pair_put(fmsg, "stride size", rq_stride);
+ devlink_fmsg_u32_pair_put(fmsg, "size", rq_sz);
+ devlink_fmsg_string_pair_put(fmsg, "ts_format", real_time ? "RT" : "FRC");
+ mlx5e_health_cq_common_diag_fmsg(&rq->cq, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
-static int
+static void
mlx5e_rx_reporter_diagnose_common_ptp_config(struct mlx5e_priv *priv, struct mlx5e_ptp *ptp_ch,
struct devlink_fmsg *fmsg)
{
- int err;
-
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "PTP");
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "filter_type", priv->tstamp.rx_filter);
- if (err)
- return err;
-
- err = mlx5e_rx_reporter_diagnose_generic_rq(&ptp_ch->rq, fmsg);
- if (err)
- return err;
-
- return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "PTP");
+ devlink_fmsg_u32_pair_put(fmsg, "filter_type", priv->tstamp.rx_filter);
+ mlx5e_rx_reporter_diagnose_generic_rq(&ptp_ch->rq, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
-static int
+static void
mlx5e_rx_reporter_diagnose_common_config(struct devlink_health_reporter *reporter,
struct devlink_fmsg *fmsg)
{
struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
struct mlx5e_rq *generic_rq = &priv->channels.c[0]->rq;
struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
- int err;
-
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common config");
- if (err)
- return err;
-
- err = mlx5e_rx_reporter_diagnose_generic_rq(generic_rq, fmsg);
- if (err)
- return err;
- if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state)) {
- err = mlx5e_rx_reporter_diagnose_common_ptp_config(priv, ptp_ch, fmsg);
- if (err)
- return err;
- }
-
- return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common config");
+ mlx5e_rx_reporter_diagnose_generic_rq(generic_rq, fmsg);
+ if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state))
+ mlx5e_rx_reporter_diagnose_common_ptp_config(priv, ptp_ch, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
-static int mlx5e_rx_reporter_build_diagnose_output_ptp_rq(struct mlx5e_rq *rq,
- struct devlink_fmsg *fmsg)
+static void mlx5e_rx_reporter_build_diagnose_output_ptp_rq(struct mlx5e_rq *rq,
+ struct devlink_fmsg *fmsg)
{
- int err;
-
- err = devlink_fmsg_obj_nest_start(fmsg);
- if (err)
- return err;
-
- err = devlink_fmsg_string_pair_put(fmsg, "channel", "ptp");
- if (err)
- return err;
-
- err = mlx5e_rx_reporter_build_diagnose_output_rq_common(rq, fmsg);
- if (err)
- return err;
-
- err = devlink_fmsg_obj_nest_end(fmsg);
- if (err)
- return err;
-
- return 0;
+ devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_string_pair_put(fmsg, "channel", "ptp");
+ mlx5e_rx_reporter_build_diagnose_output_rq_common(rq, fmsg);
+ devlink_fmsg_obj_nest_end(fmsg);
}
static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
@@ -477,20 +346,15 @@ static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
{
struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
- int i, err = 0;
+ int i;
mutex_lock(&priv->state_lock);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
goto unlock;
- err = mlx5e_rx_reporter_diagnose_common_config(reporter, fmsg);
- if (err)
- goto unlock;
-
- err = devlink_fmsg_arr_pair_nest_start(fmsg, "RQs");
- if (err)
- goto unlock;
+ mlx5e_rx_reporter_diagnose_common_config(reporter, fmsg);
+ devlink_fmsg_arr_pair_nest_start(fmsg, "RQs");
for (i = 0; i < priv->channels.num; i++) {
struct mlx5e_channel *c = priv->channels.c[i];
@@ -499,19 +363,14 @@ static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
rq = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state) ?
&c->xskrq : &c->rq;
- err = mlx5e_rx_reporter_build_diagnose_output(rq, fmsg);
- if (err)
- goto unlock;
- }
- if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state)) {
- err = mlx5e_rx_reporter_build_diagnose_output_ptp_rq(&ptp_ch->rq, fmsg);
- if (err)
- goto unlock;
+ mlx5e_rx_reporter_build_diagnose_output(rq, fmsg);
}
- err = devlink_fmsg_arr_pair_nest_end(fmsg);
+ if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state))
+ mlx5e_rx_reporter_build_diagnose_output_ptp_rq(&ptp_ch->rq, fmsg);
+ devlink_fmsg_arr_pair_nest_end(fmsg);
unlock:
mutex_unlock(&priv->state_lock);
- return err;
+ return 0;
}
static int mlx5e_rx_reporter_dump_icosq(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
@@ -519,61 +378,34 @@ static int mlx5e_rx_reporter_dump_icosq(struct mlx5e_priv *priv, struct devlink_
{
struct mlx5e_txqsq *icosq = ctx;
struct mlx5_rsc_key key = {};
- int err;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return 0;
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice");
- if (err)
- return err;
-
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice");
key.size = PAGE_SIZE;
key.rsc = MLX5_SGMT_TYPE_SX_SLICE_ALL;
- err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "ICOSQ");
- if (err)
- return err;
+ mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC");
- if (err)
- return err;
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "ICOSQ");
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC");
key.rsc = MLX5_SGMT_TYPE_FULL_QPC;
key.index1 = icosq->sqn;
key.num_of_obj1 = 1;
+ mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "send_buff");
- if (err)
- return err;
-
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "send_buff");
key.rsc = MLX5_SGMT_TYPE_SND_BUFF;
key.num_of_obj2 = MLX5_RSC_DUMP_ALL;
+ mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- if (err)
- return err;
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ return 0;
}
static int mlx5e_rx_reporter_dump_rq(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
@@ -581,60 +413,34 @@ static int mlx5e_rx_reporter_dump_rq(struct mlx5e_priv *priv, struct devlink_fms
{
struct mlx5_rsc_key key = {};
struct mlx5e_rq *rq = ctx;
- int err;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return 0;
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RX Slice");
- if (err)
- return err;
-
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RX Slice");
key.size = PAGE_SIZE;
key.rsc = MLX5_SGMT_TYPE_RX_SLICE_ALL;
- err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RQ");
- if (err)
- return err;
+ mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC");
- if (err)
- return err;
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RQ");
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC");
key.rsc = MLX5_SGMT_TYPE_FULL_QPC;
key.index1 = rq->rqn;
key.num_of_obj1 = 1;
+ mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "receive_buff");
- if (err)
- return err;
-
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "receive_buff");
key.rsc = MLX5_SGMT_TYPE_RCV_BUFF;
key.num_of_obj2 = MLX5_RSC_DUMP_ALL;
- err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
- if (err)
- return err;
+ mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- if (err)
- return err;
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ return 0;
}
static int mlx5e_rx_reporter_dump_all_rqs(struct mlx5e_priv *priv,
@@ -642,44 +448,28 @@ static int mlx5e_rx_reporter_dump_all_rqs(struct mlx5e_priv *priv,
{
struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
struct mlx5_rsc_key key = {};
- int i, err;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return 0;
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RX Slice");
- if (err)
- return err;
-
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RX Slice");
key.size = PAGE_SIZE;
key.rsc = MLX5_SGMT_TYPE_RX_SLICE_ALL;
- err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- if (err)
- return err;
-
- err = devlink_fmsg_arr_pair_nest_start(fmsg, "RQs");
- if (err)
- return err;
+ mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ devlink_fmsg_arr_pair_nest_start(fmsg, "RQs");
- for (i = 0; i < priv->channels.num; i++) {
+ for (int i = 0; i < priv->channels.num; i++) {
struct mlx5e_rq *rq = &priv->channels.c[i]->rq;
- err = mlx5e_health_queue_dump(priv, fmsg, rq->rqn, "RQ");
- if (err)
- return err;
+ mlx5e_health_queue_dump(priv, fmsg, rq->rqn, "RQ");
}
- if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state)) {
- err = mlx5e_health_queue_dump(priv, fmsg, ptp_ch->rq.rqn, "PTP RQ");
- if (err)
- return err;
- }
+ if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state))
+ mlx5e_health_queue_dump(priv, fmsg, ptp_ch->rq.rqn, "PTP RQ");
- return devlink_fmsg_arr_pair_nest_end(fmsg);
+ devlink_fmsg_arr_pair_nest_end(fmsg);
+ return 0;
}
static int mlx5e_rx_reporter_dump_from_ctx(struct mlx5e_priv *priv,
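The bulk of this file's diff is one mechanical conversion: the devlink_fmsg_* formatters, and the mlx5e_health_* helpers layered on them, now return void. A formatting failure is latched inside the struct devlink_fmsg and reported once by the devlink core after the callback returns, which is what lets every per-call "if (err) return err" disappear. A minimal sketch of the resulting callback shape; my_rq_diagnose() and the priv fields are hypothetical stand-ins, while the devlink_fmsg_* calls are the ones used throughout the hunks above:

static int my_rq_diagnose(struct devlink_health_reporter *reporter,
                          struct devlink_fmsg *fmsg,
                          struct netlink_ext_ack *extack)
{
        struct my_priv *priv = devlink_health_reporter_priv(reporter);

        /* None of these can fail individually anymore; the first
         * formatting error is remembered inside fmsg and surfaced
         * by the devlink core once the callback returns. */
        devlink_fmsg_obj_nest_start(fmsg);
        devlink_fmsg_u32_pair_put(fmsg, "channel ix", priv->ix);
        devlink_fmsg_bool_pair_put(fmsg, "stopped", priv->stopped);
        devlink_fmsg_obj_nest_end(fmsg);
        return 0;
}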
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index ff8242f67c..6b44ddce14 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -50,25 +50,19 @@ static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq)
sq->pc = 0;
}
-static int mlx5e_health_sq_put_sw_state(struct devlink_fmsg *fmsg, struct mlx5e_txqsq *sq)
+static void mlx5e_health_sq_put_sw_state(struct devlink_fmsg *fmsg, struct mlx5e_txqsq *sq)
{
- int err;
int i;
BUILD_BUG_ON_MSG(ARRAY_SIZE(sq_sw_state_type_name) != MLX5E_NUM_SQ_STATES,
"sq_sw_state_type_name string array must be consistent with MLX5E_SQ_STATE_* enum in en.h");
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SW State");
- if (err)
- return err;
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SW State");
- for (i = 0; i < ARRAY_SIZE(sq_sw_state_type_name); ++i) {
- err = devlink_fmsg_u32_pair_put(fmsg, sq_sw_state_type_name[i],
- test_bit(i, &sq->state));
- if (err)
- return err;
- }
+ for (i = 0; i < ARRAY_SIZE(sq_sw_state_type_name); ++i)
+ devlink_fmsg_u32_pair_put(fmsg, sq_sw_state_type_name[i],
+ test_bit(i, &sq->state));
- return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
static int mlx5e_tx_reporter_err_cqe_recover(void *ctx)
@@ -220,7 +214,7 @@ static int mlx5e_tx_reporter_recover(struct devlink_health_reporter *reporter,
mlx5e_health_recover_channels(priv);
}
-static int
+static void
mlx5e_tx_reporter_build_diagnose_output_sq_common(struct devlink_fmsg *fmsg,
struct mlx5e_txqsq *sq, int tc)
{
@@ -229,164 +223,71 @@ mlx5e_tx_reporter_build_diagnose_output_sq_common(struct devlink_fmsg *fmsg,
u8 state;
int err;
- err = mlx5_core_query_sq_state(priv->mdev, sq->sqn, &state);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "tc", tc);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "txq ix", sq->txq_ix);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "sqn", sq->sqn);
- if (err)
- return err;
-
- err = devlink_fmsg_u8_pair_put(fmsg, "HW state", state);
- if (err)
- return err;
-
- err = devlink_fmsg_bool_pair_put(fmsg, "stopped", stopped);
- if (err)
- return err;
+ devlink_fmsg_u32_pair_put(fmsg, "tc", tc);
+ devlink_fmsg_u32_pair_put(fmsg, "txq ix", sq->txq_ix);
+ devlink_fmsg_u32_pair_put(fmsg, "sqn", sq->sqn);
- err = devlink_fmsg_u32_pair_put(fmsg, "cc", sq->cc);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "pc", sq->pc);
- if (err)
- return err;
-
- err = mlx5e_health_sq_put_sw_state(fmsg, sq);
- if (err)
- return err;
-
- err = mlx5e_health_cq_diag_fmsg(&sq->cq, fmsg);
- if (err)
- return err;
-
- return mlx5e_health_eq_diag_fmsg(sq->cq.mcq.eq, fmsg);
+ err = mlx5_core_query_sq_state(priv->mdev, sq->sqn, &state);
+ if (!err)
+ devlink_fmsg_u8_pair_put(fmsg, "HW state", state);
+
+ devlink_fmsg_bool_pair_put(fmsg, "stopped", stopped);
+ devlink_fmsg_u32_pair_put(fmsg, "cc", sq->cc);
+ devlink_fmsg_u32_pair_put(fmsg, "pc", sq->pc);
+ mlx5e_health_sq_put_sw_state(fmsg, sq);
+ mlx5e_health_cq_diag_fmsg(&sq->cq, fmsg);
+ mlx5e_health_eq_diag_fmsg(sq->cq.mcq.eq, fmsg);
}
-static int
+static void
mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg,
struct mlx5e_txqsq *sq, int tc)
{
- int err;
-
- err = devlink_fmsg_obj_nest_start(fmsg);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", sq->ch_ix);
- if (err)
- return err;
-
- err = mlx5e_tx_reporter_build_diagnose_output_sq_common(fmsg, sq, tc);
- if (err)
- return err;
-
- err = devlink_fmsg_obj_nest_end(fmsg);
- if (err)
- return err;
-
- return 0;
+ devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_u32_pair_put(fmsg, "channel ix", sq->ch_ix);
+ mlx5e_tx_reporter_build_diagnose_output_sq_common(fmsg, sq, tc);
+ devlink_fmsg_obj_nest_end(fmsg);
}
-static int
+static void
mlx5e_tx_reporter_build_diagnose_output_ptpsq(struct devlink_fmsg *fmsg,
struct mlx5e_ptpsq *ptpsq, int tc)
{
- int err;
-
- err = devlink_fmsg_obj_nest_start(fmsg);
- if (err)
- return err;
-
- err = devlink_fmsg_string_pair_put(fmsg, "channel", "ptp");
- if (err)
- return err;
-
- err = mlx5e_tx_reporter_build_diagnose_output_sq_common(fmsg, &ptpsq->txqsq, tc);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Port TS");
- if (err)
- return err;
-
- err = mlx5e_health_cq_diag_fmsg(&ptpsq->ts_cq, fmsg);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- if (err)
- return err;
-
- err = devlink_fmsg_obj_nest_end(fmsg);
- if (err)
- return err;
-
- return 0;
+ devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_string_pair_put(fmsg, "channel", "ptp");
+ mlx5e_tx_reporter_build_diagnose_output_sq_common(fmsg, &ptpsq->txqsq, tc);
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Port TS");
+ mlx5e_health_cq_diag_fmsg(&ptpsq->ts_cq, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ devlink_fmsg_obj_nest_end(fmsg);
}
-static int
+static void
mlx5e_tx_reporter_diagnose_generic_txqsq(struct devlink_fmsg *fmsg,
struct mlx5e_txqsq *txqsq)
{
- u32 sq_stride, sq_sz;
- bool real_time;
- int err;
-
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ");
- if (err)
- return err;
-
- real_time = mlx5_is_real_time_sq(txqsq->mdev);
- sq_sz = mlx5_wq_cyc_get_size(&txqsq->wq);
- sq_stride = MLX5_SEND_WQE_BB;
-
- err = devlink_fmsg_u64_pair_put(fmsg, "stride size", sq_stride);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "size", sq_sz);
- if (err)
- return err;
-
- err = devlink_fmsg_string_pair_put(fmsg, "ts_format", real_time ? "RT" : "FRC");
- if (err)
- return err;
-
- err = mlx5e_health_cq_common_diag_fmsg(&txqsq->cq, fmsg);
- if (err)
- return err;
-
- return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ bool real_time = mlx5_is_real_time_sq(txqsq->mdev);
+ u32 sq_sz = mlx5_wq_cyc_get_size(&txqsq->wq);
+ u32 sq_stride = MLX5_SEND_WQE_BB;
+
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ");
+ devlink_fmsg_u64_pair_put(fmsg, "stride size", sq_stride);
+ devlink_fmsg_u32_pair_put(fmsg, "size", sq_sz);
+ devlink_fmsg_string_pair_put(fmsg, "ts_format", real_time ? "RT" : "FRC");
+ mlx5e_health_cq_common_diag_fmsg(&txqsq->cq, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
-static int
+static void
mlx5e_tx_reporter_diagnose_generic_tx_port_ts(struct devlink_fmsg *fmsg,
struct mlx5e_ptpsq *ptpsq)
{
- int err;
-
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Port TS");
- if (err)
- return err;
-
- err = mlx5e_health_cq_common_diag_fmsg(&ptpsq->ts_cq, fmsg);
- if (err)
- return err;
-
- return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Port TS");
+ mlx5e_health_cq_common_diag_fmsg(&ptpsq->ts_cq, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
-static int
+static void
mlx5e_tx_reporter_diagnose_common_config(struct devlink_health_reporter *reporter,
struct devlink_fmsg *fmsg)
{
@@ -394,39 +295,20 @@ mlx5e_tx_reporter_diagnose_common_config(struct devlink_health_reporter *reporte
struct mlx5e_txqsq *generic_sq = priv->txq2sq[0];
struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
struct mlx5e_ptpsq *generic_ptpsq;
- int err;
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common Config");
- if (err)
- return err;
-
- err = mlx5e_tx_reporter_diagnose_generic_txqsq(fmsg, generic_sq);
- if (err)
- return err;
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common Config");
+ mlx5e_tx_reporter_diagnose_generic_txqsq(fmsg, generic_sq);
if (!ptp_ch || !test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state))
goto out;
generic_ptpsq = &ptp_ch->ptpsq[0];
-
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "PTP");
- if (err)
- return err;
-
- err = mlx5e_tx_reporter_diagnose_generic_txqsq(fmsg, &generic_ptpsq->txqsq);
- if (err)
- return err;
-
- err = mlx5e_tx_reporter_diagnose_generic_tx_port_ts(fmsg, generic_ptpsq);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- if (err)
- return err;
-
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "PTP");
+ mlx5e_tx_reporter_diagnose_generic_txqsq(fmsg, &generic_ptpsq->txqsq);
+ mlx5e_tx_reporter_diagnose_generic_tx_port_ts(fmsg, generic_ptpsq);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
out:
- return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
@@ -436,20 +318,15 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
- int i, tc, err = 0;
+ int i, tc;
mutex_lock(&priv->state_lock);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
goto unlock;
- err = mlx5e_tx_reporter_diagnose_common_config(reporter, fmsg);
- if (err)
- goto unlock;
-
- err = devlink_fmsg_arr_pair_nest_start(fmsg, "SQs");
- if (err)
- goto unlock;
+ mlx5e_tx_reporter_diagnose_common_config(reporter, fmsg);
+ devlink_fmsg_arr_pair_nest_start(fmsg, "SQs");
for (i = 0; i < priv->channels.num; i++) {
struct mlx5e_channel *c = priv->channels.c[i];
@@ -457,31 +334,23 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) {
struct mlx5e_txqsq *sq = &c->sq[tc];
- err = mlx5e_tx_reporter_build_diagnose_output(fmsg, sq, tc);
- if (err)
- goto unlock;
+ mlx5e_tx_reporter_build_diagnose_output(fmsg, sq, tc);
}
}
if (!ptp_ch || !test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state))
goto close_sqs_nest;
- for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) {
- err = mlx5e_tx_reporter_build_diagnose_output_ptpsq(fmsg,
- &ptp_ch->ptpsq[tc],
- tc);
- if (err)
- goto unlock;
- }
+ for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++)
+ mlx5e_tx_reporter_build_diagnose_output_ptpsq(fmsg,
+ &ptp_ch->ptpsq[tc],
+ tc);
close_sqs_nest:
- err = devlink_fmsg_arr_pair_nest_end(fmsg);
- if (err)
- goto unlock;
-
+ devlink_fmsg_arr_pair_nest_end(fmsg);
unlock:
mutex_unlock(&priv->state_lock);
- return err;
+ return 0;
}
static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
@@ -489,60 +358,33 @@ static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fms
{
struct mlx5_rsc_key key = {};
struct mlx5e_txqsq *sq = ctx;
- int err;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return 0;
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice");
- if (err)
- return err;
-
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice");
key.size = PAGE_SIZE;
key.rsc = MLX5_SGMT_TYPE_SX_SLICE_ALL;
- err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ");
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC");
- if (err)
- return err;
+ mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ");
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC");
key.rsc = MLX5_SGMT_TYPE_FULL_QPC;
key.index1 = sq->sqn;
key.num_of_obj1 = 1;
+ mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "send_buff");
- if (err)
- return err;
-
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "send_buff");
key.rsc = MLX5_SGMT_TYPE_SND_BUFF;
key.num_of_obj2 = MLX5_RSC_DUMP_ALL;
- err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
- if (err)
- return err;
+ mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- if (err)
- return err;
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ return 0;
}
static int mlx5e_tx_reporter_timeout_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
@@ -567,28 +409,17 @@ static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
{
struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
struct mlx5_rsc_key key = {};
- int i, tc, err;
+ int i, tc;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return 0;
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice");
- if (err)
- return err;
-
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice");
key.size = PAGE_SIZE;
key.rsc = MLX5_SGMT_TYPE_SX_SLICE_ALL;
- err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- if (err)
- return err;
-
- err = devlink_fmsg_arr_pair_nest_start(fmsg, "SQs");
- if (err)
- return err;
+ mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ devlink_fmsg_arr_pair_nest_start(fmsg, "SQs");
for (i = 0; i < priv->channels.num; i++) {
struct mlx5e_channel *c = priv->channels.c[i];
@@ -596,9 +427,7 @@ static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) {
struct mlx5e_txqsq *sq = &c->sq[tc];
- err = mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "SQ");
- if (err)
- return err;
+ mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "SQ");
}
}
@@ -606,13 +435,12 @@ static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) {
struct mlx5e_txqsq *sq = &ptp_ch->ptpsq[tc].txqsq;
- err = mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "PTP SQ");
- if (err)
- return err;
+ mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "PTP SQ");
}
}
- return devlink_fmsg_arr_pair_nest_end(fmsg);
+ devlink_fmsg_arr_pair_nest_end(fmsg);
+ return 0;
}
static int mlx5e_tx_reporter_dump_from_ctx(struct mlx5e_priv *priv,
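One hunk above is behavioral rather than mechanical: mlx5_core_query_sq_state() is still fallible, and the rewritten mlx5e_tx_reporter_build_diagnose_output_sq_common() no longer aborts the whole dump when it fails. The query also moves below the first three pairs, so "tc", "txq ix" and "sqn" are now emitted even when the firmware query errors out; only the "HW state" pair becomes best-effort:

        u8 state;

        /* skip the field on query failure instead of failing the dump */
        if (!mlx5_core_query_sq_state(priv->mdev, sq->sqn, &state))
                devlink_fmsg_u8_pair_put(fmsg, "HW state", state);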
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
index b915fb29dd..7b8ff7a710 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
@@ -9,7 +9,7 @@ void mlx5e_rss_params_indir_init_uniform(struct mlx5e_rss_params_indir *indir,
{
unsigned int i;
- for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++)
+ for (i = 0; i < indir->actual_table_size; i++)
indir->table[i] = i % num_channels;
}
@@ -45,9 +45,9 @@ static int mlx5e_rqt_init(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
}
int mlx5e_rqt_init_direct(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
- bool indir_enabled, u32 init_rqn)
+ bool indir_enabled, u32 init_rqn, u32 indir_table_size)
{
- u16 max_size = indir_enabled ? MLX5E_INDIR_RQT_SIZE : 1;
+ u16 max_size = indir_enabled ? indir_table_size : 1;
return mlx5e_rqt_init(rqt, mdev, max_size, &init_rqn, 1);
}
@@ -68,11 +68,11 @@ static int mlx5e_calc_indir_rqns(u32 *rss_rqns, u32 *rqns, unsigned int num_rqns
{
unsigned int i;
- for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
+ for (i = 0; i < indir->actual_table_size; i++) {
unsigned int ix = i;
if (hfunc == ETH_RSS_HASH_XOR)
- ix = mlx5e_bits_invert(ix, ilog2(MLX5E_INDIR_RQT_SIZE));
+ ix = mlx5e_bits_invert(ix, ilog2(indir->actual_table_size));
ix = indir->table[ix];
@@ -94,7 +94,7 @@ int mlx5e_rqt_init_indir(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
u32 *rss_rqns;
int err;
- rss_rqns = kvmalloc_array(MLX5E_INDIR_RQT_SIZE, sizeof(*rss_rqns), GFP_KERNEL);
+ rss_rqns = kvmalloc_array(indir->actual_table_size, sizeof(*rss_rqns), GFP_KERNEL);
if (!rss_rqns)
return -ENOMEM;
@@ -102,13 +102,25 @@ int mlx5e_rqt_init_indir(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
if (err)
goto out;
- err = mlx5e_rqt_init(rqt, mdev, MLX5E_INDIR_RQT_SIZE, rss_rqns, MLX5E_INDIR_RQT_SIZE);
+ err = mlx5e_rqt_init(rqt, mdev, indir->max_table_size, rss_rqns,
+ indir->actual_table_size);
out:
kvfree(rss_rqns);
return err;
}
+#define MLX5E_UNIFORM_SPREAD_RQT_FACTOR 2
+
+u32 mlx5e_rqt_size(struct mlx5_core_dev *mdev, unsigned int num_channels)
+{
+ u32 rqt_size = max_t(u32, MLX5E_INDIR_MIN_RQT_SIZE,
+ roundup_pow_of_two(num_channels * MLX5E_UNIFORM_SPREAD_RQT_FACTOR));
+ u32 max_cap_rqt_size = 1 << MLX5_CAP_GEN(mdev, log_max_rqt_size);
+
+ return min_t(u32, rqt_size, max_cap_rqt_size);
+}
+
void mlx5e_rqt_destroy(struct mlx5e_rqt *rqt)
{
mlx5_core_destroy_rqt(rqt->mdev, rqt->rqtn);
@@ -151,10 +163,10 @@ int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int num_
u32 *rss_rqns;
int err;
- if (WARN_ON(rqt->size != MLX5E_INDIR_RQT_SIZE))
+ if (WARN_ON(rqt->size != indir->max_table_size))
return -EINVAL;
- rss_rqns = kvmalloc_array(MLX5E_INDIR_RQT_SIZE, sizeof(*rss_rqns), GFP_KERNEL);
+ rss_rqns = kvmalloc_array(indir->actual_table_size, sizeof(*rss_rqns), GFP_KERNEL);
if (!rss_rqns)
return -ENOMEM;
@@ -162,7 +174,7 @@ int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int num_
if (err)
goto out;
- err = mlx5e_rqt_redirect(rqt, rss_rqns, MLX5E_INDIR_RQT_SIZE);
+ err = mlx5e_rqt_redirect(rqt, rss_rqns, indir->actual_table_size);
out:
kvfree(rss_rqns);
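The new mlx5e_rqt_size() helper ends the fixed 256-entry indirection table: it returns max(MLX5E_INDIR_MIN_RQT_SIZE, roundup_pow_of_two(2 * num_channels)), clamped to the device's 1 << log_max_rqt_size capability. Worked through: 24 channels gives 2 * 24 = 48, rounded up to 64, which loses to the 256-entry floor, so nothing changes for small setups; 192 channels gives 384, rounded up to 512, so the table grows to 512 entries unless the device cap is lower. Judging by the macro name, the factor of 2 is there to keep the RQ spread close to uniform when the channel count is not a power of two.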
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h
index 60c985a12f..77fba3ebd1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h
@@ -6,12 +6,14 @@
#include <linux/kernel.h>
-#define MLX5E_INDIR_RQT_SIZE (1 << 8)
+#define MLX5E_INDIR_MIN_RQT_SIZE (BIT(8))
struct mlx5_core_dev;
struct mlx5e_rss_params_indir {
- u32 table[MLX5E_INDIR_RQT_SIZE];
+ u32 *table;
+ u32 actual_table_size;
+ u32 max_table_size;
};
void mlx5e_rss_params_indir_init_uniform(struct mlx5e_rss_params_indir *indir,
@@ -24,7 +26,7 @@ struct mlx5e_rqt {
};
int mlx5e_rqt_init_direct(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
- bool indir_enabled, u32 init_rqn);
+ bool indir_enabled, u32 init_rqn, u32 indir_table_size);
int mlx5e_rqt_init_indir(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
u32 *rqns, unsigned int num_rqns,
u8 hfunc, struct mlx5e_rss_params_indir *indir);
@@ -35,6 +37,7 @@ static inline u32 mlx5e_rqt_get_rqtn(struct mlx5e_rqt *rqt)
return rqt->rqtn;
}
+u32 mlx5e_rqt_size(struct mlx5_core_dev *mdev, unsigned int num_channels);
int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn);
int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int num_rqns,
u8 hfunc, struct mlx5e_rss_params_indir *indir);
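With the table a heap allocation, every mlx5e_rss_params_indir owner now pairs an init with a cleanup. A usage sketch assembled from the helpers this series adds in en/rss.c; mdev, nch and max_nch are assumed to come from the caller's context:

        struct mlx5e_rss_params_indir indir;
        int err;

        /* actual size follows the current channel count; max size is
         * fixed up front for the most channels the profile can reach */
        err = mlx5e_rss_params_indir_init(&indir, mdev,
                                          mlx5e_rqt_size(mdev, nch),
                                          mlx5e_rqt_size(mdev, max_nch));
        if (err)
                return err;

        mlx5e_rss_params_indir_init_uniform(&indir, nch);
        /* ... create or redirect the RQT from indir ... */
        mlx5e_rss_params_indir_cleanup(&indir);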
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
index 7f93426b88..c1545a2e8d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
@@ -81,14 +81,75 @@ struct mlx5e_rss {
refcount_t refcnt;
};
-struct mlx5e_rss *mlx5e_rss_alloc(void)
+void mlx5e_rss_params_indir_modify_actual_size(struct mlx5e_rss *rss, u32 num_channels)
{
- return kvzalloc(sizeof(struct mlx5e_rss), GFP_KERNEL);
+ rss->indir.actual_table_size = mlx5e_rqt_size(rss->mdev, num_channels);
}
-void mlx5e_rss_free(struct mlx5e_rss *rss)
+int mlx5e_rss_params_indir_init(struct mlx5e_rss_params_indir *indir, struct mlx5_core_dev *mdev,
+ u32 actual_table_size, u32 max_table_size)
{
+ indir->table = kvmalloc_array(max_table_size, sizeof(*indir->table), GFP_KERNEL);
+ if (!indir->table)
+ return -ENOMEM;
+
+ indir->max_table_size = max_table_size;
+ indir->actual_table_size = actual_table_size;
+
+ return 0;
+}
+
+void mlx5e_rss_params_indir_cleanup(struct mlx5e_rss_params_indir *indir)
+{
+ kvfree(indir->table);
+}
+
+static int mlx5e_rss_copy(struct mlx5e_rss *to, const struct mlx5e_rss *from)
+{
+ u32 *dst_indir_table;
+
+ if (to->indir.actual_table_size != from->indir.actual_table_size ||
+ to->indir.max_table_size != from->indir.max_table_size) {
+ mlx5e_rss_warn(to->mdev,
+ "Failed to copy RSS due to size mismatch, src (actual %u, max %u) != dst (actual %u, max %u)\n",
+ from->indir.actual_table_size, from->indir.max_table_size,
+ to->indir.actual_table_size, to->indir.max_table_size);
+ return -EINVAL;
+ }
+
+ dst_indir_table = to->indir.table;
+ *to = *from;
+ to->indir.table = dst_indir_table;
+ memcpy(to->indir.table, from->indir.table,
+ from->indir.actual_table_size * sizeof(*from->indir.table));
+ return 0;
+}
+
+static struct mlx5e_rss *mlx5e_rss_init_copy(const struct mlx5e_rss *from)
+{
+ struct mlx5e_rss *rss;
+ int err;
+
+ rss = kvzalloc(sizeof(*rss), GFP_KERNEL);
+ if (!rss)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx5e_rss_params_indir_init(&rss->indir, from->mdev, from->indir.actual_table_size,
+ from->indir.max_table_size);
+ if (err)
+ goto err_free_rss;
+
+ err = mlx5e_rss_copy(rss, from);
+ if (err)
+ goto err_free_indir;
+
+ return rss;
+
+err_free_indir:
+ mlx5e_rss_params_indir_cleanup(&rss->indir);
+err_free_rss:
kvfree(rss);
+ return ERR_PTR(err);
}
static void mlx5e_rss_params_init(struct mlx5e_rss *rss)
@@ -282,28 +343,43 @@ static int mlx5e_rss_update_tirs(struct mlx5e_rss *rss)
return retval;
}
-int mlx5e_rss_init_no_tirs(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
- bool inner_ft_support, u32 drop_rqn)
+static int mlx5e_rss_init_no_tirs(struct mlx5e_rss *rss)
{
- rss->mdev = mdev;
- rss->inner_ft_support = inner_ft_support;
- rss->drop_rqn = drop_rqn;
-
mlx5e_rss_params_init(rss);
refcount_set(&rss->refcnt, 1);
- return mlx5e_rqt_init_direct(&rss->rqt, mdev, true, drop_rqn);
+ return mlx5e_rqt_init_direct(&rss->rqt, rss->mdev, true,
+ rss->drop_rqn, rss->indir.max_table_size);
}
-int mlx5e_rss_init(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
- bool inner_ft_support, u32 drop_rqn,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param)
+struct mlx5e_rss *mlx5e_rss_init(struct mlx5_core_dev *mdev, bool inner_ft_support, u32 drop_rqn,
+ const struct mlx5e_packet_merge_param *init_pkt_merge_param,
+ enum mlx5e_rss_init_type type, unsigned int nch,
+ unsigned int max_nch)
{
+ struct mlx5e_rss *rss;
int err;
- err = mlx5e_rss_init_no_tirs(rss, mdev, inner_ft_support, drop_rqn);
+ rss = kvzalloc(sizeof(*rss), GFP_KERNEL);
+ if (!rss)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx5e_rss_params_indir_init(&rss->indir, mdev,
+ mlx5e_rqt_size(mdev, nch),
+ mlx5e_rqt_size(mdev, max_nch));
+ if (err)
+ goto err_free_rss;
+
+ rss->mdev = mdev;
+ rss->inner_ft_support = inner_ft_support;
+ rss->drop_rqn = drop_rqn;
+
+ err = mlx5e_rss_init_no_tirs(rss);
if (err)
- goto err_out;
+ goto err_free_indir;
+
+ if (type == MLX5E_RSS_INIT_NO_TIRS)
+ goto out;
err = mlx5e_rss_create_tirs(rss, init_pkt_merge_param, false);
if (err)
@@ -315,14 +391,18 @@ int mlx5e_rss_init(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
goto err_destroy_tirs;
}
- return 0;
+out:
+ return rss;
err_destroy_tirs:
mlx5e_rss_destroy_tirs(rss, false);
err_destroy_rqt:
mlx5e_rqt_destroy(&rss->rqt);
-err_out:
- return err;
+err_free_indir:
+ mlx5e_rss_params_indir_cleanup(&rss->indir);
+err_free_rss:
+ kvfree(rss);
+ return ERR_PTR(err);
}
int mlx5e_rss_cleanup(struct mlx5e_rss *rss)
@@ -336,6 +416,8 @@ int mlx5e_rss_cleanup(struct mlx5e_rss *rss)
mlx5e_rss_destroy_tirs(rss, true);
mlx5e_rqt_destroy(&rss->rqt);
+ mlx5e_rss_params_indir_cleanup(&rss->indir);
+ kvfree(rss);
return 0;
}
@@ -470,11 +552,9 @@ inner_tir:
int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc)
{
- unsigned int i;
-
if (indir)
- for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++)
- indir[i] = rss->indir.table[i];
+ memcpy(indir, rss->indir.table,
+ rss->indir.actual_table_size * sizeof(*rss->indir.table));
if (key)
memcpy(key, rss->hash.toeplitz_hash_key,
@@ -495,11 +575,9 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
struct mlx5e_rss *old_rss;
int err = 0;
- old_rss = mlx5e_rss_alloc();
- if (!old_rss)
- return -ENOMEM;
-
- *old_rss = *rss;
+ old_rss = mlx5e_rss_init_copy(rss);
+ if (IS_ERR(old_rss))
+ return PTR_ERR(old_rss);
if (hfunc && *hfunc != rss->hash.hfunc) {
switch (*hfunc) {
@@ -523,18 +601,16 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
}
if (indir) {
- unsigned int i;
-
changed_indir = true;
- for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++)
- rss->indir.table[i] = indir[i];
+ memcpy(rss->indir.table, indir,
+ rss->indir.actual_table_size * sizeof(*rss->indir.table));
}
if (changed_indir && rss->enabled) {
err = mlx5e_rss_apply(rss, rqns, num_rqns);
if (err) {
- *rss = *old_rss;
+ mlx5e_rss_copy(rss, old_rss);
goto out;
}
}
@@ -543,7 +619,9 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
mlx5e_rss_update_tirs(rss);
out:
- mlx5e_rss_free(old_rss);
+ mlx5e_rss_params_indir_cleanup(&old_rss->indir);
+ kvfree(old_rss);
+
return err;
}
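mlx5e_rss_set_rxfh() keeps its rollback-on-failure semantics, but a plain struct assignment would now alias one heap table between two copies, which is why mlx5e_rss_copy() saves the destination's table pointer across the *to = *from assignment and then memcpy()s the contents. The rollback skeleton, simplified from the hunks above (the real code only applies and rolls back when the indirection actually changed and RSS is enabled):

        old_rss = mlx5e_rss_init_copy(rss);     /* deep snapshot */
        if (IS_ERR(old_rss))
                return PTR_ERR(old_rss);

        /* ... install the user's new hfunc/key/indirection ... */

        err = mlx5e_rss_apply(rss, rqns, num_rqns);
        if (err)
                mlx5e_rss_copy(rss, old_rss);   /* deep restore */

        mlx5e_rss_params_indir_cleanup(&old_rss->indir);
        kvfree(old_rss);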
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
index c6b2164163..d1d0bc350e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
@@ -8,18 +8,24 @@
#include "tir.h"
#include "fs.h"
+enum mlx5e_rss_init_type {
+ MLX5E_RSS_INIT_NO_TIRS = 0,
+ MLX5E_RSS_INIT_TIRS
+};
+
struct mlx5e_rss_params_traffic_type
mlx5e_rss_get_default_tt_config(enum mlx5_traffic_types tt);
struct mlx5e_rss;
-struct mlx5e_rss *mlx5e_rss_alloc(void);
-void mlx5e_rss_free(struct mlx5e_rss *rss);
-int mlx5e_rss_init(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
- bool inner_ft_support, u32 drop_rqn,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param);
-int mlx5e_rss_init_no_tirs(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
- bool inner_ft_support, u32 drop_rqn);
+int mlx5e_rss_params_indir_init(struct mlx5e_rss_params_indir *indir, struct mlx5_core_dev *mdev,
+ u32 actual_table_size, u32 max_table_size);
+void mlx5e_rss_params_indir_cleanup(struct mlx5e_rss_params_indir *indir);
+void mlx5e_rss_params_indir_modify_actual_size(struct mlx5e_rss *rss, u32 num_channels);
+struct mlx5e_rss *mlx5e_rss_init(struct mlx5_core_dev *mdev, bool inner_ft_support, u32 drop_rqn,
+ const struct mlx5e_packet_merge_param *init_pkt_merge_param,
+ enum mlx5e_rss_init_type type, unsigned int nch,
+ unsigned int max_nch);
int mlx5e_rss_cleanup(struct mlx5e_rss *rss);
void mlx5e_rss_refcnt_inc(struct mlx5e_rss *rss);
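At the header level, mlx5e_rss_alloc/free and the two init variants collapse into one constructor whose enum argument says whether TIRs are created up front. The call sites in rx_res.c below show the intended split: MLX5E_RSS_INIT_TIRS for the default RSS context and MLX5E_RSS_INIT_NO_TIRS for the additional contexts created through ethtool, mirroring the old mlx5e_rss_init() versus mlx5e_rss_init_no_tirs() pair.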
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
index 56e6b8c750..b23e224e37 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
@@ -18,7 +18,7 @@ struct mlx5e_rx_res {
struct mlx5e_rss *rss[MLX5E_MAX_NUM_RSS];
bool rss_active;
- u32 rss_rqns[MLX5E_INDIR_RQT_SIZE];
+ u32 *rss_rqns;
unsigned int rss_nch;
struct {
@@ -34,41 +34,42 @@ struct mlx5e_rx_res {
/* API for rx_res_rss_* */
+void mlx5e_rx_res_rss_update_num_channels(struct mlx5e_rx_res *res, u32 nch)
+{
+ int i;
+
+ for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) {
+ if (res->rss[i])
+ mlx5e_rss_params_indir_modify_actual_size(res->rss[i], nch);
+ }
+}
+
static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res,
unsigned int init_nch)
{
bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
struct mlx5e_rss *rss;
- int err;
if (WARN_ON(res->rss[0]))
return -EINVAL;
- rss = mlx5e_rss_alloc();
- if (!rss)
- return -ENOMEM;
-
- err = mlx5e_rss_init(rss, res->mdev, inner_ft_support, res->drop_rqn,
- &res->pkt_merge_param);
- if (err)
- goto err_rss_free;
+ rss = mlx5e_rss_init(res->mdev, inner_ft_support, res->drop_rqn,
+ &res->pkt_merge_param, MLX5E_RSS_INIT_TIRS, init_nch, res->max_nch);
+ if (IS_ERR(rss))
+ return PTR_ERR(rss);
mlx5e_rss_set_indir_uniform(rss, init_nch);
res->rss[0] = rss;
return 0;
-
-err_rss_free:
- mlx5e_rss_free(rss);
- return err;
}
int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int init_nch)
{
bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
struct mlx5e_rss *rss;
- int err, i;
+ int i;
for (i = 1; i < MLX5E_MAX_NUM_RSS; i++)
if (!res->rss[i])
@@ -77,13 +78,11 @@ int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int i
if (i == MLX5E_MAX_NUM_RSS)
return -ENOSPC;
- rss = mlx5e_rss_alloc();
- if (!rss)
- return -ENOMEM;
-
- err = mlx5e_rss_init_no_tirs(rss, res->mdev, inner_ft_support, res->drop_rqn);
- if (err)
- goto err_rss_free;
+ rss = mlx5e_rss_init(res->mdev, inner_ft_support, res->drop_rqn,
+ &res->pkt_merge_param, MLX5E_RSS_INIT_NO_TIRS, init_nch,
+ res->max_nch);
+ if (IS_ERR(rss))
+ return PTR_ERR(rss);
mlx5e_rss_set_indir_uniform(rss, init_nch);
if (res->rss_active)
@@ -93,10 +92,6 @@ int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int i
*rss_idx = i;
return 0;
-
-err_rss_free:
- mlx5e_rss_free(rss);
- return err;
}
static int __mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx)
@@ -108,7 +103,6 @@ static int __mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx)
if (err)
return err;
- mlx5e_rss_free(rss);
res->rss[rss_idx] = NULL;
return 0;
@@ -284,9 +278,27 @@ struct mlx5e_rss *mlx5e_rx_res_rss_get(struct mlx5e_rx_res *res, u32 rss_idx)
/* End of API rx_res_rss_* */
-struct mlx5e_rx_res *mlx5e_rx_res_alloc(void)
+static void mlx5e_rx_res_free(struct mlx5e_rx_res *res)
{
- return kvzalloc(sizeof(struct mlx5e_rx_res), GFP_KERNEL);
+ kvfree(res->rss_rqns);
+ kvfree(res);
+}
+
+static struct mlx5e_rx_res *mlx5e_rx_res_alloc(struct mlx5_core_dev *mdev, unsigned int max_nch)
+{
+ struct mlx5e_rx_res *rx_res;
+
+ rx_res = kvzalloc(sizeof(*rx_res), GFP_KERNEL);
+ if (!rx_res)
+ return NULL;
+
+ rx_res->rss_rqns = kvcalloc(max_nch, sizeof(*rx_res->rss_rqns), GFP_KERNEL);
+ if (!rx_res->rss_rqns) {
+ kvfree(rx_res);
+ return NULL;
+ }
+
+ return rx_res;
}
static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res)
@@ -308,7 +320,8 @@ static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res)
for (ix = 0; ix < res->max_nch; ix++) {
err = mlx5e_rqt_init_direct(&res->channels[ix].direct_rqt,
- res->mdev, false, res->drop_rqn);
+ res->mdev, false, res->drop_rqn,
+ mlx5e_rqt_size(res->mdev, res->max_nch));
if (err) {
mlx5_core_warn(res->mdev, "Failed to create a direct RQT: err = %d, ix = %u\n",
err, ix);
@@ -362,7 +375,8 @@ static int mlx5e_rx_res_ptp_init(struct mlx5e_rx_res *res)
if (!builder)
return -ENOMEM;
- err = mlx5e_rqt_init_direct(&res->ptp.rqt, res->mdev, false, res->drop_rqn);
+ err = mlx5e_rqt_init_direct(&res->ptp.rqt, res->mdev, false, res->drop_rqn,
+ mlx5e_rqt_size(res->mdev, res->max_nch));
if (err)
goto out;
@@ -404,13 +418,19 @@ static void mlx5e_rx_res_ptp_destroy(struct mlx5e_rx_res *res)
mlx5e_rqt_destroy(&res->ptp.rqt);
}
-int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
- enum mlx5e_rx_res_features features, unsigned int max_nch,
- u32 drop_rqn, const struct mlx5e_packet_merge_param *init_pkt_merge_param,
- unsigned int init_nch)
+struct mlx5e_rx_res *
+mlx5e_rx_res_create(struct mlx5_core_dev *mdev, enum mlx5e_rx_res_features features,
+ unsigned int max_nch, u32 drop_rqn,
+ const struct mlx5e_packet_merge_param *init_pkt_merge_param,
+ unsigned int init_nch)
{
+ struct mlx5e_rx_res *res;
int err;
+ res = mlx5e_rx_res_alloc(mdev, max_nch);
+ if (!res)
+ return ERR_PTR(-ENOMEM);
+
res->mdev = mdev;
res->features = features;
res->max_nch = max_nch;
@@ -421,7 +441,7 @@ int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
err = mlx5e_rx_res_rss_init_def(res, init_nch);
if (err)
- goto err_out;
+ goto err_rx_res_free;
err = mlx5e_rx_res_channels_init(res);
if (err)
@@ -431,14 +451,15 @@ int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
if (err)
goto err_channels_destroy;
- return 0;
+ return res;
err_channels_destroy:
mlx5e_rx_res_channels_destroy(res);
err_rss_destroy:
__mlx5e_rx_res_rss_destroy(res, 0);
-err_out:
- return err;
+err_rx_res_free:
+ mlx5e_rx_res_free(res);
+ return ERR_PTR(err);
}
void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res)
@@ -446,11 +467,7 @@ void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res)
mlx5e_rx_res_ptp_destroy(res);
mlx5e_rx_res_channels_destroy(res);
mlx5e_rx_res_rss_destroy_all(res);
-}
-
-void mlx5e_rx_res_free(struct mlx5e_rx_res *res)
-{
- kvfree(res);
+ mlx5e_rx_res_free(res);
}
u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix)
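Same constructor treatment one level up: mlx5e_rx_res_create() allocates the object together with its max_nch-sized rss_rqns array (previously a fixed MLX5E_INDIR_RQT_SIZE array) and returns an ERR_PTR on failure, while mlx5e_rx_res_destroy() now also frees it. Caller-side sketch; the argument values are whatever the caller already holds:

        struct mlx5e_rx_res *res;

        res = mlx5e_rx_res_create(mdev, features, max_nch, drop_rqn,
                                  &pkt_merge_param, init_nch);
        if (IS_ERR(res))
                return PTR_ERR(res);

        /* ... steer traffic via res ... */

        mlx5e_rx_res_destroy(res);      /* tears down and frees res */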
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
index 580fe8bc3c..82aaba8a82 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
@@ -21,13 +21,12 @@ enum mlx5e_rx_res_features {
};
/* Setup */
-struct mlx5e_rx_res *mlx5e_rx_res_alloc(void);
-int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
- enum mlx5e_rx_res_features features, unsigned int max_nch,
- u32 drop_rqn, const struct mlx5e_packet_merge_param *init_pkt_merge_param,
- unsigned int init_nch);
+struct mlx5e_rx_res *
+mlx5e_rx_res_create(struct mlx5_core_dev *mdev, enum mlx5e_rx_res_features features,
+ unsigned int max_nch, u32 drop_rqn,
+ const struct mlx5e_packet_merge_param *init_pkt_merge_param,
+ unsigned int init_nch);
void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res);
-void mlx5e_rx_res_free(struct mlx5e_rx_res *res);
/* TIRN getters for flow steering */
u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix);
@@ -60,6 +59,7 @@ int mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx);
int mlx5e_rx_res_rss_cnt(struct mlx5e_rx_res *res);
int mlx5e_rx_res_rss_index(struct mlx5e_rx_res *res, struct mlx5e_rss *rss);
struct mlx5e_rss *mlx5e_rx_res_rss_get(struct mlx5e_rx_res *res, u32 rss_idx);
+void mlx5e_rx_res_rss_update_num_channels(struct mlx5e_rx_res *res, u32 nch);
/* Workaround for hairpin */
struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res *res);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 00a04fdd75..8dfb57f712 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -302,6 +302,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
e->encap_size = ipv4_encap_size;
e->encap_header = encap_header;
+ encap_header = NULL;
if (!(nud_state & NUD_VALID)) {
neigh_event_send(attr.n, NULL);
@@ -313,8 +314,8 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
memset(&reformat_params, 0, sizeof(reformat_params));
reformat_params.type = e->reformat_type;
- reformat_params.size = ipv4_encap_size;
- reformat_params.data = encap_header;
+ reformat_params.size = e->encap_size;
+ reformat_params.data = e->encap_header;
e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(e->pkt_reformat)) {
@@ -407,6 +408,7 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
e->encap_size = ipv4_encap_size;
kfree(e->encap_header);
e->encap_header = encap_header;
+ encap_header = NULL;
if (!(nud_state & NUD_VALID)) {
neigh_event_send(attr.n, NULL);
@@ -418,8 +420,8 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
memset(&reformat_params, 0, sizeof(reformat_params));
reformat_params.type = e->reformat_type;
- reformat_params.size = ipv4_encap_size;
- reformat_params.data = encap_header;
+ reformat_params.size = e->encap_size;
+ reformat_params.data = e->encap_header;
e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(e->pkt_reformat)) {
@@ -570,6 +572,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
e->encap_size = ipv6_encap_size;
e->encap_header = encap_header;
+ encap_header = NULL;
if (!(nud_state & NUD_VALID)) {
neigh_event_send(attr.n, NULL);
@@ -581,8 +584,8 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
memset(&reformat_params, 0, sizeof(reformat_params));
reformat_params.type = e->reformat_type;
- reformat_params.size = ipv6_encap_size;
- reformat_params.data = encap_header;
+ reformat_params.size = e->encap_size;
+ reformat_params.data = e->encap_header;
e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(e->pkt_reformat)) {
@@ -674,6 +677,7 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
e->encap_size = ipv6_encap_size;
kfree(e->encap_header);
e->encap_header = encap_header;
+ encap_header = NULL;
if (!(nud_state & NUD_VALID)) {
neigh_event_send(attr.n, NULL);
@@ -685,8 +689,8 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
memset(&reformat_params, 0, sizeof(reformat_params));
reformat_params.type = e->reformat_type;
- reformat_params.size = ipv6_encap_size;
- reformat_params.data = encap_header;
+ reformat_params.size = e->encap_size;
+ reformat_params.data = e->encap_header;
e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(e->pkt_reformat)) {
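All four tc_tun hunks apply the same two-part fix: the local encap_header pointer is cleared the moment ownership moves into e->encap_header, and the reformat parameters are then read back from e instead of the stale locals. Clearing the local is what keeps the shared error path safe; a sketch of the idiom, with release_neigh as a hypothetical unwind label since the real labels sit outside these hunks:

        e->encap_size = ipv4_encap_size;
        e->encap_header = encap_header;
        encap_header = NULL;            /* 'e' owns the buffer now */
        ...
release_neigh:
        kfree(encap_header);            /* NULL, hence a no-op, once
                                         * ownership moved, so
                                         * e->encap_header cannot be
                                         * freed twice */
        return err;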
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index b723ff5e52..13c7ed1bb3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -895,7 +895,7 @@ void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq)
mlx5e_xmit_xdp_doorbell(xdpsq);
if (test_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags)) {
- xdp_do_flush_map();
+ xdp_do_flush();
__clear_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index e2ffc572de..05612d9c60 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -56,7 +56,7 @@ static struct mlx5e_ipsec_pol_entry *to_ipsec_pol_entry(struct xfrm_policy *x)
return (struct mlx5e_ipsec_pol_entry *)x->xdo.offload_handle;
}
-static void mlx5e_ipsec_handle_tx_limit(struct work_struct *_work)
+static void mlx5e_ipsec_handle_sw_limits(struct work_struct *_work)
{
struct mlx5e_ipsec_dwork *dwork =
container_of(_work, struct mlx5e_ipsec_dwork, dwork.work);
@@ -520,9 +520,15 @@ static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
return -EINVAL;
}
- if (x->lft.hard_byte_limit != XFRM_INF ||
- x->lft.soft_byte_limit != XFRM_INF) {
- NL_SET_ERR_MSG_MOD(extack, "Device doesn't support limits in bytes");
+ if (x->lft.soft_byte_limit >= x->lft.hard_byte_limit &&
+ x->lft.hard_byte_limit != XFRM_INF) {
+ /* XFRM stack doesn't prevent such configuration :(. */
+ NL_SET_ERR_MSG_MOD(extack, "Hard byte limit must be greater than soft one");
+ return -EINVAL;
+ }
+
+ if (!x->lft.soft_byte_limit || !x->lft.hard_byte_limit) {
+ NL_SET_ERR_MSG_MOD(extack, "Soft/hard byte limits can't be 0");
return -EINVAL;
}
@@ -658,11 +664,10 @@ static int mlx5e_ipsec_create_dwork(struct mlx5e_ipsec_sa_entry *sa_entry)
if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
return 0;
- if (x->xso.dir != XFRM_DEV_OFFLOAD_OUT)
- return 0;
-
if (x->lft.soft_packet_limit == XFRM_INF &&
- x->lft.hard_packet_limit == XFRM_INF)
+ x->lft.hard_packet_limit == XFRM_INF &&
+ x->lft.soft_byte_limit == XFRM_INF &&
+ x->lft.hard_byte_limit == XFRM_INF)
return 0;
dwork = kzalloc(sizeof(*dwork), GFP_KERNEL);
@@ -670,7 +675,7 @@ static int mlx5e_ipsec_create_dwork(struct mlx5e_ipsec_sa_entry *sa_entry)
return -ENOMEM;
dwork->sa_entry = sa_entry;
- INIT_DELAYED_WORK(&dwork->dwork, mlx5e_ipsec_handle_tx_limit);
+ INIT_DELAYED_WORK(&dwork->dwork, mlx5e_ipsec_handle_sw_limits);
sa_entry->dwork = dwork;
return 0;
}
@@ -884,6 +889,7 @@ void mlx5e_ipsec_init(struct mlx5e_priv *priv)
xa_init_flags(&ipsec->sadb, XA_FLAGS_ALLOC);
ipsec->mdev = priv->mdev;
+ init_completion(&ipsec->comp);
ipsec->wq = alloc_workqueue("mlx5e_ipsec: %s", WQ_UNBOUND, 0,
priv->netdev->name);
if (!ipsec->wq)
@@ -904,7 +910,7 @@ void mlx5e_ipsec_init(struct mlx5e_priv *priv)
}
ipsec->is_uplink_rep = mlx5e_is_uplink_rep(priv);
- ret = mlx5e_accel_ipsec_fs_init(ipsec);
+ ret = mlx5e_accel_ipsec_fs_init(ipsec, &priv->devcom);
if (ret)
goto err_fs_init;
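The byte-limit policy inverts here: instead of rejecting any finite byte limit, the driver accepts them subject to two sanity checks the XFRM stack itself does not enforce. A finite hard limit must strictly exceed the soft limit, and neither limit may be zero. So soft = 1 GB with hard = 2 GB is accepted; soft = 2 GB with hard = 1 GB, or soft equal to a finite hard, is rejected; a soft limit paired with hard = XFRM_INF passes. Matching that, the lifetime worker is renamed from mlx5e_ipsec_handle_tx_limit to mlx5e_ipsec_handle_sw_limits, and the XFRM_DEV_OFFLOAD_OUT-only bail-out is dropped, so the delayed work is now armed for both directions whenever any soft or hard packet or byte limit is finite.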
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 9e7c42c2f7..adaea34931 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -38,6 +38,7 @@
#include <net/xfrm.h>
#include <linux/idr.h>
#include "lib/aso.h"
+#include "lib/devcom.h"
#define MLX5E_IPSEC_SADB_RX_BITS 10
#define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L
@@ -188,11 +189,19 @@ struct mlx5e_ipsec_ft {
u32 refcnt;
};
+struct mlx5e_ipsec_drop {
+ struct mlx5_flow_handle *rule;
+ struct mlx5_fc *fc;
+};
+
struct mlx5e_ipsec_rule {
struct mlx5_flow_handle *rule;
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_pkt_reformat *pkt_reformat;
struct mlx5_fc *fc;
+ struct mlx5e_ipsec_drop replay;
+ struct mlx5e_ipsec_drop auth;
+ struct mlx5e_ipsec_drop trailer;
};
struct mlx5e_ipsec_miss {
@@ -200,19 +209,6 @@ struct mlx5e_ipsec_miss {
struct mlx5_flow_handle *rule;
};
-struct mlx5e_ipsec_rx {
- struct mlx5e_ipsec_ft ft;
- struct mlx5e_ipsec_miss pol;
- struct mlx5e_ipsec_miss sa;
- struct mlx5e_ipsec_rule status;
- struct mlx5e_ipsec_miss status_drop;
- struct mlx5_fc *status_drop_cnt;
- struct mlx5e_ipsec_fc *fc;
- struct mlx5_fs_chains *chains;
- u8 allow_tunnel_mode : 1;
- struct xarray ipsec_obj_id_map;
-};
-
struct mlx5e_ipsec_tx_create_attr {
int prio;
int pol_level;
@@ -221,12 +217,20 @@ struct mlx5e_ipsec_tx_create_attr {
enum mlx5_flow_namespace_type chains_ns;
};
+struct mlx5e_ipsec_mpv_work {
+ int event;
+ struct work_struct work;
+ struct mlx5e_priv *slave_priv;
+ struct mlx5e_priv *master_priv;
+};
+
struct mlx5e_ipsec {
struct mlx5_core_dev *mdev;
struct xarray sadb;
struct mlx5e_ipsec_sw_stats sw_stats;
struct mlx5e_ipsec_hw_stats hw_stats;
struct workqueue_struct *wq;
+ struct completion comp;
struct mlx5e_flow_steering *fs;
struct mlx5e_ipsec_rx *rx_ipv4;
struct mlx5e_ipsec_rx *rx_ipv6;
@@ -238,6 +242,8 @@ struct mlx5e_ipsec {
struct notifier_block netevent_nb;
struct mlx5_ipsec_fs *roce;
u8 is_uplink_rep: 1;
+ struct mlx5e_ipsec_mpv_work mpv_work;
+ struct xarray ipsec_obj_id_map;
};
struct mlx5e_ipsec_esn_state {
@@ -302,7 +308,7 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv);
void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv);
void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec);
-int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec);
+int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec, struct mlx5_devcom_comp_dev **devcom);
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
@@ -328,6 +334,10 @@ void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv,
void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_accel_esp_xfrm_attrs *attrs);
+void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv,
+ struct mlx5e_priv *master_priv);
+void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event);
+
static inline struct mlx5_core_dev *
mlx5e_ipsec_sa2dev(struct mlx5e_ipsec_sa_entry *sa_entry)
{
@@ -363,6 +373,15 @@ static inline u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
return 0;
}
+
+static inline void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv,
+ struct mlx5e_priv *master_priv)
+{
+}
+
+static inline void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event)
+{
+}
#endif
#endif /* __MLX5E_IPSEC_H__ */
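Two structural moves to note before the next file: struct mlx5e_ipsec_rx leaves this header because en_accel/ipsec_fs.c becomes its only user and extends it with the new status_drops bookkeeping, and the ipsec_obj_id_map xarray migrates up into struct mlx5e_ipsec, apparently so a single map serves the device rather than one per RX family. The new completion, the devcom parameter on mlx5e_accel_ipsec_fs_init() and the mpv_work struct (carrying a master/slave priv pair) plumb the mpv event handling, mlx5e_ipsec_handle_mpv_event() and mlx5e_ipsec_send_event(), added elsewhere in this merge.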
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 81e6aa6434..41a2543a52 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -32,6 +32,22 @@ struct mlx5e_ipsec_tx {
u8 allow_tunnel_mode : 1;
};
+struct mlx5e_ipsec_status_checks {
+ struct mlx5_flow_group *drop_all_group;
+ struct mlx5e_ipsec_drop all;
+};
+
+struct mlx5e_ipsec_rx {
+ struct mlx5e_ipsec_ft ft;
+ struct mlx5e_ipsec_miss pol;
+ struct mlx5e_ipsec_miss sa;
+ struct mlx5e_ipsec_rule status;
+ struct mlx5e_ipsec_status_checks status_drops;
+ struct mlx5e_ipsec_fc *fc;
+ struct mlx5_fs_chains *chains;
+ u8 allow_tunnel_mode : 1;
+};
+
/* IPsec RX flow steering */
static enum mlx5_traffic_types family2tt(u32 family)
{
@@ -131,9 +147,9 @@ static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
static void ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx)
{
- mlx5_del_flow_rules(rx->status_drop.rule);
- mlx5_destroy_flow_group(rx->status_drop.group);
- mlx5_fc_destroy(ipsec->mdev, rx->status_drop_cnt);
+ mlx5_del_flow_rules(rx->status_drops.all.rule);
+ mlx5_fc_destroy(ipsec->mdev, rx->status_drops.all.fc);
+ mlx5_destroy_flow_group(rx->status_drops.drop_all_group);
}
static void ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
@@ -149,8 +165,149 @@ static void ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
#endif
}
-static int ipsec_rx_status_drop_create(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx)
+static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5e_ipsec_rx *rx)
+{
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+ struct mlx5_flow_table *ft = rx->ft.status;
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_fc *flow_counter;
+ struct mlx5_flow_spec *spec;
+ int err;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ flow_counter = mlx5_fc_create(mdev, true);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
+ goto err_cnt;
+ }
+ sa_entry->ipsec_rule.auth.fc = flow_counter;
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ flow_act.flags = FLOW_ACT_NO_APPEND;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter_id = mlx5_fc_id(flow_counter);
+ if (rx == ipsec->rx_esw)
+ spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.ipsec_syndrome);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 1);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.metadata_reg_c_2,
+ sa_entry->ipsec_obj_id | BIT(31));
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop rule, err=%d\n", err);
+ goto err_rule;
+ }
+ sa_entry->ipsec_rule.auth.rule = rule;
+
+ flow_counter = mlx5_fc_create(mdev, true);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
+ goto err_cnt_2;
+ }
+ sa_entry->ipsec_rule.trailer.fc = flow_counter;
+
+ dest.counter_id = mlx5_fc_id(flow_counter);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 2);
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop rule, err=%d\n", err);
+ goto err_rule_2;
+ }
+ sa_entry->ipsec_rule.trailer.rule = rule;
+
+ kvfree(spec);
+ return 0;
+
+err_rule_2:
+ mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.trailer.fc);
+err_cnt_2:
+ mlx5_del_flow_rules(sa_entry->ipsec_rule.auth.rule);
+err_rule:
+ mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.auth.fc);
+err_cnt:
+ kvfree(spec);
+ return err;
+}
+
+static int rx_add_rule_drop_replay(struct mlx5e_ipsec_sa_entry *sa_entry, struct mlx5e_ipsec_rx *rx)
+{
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+ struct mlx5_flow_table *ft = rx->ft.status;
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_fc *flow_counter;
+ struct mlx5_flow_spec *spec;
+ int err;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ flow_counter = mlx5_fc_create(mdev, true);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
+ goto err_cnt;
+ }
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ flow_act.flags = FLOW_ACT_NO_APPEND;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter_id = mlx5_fc_id(flow_counter);
+ if (rx == ipsec->rx_esw)
+ spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 1);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_2,
+ sa_entry->ipsec_obj_id | BIT(31));
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop rule, err=%d\n", err);
+ goto err_rule;
+ }
+
+ sa_entry->ipsec_rule.replay.rule = rule;
+ sa_entry->ipsec_rule.replay.fc = flow_counter;
+
+ kvfree(spec);
+ return 0;
+
+err_rule:
+ mlx5_fc_destroy(mdev, flow_counter);
+err_cnt:
+ kvfree(spec);
+ return err;
+}
+
+static int ipsec_rx_status_drop_all_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_table *ft = rx->ft.status;
@@ -202,9 +359,9 @@ static int ipsec_rx_status_drop_create(struct mlx5e_ipsec *ipsec,
goto err_rule;
}
- rx->status_drop.group = g;
- rx->status_drop.rule = rule;
- rx->status_drop_cnt = flow_counter;
+ rx->status_drops.drop_all_group = g;
+ rx->status_drops.all.rule = rule;
+ rx->status_drops.all.fc = flow_counter;
kvfree(flow_group_in);
kvfree(spec);
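[Editor's note, not part of the patch] The per-SA drop rules above key on metadata register c_2, which the modify-header change later in this patch loads with the SA object id OR'ed with a high "valid" bit (ipsec_obj_id | BIT(31)). A small self-contained C sketch of that tagging scheme, with hypothetical helper names:

#include <stdint.h>
#include <stdio.h>

#define SA_VALID_BIT (1u << 31)  /* mirrors the BIT(31) OR'ed into reg_c_2 */

static uint32_t tag_sa(uint32_t ipsec_obj_id)
{
	return ipsec_obj_id | SA_VALID_BIT;
}

static uint32_t untag_sa(uint32_t reg_c_2)
{
	return reg_c_2 & ~SA_VALID_BIT;
}

int main(void)
{
	uint32_t reg = tag_sa(0x1234);

	printf("reg_c_2=0x%08x -> obj_id=0x%x\n", reg, untag_sa(reg));
	return 0;
}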
@@ -235,8 +392,12 @@ static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
misc_parameters_2.ipsec_syndrome);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_4);
MLX5_SET(fte_match_param, spec->match_value,
misc_parameters_2.ipsec_syndrome, 0);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.metadata_reg_c_4, 0);
if (rx == ipsec->rx_esw)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
@@ -273,7 +434,7 @@ static int mlx5_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
{
int err;
- err = ipsec_rx_status_drop_create(ipsec, rx);
+ err = ipsec_rx_status_drop_all_create(ipsec, rx);
if (err)
return err;
@@ -332,6 +493,83 @@ out:
return err;
}
+static void handle_ipsec_rx_bringup(struct mlx5e_ipsec *ipsec, u32 family)
+{
+ struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, XFRM_DEV_OFFLOAD_PACKET);
+ struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(ipsec->fs, false);
+ struct mlx5_flow_destination old_dest, new_dest;
+
+ old_dest = mlx5_ttc_get_default_dest(mlx5e_fs_get_ttc(ipsec->fs, false),
+ family2tt(family));
+
+ mlx5_ipsec_fs_roce_rx_create(ipsec->mdev, ipsec->roce, ns, &old_dest, family,
+ MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL, MLX5E_NIC_PRIO);
+
+ new_dest.ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family);
+ new_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ mlx5_modify_rule_destination(rx->status.rule, &new_dest, &old_dest);
+ mlx5_modify_rule_destination(rx->sa.rule, &new_dest, &old_dest);
+}
+
+static void handle_ipsec_rx_cleanup(struct mlx5e_ipsec *ipsec, u32 family)
+{
+ struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, XFRM_DEV_OFFLOAD_PACKET);
+ struct mlx5_flow_destination old_dest, new_dest;
+
+ old_dest.ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family);
+ old_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ new_dest = mlx5_ttc_get_default_dest(mlx5e_fs_get_ttc(ipsec->fs, false),
+ family2tt(family));
+ mlx5_modify_rule_destination(rx->sa.rule, &new_dest, &old_dest);
+ mlx5_modify_rule_destination(rx->status.rule, &new_dest, &old_dest);
+
+ mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, ipsec->mdev);
+}
+
+static void ipsec_mpv_work_handler(struct work_struct *_work)
+{
+ struct mlx5e_ipsec_mpv_work *work = container_of(_work, struct mlx5e_ipsec_mpv_work, work);
+ struct mlx5e_ipsec *ipsec = work->slave_priv->ipsec;
+
+ switch (work->event) {
+ case MPV_DEVCOM_IPSEC_MASTER_UP:
+ mutex_lock(&ipsec->tx->ft.mutex);
+ if (ipsec->tx->ft.refcnt)
+ mlx5_ipsec_fs_roce_tx_create(ipsec->mdev, ipsec->roce, ipsec->tx->ft.pol,
+ true);
+ mutex_unlock(&ipsec->tx->ft.mutex);
+
+ mutex_lock(&ipsec->rx_ipv4->ft.mutex);
+ if (ipsec->rx_ipv4->ft.refcnt)
+ handle_ipsec_rx_bringup(ipsec, AF_INET);
+ mutex_unlock(&ipsec->rx_ipv4->ft.mutex);
+
+ mutex_lock(&ipsec->rx_ipv6->ft.mutex);
+ if (ipsec->rx_ipv6->ft.refcnt)
+ handle_ipsec_rx_bringup(ipsec, AF_INET6);
+ mutex_unlock(&ipsec->rx_ipv6->ft.mutex);
+ break;
+ case MPV_DEVCOM_IPSEC_MASTER_DOWN:
+ mutex_lock(&ipsec->tx->ft.mutex);
+ if (ipsec->tx->ft.refcnt)
+ mlx5_ipsec_fs_roce_tx_destroy(ipsec->roce, ipsec->mdev);
+ mutex_unlock(&ipsec->tx->ft.mutex);
+
+ mutex_lock(&ipsec->rx_ipv4->ft.mutex);
+ if (ipsec->rx_ipv4->ft.refcnt)
+ handle_ipsec_rx_cleanup(ipsec, AF_INET);
+ mutex_unlock(&ipsec->rx_ipv4->ft.mutex);
+
+ mutex_lock(&ipsec->rx_ipv6->ft.mutex);
+ if (ipsec->rx_ipv6->ft.refcnt)
+ handle_ipsec_rx_cleanup(ipsec, AF_INET6);
+ mutex_unlock(&ipsec->rx_ipv6->ft.mutex);
+ break;
+ }
+
+ complete(&work->master_priv->ipsec->comp);
+}
+
static void ipsec_rx_ft_disconnect(struct mlx5e_ipsec *ipsec, u32 family)
{
struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
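[Editor's note, not part of the patch] ipsec_mpv_work_handler() runs on the slave's workqueue and signals the master's completion once the RoCE steering has been reconfigured; the sender blocks on that completion (see mlx5e_ipsec_send_event() later in this file). A minimal userspace analogue of the queue-then-wait handshake, using pthreads in place of kernel workqueues and completions:

#include <pthread.h>
#include <stdio.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

static struct completion comp = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
};

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

static void *worker(void *arg)  /* stands in for ipsec_mpv_work_handler() */
{
	(void)arg;
	puts("worker: reconfigure RoCE steering");
	complete(&comp);            /* complete(&work->master_priv->ipsec->comp) */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL); /* queue_work() analogue */
	wait_for_completion(&comp);             /* sender blocks until done */
	pthread_join(t, NULL);
	return 0;
}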
@@ -362,7 +600,7 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
mlx5_ipsec_rx_status_destroy(ipsec, rx);
mlx5_destroy_flow_table(rx->ft.status);
- mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family);
+ mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev);
}
static void ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
@@ -440,7 +678,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
if (err)
return err;
- ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 1, 0);
+ ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 3, 0);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_fs_ft_status;
@@ -517,7 +755,7 @@ err_fs_ft:
err_add:
mlx5_destroy_flow_table(rx->ft.status);
err_fs_ft_status:
- mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family);
+ mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev);
return err;
}
@@ -657,7 +895,7 @@ err_rule:
static void tx_destroy(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
struct mlx5_ipsec_fs *roce)
{
- mlx5_ipsec_fs_roce_tx_destroy(roce);
+ mlx5_ipsec_fs_roce_tx_destroy(roce, ipsec->mdev);
if (tx->chains) {
ipsec_chains_destroy(tx->chains);
} else {
@@ -760,7 +998,7 @@ static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
}
connect_roce:
- err = mlx5_ipsec_fs_roce_tx_create(mdev, roce, tx->ft.pol);
+ err = mlx5_ipsec_fs_roce_tx_create(mdev, roce, tx->ft.pol, false);
if (err)
goto err_roce;
return 0;
@@ -1079,29 +1317,48 @@ static int setup_modify_header(struct mlx5e_ipsec *ipsec, int type, u32 val, u8
struct mlx5_flow_act *flow_act)
{
enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, type, dir);
- u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ u8 action[3][MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
struct mlx5_core_dev *mdev = ipsec->mdev;
struct mlx5_modify_hdr *modify_hdr;
+ u8 num_of_actions = 1;
- MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, action[0], action_type, MLX5_ACTION_TYPE_SET);
switch (dir) {
case XFRM_DEV_OFFLOAD_IN:
- MLX5_SET(set_action_in, action, field,
+ MLX5_SET(set_action_in, action[0], field,
MLX5_ACTION_IN_FIELD_METADATA_REG_B);
+
+ num_of_actions++;
+ MLX5_SET(set_action_in, action[1], action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, action[1], field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_2);
+ MLX5_SET(set_action_in, action[1], data, val);
+ MLX5_SET(set_action_in, action[1], offset, 0);
+ MLX5_SET(set_action_in, action[1], length, 32);
+
+ if (type == XFRM_DEV_OFFLOAD_CRYPTO) {
+ num_of_actions++;
+ MLX5_SET(set_action_in, action[2], action_type,
+ MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, action[2], field,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
+ MLX5_SET(set_action_in, action[2], data, 0);
+ MLX5_SET(set_action_in, action[2], offset, 0);
+ MLX5_SET(set_action_in, action[2], length, 32);
+ }
break;
case XFRM_DEV_OFFLOAD_OUT:
- MLX5_SET(set_action_in, action, field,
+ MLX5_SET(set_action_in, action[0], field,
MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
break;
default:
return -EINVAL;
}
- MLX5_SET(set_action_in, action, data, val);
- MLX5_SET(set_action_in, action, offset, 0);
- MLX5_SET(set_action_in, action, length, 32);
+ MLX5_SET(set_action_in, action[0], data, val);
+ MLX5_SET(set_action_in, action[0], offset, 0);
+ MLX5_SET(set_action_in, action[0], length, 32);
- modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, 1, action);
+ modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, num_of_actions, action);
if (IS_ERR(modify_hdr)) {
mlx5_core_err(mdev, "Failed to allocate modify_header %ld\n",
PTR_ERR(modify_hdr));
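[Editor's note, not part of the patch] For RX flows the rewritten setup_modify_header() now emits two or three set-metadata actions instead of one: reg_b and reg_c_2 always, plus a reg_c_4 zeroing for crypto mode so the replay-drop match added earlier cannot fire on crypto-offloaded packets; TX keeps its single reg_c_4 write. A compilable sketch of just the action-count logic (the enum names are stand-ins, not the XFRM constants):

#include <stdio.h>

enum dir  { OFFLOAD_IN, OFFLOAD_OUT };
enum kind { OFFLOAD_CRYPTO, OFFLOAD_PACKET };

static int num_modify_actions(enum dir d, enum kind k)
{
	if (d == OFFLOAD_OUT)
		return 1;                   /* reg_c_4 <- val */
	if (k == OFFLOAD_CRYPTO)
		return 3;                   /* reg_b, reg_c_2, and reg_c_4 <- 0 */
	return 2;                           /* reg_b, reg_c_2 */
}

int main(void)
{
	printf("in/crypto: %d\n", num_modify_actions(OFFLOAD_IN, OFFLOAD_CRYPTO));
	printf("out:       %d\n", num_modify_actions(OFFLOAD_OUT, OFFLOAD_PACKET));
	return 0;
}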
@@ -1354,15 +1611,17 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
setup_fte_no_frags(spec);
setup_fte_upper_proto_match(spec, &attrs->upspec);
- if (rx != ipsec->rx_esw)
- err = setup_modify_header(ipsec, attrs->type,
- sa_entry->ipsec_obj_id | BIT(31),
- XFRM_DEV_OFFLOAD_IN, &flow_act);
- else
- err = mlx5_esw_ipsec_rx_setup_modify_header(sa_entry, &flow_act);
+ if (!attrs->drop) {
+ if (rx != ipsec->rx_esw)
+ err = setup_modify_header(ipsec, attrs->type,
+ sa_entry->ipsec_obj_id | BIT(31),
+ XFRM_DEV_OFFLOAD_IN, &flow_act);
+ else
+ err = mlx5_esw_ipsec_rx_setup_modify_header(sa_entry, &flow_act);
- if (err)
- goto err_mod_header;
+ if (err)
+ goto err_mod_header;
+ }
switch (attrs->type) {
case XFRM_DEV_OFFLOAD_PACKET:
@@ -1398,6 +1657,15 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
mlx5_core_err(mdev, "fail to add RX ipsec rule err=%d\n", err);
goto err_add_flow;
}
+ if (attrs->type == XFRM_DEV_OFFLOAD_PACKET) {
+ err = rx_add_rule_drop_replay(sa_entry, rx);
+ if (err)
+ goto err_add_replay;
+ }
+
+ err = rx_add_rule_drop_auth_trailer(sa_entry, rx);
+ if (err)
+ goto err_drop_reason;
+
kvfree(spec);
sa_entry->ipsec_rule.rule = rule;
@@ -1406,13 +1674,21 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
return 0;
+err_drop_reason:
+ if (sa_entry->ipsec_rule.replay.rule) {
+ mlx5_del_flow_rules(sa_entry->ipsec_rule.replay.rule);
+ mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.replay.fc);
+ }
+err_add_replay:
+ mlx5_del_flow_rules(rule);
err_add_flow:
mlx5_fc_destroy(mdev, counter);
err_add_cnt:
if (flow_act.pkt_reformat)
mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
err_pkt_reformat:
- mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
+ if (flow_act.modify_hdr)
+ mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
err_mod_header:
kvfree(spec);
err_alloc:
@@ -1913,7 +2189,19 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
return;
}
- mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
+ if (ipsec_rule->modify_hdr)
+ mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
+
+ mlx5_del_flow_rules(ipsec_rule->trailer.rule);
+ mlx5_fc_destroy(mdev, ipsec_rule->trailer.fc);
+
+ mlx5_del_flow_rules(ipsec_rule->auth.rule);
+ mlx5_fc_destroy(mdev, ipsec_rule->auth.fc);
+
+ if (ipsec_rule->replay.rule) {
+ mlx5_del_flow_rules(ipsec_rule->replay.rule);
+ mlx5_fc_destroy(mdev, ipsec_rule->replay.fc);
+ }
mlx5_esw_ipsec_rx_id_mapping_remove(sa_entry);
rx_ft_put(sa_entry->ipsec, sa_entry->attrs.family, sa_entry->attrs.type);
}
@@ -1984,7 +2272,7 @@ void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
kfree(ipsec->rx_ipv6);
if (ipsec->is_uplink_rep) {
- xa_destroy(&ipsec->rx_esw->ipsec_obj_id_map);
+ xa_destroy(&ipsec->ipsec_obj_id_map);
mutex_destroy(&ipsec->tx_esw->ft.mutex);
WARN_ON(ipsec->tx_esw->ft.refcnt);
@@ -1996,7 +2284,8 @@ void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
}
}
-int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
+int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec,
+ struct mlx5_devcom_comp_dev **devcom)
{
struct mlx5_core_dev *mdev = ipsec->mdev;
struct mlx5_flow_namespace *ns, *ns_esw;
@@ -2046,9 +2335,11 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
mutex_init(&ipsec->tx_esw->ft.mutex);
mutex_init(&ipsec->rx_esw->ft.mutex);
ipsec->tx_esw->ns = ns_esw;
- xa_init_flags(&ipsec->rx_esw->ipsec_obj_id_map, XA_FLAGS_ALLOC1);
+ xa_init_flags(&ipsec->ipsec_obj_id_map, XA_FLAGS_ALLOC1);
} else if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ROCE) {
- ipsec->roce = mlx5_ipsec_fs_roce_init(mdev);
+ ipsec->roce = mlx5_ipsec_fs_roce_init(mdev, devcom);
+ } else {
+ mlx5_core_warn(mdev, "IPsec was initialized without RoCE support\n");
}
return 0;
@@ -2095,3 +2386,33 @@ bool mlx5e_ipsec_fs_tunnel_enabled(struct mlx5e_ipsec_sa_entry *sa_entry)
return rx->allow_tunnel_mode;
}
+
+void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv,
+ struct mlx5e_priv *master_priv)
+{
+ struct mlx5e_ipsec_mpv_work *work;
+
+ reinit_completion(&master_priv->ipsec->comp);
+
+ if (!slave_priv->ipsec) {
+ complete(&master_priv->ipsec->comp);
+ return;
+ }
+
+ work = &slave_priv->ipsec->mpv_work;
+
+ INIT_WORK(&work->work, ipsec_mpv_work_handler);
+ work->event = event;
+ work->slave_priv = slave_priv;
+ work->master_priv = master_priv;
+ queue_work(slave_priv->ipsec->wq, &work->work);
+}
+
+void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event)
+{
+ if (!priv->ipsec)
+ return; /* IPsec not supported */
+
+ mlx5_devcom_send_event(priv->devcom, event, event, priv);
+ wait_for_completion(&priv->ipsec->comp);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
index ce29e31721..6e00afe467 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -5,6 +5,7 @@
#include "en.h"
#include "ipsec.h"
#include "lib/crypto.h"
+#include "lib/ipsec_fs_roce.h"
#include "fs_core.h"
#include "eswitch.h"
@@ -68,7 +69,7 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
caps |= MLX5_IPSEC_CAP_ESPINUDP;
}
- if (mlx5_get_roce_state(mdev) &&
+ if (mlx5_get_roce_state(mdev) && mlx5_ipsec_fs_is_mpv_roce_supported(mdev) &&
MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_RX_2_NIC_RX_RDMA &&
MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_TX_RDMA_2_NIC_TX)
caps |= MLX5_IPSEC_CAP_ROCE;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 9ee014a8ad..2ed99772f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -54,7 +54,6 @@ struct mlx5e_accel_tx_ipsec_state {
#ifdef CONFIG_MLX5_EN_IPSEC
-void mlx5e_ipsec_inverse_table_init(void);
void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_offload *xo);
void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 38263d5c98..c7c1b667b1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1252,7 +1252,7 @@ static u32 mlx5e_get_rxfh_key_size(struct net_device *netdev)
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv)
{
- return MLX5E_INDIR_RQT_SIZE;
+ return mlx5e_rqt_size(priv->mdev, priv->channels.params.num_channels);
}
static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 934b0d5ce1..777d311d44 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -1283,9 +1283,7 @@ static int mlx5e_create_inner_ttc_table(struct mlx5e_flow_steering *fs,
mlx5e_set_inner_ttc_params(fs, rx_res, &ttc_params);
fs->inner_ttc = mlx5_create_inner_ttc_table(fs->mdev,
&ttc_params);
- if (IS_ERR(fs->inner_ttc))
- return PTR_ERR(fs->inner_ttc);
- return 0;
+ return PTR_ERR_OR_ZERO(fs->inner_ttc);
}
int mlx5e_create_ttc_table(struct mlx5e_flow_steering *fs,
@@ -1295,9 +1293,7 @@ int mlx5e_create_ttc_table(struct mlx5e_flow_steering *fs,
mlx5e_set_ttc_params(fs, rx_res, &ttc_params, true);
fs->ttc = mlx5_create_ttc_table(fs->mdev, &ttc_params);
- if (IS_ERR(fs->ttc))
- return PTR_ERR(fs->ttc);
- return 0;
+ return PTR_ERR_OR_ZERO(fs->ttc);
}
int mlx5e_create_flow_steering(struct mlx5e_flow_steering *fs,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index c3961c2bbc..0c87ddb8a7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -69,6 +69,7 @@
#include "en/htb.h"
#include "qos.h"
#include "en/trap.h"
+#include "lib/devcom.h"
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift,
enum mlx5e_mpwrq_umr_mode umr_mode)
@@ -178,6 +179,61 @@ static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
mlx5_notifier_unregister(priv->mdev, &priv->events_nb);
}
+static int mlx5e_devcom_event_mpv(int event, void *my_data, void *event_data)
+{
+ struct mlx5e_priv *slave_priv = my_data;
+
+ switch (event) {
+ case MPV_DEVCOM_MASTER_UP:
+ mlx5_devcom_comp_set_ready(slave_priv->devcom, true);
+ break;
+ case MPV_DEVCOM_MASTER_DOWN:
+ /* No need to set comp ready to false here: we unregister right
+ * after, and clearing it would hurt the cleanup flow.
+ */
+ break;
+ case MPV_DEVCOM_IPSEC_MASTER_UP:
+ case MPV_DEVCOM_IPSEC_MASTER_DOWN:
+ mlx5e_ipsec_handle_mpv_event(event, my_data, event_data);
+ break;
+ }
+
+ return 0;
+}
+
+static int mlx5e_devcom_init_mpv(struct mlx5e_priv *priv, u64 *data)
+{
+ priv->devcom = mlx5_devcom_register_component(priv->mdev->priv.devc,
+ MLX5_DEVCOM_MPV,
+ *data,
+ mlx5e_devcom_event_mpv,
+ priv);
+ if (IS_ERR_OR_NULL(priv->devcom))
+ return -EOPNOTSUPP;
+
+ if (mlx5_core_is_mp_master(priv->mdev)) {
+ mlx5_devcom_send_event(priv->devcom, MPV_DEVCOM_MASTER_UP,
+ MPV_DEVCOM_MASTER_UP, priv);
+ mlx5e_ipsec_send_event(priv, MPV_DEVCOM_IPSEC_MASTER_UP);
+ }
+
+ return 0;
+}
+
+static void mlx5e_devcom_cleanup_mpv(struct mlx5e_priv *priv)
+{
+ if (IS_ERR_OR_NULL(priv->devcom))
+ return;
+
+ if (mlx5_core_is_mp_master(priv->mdev)) {
+ mlx5_devcom_send_event(priv->devcom, MPV_DEVCOM_MASTER_DOWN,
+ MPV_DEVCOM_MASTER_DOWN, priv);
+ mlx5e_ipsec_send_event(priv, MPV_DEVCOM_IPSEC_MASTER_DOWN);
+ }
+
+ mlx5_devcom_unregister_component(priv->devcom);
+}
+
static int blocking_event(struct notifier_block *nb, unsigned long event, void *data)
{
struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, blocking_events_nb);
@@ -192,6 +248,13 @@ static int blocking_event(struct notifier_block *nb, unsigned long event, void *
return NOTIFY_BAD;
}
break;
+ case MLX5_DRIVER_EVENT_AFFILIATION_DONE:
+ if (mlx5e_devcom_init_mpv(priv, data))
+ return NOTIFY_BAD;
+ break;
+ case MLX5_DRIVER_EVENT_AFFILIATION_REMOVED:
+ mlx5e_devcom_cleanup_mpv(priv);
+ break;
default:
return NOTIFY_DONE;
}
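[Editor's note, not part of the patch] The blocking notifier gains two affiliation events that drive the devcom registration above; everything else still falls through to NOTIFY_DONE. A tiny standalone analogue of that dispatch shape (event names shortened):

#include <stdio.h>

enum { NOTIFY_DONE, NOTIFY_OK, NOTIFY_BAD };
enum { EV_AFFILIATION_DONE, EV_AFFILIATION_REMOVED, EV_UNRELATED };

static int blocking_event(int event)
{
	switch (event) {
	case EV_AFFILIATION_DONE:
		puts("register MPV devcom component");
		return NOTIFY_OK;         /* NOTIFY_BAD if registration fails */
	case EV_AFFILIATION_REMOVED:
		puts("unregister MPV devcom component");
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;       /* not ours */
	}
}

int main(void)
{
	return blocking_event(EV_UNRELATED) != NOTIFY_DONE;
}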
@@ -834,7 +897,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
struct page_pool_params pp_params = { 0 };
pp_params.order = 0;
- pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV | PP_FLAG_PAGE_FRAG;
+ pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
pp_params.pool_size = pool_size;
pp_params.nid = node;
pp_params.dev = rq->pdev;
@@ -2886,8 +2949,12 @@ static int mlx5e_num_channels_changed(struct mlx5e_priv *priv)
mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params);
/* This function may be called on attach, before priv->rx_res is created. */
- if (!netif_is_rxfh_configured(priv->netdev) && priv->rx_res)
- mlx5e_rx_res_rss_set_indir_uniform(priv->rx_res, count);
+ if (priv->rx_res) {
+ mlx5e_rx_res_rss_update_num_channels(priv->rx_res, count);
+
+ if (!netif_is_rxfh_configured(priv->netdev))
+ mlx5e_rx_res_rss_set_indir_uniform(priv->rx_res, count);
+ }
return 0;
}
@@ -5347,10 +5414,6 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
enum mlx5e_rx_res_features features;
int err;
- priv->rx_res = mlx5e_rx_res_alloc();
- if (!priv->rx_res)
- return -ENOMEM;
-
mlx5e_create_q_counters(priv);
err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
@@ -5362,12 +5425,16 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
features = MLX5E_RX_RES_FEATURE_PTP;
if (mlx5_tunnel_inner_ft_supported(mdev))
features |= MLX5E_RX_RES_FEATURE_INNER_FT;
- err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, features,
- priv->max_nch, priv->drop_rq.rqn,
- &priv->channels.params.packet_merge,
- priv->channels.params.num_channels);
- if (err)
+
+ priv->rx_res = mlx5e_rx_res_create(priv->mdev, features, priv->max_nch, priv->drop_rq.rqn,
+ &priv->channels.params.packet_merge,
+ priv->channels.params.num_channels);
+ if (IS_ERR(priv->rx_res)) {
+ err = PTR_ERR(priv->rx_res);
+ priv->rx_res = NULL;
+ mlx5_core_err(mdev, "create rx resources failed, %d\n", err);
goto err_close_drop_rq;
+ }
err = mlx5e_create_flow_steering(priv->fs, priv->rx_res, priv->profile,
priv->netdev);
@@ -5397,12 +5464,11 @@ err_destroy_flow_steering:
priv->profile);
err_destroy_rx_res:
mlx5e_rx_res_destroy(priv->rx_res);
+ priv->rx_res = NULL;
err_close_drop_rq:
mlx5e_close_drop_rq(&priv->drop_rq);
err_destroy_q_counters:
mlx5e_destroy_q_counters(priv);
- mlx5e_rx_res_free(priv->rx_res);
- priv->rx_res = NULL;
return err;
}
@@ -5413,10 +5479,9 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
mlx5e_destroy_flow_steering(priv->fs, !!(priv->netdev->hw_features & NETIF_F_NTUPLE),
priv->profile);
mlx5e_rx_res_destroy(priv->rx_res);
+ priv->rx_res = NULL;
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_destroy_q_counters(priv);
- mlx5e_rx_res_free(priv->rx_res);
- priv->rx_res = NULL;
}
static void mlx5e_set_mqprio_rl(struct mlx5e_priv *priv)
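[Editor's note, not part of the patch] mlx5e_rx_res_alloc()/mlx5e_rx_res_init() are folded into a single mlx5e_rx_res_create() that returns either a valid pointer or an encoded errno, so the callers in this and the following files switch from NULL checks to IS_ERR()/PTR_ERR(). A userspace sketch of that kernel convention, with MAX_ERRNO and the helpers reimplemented here purely for illustration:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long err) { return (void *)err; }
static long PTR_ERR(const void *p) { return (long)p; }
static int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *rx_res_create(int fail)
{
	if (fail)
		return ERR_PTR(-ENOMEM); /* allocation and init in one call */
	return malloc(16);
}

int main(void)
{
	void *res = rx_res_create(1);

	if (IS_ERR(res)) {
		fprintf(stderr, "create rx resources failed, %ld\n", PTR_ERR(res));
		return 1;
	}
	free(res);
	return 0;
}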
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 751d3ffcd2..e92d4f8359 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -1010,26 +1010,22 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
int err;
- priv->rx_res = mlx5e_rx_res_alloc();
- if (!priv->rx_res) {
- err = -ENOMEM;
- goto err_free_fs;
- }
-
mlx5e_fs_init_l2_addr(priv->fs, priv->netdev);
err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
if (err) {
mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
- goto err_rx_res_free;
+ goto err_free_fs;
}
- err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0,
- priv->max_nch, priv->drop_rq.rqn,
- &priv->channels.params.packet_merge,
- priv->channels.params.num_channels);
- if (err)
+ priv->rx_res = mlx5e_rx_res_create(priv->mdev, 0, priv->max_nch, priv->drop_rq.rqn,
+ &priv->channels.params.packet_merge,
+ priv->channels.params.num_channels);
+ if (IS_ERR(priv->rx_res)) {
+ err = PTR_ERR(priv->rx_res);
+ mlx5_core_err(mdev, "Create rx resources failed, err=%d\n", err);
goto err_close_drop_rq;
+ }
err = mlx5e_create_rep_ttc_table(priv);
if (err)
@@ -1053,11 +1049,9 @@ err_destroy_ttc_table:
mlx5_destroy_ttc_table(mlx5e_fs_get_ttc(priv->fs, false));
err_destroy_rx_res:
mlx5e_rx_res_destroy(priv->rx_res);
+ priv->rx_res = ERR_PTR(-EINVAL);
err_close_drop_rq:
mlx5e_close_drop_rq(&priv->drop_rq);
-err_rx_res_free:
- mlx5e_rx_res_free(priv->rx_res);
- priv->rx_res = NULL;
err_free_fs:
mlx5e_fs_cleanup(priv->fs);
priv->fs = NULL;
@@ -1071,9 +1065,8 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
mlx5e_destroy_rep_root_ft(priv);
mlx5_destroy_ttc_table(mlx5e_fs_get_ttc(priv->fs, false));
mlx5e_rx_res_destroy(priv->rx_res);
+ priv->rx_res = ERR_PTR(-EINVAL);
mlx5e_close_drop_rq(&priv->drop_rq);
- mlx5e_rx_res_free(priv->rx_res);
- priv->rx_res = NULL;
}
static void mlx5e_rep_mpesw_work(struct work_struct *work)
@@ -1367,8 +1360,9 @@ mlx5e_rep_vnic_reporter_diagnose(struct devlink_health_reporter *reporter,
struct mlx5e_rep_priv *rpriv = devlink_health_reporter_priv(reporter);
struct mlx5_eswitch_rep *rep = rpriv->rep;
- return mlx5_reporter_vnic_diagnose_counters(rep->esw->dev, fmsg,
- rep->vport, true);
+ mlx5_reporter_vnic_diagnose_counters(rep->esw->dev, fmsg, rep->vport,
+ true);
+ return 0;
}
static const struct devlink_health_reporter_ops mlx5_rep_vnic_reporter_ops = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index dc9b157a44..404dd1d9b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -756,19 +756,21 @@ static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
{
struct mlx5e_priv *priv = hp->func_priv;
struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5e_rss_params_indir *indir;
+ struct mlx5e_rss_params_indir indir;
int err;
- indir = kvmalloc(sizeof(*indir), GFP_KERNEL);
- if (!indir)
- return -ENOMEM;
+ err = mlx5e_rss_params_indir_init(&indir, mdev,
+ mlx5e_rqt_size(mdev, hp->num_channels),
+ mlx5e_rqt_size(mdev, hp->num_channels));
+ if (err)
+ return err;
- mlx5e_rss_params_indir_init_uniform(indir, hp->num_channels);
+ mlx5e_rss_params_indir_init_uniform(&indir, hp->num_channels);
err = mlx5e_rqt_init_indir(&hp->indir_rqt, mdev, hp->pair->rqn, hp->num_channels,
mlx5e_rx_res_get_current_hash(priv->rx_res).hfunc,
- indir);
+ &indir);
- kvfree(indir);
+ mlx5e_rss_params_indir_cleanup(&indir);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
index d5d33c3b3a..190f10aba1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
@@ -50,7 +50,7 @@ int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry,
u32 mapped_id;
int err;
- err = xa_alloc_bh(&ipsec->rx_esw->ipsec_obj_id_map, &mapped_id,
+ err = xa_alloc_bh(&ipsec->ipsec_obj_id_map, &mapped_id,
xa_mk_value(sa_entry->ipsec_obj_id),
XA_LIMIT(1, ESW_IPSEC_RX_MAPPED_ID_MASK), 0);
if (err)
@@ -81,7 +81,7 @@ int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry,
return 0;
err_header_alloc:
- xa_erase_bh(&ipsec->rx_esw->ipsec_obj_id_map, mapped_id);
+ xa_erase_bh(&ipsec->ipsec_obj_id_map, mapped_id);
return err;
}
@@ -90,7 +90,7 @@ void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry)
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
if (sa_entry->rx_mapped_id)
- xa_erase_bh(&ipsec->rx_esw->ipsec_obj_id_map,
+ xa_erase_bh(&ipsec->ipsec_obj_id_map,
sa_entry->rx_mapped_id);
}
@@ -100,7 +100,7 @@ int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id,
struct mlx5e_ipsec *ipsec = priv->ipsec;
void *val;
- val = xa_load(&ipsec->rx_esw->ipsec_obj_id_map, id);
+ val = xa_load(&ipsec->ipsec_obj_id_map, id);
if (!val)
return -ENOENT;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 1887a24ee4..d2ebe56c39 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#include "eswitch.h"
+#include "lib/mlx5.h"
#include "esw/qos.h"
#include "en/port.h"
#define CREATE_TRACE_POINTS
@@ -701,10 +702,75 @@ int mlx5_esw_qos_set_vport_rate(struct mlx5_eswitch *esw, struct mlx5_vport *vpo
return err;
}
+static u32 mlx5_esw_qos_lag_link_speed_get_locked(struct mlx5_core_dev *mdev)
+{
+ struct ethtool_link_ksettings lksettings;
+ struct net_device *slave, *master;
+ u32 speed = SPEED_UNKNOWN;
+
+ /* The RTNL lock ensures stable references to the master and slave
+ * netdevices while the master's port speed is queried.
+ */
+ ASSERT_RTNL();
+
+ slave = mlx5_uplink_netdev_get(mdev);
+ if (!slave)
+ goto out;
+
+ master = netdev_master_upper_dev_get(slave);
+ if (master && !__ethtool_get_link_ksettings(master, &lksettings))
+ speed = lksettings.base.speed;
+
+out:
+ return speed;
+}
+
+static int mlx5_esw_qos_max_link_speed_get(struct mlx5_core_dev *mdev, u32 *link_speed_max,
+ bool hold_rtnl_lock, struct netlink_ext_ack *extack)
+{
+ int err;
+
+ if (!mlx5_lag_is_active(mdev))
+ goto skip_lag;
+
+ if (hold_rtnl_lock)
+ rtnl_lock();
+
+ *link_speed_max = mlx5_esw_qos_lag_link_speed_get_locked(mdev);
+
+ if (hold_rtnl_lock)
+ rtnl_unlock();
+
+ if (*link_speed_max != (u32)SPEED_UNKNOWN)
+ return 0;
+
+skip_lag:
+ err = mlx5_port_max_linkspeed(mdev, link_speed_max);
+ if (err)
+ NL_SET_ERR_MSG_MOD(extack, "Failed to get link maximum speed");
+
+ return err;
+}
+
+static int mlx5_esw_qos_link_speed_verify(struct mlx5_core_dev *mdev,
+ const char *name, u32 link_speed_max,
+ u64 value, struct netlink_ext_ack *extack)
+{
+ if (value > link_speed_max) {
+ pr_err("%s rate value %lluMbps exceed link maximum speed %u.\n",
+ name, value, link_speed_max);
+ NL_SET_ERR_MSG_MOD(extack, "TX rate value exceed link maximum speed");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps)
{
u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
struct mlx5_vport *vport;
+ u32 link_speed_max;
u32 bitmask;
int err;
@@ -712,6 +778,17 @@ int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32
if (IS_ERR(vport))
return PTR_ERR(vport);
+ if (rate_mbps) {
+ err = mlx5_esw_qos_max_link_speed_get(esw->dev, &link_speed_max, false, NULL);
+ if (err)
+ return err;
+
+ err = mlx5_esw_qos_link_speed_verify(esw->dev, "Police",
+ link_speed_max, rate_mbps, NULL);
+ if (err)
+ return err;
+ }
+
mutex_lock(&esw->state_lock);
if (!vport->qos.enabled) {
/* Eswitch QoS wasn't enabled yet. Enable it and vport QoS. */
@@ -744,12 +821,6 @@ static int esw_qos_devlink_rate_to_mbps(struct mlx5_core_dev *mdev, const char *
u64 value;
int err;
- err = mlx5_port_max_linkspeed(mdev, &link_speed_max);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Failed to get link maximum speed");
- return err;
- }
-
value = div_u64_rem(*rate, MLX5_LINKSPEED_UNIT, &remainder);
if (remainder) {
pr_err("%s rate value %lluBps not in link speed units of 1Mbps.\n",
@@ -758,12 +829,13 @@ static int esw_qos_devlink_rate_to_mbps(struct mlx5_core_dev *mdev, const char *
return -EINVAL;
}
- if (value > link_speed_max) {
- pr_err("%s rate value %lluMbps exceed link maximum speed %u.\n",
- name, value, link_speed_max);
- NL_SET_ERR_MSG_MOD(extack, "TX rate value exceed link maximum speed");
- return -EINVAL;
- }
+ err = mlx5_esw_qos_max_link_speed_get(mdev, &link_speed_max, true, extack);
+ if (err)
+ return err;
+
+ err = mlx5_esw_qos_link_speed_verify(mdev, name, link_speed_max, value, extack);
+ if (err)
+ return err;
*rate = value;
return 0;
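[Editor's note, not part of the patch] esw_qos_devlink_rate_to_mbps() now converts to Mbps first and only then validates against the (possibly LAG-wide) maximum link speed returned by the new helpers. A standalone sketch of that two-step check; the 125000 bytes-per-second-per-Mbps unit mirrors MLX5_LINKSPEED_UNIT as assumed from the driver, and the error codes are stand-ins:

#include <stdint.h>
#include <stdio.h>

#define LINKSPEED_UNIT 125000ULL /* bytes/sec in one Mbps */

static int rate_to_mbps(uint64_t rate_bps, uint32_t link_max_mbps,
			uint64_t *mbps)
{
	uint64_t rem = rate_bps % LINKSPEED_UNIT;
	uint64_t val = rate_bps / LINKSPEED_UNIT;

	if (rem)
		return -1; /* not in 1 Mbps units */
	if (val > link_max_mbps)
		return -2; /* exceeds link maximum speed */
	*mbps = val;
	return 0;
}

int main(void)
{
	uint64_t mbps = 0;
	int err = rate_to_mbps(125000000ULL, 100000, &mbps); /* 1000 Mbps */

	printf("err=%d mbps=%llu\n", err, (unsigned long long)mbps);
	return 0;
}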
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c
index 3ec892d51f..d91ea53eb3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/events.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c
@@ -441,8 +441,3 @@ int mlx5_blocking_notifier_call_chain(struct mlx5_core_dev *dev, unsigned int ev
return blocking_notifier_call_chain(&events->sw_nh, event, data);
}
-
-void mlx5_events_work_enqueue(struct mlx5_core_dev *dev, struct work_struct *work)
-{
- queue_work(dev->priv.events->wq, work);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index a13b9c2bd1..e6bfa7e4f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -114,9 +114,9 @@
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
/* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy,
- * IPsec RoCE policy
+ * {IPsec RoCE MPV, Alias table}, IPsec RoCE policy
*/
-#define KERNEL_NIC_PRIO_NUM_LEVELS 9
+#define KERNEL_NIC_PRIO_NUM_LEVELS 11
#define KERNEL_NIC_NUM_PRIOS 1
/* One more level for tc */
#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
@@ -137,7 +137,7 @@
#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + KERNEL_RX_MACSEC_MIN_LEVEL + 1)
#define KERNEL_TX_IPSEC_NUM_PRIOS 1
-#define KERNEL_TX_IPSEC_NUM_LEVELS 3
+#define KERNEL_TX_IPSEC_NUM_LEVELS 4
#define KERNEL_TX_IPSEC_MIN_LEVEL (KERNEL_TX_IPSEC_NUM_LEVELS)
#define KERNEL_TX_MACSEC_NUM_PRIOS 1
@@ -231,7 +231,7 @@ enum {
};
#define RDMA_RX_IPSEC_NUM_PRIOS 1
-#define RDMA_RX_IPSEC_NUM_LEVELS 2
+#define RDMA_RX_IPSEC_NUM_LEVELS 4
#define RDMA_RX_IPSEC_MIN_LEVEL (RDMA_RX_IPSEC_NUM_LEVELS)
#define RDMA_RX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_REGULAR_PRIOS
@@ -288,7 +288,7 @@ enum {
#define RDMA_TX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_PRIOS
#define RDMA_TX_COUNTERS_MIN_LEVEL (RDMA_TX_BYPASS_MIN_LEVEL + 1)
-#define RDMA_TX_IPSEC_NUM_PRIOS 1
+#define RDMA_TX_IPSEC_NUM_PRIOS 2
#define RDMA_TX_IPSEC_PRIO_NUM_LEVELS 1
#define RDMA_TX_IPSEC_MIN_LEVEL (RDMA_TX_COUNTERS_MIN_LEVEL + RDMA_TX_IPSEC_NUM_PRIOS)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 2fb2598b77..8ff6dc9bc8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -365,6 +365,8 @@ static const char *hsynd_str(u8 synd)
return "FFSER error";
case MLX5_INITIAL_SEG_HEALTH_SYNDROME_HIGH_TEMP_ERR:
return "High temperature";
+ case MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PCI_POISONED_ERR:
+ return "ICM fetch PCI data poisoned error";
default:
return "unrecognized error";
}
@@ -448,14 +450,15 @@ mlx5_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);
struct mlx5_core_health *health = &dev->priv.health;
struct health_buffer __iomem *h = health->health;
- u8 synd;
- int err;
+ u8 synd = ioread8(&h->synd);
- synd = ioread8(&h->synd);
- err = devlink_fmsg_u8_pair_put(fmsg, "Syndrome", synd);
- if (err || !synd)
- return err;
- return devlink_fmsg_string_pair_put(fmsg, "Description", hsynd_str(synd));
+ if (!synd)
+ return 0;
+
+ devlink_fmsg_u8_pair_put(fmsg, "Syndrome", synd);
+ devlink_fmsg_string_pair_put(fmsg, "Description", hsynd_str(synd));
+
+ return 0;
}
struct mlx5_fw_reporter_ctx {
@@ -463,94 +466,47 @@ struct mlx5_fw_reporter_ctx {
int miss_counter;
};
-static int
+static void
mlx5_fw_reporter_ctx_pairs_put(struct devlink_fmsg *fmsg,
struct mlx5_fw_reporter_ctx *fw_reporter_ctx)
{
- int err;
-
- err = devlink_fmsg_u8_pair_put(fmsg, "syndrome",
- fw_reporter_ctx->err_synd);
- if (err)
- return err;
- err = devlink_fmsg_u32_pair_put(fmsg, "fw_miss_counter",
- fw_reporter_ctx->miss_counter);
- if (err)
- return err;
- return 0;
+ devlink_fmsg_u8_pair_put(fmsg, "syndrome", fw_reporter_ctx->err_synd);
+ devlink_fmsg_u32_pair_put(fmsg, "fw_miss_counter", fw_reporter_ctx->miss_counter);
}
-static int
+static void
mlx5_fw_reporter_heath_buffer_data_put(struct mlx5_core_dev *dev,
struct devlink_fmsg *fmsg)
{
struct mlx5_core_health *health = &dev->priv.health;
struct health_buffer __iomem *h = health->health;
u8 rfr_severity;
- int err;
int i;
if (!ioread8(&h->synd))
- return 0;
-
- err = devlink_fmsg_pair_nest_start(fmsg, "health buffer");
- if (err)
- return err;
- err = devlink_fmsg_obj_nest_start(fmsg);
- if (err)
- return err;
- err = devlink_fmsg_arr_pair_nest_start(fmsg, "assert_var");
- if (err)
- return err;
+ return;
- for (i = 0; i < ARRAY_SIZE(h->assert_var); i++) {
- err = devlink_fmsg_u32_put(fmsg, ioread32be(h->assert_var + i));
- if (err)
- return err;
- }
- err = devlink_fmsg_arr_pair_nest_end(fmsg);
- if (err)
- return err;
- err = devlink_fmsg_u32_pair_put(fmsg, "assert_exit_ptr",
- ioread32be(&h->assert_exit_ptr));
- if (err)
- return err;
- err = devlink_fmsg_u32_pair_put(fmsg, "assert_callra",
- ioread32be(&h->assert_callra));
- if (err)
- return err;
- err = devlink_fmsg_u32_pair_put(fmsg, "time", ioread32be(&h->time));
- if (err)
- return err;
- err = devlink_fmsg_u32_pair_put(fmsg, "hw_id", ioread32be(&h->hw_id));
- if (err)
- return err;
+ devlink_fmsg_pair_nest_start(fmsg, "health buffer");
+ devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_arr_pair_nest_start(fmsg, "assert_var");
+ for (i = 0; i < ARRAY_SIZE(h->assert_var); i++)
+ devlink_fmsg_u32_put(fmsg, ioread32be(h->assert_var + i));
+ devlink_fmsg_arr_pair_nest_end(fmsg);
+ devlink_fmsg_u32_pair_put(fmsg, "assert_exit_ptr",
+ ioread32be(&h->assert_exit_ptr));
+ devlink_fmsg_u32_pair_put(fmsg, "assert_callra",
+ ioread32be(&h->assert_callra));
+ devlink_fmsg_u32_pair_put(fmsg, "time", ioread32be(&h->time));
+ devlink_fmsg_u32_pair_put(fmsg, "hw_id", ioread32be(&h->hw_id));
rfr_severity = ioread8(&h->rfr_severity);
- err = devlink_fmsg_u8_pair_put(fmsg, "rfr", mlx5_health_get_rfr(rfr_severity));
- if (err)
- return err;
- err = devlink_fmsg_u8_pair_put(fmsg, "severity", mlx5_health_get_severity(rfr_severity));
- if (err)
- return err;
- err = devlink_fmsg_u8_pair_put(fmsg, "irisc_index",
- ioread8(&h->irisc_index));
- if (err)
- return err;
- err = devlink_fmsg_u8_pair_put(fmsg, "synd", ioread8(&h->synd));
- if (err)
- return err;
- err = devlink_fmsg_u32_pair_put(fmsg, "ext_synd",
- ioread16be(&h->ext_synd));
- if (err)
- return err;
- err = devlink_fmsg_u32_pair_put(fmsg, "raw_fw_ver",
- ioread32be(&h->fw_ver));
- if (err)
- return err;
- err = devlink_fmsg_obj_nest_end(fmsg);
- if (err)
- return err;
- return devlink_fmsg_pair_nest_end(fmsg);
+ devlink_fmsg_u8_pair_put(fmsg, "rfr", mlx5_health_get_rfr(rfr_severity));
+ devlink_fmsg_u8_pair_put(fmsg, "severity", mlx5_health_get_severity(rfr_severity));
+ devlink_fmsg_u8_pair_put(fmsg, "irisc_index", ioread8(&h->irisc_index));
+ devlink_fmsg_u8_pair_put(fmsg, "synd", ioread8(&h->synd));
+ devlink_fmsg_u32_pair_put(fmsg, "ext_synd", ioread16be(&h->ext_synd));
+ devlink_fmsg_u32_pair_put(fmsg, "raw_fw_ver", ioread32be(&h->fw_ver));
+ devlink_fmsg_obj_nest_end(fmsg);
+ devlink_fmsg_pair_nest_end(fmsg);
}
static int
@@ -568,14 +524,11 @@ mlx5_fw_reporter_dump(struct devlink_health_reporter *reporter,
if (priv_ctx) {
struct mlx5_fw_reporter_ctx *fw_reporter_ctx = priv_ctx;
- err = mlx5_fw_reporter_ctx_pairs_put(fmsg, fw_reporter_ctx);
- if (err)
- return err;
+ mlx5_fw_reporter_ctx_pairs_put(fmsg, fw_reporter_ctx);
}
- err = mlx5_fw_reporter_heath_buffer_data_put(dev, fmsg);
- if (err)
- return err;
+ mlx5_fw_reporter_heath_buffer_data_put(dev, fmsg);
+
return mlx5_fw_tracer_get_saved_traces_objects(dev->tracer, fmsg);
}
@@ -641,12 +594,10 @@ mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter,
if (priv_ctx) {
struct mlx5_fw_reporter_ctx *fw_reporter_ctx = priv_ctx;
- err = mlx5_fw_reporter_ctx_pairs_put(fmsg, fw_reporter_ctx);
- if (err)
- goto free_data;
+ mlx5_fw_reporter_ctx_pairs_put(fmsg, fw_reporter_ctx);
}
- err = devlink_fmsg_binary_pair_put(fmsg, "crdump_data", cr_data, crdump_size);
+ devlink_fmsg_binary_pair_put(fmsg, "crdump_data", cr_data, crdump_size);
free_data:
kvfree(cr_data);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index baa7ef8123..2bf77a5251 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -418,12 +418,6 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
return -ENOMEM;
}
- priv->rx_res = mlx5e_rx_res_alloc();
- if (!priv->rx_res) {
- err = -ENOMEM;
- goto err_free_fs;
- }
-
mlx5e_create_q_counters(priv);
err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
@@ -432,12 +426,13 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
goto err_destroy_q_counters;
}
- err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0,
- priv->max_nch, priv->drop_rq.rqn,
- &priv->channels.params.packet_merge,
- priv->channels.params.num_channels);
- if (err)
+ priv->rx_res = mlx5e_rx_res_create(priv->mdev, 0, priv->max_nch, priv->drop_rq.rqn,
+ &priv->channels.params.packet_merge,
+ priv->channels.params.num_channels);
+ if (IS_ERR(priv->rx_res)) {
+ err = PTR_ERR(priv->rx_res);
goto err_close_drop_rq;
+ }
err = mlx5i_create_flow_steering(priv);
if (err)
@@ -447,13 +442,11 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
err_destroy_rx_res:
mlx5e_rx_res_destroy(priv->rx_res);
+ priv->rx_res = ERR_PTR(-EINVAL);
err_close_drop_rq:
mlx5e_close_drop_rq(&priv->drop_rq);
err_destroy_q_counters:
mlx5e_destroy_q_counters(priv);
- mlx5e_rx_res_free(priv->rx_res);
- priv->rx_res = NULL;
-err_free_fs:
mlx5e_fs_cleanup(priv->fs);
return err;
}
@@ -462,10 +455,9 @@ static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
{
mlx5i_destroy_flow_steering(priv);
mlx5e_rx_res_destroy(priv->rx_res);
+ priv->rx_res = ERR_PTR(-EINVAL);
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_destroy_q_counters(priv);
- mlx5e_rx_res_free(priv->rx_res);
- priv->rx_res = NULL;
mlx5e_fs_cleanup(priv->fs);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index af3fac090b..d14459e5c0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -943,6 +943,26 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
}
}
+/* The last mdev to unregister will destroy the workqueue before removing the
+ * devcom component, and as all the mdevs use the same devcom component we are
+ * guaranteed that the devcom is valid while the calling work is running.
+ */
+struct mlx5_devcom_comp_dev *mlx5_lag_get_devcom_comp(struct mlx5_lag *ldev)
+{
+ struct mlx5_devcom_comp_dev *devcom = NULL;
+ int i;
+
+ mutex_lock(&ldev->lock);
+ for (i = 0; i < ldev->ports; i++) {
+ if (ldev->pf[i].dev) {
+ devcom = ldev->pf[i].dev->priv.hca_devcom_comp;
+ break;
+ }
+ }
+ mutex_unlock(&ldev->lock);
+ return devcom;
+}
+
static void mlx5_queue_bond_work(struct mlx5_lag *ldev, unsigned long delay)
{
queue_delayed_work(ldev->wq, &ldev->bond_work, delay);
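[Editor's note, not part of the patch] mlx5_do_bond_work() in the next hunk swaps the global device-list lock for the per-component rw-semaphore while keeping the trylock-or-requeue shape: on contention the work reschedules itself instead of blocking the workqueue. A userspace analogue; note pthread_mutex_trylock() returns 0 on success, the inverse polarity of down_write_trylock():

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t comp_lock = PTHREAD_MUTEX_INITIALIZER;

static void bond_work(void)
{
	if (pthread_mutex_trylock(&comp_lock) != 0) {
		/* mlx5_queue_bond_work(ldev, HZ) analogue */
		puts("busy: requeue work for later");
		return;
	}
	puts("do bond");
	pthread_mutex_unlock(&comp_lock);
}

int main(void)
{
	bond_work();
	return 0;
}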
@@ -953,9 +973,14 @@ static void mlx5_do_bond_work(struct work_struct *work)
struct delayed_work *delayed_work = to_delayed_work(work);
struct mlx5_lag *ldev = container_of(delayed_work, struct mlx5_lag,
bond_work);
+ struct mlx5_devcom_comp_dev *devcom;
int status;
- status = mlx5_dev_list_trylock();
+ devcom = mlx5_lag_get_devcom_comp(ldev);
+ if (!devcom)
+ return;
+
+ status = mlx5_devcom_comp_trylock(devcom);
if (!status) {
mlx5_queue_bond_work(ldev, HZ);
return;
@@ -964,14 +989,14 @@ static void mlx5_do_bond_work(struct work_struct *work)
mutex_lock(&ldev->lock);
if (ldev->mode_changes_in_progress) {
mutex_unlock(&ldev->lock);
- mlx5_dev_list_unlock();
+ mlx5_devcom_comp_unlock(devcom);
mlx5_queue_bond_work(ldev, HZ);
return;
}
mlx5_do_bond(ldev);
mutex_unlock(&ldev->lock);
- mlx5_dev_list_unlock();
+ mlx5_devcom_comp_unlock(devcom);
}
static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
@@ -1212,13 +1237,14 @@ static void mlx5_ldev_remove_mdev(struct mlx5_lag *ldev,
dev->priv.lag = NULL;
}
-/* Must be called with intf_mutex held */
+/* Must be called with HCA devcom component lock held */
static int __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev)
{
+ struct mlx5_devcom_comp_dev *pos = NULL;
struct mlx5_lag *ldev = NULL;
struct mlx5_core_dev *tmp_dev;
- tmp_dev = mlx5_get_next_phys_dev_lag(dev);
+ tmp_dev = mlx5_devcom_get_next_peer_data(dev->priv.hca_devcom_comp, &pos);
if (tmp_dev)
ldev = mlx5_lag_dev(tmp_dev);
@@ -1275,10 +1301,13 @@ void mlx5_lag_add_mdev(struct mlx5_core_dev *dev)
if (!mlx5_lag_is_supported(dev))
return;
+ if (IS_ERR_OR_NULL(dev->priv.hca_devcom_comp))
+ return;
+
recheck:
- mlx5_dev_list_lock();
+ mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
err = __mlx5_lag_dev_add_mdev(dev);
- mlx5_dev_list_unlock();
+ mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
if (err) {
msleep(100);
@@ -1431,7 +1460,7 @@ void mlx5_lag_disable_change(struct mlx5_core_dev *dev)
if (!ldev)
return;
- mlx5_dev_list_lock();
+ mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
mutex_lock(&ldev->lock);
ldev->mode_changes_in_progress++;
@@ -1439,7 +1468,7 @@ void mlx5_lag_disable_change(struct mlx5_core_dev *dev)
mlx5_disable_lag(ldev);
mutex_unlock(&ldev->lock);
- mlx5_dev_list_unlock();
+ mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
}
void mlx5_lag_enable_change(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
index 481e92f39f..50fcb1eee5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
@@ -112,6 +112,7 @@ void mlx5_disable_lag(struct mlx5_lag *ldev);
void mlx5_lag_remove_devices(struct mlx5_lag *ldev);
int mlx5_deactivate_lag(struct mlx5_lag *ldev);
void mlx5_lag_add_devices(struct mlx5_lag *ldev);
+struct mlx5_devcom_comp_dev *mlx5_lag_get_devcom_comp(struct mlx5_lag *ldev);
static inline bool mlx5_lag_is_supported(struct mlx5_core_dev *dev)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
index 4bf1539152..82889f3050 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
@@ -65,12 +65,12 @@ err_metadata:
return err;
}
-#define MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS 2
+#define MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS 4
static int enable_mpesw(struct mlx5_lag *ldev)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
- struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
int err;
+ int i;
if (ldev->mode != MLX5_LAG_MODE_NONE)
return -EINVAL;
@@ -98,11 +98,11 @@ static int enable_mpesw(struct mlx5_lag *ldev)
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
- err = mlx5_eswitch_reload_reps(dev0->priv.eswitch);
- if (!err)
- err = mlx5_eswitch_reload_reps(dev1->priv.eswitch);
- if (err)
- goto err_rescan_drivers;
+ for (i = 0; i < ldev->ports; i++) {
+ err = mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
+ if (err)
+ goto err_rescan_drivers;
+ }
return 0;
@@ -112,8 +112,8 @@ err_rescan_drivers:
mlx5_deactivate_lag(ldev);
err_add_devices:
mlx5_lag_add_devices(ldev);
- mlx5_eswitch_reload_reps(dev0->priv.eswitch);
- mlx5_eswitch_reload_reps(dev1->priv.eswitch);
+ for (i = 0; i < ldev->ports; i++)
+ mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
mlx5_mpesw_metadata_cleanup(ldev);
return err;
}
@@ -129,9 +129,14 @@ static void disable_mpesw(struct mlx5_lag *ldev)
static void mlx5_mpesw_work(struct work_struct *work)
{
struct mlx5_mpesw_work_st *mpesww = container_of(work, struct mlx5_mpesw_work_st, work);
+ struct mlx5_devcom_comp_dev *devcom;
struct mlx5_lag *ldev = mpesww->lag;
- mlx5_dev_list_lock();
+ devcom = mlx5_lag_get_devcom_comp(ldev);
+ if (!devcom)
+ return;
+
+ mlx5_devcom_comp_lock(devcom);
mutex_lock(&ldev->lock);
if (ldev->mode_changes_in_progress) {
mpesww->result = -EAGAIN;
@@ -144,7 +149,7 @@ static void mlx5_mpesw_work(struct work_struct *work)
disable_mpesw(ldev);
unlock:
mutex_unlock(&ldev->lock);
- mlx5_dev_list_unlock();
+ mlx5_devcom_comp_unlock(devcom);
complete(&mpesww->comp);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
index 7d9bbb494d..101b3bb908 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
@@ -507,10 +507,7 @@ static int mlx5_lag_create_ttc_table(struct mlx5_lag *ldev)
mlx5_lag_set_outer_ttc_params(ldev, &ttc_params);
port_sel->outer.ttc = mlx5_create_ttc_table(dev, &ttc_params);
- if (IS_ERR(port_sel->outer.ttc))
- return PTR_ERR(port_sel->outer.ttc);
-
- return 0;
+ return PTR_ERR_OR_ZERO(port_sel->outer.ttc);
}
static int mlx5_lag_create_inner_ttc_table(struct mlx5_lag *ldev)
@@ -521,10 +518,7 @@ static int mlx5_lag_create_inner_ttc_table(struct mlx5_lag *ldev)
mlx5_lag_set_inner_ttc_params(ldev, &ttc_params);
port_sel->inner.ttc = mlx5_create_inner_ttc_table(dev, &ttc_params);
- if (IS_ERR(port_sel->inner.ttc))
- return PTR_ERR(port_sel->inner.ttc);
-
- return 0;
+ return PTR_ERR_OR_ZERO(port_sel->inner.ttc);
}
int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
index 00e67910e3..e8e50563e9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
@@ -31,6 +31,7 @@ struct mlx5_devcom_comp {
struct kref ref;
bool ready;
struct rw_semaphore sem;
+ struct lock_class_key lock_key;
};
struct mlx5_devcom_comp_dev {
@@ -119,6 +120,8 @@ mlx5_devcom_comp_alloc(u64 id, u64 key, mlx5_devcom_event_handler_t handler)
comp->key = key;
comp->handler = handler;
init_rwsem(&comp->sem);
+ lockdep_register_key(&comp->lock_key);
+ lockdep_set_class(&comp->sem, &comp->lock_key);
kref_init(&comp->ref);
INIT_LIST_HEAD(&comp->comp_dev_list_head);
@@ -133,6 +136,7 @@ mlx5_devcom_comp_release(struct kref *ref)
mutex_lock(&comp_list_lock);
list_del(&comp->comp_list);
mutex_unlock(&comp_list_lock);
+ lockdep_unregister_key(&comp->lock_key);
kfree(comp);
}
@@ -383,3 +387,24 @@ void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom_comp_dev *devcom,
*pos = tmp;
return data;
}
+
+void mlx5_devcom_comp_lock(struct mlx5_devcom_comp_dev *devcom)
+{
+ if (IS_ERR_OR_NULL(devcom))
+ return;
+ down_write(&devcom->comp->sem);
+}
+
+void mlx5_devcom_comp_unlock(struct mlx5_devcom_comp_dev *devcom)
+{
+ if (IS_ERR_OR_NULL(devcom))
+ return;
+ up_write(&devcom->comp->sem);
+}
+
+int mlx5_devcom_comp_trylock(struct mlx5_devcom_comp_dev *devcom)
+{
+ if (IS_ERR_OR_NULL(devcom))
+ return 0;
+ return down_write_trylock(&devcom->comp->sem);
+}
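[Editor's note, not part of the patch] The three helpers above deliberately tolerate an IS_ERR_OR_NULL handle so callers such as mlx5_lag_add_mdev() can invoke them without re-checking registration state. A compilable userspace sketch of the same defensive-wrapper shape, with a plain NULL standing in for IS_ERR_OR_NULL():

#include <pthread.h>
#include <stdio.h>

struct comp { pthread_rwlock_t sem; };
struct comp_dev { struct comp *comp; };

static void comp_lock(struct comp_dev *d)
{
	if (!d)                 /* invalid handle: silently skip */
		return;
	pthread_rwlock_wrlock(&d->comp->sem);
}

static void comp_unlock(struct comp_dev *d)
{
	if (!d)
		return;
	pthread_rwlock_unlock(&d->comp->sem);
}

static int comp_trylock(struct comp_dev *d)
{
	if (!d)
		return 0;       /* "not acquired", mirroring the kernel helper */
	return pthread_rwlock_trywrlock(&d->comp->sem) == 0;
}

int main(void)
{
	struct comp c = { PTHREAD_RWLOCK_INITIALIZER };
	struct comp_dev dev = { &c };

	comp_lock(&dev);
	comp_unlock(&dev);
	printf("trylock on invalid handle: %d\n", comp_trylock(NULL));
	return 0;
}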
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
index 8389ac0af7..fc23bbef87 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
@@ -8,6 +8,8 @@
enum mlx5_devcom_component {
MLX5_DEVCOM_ESW_OFFLOADS,
+ MLX5_DEVCOM_MPV,
+ MLX5_DEVCOM_HCA_PORTS,
MLX5_DEVCOM_NUM_COMPONENTS,
};
@@ -51,4 +53,8 @@ void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom_comp_dev *devcom,
data; \
data = mlx5_devcom_get_next_peer_data_rcu(devcom, &pos))
+void mlx5_devcom_comp_lock(struct mlx5_devcom_comp_dev *devcom);
+void mlx5_devcom_comp_unlock(struct mlx5_devcom_comp_dev *devcom);
+int mlx5_devcom_comp_trylock(struct mlx5_devcom_comp_dev *devcom);
+
#endif /* __LIB_MLX5_DEVCOM_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
index 69a7545977..4b7f7131c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
@@ -85,7 +85,6 @@ void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn);
struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev);
void mlx5_cq_tasklet_cb(struct tasklet_struct *t);
-struct cpumask *mlx5_eq_comp_cpumask(struct mlx5_core_dev *dev, int ix);
u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq);
void mlx5_cmd_eq_recover(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c
index 6e3f178d6f..234cd00f71 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c
@@ -2,8 +2,11 @@
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#include "fs_core.h"
+#include "fs_cmd.h"
+#include "en.h"
#include "lib/ipsec_fs_roce.h"
#include "mlx5_core.h"
+#include <linux/random.h>
struct mlx5_ipsec_miss {
struct mlx5_flow_group *group;
@@ -15,6 +18,12 @@ struct mlx5_ipsec_rx_roce {
struct mlx5_flow_table *ft;
struct mlx5_flow_handle *rule;
struct mlx5_ipsec_miss roce_miss;
+ struct mlx5_flow_table *nic_master_ft;
+ struct mlx5_flow_group *nic_master_group;
+ struct mlx5_flow_handle *nic_master_rule;
+ struct mlx5_flow_table *goto_alias_ft;
+ u32 alias_id;
+ char key[ACCESS_KEY_LEN];
struct mlx5_flow_table *ft_rdma;
struct mlx5_flow_namespace *ns_rdma;
@@ -24,6 +33,9 @@ struct mlx5_ipsec_tx_roce {
struct mlx5_flow_group *g;
struct mlx5_flow_table *ft;
struct mlx5_flow_handle *rule;
+ struct mlx5_flow_table *goto_alias_ft;
+ u32 alias_id;
+ char key[ACCESS_KEY_LEN];
struct mlx5_flow_namespace *ns;
};
@@ -31,6 +43,7 @@ struct mlx5_ipsec_fs {
struct mlx5_ipsec_rx_roce ipv4_rx;
struct mlx5_ipsec_rx_roce ipv6_rx;
struct mlx5_ipsec_tx_roce tx;
+ struct mlx5_devcom_comp_dev **devcom;
};
static void ipsec_fs_roce_setup_udp_dport(struct mlx5_flow_spec *spec,
@@ -43,11 +56,83 @@ static void ipsec_fs_roce_setup_udp_dport(struct mlx5_flow_spec *spec,
MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport, dport);
}
+static bool ipsec_fs_create_alias_supported_one(struct mlx5_core_dev *mdev)
+{
+ u64 obj_allowed = MLX5_CAP_GEN_2_64(mdev, allowed_object_for_other_vhca_access);
+ u32 obj_supp = MLX5_CAP_GEN_2(mdev, cross_vhca_object_to_object_supported);
+
+ if (!(obj_supp &
+ MLX5_CROSS_VHCA_OBJ_TO_OBJ_SUPPORTED_LOCAL_FLOW_TABLE_TO_REMOTE_FLOW_TABLE_MISS))
+ return false;
+
+ if (!(obj_allowed & MLX5_ALLOWED_OBJ_FOR_OTHER_VHCA_ACCESS_FLOW_TABLE))
+ return false;
+
+ return true;
+}
+
+static bool ipsec_fs_create_alias_supported(struct mlx5_core_dev *mdev,
+ struct mlx5_core_dev *master_mdev)
+{
+ if (ipsec_fs_create_alias_supported_one(mdev) &&
+ ipsec_fs_create_alias_supported_one(master_mdev))
+ return true;
+
+ return false;
+}
+
+static int ipsec_fs_create_aliased_ft(struct mlx5_core_dev *ibv_owner,
+ struct mlx5_core_dev *ibv_allowed,
+ struct mlx5_flow_table *ft,
+ u32 *obj_id, char *alias_key, bool from_event)
+{
+ u32 aliased_object_id = (ft->type << FT_ID_FT_TYPE_OFFSET) | ft->id;
+ u16 vhca_id_to_be_accessed = MLX5_CAP_GEN(ibv_owner, vhca_id);
+ struct mlx5_cmd_allow_other_vhca_access_attr allow_attr = {};
+ struct mlx5_cmd_alias_obj_create_attr alias_attr = {};
+ int ret;
+ int i;
+
+ if (!ipsec_fs_create_alias_supported(ibv_owner, ibv_allowed))
+ return -EOPNOTSUPP;
+
+ if (!from_event)
+ for (i = 0; i < ACCESS_KEY_LEN; i++)
+ alias_key[i] = get_random_u64() & 0xFF;
+
+ memcpy(allow_attr.access_key, alias_key, ACCESS_KEY_LEN);
+ allow_attr.obj_type = MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS;
+ allow_attr.obj_id = aliased_object_id;
+
+ if (!from_event) {
+ ret = mlx5_cmd_allow_other_vhca_access(ibv_owner, &allow_attr);
+ if (ret) {
+ mlx5_core_err(ibv_owner, "Failed to allow other vhca access err=%d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ memcpy(alias_attr.access_key, alias_key, ACCESS_KEY_LEN);
+ alias_attr.obj_id = aliased_object_id;
+ alias_attr.obj_type = MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS;
+ alias_attr.vhca_id = vhca_id_to_be_accessed;
+ ret = mlx5_cmd_alias_obj_create(ibv_allowed, &alias_attr, obj_id);
+ if (ret) {
+ mlx5_core_err(ibv_allowed, "Failed to create alias object err=%d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int
ipsec_fs_roce_rx_rule_setup(struct mlx5_core_dev *mdev,
struct mlx5_flow_destination *default_dst,
struct mlx5_ipsec_rx_roce *roce)
{
+ bool is_mpv_slave = mlx5_core_is_mp_slave(mdev);
struct mlx5_flow_destination dst = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_handle *rule;
@@ -61,14 +146,19 @@ ipsec_fs_roce_rx_rule_setup(struct mlx5_core_dev *mdev,
ipsec_fs_roce_setup_udp_dport(spec, ROCE_V2_UDP_DPORT);
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
- dst.ft = roce->ft_rdma;
+ if (is_mpv_slave) {
+ dst.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dst.ft = roce->goto_alias_ft;
+ } else {
+ dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
+ dst.ft = roce->ft_rdma;
+ }
rule = mlx5_add_flow_rules(roce->ft, spec, &flow_act, &dst, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
mlx5_core_err(mdev, "Fail to add RX RoCE IPsec rule err=%d\n",
err);
- goto fail_add_rule;
+ goto out;
}
roce->rule = rule;
@@ -84,12 +174,30 @@ ipsec_fs_roce_rx_rule_setup(struct mlx5_core_dev *mdev,
roce->roce_miss.rule = rule;
+ if (!is_mpv_slave)
+ goto out;
+
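+ /* Traffic arriving through the alias lands in the master's NIC table;
+ * forward it onward to the master's RDMA RX table.
+ */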
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
+ dst.ft = roce->ft_rdma;
+ rule = mlx5_add_flow_rules(roce->nic_master_ft, NULL, &flow_act, &dst,
+ 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Fail to add RX RoCE IPsec rule for alias err=%d\n",
+ err);
+ goto fail_add_nic_master_rule;
+ }
+ roce->nic_master_rule = rule;
+
kvfree(spec);
return 0;
+fail_add_nic_master_rule:
+ mlx5_del_flow_rules(roce->roce_miss.rule);
fail_add_default_rule:
mlx5_del_flow_rules(roce->rule);
-fail_add_rule:
+out:
kvfree(spec);
return err;
}
@@ -120,25 +228,373 @@ out:
return err;
}
-void mlx5_ipsec_fs_roce_tx_destroy(struct mlx5_ipsec_fs *ipsec_roce)
+static int ipsec_fs_roce_tx_mpv_rule_setup(struct mlx5_core_dev *mdev,
+ struct mlx5_ipsec_tx_roce *roce,
+ struct mlx5_flow_table *pol_ft)
{
+ struct mlx5_flow_destination dst = {};
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
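+ /* Match only traffic that originated from this slave's port, so other
+ * ports sharing the master's RDMA TX tables are left untouched.
+ */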
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.source_vhca_port);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters.source_vhca_port,
+ MLX5_CAP_GEN(mdev, native_port_num));
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
+ dst.ft = roce->goto_alias_ft;
+ rule = mlx5_add_flow_rules(roce->ft, spec, &flow_act, &dst, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Fail to add TX RoCE IPsec rule err=%d\n",
+ err);
+ goto out;
+ }
+ roce->rule = rule;
+
+ /* No need for a miss rule: on miss we go to the next PRIO, where the
+ * master, if configured, will catch the traffic and steer it to its
+ * encryption table.
+ */
+
+out:
+ kvfree(spec);
+ return err;
+}
+
+#define MLX5_TX_ROCE_GROUP_SIZE BIT(0)
+#define MLX5_IPSEC_RDMA_TX_FT_LEVEL 0
+#define MLX5_IPSEC_NIC_GOTO_ALIAS_FT_LEVEL 3 /* Since last used level in NIC ipsec is 2 */
+
+static int ipsec_fs_roce_tx_mpv_create_ft(struct mlx5_core_dev *mdev,
+ struct mlx5_ipsec_tx_roce *roce,
+ struct mlx5_flow_table *pol_ft,
+ struct mlx5e_priv *peer_priv,
+ bool from_event)
+{
+ struct mlx5_flow_namespace *roce_ns, *nic_ns;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_table next_ft;
+ struct mlx5_flow_table *ft;
+ int err;
+
+ roce_ns = mlx5_get_flow_namespace(peer_priv->mdev, MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC);
+ if (!roce_ns)
+ return -EOPNOTSUPP;
+
+ nic_ns = mlx5_get_flow_namespace(peer_priv->mdev, MLX5_FLOW_NAMESPACE_EGRESS_IPSEC);
+ if (!nic_ns)
+ return -EOPNOTSUPP;
+
+ err = ipsec_fs_create_aliased_ft(mdev, peer_priv->mdev, pol_ft, &roce->alias_id, roce->key,
+ from_event);
+ if (err)
+ return err;
+
+ next_ft.id = roce->alias_id;
+ ft_attr.max_fte = 1;
+ ft_attr.next_ft = &next_ft;
+ ft_attr.level = MLX5_IPSEC_NIC_GOTO_ALIAS_FT_LEVEL;
+ ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED;
+ ft = mlx5_create_flow_table(nic_ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec goto alias ft err=%d\n", err);
+ goto destroy_alias;
+ }
+
+ roce->goto_alias_ft = ft;
+
+ memset(&ft_attr, 0, sizeof(ft_attr));
+ ft_attr.max_fte = 1;
+ ft_attr.level = MLX5_IPSEC_RDMA_TX_FT_LEVEL;
+ ft = mlx5_create_flow_table(roce_ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec tx ft err=%d\n", err);
+ goto destroy_alias_ft;
+ }
+
+ roce->ft = ft;
+
+ return 0;
+
+destroy_alias_ft:
+ mlx5_destroy_flow_table(roce->goto_alias_ft);
+destroy_alias:
+ mlx5_cmd_alias_obj_destroy(peer_priv->mdev, roce->alias_id,
+ MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS);
+ return err;
+}
+
+static int ipsec_fs_roce_tx_mpv_create_group_rules(struct mlx5_core_dev *mdev,
+ struct mlx5_ipsec_tx_roce *roce,
+ struct mlx5_flow_table *pol_ft,
+ u32 *in)
+{
+ struct mlx5_flow_group *g;
+ int ix = 0;
+ int err;
+ u8 *mc;
+
+ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+ MLX5_SET_TO_ONES(fte_match_param, mc, misc_parameters.source_vhca_port);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS);
+
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5_TX_ROCE_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ g = mlx5_create_flow_group(roce->ft, in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec tx group err=%d\n", err);
+ return err;
+ }
+ roce->g = g;
+
+ err = ipsec_fs_roce_tx_mpv_rule_setup(mdev, roce, pol_ft);
+ if (err) {
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec tx rules err=%d\n", err);
+ goto destroy_group;
+ }
+
+ return 0;
+
+destroy_group:
+ mlx5_destroy_flow_group(roce->g);
+ return err;
+}
+
+static int ipsec_fs_roce_tx_mpv_create(struct mlx5_core_dev *mdev,
+ struct mlx5_ipsec_fs *ipsec_roce,
+ struct mlx5_flow_table *pol_ft,
+ u32 *in, bool from_event)
+{
+ struct mlx5_devcom_comp_dev *tmp = NULL;
+ struct mlx5_ipsec_tx_roce *roce;
+ struct mlx5e_priv *peer_priv;
+ int err;
+
+ if (!mlx5_devcom_for_each_peer_begin(*ipsec_roce->devcom))
+ return -EOPNOTSUPP;
+
+ peer_priv = mlx5_devcom_get_next_peer_data(*ipsec_roce->devcom, &tmp);
+ if (!peer_priv) {
+ err = -EOPNOTSUPP;
+ goto release_peer;
+ }
+
+ roce = &ipsec_roce->tx;
+
+ err = ipsec_fs_roce_tx_mpv_create_ft(mdev, roce, pol_ft, peer_priv, from_event);
+ if (err) {
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec tables err=%d\n", err);
+ goto release_peer;
+ }
+
+ err = ipsec_fs_roce_tx_mpv_create_group_rules(mdev, roce, pol_ft, in);
+ if (err) {
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec tx group/rule err=%d\n", err);
+ goto destroy_tables;
+ }
+
+ mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom);
+ return 0;
+
+destroy_tables:
+ mlx5_destroy_flow_table(roce->ft);
+ mlx5_destroy_flow_table(roce->goto_alias_ft);
+ mlx5_cmd_alias_obj_destroy(peer_priv->mdev, roce->alias_id,
+ MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS);
+release_peer:
+ mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom);
+ return err;
+}
+
+static void roce_rx_mpv_destroy_tables(struct mlx5_core_dev *mdev, struct mlx5_ipsec_rx_roce *roce)
+{
+ mlx5_destroy_flow_table(roce->goto_alias_ft);
+ mlx5_cmd_alias_obj_destroy(mdev, roce->alias_id,
+ MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS);
+ mlx5_destroy_flow_group(roce->nic_master_group);
+ mlx5_destroy_flow_table(roce->nic_master_ft);
+}
+
+#define MLX5_RX_ROCE_GROUP_SIZE BIT(0)
+#define MLX5_IPSEC_RX_IPV4_FT_LEVEL 3
+#define MLX5_IPSEC_RX_IPV6_FT_LEVEL 2
+
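+/* On an MPV slave, RX RoCE traffic is steered through a local goto-alias
+ * table into an alias of a table in the master's NIC namespace, which in
+ * turn forwards to the RDMA RX IPsec table created on the master.
+ */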
+static int ipsec_fs_roce_rx_mpv_create(struct mlx5_core_dev *mdev,
+ struct mlx5_ipsec_fs *ipsec_roce,
+ struct mlx5_flow_namespace *ns,
+ u32 family, u32 level, u32 prio)
+{
+ struct mlx5_flow_namespace *roce_ns, *nic_ns;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_devcom_comp_dev *tmp = NULL;
+ struct mlx5_ipsec_rx_roce *roce;
+ struct mlx5_flow_table next_ft;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *g;
+ struct mlx5e_priv *peer_priv;
+ int ix = 0;
+ u32 *in;
+ int err;
+
+ roce = (family == AF_INET) ? &ipsec_roce->ipv4_rx :
+ &ipsec_roce->ipv6_rx;
+
+ if (!mlx5_devcom_for_each_peer_begin(*ipsec_roce->devcom))
+ return -EOPNOTSUPP;
+
+ peer_priv = mlx5_devcom_get_next_peer_data(*ipsec_roce->devcom, &tmp);
+ if (!peer_priv) {
+ err = -EOPNOTSUPP;
+ goto release_peer;
+ }
+
+ roce_ns = mlx5_get_flow_namespace(peer_priv->mdev, MLX5_FLOW_NAMESPACE_RDMA_RX_IPSEC);
+ if (!roce_ns) {
+ err = -EOPNOTSUPP;
+ goto release_peer;
+ }
+
+ nic_ns = mlx5_get_flow_namespace(peer_priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL);
+ if (!nic_ns) {
+ err = -EOPNOTSUPP;
+ goto release_peer;
+ }
+
+ in = kvzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in), GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto release_peer;
+ }
+
+ ft_attr.level = (family == AF_INET) ? MLX5_IPSEC_RX_IPV4_FT_LEVEL :
+ MLX5_IPSEC_RX_IPV6_FT_LEVEL;
+ ft_attr.max_fte = 1;
+ ft = mlx5_create_flow_table(roce_ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec rx ft at rdma master err=%d\n", err);
+ goto free_in;
+ }
+
+ roce->ft_rdma = ft;
+
+ ft_attr.max_fte = 1;
+ ft_attr.prio = prio;
+ ft_attr.level = level + 2;
+ ft = mlx5_create_flow_table(nic_ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec rx ft at NIC master err=%d\n", err);
+ goto destroy_ft_rdma;
+ }
+ roce->nic_master_ft = ft;
+
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += 1;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ g = mlx5_create_flow_group(roce->nic_master_ft, in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec rx group aliased err=%d\n", err);
+ goto destroy_nic_master_ft;
+ }
+ roce->nic_master_group = g;
+
+ err = ipsec_fs_create_aliased_ft(peer_priv->mdev, mdev, roce->nic_master_ft,
+ &roce->alias_id, roce->key, false);
+ if (err) {
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec rx alias FT err=%d\n", err);
+ goto destroy_group;
+ }
+
+ next_ft.id = roce->alias_id;
+ ft_attr.max_fte = 1;
+ ft_attr.prio = prio;
+ ft_attr.level = roce->ft->level + 1;
+ ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED;
+ ft_attr.next_ft = &next_ft;
+ ft = mlx5_create_flow_table(ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec rx ft at NIC slave err=%d\n", err);
+ goto destroy_alias;
+ }
+ roce->goto_alias_ft = ft;
+
+ kvfree(in);
+ mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom);
+ return 0;
+
+destroy_alias:
+ mlx5_cmd_alias_obj_destroy(mdev, roce->alias_id,
+ MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS);
+destroy_group:
+ mlx5_destroy_flow_group(roce->nic_master_group);
+destroy_nic_master_ft:
+ mlx5_destroy_flow_table(roce->nic_master_ft);
+destroy_ft_rdma:
+ mlx5_destroy_flow_table(roce->ft_rdma);
+free_in:
+ kvfree(in);
+release_peer:
+ mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom);
+ return err;
+}
+
+void mlx5_ipsec_fs_roce_tx_destroy(struct mlx5_ipsec_fs *ipsec_roce,
+ struct mlx5_core_dev *mdev)
+{
+ struct mlx5_devcom_comp_dev *tmp = NULL;
struct mlx5_ipsec_tx_roce *tx_roce;
+ struct mlx5e_priv *peer_priv;
if (!ipsec_roce)
return;
tx_roce = &ipsec_roce->tx;
+ if (!tx_roce->ft)
+ return; /* In case RoCE was cleaned up via the MPV event flow */
+
mlx5_del_flow_rules(tx_roce->rule);
mlx5_destroy_flow_group(tx_roce->g);
mlx5_destroy_flow_table(tx_roce->ft);
-}
-#define MLX5_TX_ROCE_GROUP_SIZE BIT(0)
+ if (!mlx5_core_is_mp_slave(mdev))
+ return;
+
+ if (!mlx5_devcom_for_each_peer_begin(*ipsec_roce->devcom))
+ return;
+
+ peer_priv = mlx5_devcom_get_next_peer_data(*ipsec_roce->devcom, &tmp);
+ if (!peer_priv) {
+ mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom);
+ return;
+ }
+
+ mlx5_destroy_flow_table(tx_roce->goto_alias_ft);
+ mlx5_cmd_alias_obj_destroy(peer_priv->mdev, tx_roce->alias_id,
+ MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS);
+ mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom);
+ tx_roce->ft = NULL;
+}
int mlx5_ipsec_fs_roce_tx_create(struct mlx5_core_dev *mdev,
struct mlx5_ipsec_fs *ipsec_roce,
- struct mlx5_flow_table *pol_ft)
+ struct mlx5_flow_table *pol_ft,
+ bool from_event)
{
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_ipsec_tx_roce *roce;
@@ -157,7 +613,14 @@ int mlx5_ipsec_fs_roce_tx_create(struct mlx5_core_dev *mdev,
if (!in)
return -ENOMEM;
+ if (mlx5_core_is_mp_slave(mdev)) {
+ err = ipsec_fs_roce_tx_mpv_create(mdev, ipsec_roce, pol_ft, in, from_event);
+ goto free_in;
+ }
+
ft_attr.max_fte = 1;
+ ft_attr.prio = 1;
+ ft_attr.level = MLX5_IPSEC_RDMA_TX_FT_LEVEL;
ft = mlx5_create_flow_table(roce->ns, &ft_attr);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
@@ -209,8 +672,10 @@ struct mlx5_flow_table *mlx5_ipsec_fs_roce_ft_get(struct mlx5_ipsec_fs *ipsec_ro
return rx_roce->ft;
}
-void mlx5_ipsec_fs_roce_rx_destroy(struct mlx5_ipsec_fs *ipsec_roce, u32 family)
+void mlx5_ipsec_fs_roce_rx_destroy(struct mlx5_ipsec_fs *ipsec_roce, u32 family,
+ struct mlx5_core_dev *mdev)
{
+ bool is_mpv_slave = mlx5_core_is_mp_slave(mdev);
struct mlx5_ipsec_rx_roce *rx_roce;
if (!ipsec_roce)
@@ -218,23 +683,29 @@ void mlx5_ipsec_fs_roce_rx_destroy(struct mlx5_ipsec_fs *ipsec_roce, u32 family)
rx_roce = (family == AF_INET) ? &ipsec_roce->ipv4_rx :
&ipsec_roce->ipv6_rx;
+ if (!rx_roce->ft)
+ return; /* In case RoCE was cleaned up via the MPV event flow */
+ if (is_mpv_slave)
+ mlx5_del_flow_rules(rx_roce->nic_master_rule);
mlx5_del_flow_rules(rx_roce->roce_miss.rule);
mlx5_del_flow_rules(rx_roce->rule);
+ if (is_mpv_slave)
+ roce_rx_mpv_destroy_tables(mdev, rx_roce);
mlx5_destroy_flow_table(rx_roce->ft_rdma);
mlx5_destroy_flow_group(rx_roce->roce_miss.group);
mlx5_destroy_flow_group(rx_roce->g);
mlx5_destroy_flow_table(rx_roce->ft);
+ rx_roce->ft = NULL;
}
-#define MLX5_RX_ROCE_GROUP_SIZE BIT(0)
-
int mlx5_ipsec_fs_roce_rx_create(struct mlx5_core_dev *mdev,
struct mlx5_ipsec_fs *ipsec_roce,
struct mlx5_flow_namespace *ns,
struct mlx5_flow_destination *default_dst,
u32 family, u32 level, u32 prio)
{
+ bool is_mpv_slave = mlx5_core_is_mp_slave(mdev);
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_ipsec_rx_roce *roce;
struct mlx5_flow_table *ft;
@@ -298,18 +769,28 @@ int mlx5_ipsec_fs_roce_rx_create(struct mlx5_core_dev *mdev,
}
roce->roce_miss.group = g;
- memset(&ft_attr, 0, sizeof(ft_attr));
- if (family == AF_INET)
- ft_attr.level = 1;
- ft = mlx5_create_flow_table(roce->ns_rdma, &ft_attr);
- if (IS_ERR(ft)) {
- err = PTR_ERR(ft);
- mlx5_core_err(mdev, "Fail to create RoCE IPsec rx ft at rdma err=%d\n", err);
- goto fail_rdma_table;
+ if (is_mpv_slave) {
+ err = ipsec_fs_roce_rx_mpv_create(mdev, ipsec_roce, ns, family, level, prio);
+ if (err) {
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec rx alias err=%d\n", err);
+ goto fail_mpv_create;
+ }
+ } else {
+ memset(&ft_attr, 0, sizeof(ft_attr));
+ if (family == AF_INET)
+ ft_attr.level = 1;
+ ft_attr.max_fte = 1;
+ ft = mlx5_create_flow_table(roce->ns_rdma, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev,
+ "Fail to create RoCE IPsec rx ft at rdma err=%d\n", err);
+ goto fail_rdma_table;
+ }
+
+ roce->ft_rdma = ft;
}
- roce->ft_rdma = ft;
-
err = ipsec_fs_roce_rx_rule_setup(mdev, default_dst, roce);
if (err) {
mlx5_core_err(mdev, "Fail to create RoCE IPsec rx rules err=%d\n", err);
@@ -320,7 +801,10 @@ int mlx5_ipsec_fs_roce_rx_create(struct mlx5_core_dev *mdev,
return 0;
fail_setup_rule:
+ if (is_mpv_slave)
+ roce_rx_mpv_destroy_tables(mdev, roce);
mlx5_destroy_flow_table(roce->ft_rdma);
+fail_mpv_create:
fail_rdma_table:
mlx5_destroy_flow_group(roce->roce_miss.group);
fail_mgroup:
@@ -332,12 +816,24 @@ fail_nomem:
return err;
}
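+/* RoCE IPsec in an MPV configuration relies on cross-vhca alias objects;
+ * on a device without multi-port enabled there is nothing to restrict.
+ */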
+bool mlx5_ipsec_fs_is_mpv_roce_supported(struct mlx5_core_dev *mdev)
+{
+ if (!mlx5_core_mp_enabled(mdev))
+ return true;
+
+ if (ipsec_fs_create_alias_supported_one(mdev))
+ return true;
+
+ return false;
+}
+
void mlx5_ipsec_fs_roce_cleanup(struct mlx5_ipsec_fs *ipsec_roce)
{
kfree(ipsec_roce);
}
-struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev)
+struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev,
+ struct mlx5_devcom_comp_dev **devcom)
{
struct mlx5_ipsec_fs *roce_ipsec;
struct mlx5_flow_namespace *ns;
@@ -363,6 +859,8 @@ struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev)
roce_ipsec->tx.ns = ns;
+ roce_ipsec->devcom = devcom;
+
return roce_ipsec;
err_tx:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h
index 9712d705fe..2a1af78309 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h
@@ -4,22 +4,28 @@
#ifndef __MLX5_LIB_IPSEC_H__
#define __MLX5_LIB_IPSEC_H__
+#include "lib/devcom.h"
+
struct mlx5_ipsec_fs;
struct mlx5_flow_table *
mlx5_ipsec_fs_roce_ft_get(struct mlx5_ipsec_fs *ipsec_roce, u32 family);
void mlx5_ipsec_fs_roce_rx_destroy(struct mlx5_ipsec_fs *ipsec_roce,
- u32 family);
+ u32 family, struct mlx5_core_dev *mdev);
int mlx5_ipsec_fs_roce_rx_create(struct mlx5_core_dev *mdev,
struct mlx5_ipsec_fs *ipsec_roce,
struct mlx5_flow_namespace *ns,
struct mlx5_flow_destination *default_dst,
u32 family, u32 level, u32 prio);
-void mlx5_ipsec_fs_roce_tx_destroy(struct mlx5_ipsec_fs *ipsec_roce);
+void mlx5_ipsec_fs_roce_tx_destroy(struct mlx5_ipsec_fs *ipsec_roce,
+ struct mlx5_core_dev *mdev);
int mlx5_ipsec_fs_roce_tx_create(struct mlx5_core_dev *mdev,
struct mlx5_ipsec_fs *ipsec_roce,
- struct mlx5_flow_table *pol_ft);
+ struct mlx5_flow_table *pol_ft,
+ bool from_event);
void mlx5_ipsec_fs_roce_cleanup(struct mlx5_ipsec_fs *ipsec_roce);
-struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev);
+struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev,
+ struct mlx5_devcom_comp_dev **devcom);
+bool mlx5_ipsec_fs_is_mpv_roce_supported(struct mlx5_core_dev *mdev);
#endif /* __MLX5_LIB_IPSEC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 6ca91c0e8a..a17152c1cb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -73,6 +73,7 @@
#include "sf/sf.h"
#include "mlx5_irq.h"
#include "hwmon.h"
+#include "lag/lag.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver");
@@ -952,6 +953,27 @@ static void mlx5_pci_close(struct mlx5_core_dev *dev)
mlx5_pci_disable_device(dev);
}
+static void mlx5_register_hca_devcom_comp(struct mlx5_core_dev *dev)
+{
+ /* This component is used to sync the addition of core_dev to lag_dev and
+ * to sync changes of mlx5_adev_devices between the LAG layer and other
+ * layers.
+ */
+ if (!mlx5_lag_is_supported(dev))
+ return;
+
+ dev->priv.hca_devcom_comp =
+ mlx5_devcom_register_component(dev->priv.devc, MLX5_DEVCOM_HCA_PORTS,
+ mlx5_query_nic_system_image_guid(dev),
+ NULL, dev);
+ if (IS_ERR_OR_NULL(dev->priv.hca_devcom_comp))
+ mlx5_core_err(dev, "Failed to register devcom HCA component\n");
+}
+
+static void mlx5_unregister_hca_devcom_comp(struct mlx5_core_dev *dev)
+{
+ mlx5_devcom_unregister_component(dev->priv.hca_devcom_comp);
+}
+
static int mlx5_init_once(struct mlx5_core_dev *dev)
{
int err;
@@ -960,6 +982,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
if (IS_ERR(dev->priv.devc))
mlx5_core_warn(dev, "failed to register devcom device %ld\n",
PTR_ERR(dev->priv.devc));
+ mlx5_register_hca_devcom_comp(dev);
err = mlx5_query_board_id(dev);
if (err) {
@@ -1094,6 +1117,7 @@ err_eq_cleanup:
err_irq_cleanup:
mlx5_irq_table_cleanup(dev);
err_devcom:
+ mlx5_unregister_hca_devcom_comp(dev);
mlx5_devcom_unregister_device(dev->priv.devc);
return err;
@@ -1123,6 +1147,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_events_cleanup(dev);
mlx5_eq_table_cleanup(dev);
mlx5_irq_table_cleanup(dev);
+ mlx5_unregister_hca_devcom_comp(dev);
mlx5_devcom_unregister_device(dev->priv.devc);
}
@@ -1411,9 +1436,9 @@ err_irq_table:
static void mlx5_unload(struct mlx5_core_dev *dev)
{
+ mlx5_eswitch_disable(dev->priv.eswitch);
mlx5_devlink_traps_unregister(priv_to_devlink(dev));
mlx5_sf_dev_table_destroy(dev);
- mlx5_eswitch_disable(dev->priv.eswitch);
mlx5_sriov_detach(dev);
mlx5_lag_remove_mdev(dev);
mlx5_ec_cleanup(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 124352459c..6b14e347d9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -41,6 +41,7 @@
#include <linux/mlx5/cq.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/driver.h>
+#include "lib/devcom.h"
extern uint mlx5_core_debug_mask;
@@ -97,6 +98,22 @@ do { \
__func__, __LINE__, current->pid, \
##__VA_ARGS__)
+#define ACCESS_KEY_LEN 32
+#define FT_ID_FT_TYPE_OFFSET 24
+
+struct mlx5_cmd_allow_other_vhca_access_attr {
+ u16 obj_type;
+ u32 obj_id;
+ u8 access_key[ACCESS_KEY_LEN];
+};
+
+struct mlx5_cmd_alias_obj_create_attr {
+ u32 obj_id;
+ u16 vhca_id;
+ u16 obj_type;
+ u8 access_key[ACCESS_KEY_LEN];
+};
+
static inline void mlx5_printk(struct mlx5_core_dev *dev, int level, const char *format, ...)
{
struct device *device = dev->device;
@@ -143,6 +160,8 @@ enum mlx5_semaphore_space_address {
#define MLX5_DEFAULT_PROF 2
#define MLX5_SF_PROF 3
+#define MLX5_NUM_FW_CMD_THREADS 8
+#define MLX5_DEV_MAX_WQS MLX5_NUM_FW_CMD_THREADS
static inline int mlx5_flexible_inlen(struct mlx5_core_dev *dev, size_t fixed,
size_t item_size, size_t num_items,
@@ -248,10 +267,6 @@ int mlx5_register_device(struct mlx5_core_dev *dev);
void mlx5_unregister_device(struct mlx5_core_dev *dev);
void mlx5_dev_set_lightweight(struct mlx5_core_dev *dev);
bool mlx5_dev_is_lightweight(struct mlx5_core_dev *dev);
-struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev);
-void mlx5_dev_list_lock(void);
-void mlx5_dev_list_unlock(void);
-int mlx5_dev_list_trylock(void);
void mlx5_fw_reporters_create(struct mlx5_core_dev *dev);
int mlx5_query_mtpps(struct mlx5_core_dev *dev, u32 *mtpps, u32 mtpps_size);
@@ -290,14 +305,12 @@ static inline int mlx5_rescan_drivers(struct mlx5_core_dev *dev)
{
int ret;
- mlx5_dev_list_lock();
+ mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
ret = mlx5_rescan_drivers_locked(dev);
- mlx5_dev_list_unlock();
+ mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
return ret;
}
-void mlx5_lag_update(struct mlx5_core_dev *dev);
-
enum {
MLX5_NIC_IFC_FULL = 0,
MLX5_NIC_IFC_DISABLED = 1,
@@ -331,7 +344,6 @@ int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap
#define mlx5_vport_get_other_func_general_cap(dev, vport, out) \
mlx5_vport_get_other_func_cap(dev, vport, out, MLX5_CAP_GENERAL)
-void mlx5_events_work_enqueue(struct mlx5_core_dev *dev, struct work_struct *work);
static inline u32 mlx5_sriov_get_vf_total_msix(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
@@ -343,6 +355,12 @@ bool mlx5_eth_supported(struct mlx5_core_dev *dev);
bool mlx5_rdma_supported(struct mlx5_core_dev *dev);
bool mlx5_vnet_supported(struct mlx5_core_dev *dev);
bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev);
+int mlx5_cmd_allow_other_vhca_access(struct mlx5_core_dev *dev,
+ struct mlx5_cmd_allow_other_vhca_access_attr *attr);
+int mlx5_cmd_alias_obj_create(struct mlx5_core_dev *dev,
+ struct mlx5_cmd_alias_obj_create_attr *alias_attr,
+ u32 *obj_id);
+int mlx5_cmd_alias_obj_destroy(struct mlx5_core_dev *dev, u32 obj_id, u16 obj_type);
static inline u16 mlx5_core_ec_vf_vport_base(const struct mlx5_core_dev *dev)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index be70d1f23a..7d8c732818 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -1098,10 +1098,11 @@ static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = {
[MLX5E_CAUI_4_100GBASE_CR4_KR4] = 100000,
[MLX5E_100GAUI_2_100GBASE_CR2_KR2] = 100000,
[MLX5E_200GAUI_4_200GBASE_CR4_KR4] = 200000,
- [MLX5E_400GAUI_8] = 400000,
+ [MLX5E_400GAUI_8_400GBASE_CR8] = 400000,
[MLX5E_100GAUI_1_100GBASE_CR_KR] = 100000,
[MLX5E_200GAUI_2_200GBASE_CR2_KR2] = 200000,
[MLX5E_400GAUI_4_400GBASE_CR4_KR4] = 400000,
+ [MLX5E_800GAUI_8_800GBASE_CR8_KR8] = 800000,
};
int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
index 05e148db98..c93492b677 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
@@ -14,17 +14,22 @@
struct mlx5_sf_dev_table {
struct xarray devices;
- unsigned int max_sfs;
phys_addr_t base_address;
u64 sf_bar_length;
struct notifier_block nb;
- struct mutex table_lock; /* Serializes sf life cycle and vhca state change handler */
struct workqueue_struct *active_wq;
struct work_struct work;
u8 stop_active_wq:1;
struct mlx5_core_dev *dev;
};
+struct mlx5_sf_dev_active_work_ctx {
+ struct work_struct work;
+ struct mlx5_vhca_state_event event;
+ struct mlx5_sf_dev_table *table;
+ int sf_index;
+};
+
static bool mlx5_sf_dev_supported(const struct mlx5_core_dev *dev)
{
return MLX5_CAP_GEN(dev, sf) && mlx5_vhca_event_supported(dev);
@@ -110,12 +115,6 @@ static void mlx5_sf_dev_add(struct mlx5_core_dev *dev, u16 sf_index, u16 fn_id,
sf_dev->parent_mdev = dev;
sf_dev->fn_id = fn_id;
- if (!table->max_sfs) {
- mlx5_adev_idx_free(id);
- kfree(sf_dev);
- err = -EOPNOTSUPP;
- goto add_err;
- }
sf_dev->bar_base_addr = table->base_address + (sf_index * table->sf_bar_length);
trace_mlx5_sf_dev_add(dev, sf_dev, id);
@@ -172,7 +171,6 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
return 0;
sf_index = event->function_id - base_id;
- mutex_lock(&table->table_lock);
sf_dev = xa_load(&table->devices, sf_index);
switch (event->new_vhca_state) {
case MLX5_VHCA_STATE_INVALID:
@@ -196,7 +194,6 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
default:
break;
}
- mutex_unlock(&table->table_lock);
return 0;
}
@@ -221,15 +218,44 @@ static int mlx5_sf_dev_vhca_arm_all(struct mlx5_sf_dev_table *table)
return 0;
}
-static void mlx5_sf_dev_add_active_work(struct work_struct *work)
+static void mlx5_sf_dev_add_active_work(struct work_struct *_work)
{
- struct mlx5_sf_dev_table *table = container_of(work, struct mlx5_sf_dev_table, work);
+ struct mlx5_sf_dev_active_work_ctx *work_ctx;
+
+ work_ctx = container_of(_work, struct mlx5_sf_dev_active_work_ctx, work);
+ if (work_ctx->table->stop_active_wq)
+ goto out;
+ /* Don't probe a device which has already been probed */
+ if (!xa_load(&work_ctx->table->devices, work_ctx->sf_index))
+ mlx5_sf_dev_add(work_ctx->table->dev, work_ctx->sf_index,
+ work_ctx->event.function_id, work_ctx->event.sw_function_id);
+ /* There is a race where the SF becomes inactive after the query
+ * above: e.g. the query returns that the SF is active, and the
+ * eswitch manager then sets it to inactive.
+ * This case cannot be managed in SW, since the probing of the
+ * SF happens on one system while the deactivation happens on a
+ * different one.
+ * If the deactivation happens after the SF performs init_hca(),
+ * the SF will be fully probed and then removed. If it happens
+ * before init_hca(), the SF probe will fail.
+ */
+out:
+ kfree(work_ctx);
+}
+
+/* In case SFs are generated externally, probe active SFs */
+static void mlx5_sf_dev_queue_active_works(struct work_struct *_work)
+{
+ struct mlx5_sf_dev_table *table = container_of(_work, struct mlx5_sf_dev_table, work);
u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {};
+ struct mlx5_sf_dev_active_work_ctx *work_ctx;
struct mlx5_core_dev *dev = table->dev;
u16 max_functions;
u16 function_id;
u16 sw_func_id;
int err = 0;
+ int wq_idx;
u8 state;
int i;
@@ -249,27 +275,22 @@ static void mlx5_sf_dev_add_active_work(struct work_struct *work)
continue;
sw_func_id = MLX5_GET(query_vhca_state_out, out, vhca_state_context.sw_function_id);
- mutex_lock(&table->table_lock);
- /* Don't probe device which is already probe */
- if (!xa_load(&table->devices, i))
- mlx5_sf_dev_add(dev, i, function_id, sw_func_id);
- /* There is a race where SF got inactive after the query
- * above. e.g.: the query returns that the state of the
- * SF is active, and after that the eswitch manager set it to
- * inactive.
- * This case cannot be managed in SW, since the probing of the
- * SF is on one system, and the inactivation is on a different
- * system.
- * If the inactive is done after the SF perform init_hca(),
- * the SF will fully probe and then removed. If it was
- * done before init_hca(), the SF probe will fail.
- */
- mutex_unlock(&table->table_lock);
+ work_ctx = kzalloc(sizeof(*work_ctx), GFP_KERNEL);
+ if (!work_ctx)
+ return;
+
+ INIT_WORK(&work_ctx->work, &mlx5_sf_dev_add_active_work);
+ work_ctx->event.function_id = function_id;
+ work_ctx->event.sw_function_id = sw_func_id;
+ work_ctx->table = table;
+ work_ctx->sf_index = i;
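+ /* Hash by function id so that works for the same SF land on the
+ * same single-threaded workqueue and stay ordered, while different
+ * SFs are processed in parallel.
+ */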
+ wq_idx = work_ctx->event.function_id % MLX5_DEV_MAX_WQS;
+ mlx5_vhca_events_work_enqueue(dev, wq_idx, &work_ctx->work);
}
}
/* In case SFs are generated externally, probe active SFs */
-static int mlx5_sf_dev_queue_active_work(struct mlx5_sf_dev_table *table)
+static int mlx5_sf_dev_create_active_works(struct mlx5_sf_dev_table *table)
{
if (MLX5_CAP_GEN(table->dev, eswitch_manager))
return 0; /* the table is local */
@@ -280,12 +301,12 @@ static int mlx5_sf_dev_queue_active_work(struct mlx5_sf_dev_table *table)
table->active_wq = create_singlethread_workqueue("mlx5_active_sf");
if (!table->active_wq)
return -ENOMEM;
- INIT_WORK(&table->work, &mlx5_sf_dev_add_active_work);
+ INIT_WORK(&table->work, &mlx5_sf_dev_queue_active_works);
queue_work(table->active_wq, &table->work);
return 0;
}
-static void mlx5_sf_dev_destroy_active_work(struct mlx5_sf_dev_table *table)
+static void mlx5_sf_dev_destroy_active_works(struct mlx5_sf_dev_table *table)
{
if (table->active_wq) {
table->stop_active_wq = true;
@@ -296,7 +317,6 @@ static void mlx5_sf_dev_destroy_active_work(struct mlx5_sf_dev_table *table)
void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
{
struct mlx5_sf_dev_table *table;
- unsigned int max_sfs;
int err;
if (!mlx5_sf_dev_supported(dev))
@@ -310,37 +330,30 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
table->nb.notifier_call = mlx5_sf_dev_state_change_handler;
table->dev = dev;
- if (MLX5_CAP_GEN(dev, max_num_sf))
- max_sfs = MLX5_CAP_GEN(dev, max_num_sf);
- else
- max_sfs = 1 << MLX5_CAP_GEN(dev, log_max_sf);
table->sf_bar_length = 1 << (MLX5_CAP_GEN(dev, log_min_sf_size) + 12);
table->base_address = pci_resource_start(dev->pdev, 2);
- table->max_sfs = max_sfs;
xa_init(&table->devices);
- mutex_init(&table->table_lock);
dev->priv.sf_dev_table = table;
err = mlx5_vhca_event_notifier_register(dev, &table->nb);
if (err)
goto vhca_err;
- err = mlx5_sf_dev_queue_active_work(table);
+ err = mlx5_sf_dev_create_active_works(table);
if (err)
goto add_active_err;
err = mlx5_sf_dev_vhca_arm_all(table);
if (err)
goto arm_err;
- mlx5_core_dbg(dev, "SF DEV: max sf devices=%d\n", max_sfs);
return;
arm_err:
- mlx5_sf_dev_destroy_active_work(table);
+ mlx5_sf_dev_destroy_active_works(table);
add_active_err:
mlx5_vhca_event_notifier_unregister(dev, &table->nb);
+ mlx5_vhca_event_work_queues_flush(dev);
vhca_err:
- table->max_sfs = 0;
kfree(table);
dev->priv.sf_dev_table = NULL;
table_err:
@@ -365,9 +378,9 @@ void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev)
if (!table)
return;
- mlx5_sf_dev_destroy_active_work(table);
+ mlx5_sf_dev_destroy_active_works(table);
mlx5_vhca_event_notifier_unregister(dev, &table->nb);
- mutex_destroy(&table->table_lock);
+ mlx5_vhca_event_work_queues_flush(dev);
/* Now that event handler is not running, it is safe to destroy
* the sf device without race.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h
index 2a66a427ef..b99131e95e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h
@@ -19,6 +19,12 @@ struct mlx5_sf_dev {
u16 fn_id;
};
+struct mlx5_sf_peer_devlink_event_ctx {
+ u16 fn_id;
+ struct devlink *devlink;
+ int err;
+};
+
void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev);
void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
index 8fe82f1191..169c2c68ed 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
@@ -8,6 +8,20 @@
#include "dev.h"
#include "devlink.h"
+static int mlx5_core_peer_devlink_set(struct mlx5_sf_dev *sf_dev, struct devlink *devlink)
+{
+ struct mlx5_sf_peer_devlink_event_ctx event_ctx = {
+ .fn_id = sf_dev->fn_id,
+ .devlink = devlink,
+ };
+ int ret;
+
+ ret = mlx5_blocking_notifier_call_chain(sf_dev->parent_mdev,
+ MLX5_DRIVER_EVENT_SF_PEER_DEVLINK,
+ &event_ctx);
+ return ret == NOTIFY_OK ? event_ctx.err : 0;
+}
+
static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxiliary_device_id *id)
{
struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
@@ -54,9 +68,21 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia
mlx5_core_warn(mdev, "mlx5_init_one err=%d\n", err);
goto init_one_err;
}
+
+ err = mlx5_core_peer_devlink_set(sf_dev, devlink);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_core_peer_devlink_set err=%d\n", err);
+ goto peer_devlink_set_err;
+ }
+
devlink_register(devlink);
return 0;
+peer_devlink_set_err:
+ if (mlx5_dev_is_lightweight(sf_dev->mdev))
+ mlx5_uninit_one_light(sf_dev->mdev);
+ else
+ mlx5_uninit_one(sf_dev->mdev);
init_one_err:
iounmap(mdev->iseg);
remap_err:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index e34a8f88c5..6c11e075ca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -20,43 +20,36 @@ struct mlx5_sf {
u16 hw_state;
};
+static void *mlx5_sf_by_dl_port(struct devlink_port *dl_port)
+{
+ struct mlx5_devlink_port *mlx5_dl_port = mlx5_devlink_port_get(dl_port);
+
+ return container_of(mlx5_dl_port, struct mlx5_sf, dl_port);
+}
+
struct mlx5_sf_table {
struct mlx5_core_dev *dev; /* To refer from notifier context. */
- struct xarray port_indices; /* port index based lookup. */
- refcount_t refcount;
- struct completion disable_complete;
+ struct xarray function_ids; /* function id based lookup. */
struct mutex sf_state_lock; /* Serializes sf state among user cmds & vhca event handler. */
struct notifier_block esw_nb;
struct notifier_block vhca_nb;
+ struct notifier_block mdev_nb;
};
static struct mlx5_sf *
-mlx5_sf_lookup_by_index(struct mlx5_sf_table *table, unsigned int port_index)
-{
- return xa_load(&table->port_indices, port_index);
-}
-
-static struct mlx5_sf *
mlx5_sf_lookup_by_function_id(struct mlx5_sf_table *table, unsigned int fn_id)
{
- unsigned long index;
- struct mlx5_sf *sf;
-
- xa_for_each(&table->port_indices, index, sf) {
- if (sf->hw_fn_id == fn_id)
- return sf;
- }
- return NULL;
+ return xa_load(&table->function_ids, fn_id);
}
-static int mlx5_sf_id_insert(struct mlx5_sf_table *table, struct mlx5_sf *sf)
+static int mlx5_sf_function_id_insert(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
- return xa_insert(&table->port_indices, sf->port_index, sf, GFP_KERNEL);
+ return xa_insert(&table->function_ids, sf->hw_fn_id, sf, GFP_KERNEL);
}
-static void mlx5_sf_id_erase(struct mlx5_sf_table *table, struct mlx5_sf *sf)
+static void mlx5_sf_function_id_erase(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
- xa_erase(&table->port_indices, sf->port_index);
+ xa_erase(&table->function_ids, sf->hw_fn_id);
}
static struct mlx5_sf *
@@ -93,7 +86,7 @@ mlx5_sf_alloc(struct mlx5_sf_table *table, struct mlx5_eswitch *esw,
sf->hw_state = MLX5_VHCA_STATE_ALLOCATED;
sf->controller = controller;
- err = mlx5_sf_id_insert(table, sf);
+ err = mlx5_sf_function_id_insert(table, sf);
if (err)
goto insert_err;
@@ -111,28 +104,11 @@ id_err:
static void mlx5_sf_free(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
- mlx5_sf_id_erase(table, sf);
mlx5_sf_hw_table_sf_free(table->dev, sf->controller, sf->id);
trace_mlx5_sf_free(table->dev, sf->port_index, sf->controller, sf->hw_fn_id);
kfree(sf);
}
-static struct mlx5_sf_table *mlx5_sf_table_try_get(struct mlx5_core_dev *dev)
-{
- struct mlx5_sf_table *table = dev->priv.sf_table;
-
- if (!table)
- return NULL;
-
- return refcount_inc_not_zero(&table->refcount) ? table : NULL;
-}
-
-static void mlx5_sf_table_put(struct mlx5_sf_table *table)
-{
- if (refcount_dec_and_test(&table->refcount))
- complete(&table->disable_complete);
-}
-
static enum devlink_port_fn_state mlx5_sf_to_devlink_state(u8 hw_state)
{
switch (hw_state) {
@@ -172,26 +148,14 @@ int mlx5_devlink_sf_port_fn_state_get(struct devlink_port *dl_port,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(dl_port->devlink);
- struct mlx5_sf_table *table;
- struct mlx5_sf *sf;
- int err = 0;
-
- table = mlx5_sf_table_try_get(dev);
- if (!table)
- return -EOPNOTSUPP;
+ struct mlx5_sf_table *table = dev->priv.sf_table;
+ struct mlx5_sf *sf = mlx5_sf_by_dl_port(dl_port);
- sf = mlx5_sf_lookup_by_index(table, dl_port->index);
- if (!sf) {
- err = -EOPNOTSUPP;
- goto sf_err;
- }
mutex_lock(&table->sf_state_lock);
*state = mlx5_sf_to_devlink_state(sf->hw_state);
*opstate = mlx5_sf_to_devlink_opstate(sf->hw_state);
mutex_unlock(&table->sf_state_lock);
-sf_err:
- mlx5_sf_table_put(table);
- return err;
+ return 0;
}
static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf,
@@ -257,26 +221,10 @@ int mlx5_devlink_sf_port_fn_state_set(struct devlink_port *dl_port,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(dl_port->devlink);
- struct mlx5_sf_table *table;
- struct mlx5_sf *sf;
- int err;
-
- table = mlx5_sf_table_try_get(dev);
- if (!table) {
- NL_SET_ERR_MSG_MOD(extack,
- "Port state set is only supported in eswitch switchdev mode or SF ports are disabled.");
- return -EOPNOTSUPP;
- }
- sf = mlx5_sf_lookup_by_index(table, dl_port->index);
- if (!sf) {
- err = -ENODEV;
- goto out;
- }
+ struct mlx5_sf_table *table = dev->priv.sf_table;
+ struct mlx5_sf *sf = mlx5_sf_by_dl_port(dl_port);
- err = mlx5_sf_state_set(dev, table, sf, state, extack);
-out:
- mlx5_sf_table_put(table);
- return err;
+ return mlx5_sf_state_set(dev, table, sf, state, extack);
}
static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
@@ -335,32 +283,45 @@ mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_
return 0;
}
+static bool mlx5_sf_table_supported(const struct mlx5_core_dev *dev)
+{
+ return dev->priv.eswitch && MLX5_ESWITCH_MANAGER(dev) &&
+ mlx5_sf_hw_table_supported(dev);
+}
+
int mlx5_devlink_sf_port_new(struct devlink *devlink,
const struct devlink_port_new_attrs *new_attr,
struct netlink_ext_ack *extack,
struct devlink_port **dl_port)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
- struct mlx5_sf_table *table;
+ struct mlx5_sf_table *table = dev->priv.sf_table;
int err;
err = mlx5_sf_new_check_attr(dev, new_attr, extack);
if (err)
return err;
- table = mlx5_sf_table_try_get(dev);
- if (!table) {
+ if (!mlx5_sf_table_supported(dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "SF ports are not supported.");
+ return -EOPNOTSUPP;
+ }
+
+ if (!is_mdev_switchdev_mode(dev)) {
NL_SET_ERR_MSG_MOD(extack,
- "Port add is only supported in eswitch switchdev mode or SF ports are disabled.");
+ "SF ports are only supported in eswitch switchdev mode.");
return -EOPNOTSUPP;
}
- err = mlx5_sf_add(dev, table, new_attr, extack, dl_port);
- mlx5_sf_table_put(table);
- return err;
+
+ return mlx5_sf_add(dev, table, new_attr, extack, dl_port);
}
static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
+ mutex_lock(&table->sf_state_lock);
+
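+ /* Erase under sf_state_lock so the vhca event handler cannot look up
+ * an SF that is in the middle of teardown.
+ */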
+ mlx5_sf_function_id_erase(table, sf);
+
if (sf->hw_state == MLX5_VHCA_STATE_ALLOCATED) {
mlx5_sf_free(table, sf);
} else if (mlx5_sf_is_active(sf)) {
@@ -376,6 +337,16 @@ static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf)
mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->controller, sf->id);
kfree(sf);
}
+
+ mutex_unlock(&table->sf_state_lock);
+}
+
+static void mlx5_sf_del(struct mlx5_sf_table *table, struct mlx5_sf *sf)
+{
+ struct mlx5_eswitch *esw = table->dev->priv.eswitch;
+
+ mlx5_eswitch_unload_sf_vport(esw, sf->hw_fn_id);
+ mlx5_sf_dealloc(table, sf);
}
int mlx5_devlink_sf_port_del(struct devlink *devlink,
@@ -383,32 +354,11 @@ int mlx5_devlink_sf_port_del(struct devlink *devlink,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
- struct mlx5_eswitch *esw = dev->priv.eswitch;
- struct mlx5_sf_table *table;
- struct mlx5_sf *sf;
- int err = 0;
-
- table = mlx5_sf_table_try_get(dev);
- if (!table) {
- NL_SET_ERR_MSG_MOD(extack,
- "Port del is only supported in eswitch switchdev mode or SF ports are disabled.");
- return -EOPNOTSUPP;
- }
- sf = mlx5_sf_lookup_by_index(table, dl_port->index);
- if (!sf) {
- err = -ENODEV;
- goto sf_err;
- }
-
- mlx5_eswitch_unload_sf_vport(esw, sf->hw_fn_id);
- mlx5_sf_id_erase(table, sf);
+ struct mlx5_sf_table *table = dev->priv.sf_table;
+ struct mlx5_sf *sf = mlx5_sf_by_dl_port(dl_port);
- mutex_lock(&table->sf_state_lock);
- mlx5_sf_dealloc(table, sf);
- mutex_unlock(&table->sf_state_lock);
-sf_err:
- mlx5_sf_table_put(table);
- return err;
+ mlx5_sf_del(table, sf);
+ return 0;
}
static bool mlx5_sf_state_update_check(const struct mlx5_sf *sf, u8 new_state)
@@ -433,14 +383,10 @@ static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, v
bool update = false;
struct mlx5_sf *sf;
- table = mlx5_sf_table_try_get(table->dev);
- if (!table)
- return 0;
-
mutex_lock(&table->sf_state_lock);
sf = mlx5_sf_lookup_by_function_id(table, event->function_id);
if (!sf)
- goto sf_err;
+ goto unlock;
/* When driver is attached or detached to a function, an event
* notifies such state change.
@@ -450,46 +396,18 @@ static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, v
sf->hw_state = event->new_vhca_state;
trace_mlx5_sf_update_state(table->dev, sf->port_index, sf->controller,
sf->hw_fn_id, sf->hw_state);
-sf_err:
+unlock:
mutex_unlock(&table->sf_state_lock);
- mlx5_sf_table_put(table);
return 0;
}
-static void mlx5_sf_table_enable(struct mlx5_sf_table *table)
-{
- init_completion(&table->disable_complete);
- refcount_set(&table->refcount, 1);
-}
-
-static void mlx5_sf_deactivate_all(struct mlx5_sf_table *table)
+static void mlx5_sf_del_all(struct mlx5_sf_table *table)
{
- struct mlx5_eswitch *esw = table->dev->priv.eswitch;
unsigned long index;
struct mlx5_sf *sf;
- /* At this point, no new user commands can start and no vhca event can
- * arrive. It is safe to destroy all user created SFs.
- */
- xa_for_each(&table->port_indices, index, sf) {
- mlx5_eswitch_unload_sf_vport(esw, sf->hw_fn_id);
- mlx5_sf_id_erase(table, sf);
- mlx5_sf_dealloc(table, sf);
- }
-}
-
-static void mlx5_sf_table_disable(struct mlx5_sf_table *table)
-{
- if (!refcount_read(&table->refcount))
- return;
-
- /* Balances with refcount_set; drop the reference so that new user cmd cannot start
- * and new vhca event handler cannot run.
- */
- mlx5_sf_table_put(table);
- wait_for_completion(&table->disable_complete);
-
- mlx5_sf_deactivate_all(table);
+ xa_for_each(&table->function_ids, index, sf)
+ mlx5_sf_del(table, sf);
}
static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, void *data)
@@ -498,11 +416,8 @@ static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, voi
const struct mlx5_esw_event_info *mode = data;
switch (mode->new_mode) {
- case MLX5_ESWITCH_OFFLOADS:
- mlx5_sf_table_enable(table);
- break;
case MLX5_ESWITCH_LEGACY:
- mlx5_sf_table_disable(table);
+ mlx5_sf_del_all(table);
break;
default:
break;
@@ -511,10 +426,29 @@ static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, voi
return 0;
}
-static bool mlx5_sf_table_supported(const struct mlx5_core_dev *dev)
+static int mlx5_sf_mdev_event(struct notifier_block *nb, unsigned long event, void *data)
{
- return dev->priv.eswitch && MLX5_ESWITCH_MANAGER(dev) &&
- mlx5_sf_hw_table_supported(dev);
+ struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, mdev_nb);
+ struct mlx5_sf_peer_devlink_event_ctx *event_ctx = data;
+ int ret = NOTIFY_DONE;
+ struct mlx5_sf *sf;
+
+ if (event != MLX5_DRIVER_EVENT_SF_PEER_DEVLINK)
+ return NOTIFY_DONE;
+
+ mutex_lock(&table->sf_state_lock);
+ sf = mlx5_sf_lookup_by_function_id(table, event_ctx->fn_id);
+ if (!sf)
+ goto out;
+
+ event_ctx->err = devl_port_fn_devlink_set(&sf->dl_port.dl_port,
+ event_ctx->devlink);
+
+ ret = NOTIFY_OK;
+out:
+ mutex_unlock(&table->sf_state_lock);
+ return ret;
}
int mlx5_sf_table_init(struct mlx5_core_dev *dev)
@@ -531,9 +465,8 @@ int mlx5_sf_table_init(struct mlx5_core_dev *dev)
mutex_init(&table->sf_state_lock);
table->dev = dev;
- xa_init(&table->port_indices);
+ xa_init(&table->function_ids);
dev->priv.sf_table = table;
- refcount_set(&table->refcount, 0);
table->esw_nb.notifier_call = mlx5_sf_esw_event;
err = mlx5_esw_event_notifier_register(dev->priv.eswitch, &table->esw_nb);
if (err)
@@ -544,6 +477,9 @@ int mlx5_sf_table_init(struct mlx5_core_dev *dev)
if (err)
goto vhca_err;
+ table->mdev_nb.notifier_call = mlx5_sf_mdev_event;
+ mlx5_blocking_notifier_register(dev, &table->mdev_nb);
+
return 0;
vhca_err:
@@ -562,10 +498,10 @@ void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
if (!table)
return;
+ mlx5_blocking_notifier_unregister(dev, &table->mdev_nb);
mlx5_vhca_event_notifier_unregister(table->dev, &table->vhca_nb);
mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb);
- WARN_ON(refcount_read(&table->refcount));
mutex_destroy(&table->sf_state_lock);
- WARN_ON(!xa_empty(&table->port_indices));
+ WARN_ON(!xa_empty(&table->function_ids));
kfree(table);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
index d908fba968..cda01ba441 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
@@ -21,6 +21,15 @@ struct mlx5_vhca_event_work {
struct mlx5_vhca_state_event event;
};
+struct mlx5_vhca_event_handler {
+ struct workqueue_struct *wq;
+};
+
+struct mlx5_vhca_events {
+ struct mlx5_core_dev *dev;
+ struct mlx5_vhca_event_handler handler[MLX5_DEV_MAX_WQS];
+};
+
int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id, u32 *out, u32 outlen)
{
u32 in[MLX5_ST_SZ_DW(query_vhca_state_in)] = {};
@@ -99,6 +108,11 @@ static void mlx5_vhca_state_work_handler(struct work_struct *_work)
kfree(work);
}
+void mlx5_vhca_events_work_enqueue(struct mlx5_core_dev *dev, int idx, struct work_struct *work)
+{
+ queue_work(dev->priv.vhca_events->handler[idx].wq, work);
+}
+
static int
mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned long type, void *data)
{
@@ -106,6 +120,7 @@ mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned long type, v
mlx5_nb_cof(nb, struct mlx5_vhca_state_notifier, nb);
struct mlx5_vhca_event_work *work;
struct mlx5_eqe *eqe = data;
+ int wq_idx;
work = kzalloc(sizeof(*work), GFP_ATOMIC);
if (!work)
@@ -113,7 +128,8 @@ mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned long type, v
INIT_WORK(&work->work, &mlx5_vhca_state_work_handler);
work->notifier = notifier;
work->event.function_id = be16_to_cpu(eqe->data.vhca_state.function_id);
- mlx5_events_work_enqueue(notifier->dev, &work->work);
+ wq_idx = work->event.function_id % MLX5_DEV_MAX_WQS;
+ mlx5_vhca_events_work_enqueue(notifier->dev, wq_idx, &work->work);
return NOTIFY_OK;
}
@@ -132,28 +148,75 @@ void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap)
int mlx5_vhca_event_init(struct mlx5_core_dev *dev)
{
struct mlx5_vhca_state_notifier *notifier;
+ char wq_name[MLX5_CMD_WQ_MAX_NAME];
+ struct mlx5_vhca_events *events;
+ int err, i;
if (!mlx5_vhca_event_supported(dev))
return 0;
- notifier = kzalloc(sizeof(*notifier), GFP_KERNEL);
- if (!notifier)
+ events = kzalloc(sizeof(*events), GFP_KERNEL);
+ if (!events)
return -ENOMEM;
+ events->dev = dev;
+ dev->priv.vhca_events = events;
+ for (i = 0; i < MLX5_DEV_MAX_WQS; i++) {
+ snprintf(wq_name, MLX5_CMD_WQ_MAX_NAME, "mlx5_vhca_event%d", i);
+ events->handler[i].wq = create_singlethread_workqueue(wq_name);
+ if (!events->handler[i].wq) {
+ err = -ENOMEM;
+ goto err_create_wq;
+ }
+ }
+
+ notifier = kzalloc(sizeof(*notifier), GFP_KERNEL);
+ if (!notifier) {
+ err = -ENOMEM;
+ goto err_notifier;
+ }
+
dev->priv.vhca_state_notifier = notifier;
notifier->dev = dev;
BLOCKING_INIT_NOTIFIER_HEAD(&notifier->n_head);
MLX5_NB_INIT(&notifier->nb, mlx5_vhca_state_change_notifier, VHCA_STATE_CHANGE);
return 0;
+
+err_notifier:
+err_create_wq:
+ for (--i; i >= 0; i--)
+ destroy_workqueue(events->handler[i].wq);
+ kfree(events);
+ return err;
+}
+
+void mlx5_vhca_event_work_queues_flush(struct mlx5_core_dev *dev)
+{
+ struct mlx5_vhca_events *vhca_events;
+ int i;
+
+ if (!mlx5_vhca_event_supported(dev))
+ return;
+
+ vhca_events = dev->priv.vhca_events;
+ for (i = 0; i < MLX5_DEV_MAX_WQS; i++)
+ flush_workqueue(vhca_events->handler[i].wq);
}
void mlx5_vhca_event_cleanup(struct mlx5_core_dev *dev)
{
+ struct mlx5_vhca_events *vhca_events;
+ int i;
+
if (!mlx5_vhca_event_supported(dev))
return;
kfree(dev->priv.vhca_state_notifier);
dev->priv.vhca_state_notifier = NULL;
+ vhca_events = dev->priv.vhca_events;
+ for (i = 0; i < MLX5_DEV_MAX_WQS; i++)
+ destroy_workqueue(vhca_events->handler[i].wq);
+ kvfree(vhca_events);
}
void mlx5_vhca_event_start(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h
index 013cdfe906..1725ba64f8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h
@@ -28,6 +28,9 @@ int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, u32 sw_fn
int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id);
int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id,
u32 *out, u32 outlen);
+void mlx5_vhca_events_work_enqueue(struct mlx5_core_dev *dev, int idx, struct work_struct *work);
+void mlx5_vhca_event_work_queues_flush(struct mlx5_core_dev *dev);
+
#else
static inline void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index 90c38cbbde..d2b65a0ce4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -55,6 +55,13 @@ static const char *dr_action_id_to_str(enum mlx5dr_action_type action_id)
return action_type_to_str[action_id];
}
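+/* Forwarding to multiple flow tables in the FDB is allowed on steering
+ * formats older than ConnectX-6 Dx, or when the device reports one of the
+ * fdb_multi_path_any_table capabilities.
+ */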
+static bool mlx5dr_action_supp_fwd_fdb_multi_ft(struct mlx5_core_dev *dev)
+{
+ return (MLX5_CAP_GEN(dev, steering_format_version) < MLX5_STEERING_FORMAT_CONNECTX_6DX ||
+ MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_any_table_limit_regc) ||
+ MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_any_table));
+}
+
static const enum dr_action_valid_state
next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] = {
[DR_ACTION_DOMAIN_NIC_INGRESS] = {
@@ -1170,12 +1177,16 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
bool ignore_flow_level,
u32 flow_source)
{
+ struct mlx5dr_cmd_flow_destination_hw_info tmp_hw_dest;
struct mlx5dr_cmd_flow_destination_hw_info *hw_dests;
struct mlx5dr_action **ref_actions;
struct mlx5dr_action *action;
bool reformat_req = false;
+ bool is_ft_wire = false;
+ u16 num_dst_ft = 0;
u32 num_of_ref = 0;
u32 ref_act_cnt;
+ u16 last_dest;
int ret;
int i;
@@ -1217,11 +1228,22 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
break;
case DR_ACTION_TYP_FT:
+ if (num_dst_ft &&
+ !mlx5dr_action_supp_fwd_fdb_multi_ft(dmn->mdev)) {
+ mlx5dr_dbg(dmn, "multiple FT destinations not supported\n");
+ goto free_ref_actions;
+ }
+ num_dst_ft++;
hw_dests[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- if (dest_action->dest_tbl->is_fw_tbl)
+ if (dest_action->dest_tbl->is_fw_tbl) {
hw_dests[i].ft_id = dest_action->dest_tbl->fw_tbl.id;
- else
+ } else {
hw_dests[i].ft_id = dest_action->dest_tbl->tbl->table_id;
+ if (dest_action->dest_tbl->is_wire_ft) {
+ is_ft_wire = true;
+ last_dest = i;
+ }
+ }
break;
default:
@@ -1230,6 +1252,16 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
}
}
+ /* In multi-destination, the FW performs the iteration over the
+ * destinations on the RX side, except for the last one, which is done
+ * on the TX side.
+ * So, if one of the FT targets is the wire, put it at the end of the
+ * dest list.
+ */
+ if (is_ft_wire && num_dst_ft > 1) {
+ tmp_hw_dest = hw_dests[last_dest];
+ hw_dests[last_dest] = hw_dests[num_of_dests - 1];
+ hw_dests[num_of_dests - 1] = tmp_hw_dest;
+ }
+
action = dr_action_create_generic(DR_ACTION_TYP_FT);
if (!action)
goto free_ref_actions;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 6c59de3e28..81eff6c410 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -436,10 +436,6 @@ void mlx5dr_ste_build_mpls(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_ctx *ste_ctx,
- struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask,
- bool inner, bool rx);
void mlx5dr_ste_build_tnl_mpls_over_gre(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
@@ -1064,6 +1060,7 @@ struct mlx5dr_action_sampler {
struct mlx5dr_action_dest_tbl {
u8 is_fw_tbl:1;
+ u8 is_wire_ft:1;
union {
struct mlx5dr_table *tbl;
struct {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 14f6df88b1..50c2554c9c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -209,10 +209,17 @@ static struct mlx5dr_action *create_ft_action(struct mlx5dr_domain *domain,
struct mlx5_flow_rule *dst)
{
struct mlx5_flow_table *dest_ft = dst->dest_attr.ft;
+ struct mlx5dr_action *tbl_action;
if (mlx5dr_is_fw_table(dest_ft))
return mlx5dr_action_create_dest_flow_fw_table(domain, dest_ft);
- return mlx5dr_action_create_dest_table(dest_ft->fs_dr_table.dr_table);
+
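+ /* Remember whether this destination steers to the uplink (wire), so
+ * multi-destination handling can place it last in the list.
+ */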
+ tbl_action = mlx5dr_action_create_dest_table(dest_ft->fs_dr_table.dr_table);
+ if (tbl_action)
+ tbl_action->dest_tbl->is_wire_ft =
+ dest_ft->flags & MLX5_FLOW_TABLE_UPLINK_VPORT ? 1 : 0;
+
+ return tbl_action;
}
static struct mlx5dr_action *create_range_action(struct mlx5dr_domain *domain,