author	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-05-18 17:39:57 +0000
committer	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-05-18 17:39:57 +0000
commit	dc50eab76b709d68175a358d6e23a5a3890764d3 (patch)
tree	c754d0390db060af0213ff994f0ac310e4cfd6e9 /drivers/infiniband/hw/mlx5
parent	Adding debian version 6.6.15-2. (diff)
Merging upstream version 6.7.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/infiniband/hw/mlx5')
-rw-r--r--	drivers/infiniband/hw/mlx5/cong.c	6
-rw-r--r--	drivers/infiniband/hw/mlx5/mad.c	13
-rw-r--r--	drivers/infiniband/hw/mlx5/main.c	6
-rw-r--r--	drivers/infiniband/hw/mlx5/mlx5_ib.h	21
-rw-r--r--	drivers/infiniband/hw/mlx5/mr.c	324
-rw-r--r--	drivers/infiniband/hw/mlx5/qp.c	2
-rw-r--r--	drivers/infiniband/hw/mlx5/umr.c	4
7 files changed, 194 insertions, 182 deletions
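
Most of the churn is in mr.c, which drops the xarray-based store (with its stored/reserved indices) in favour of the page-list queue declared in mlx5_ib.h below: mkeys are pushed and popped at the tail under a plain spinlock, and in-flight asynchronous creations are counted in a separate pending field. The stand-alone C sketch below models only that push/pop-at-the-tail behaviour; the tiny page size, the hand-rolled prev/next pointers and the absence of locking are simplifications for illustration, not the kernel code.

/*
 * Illustrative userspace model of the paged mkey queue introduced in mr.c.
 * Names mirror the diff; the page size is scaled down and the kernel's
 * list_head and spinlock are replaced by a plain tail pointer and no
 * locking (single-threaded demo only).
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define KEYS_PER_PAGE 4            /* the kernel fits ~one PAGE_SIZE of u32s */

struct mkeys_page {
	uint32_t mkeys[KEYS_PER_PAGE];
	struct mkeys_page *prev, *next;
};

struct mkeys_queue {
	struct mkeys_page *head, *tail; /* pages_list in the kernel version */
	unsigned long ci;               /* number of stored mkeys           */
	unsigned int num_pages;
};

static int push_mkey(struct mkeys_queue *q, uint32_t mkey)
{
	struct mkeys_page *page = q->tail;

	if (q->ci >= (unsigned long)q->num_pages * KEYS_PER_PAGE) {
		/* last page is full: append a fresh one */
		page = calloc(1, sizeof(*page));
		if (!page)
			return -1;
		page->prev = q->tail;
		if (q->tail)
			q->tail->next = page;
		else
			q->head = page;
		q->tail = page;
		q->num_pages++;
	}
	page->mkeys[q->ci % KEYS_PER_PAGE] = mkey;
	q->ci++;
	return 0;
}

static uint32_t pop_mkey(struct mkeys_queue *q)
{
	unsigned long idx = (q->ci - 1) % KEYS_PER_PAGE;
	struct mkeys_page *page = q->tail;
	uint32_t mkey = page->mkeys[idx];

	q->ci--;
	/* drop the tail page once it empties, but always keep one page */
	if (q->num_pages > 1 && idx == 0) {
		q->tail = page->prev;
		q->tail->next = NULL;
		q->num_pages--;
		free(page);
	}
	return mkey;
}

int main(void)
{
	struct mkeys_queue q = { 0 };

	/* first push allocates the first page (the kernel pre-allocates it
	 * in mlx5r_mkeys_init() instead) */
	for (uint32_t k = 1; k <= 9; k++)
		push_mkey(&q, 0x100 + k);
	printf("stored=%lu pages=%u\n", q.ci, q.num_pages);   /* 9, 3 */
	while (q.ci)
		printf("pop 0x%x\n", pop_mkey(&q));
	free(q.head);                  /* one page always remains */
	return 0;
}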
diff --git a/drivers/infiniband/hw/mlx5/cong.c b/drivers/infiniband/hw/mlx5/cong.c
index f87531318f..a78a067e3c 100644
--- a/drivers/infiniband/hw/mlx5/cong.c
+++ b/drivers/infiniband/hw/mlx5/cong.c
@@ -458,6 +458,12 @@ void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num)
dbg_cc_params->root = debugfs_create_dir("cc_params", mlx5_debugfs_get_dev_root(mdev));
for (i = 0; i < MLX5_IB_DBG_CC_MAX; i++) {
+ if ((i == MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP_VALID ||
+ i == MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP))
+ if (!MLX5_CAP_GEN(mdev, roce) ||
+ !MLX5_CAP_ROCE(mdev, roce_cc_general))
+ continue;
+
dbg_cc_params->params[i].offset = i;
dbg_cc_params->params[i].dev = dev;
dbg_cc_params->params[i].port_num = port_num;
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 8102ef113b..0c3c4e6481 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -619,6 +619,19 @@ int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port,
}
}
+ /* Check if extended speeds 2 (XDR/...) are supported */
+ if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP &&
+ props->port_cap_flags2 & IB_PORT_EXTENDED_SPEEDS2_SUP) {
+ ext_active_speed = (out_mad->data[56] >> 4) & 0x6;
+
+ switch (ext_active_speed) {
+ case 2:
+ if (props->port_cap_flags2 & IB_PORT_LINK_SPEED_XDR_SUP)
+ props->active_speed = IB_SPEED_XDR;
+ break;
+ }
+ }
+
/* If reported active speed is QDR, check if is FDR-10 */
if (props->active_speed == 4) {
if (dev->port_caps[port - 1].ext_port_cap &
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 102ead4971..650a15b6cf 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -444,7 +444,7 @@ static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed,
*active_width = IB_WIDTH_2X;
*active_speed = IB_SPEED_NDR;
break;
- case MLX5E_PROT_MASK(MLX5E_400GAUI_8):
+ case MLX5E_PROT_MASK(MLX5E_400GAUI_8_400GBASE_CR8):
*active_width = IB_WIDTH_8X;
*active_speed = IB_SPEED_HDR;
break;
@@ -452,6 +452,10 @@ static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed,
*active_width = IB_WIDTH_4X;
*active_speed = IB_SPEED_NDR;
break;
+ case MLX5E_PROT_MASK(MLX5E_800GAUI_8_800GBASE_CR8_KR8):
+ *active_width = IB_WIDTH_8X;
+ *active_speed = IB_SPEED_NDR;
+ break;
default:
return -EINVAL;
}
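
The new 800GAUI-8 case reports the port as eight lanes (IB_WIDTH_8X) at NDR, mirroring how 400GAUI-8 is reported as eight lanes at HDR. As a sanity check, using the nominal per-lane InfiniBand data rates (HDR ≈ 50 Gb/s, NDR ≈ 100 Gb/s; figures assumed, not taken from this diff), the effective rates come out to the expected 400 and 800 Gb/s:

/* Rough effective-rate check for the two labelled cases touched above;
 * per-lane figures are the nominal IB data rates, not values from the diff. */
#include <stdio.h>

int main(void)
{
	const struct { const char *name; int lanes; int gbps_per_lane; } links[] = {
		{ "400GAUI-8 / 400GBASE-CR8", 8, 50 },   /* IB_WIDTH_8X, IB_SPEED_HDR */
		{ "800GAUI-8 / 800GBASE-CR8", 8, 100 },  /* IB_WIDTH_8X, IB_SPEED_NDR */
	};

	for (unsigned int i = 0; i < sizeof(links) / sizeof(links[0]); i++)
		printf("%-26s -> %d Gb/s\n", links[i].name,
		       links[i].lanes * links[i].gbps_per_lane);
	return 0;
}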
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 16713baf0d..bbe79b86c7 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -753,10 +753,25 @@ struct umr_common {
unsigned int state;
};
+#define NUM_MKEYS_PER_PAGE \
+ ((PAGE_SIZE - sizeof(struct list_head)) / sizeof(u32))
+
+struct mlx5_mkeys_page {
+ u32 mkeys[NUM_MKEYS_PER_PAGE];
+ struct list_head list;
+};
+static_assert(sizeof(struct mlx5_mkeys_page) == PAGE_SIZE);
+
+struct mlx5_mkeys_queue {
+ struct list_head pages_list;
+ u32 num_pages;
+ unsigned long ci;
+ spinlock_t lock; /* sync list ops */
+};
+
struct mlx5_cache_ent {
- struct xarray mkeys;
- unsigned long stored;
- unsigned long reserved;
+ struct mlx5_mkeys_queue mkeys_queue;
+ u32 pending;
char name[4];
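
The sizing above makes one mlx5_mkeys_page occupy exactly one page, which the new static_assert enforces. On a typical 64-bit configuration (4 KiB PAGE_SIZE, 16-byte struct list_head, and no tail padding since 1020 * 4 bytes is already 8-byte aligned) the arithmetic works out as in this small stand-alone check; the figures are assumptions about the target configuration, not values from the diff:

/* Hypothetical stand-alone check of the NUM_MKEYS_PER_PAGE arithmetic,
 * assuming PAGE_SIZE = 4096 and sizeof(struct list_head) = 16 (64-bit). */
#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define LIST_HEAD_SIZE 16UL            /* two pointers on 64-bit */
#define NUM_MKEYS_PER_PAGE ((PAGE_SIZE - LIST_HEAD_SIZE) / sizeof(unsigned int))

int main(void)
{
	/* (4096 - 16) / 4 = 1020 mkeys, and 1020 * 4 + 16 = 4096 exactly */
	printf("mkeys per page: %lu\n", (unsigned long)NUM_MKEYS_PER_PAGE);
	assert(NUM_MKEYS_PER_PAGE * sizeof(unsigned int) + LIST_HEAD_SIZE == PAGE_SIZE);
	return 0;
}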
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index e0629898c3..18e459b557 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -143,110 +143,47 @@ static void create_mkey_warn(struct mlx5_ib_dev *dev, int status, void *out)
mlx5_cmd_out_err(dev->mdev, MLX5_CMD_OP_CREATE_MKEY, 0, out);
}
-static int push_mkey_locked(struct mlx5_cache_ent *ent, bool limit_pendings,
- void *to_store)
+static int push_mkey_locked(struct mlx5_cache_ent *ent, u32 mkey)
{
- XA_STATE(xas, &ent->mkeys, 0);
- void *curr;
+ unsigned long tmp = ent->mkeys_queue.ci % NUM_MKEYS_PER_PAGE;
+ struct mlx5_mkeys_page *page;
- if (limit_pendings &&
- (ent->reserved - ent->stored) > MAX_PENDING_REG_MR)
- return -EAGAIN;
-
- while (1) {
- /*
- * This is cmpxchg (NULL, XA_ZERO_ENTRY) however this version
- * doesn't transparently unlock. Instead we set the xas index to
- * the current value of reserved every iteration.
- */
- xas_set(&xas, ent->reserved);
- curr = xas_load(&xas);
- if (!curr) {
- if (to_store && ent->stored == ent->reserved)
- xas_store(&xas, to_store);
- else
- xas_store(&xas, XA_ZERO_ENTRY);
- if (xas_valid(&xas)) {
- ent->reserved++;
- if (to_store) {
- if (ent->stored != ent->reserved)
- __xa_store(&ent->mkeys,
- ent->stored,
- to_store,
- GFP_KERNEL);
- ent->stored++;
- queue_adjust_cache_locked(ent);
- WRITE_ONCE(ent->dev->cache.last_add,
- jiffies);
- }
- }
- }
- xa_unlock_irq(&ent->mkeys);
-
- /*
- * Notice xas_nomem() must always be called as it cleans
- * up any cached allocation.
- */
- if (!xas_nomem(&xas, GFP_KERNEL))
- break;
- xa_lock_irq(&ent->mkeys);
+ lockdep_assert_held(&ent->mkeys_queue.lock);
+ if (ent->mkeys_queue.ci >=
+ ent->mkeys_queue.num_pages * NUM_MKEYS_PER_PAGE) {
+ page = kzalloc(sizeof(*page), GFP_ATOMIC);
+ if (!page)
+ return -ENOMEM;
+ ent->mkeys_queue.num_pages++;
+ list_add_tail(&page->list, &ent->mkeys_queue.pages_list);
+ } else {
+ page = list_last_entry(&ent->mkeys_queue.pages_list,
+ struct mlx5_mkeys_page, list);
}
- xa_lock_irq(&ent->mkeys);
- if (xas_error(&xas))
- return xas_error(&xas);
- if (WARN_ON(curr))
- return -EINVAL;
- return 0;
-}
-
-static int push_mkey(struct mlx5_cache_ent *ent, bool limit_pendings,
- void *to_store)
-{
- int ret;
-
- xa_lock_irq(&ent->mkeys);
- ret = push_mkey_locked(ent, limit_pendings, to_store);
- xa_unlock_irq(&ent->mkeys);
- return ret;
-}
-
-static void undo_push_reserve_mkey(struct mlx5_cache_ent *ent)
-{
- void *old;
-
- ent->reserved--;
- old = __xa_erase(&ent->mkeys, ent->reserved);
- WARN_ON(old);
-}
-
-static void push_to_reserved(struct mlx5_cache_ent *ent, u32 mkey)
-{
- void *old;
- old = __xa_store(&ent->mkeys, ent->stored, xa_mk_value(mkey), 0);
- WARN_ON(old);
- ent->stored++;
+ page->mkeys[tmp] = mkey;
+ ent->mkeys_queue.ci++;
+ return 0;
}
-static u32 pop_stored_mkey(struct mlx5_cache_ent *ent)
+static int pop_mkey_locked(struct mlx5_cache_ent *ent)
{
- void *old, *xa_mkey;
-
- ent->stored--;
- ent->reserved--;
+ unsigned long tmp = (ent->mkeys_queue.ci - 1) % NUM_MKEYS_PER_PAGE;
+ struct mlx5_mkeys_page *last_page;
+ u32 mkey;
- if (ent->stored == ent->reserved) {
- xa_mkey = __xa_erase(&ent->mkeys, ent->stored);
- WARN_ON(!xa_mkey);
- return (u32)xa_to_value(xa_mkey);
+ lockdep_assert_held(&ent->mkeys_queue.lock);
+ last_page = list_last_entry(&ent->mkeys_queue.pages_list,
+ struct mlx5_mkeys_page, list);
+ mkey = last_page->mkeys[tmp];
+ last_page->mkeys[tmp] = 0;
+ ent->mkeys_queue.ci--;
+ if (ent->mkeys_queue.num_pages > 1 && !tmp) {
+ list_del(&last_page->list);
+ ent->mkeys_queue.num_pages--;
+ kfree(last_page);
}
-
- xa_mkey = __xa_store(&ent->mkeys, ent->stored, XA_ZERO_ENTRY,
- GFP_KERNEL);
- WARN_ON(!xa_mkey || xa_is_err(xa_mkey));
- old = __xa_erase(&ent->mkeys, ent->reserved);
- WARN_ON(old);
- return (u32)xa_to_value(xa_mkey);
+ return mkey;
}
static void create_mkey_callback(int status, struct mlx5_async_work *context)
@@ -260,10 +197,10 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context)
if (status) {
create_mkey_warn(dev, status, mkey_out->out);
kfree(mkey_out);
- xa_lock_irqsave(&ent->mkeys, flags);
- undo_push_reserve_mkey(ent);
+ spin_lock_irqsave(&ent->mkeys_queue.lock, flags);
+ ent->pending--;
WRITE_ONCE(dev->fill_delay, 1);
- xa_unlock_irqrestore(&ent->mkeys, flags);
+ spin_unlock_irqrestore(&ent->mkeys_queue.lock, flags);
mod_timer(&dev->delay_timer, jiffies + HZ);
return;
}
@@ -272,11 +209,12 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context)
MLX5_GET(create_mkey_out, mkey_out->out, mkey_index));
WRITE_ONCE(dev->cache.last_add, jiffies);
- xa_lock_irqsave(&ent->mkeys, flags);
- push_to_reserved(ent, mkey_out->mkey);
+ spin_lock_irqsave(&ent->mkeys_queue.lock, flags);
+ push_mkey_locked(ent, mkey_out->mkey);
/* If we are doing fill_to_high_water then keep going. */
queue_adjust_cache_locked(ent);
- xa_unlock_irqrestore(&ent->mkeys, flags);
+ ent->pending--;
+ spin_unlock_irqrestore(&ent->mkeys_queue.lock, flags);
kfree(mkey_out);
}
@@ -333,24 +271,28 @@ static int add_keys(struct mlx5_cache_ent *ent, unsigned int num)
set_cache_mkc(ent, mkc);
async_create->ent = ent;
- err = push_mkey(ent, true, NULL);
- if (err)
+ spin_lock_irq(&ent->mkeys_queue.lock);
+ if (ent->pending >= MAX_PENDING_REG_MR) {
+ err = -EAGAIN;
goto free_async_create;
+ }
+ ent->pending++;
+ spin_unlock_irq(&ent->mkeys_queue.lock);
err = mlx5_ib_create_mkey_cb(async_create);
if (err) {
mlx5_ib_warn(ent->dev, "create mkey failed %d\n", err);
- goto err_undo_reserve;
+ goto err_create_mkey;
}
}
return 0;
-err_undo_reserve:
- xa_lock_irq(&ent->mkeys);
- undo_push_reserve_mkey(ent);
- xa_unlock_irq(&ent->mkeys);
+err_create_mkey:
+ spin_lock_irq(&ent->mkeys_queue.lock);
+ ent->pending--;
free_async_create:
+ spin_unlock_irq(&ent->mkeys_queue.lock);
kfree(async_create);
return err;
}
@@ -383,36 +325,36 @@ static void remove_cache_mr_locked(struct mlx5_cache_ent *ent)
{
u32 mkey;
- lockdep_assert_held(&ent->mkeys.xa_lock);
- if (!ent->stored)
+ lockdep_assert_held(&ent->mkeys_queue.lock);
+ if (!ent->mkeys_queue.ci)
return;
- mkey = pop_stored_mkey(ent);
- xa_unlock_irq(&ent->mkeys);
+ mkey = pop_mkey_locked(ent);
+ spin_unlock_irq(&ent->mkeys_queue.lock);
mlx5_core_destroy_mkey(ent->dev->mdev, mkey);
- xa_lock_irq(&ent->mkeys);
+ spin_lock_irq(&ent->mkeys_queue.lock);
}
static int resize_available_mrs(struct mlx5_cache_ent *ent, unsigned int target,
bool limit_fill)
- __acquires(&ent->mkeys) __releases(&ent->mkeys)
+ __acquires(&ent->mkeys_queue.lock) __releases(&ent->mkeys_queue.lock)
{
int err;
- lockdep_assert_held(&ent->mkeys.xa_lock);
+ lockdep_assert_held(&ent->mkeys_queue.lock);
while (true) {
if (limit_fill)
target = ent->limit * 2;
- if (target == ent->reserved)
+ if (target == ent->pending + ent->mkeys_queue.ci)
return 0;
- if (target > ent->reserved) {
- u32 todo = target - ent->reserved;
+ if (target > ent->pending + ent->mkeys_queue.ci) {
+ u32 todo = target - (ent->pending + ent->mkeys_queue.ci);
- xa_unlock_irq(&ent->mkeys);
+ spin_unlock_irq(&ent->mkeys_queue.lock);
err = add_keys(ent, todo);
if (err == -EAGAIN)
usleep_range(3000, 5000);
- xa_lock_irq(&ent->mkeys);
+ spin_lock_irq(&ent->mkeys_queue.lock);
if (err) {
if (err != -EAGAIN)
return err;
@@ -440,7 +382,7 @@ static ssize_t size_write(struct file *filp, const char __user *buf,
* cannot free MRs that are in use. Compute the target value for stored
* mkeys.
*/
- xa_lock_irq(&ent->mkeys);
+ spin_lock_irq(&ent->mkeys_queue.lock);
if (target < ent->in_use) {
err = -EINVAL;
goto err_unlock;
@@ -453,12 +395,12 @@ static ssize_t size_write(struct file *filp, const char __user *buf,
err = resize_available_mrs(ent, target, false);
if (err)
goto err_unlock;
- xa_unlock_irq(&ent->mkeys);
+ spin_unlock_irq(&ent->mkeys_queue.lock);
return count;
err_unlock:
- xa_unlock_irq(&ent->mkeys);
+ spin_unlock_irq(&ent->mkeys_queue.lock);
return err;
}
@@ -469,7 +411,8 @@ static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
char lbuf[20];
int err;
- err = snprintf(lbuf, sizeof(lbuf), "%ld\n", ent->stored + ent->in_use);
+ err = snprintf(lbuf, sizeof(lbuf), "%ld\n",
+ ent->mkeys_queue.ci + ent->in_use);
if (err < 0)
return err;
@@ -498,10 +441,10 @@ static ssize_t limit_write(struct file *filp, const char __user *buf,
* Upon set we immediately fill the cache to high water mark implied by
* the limit.
*/
- xa_lock_irq(&ent->mkeys);
+ spin_lock_irq(&ent->mkeys_queue.lock);
ent->limit = var;
err = resize_available_mrs(ent, 0, true);
- xa_unlock_irq(&ent->mkeys);
+ spin_unlock_irq(&ent->mkeys_queue.lock);
if (err)
return err;
return count;
@@ -537,9 +480,9 @@ static bool someone_adding(struct mlx5_mkey_cache *cache)
mutex_lock(&cache->rb_lock);
for (node = rb_first(&cache->rb_root); node; node = rb_next(node)) {
ent = rb_entry(node, struct mlx5_cache_ent, node);
- xa_lock_irq(&ent->mkeys);
- ret = ent->stored < ent->limit;
- xa_unlock_irq(&ent->mkeys);
+ spin_lock_irq(&ent->mkeys_queue.lock);
+ ret = ent->mkeys_queue.ci < ent->limit;
+ spin_unlock_irq(&ent->mkeys_queue.lock);
if (ret) {
mutex_unlock(&cache->rb_lock);
return true;
@@ -556,26 +499,26 @@ static bool someone_adding(struct mlx5_mkey_cache *cache)
*/
static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
{
- lockdep_assert_held(&ent->mkeys.xa_lock);
+ lockdep_assert_held(&ent->mkeys_queue.lock);
if (ent->disabled || READ_ONCE(ent->dev->fill_delay) || ent->is_tmp)
return;
- if (ent->stored < ent->limit) {
+ if (ent->mkeys_queue.ci < ent->limit) {
ent->fill_to_high_water = true;
mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
} else if (ent->fill_to_high_water &&
- ent->reserved < 2 * ent->limit) {
+ ent->mkeys_queue.ci + ent->pending < 2 * ent->limit) {
/*
* Once we start populating due to hitting a low water mark
* continue until we pass the high water mark.
*/
mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
- } else if (ent->stored == 2 * ent->limit) {
+ } else if (ent->mkeys_queue.ci == 2 * ent->limit) {
ent->fill_to_high_water = false;
- } else if (ent->stored > 2 * ent->limit) {
+ } else if (ent->mkeys_queue.ci > 2 * ent->limit) {
/* Queue deletion of excess entries */
ent->fill_to_high_water = false;
- if (ent->stored != ent->reserved)
+ if (ent->pending)
queue_delayed_work(ent->dev->cache.wq, &ent->dwork,
msecs_to_jiffies(1000));
else
@@ -589,15 +532,16 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
struct mlx5_mkey_cache *cache = &dev->cache;
int err;
- xa_lock_irq(&ent->mkeys);
+ spin_lock_irq(&ent->mkeys_queue.lock);
if (ent->disabled)
goto out;
- if (ent->fill_to_high_water && ent->reserved < 2 * ent->limit &&
+ if (ent->fill_to_high_water &&
+ ent->mkeys_queue.ci + ent->pending < 2 * ent->limit &&
!READ_ONCE(dev->fill_delay)) {
- xa_unlock_irq(&ent->mkeys);
+ spin_unlock_irq(&ent->mkeys_queue.lock);
err = add_keys(ent, 1);
- xa_lock_irq(&ent->mkeys);
+ spin_lock_irq(&ent->mkeys_queue.lock);
if (ent->disabled)
goto out;
if (err) {
@@ -615,7 +559,7 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
msecs_to_jiffies(1000));
}
}
- } else if (ent->stored > 2 * ent->limit) {
+ } else if (ent->mkeys_queue.ci > 2 * ent->limit) {
bool need_delay;
/*
@@ -630,11 +574,11 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
* the garbage collection work to try to run in next cycle, in
* order to free CPU resources to other tasks.
*/
- xa_unlock_irq(&ent->mkeys);
+ spin_unlock_irq(&ent->mkeys_queue.lock);
need_delay = need_resched() || someone_adding(cache) ||
!time_after(jiffies,
READ_ONCE(cache->last_add) + 300 * HZ);
- xa_lock_irq(&ent->mkeys);
+ spin_lock_irq(&ent->mkeys_queue.lock);
if (ent->disabled)
goto out;
if (need_delay) {
@@ -645,7 +589,7 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
queue_adjust_cache_locked(ent);
}
out:
- xa_unlock_irq(&ent->mkeys);
+ spin_unlock_irq(&ent->mkeys_queue.lock);
}
static void delayed_cache_work_func(struct work_struct *work)
@@ -753,25 +697,25 @@ static struct mlx5_ib_mr *_mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
if (!mr)
return ERR_PTR(-ENOMEM);
- xa_lock_irq(&ent->mkeys);
+ spin_lock_irq(&ent->mkeys_queue.lock);
ent->in_use++;
- if (!ent->stored) {
+ if (!ent->mkeys_queue.ci) {
queue_adjust_cache_locked(ent);
ent->miss++;
- xa_unlock_irq(&ent->mkeys);
+ spin_unlock_irq(&ent->mkeys_queue.lock);
err = create_cache_mkey(ent, &mr->mmkey.key);
if (err) {
- xa_lock_irq(&ent->mkeys);
+ spin_lock_irq(&ent->mkeys_queue.lock);
ent->in_use--;
- xa_unlock_irq(&ent->mkeys);
+ spin_unlock_irq(&ent->mkeys_queue.lock);
kfree(mr);
return ERR_PTR(err);
}
} else {
- mr->mmkey.key = pop_stored_mkey(ent);
+ mr->mmkey.key = pop_mkey_locked(ent);
queue_adjust_cache_locked(ent);
- xa_unlock_irq(&ent->mkeys);
+ spin_unlock_irq(&ent->mkeys_queue.lock);
}
mr->mmkey.cache_ent = ent;
mr->mmkey.type = MLX5_MKEY_MR;
@@ -825,14 +769,14 @@ static void clean_keys(struct mlx5_ib_dev *dev, struct mlx5_cache_ent *ent)
u32 mkey;
cancel_delayed_work(&ent->dwork);
- xa_lock_irq(&ent->mkeys);
- while (ent->stored) {
- mkey = pop_stored_mkey(ent);
- xa_unlock_irq(&ent->mkeys);
+ spin_lock_irq(&ent->mkeys_queue.lock);
+ while (ent->mkeys_queue.ci) {
+ mkey = pop_mkey_locked(ent);
+ spin_unlock_irq(&ent->mkeys_queue.lock);
mlx5_core_destroy_mkey(dev->mdev, mkey);
- xa_lock_irq(&ent->mkeys);
+ spin_lock_irq(&ent->mkeys_queue.lock);
}
- xa_unlock_irq(&ent->mkeys);
+ spin_unlock_irq(&ent->mkeys_queue.lock);
}
static void mlx5_mkey_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
@@ -860,7 +804,7 @@ static void mlx5_mkey_cache_debugfs_add_ent(struct mlx5_ib_dev *dev,
dir = debugfs_create_dir(ent->name, dev->cache.fs_root);
debugfs_create_file("size", 0600, dir, ent, &size_fops);
debugfs_create_file("limit", 0600, dir, ent, &limit_fops);
- debugfs_create_ulong("cur", 0400, dir, &ent->stored);
+ debugfs_create_ulong("cur", 0400, dir, &ent->mkeys_queue.ci);
debugfs_create_u32("miss", 0600, dir, &ent->miss);
}
@@ -882,6 +826,31 @@ static void delay_time_func(struct timer_list *t)
WRITE_ONCE(dev->fill_delay, 0);
}
+static int mlx5r_mkeys_init(struct mlx5_cache_ent *ent)
+{
+ struct mlx5_mkeys_page *page;
+
+ page = kzalloc(sizeof(*page), GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&ent->mkeys_queue.pages_list);
+ spin_lock_init(&ent->mkeys_queue.lock);
+ list_add_tail(&page->list, &ent->mkeys_queue.pages_list);
+ ent->mkeys_queue.num_pages++;
+ return 0;
+}
+
+static void mlx5r_mkeys_uninit(struct mlx5_cache_ent *ent)
+{
+ struct mlx5_mkeys_page *page;
+
+ WARN_ON(ent->mkeys_queue.ci || ent->mkeys_queue.num_pages > 1);
+ page = list_last_entry(&ent->mkeys_queue.pages_list,
+ struct mlx5_mkeys_page, list);
+ list_del(&page->list);
+ kfree(page);
+}
+
struct mlx5_cache_ent *
mlx5r_cache_create_ent_locked(struct mlx5_ib_dev *dev,
struct mlx5r_cache_rb_key rb_key,
@@ -895,7 +864,9 @@ mlx5r_cache_create_ent_locked(struct mlx5_ib_dev *dev,
if (!ent)
return ERR_PTR(-ENOMEM);
- xa_init_flags(&ent->mkeys, XA_FLAGS_LOCK_IRQ);
+ ret = mlx5r_mkeys_init(ent);
+ if (ret)
+ goto mkeys_err;
ent->rb_key = rb_key;
ent->dev = dev;
ent->is_tmp = !persistent_entry;
@@ -903,10 +874,8 @@ mlx5r_cache_create_ent_locked(struct mlx5_ib_dev *dev,
INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
ret = mlx5_cache_ent_insert(&dev->cache, ent);
- if (ret) {
- kfree(ent);
- return ERR_PTR(ret);
- }
+ if (ret)
+ goto ent_insert_err;
if (persistent_entry) {
if (rb_key.access_mode == MLX5_MKC_ACCESS_MODE_KSM)
@@ -929,6 +898,11 @@ mlx5r_cache_create_ent_locked(struct mlx5_ib_dev *dev,
}
return ent;
+ent_insert_err:
+ mlx5r_mkeys_uninit(ent);
+mkeys_err:
+ kfree(ent);
+ return ERR_PTR(ret);
}
static void remove_ent_work_func(struct work_struct *work)
@@ -946,13 +920,13 @@ static void remove_ent_work_func(struct work_struct *work)
cur = rb_prev(cur);
mutex_unlock(&cache->rb_lock);
- xa_lock_irq(&ent->mkeys);
+ spin_lock_irq(&ent->mkeys_queue.lock);
if (!ent->is_tmp) {
- xa_unlock_irq(&ent->mkeys);
+ spin_unlock_irq(&ent->mkeys_queue.lock);
mutex_lock(&cache->rb_lock);
continue;
}
- xa_unlock_irq(&ent->mkeys);
+ spin_unlock_irq(&ent->mkeys_queue.lock);
clean_keys(ent->dev, ent);
mutex_lock(&cache->rb_lock);
@@ -1002,9 +976,9 @@ int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev)
mutex_unlock(&cache->rb_lock);
for (node = rb_first(root); node; node = rb_next(node)) {
ent = rb_entry(node, struct mlx5_cache_ent, node);
- xa_lock_irq(&ent->mkeys);
+ spin_lock_irq(&ent->mkeys_queue.lock);
queue_adjust_cache_locked(ent);
- xa_unlock_irq(&ent->mkeys);
+ spin_unlock_irq(&ent->mkeys_queue.lock);
}
return 0;
@@ -1029,9 +1003,9 @@ void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev)
cancel_delayed_work(&dev->cache.remove_ent_dwork);
for (node = rb_first(root); node; node = rb_next(node)) {
ent = rb_entry(node, struct mlx5_cache_ent, node);
- xa_lock_irq(&ent->mkeys);
+ spin_lock_irq(&ent->mkeys_queue.lock);
ent->disabled = true;
- xa_unlock_irq(&ent->mkeys);
+ spin_unlock_irq(&ent->mkeys_queue.lock);
cancel_delayed_work(&ent->dwork);
}
mutex_unlock(&dev->cache.rb_lock);
@@ -1053,6 +1027,7 @@ void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev)
node = rb_next(node);
clean_keys(dev, ent);
rb_erase(&ent->node, root);
+ mlx5r_mkeys_uninit(ent);
kfree(ent);
}
mutex_unlock(&dev->cache.rb_lock);
@@ -1825,7 +1800,7 @@ static int cache_ent_find_and_store(struct mlx5_ib_dev *dev,
int ret;
if (mr->mmkey.cache_ent) {
- xa_lock_irq(&mr->mmkey.cache_ent->mkeys);
+ spin_lock_irq(&mr->mmkey.cache_ent->mkeys_queue.lock);
mr->mmkey.cache_ent->in_use--;
goto end;
}
@@ -1839,7 +1814,7 @@ static int cache_ent_find_and_store(struct mlx5_ib_dev *dev,
return -EOPNOTSUPP;
}
mr->mmkey.cache_ent = ent;
- xa_lock_irq(&mr->mmkey.cache_ent->mkeys);
+ spin_lock_irq(&mr->mmkey.cache_ent->mkeys_queue.lock);
mutex_unlock(&cache->rb_lock);
goto end;
}
@@ -1851,12 +1826,11 @@ static int cache_ent_find_and_store(struct mlx5_ib_dev *dev,
return PTR_ERR(ent);
mr->mmkey.cache_ent = ent;
- xa_lock_irq(&mr->mmkey.cache_ent->mkeys);
+ spin_lock_irq(&mr->mmkey.cache_ent->mkeys_queue.lock);
end:
- ret = push_mkey_locked(mr->mmkey.cache_ent, false,
- xa_mk_value(mr->mmkey.key));
- xa_unlock_irq(&mr->mmkey.cache_ent->mkeys);
+ ret = push_mkey_locked(mr->mmkey.cache_ent, mr->mmkey.key);
+ spin_unlock_irq(&mr->mmkey.cache_ent->mkeys_queue.lock);
return ret;
}
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 2340baaba8..8115ab1071 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -3436,7 +3436,7 @@ static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
if (rate == IB_RATE_PORT_CURRENT)
return 0;
- if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_600_GBPS)
+ if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_800_GBPS)
return -EINVAL;
stat_rate_support = MLX5_CAP_GEN(dev->mdev, stat_rate_support);
diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c
index 234bf30db7..e76142f6fa 100644
--- a/drivers/infiniband/hw/mlx5/umr.c
+++ b/drivers/infiniband/hw/mlx5/umr.c
@@ -332,8 +332,8 @@ static int mlx5r_umr_post_send_wait(struct mlx5_ib_dev *dev, u32 mkey,
WARN_ON_ONCE(1);
mlx5_ib_warn(dev,
- "reg umr failed (%u). Trying to recover and resubmit the flushed WQEs\n",
- umr_context.status);
+ "reg umr failed (%u). Trying to recover and resubmit the flushed WQEs, mkey = %u\n",
+ umr_context.status, mkey);
mutex_lock(&umrc->lock);
err = mlx5r_umr_recover(dev);
mutex_unlock(&umrc->lock);