Diffstat (limited to 'drivers/vdpa')
-rw-r--r--	drivers/vdpa/mlx5/core/mlx5_vdpa.h |  10
-rw-r--r--	drivers/vdpa/mlx5/core/mr.c        |  69
-rw-r--r--	drivers/vdpa/mlx5/net/mlx5_vnet.c  | 209
-rw-r--r--	drivers/vdpa/vdpa.c                |   4
-rw-r--r--	drivers/vdpa/vdpa_user/vduse_dev.c |   2
5 files changed, 254 insertions, 40 deletions
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
index 84547d998b..50aac8fe57 100644
--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -35,6 +35,9 @@ struct mlx5_vdpa_mr {
 	struct vhost_iotlb *iotlb;
 
 	bool user_mr;
+
+	refcount_t refcount;
+	struct list_head mr_list;
 };
 
 struct mlx5_vdpa_resources {
@@ -93,6 +96,7 @@ struct mlx5_vdpa_dev {
 	u32 generation;
 
 	struct mlx5_vdpa_mr *mr[MLX5_VDPA_NUM_AS];
+	struct list_head mr_list_head;
 	/* serialize mr access */
 	struct mutex mr_mtx;
 	struct mlx5_control_vq cvq;
@@ -118,8 +122,10 @@ int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, u32 mkey);
 struct mlx5_vdpa_mr *mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
 					 struct vhost_iotlb *iotlb);
 void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev);
-void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev,
-			  struct mlx5_vdpa_mr *mr);
+void mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev,
+		      struct mlx5_vdpa_mr *mr);
+void mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
+		      struct mlx5_vdpa_mr *mr);
 void mlx5_vdpa_update_mr(struct mlx5_vdpa_dev *mvdev,
			 struct mlx5_vdpa_mr *mr,
			 unsigned int asid);
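The header changes pair every memory region with a reference count and a membership link on mvdev->mr_list_head, turning the old destroy entry point into get/put semantics. A minimal lifecycle sketch of the resulting API (illustrative only — example_set_map() is a hypothetical caller, the real one being set_map_data() in mlx5_vnet.c further down — and it assumes mlx5_vdpa_create_mr() returns an ERR_PTR on failure, as the out_err path in mr.c suggests):

	static int example_set_map(struct mlx5_vdpa_dev *mvdev,
				   struct vhost_iotlb *iotlb, unsigned int asid)
	{
		struct mlx5_vdpa_mr *new_mr;

		/* Created with refcount == 1; the caller owns that reference. */
		new_mr = mlx5_vdpa_create_mr(mvdev, iotlb);
		if (IS_ERR(new_mr))
			return PTR_ERR(new_mr);

		/* Install new_mr in mvdev->mr[asid]; the old mr's reference is
		 * dropped and ownership of the initial reference passes to the
		 * asid slot. Other holders (e.g. virtqueues) keep their own. */
		mlx5_vdpa_update_mr(mvdev, new_mr, asid);
		return 0;
	}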
diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
index 2197c46e56..4758914ccf 100644
--- a/drivers/vdpa/mlx5/core/mr.c
+++ b/drivers/vdpa/mlx5/core/mr.c
@@ -498,32 +498,54 @@ static void destroy_user_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr
 static void _mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
 {
+	if (WARN_ON(!mr))
+		return;
+
 	if (mr->user_mr)
 		destroy_user_mr(mvdev, mr);
 	else
 		destroy_dma_mr(mvdev, mr);
 
 	vhost_iotlb_free(mr->iotlb);
+
+	list_del(&mr->mr_list);
+
+	kfree(mr);
 }
 
-void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev,
-			  struct mlx5_vdpa_mr *mr)
+static void _mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
+			      struct mlx5_vdpa_mr *mr)
 {
 	if (!mr)
 		return;
 
+	if (refcount_dec_and_test(&mr->refcount))
+		_mlx5_vdpa_destroy_mr(mvdev, mr);
+}
+
+void mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
+		      struct mlx5_vdpa_mr *mr)
+{
 	mutex_lock(&mvdev->mr_mtx);
+	_mlx5_vdpa_put_mr(mvdev, mr);
+	mutex_unlock(&mvdev->mr_mtx);
+}
 
-	_mlx5_vdpa_destroy_mr(mvdev, mr);
+static void _mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev,
+			      struct mlx5_vdpa_mr *mr)
+{
+	if (!mr)
+		return;
 
-	for (int i = 0; i < MLX5_VDPA_NUM_AS; i++) {
-		if (mvdev->mr[i] == mr)
-			mvdev->mr[i] = NULL;
-	}
+	refcount_inc(&mr->refcount);
+}
 
+void mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev,
+		      struct mlx5_vdpa_mr *mr)
+{
+	mutex_lock(&mvdev->mr_mtx);
+	_mlx5_vdpa_get_mr(mvdev, mr);
 	mutex_unlock(&mvdev->mr_mtx);
-
-	kfree(mr);
 }
 
 void mlx5_vdpa_update_mr(struct mlx5_vdpa_dev *mvdev,
@@ -534,10 +556,23 @@ void mlx5_vdpa_update_mr(struct mlx5_vdpa_dev *mvdev,
 
 	mutex_lock(&mvdev->mr_mtx);
 
+	_mlx5_vdpa_put_mr(mvdev, old_mr);
 	mvdev->mr[asid] = new_mr;
-	if (old_mr) {
-		_mlx5_vdpa_destroy_mr(mvdev, old_mr);
-		kfree(old_mr);
+
+	mutex_unlock(&mvdev->mr_mtx);
+}
+
+static void mlx5_vdpa_show_mr_leaks(struct mlx5_vdpa_dev *mvdev)
+{
+	struct mlx5_vdpa_mr *mr;
+
+	mutex_lock(&mvdev->mr_mtx);
+
+	list_for_each_entry(mr, &mvdev->mr_list_head, mr_list) {
+
+		mlx5_vdpa_warn(mvdev, "mkey still alive after resource delete: "
+				      "mr: %p, mkey: 0x%x, refcount: %u\n",
+				      mr, mr->mkey, refcount_read(&mr->refcount));
 	}
 
 	mutex_unlock(&mvdev->mr_mtx);
@@ -547,9 +582,11 @@ void mlx5_vdpa_update_mr(struct mlx5_vdpa_dev *mvdev,
 }
 
 void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev)
 {
 	for (int i = 0; i < MLX5_VDPA_NUM_AS; i++)
-		mlx5_vdpa_destroy_mr(mvdev, mvdev->mr[i]);
+		mlx5_vdpa_update_mr(mvdev, NULL, i);
 
 	prune_iotlb(mvdev->cvq.iotlb);
+
+	mlx5_vdpa_show_mr_leaks(mvdev);
 }
 
 static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
@@ -576,6 +613,8 @@ static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
 	if (err)
 		goto err_iotlb;
 
+	list_add_tail(&mr->mr_list, &mvdev->mr_list_head);
+
 	return 0;
 
 err_iotlb:
@@ -607,6 +646,8 @@ struct mlx5_vdpa_mr *mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
 	if (err)
 		goto out_err;
 
+	refcount_set(&mr->refcount, 1);
+
 	return mr;
 
 out_err:
@@ -651,7 +692,7 @@ int mlx5_vdpa_reset_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
 	if (asid >= MLX5_VDPA_NUM_AS)
 		return -EINVAL;
 
-	mlx5_vdpa_destroy_mr(mvdev, mvdev->mr[asid]);
+	mlx5_vdpa_update_mr(mvdev, NULL, asid);
 
 	if (asid == 0 && MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
 		if (mlx5_vdpa_create_dma_mr(mvdev))
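A note on the locking convention carried through mr.c: the underscore-prefixed helpers (_mlx5_vdpa_get_mr(), _mlx5_vdpa_put_mr()) expect mr_mtx to be held, while the exported wrappers take the mutex themselves. mlx5_vdpa_update_mr() is the one caller that must mix the two, which is why it uses the underscore variant. A condensed restatement of that hunk (the old_mr local falls outside the hunk context and is filled in here as an assumption):

	void mlx5_vdpa_update_mr(struct mlx5_vdpa_dev *mvdev,
				 struct mlx5_vdpa_mr *new_mr,
				 unsigned int asid)
	{
		struct mlx5_vdpa_mr *old_mr = mvdev->mr[asid];	/* assumed local */

		mutex_lock(&mvdev->mr_mtx);

		/* Already under mr_mtx: the public mlx5_vdpa_put_mr() would
		 * take the mutex again and deadlock, so the underscore
		 * variant is used. */
		_mlx5_vdpa_put_mr(mvdev, old_mr);
		mvdev->mr[asid] = new_mr;

		mutex_unlock(&mvdev->mr_mtx);
	}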
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 7795d2b7fc..ecfc16151d 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -120,6 +120,12 @@ struct mlx5_vdpa_virtqueue {
 	u16 avail_idx;
 	u16 used_idx;
 	int fw_state;
+
+	u64 modified_fields;
+
+	struct mlx5_vdpa_mr *vq_mr;
+	struct mlx5_vdpa_mr *desc_mr;
+
 	struct msi_map map;
 
 	/* keep last in the struct */
@@ -941,6 +947,14 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
 	kfree(in);
 	mvq->virtq_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
 
+	mlx5_vdpa_get_mr(mvdev, vq_mr);
+	mvq->vq_mr = vq_mr;
+
+	if (vq_desc_mr && MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported)) {
+		mlx5_vdpa_get_mr(mvdev, vq_desc_mr);
+		mvq->desc_mr = vq_desc_mr;
+	}
+
 	return 0;
 
 err_cmd:
@@ -967,6 +981,12 @@ static void destroy_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtq
 	}
 	mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_NONE;
 	umems_destroy(ndev, mvq);
+
+	mlx5_vdpa_put_mr(&ndev->mvdev, mvq->vq_mr);
+	mvq->vq_mr = NULL;
+
+	mlx5_vdpa_put_mr(&ndev->mvdev, mvq->desc_mr);
+	mvq->desc_mr = NULL;
 }
 
 static u32 get_rqpn(struct mlx5_vdpa_virtqueue *mvq, bool fw)
@@ -1165,7 +1185,12 @@ err_cmd:
 	return err;
 }
 
-static bool is_valid_state_change(int oldstate, int newstate)
+static bool is_resumable(struct mlx5_vdpa_net *ndev)
+{
+	return ndev->mvdev.vdev.config->resume;
+}
+
+static bool is_valid_state_change(int oldstate, int newstate, bool resumable)
 {
 	switch (oldstate) {
 	case MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT:
@@ -1173,25 +1198,43 @@ static bool is_valid_state_change(int oldstate, int newstate)
 	case MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY:
 		return newstate == MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND;
 	case MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND:
+		return resumable ? newstate == MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY : false;
 	case MLX5_VIRTIO_NET_Q_OBJECT_STATE_ERR:
 	default:
 		return false;
 	}
 }
 
-static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int state)
+static bool modifiable_virtqueue_fields(struct mlx5_vdpa_virtqueue *mvq)
+{
+	/* Only state is always modifiable */
+	if (mvq->modified_fields & ~MLX5_VIRTQ_MODIFY_MASK_STATE)
+		return mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT ||
+		       mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND;
+
+	return true;
+}
+
+static int modify_virtqueue(struct mlx5_vdpa_net *ndev,
+			    struct mlx5_vdpa_virtqueue *mvq,
+			    int state)
 {
 	int inlen = MLX5_ST_SZ_BYTES(modify_virtio_net_q_in);
 	u32 out[MLX5_ST_SZ_DW(modify_virtio_net_q_out)] = {};
+	struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
+	struct mlx5_vdpa_mr *desc_mr = NULL;
+	struct mlx5_vdpa_mr *vq_mr = NULL;
+	bool state_change = false;
 	void *obj_context;
 	void *cmd_hdr;
+	void *vq_ctx;
 	void *in;
 	int err;
 
 	if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_NONE)
 		return 0;
 
-	if (!is_valid_state_change(mvq->fw_state, state))
+	if (!modifiable_virtqueue_fields(mvq))
 		return -EINVAL;
 
 	in = kzalloc(inlen, GFP_KERNEL);
@@ -1206,17 +1249,83 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
 	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
 
 	obj_context = MLX5_ADDR_OF(modify_virtio_net_q_in, in, obj_context);
-	MLX5_SET64(virtio_net_q_object, obj_context, modify_field_select,
-		   MLX5_VIRTQ_MODIFY_MASK_STATE);
-	MLX5_SET(virtio_net_q_object, obj_context, state, state);
+	vq_ctx = MLX5_ADDR_OF(virtio_net_q_object, obj_context, virtio_q_context);
+
+	if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_STATE) {
+		if (!is_valid_state_change(mvq->fw_state, state, is_resumable(ndev))) {
+			err = -EINVAL;
+			goto done;
+		}
+
+		MLX5_SET(virtio_net_q_object, obj_context, state, state);
+		state_change = true;
+	}
+
+	if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_ADDRS) {
+		MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr);
+		MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr);
+		MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr);
+	}
+
+	if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_AVAIL_IDX)
+		MLX5_SET(virtio_net_q_object, obj_context, hw_available_index, mvq->avail_idx);
+
+	if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_USED_IDX)
+		MLX5_SET(virtio_net_q_object, obj_context, hw_used_index, mvq->used_idx);
+
+	if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY) {
+		vq_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]];
+
+		if (vq_mr)
+			MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, vq_mr->mkey);
+		else
+			mvq->modified_fields &= ~MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY;
+	}
+
+	if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY) {
+		desc_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
+
+		if (desc_mr && MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported))
+			MLX5_SET(virtio_q, vq_ctx, desc_group_mkey, desc_mr->mkey);
+		else
+			mvq->modified_fields &= ~MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY;
+	}
+
+	MLX5_SET64(virtio_net_q_object, obj_context, modify_field_select, mvq->modified_fields);
 	err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
-	kfree(in);
-	if (!err)
+	if (err)
+		goto done;
+
+	if (state_change)
 		mvq->fw_state = state;
 
+	if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY) {
+		mlx5_vdpa_put_mr(mvdev, mvq->vq_mr);
+		mlx5_vdpa_get_mr(mvdev, vq_mr);
+		mvq->vq_mr = vq_mr;
+	}
+
+	if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY) {
+		mlx5_vdpa_put_mr(mvdev, mvq->desc_mr);
+		mlx5_vdpa_get_mr(mvdev, desc_mr);
+		mvq->desc_mr = desc_mr;
+	}
+
+	mvq->modified_fields = 0;
+
+done:
+	kfree(in);
 	return err;
 }
 
+static int modify_virtqueue_state(struct mlx5_vdpa_net *ndev,
+				  struct mlx5_vdpa_virtqueue *mvq,
+				  unsigned int state)
+{
+	mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_STATE;
+	return modify_virtqueue(ndev, mvq, state);
+}
+
 static int counter_set_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
 {
 	u32 in[MLX5_ST_SZ_DW(create_virtio_q_counters_in)] = {};
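The rework above turns modify_virtqueue() into a batching flush: callers mark fields dirty in mvq->modified_fields, and the next modify sends them all in a single MODIFY_VIRTIO_NET_Q command. An illustrative caller (hypothetical — the real producers of modified_fields are the .set_vq_address/.set_vq_state ops and the change_map path below):

	static int example_requeue(struct mlx5_vdpa_net *ndev,
				   struct mlx5_vdpa_virtqueue *mvq)
	{
		/* Mark addresses and the data mkey dirty; nothing is sent yet. */
		mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_ADDRS |
					MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY;

		/* Adds MLX5_VIRTQ_MODIFY_MASK_STATE and flushes every pending
		 * field in one firmware command; modified_fields is cleared
		 * only if the command succeeds. */
		return modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
	}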
@@ -1345,7 +1454,7 @@ static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
 		goto err_vq;
 
 	if (mvq->ready) {
-		err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
+		err = modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
 		if (err) {
 			mlx5_vdpa_warn(&ndev->mvdev, "failed to modify to ready vq idx %d(%d)\n",
 				       idx, err);
@@ -1380,7 +1489,7 @@ static void suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *m
 	if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY)
 		return;
 
-	if (modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND))
+	if (modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND))
 		mlx5_vdpa_warn(&ndev->mvdev, "modify to suspend failed\n");
 
 	if (query_virtqueue(ndev, mvq, &attr)) {
@@ -1399,12 +1508,31 @@ static void suspend_vqs(struct mlx5_vdpa_net *ndev)
 		suspend_vq(ndev, &ndev->vqs[i]);
 }
 
+static void resume_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+	if (!mvq->initialized || !is_resumable(ndev))
+		return;
+
+	if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND)
+		return;
+
+	if (modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY))
+		mlx5_vdpa_warn(&ndev->mvdev, "modify to resume failed for vq %u\n", mvq->index);
+}
+
+static void resume_vqs(struct mlx5_vdpa_net *ndev)
+{
+	for (int i = 0; i < ndev->mvdev.max_vqs; i++)
+		resume_vq(ndev, &ndev->vqs[i]);
+}
+
 static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
 {
 	if (!mvq->initialized)
 		return;
 
 	suspend_vq(ndev, mvq);
+	mvq->modified_fields = 0;
 	destroy_virtqueue(ndev, mvq);
 	dealloc_vector(ndev, mvq);
 	counter_set_dealloc(ndev, mvq);
@@ -2136,6 +2264,7 @@ static int mlx5_vdpa_set_vq_address(struct vdpa_device *vdev, u16 idx, u64 desc_
 	mvq->desc_addr = desc_area;
 	mvq->device_addr = device_area;
 	mvq->driver_addr = driver_area;
+	mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_ADDRS;
 	return 0;
 }
 
@@ -2212,7 +2341,7 @@ static void mlx5_vdpa_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready
 	if (!ready) {
 		suspend_vq(ndev, mvq);
 	} else {
-		err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
+		err = modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
 		if (err) {
 			mlx5_vdpa_warn(mvdev, "modify VQ %d to ready failed (%d)\n", idx, err);
 			ready = false;
@@ -2260,6 +2389,8 @@ static int mlx5_vdpa_set_vq_state(struct vdpa_device *vdev, u16 idx,
 
 	mvq->used_idx = state->split.avail_index;
 	mvq->avail_idx = state->split.avail_index;
+	mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_AVAIL_IDX |
+				MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_USED_IDX;
 	return 0;
 }
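Taken together with is_valid_state_change(), the firmware-visible state machine now reads as follows (summary comment derived from the hunks above, not new code):

	/*
	 * INIT    -> RDY
	 * RDY     -> SUSPEND
	 * SUSPEND -> RDY       (only when the device is resumable)
	 * ERR     ->           (no way out)
	 *
	 * Fields other than state may only be modified while the queue is
	 * in INIT or SUSPEND -- see modifiable_virtqueue_fields().
	 */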
@@ -2708,24 +2839,35 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
 				unsigned int asid)
 {
 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+	bool teardown = !is_resumable(ndev);
 	int err;
 
 	suspend_vqs(ndev);
-	err = save_channels_info(ndev);
-	if (err)
-		return err;
+	if (teardown) {
+		err = save_channels_info(ndev);
+		if (err)
+			return err;
 
-	teardown_driver(ndev);
+		teardown_driver(ndev);
+	}
 
 	mlx5_vdpa_update_mr(mvdev, new_mr, asid);
 
+	for (int i = 0; i < ndev->cur_num_vqs; i++)
+		ndev->vqs[i].modified_fields |= MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY |
+						MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY;
+
 	if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK) || mvdev->suspended)
 		return 0;
 
-	restore_channels_info(ndev);
-	err = setup_driver(mvdev);
-	if (err)
-		return err;
+	if (teardown) {
+		restore_channels_info(ndev);
+		err = setup_driver(mvdev);
+		if (err)
+			return err;
+	}
+
+	resume_vqs(ndev);
 
 	return 0;
 }
@@ -2809,8 +2951,10 @@ static void clear_vqs_ready(struct mlx5_vdpa_net *ndev)
 {
 	int i;
 
-	for (i = 0; i < ndev->mvdev.max_vqs; i++)
+	for (i = 0; i < ndev->mvdev.max_vqs; i++) {
 		ndev->vqs[i].ready = false;
+		ndev->vqs[i].modified_fields = 0;
+	}
 
 	ndev->mvdev.cvq.ready = false;
 }
@@ -2987,7 +3131,7 @@ static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
 	return mlx5_vdpa_update_cvq_iotlb(mvdev, iotlb, asid);
 
 out_err:
-	mlx5_vdpa_destroy_mr(mvdev, new_mr);
+	mlx5_vdpa_put_mr(mvdev, new_mr);
 	return err;
 }
 
@@ -3234,6 +3378,23 @@ static int mlx5_vdpa_suspend(struct vdpa_device *vdev)
 	return 0;
 }
 
+static int mlx5_vdpa_resume(struct vdpa_device *vdev)
+{
+	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+	struct mlx5_vdpa_net *ndev;
+
+	ndev = to_mlx5_vdpa_ndev(mvdev);
+
+	mlx5_vdpa_info(mvdev, "resuming device\n");
+
+	down_write(&ndev->reslock);
+	mvdev->suspended = false;
+	resume_vqs(ndev);
+	register_link_notifier(ndev);
+	up_write(&ndev->reslock);
+	return 0;
+}
+
 static int mlx5_set_group_asid(struct vdpa_device *vdev, u32 group,
 			       unsigned int asid)
@@ -3290,6 +3451,7 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
 	.get_vq_dma_dev = mlx5_get_vq_dma_dev,
 	.free = mlx5_vdpa_free,
 	.suspend = mlx5_vdpa_suspend,
+	.resume = mlx5_vdpa_resume, /* Op disabled if not supported. */
 };
 
 static int query_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
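Because mlx5_vdpa_ops is copied into each management device's ops table, optional callbacks can be stripped per device: the probe hunk below clears .resume when the firmware cannot do the SUSPEND-to-RDY transition, and is_resumable() stays truthful for free since it tests that same config->resume pointer. Sketch of the pattern (illustrative; example_strip_optional_ops() is a hypothetical helper):

	static void example_strip_optional_ops(struct mlx5_vdpa_mgmtdev *mgtdev,
					       struct mlx5_core_dev *mdev)
	{
		/* The vdpa core and the driver both just test for a NULL op. */
		if (!MLX5_CAP_DEV_VDPA_EMULATION(mdev, freeze_to_rdy_supported))
			mgtdev->vdpa_ops.resume = NULL;
	}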
@@ -3565,6 +3727,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
 	if (err)
 		goto err_mpfs;
 
+	INIT_LIST_HEAD(&mvdev->mr_list_head);
+
 	if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
 		err = mlx5_vdpa_create_dma_mr(mvdev);
 		if (err)
@@ -3661,6 +3825,9 @@ static int mlx5v_probe(struct auxiliary_device *adev,
 	if (!MLX5_CAP_DEV_VDPA_EMULATION(mdev, desc_group_mkey_supported))
 		mgtdev->vdpa_ops.get_vq_desc_group = NULL;
 
+	if (!MLX5_CAP_DEV_VDPA_EMULATION(mdev, freeze_to_rdy_supported))
+		mgtdev->vdpa_ops.resume = NULL;
+
 	err = vdpa_mgmtdev_register(&mgtdev->mgtdev);
 	if (err)
 		goto reg_err;
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index a7612e0783..d0695680b2 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -131,7 +131,7 @@ static void vdpa_release_dev(struct device *d)
 	if (ops->free)
 		ops->free(vdev);
 
-	ida_simple_remove(&vdpa_index_ida, vdev->index);
+	ida_free(&vdpa_index_ida, vdev->index);
 	kfree(vdev->driver_override);
 	kfree(vdev);
 }
@@ -205,7 +205,7 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
 	return vdev;
 
 err_name:
-	ida_simple_remove(&vdpa_index_ida, vdev->index);
+	ida_free(&vdpa_index_ida, vdev->index);
 err_ida:
 	kfree(vdev);
 err:
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index 6cb5ce4a8b..1d24da79c3 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -1157,7 +1157,7 @@ static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
 			fput(f);
 			break;
 		}
-		ret = receive_fd(f, perm_to_file_flags(entry.perm));
+		ret = receive_fd(f, NULL, perm_to_file_flags(entry.perm));
 		fput(f);
 		break;
 	}
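The last two files are mechanical API catch-ups: ida_simple_remove() has long been a deprecated alias for ida_free(), and the extra NULL passed to receive_fd() matches its updated three-argument signature (the middle argument is an optional user-space location for the new fd number, unused here). For reference, the plain IDA pairing (self-contained sketch):

	#include <linux/idr.h>

	static DEFINE_IDA(example_ida);

	static int example_ida_usage(void)
	{
		int id = ida_alloc(&example_ida, GFP_KERNEL);	/* >= 0 or -errno */

		if (id < 0)
			return id;

		ida_free(&example_ida, id);	/* replaces ida_simple_remove() */
		return 0;
	}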