author    | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-08-07 13:11:40 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-08-07 13:11:40 +0000
commit    | 8b0a8165cdad0f4133837d753649ef4682e42c3b (patch)
tree      | 5c58f869f31ddb1f7bd6e8bdea269b680b36c5b6 /drivers/accel/qaic
parent    | Releasing progress-linux version 6.8.12-1~progress7.99u1. (diff)
Merging upstream version 6.9.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/accel/qaic')
-rw-r--r-- | drivers/accel/qaic/mhi_controller.c |   6
-rw-r--r-- | drivers/accel/qaic/qaic.h           |   3
-rw-r--r-- | drivers/accel/qaic/qaic_data.c      |  59
-rw-r--r-- | drivers/accel/qaic/qaic_drv.c       | 140
4 files changed, 119 insertions, 89 deletions
diff --git a/drivers/accel/qaic/mhi_controller.c b/drivers/accel/qaic/mhi_controller.c
index cb77d048ed..ada9b1eb07 100644
--- a/drivers/accel/qaic/mhi_controller.c
+++ b/drivers/accel/qaic/mhi_controller.c
@@ -20,7 +20,7 @@ static unsigned int mhi_timeout_ms = 2000; /* 2 sec default */
 module_param(mhi_timeout_ms, uint, 0600);
 MODULE_PARM_DESC(mhi_timeout_ms, "MHI controller timeout value");
 
-static struct mhi_channel_config aic100_channels[] = {
+static const struct mhi_channel_config aic100_channels[] = {
 	{
 		.name = "QAIC_LOOPBACK",
 		.num = 0,
@@ -358,8 +358,8 @@ static struct mhi_channel_config aic100_channels[] = {
 		.wake_capable = false,
 	},
 	{
-		.num = 21,
 		.name = "QAIC_TIMESYNC",
+		.num = 21,
 		.num_elements = 32,
 		.local_elements = 0,
 		.event_ring = 0,
@@ -390,8 +390,8 @@ static struct mhi_channel_config aic100_channels[] = {
 		.wake_capable = false,
 	},
 	{
-		.num = 23,
 		.name = "QAIC_TIMESYNC_PERIODIC",
+		.num = 23,
 		.num_elements = 32,
 		.local_elements = 0,
 		.event_ring = 0,
diff --git a/drivers/accel/qaic/qaic.h b/drivers/accel/qaic/qaic.h
index 582836f953..9256653b30 100644
--- a/drivers/accel/qaic/qaic.h
+++ b/drivers/accel/qaic/qaic.h
@@ -30,6 +30,7 @@
 #define to_qaic_drm_device(dev) container_of(dev, struct qaic_drm_device, drm)
 #define to_drm(qddev) (&(qddev)->drm)
 #define to_accel_kdev(qddev) (to_drm(qddev)->accel->kdev) /* Return Linux device of accel node */
+#define to_qaic_device(dev) (to_qaic_drm_device((dev))->qdev)
 
 enum __packed dev_states {
 	/* Device is offline or will be very soon */
@@ -191,8 +192,6 @@ struct qaic_bo {
 	u32 nr_slice;
 	/* Number of slice that have been transferred by DMA engine */
 	u32 nr_slice_xfer_done;
-	/* true = BO is queued for execution, true = BO is not queued */
-	bool queued;
 	/*
 	 * If true then user has attached slicing information to this BO by
 	 * calling DRM_IOCTL_QAIC_ATTACH_SLICE_BO ioctl.
diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
index 03c9a793da..2459fe4a3f 100644
--- a/drivers/accel/qaic/qaic_data.c
+++ b/drivers/accel/qaic/qaic_data.c
@@ -141,6 +141,11 @@ struct dbc_rsp {
 	__le16 status;
 } __packed;
 
+static inline bool bo_queued(struct qaic_bo *bo)
+{
+	return !list_empty(&bo->xfer_list);
+}
+
 inline int get_dbc_req_elem_size(void)
 {
 	return sizeof(struct dbc_req);
@@ -569,6 +574,9 @@ static void qaic_free_sgt(struct sg_table *sgt)
 {
 	struct scatterlist *sg;
 
+	if (!sgt)
+		return;
+
 	for (sg = sgt->sgl; sg; sg = sg_next(sg))
 		if (sg_page(sg))
 			__free_pages(sg_page(sg), get_order(sg->length));
@@ -648,6 +656,7 @@ static void qaic_init_bo(struct qaic_bo *bo, bool reinit)
 	}
 	complete_all(&bo->xfer_done);
 	INIT_LIST_HEAD(&bo->slices);
+	INIT_LIST_HEAD(&bo->xfer_list);
 }
 
 static struct qaic_bo *qaic_alloc_init_bo(void)
@@ -709,9 +718,13 @@ int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
 	if (ret)
 		goto free_bo;
 
+	ret = drm_gem_create_mmap_offset(obj);
+	if (ret)
+		goto free_bo;
+
 	ret = drm_gem_handle_create(file_priv, obj, &args->handle);
 	if (ret)
-		goto free_sgt;
+		goto free_bo;
 
 	bo->handle = args->handle;
 	drm_gem_object_put(obj);
@@ -720,10 +733,8 @@ int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
 
 	return 0;
 
-free_sgt:
-	qaic_free_sgt(bo->sgt);
 free_bo:
-	kfree(bo);
+	drm_gem_object_put(obj);
 unlock_dev_srcu:
 	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
 unlock_usr_srcu:
@@ -738,7 +749,7 @@ int qaic_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 	struct drm_gem_object *obj;
 	struct qaic_device *qdev;
 	struct qaic_user *usr;
-	int ret;
+	int ret = 0;
 
 	usr = file_priv->driver_priv;
 	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
@@ -760,9 +771,7 @@ int qaic_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 		goto unlock_dev_srcu;
 	}
 
-	ret = drm_gem_create_mmap_offset(obj);
-	if (ret == 0)
-		args->offset = drm_vma_node_offset_addr(&obj->vma_node);
+	args->offset = drm_vma_node_offset_addr(&obj->vma_node);
 
 	drm_gem_object_put(obj);
 
@@ -828,9 +837,6 @@ static int qaic_prepare_import_bo(struct qaic_bo *bo, struct qaic_attach_slice_h
 	struct sg_table *sgt;
 	int ret;
 
-	if (obj->import_attach->dmabuf->size < hdr->size)
-		return -EINVAL;
-
 	sgt = dma_buf_map_attachment(obj->import_attach, hdr->dir);
 	if (IS_ERR(sgt)) {
 		ret = PTR_ERR(sgt);
@@ -847,9 +853,6 @@ static int qaic_prepare_export_bo(struct qaic_device *qdev, struct qaic_bo *bo,
 {
 	int ret;
 
-	if (bo->base.size < hdr->size)
-		return -EINVAL;
-
 	ret = dma_map_sgtable(&qdev->pdev->dev, bo->sgt, hdr->dir, 0);
 	if (ret)
 		return -EFAULT;
@@ -950,9 +953,6 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
 	if (arg_size / args->hdr.count != sizeof(*slice_ent))
 		return -EINVAL;
 
-	if (args->hdr.size == 0)
-		return -EINVAL;
-
 	if (!(args->hdr.dir == DMA_TO_DEVICE || args->hdr.dir == DMA_FROM_DEVICE))
 		return -EINVAL;
 
@@ -992,16 +992,16 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
 		goto free_slice_ent;
 	}
 
-	ret = qaic_validate_req(qdev, slice_ent, args->hdr.count, args->hdr.size);
-	if (ret)
-		goto free_slice_ent;
-
 	obj = drm_gem_object_lookup(file_priv, args->hdr.handle);
 	if (!obj) {
 		ret = -ENOENT;
 		goto free_slice_ent;
 	}
 
+	ret = qaic_validate_req(qdev, slice_ent, args->hdr.count, obj->size);
+	if (ret)
+		goto put_bo;
+
 	bo = to_qaic_bo(obj);
 	ret = mutex_lock_interruptible(&bo->lock);
 	if (ret)
@@ -1173,7 +1173,6 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
 	struct bo_slice *slice;
 	unsigned long flags;
 	struct qaic_bo *bo;
-	bool queued;
 	int i, j;
 	int ret;
 
@@ -1205,9 +1204,7 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
 		}
 
 		spin_lock_irqsave(&dbc->xfer_lock, flags);
-		queued = bo->queued;
-		bo->queued = true;
-		if (queued) {
+		if (bo_queued(bo)) {
 			spin_unlock_irqrestore(&dbc->xfer_lock, flags);
 			ret = -EINVAL;
 			goto unlock_bo;
@@ -1230,7 +1227,6 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
 			else
 				ret = copy_exec_reqs(qdev, slice, dbc->id, head, tail);
 			if (ret) {
-				bo->queued = false;
 				spin_unlock_irqrestore(&dbc->xfer_lock, flags);
 				goto unlock_bo;
 			}
@@ -1253,8 +1249,7 @@ failed_to_send_bo:
 		spin_lock_irqsave(&dbc->xfer_lock, flags);
 		bo = list_last_entry(&dbc->xfer_list, struct qaic_bo, xfer_list);
 		obj = &bo->base;
-		bo->queued = false;
-		list_del(&bo->xfer_list);
+		list_del_init(&bo->xfer_list);
 		spin_unlock_irqrestore(&dbc->xfer_lock, flags);
 		dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
 		drm_gem_object_put(obj);
@@ -1615,8 +1610,7 @@ read_fifo:
 		 */
 		dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
 		bo->nr_slice_xfer_done = 0;
-		bo->queued = false;
-		list_del(&bo->xfer_list);
+		list_del_init(&bo->xfer_list);
 		bo->perf_stats.req_processed_ts = ktime_get_ns();
 		complete_all(&bo->xfer_done);
 		drm_gem_object_put(&bo->base);
@@ -1875,7 +1869,7 @@ int qaic_detach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
 	/* Check if BO is committed to H/W for DMA */
 	spin_lock_irqsave(&dbc->xfer_lock, flags);
-	if (bo->queued) {
+	if (bo_queued(bo)) {
 		spin_unlock_irqrestore(&dbc->xfer_lock, flags);
 		ret = -EBUSY;
 		goto unlock_ch_srcu;
 	}
@@ -1905,8 +1899,7 @@ static void empty_xfer_list(struct qaic_device *qdev, struct dma_bridge_chan *db
 	spin_lock_irqsave(&dbc->xfer_lock, flags);
 	while (!list_empty(&dbc->xfer_list)) {
 		bo = list_first_entry(&dbc->xfer_list, typeof(*bo), xfer_list);
-		bo->queued = false;
-		list_del(&bo->xfer_list);
+		list_del_init(&bo->xfer_list);
 		spin_unlock_irqrestore(&dbc->xfer_lock, flags);
 		bo->nr_slice_xfer_done = 0;
 		bo->req_id = 0;
diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c
index 2a313eb69b..d1a632dbae 100644
--- a/drivers/accel/qaic/qaic_drv.c
+++ b/drivers/accel/qaic/qaic_drv.c
@@ -44,6 +44,53 @@ MODULE_PARM_DESC(datapath_polling, "Operate the datapath in polling mode");
 static bool link_up;
 static DEFINE_IDA(qaic_usrs);
 
+static void qaicm_wq_release(struct drm_device *dev, void *res)
+{
+	struct workqueue_struct *wq = res;
+
+	destroy_workqueue(wq);
+}
+
+static struct workqueue_struct *qaicm_wq_init(struct drm_device *dev, const char *fmt)
+{
+	struct workqueue_struct *wq;
+	int ret;
+
+	wq = alloc_workqueue(fmt, WQ_UNBOUND, 0);
+	if (!wq)
+		return ERR_PTR(-ENOMEM);
+	ret = drmm_add_action_or_reset(dev, qaicm_wq_release, wq);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return wq;
+}
+
+static void qaicm_srcu_release(struct drm_device *dev, void *res)
+{
+	struct srcu_struct *lock = res;
+
+	cleanup_srcu_struct(lock);
+}
+
+static int qaicm_srcu_init(struct drm_device *dev, struct srcu_struct *lock)
+{
+	int ret;
+
+	ret = init_srcu_struct(lock);
+	if (ret)
+		return ret;
+
+	return drmm_add_action_or_reset(dev, qaicm_srcu_release, lock);
+}
+
+static void qaicm_pci_release(struct drm_device *dev, void *res)
+{
+	struct qaic_device *qdev = to_qaic_device(dev);
+
+	pci_set_drvdata(qdev->pdev, NULL);
+}
+
 static void free_usr(struct kref *kref)
 {
 	struct qaic_user *usr = container_of(kref, struct qaic_user, ref_count);
@@ -299,74 +346,73 @@ void qaic_dev_reset_clean_local_state(struct qaic_device *qdev)
 		release_dbc(qdev, i);
 }
 
-static void cleanup_qdev(struct qaic_device *qdev)
-{
-	int i;
-
-	for (i = 0; i < qdev->num_dbc; ++i)
-		cleanup_srcu_struct(&qdev->dbc[i].ch_lock);
-	cleanup_srcu_struct(&qdev->dev_lock);
-	pci_set_drvdata(qdev->pdev, NULL);
-	destroy_workqueue(qdev->cntl_wq);
-	destroy_workqueue(qdev->qts_wq);
-}
-
 static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_device_id *id)
 {
+	struct device *dev = &pdev->dev;
 	struct qaic_drm_device *qddev;
 	struct qaic_device *qdev;
-	int i;
+	struct drm_device *drm;
+	int i, ret;
 
-	qdev = devm_kzalloc(&pdev->dev, sizeof(*qdev), GFP_KERNEL);
+	qdev = devm_kzalloc(dev, sizeof(*qdev), GFP_KERNEL);
 	if (!qdev)
 		return NULL;
 
 	qdev->dev_state = QAIC_OFFLINE;
 	if (id->device == PCI_DEV_AIC100) {
 		qdev->num_dbc = 16;
-		qdev->dbc = devm_kcalloc(&pdev->dev, qdev->num_dbc, sizeof(*qdev->dbc), GFP_KERNEL);
+		qdev->dbc = devm_kcalloc(dev, qdev->num_dbc, sizeof(*qdev->dbc), GFP_KERNEL);
		if (!qdev->dbc)
			return NULL;
	}
 
-	qdev->cntl_wq = alloc_workqueue("qaic_cntl", WQ_UNBOUND, 0);
-	if (!qdev->cntl_wq)
+	qddev = devm_drm_dev_alloc(&pdev->dev, &qaic_accel_driver, struct qaic_drm_device, drm);
+	if (IS_ERR(qddev))
+		return NULL;
+
+	drm = to_drm(qddev);
+	pci_set_drvdata(pdev, qdev);
+
+	ret = drmm_mutex_init(drm, &qddev->users_mutex);
+	if (ret)
+		return NULL;
+	ret = drmm_add_action_or_reset(drm, qaicm_pci_release, NULL);
+	if (ret)
+		return NULL;
+	ret = drmm_mutex_init(drm, &qdev->cntl_mutex);
+	if (ret)
 		return NULL;
 
-	qdev->qts_wq = alloc_workqueue("qaic_ts", WQ_UNBOUND, 0);
-	if (!qdev->qts_wq) {
-		destroy_workqueue(qdev->cntl_wq);
+	qdev->cntl_wq = qaicm_wq_init(drm, "qaic_cntl");
+	if (IS_ERR(qdev->cntl_wq))
+		return NULL;
+	qdev->qts_wq = qaicm_wq_init(drm, "qaic_ts");
+	if (IS_ERR(qdev->qts_wq))
 		return NULL;
-	}
 
-	pci_set_drvdata(pdev, qdev);
+	ret = qaicm_srcu_init(drm, &qdev->dev_lock);
+	if (ret)
+		return NULL;
+
+	qdev->qddev = qddev;
 	qdev->pdev = pdev;
+	qddev->qdev = qdev;
 
-	mutex_init(&qdev->cntl_mutex);
 	INIT_LIST_HEAD(&qdev->cntl_xfer_list);
-	init_srcu_struct(&qdev->dev_lock);
+	INIT_LIST_HEAD(&qddev->users);
 
 	for (i = 0; i < qdev->num_dbc; ++i) {
 		spin_lock_init(&qdev->dbc[i].xfer_lock);
 		qdev->dbc[i].qdev = qdev;
 		qdev->dbc[i].id = i;
 		INIT_LIST_HEAD(&qdev->dbc[i].xfer_list);
-		init_srcu_struct(&qdev->dbc[i].ch_lock);
+		ret = qaicm_srcu_init(drm, &qdev->dbc[i].ch_lock);
+		if (ret)
+			return NULL;
 		init_waitqueue_head(&qdev->dbc[i].dbc_release);
 		INIT_LIST_HEAD(&qdev->dbc[i].bo_lists);
 	}
 
-	qddev = devm_drm_dev_alloc(&pdev->dev, &qaic_accel_driver, struct qaic_drm_device, drm);
-	if (IS_ERR(qddev)) {
-		cleanup_qdev(qdev);
-		return NULL;
-	}
-
-	drmm_mutex_init(to_drm(qddev), &qddev->users_mutex);
-	INIT_LIST_HEAD(&qddev->users);
-	qddev->qdev = qdev;
-	qdev->qddev = qddev;
-
 	return qdev;
 }
 
@@ -472,35 +518,28 @@ static int qaic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	ret = init_pci(qdev, pdev);
 	if (ret)
-		goto cleanup_qdev;
+		return ret;
 
 	for (i = 0; i < qdev->num_dbc; ++i)
 		qdev->dbc[i].dbc_base = qdev->bar_2 + QAIC_DBC_OFF(i);
 
 	mhi_irq = init_msi(qdev, pdev);
-	if (mhi_irq < 0) {
-		ret = mhi_irq;
-		goto cleanup_qdev;
-	}
+	if (mhi_irq < 0)
+		return mhi_irq;
 
 	ret = qaic_create_drm_device(qdev, QAIC_NO_PARTITION);
 	if (ret)
-		goto cleanup_qdev;
+		return ret;
 
 	qdev->mhi_cntrl = qaic_mhi_register_controller(pdev, qdev->bar_0, mhi_irq,
						       qdev->single_msi);
 	if (IS_ERR(qdev->mhi_cntrl)) {
 		ret = PTR_ERR(qdev->mhi_cntrl);
-		goto cleanup_drm_dev;
+		qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
+		return ret;
 	}
 
 	return 0;
-
-cleanup_drm_dev:
-	qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
-cleanup_qdev:
-	cleanup_qdev(qdev);
-	return ret;
 }
 
 static void qaic_pci_remove(struct pci_dev *pdev)
@@ -511,9 +550,8 @@ static void qaic_pci_remove(struct pci_dev *pdev)
 		return;
 
 	qaic_dev_reset_clean_local_state(qdev);
-	qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
 	qaic_mhi_free_controller(qdev->mhi_cntrl, link_up);
-	cleanup_qdev(qdev);
+	qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
 }
 
 static void qaic_pci_shutdown(struct pci_dev *pdev)