author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:35:05 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:39:31 +0000
commit     85c675d0d09a45a135bddd15d7b385f8758c32fb (patch)
tree       76267dbc9b9a130337be3640948fe397b04ac629 /drivers/accel/qaic
parent     Adding upstream version 6.6.15. (diff)
Adding upstream version 6.7.7. (upstream/6.7.7)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/accel/qaic')
-rw-r--r--  drivers/accel/qaic/qaic.h        13
-rw-r--r--  drivers/accel/qaic/qaic_data.c  187
-rw-r--r--  drivers/accel/qaic/qaic_drv.c   119
3 files changed, 202 insertions, 117 deletions
diff --git a/drivers/accel/qaic/qaic.h b/drivers/accel/qaic/qaic.h
index f2bd637a0..e3f4c30f3 100644
--- a/drivers/accel/qaic/qaic.h
+++ b/drivers/accel/qaic/qaic.h
@@ -27,6 +27,9 @@
#define QAIC_DBC_OFF(i) ((i) * QAIC_DBC_SIZE + QAIC_DBC_BASE)
#define to_qaic_bo(obj) container_of(obj, struct qaic_bo, base)
+#define to_qaic_drm_device(dev) container_of(dev, struct qaic_drm_device, drm)
+#define to_drm(qddev) (&(qddev)->drm)
+#define to_accel_kdev(qddev) (to_drm(qddev)->accel->kdev) /* Return Linux device of accel node */
extern bool datapath_polling;
@@ -137,6 +140,8 @@ struct qaic_device {
};
struct qaic_drm_device {
+ /* The drm device struct of this drm device */
+ struct drm_device drm;
/* Pointer to the root device struct driven by this driver */
struct qaic_device *qdev;
/*
@@ -146,8 +151,6 @@ struct qaic_drm_device {
* device is the actual physical device
*/
s32 partition_id;
- /* Pointer to the drm device struct of this drm device */
- struct drm_device *ddev;
/* Head in list of users who have opened this drm device */
struct list_head users;
/* Synchronizes access to users list */
@@ -158,8 +161,6 @@ struct qaic_bo {
struct drm_gem_object base;
/* Scatter/gather table for allocate/imported BO */
struct sg_table *sgt;
- /* BO size requested by user. GEM object might be bigger in size. */
- u64 size;
/* Head in list of slices of this BO */
struct list_head slices;
/* Total nents, for all slices of this BO */
@@ -221,7 +222,8 @@ struct qaic_bo {
*/
u32 queue_level_before;
} perf_stats;
-
+ /* Synchronizes BO operations */
+ struct mutex lock;
};
struct bo_slice {
@@ -277,6 +279,7 @@ int qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *f
int qaic_partial_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+int qaic_detach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
void irq_polling_work(struct work_struct *work);
#endif /* _QAIC_H_ */
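
The three macros added above work because struct drm_device is now embedded in
struct qaic_drm_device instead of being reached through a pointer, so
container_of() can recover the wrapper from the embedded member. A minimal
sketch of that embedded-member pattern (hypothetical names, not part of the
patch):

    #include <linux/container_of.h>

    struct inner { int x; };

    struct outer {
            int cookie;
            struct inner member;            /* embedded, not a pointer */
    };

    /* Recover the wrapper from a pointer to its embedded member. */
    #define to_outer(p) container_of(p, struct outer, member)

    static int demo(struct outer *o)
    {
            struct inner *i = &o->member;   /* down: take the member's address */

            return to_outer(i)->cookie;     /* up: back to the wrapper */
    }
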
diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
index ed1a5af43..d42f002bc 100644
--- a/drivers/accel/qaic/qaic_data.c
+++ b/drivers/accel/qaic/qaic_data.c
@@ -154,6 +154,7 @@ static void free_slice(struct kref *kref)
{
struct bo_slice *slice = container_of(kref, struct bo_slice, ref_count);
+ slice->bo->total_slice_nents -= slice->nents;
list_del(&slice->slice);
drm_gem_object_put(&slice->bo->base);
sg_free_table(slice->sgt);
@@ -579,7 +580,7 @@ static void qaic_gem_print_info(struct drm_printer *p, unsigned int indent,
{
struct qaic_bo *bo = to_qaic_bo(obj);
- drm_printf_indent(p, indent, "user requested size=%llu\n", bo->size);
+ drm_printf_indent(p, indent, "BO DMA direction %d\n", bo->dir);
}
static const struct vm_operations_struct drm_vm_ops = {
@@ -623,6 +624,7 @@ static void qaic_free_object(struct drm_gem_object *obj)
qaic_free_sgt(bo->sgt);
}
+ mutex_destroy(&bo->lock);
drm_gem_object_release(obj);
kfree(bo);
}
@@ -634,6 +636,19 @@ static const struct drm_gem_object_funcs qaic_gem_funcs = {
.vm_ops = &drm_vm_ops,
};
+static void qaic_init_bo(struct qaic_bo *bo, bool reinit)
+{
+ if (reinit) {
+ bo->sliced = false;
+ reinit_completion(&bo->xfer_done);
+ } else {
+ mutex_init(&bo->lock);
+ init_completion(&bo->xfer_done);
+ }
+ complete_all(&bo->xfer_done);
+ INIT_LIST_HEAD(&bo->slices);
+}
+
static struct qaic_bo *qaic_alloc_init_bo(void)
{
struct qaic_bo *bo;
@@ -642,9 +657,7 @@ static struct qaic_bo *qaic_alloc_init_bo(void)
if (!bo)
return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&bo->slices);
- init_completion(&bo->xfer_done);
- complete_all(&bo->xfer_done);
+ qaic_init_bo(bo, false);
return bo;
}
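
qaic_init_bo() leaves xfer_done in the completed state (complete_all() right
after init_completion()/reinit_completion()), so waiting on a BO with no
transfer in flight returns immediately; the completion is re-armed only when
the BO is queued again. A small standalone sketch of those completion
semantics (illustrative, not driver code):

    #include <linux/completion.h>

    static void completion_demo(void)
    {
            struct completion done;

            init_completion(&done);         /* starts out "not done" */
            complete_all(&done);            /* every waiter now returns immediately */
            wait_for_completion(&done);     /* does not block */

            reinit_completion(&done);       /* re-arm before the next use */
            /* ... submit work, then complete_all(&done) when it finishes ... */
    }
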
@@ -695,8 +708,6 @@ int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
if (ret)
goto free_bo;
- bo->size = args->size;
-
ret = drm_gem_handle_create(file_priv, obj, &args->handle);
if (ret)
goto free_sgt;
@@ -826,7 +837,6 @@ static int qaic_prepare_import_bo(struct qaic_bo *bo, struct qaic_attach_slice_h
}
bo->sgt = sgt;
- bo->size = hdr->size;
return 0;
}
@@ -836,7 +846,7 @@ static int qaic_prepare_export_bo(struct qaic_device *qdev, struct qaic_bo *bo,
{
int ret;
- if (bo->size != hdr->size)
+ if (bo->base.size < hdr->size)
return -EINVAL;
ret = dma_map_sgtable(&qdev->pdev->dev, bo->sgt, hdr->dir, 0);
@@ -855,9 +865,9 @@ static int qaic_prepare_bo(struct qaic_device *qdev, struct qaic_bo *bo,
ret = qaic_prepare_import_bo(bo, hdr);
else
ret = qaic_prepare_export_bo(qdev, bo, hdr);
-
- if (ret == 0)
- bo->dir = hdr->dir;
+ bo->dir = hdr->dir;
+ bo->dbc = &qdev->dbc[hdr->dbc_id];
+ bo->nr_slice = hdr->count;
return ret;
}
@@ -866,7 +876,6 @@ static void qaic_unprepare_import_bo(struct qaic_bo *bo)
{
dma_buf_unmap_attachment(bo->base.import_attach, bo->sgt, bo->dir);
bo->sgt = NULL;
- bo->size = 0;
}
static void qaic_unprepare_export_bo(struct qaic_device *qdev, struct qaic_bo *bo)
@@ -882,6 +891,8 @@ static void qaic_unprepare_bo(struct qaic_device *qdev, struct qaic_bo *bo)
qaic_unprepare_export_bo(qdev, bo);
bo->dir = 0;
+ bo->dbc = NULL;
+ bo->nr_slice = 0;
}
static void qaic_free_slices_bo(struct qaic_bo *bo)
@@ -890,6 +901,9 @@ static void qaic_free_slices_bo(struct qaic_bo *bo)
list_for_each_entry_safe(slice, temp, &bo->slices, slice)
kref_put(&slice->ref_count, free_slice);
+ if (WARN_ON_ONCE(bo->total_slice_nents != 0))
+ bo->total_slice_nents = 0;
+ bo->nr_slice = 0;
}
static int qaic_attach_slicing_bo(struct qaic_device *qdev, struct qaic_bo *bo,
@@ -906,15 +920,11 @@ static int qaic_attach_slicing_bo(struct qaic_device *qdev, struct qaic_bo *bo,
}
}
- if (bo->total_slice_nents > qdev->dbc[hdr->dbc_id].nelem) {
+ if (bo->total_slice_nents > bo->dbc->nelem) {
qaic_free_slices_bo(bo);
return -ENOSPC;
}
- bo->sliced = true;
- bo->nr_slice = hdr->count;
- list_add_tail(&bo->bo_list, &qdev->dbc[hdr->dbc_id].bo_lists);
-
return 0;
}
@@ -992,10 +1002,13 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
}
bo = to_qaic_bo(obj);
+ ret = mutex_lock_interruptible(&bo->lock);
+ if (ret)
+ goto put_bo;
if (bo->sliced) {
ret = -EINVAL;
- goto put_bo;
+ goto unlock_bo;
}
dbc = &qdev->dbc[args->hdr.dbc_id];
@@ -1016,9 +1029,10 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
if (args->hdr.dir == DMA_TO_DEVICE)
dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, args->hdr.dir);
- bo->dbc = dbc;
+ bo->sliced = true;
+ list_add_tail(&bo->bo_list, &bo->dbc->bo_lists);
srcu_read_unlock(&dbc->ch_lock, rcu_id);
- drm_gem_object_put(obj);
+ mutex_unlock(&bo->lock);
kfree(slice_ent);
srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
@@ -1029,6 +1043,8 @@ unprepare_bo:
qaic_unprepare_bo(qdev, bo);
unlock_ch_srcu:
srcu_read_unlock(&dbc->ch_lock, rcu_id);
+unlock_bo:
+ mutex_unlock(&bo->lock);
put_bo:
drm_gem_object_put(obj);
free_slice_ent:
@@ -1183,15 +1199,18 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
}
bo = to_qaic_bo(obj);
+ ret = mutex_lock_interruptible(&bo->lock);
+ if (ret)
+ goto failed_to_send_bo;
if (!bo->sliced) {
ret = -EINVAL;
- goto failed_to_send_bo;
+ goto unlock_bo;
}
- if (is_partial && pexec[i].resize > bo->size) {
+ if (is_partial && pexec[i].resize > bo->base.size) {
ret = -EINVAL;
- goto failed_to_send_bo;
+ goto unlock_bo;
}
spin_lock_irqsave(&dbc->xfer_lock, flags);
@@ -1200,7 +1219,7 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
if (queued) {
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
ret = -EINVAL;
- goto failed_to_send_bo;
+ goto unlock_bo;
}
bo->req_id = dbc->next_req_id++;
@@ -1231,17 +1250,20 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
if (ret) {
bo->queued = false;
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
- goto failed_to_send_bo;
+ goto unlock_bo;
}
}
reinit_completion(&bo->xfer_done);
list_add_tail(&bo->xfer_list, &dbc->xfer_list);
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
dma_sync_sgtable_for_device(&qdev->pdev->dev, bo->sgt, bo->dir);
+ mutex_unlock(&bo->lock);
}
return 0;
+unlock_bo:
+ mutex_unlock(&bo->lock);
failed_to_send_bo:
if (likely(obj))
drm_gem_object_put(obj);
@@ -1797,6 +1819,91 @@ unlock_usr_srcu:
return ret;
}
+static void detach_slice_bo(struct qaic_device *qdev, struct qaic_bo *bo)
+{
+ qaic_free_slices_bo(bo);
+ qaic_unprepare_bo(qdev, bo);
+ qaic_init_bo(bo, true);
+ list_del(&bo->bo_list);
+ drm_gem_object_put(&bo->base);
+}
+
+int qaic_detach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct qaic_detach_slice *args = data;
+ int rcu_id, usr_rcu_id, qdev_rcu_id;
+ struct dma_bridge_chan *dbc;
+ struct drm_gem_object *obj;
+ struct qaic_device *qdev;
+ struct qaic_user *usr;
+ unsigned long flags;
+ struct qaic_bo *bo;
+ int ret;
+
+ if (args->pad != 0)
+ return -EINVAL;
+
+ usr = file_priv->driver_priv;
+ usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
+ if (!usr->qddev) {
+ ret = -ENODEV;
+ goto unlock_usr_srcu;
+ }
+
+ qdev = usr->qddev->qdev;
+ qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
+ if (qdev->in_reset) {
+ ret = -ENODEV;
+ goto unlock_dev_srcu;
+ }
+
+ obj = drm_gem_object_lookup(file_priv, args->handle);
+ if (!obj) {
+ ret = -ENOENT;
+ goto unlock_dev_srcu;
+ }
+
+ bo = to_qaic_bo(obj);
+ ret = mutex_lock_interruptible(&bo->lock);
+ if (ret)
+ goto put_bo;
+
+ if (!bo->sliced) {
+ ret = -EINVAL;
+ goto unlock_bo;
+ }
+
+ dbc = bo->dbc;
+ rcu_id = srcu_read_lock(&dbc->ch_lock);
+ if (dbc->usr != usr) {
+ ret = -EINVAL;
+ goto unlock_ch_srcu;
+ }
+
+ /* Check if BO is committed to H/W for DMA */
+ spin_lock_irqsave(&dbc->xfer_lock, flags);
+ if (bo->queued) {
+ spin_unlock_irqrestore(&dbc->xfer_lock, flags);
+ ret = -EBUSY;
+ goto unlock_ch_srcu;
+ }
+ spin_unlock_irqrestore(&dbc->xfer_lock, flags);
+
+ detach_slice_bo(qdev, bo);
+
+unlock_ch_srcu:
+ srcu_read_unlock(&dbc->ch_lock, rcu_id);
+unlock_bo:
+ mutex_unlock(&bo->lock);
+put_bo:
+ drm_gem_object_put(obj);
+unlock_dev_srcu:
+ srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
+unlock_usr_srcu:
+ srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
+ return ret;
+}
+
static void empty_xfer_list(struct qaic_device *qdev, struct dma_bridge_chan *dbc)
{
unsigned long flags;
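
The new ioctl above undoes QAIC_ATTACH_SLICE_BO without freeing the GEM
object, so the same BO can be re-sliced for a different workload; it returns
-EBUSY while the BO is still queued for DMA. A hedged userspace sketch,
assuming the matching uapi definitions (struct qaic_detach_slice and
DRM_IOCTL_QAIC_DETACH_SLICE_BO from include/uapi/drm/qaic_accel.h) and an
already-attached BO handle:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/qaic_accel.h>

    /* fd: an open /dev/accel/accelN node; handle: a BO previously attached to a DBC. */
    static int detach_bo_slices(int fd, __u32 handle)
    {
            struct qaic_detach_slice args;

            memset(&args, 0, sizeof(args)); /* args.pad must be zero, else -EINVAL */
            args.handle = handle;

            return ioctl(fd, DRM_IOCTL_QAIC_DETACH_SLICE_BO, &args);
    }
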
@@ -1808,6 +1915,12 @@ static void empty_xfer_list(struct qaic_device *qdev, struct dma_bridge_chan *db
bo->queued = false;
list_del(&bo->xfer_list);
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
+ bo->nr_slice_xfer_done = 0;
+ bo->req_id = 0;
+ bo->perf_stats.req_received_ts = 0;
+ bo->perf_stats.req_submit_ts = 0;
+ bo->perf_stats.req_processed_ts = 0;
+ bo->perf_stats.queue_level_before = 0;
dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
complete_all(&bo->xfer_done);
drm_gem_object_put(&bo->base);
@@ -1855,7 +1968,6 @@ void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id)
void release_dbc(struct qaic_device *qdev, u32 dbc_id)
{
- struct bo_slice *slice, *slice_temp;
struct qaic_bo *bo, *bo_temp;
struct dma_bridge_chan *dbc;
@@ -1873,24 +1985,11 @@ void release_dbc(struct qaic_device *qdev, u32 dbc_id)
dbc->usr = NULL;
list_for_each_entry_safe(bo, bo_temp, &dbc->bo_lists, bo_list) {
- list_for_each_entry_safe(slice, slice_temp, &bo->slices, slice)
- kref_put(&slice->ref_count, free_slice);
- bo->sliced = false;
- INIT_LIST_HEAD(&bo->slices);
- bo->total_slice_nents = 0;
- bo->dir = 0;
- bo->dbc = NULL;
- bo->nr_slice = 0;
- bo->nr_slice_xfer_done = 0;
- bo->queued = false;
- bo->req_id = 0;
- init_completion(&bo->xfer_done);
- complete_all(&bo->xfer_done);
- list_del(&bo->bo_list);
- bo->perf_stats.req_received_ts = 0;
- bo->perf_stats.req_submit_ts = 0;
- bo->perf_stats.req_processed_ts = 0;
- bo->perf_stats.queue_level_before = 0;
+ drm_gem_object_get(&bo->base);
+ mutex_lock(&bo->lock);
+ detach_slice_bo(qdev, bo);
+ mutex_unlock(&bo->lock);
+ drm_gem_object_put(&bo->base);
}
dbc->in_use = false;
diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c
index b5de82e6e..6f5809576 100644
--- a/drivers/accel/qaic/qaic_drv.c
+++ b/drivers/accel/qaic/qaic_drv.c
@@ -22,6 +22,7 @@
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
#include <uapi/drm/qaic_accel.h>
#include "mhi_controller.h"
@@ -55,7 +56,7 @@ static void free_usr(struct kref *kref)
static int qaic_open(struct drm_device *dev, struct drm_file *file)
{
- struct qaic_drm_device *qddev = dev->dev_private;
+ struct qaic_drm_device *qddev = to_qaic_drm_device(dev);
struct qaic_device *qdev = qddev->qdev;
struct qaic_user *usr;
int rcu_id;
@@ -150,6 +151,7 @@ static const struct drm_ioctl_desc qaic_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(QAIC_PARTIAL_EXECUTE_BO, qaic_partial_execute_bo_ioctl, 0),
DRM_IOCTL_DEF_DRV(QAIC_WAIT_BO, qaic_wait_bo_ioctl, 0),
DRM_IOCTL_DEF_DRV(QAIC_PERF_STATS_BO, qaic_perf_stats_bo_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(QAIC_DETACH_SLICE_BO, qaic_detach_slice_bo_ioctl, 0),
};
static const struct drm_driver qaic_accel_driver = {
@@ -170,64 +172,39 @@ static const struct drm_driver qaic_accel_driver = {
static int qaic_create_drm_device(struct qaic_device *qdev, s32 partition_id)
{
- struct qaic_drm_device *qddev;
- struct drm_device *ddev;
- struct device *pdev;
+ struct qaic_drm_device *qddev = qdev->qddev;
+ struct drm_device *drm = to_drm(qddev);
int ret;
/* Hold off implementing partitions until the uapi is determined */
if (partition_id != QAIC_NO_PARTITION)
return -EINVAL;
- pdev = &qdev->pdev->dev;
-
- qddev = kzalloc(sizeof(*qddev), GFP_KERNEL);
- if (!qddev)
- return -ENOMEM;
-
- ddev = drm_dev_alloc(&qaic_accel_driver, pdev);
- if (IS_ERR(ddev)) {
- ret = PTR_ERR(ddev);
- goto ddev_fail;
- }
-
- ddev->dev_private = qddev;
- qddev->ddev = ddev;
-
- qddev->qdev = qdev;
qddev->partition_id = partition_id;
- INIT_LIST_HEAD(&qddev->users);
- mutex_init(&qddev->users_mutex);
-
- qdev->qddev = qddev;
-
- ret = drm_dev_register(ddev, 0);
- if (ret) {
- pci_dbg(qdev->pdev, "%s: drm_dev_register failed %d\n", __func__, ret);
- goto drm_reg_fail;
- }
- return 0;
+ /*
+ * drm_dev_unregister() sets the driver data to NULL and
+ * drm_dev_register() does not update the driver data. During a SOC
+ * reset drm dev is unregistered and registered again leaving the
+ * driver data to NULL.
+ */
+ dev_set_drvdata(to_accel_kdev(qddev), drm->accel);
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ pci_dbg(qdev->pdev, "drm_dev_register failed %d\n", ret);
-drm_reg_fail:
- mutex_destroy(&qddev->users_mutex);
- qdev->qddev = NULL;
- drm_dev_put(ddev);
-ddev_fail:
- kfree(qddev);
return ret;
}
static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id)
{
- struct qaic_drm_device *qddev;
+ struct qaic_drm_device *qddev = qdev->qddev;
+ struct drm_device *drm = to_drm(qddev);
struct qaic_user *usr;
- qddev = qdev->qddev;
- qdev->qddev = NULL;
- if (!qddev)
- return;
-
+ drm_dev_get(drm);
+ drm_dev_unregister(drm);
+ qddev->partition_id = 0;
/*
* Existing users get unresolvable errors till they close FDs.
* Need to sync carefully with users calling close(). The
@@ -254,13 +231,7 @@ static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id)
mutex_lock(&qddev->users_mutex);
}
mutex_unlock(&qddev->users_mutex);
-
- if (qddev->ddev) {
- drm_dev_unregister(qddev->ddev);
- drm_dev_put(qddev->ddev);
- }
-
- kfree(qddev);
+ drm_dev_put(drm);
}
static int qaic_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
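
With the drm_device embedded and devm-managed, qaic_destroy_drm_device() no
longer frees anything itself: it takes a temporary reference, unregisters the
device so no new opens arrive, drains the user list, and drops the reference.
A minimal sketch of that reference dance (illustrative, assumes the same
embedded/devm setup):

    #include <drm/drm_drv.h>

    static void teardown_sketch(struct drm_device *drm)
    {
            drm_dev_get(drm);               /* keep the device valid across teardown */
            drm_dev_unregister(drm);        /* userspace can no longer open it */
            /* ... notify and drain existing users here ... */
            drm_dev_put(drm);               /* drop our reference; the reference held
                                             * since allocation goes away at unbind */
    }
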
@@ -344,8 +315,20 @@ void qaic_dev_reset_clean_local_state(struct qaic_device *qdev, bool exit_reset)
qdev->in_reset = false;
}
+static void cleanup_qdev(struct qaic_device *qdev)
+{
+ int i;
+
+ for (i = 0; i < qdev->num_dbc; ++i)
+ cleanup_srcu_struct(&qdev->dbc[i].ch_lock);
+ cleanup_srcu_struct(&qdev->dev_lock);
+ pci_set_drvdata(qdev->pdev, NULL);
+ destroy_workqueue(qdev->cntl_wq);
+}
+
static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ struct qaic_drm_device *qddev;
struct qaic_device *qdev;
int i;
@@ -381,18 +364,18 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_de
INIT_LIST_HEAD(&qdev->dbc[i].bo_lists);
}
- return qdev;
-}
+ qddev = devm_drm_dev_alloc(&pdev->dev, &qaic_accel_driver, struct qaic_drm_device, drm);
+ if (IS_ERR(qddev)) {
+ cleanup_qdev(qdev);
+ return NULL;
+ }
-static void cleanup_qdev(struct qaic_device *qdev)
-{
- int i;
+ drmm_mutex_init(to_drm(qddev), &qddev->users_mutex);
+ INIT_LIST_HEAD(&qddev->users);
+ qddev->qdev = qdev;
+ qdev->qddev = qddev;
- for (i = 0; i < qdev->num_dbc; ++i)
- cleanup_srcu_struct(&qdev->dbc[i].ch_lock);
- cleanup_srcu_struct(&qdev->dev_lock);
- pci_set_drvdata(qdev->pdev, NULL);
- destroy_workqueue(qdev->cntl_wq);
+ return qdev;
}
static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev)
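
devm_drm_dev_alloc() above requires the drm_device to be embedded in the
driver structure and ties its lifetime to the PCI device, which is what lets
the kzalloc()/drm_dev_alloc()/kfree() bookkeeping disappear; drmm_mutex_init()
likewise destroys the mutex together with the DRM device. A minimal sketch of
the managed-allocation pattern, with hypothetical my_dev/my_driver names:

    #include <linux/err.h>
    #include <linux/mutex.h>
    #include <drm/drm_drv.h>
    #include <drm/drm_managed.h>

    struct my_dev {
            struct drm_device drm;          /* must be embedded for devm_drm_dev_alloc() */
            struct mutex lock;
    };

    static int my_probe(struct device *parent, const struct drm_driver *my_driver)
    {
            struct my_dev *mdev;
            int ret;

            mdev = devm_drm_dev_alloc(parent, my_driver, struct my_dev, drm);
            if (IS_ERR(mdev))
                    return PTR_ERR(mdev);

            ret = drmm_mutex_init(&mdev->drm, &mdev->lock); /* destroyed with the drm_device */
            if (ret)
                    return ret;

            return drm_dev_register(&mdev->drm, 0);
    }
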
@@ -591,22 +574,22 @@ static int __init qaic_init(void)
{
int ret;
- ret = mhi_driver_register(&qaic_mhi_driver);
+ ret = pci_register_driver(&qaic_pci_driver);
if (ret) {
- pr_debug("qaic: mhi_driver_register failed %d\n", ret);
+ pr_debug("qaic: pci_register_driver failed %d\n", ret);
return ret;
}
- ret = pci_register_driver(&qaic_pci_driver);
+ ret = mhi_driver_register(&qaic_mhi_driver);
if (ret) {
- pr_debug("qaic: pci_register_driver failed %d\n", ret);
- goto free_mhi;
+ pr_debug("qaic: mhi_driver_register failed %d\n", ret);
+ goto free_pci;
}
return 0;
-free_mhi:
- mhi_driver_unregister(&qaic_mhi_driver);
+free_pci:
+ pci_unregister_driver(&qaic_pci_driver);
return ret;
}
@@ -628,8 +611,8 @@ static void __exit qaic_exit(void)
* reinitializing the link_up state after the cleanup is done.
*/
link_up = true;
- pci_unregister_driver(&qaic_pci_driver);
mhi_driver_unregister(&qaic_mhi_driver);
+ pci_unregister_driver(&qaic_pci_driver);
}
module_init(qaic_init);