path: root/drivers/accel/qaic
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:03 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:03 +0000
commit    01a69402cf9d38ff180345d55c2ee51c7e89fbc7 (patch)
tree      b406c5242a088c4f59c6e4b719b783f43aca6ae9 /drivers/accel/qaic
parent    Adding upstream version 6.7.12. (diff)
Adding upstream version 6.8.9. (upstream/6.8.9)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/accel/qaic')
-rw-r--r--  drivers/accel/qaic/Makefile          |   3
-rw-r--r--  drivers/accel/qaic/mhi_controller.c  |  44
-rw-r--r--  drivers/accel/qaic/mhi_controller.h  |   2
-rw-r--r--  drivers/accel/qaic/qaic.h            |  21
-rw-r--r--  drivers/accel/qaic/qaic_control.c    |   7
-rw-r--r--  drivers/accel/qaic/qaic_data.c       | 147
-rw-r--r--  drivers/accel/qaic/qaic_drv.c        |  98
-rw-r--r--  drivers/accel/qaic/qaic_timesync.c   | 395
-rw-r--r--  drivers/accel/qaic/qaic_timesync.h   |  11
9 files changed, 598 insertions, 130 deletions
diff --git a/drivers/accel/qaic/Makefile b/drivers/accel/qaic/Makefile
index 2418418f7..3f7f6dfde 100644
--- a/drivers/accel/qaic/Makefile
+++ b/drivers/accel/qaic/Makefile
@@ -9,4 +9,5 @@ qaic-y := \
mhi_controller.o \
qaic_control.o \
qaic_data.o \
- qaic_drv.o
+ qaic_drv.o \
+ qaic_timesync.o
diff --git a/drivers/accel/qaic/mhi_controller.c b/drivers/accel/qaic/mhi_controller.c
index 1405623b0..cb77d048e 100644
--- a/drivers/accel/qaic/mhi_controller.c
+++ b/drivers/accel/qaic/mhi_controller.c
@@ -348,7 +348,7 @@ static struct mhi_channel_config aic100_channels[] = {
.local_elements = 0,
.event_ring = 0,
.dir = DMA_TO_DEVICE,
- .ee_mask = MHI_CH_EE_SBL | MHI_CH_EE_AMSS,
+ .ee_mask = MHI_CH_EE_SBL,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
@@ -364,7 +364,39 @@ static struct mhi_channel_config aic100_channels[] = {
.local_elements = 0,
.event_ring = 0,
.dir = DMA_FROM_DEVICE,
- .ee_mask = MHI_CH_EE_SBL | MHI_CH_EE_AMSS,
+ .ee_mask = MHI_CH_EE_SBL,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_TIMESYNC_PERIODIC",
+ .num = 22,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .num = 23,
+ .name = "QAIC_TIMESYNC_PERIODIC",
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
@@ -450,7 +482,7 @@ static void mhi_status_cb(struct mhi_controller *mhi_cntrl, enum mhi_callback re
pci_err(qdev->pdev, "Fatal error received from device. Attempting to recover\n");
/* this event occurs in non-atomic context */
if (reason == MHI_CB_SYS_ERROR)
- qaic_dev_reset_clean_local_state(qdev, true);
+ qaic_dev_reset_clean_local_state(qdev);
}
static int mhi_reset_and_async_power_up(struct mhi_controller *mhi_cntrl)
@@ -481,7 +513,7 @@ static int mhi_reset_and_async_power_up(struct mhi_controller *mhi_cntrl)
}
struct mhi_controller *qaic_mhi_register_controller(struct pci_dev *pci_dev, void __iomem *mhi_bar,
- int mhi_irq)
+ int mhi_irq, bool shared_msi)
{
struct mhi_controller *mhi_cntrl;
int ret;
@@ -513,6 +545,10 @@ struct mhi_controller *qaic_mhi_register_controller(struct pci_dev *pci_dev, voi
return ERR_PTR(-ENOMEM);
mhi_cntrl->irq[0] = mhi_irq;
+
+ if (shared_msi) /* MSI shared with data path, no IRQF_NO_SUSPEND */
+ mhi_cntrl->irq_flags = IRQF_SHARED;
+
mhi_cntrl->fw_image = "qcom/aic100/sbl.bin";
/* use latest configured timeout */
diff --git a/drivers/accel/qaic/mhi_controller.h b/drivers/accel/qaic/mhi_controller.h
index 2ae45d768..500e7f4af 100644
--- a/drivers/accel/qaic/mhi_controller.h
+++ b/drivers/accel/qaic/mhi_controller.h
@@ -8,7 +8,7 @@
#define MHICONTROLLERQAIC_H_
struct mhi_controller *qaic_mhi_register_controller(struct pci_dev *pci_dev, void __iomem *mhi_bar,
- int mhi_irq);
+ int mhi_irq, bool shared_msi);
void qaic_mhi_free_controller(struct mhi_controller *mhi_cntrl, bool link_up);
void qaic_mhi_start_reset(struct mhi_controller *mhi_cntrl);
void qaic_mhi_reset_done(struct mhi_controller *mhi_cntrl);
diff --git a/drivers/accel/qaic/qaic.h b/drivers/accel/qaic/qaic.h
index e3f4c30f3..582836f95 100644
--- a/drivers/accel/qaic/qaic.h
+++ b/drivers/accel/qaic/qaic.h
@@ -31,6 +31,15 @@
#define to_drm(qddev) (&(qddev)->drm)
#define to_accel_kdev(qddev) (to_drm(qddev)->accel->kdev) /* Return Linux device of accel node */
+enum __packed dev_states {
+ /* Device is offline or will be very soon */
+ QAIC_OFFLINE,
+ /* Device is booting, not clear if it's in a usable state */
+ QAIC_BOOT,
+ /* Device is fully operational */
+ QAIC_ONLINE,
+};
+
extern bool datapath_polling;
struct qaic_user {
@@ -121,8 +130,10 @@ struct qaic_device {
struct workqueue_struct *cntl_wq;
/* Synchronizes all the users of device during cleanup */
struct srcu_struct dev_lock;
- /* true: Device under reset; false: Device not under reset */
- bool in_reset;
+ /* Track the state of the device during resets */
+ enum dev_states dev_state;
+ /* true: single MSI is used to operate device */
+ bool single_msi;
/*
* true: A tx MHI transaction has failed and a rx buffer is still queued
* in control device. Such a buffer is considered lost rx buffer
@@ -137,6 +148,10 @@ struct qaic_device {
u32 (*gen_crc)(void *msg);
/* Validate the CRC of a control message */
bool (*valid_crc)(void *msg);
+ /* MHI "QAIC_TIMESYNC" channel device */
+ struct mhi_device *qts_ch;
+ /* Work queue for tasks related to MHI "QAIC_TIMESYNC" channel */
+ struct workqueue_struct *qts_wq;
};
struct qaic_drm_device {
@@ -268,7 +283,7 @@ void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id);
void release_dbc(struct qaic_device *qdev, u32 dbc_id);
void wake_all_cntl(struct qaic_device *qdev);
-void qaic_dev_reset_clean_local_state(struct qaic_device *qdev, bool exit_reset);
+void qaic_dev_reset_clean_local_state(struct qaic_device *qdev);
struct drm_gem_object *qaic_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);
diff --git a/drivers/accel/qaic/qaic_control.c b/drivers/accel/qaic/qaic_control.c
index 388abd400..9e8a8cbad 100644
--- a/drivers/accel/qaic/qaic_control.c
+++ b/drivers/accel/qaic/qaic_control.c
@@ -1022,7 +1022,8 @@ static void *msg_xfer(struct qaic_device *qdev, struct wrapper_list *wrappers, u
int xfer_count = 0;
int retry_count;
- if (qdev->in_reset) {
+ /* Allow QAIC_BOOT state since we need to check control protocol version */
+ if (qdev->dev_state == QAIC_OFFLINE) {
mutex_unlock(&qdev->cntl_mutex);
return ERR_PTR(-ENODEV);
}
@@ -1138,7 +1139,7 @@ static int abort_dma_cont(struct qaic_device *qdev, struct wrapper_list *wrapper
if (!list_is_first(&wrapper->list, &wrappers->list))
kref_put(&wrapper->ref_count, free_wrapper);
- wrapper = add_wrapper(wrappers, offsetof(struct wrapper_msg, trans) + sizeof(*out_trans));
+ wrapper = add_wrapper(wrappers, sizeof(*wrapper));
if (!wrapper)
return -ENOMEM;
@@ -1306,7 +1307,7 @@ int qaic_manage_ioctl(struct drm_device *dev, void *data, struct drm_file *file_
qdev = usr->qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
- if (qdev->in_reset) {
+ if (qdev->dev_state != QAIC_ONLINE) {
srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
return -ENODEV;
diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
index d42f002bc..03c9a793d 100644
--- a/drivers/accel/qaic/qaic_data.c
+++ b/drivers/accel/qaic/qaic_data.c
@@ -51,6 +51,7 @@
})
#define NUM_EVENTS 128
#define NUM_DELAYS 10
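+/* Address of request-FIFO element @offset in the queue that starts at @base */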
+#define fifo_at(base, offset) ((base) + (offset) * get_dbc_req_elem_size())
static unsigned int wait_exec_default_timeout_ms = 5000; /* 5 sec default */
module_param(wait_exec_default_timeout_ms, uint, 0600);
@@ -451,7 +452,7 @@ static int create_sgt(struct qaic_device *qdev, struct sg_table **sgt_out, u64 s
* later
*/
buf_extra = (PAGE_SIZE - size % PAGE_SIZE) % PAGE_SIZE;
- max_order = min(MAX_ORDER - 1, get_order(size));
+ max_order = min(MAX_PAGE_ORDER, get_order(size));
} else {
/* allocate a single page for book keeping */
nr_pages = 1;
@@ -689,7 +690,7 @@ int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
qdev = usr->qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
- if (qdev->in_reset) {
+ if (qdev->dev_state != QAIC_ONLINE) {
ret = -ENODEV;
goto unlock_dev_srcu;
}
@@ -748,7 +749,7 @@ int qaic_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file
qdev = usr->qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
- if (qdev->in_reset) {
+ if (qdev->dev_state != QAIC_ONLINE) {
ret = -ENODEV;
goto unlock_dev_srcu;
}
@@ -967,7 +968,7 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
qdev = usr->qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
- if (qdev->in_reset) {
+ if (qdev->dev_state != QAIC_ONLINE) {
ret = -ENODEV;
goto unlock_dev_srcu;
}
@@ -1056,6 +1057,16 @@ unlock_usr_srcu:
return ret;
}
+static inline u32 fifo_space_avail(u32 head, u32 tail, u32 q_size)
+{
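+	/*
+	 * One slot is left unused so that head == tail means the FIFO is
+	 * empty. Example: q_size = 8, head = 2, tail = 6 gives
+	 * 2 - 6 - 1 + 8 = 3 free elements.
+	 */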
+ u32 avail = head - tail - 1;
+
+ if (head <= tail)
+ avail += q_size;
+
+ return avail;
+}
+
static inline int copy_exec_reqs(struct qaic_device *qdev, struct bo_slice *slice, u32 dbc_id,
u32 head, u32 *ptail)
{
@@ -1064,27 +1075,20 @@ static inline int copy_exec_reqs(struct qaic_device *qdev, struct bo_slice *slic
u32 tail = *ptail;
u32 avail;
- avail = head - tail;
- if (head <= tail)
- avail += dbc->nelem;
-
- --avail;
-
+ avail = fifo_space_avail(head, tail, dbc->nelem);
if (avail < slice->nents)
return -EAGAIN;
if (tail + slice->nents > dbc->nelem) {
avail = dbc->nelem - tail;
avail = min_t(u32, avail, slice->nents);
- memcpy(dbc->req_q_base + tail * get_dbc_req_elem_size(), reqs,
- sizeof(*reqs) * avail);
+ memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * avail);
reqs += avail;
avail = slice->nents - avail;
if (avail)
memcpy(dbc->req_q_base, reqs, sizeof(*reqs) * avail);
} else {
- memcpy(dbc->req_q_base + tail * get_dbc_req_elem_size(), reqs,
- sizeof(*reqs) * slice->nents);
+ memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * slice->nents);
}
*ptail = (tail + slice->nents) % dbc->nelem;
@@ -1092,46 +1096,31 @@ static inline int copy_exec_reqs(struct qaic_device *qdev, struct bo_slice *slic
return 0;
}
-/*
- * Based on the value of resize we may only need to transmit first_n
- * entries and the last entry, with last_bytes to send from the last entry.
- * Note that first_n could be 0.
- */
static inline int copy_partial_exec_reqs(struct qaic_device *qdev, struct bo_slice *slice,
- u64 resize, u32 dbc_id, u32 head, u32 *ptail)
+ u64 resize, struct dma_bridge_chan *dbc, u32 head,
+ u32 *ptail)
{
- struct dma_bridge_chan *dbc = &qdev->dbc[dbc_id];
struct dbc_req *reqs = slice->reqs;
struct dbc_req *last_req;
u32 tail = *ptail;
- u64 total_bytes;
u64 last_bytes;
u32 first_n;
u32 avail;
- int ret;
- int i;
-
- avail = head - tail;
- if (head <= tail)
- avail += dbc->nelem;
- --avail;
+ avail = fifo_space_avail(head, tail, dbc->nelem);
- total_bytes = 0;
- for (i = 0; i < slice->nents; i++) {
- total_bytes += le32_to_cpu(reqs[i].len);
- if (total_bytes >= resize)
+ /*
+ * After this for loop is complete, first_n represents the index
+ * of the last DMA request of this slice that needs to be
+ * transferred after resizing and last_bytes represents DMA size
+ * of that request.
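+	 * For example, three 4K requests with resize = 6K end the loop with
+	 * first_n = 1 and last_bytes = 2K.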
+ */
+ last_bytes = resize;
+ for (first_n = 0; first_n < slice->nents; first_n++)
+ if (last_bytes > le32_to_cpu(reqs[first_n].len))
+ last_bytes -= le32_to_cpu(reqs[first_n].len);
+ else
break;
- }
-
- if (total_bytes < resize) {
- /* User space should have used the full buffer path. */
- ret = -EINVAL;
- return ret;
- }
-
- first_n = i;
- last_bytes = i ? resize + le32_to_cpu(reqs[i].len) - total_bytes : resize;
if (avail < (first_n + 1))
return -EAGAIN;
@@ -1140,22 +1129,21 @@ static inline int copy_partial_exec_reqs(struct qaic_device *qdev, struct bo_sli
if (tail + first_n > dbc->nelem) {
avail = dbc->nelem - tail;
avail = min_t(u32, avail, first_n);
- memcpy(dbc->req_q_base + tail * get_dbc_req_elem_size(), reqs,
- sizeof(*reqs) * avail);
+ memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * avail);
last_req = reqs + avail;
avail = first_n - avail;
if (avail)
memcpy(dbc->req_q_base, last_req, sizeof(*reqs) * avail);
} else {
- memcpy(dbc->req_q_base + tail * get_dbc_req_elem_size(), reqs,
- sizeof(*reqs) * first_n);
+ memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * first_n);
}
}
- /* Copy over the last entry. Here we need to adjust len to the left over
+ /*
+ * Copy over the last entry. Here we need to adjust len to the left over
* size, and set src and dst to the entry it is copied to.
*/
- last_req = dbc->req_q_base + (tail + first_n) % dbc->nelem * get_dbc_req_elem_size();
+ last_req = fifo_at(dbc->req_q_base, (tail + first_n) % dbc->nelem);
memcpy(last_req, reqs + slice->nents - 1, sizeof(*reqs));
/*
@@ -1166,6 +1154,9 @@ static inline int copy_partial_exec_reqs(struct qaic_device *qdev, struct bo_sli
last_req->len = cpu_to_le32((u32)last_bytes);
last_req->src_addr = reqs[first_n].src_addr;
last_req->dest_addr = reqs[first_n].dest_addr;
+ if (!last_bytes)
+ /* Disable DMA transfer */
+ last_req->cmd = GENMASK(7, 2) & reqs[first_n].cmd;
*ptail = (tail + first_n + 1) % dbc->nelem;
@@ -1225,26 +1216,17 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
bo->req_id = dbc->next_req_id++;
list_for_each_entry(slice, &bo->slices, slice) {
- /*
- * If this slice does not fall under the given
- * resize then skip this slice and continue the loop
- */
- if (is_partial && pexec[i].resize && pexec[i].resize <= slice->offset)
- continue;
-
for (j = 0; j < slice->nents; j++)
slice->reqs[j].req_id = cpu_to_le16(bo->req_id);
- /*
- * If it is a partial execute ioctl call then check if
- * resize has cut this slice short then do a partial copy
- * else do complete copy
- */
- if (is_partial && pexec[i].resize &&
- pexec[i].resize < slice->offset + slice->size)
+ if (is_partial && (!pexec[i].resize || pexec[i].resize <= slice->offset))
+ /* Configure the slice for no DMA transfer */
+ ret = copy_partial_exec_reqs(qdev, slice, 0, dbc, head, tail);
+ else if (is_partial && pexec[i].resize < slice->offset + slice->size)
+ /* Configure the slice to be partially DMA transferred */
ret = copy_partial_exec_reqs(qdev, slice,
- pexec[i].resize - slice->offset,
- dbc->id, head, tail);
+ pexec[i].resize - slice->offset, dbc,
+ head, tail);
else
ret = copy_exec_reqs(qdev, slice, dbc->id, head, tail);
if (ret) {
@@ -1357,7 +1339,7 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
qdev = usr->qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
- if (qdev->in_reset) {
+ if (qdev->dev_state != QAIC_ONLINE) {
ret = -ENODEV;
goto unlock_dev_srcu;
}
@@ -1464,6 +1446,16 @@ irqreturn_t dbc_irq_handler(int irq, void *data)
rcu_id = srcu_read_lock(&dbc->ch_lock);
+ if (datapath_polling) {
+ srcu_read_unlock(&dbc->ch_lock, rcu_id);
+ /*
+ * Normally datapath_polling will not have irqs enabled, but
+ * when running with only one MSI the interrupt is shared with
+ * MHI so it cannot be disabled. Return ASAP instead.
+ */
+ return IRQ_HANDLED;
+ }
+
if (!dbc->usr) {
srcu_read_unlock(&dbc->ch_lock, rcu_id);
return IRQ_HANDLED;
@@ -1486,7 +1478,8 @@ irqreturn_t dbc_irq_handler(int irq, void *data)
return IRQ_NONE;
}
- disable_irq_nosync(irq);
+ if (!dbc->qdev->single_msi)
+ disable_irq_nosync(irq);
srcu_read_unlock(&dbc->ch_lock, rcu_id);
return IRQ_WAKE_THREAD;
}
@@ -1502,7 +1495,7 @@ void irq_polling_work(struct work_struct *work)
rcu_id = srcu_read_lock(&dbc->ch_lock);
while (1) {
- if (dbc->qdev->in_reset) {
+ if (dbc->qdev->dev_state != QAIC_ONLINE) {
srcu_read_unlock(&dbc->ch_lock, rcu_id);
return;
}
@@ -1557,12 +1550,12 @@ irqreturn_t dbc_irq_threaded_fn(int irq, void *data)
u32 tail;
rcu_id = srcu_read_lock(&dbc->ch_lock);
+ qdev = dbc->qdev;
head = readl(dbc->dbc_base + RSPHP_OFF);
if (head == U32_MAX) /* PCI link error */
goto error_out;
- qdev = dbc->qdev;
read_fifo:
if (!event_count) {
@@ -1643,14 +1636,14 @@ read_fifo:
goto read_fifo;
normal_out:
- if (likely(!datapath_polling))
+ if (!qdev->single_msi && likely(!datapath_polling))
enable_irq(irq);
- else
+ else if (unlikely(datapath_polling))
schedule_work(&dbc->poll_work);
/* checking the fifo and enabling irqs is a race, missed event check */
tail = readl(dbc->dbc_base + RSPTP_OFF);
if (tail != U32_MAX && head != tail) {
- if (likely(!datapath_polling))
+ if (!qdev->single_msi && likely(!datapath_polling))
disable_irq_nosync(irq);
goto read_fifo;
}
@@ -1659,9 +1652,9 @@ normal_out:
error_out:
srcu_read_unlock(&dbc->ch_lock, rcu_id);
- if (likely(!datapath_polling))
+ if (!qdev->single_msi && likely(!datapath_polling))
enable_irq(irq);
- else
+ else if (unlikely(datapath_polling))
schedule_work(&dbc->poll_work);
return IRQ_HANDLED;
@@ -1692,7 +1685,7 @@ int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file
qdev = usr->qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
- if (qdev->in_reset) {
+ if (qdev->dev_state != QAIC_ONLINE) {
ret = -ENODEV;
goto unlock_dev_srcu;
}
@@ -1761,7 +1754,7 @@ int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file
qdev = usr->qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
- if (qdev->in_reset) {
+ if (qdev->dev_state != QAIC_ONLINE) {
ret = -ENODEV;
goto unlock_dev_srcu;
}
@@ -1852,7 +1845,7 @@ int qaic_detach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
qdev = usr->qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
- if (qdev->in_reset) {
+ if (qdev->dev_state != QAIC_ONLINE) {
ret = -ENODEV;
goto unlock_dev_srcu;
}
diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c
index 6f5809576..2a313eb69 100644
--- a/drivers/accel/qaic/qaic_drv.c
+++ b/drivers/accel/qaic/qaic_drv.c
@@ -8,6 +8,7 @@
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/list.h>
+#include <linux/kobject.h>
#include <linux/kref.h>
#include <linux/mhi.h>
#include <linux/module.h>
@@ -27,6 +28,7 @@
#include "mhi_controller.h"
#include "qaic.h"
+#include "qaic_timesync.h"
MODULE_IMPORT_NS(DMA_BUF);
@@ -42,9 +44,6 @@ MODULE_PARM_DESC(datapath_polling, "Operate the datapath in polling mode");
static bool link_up;
static DEFINE_IDA(qaic_usrs);
-static int qaic_create_drm_device(struct qaic_device *qdev, s32 partition_id);
-static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id);
-
static void free_usr(struct kref *kref)
{
struct qaic_user *usr = container_of(kref, struct qaic_user, ref_count);
@@ -63,7 +62,7 @@ static int qaic_open(struct drm_device *dev, struct drm_file *file)
int ret;
rcu_id = srcu_read_lock(&qdev->dev_lock);
- if (qdev->in_reset) {
+ if (qdev->dev_state != QAIC_ONLINE) {
ret = -ENODEV;
goto dev_unlock;
}
@@ -120,7 +119,7 @@ static void qaic_postclose(struct drm_device *dev, struct drm_file *file)
if (qddev) {
qdev = qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
- if (!qdev->in_reset) {
+ if (qdev->dev_state == QAIC_ONLINE) {
qaic_release_usr(qdev, usr);
for (i = 0; i < qdev->num_dbc; ++i)
if (qdev->dbc[i].usr && qdev->dbc[i].usr->handle == usr->handle)
@@ -182,13 +181,6 @@ static int qaic_create_drm_device(struct qaic_device *qdev, s32 partition_id)
qddev->partition_id = partition_id;
- /*
- * drm_dev_unregister() sets the driver data to NULL and
- * drm_dev_register() does not update the driver data. During a SOC
- * reset drm dev is unregistered and registered again leaving the
- * driver data to NULL.
- */
- dev_set_drvdata(to_accel_kdev(qddev), drm->accel);
ret = drm_dev_register(drm, 0);
if (ret)
pci_dbg(qdev->pdev, "drm_dev_register failed %d\n", ret);
@@ -202,7 +194,6 @@ static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id)
struct drm_device *drm = to_drm(qddev);
struct qaic_user *usr;
- drm_dev_get(drm);
drm_dev_unregister(drm);
qddev->partition_id = 0;
/*
@@ -231,7 +222,6 @@ static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id)
mutex_lock(&qddev->users_mutex);
}
mutex_unlock(&qddev->users_mutex);
- drm_dev_put(drm);
}
static int qaic_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
@@ -253,8 +243,6 @@ static int qaic_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id
qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev));
- qdev->in_reset = false;
-
dev_set_drvdata(&mhi_dev->dev, qdev);
qdev->cntl_ch = mhi_dev;
@@ -264,6 +252,7 @@ static int qaic_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id
return ret;
}
+ qdev->dev_state = QAIC_BOOT;
ret = get_cntl_version(qdev, NULL, &major, &minor);
if (ret || major != CNTL_MAJOR || minor > CNTL_MINOR) {
pci_err(qdev->pdev, "%s: Control protocol version (%d.%d) not supported. Supported version is (%d.%d). Ret: %d\n",
@@ -271,8 +260,8 @@ static int qaic_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id
ret = -EINVAL;
goto close_control;
}
-
- ret = qaic_create_drm_device(qdev, QAIC_NO_PARTITION);
+ qdev->dev_state = QAIC_ONLINE;
+ kobject_uevent(&(to_accel_kdev(qdev->qddev))->kobj, KOBJ_ONLINE);
return ret;
@@ -290,7 +279,8 @@ static void qaic_notify_reset(struct qaic_device *qdev)
{
int i;
- qdev->in_reset = true;
+ kobject_uevent(&(to_accel_kdev(qdev->qddev))->kobj, KOBJ_OFFLINE);
+ qdev->dev_state = QAIC_OFFLINE;
/* wake up any waiters to avoid waiting for timeouts at sync */
wake_all_cntl(qdev);
for (i = 0; i < qdev->num_dbc; ++i)
@@ -298,21 +288,15 @@ static void qaic_notify_reset(struct qaic_device *qdev)
synchronize_srcu(&qdev->dev_lock);
}
-void qaic_dev_reset_clean_local_state(struct qaic_device *qdev, bool exit_reset)
+void qaic_dev_reset_clean_local_state(struct qaic_device *qdev)
{
int i;
qaic_notify_reset(qdev);
- /* remove drmdevs to prevent new users from coming in */
- qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
-
/* start tearing things down */
for (i = 0; i < qdev->num_dbc; ++i)
release_dbc(qdev, i);
-
- if (exit_reset)
- qdev->in_reset = false;
}
static void cleanup_qdev(struct qaic_device *qdev)
@@ -324,6 +308,7 @@ static void cleanup_qdev(struct qaic_device *qdev)
cleanup_srcu_struct(&qdev->dev_lock);
pci_set_drvdata(qdev->pdev, NULL);
destroy_workqueue(qdev->cntl_wq);
+ destroy_workqueue(qdev->qts_wq);
}
static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -336,6 +321,7 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_de
if (!qdev)
return NULL;
+ qdev->dev_state = QAIC_OFFLINE;
if (id->device == PCI_DEV_AIC100) {
qdev->num_dbc = 16;
qdev->dbc = devm_kcalloc(&pdev->dev, qdev->num_dbc, sizeof(*qdev->dbc), GFP_KERNEL);
@@ -347,6 +333,12 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_de
if (!qdev->cntl_wq)
return NULL;
+ qdev->qts_wq = alloc_workqueue("qaic_ts", WQ_UNBOUND, 0);
+ if (!qdev->qts_wq) {
+ destroy_workqueue(qdev->cntl_wq);
+ return NULL;
+ }
+
pci_set_drvdata(pdev, qdev);
qdev->pdev = pdev;
@@ -424,14 +416,24 @@ static int init_msi(struct qaic_device *qdev, struct pci_dev *pdev)
int i;
/* Managed release since we use pcim_enable_device */
- ret = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
- if (ret < 0)
- return ret;
+ ret = pci_alloc_irq_vectors(pdev, 32, 32, PCI_IRQ_MSI);
+ if (ret == -ENOSPC) {
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (ret < 0)
+ return ret;
- if (ret < 32) {
- pci_err(pdev, "%s: Requested 32 MSIs. Obtained %d MSIs which is less than the 32 required.\n",
- __func__, ret);
- return -ENODEV;
+ /*
+ * Operate in one MSI mode. All interrupts will be directed to
+ * MSI0; every interrupt will wake up all the interrupt handlers
+ * (MHI and DBC[0-15]). Since the interrupt is now shared, it is
+ * not disabled during DBC threaded handler, but only one thread
+ * will be allowed to run per DBC, so while it can be
+ * interrupted, it shouldn't race with itself.
+ */
+ qdev->single_msi = true;
+ pci_info(pdev, "Allocating 32 MSIs failed, operating in 1 MSI mode. Performance may be impacted.\n");
+ } else if (ret < 0) {
+ return ret;
}
mhi_irq = pci_irq_vector(pdev, 0);
@@ -439,15 +441,17 @@ static int init_msi(struct qaic_device *qdev, struct pci_dev *pdev)
return mhi_irq;
for (i = 0; i < qdev->num_dbc; ++i) {
- ret = devm_request_threaded_irq(&pdev->dev, pci_irq_vector(pdev, i + 1),
+ ret = devm_request_threaded_irq(&pdev->dev,
+ pci_irq_vector(pdev, qdev->single_msi ? 0 : i + 1),
dbc_irq_handler, dbc_irq_threaded_fn, IRQF_SHARED,
"qaic_dbc", &qdev->dbc[i]);
if (ret)
return ret;
if (datapath_polling) {
- qdev->dbc[i].irq = pci_irq_vector(pdev, i + 1);
- disable_irq_nosync(qdev->dbc[i].irq);
+ qdev->dbc[i].irq = pci_irq_vector(pdev, qdev->single_msi ? 0 : i + 1);
+ if (!qdev->single_msi)
+ disable_irq_nosync(qdev->dbc[i].irq);
INIT_WORK(&qdev->dbc[i].poll_work, irq_polling_work);
}
}
@@ -479,14 +483,21 @@ static int qaic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto cleanup_qdev;
}
- qdev->mhi_cntrl = qaic_mhi_register_controller(pdev, qdev->bar_0, mhi_irq);
+ ret = qaic_create_drm_device(qdev, QAIC_NO_PARTITION);
+ if (ret)
+ goto cleanup_qdev;
+
+ qdev->mhi_cntrl = qaic_mhi_register_controller(pdev, qdev->bar_0, mhi_irq,
+ qdev->single_msi);
if (IS_ERR(qdev->mhi_cntrl)) {
ret = PTR_ERR(qdev->mhi_cntrl);
- goto cleanup_qdev;
+ goto cleanup_drm_dev;
}
return 0;
+cleanup_drm_dev:
+ qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
cleanup_qdev:
cleanup_qdev(qdev);
return ret;
@@ -499,7 +510,8 @@ static void qaic_pci_remove(struct pci_dev *pdev)
if (!qdev)
return;
- qaic_dev_reset_clean_local_state(qdev, false);
+ qaic_dev_reset_clean_local_state(qdev);
+ qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
qaic_mhi_free_controller(qdev->mhi_cntrl, link_up);
cleanup_qdev(qdev);
}
@@ -522,14 +534,13 @@ static void qaic_pci_reset_prepare(struct pci_dev *pdev)
qaic_notify_reset(qdev);
qaic_mhi_start_reset(qdev->mhi_cntrl);
- qaic_dev_reset_clean_local_state(qdev, false);
+ qaic_dev_reset_clean_local_state(qdev);
}
static void qaic_pci_reset_done(struct pci_dev *pdev)
{
struct qaic_device *qdev = pci_get_drvdata(pdev);
- qdev->in_reset = false;
qaic_mhi_reset_done(qdev->mhi_cntrl);
}
@@ -586,6 +597,10 @@ static int __init qaic_init(void)
goto free_pci;
}
+ ret = qaic_timesync_init();
+ if (ret)
+ pr_debug("qaic: qaic_timesync_init failed %d\n", ret);
+
return 0;
free_pci:
@@ -611,6 +626,7 @@ static void __exit qaic_exit(void)
* reinitializing the link_up state after the cleanup is done.
*/
link_up = true;
+ qaic_timesync_deinit();
mhi_driver_unregister(&qaic_mhi_driver);
pci_unregister_driver(&qaic_pci_driver);
}
diff --git a/drivers/accel/qaic/qaic_timesync.c b/drivers/accel/qaic/qaic_timesync.c
new file mode 100644
index 000000000..301f4462d
--- /dev/null
+++ b/drivers/accel/qaic/qaic_timesync.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. */
+
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/mhi.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/time64.h>
+#include <linux/timer.h>
+
+#include "qaic.h"
+#include "qaic_timesync.h"
+
+#define QTIMER_REG_OFFSET 0xa28
+#define QAIC_TIMESYNC_SIGNATURE 0x55aa
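+/* The QTimer ticks at 19.2 MHz, so microseconds = ticks * 10 / 192 */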
+#define QAIC_CONV_QTIMER_TO_US(qtimer) (mul_u64_u32_div(qtimer, 10, 192))
+
+static unsigned int timesync_delay_ms = 1000; /* 1 sec default */
+module_param(timesync_delay_ms, uint, 0600);
+MODULE_PARM_DESC(timesync_delay_ms, "Delay in ms between two consecutive timesync operations");
+
+enum qts_msg_type {
+ QAIC_TS_CMD_TO_HOST,
+ QAIC_TS_SYNC_REQ,
+ QAIC_TS_ACK_TO_HOST,
+ QAIC_TS_MSG_TYPE_MAX
+};
+
+/**
+ * struct qts_hdr - Timesync message header structure.
+ * @signature: Unique signature to identify the timesync message.
+ * @reserved_1: Reserved for future use.
+ * @reserved_2: Reserved for future use.
+ * @msg_type: sub-type of the timesync message.
+ * @reserved_3: Reserved for future use.
+ */
+struct qts_hdr {
+ __le16 signature;
+ __le16 reserved_1;
+ u8 reserved_2;
+ u8 msg_type;
+ __le16 reserved_3;
+} __packed;
+
+/**
+ * struct qts_timeval - Structure to carry time information.
+ * @tv_sec: Seconds part of the time.
+ * @tv_usec: uS (microseconds) part of the time.
+ */
+struct qts_timeval {
+ __le64 tv_sec;
+ __le64 tv_usec;
+} __packed;
+
+/**
+ * struct qts_host_time_sync_msg_data - Structure to denote the timesync message.
+ * @header: Header of the timesync message.
+ * @data: Time information.
+ */
+struct qts_host_time_sync_msg_data {
+ struct qts_hdr header;
+ struct qts_timeval data;
+} __packed;
+
+/**
+ * struct mqts_dev - MHI QAIC Timesync Control device.
+ * @qdev: Pointer to the root device struct driven by QAIC driver.
+ * @mhi_dev: Pointer to associated MHI device.
+ * @timer: Timer handle used for timesync.
+ * @qtimer_addr: Device QTimer register pointer.
+ * @buff_in_use: atomic variable to track if the sync_msg buffer is in use.
+ * @dev: Device pointer to qdev->pdev->dev stored for easy access.
+ * @sync_msg: Buffer used to send timesync message over MHI.
+ */
+struct mqts_dev {
+ struct qaic_device *qdev;
+ struct mhi_device *mhi_dev;
+ struct timer_list timer;
+ void __iomem *qtimer_addr;
+ atomic_t buff_in_use;
+ struct device *dev;
+ struct qts_host_time_sync_msg_data *sync_msg;
+};
+
+struct qts_resp_msg {
+ struct qts_hdr hdr;
+} __packed;
+
+struct qts_resp {
+ struct qts_resp_msg data;
+ struct work_struct work;
+ struct qaic_device *qdev;
+};
+
+#ifdef readq
+static u64 read_qtimer(const volatile void __iomem *addr)
+{
+ return readq(addr);
+}
+#else
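+/* readq() is not available on this platform; compose the 64-bit value from two 32-bit reads */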
+static u64 read_qtimer(const volatile void __iomem *addr)
+{
+ u64 low, high;
+
+ low = readl(addr);
+ high = readl(addr + sizeof(u32));
+ return low | (high << 32);
+}
+#endif
+
+static void qaic_timesync_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
+{
+ struct mqts_dev *mqtsdev = dev_get_drvdata(&mhi_dev->dev);
+
+ dev_dbg(mqtsdev->dev, "%s status: %d xfer_len: %zu\n", __func__,
+ mhi_result->transaction_status, mhi_result->bytes_xferd);
+
+ atomic_set(&mqtsdev->buff_in_use, 0);
+}
+
+static void qaic_timesync_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
+{
+ struct mqts_dev *mqtsdev = dev_get_drvdata(&mhi_dev->dev);
+
+ dev_err(mqtsdev->dev, "%s no data expected on dl channel\n", __func__);
+}
+
+static void qaic_timesync_timer(struct timer_list *t)
+{
+ struct mqts_dev *mqtsdev = from_timer(mqtsdev, t, timer);
+ struct qts_host_time_sync_msg_data *sync_msg;
+ u64 device_qtimer_us;
+ u64 device_qtimer;
+ u64 host_time_us;
+ u64 offset_us;
+ u64 host_sec;
+ int ret;
+
+ if (atomic_read(&mqtsdev->buff_in_use)) {
+ dev_dbg(mqtsdev->dev, "%s buffer not free, schedule next cycle\n", __func__);
+ goto mod_timer;
+ }
+ atomic_set(&mqtsdev->buff_in_use, 1);
+
+ sync_msg = mqtsdev->sync_msg;
+ sync_msg->header.signature = cpu_to_le16(QAIC_TIMESYNC_SIGNATURE);
+ sync_msg->header.msg_type = QAIC_TS_SYNC_REQ;
+ /* Read host UTC time and convert to uS */
+ host_time_us = div_u64(ktime_get_real_ns(), NSEC_PER_USEC);
+ device_qtimer = read_qtimer(mqtsdev->qtimer_addr);
+ device_qtimer_us = QAIC_CONV_QTIMER_TO_US(device_qtimer);
+ /* Offset between host UTC and device time */
+ offset_us = host_time_us - device_qtimer_us;
+
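+	/* Encode the offset as whole seconds plus remaining microseconds for the sync message */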
+ host_sec = div_u64(offset_us, USEC_PER_SEC);
+ sync_msg->data.tv_usec = cpu_to_le64(offset_us - host_sec * USEC_PER_SEC);
+ sync_msg->data.tv_sec = cpu_to_le64(host_sec);
+ ret = mhi_queue_buf(mqtsdev->mhi_dev, DMA_TO_DEVICE, sync_msg, sizeof(*sync_msg), MHI_EOT);
+ if (ret && (ret != -EAGAIN)) {
+ dev_err(mqtsdev->dev, "%s unable to queue to mhi:%d\n", __func__, ret);
+ return;
+ } else if (ret == -EAGAIN) {
+ atomic_set(&mqtsdev->buff_in_use, 0);
+ }
+
+mod_timer:
+ ret = mod_timer(t, jiffies + msecs_to_jiffies(timesync_delay_ms));
+ if (ret)
+ dev_err(mqtsdev->dev, "%s mod_timer error:%d\n", __func__, ret);
+}
+
+static int qaic_timesync_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
+{
+ struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev));
+ struct mqts_dev *mqtsdev;
+ struct timer_list *timer;
+ int ret;
+
+ mqtsdev = kzalloc(sizeof(*mqtsdev), GFP_KERNEL);
+ if (!mqtsdev) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ timer = &mqtsdev->timer;
+ mqtsdev->mhi_dev = mhi_dev;
+ mqtsdev->qdev = qdev;
+ mqtsdev->dev = &qdev->pdev->dev;
+
+ mqtsdev->sync_msg = kzalloc(sizeof(*mqtsdev->sync_msg), GFP_KERNEL);
+ if (!mqtsdev->sync_msg) {
+ ret = -ENOMEM;
+ goto free_mqts_dev;
+ }
+ atomic_set(&mqtsdev->buff_in_use, 0);
+
+ ret = mhi_prepare_for_transfer(mhi_dev);
+ if (ret)
+ goto free_sync_msg;
+
+ /* Qtimer register pointer */
+ mqtsdev->qtimer_addr = qdev->bar_0 + QTIMER_REG_OFFSET;
+ timer_setup(timer, qaic_timesync_timer, 0);
+ timer->expires = jiffies + msecs_to_jiffies(timesync_delay_ms);
+ add_timer(timer);
+ dev_set_drvdata(&mhi_dev->dev, mqtsdev);
+
+ return 0;
+
+free_sync_msg:
+ kfree(mqtsdev->sync_msg);
+free_mqts_dev:
+ kfree(mqtsdev);
+out:
+ return ret;
+}
+
+static void qaic_timesync_remove(struct mhi_device *mhi_dev)
+{
+ struct mqts_dev *mqtsdev = dev_get_drvdata(&mhi_dev->dev);
+
+ del_timer_sync(&mqtsdev->timer);
+ mhi_unprepare_from_transfer(mqtsdev->mhi_dev);
+ kfree(mqtsdev->sync_msg);
+ kfree(mqtsdev);
+}
+
+static const struct mhi_device_id qaic_timesync_match_table[] = {
+ { .chan = "QAIC_TIMESYNC_PERIODIC"},
+ {},
+};
+
+MODULE_DEVICE_TABLE(mhi, qaic_timesync_match_table);
+
+static struct mhi_driver qaic_timesync_driver = {
+ .id_table = qaic_timesync_match_table,
+ .remove = qaic_timesync_remove,
+ .probe = qaic_timesync_probe,
+ .ul_xfer_cb = qaic_timesync_ul_xfer_cb,
+ .dl_xfer_cb = qaic_timesync_dl_xfer_cb,
+ .driver = {
+ .name = "qaic_timesync_periodic",
+ },
+};
+
+static void qaic_boot_timesync_worker(struct work_struct *work)
+{
+ struct qts_resp *resp = container_of(work, struct qts_resp, work);
+ struct qts_host_time_sync_msg_data *req;
+ struct qts_resp_msg data = resp->data;
+ struct qaic_device *qdev = resp->qdev;
+ struct mhi_device *mhi_dev;
+ struct timespec64 ts;
+ int ret;
+
+ mhi_dev = qdev->qts_ch;
+ /* Queue the response message beforehand to avoid race conditions */
+ ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, &resp->data, sizeof(resp->data), MHI_EOT);
+ if (ret) {
+ kfree(resp);
+ dev_warn(&mhi_dev->dev, "Failed to re-queue response buffer %d\n", ret);
+ return;
+ }
+
+ switch (data.hdr.msg_type) {
+ case QAIC_TS_CMD_TO_HOST:
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ break;
+
+ req->header = data.hdr;
+ req->header.msg_type = QAIC_TS_SYNC_REQ;
+ ktime_get_real_ts64(&ts);
+ req->data.tv_sec = cpu_to_le64(ts.tv_sec);
+ req->data.tv_usec = cpu_to_le64(div_u64(ts.tv_nsec, NSEC_PER_USEC));
+
+ ret = mhi_queue_buf(mhi_dev, DMA_TO_DEVICE, req, sizeof(*req), MHI_EOT);
+ if (ret) {
+ kfree(req);
+ dev_dbg(&mhi_dev->dev, "Failed to send request message. Error %d\n", ret);
+ }
+ break;
+ case QAIC_TS_ACK_TO_HOST:
+ dev_dbg(&mhi_dev->dev, "ACK received from device\n");
+ break;
+ default:
+ dev_err(&mhi_dev->dev, "Invalid message type %u.\n", data.hdr.msg_type);
+ }
+}
+
+static int qaic_boot_timesync_queue_resp(struct mhi_device *mhi_dev, struct qaic_device *qdev)
+{
+ struct qts_resp *resp;
+ int ret;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ resp->qdev = qdev;
+ INIT_WORK(&resp->work, qaic_boot_timesync_worker);
+
+ ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, &resp->data, sizeof(resp->data), MHI_EOT);
+ if (ret) {
+ kfree(resp);
+ dev_warn(&mhi_dev->dev, "Failed to queue response buffer %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void qaic_boot_timesync_remove(struct mhi_device *mhi_dev)
+{
+ struct qaic_device *qdev;
+
+ qdev = dev_get_drvdata(&mhi_dev->dev);
+ mhi_unprepare_from_transfer(qdev->qts_ch);
+ qdev->qts_ch = NULL;
+}
+
+static int qaic_boot_timesync_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
+{
+ struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev));
+ int ret;
+
+ ret = mhi_prepare_for_transfer(mhi_dev);
+ if (ret)
+ return ret;
+
+ qdev->qts_ch = mhi_dev;
+ dev_set_drvdata(&mhi_dev->dev, qdev);
+
+ ret = qaic_boot_timesync_queue_resp(mhi_dev, qdev);
+ if (ret) {
+ dev_set_drvdata(&mhi_dev->dev, NULL);
+ qdev->qts_ch = NULL;
+ mhi_unprepare_from_transfer(mhi_dev);
+ }
+
+ return ret;
+}
+
+static void qaic_boot_timesync_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
+{
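+	/* Free the request buffer allocated in qaic_boot_timesync_worker() now that MHI is done with it */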
+ kfree(mhi_result->buf_addr);
+}
+
+static void qaic_boot_timesync_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
+{
+ struct qts_resp *resp = container_of(mhi_result->buf_addr, struct qts_resp, data);
+
+ if (mhi_result->transaction_status || mhi_result->bytes_xferd != sizeof(resp->data)) {
+ kfree(resp);
+ return;
+ }
+
+ queue_work(resp->qdev->qts_wq, &resp->work);
+}
+
+static const struct mhi_device_id qaic_boot_timesync_match_table[] = {
+ { .chan = "QAIC_TIMESYNC"},
+ {},
+};
+
+static struct mhi_driver qaic_boot_timesync_driver = {
+ .id_table = qaic_boot_timesync_match_table,
+ .remove = qaic_boot_timesync_remove,
+ .probe = qaic_boot_timesync_probe,
+ .ul_xfer_cb = qaic_boot_timesync_ul_xfer_cb,
+ .dl_xfer_cb = qaic_boot_timesync_dl_xfer_cb,
+ .driver = {
+ .name = "qaic_timesync",
+ },
+};
+
+int qaic_timesync_init(void)
+{
+ int ret;
+
+ ret = mhi_driver_register(&qaic_timesync_driver);
+ if (ret)
+ return ret;
+ ret = mhi_driver_register(&qaic_boot_timesync_driver);
+
+ return ret;
+}
+
+void qaic_timesync_deinit(void)
+{
+ mhi_driver_unregister(&qaic_boot_timesync_driver);
+ mhi_driver_unregister(&qaic_timesync_driver);
+}
diff --git a/drivers/accel/qaic/qaic_timesync.h b/drivers/accel/qaic/qaic_timesync.h
new file mode 100644
index 000000000..851b7acd4
--- /dev/null
+++ b/drivers/accel/qaic/qaic_timesync.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __QAIC_TIMESYNC_H__
+#define __QAIC_TIMESYNC_H__
+
+int qaic_timesync_init(void);
+void qaic_timesync_deinit(void);
+#endif /* __QAIC_TIMESYNC_H__ */