authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-18 17:40:19 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-18 17:40:19 +0000
commit9f0fc191371843c4fc000a226b0a26b6c059aacd (patch)
tree35f8be3ef04506ac891ad001e8c41e535ae8d01d /drivers/accel/habanalabs/common
parentReleasing progress-linux version 6.6.15-2~progress7.99u1. (diff)
Merging upstream version 6.7.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/accel/habanalabs/common')
-rw-r--r--  drivers/accel/habanalabs/common/command_buffer.c      |   5
-rw-r--r--  drivers/accel/habanalabs/common/command_submission.c  | 488
-rw-r--r--  drivers/accel/habanalabs/common/context.c             |   9
-rw-r--r--  drivers/accel/habanalabs/common/debugfs.c             |  22
-rw-r--r--  drivers/accel/habanalabs/common/device.c               | 430
-rw-r--r--  drivers/accel/habanalabs/common/firmware_if.c          |  45
-rw-r--r--  drivers/accel/habanalabs/common/habanalabs.h           | 214
-rw-r--r--  drivers/accel/habanalabs/common/habanalabs_drv.c       | 189
-rw-r--r--  drivers/accel/habanalabs/common/habanalabs_ioctl.c     | 112
-rw-r--r--  drivers/accel/habanalabs/common/irq.c                  | 180
-rw-r--r--  drivers/accel/habanalabs/common/memory.c               | 308
-rw-r--r--  drivers/accel/habanalabs/common/mmu/mmu.c              |   1
-rw-r--r--  drivers/accel/habanalabs/common/sysfs.c                |   3
13 files changed, 1211 insertions, 795 deletions
diff --git a/drivers/accel/habanalabs/common/command_buffer.c b/drivers/accel/habanalabs/common/command_buffer.c
index 08f7aee426..0f0d295116 100644
--- a/drivers/accel/habanalabs/common/command_buffer.c
+++ b/drivers/accel/habanalabs/common/command_buffer.c
@@ -361,10 +361,11 @@ out:
return rc;
}
-int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
+int hl_cb_ioctl(struct drm_device *ddev, void *data, struct drm_file *file_priv)
{
- union hl_cb_args *args = data;
+ struct hl_fpriv *hpriv = file_priv->driver_priv;
struct hl_device *hdev = hpriv->hdev;
+ union hl_cb_args *args = data;
u64 handle = 0, device_va = 0;
enum hl_device_status status;
u32 usage_cnt = 0;
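
The command_buffer.c hunk above shows the conversion pattern repeated across this update: the handlers drop the char-dev style (struct hl_fpriv *, void *) signature and take the DRM ioctl signature, recovering the driver-private data from drm_file->driver_priv. A minimal sketch of such a handler follows, with purely illustrative my_* names rather than driver APIs:

    #include <linux/errno.h>
    #include <drm/drm_device.h>
    #include <drm/drm_file.h>

    /* Hypothetical per-open private data, analogous to struct hl_fpriv. */
    struct my_fpriv {
        int open_count;
    };

    /* DRM-style ioctl handler: the data set at open time is reached through
     * file_priv->driver_priv instead of filp->private_data.
     */
    static int my_ioctl(struct drm_device *ddev, void *data, struct drm_file *file_priv)
    {
        struct my_fpriv *fpriv = file_priv->driver_priv;

        (void)ddev;
        (void)data;
        return fpriv ? 0 : -EINVAL;
    }

Handlers in this form are typically listed in the driver's drm_ioctl_desc table instead of being dispatched from a private unlocked_ioctl.
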
diff --git a/drivers/accel/habanalabs/common/command_submission.c b/drivers/accel/habanalabs/common/command_submission.c
index c23829dab9..3aa6eeef44 100644
--- a/drivers/accel/habanalabs/common/command_submission.c
+++ b/drivers/accel/habanalabs/common/command_submission.c
@@ -31,6 +31,24 @@ enum hl_cs_wait_status {
CS_WAIT_STATUS_GONE
};
+/*
+ * Data used while handling wait/timestamp nodes.
+ * The purpose of this struct is to store the data needed for both operations
+ * in one place instead of passing a large number of arguments to functions.
+ */
+struct wait_interrupt_data {
+ struct hl_user_interrupt *interrupt;
+ struct hl_mmap_mem_buf *buf;
+ struct hl_mem_mgr *mmg;
+ struct hl_cb *cq_cb;
+ u64 ts_handle;
+ u64 ts_offset;
+ u64 cq_handle;
+ u64 cq_offset;
+ u64 target_value;
+ u64 intr_timeout_us;
+};
+
static void job_wq_completion(struct work_struct *work);
static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, u64 timeout_us, u64 seq,
enum hl_cs_wait_status *status, s64 *timestamp);
@@ -1079,19 +1097,22 @@ static void
wake_pending_user_interrupt_threads(struct hl_user_interrupt *interrupt)
{
struct hl_user_pending_interrupt *pend, *temp;
+ unsigned long flags;
- spin_lock(&interrupt->wait_list_lock);
- list_for_each_entry_safe(pend, temp, &interrupt->wait_list_head, wait_list_node) {
- if (pend->ts_reg_info.buf) {
- list_del(&pend->wait_list_node);
- hl_mmap_mem_buf_put(pend->ts_reg_info.buf);
- hl_cb_put(pend->ts_reg_info.cq_cb);
- } else {
- pend->fence.error = -EIO;
- complete_all(&pend->fence.completion);
- }
+ spin_lock_irqsave(&interrupt->wait_list_lock, flags);
+ list_for_each_entry_safe(pend, temp, &interrupt->wait_list_head, list_node) {
+ pend->fence.error = -EIO;
+ complete_all(&pend->fence.completion);
}
- spin_unlock(&interrupt->wait_list_lock);
+ spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+
+ spin_lock_irqsave(&interrupt->ts_list_lock, flags);
+ list_for_each_entry_safe(pend, temp, &interrupt->ts_list_head, list_node) {
+ list_del(&pend->list_node);
+ hl_mmap_mem_buf_put(pend->ts_reg_info.buf);
+ hl_cb_put(pend->ts_reg_info.cq_cb);
+ }
+ spin_unlock_irqrestore(&interrupt->ts_list_lock, flags);
}
void hl_release_pending_user_interrupts(struct hl_device *hdev)
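
The rework above splits the old single wait list into a waiter list and a timestamp-node list, and both are now drained with the irqsave lock variants since the same lists are also manipulated from the interrupt handler. A generic sketch of that drain pattern, with illustrative names only:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct my_waiter {
        struct list_head list_node;
        /* per-waiter state (fence, buffers, ...) would live here */
    };

    /* Drain a list that an interrupt handler may also touch: disable local
     * interrupts while holding the lock and use the _safe iterator so that
     * entries can be unlinked while walking.
     */
    static void drain_waiters(struct list_head *head, spinlock_t *lock)
    {
        struct my_waiter *w, *tmp;
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        list_for_each_entry_safe(w, tmp, head, list_node) {
            list_del(&w->list_node);
            /* complete the waiter / drop its references here */
        }
        spin_unlock_irqrestore(lock, flags);
    }
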
@@ -1730,16 +1751,11 @@ static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args,
/* Need to wait for restore completion before execution phase */
if (num_chunks) {
enum hl_cs_wait_status status;
-wait_again:
+
ret = _hl_cs_wait_ioctl(hdev, ctx,
jiffies_to_usecs(hdev->timeout_jiffies),
*cs_seq, &status, NULL);
if (ret) {
- if (ret == -ERESTARTSYS) {
- usleep_range(100, 200);
- goto wait_again;
- }
-
dev_err(hdev->dev,
"Restore CS for context %d failed to complete %d\n",
ctx->asid, ret);
@@ -2539,8 +2555,9 @@ static int cs_ioctl_flush_pci_hbw_writes(struct hl_fpriv *hpriv)
return 0;
}
-int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
+int hl_cs_ioctl(struct drm_device *ddev, void *data, struct drm_file *file_priv)
{
+ struct hl_fpriv *hpriv = file_priv->driver_priv;
union hl_cs_args *args = data;
enum hl_cs_type cs_type = 0;
u64 cs_seq = ULONG_MAX;
@@ -3197,166 +3214,241 @@ static int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
return 0;
}
-static int ts_buff_get_kernel_ts_record(struct hl_mmap_mem_buf *buf,
- struct hl_cb *cq_cb,
- u64 ts_offset, u64 cq_offset, u64 target_value,
- spinlock_t *wait_list_lock,
- struct hl_user_pending_interrupt **pend)
+static inline void set_record_cq_info(struct hl_user_pending_interrupt *record,
+ struct hl_cb *cq_cb, u32 cq_offset, u32 target_value)
{
- struct hl_ts_buff *ts_buff = buf->private;
- struct hl_user_pending_interrupt *requested_offset_record =
- (struct hl_user_pending_interrupt *)ts_buff->kernel_buff_address +
- ts_offset;
- struct hl_user_pending_interrupt *cb_last =
- (struct hl_user_pending_interrupt *)ts_buff->kernel_buff_address +
+ record->ts_reg_info.cq_cb = cq_cb;
+ record->cq_kernel_addr = (u64 *) cq_cb->kernel_address + cq_offset;
+ record->cq_target_value = target_value;
+}
+
+static int validate_and_get_ts_record(struct device *dev,
+ struct hl_ts_buff *ts_buff, u64 ts_offset,
+ struct hl_user_pending_interrupt **req_event_record)
+{
+ struct hl_user_pending_interrupt *ts_cb_last;
+
+ *req_event_record = (struct hl_user_pending_interrupt *)ts_buff->kernel_buff_address +
+ ts_offset;
+ ts_cb_last = (struct hl_user_pending_interrupt *)ts_buff->kernel_buff_address +
(ts_buff->kernel_buff_size / sizeof(struct hl_user_pending_interrupt));
- unsigned long iter_counter = 0;
- u64 current_cq_counter;
- ktime_t timestamp;
/* Validate ts_offset not exceeding last max */
- if (requested_offset_record >= cb_last) {
- dev_err(buf->mmg->dev, "Ts offset exceeds max CB offset(0x%llx)\n",
- (u64)(uintptr_t)cb_last);
+ if (*req_event_record >= ts_cb_last) {
+ dev_err(dev, "Ts offset(%llu) exceeds max CB offset(0x%llx)\n",
+ ts_offset, (u64)(uintptr_t)ts_cb_last);
return -EINVAL;
}
- timestamp = ktime_get();
+ return 0;
+}
-start_over:
- spin_lock(wait_list_lock);
+static void unregister_timestamp_node(struct hl_device *hdev,
+ struct hl_user_pending_interrupt *record, bool need_lock)
+{
+ struct hl_user_interrupt *interrupt = record->ts_reg_info.interrupt;
+ bool ts_rec_found = false;
+ unsigned long flags;
- /* Unregister only if we didn't reach the target value
- * since in this case there will be no handling in irq context
- * and then it's safe to delete the node out of the interrupt list
- * then re-use it on other interrupt
- */
- if (requested_offset_record->ts_reg_info.in_use) {
- current_cq_counter = *requested_offset_record->cq_kernel_addr;
- if (current_cq_counter < requested_offset_record->cq_target_value) {
- list_del(&requested_offset_record->wait_list_node);
- spin_unlock(wait_list_lock);
+ if (need_lock)
+ spin_lock_irqsave(&interrupt->ts_list_lock, flags);
- hl_mmap_mem_buf_put(requested_offset_record->ts_reg_info.buf);
- hl_cb_put(requested_offset_record->ts_reg_info.cq_cb);
+ if (record->ts_reg_info.in_use) {
+ record->ts_reg_info.in_use = false;
+ list_del(&record->list_node);
+ ts_rec_found = true;
+ }
- dev_dbg(buf->mmg->dev,
- "ts node removed from interrupt list now can re-use\n");
- } else {
- dev_dbg(buf->mmg->dev,
- "ts node in middle of irq handling\n");
-
- /* irq thread handling in the middle give it time to finish */
- spin_unlock(wait_list_lock);
- usleep_range(100, 1000);
- if (++iter_counter == MAX_TS_ITER_NUM) {
- dev_err(buf->mmg->dev,
- "Timestamp offset processing reached timeout of %lld ms\n",
- ktime_ms_delta(ktime_get(), timestamp));
- return -EAGAIN;
- }
+ if (need_lock)
+ spin_unlock_irqrestore(&interrupt->ts_list_lock, flags);
- goto start_over;
+ /* Put refcounts that were taken when we registered the event */
+ if (ts_rec_found) {
+ hl_mmap_mem_buf_put(record->ts_reg_info.buf);
+ hl_cb_put(record->ts_reg_info.cq_cb);
+ }
+}
+
+static int ts_get_and_handle_kernel_record(struct hl_device *hdev, struct hl_ctx *ctx,
+ struct wait_interrupt_data *data, unsigned long *flags,
+ struct hl_user_pending_interrupt **pend)
+{
+ struct hl_user_pending_interrupt *req_offset_record;
+ struct hl_ts_buff *ts_buff = data->buf->private;
+ bool need_lock = false;
+ int rc;
+
+ rc = validate_and_get_ts_record(data->buf->mmg->dev, ts_buff, data->ts_offset,
+ &req_offset_record);
+ if (rc)
+ return rc;
+
+ /* In case the node already registered, need to unregister first then re-use */
+ if (req_offset_record->ts_reg_info.in_use) {
+ dev_dbg(data->buf->mmg->dev,
+ "Requested record %p is in use on irq: %u ts addr: %p, unregister first then put on irq: %u\n",
+ req_offset_record,
+ req_offset_record->ts_reg_info.interrupt->interrupt_id,
+ req_offset_record->ts_reg_info.timestamp_kernel_addr,
+ data->interrupt->interrupt_id);
+ /*
+ * Since the interrupt here can be different from the one the node is currently
+ * registered on, and we don't want to hold two list locks during the unregister,
+ * unlock the new interrupt's ts list here and re-acquire it once the unregister is done.
+ */
+ if (data->interrupt->interrupt_id !=
+ req_offset_record->ts_reg_info.interrupt->interrupt_id) {
+
+ need_lock = true;
+ spin_unlock_irqrestore(&data->interrupt->ts_list_lock, *flags);
}
- } else {
- /* Fill up the new registration node info */
- requested_offset_record->ts_reg_info.buf = buf;
- requested_offset_record->ts_reg_info.cq_cb = cq_cb;
- requested_offset_record->ts_reg_info.timestamp_kernel_addr =
- (u64 *) ts_buff->user_buff_address + ts_offset;
- requested_offset_record->cq_kernel_addr =
- (u64 *) cq_cb->kernel_address + cq_offset;
- requested_offset_record->cq_target_value = target_value;
- spin_unlock(wait_list_lock);
+ unregister_timestamp_node(hdev, req_offset_record, need_lock);
+
+ if (need_lock)
+ spin_lock_irqsave(&data->interrupt->ts_list_lock, *flags);
}
- *pend = requested_offset_record;
+ /* Fill up the new registration node info and add it to the list */
+ req_offset_record->ts_reg_info.in_use = true;
+ req_offset_record->ts_reg_info.buf = data->buf;
+ req_offset_record->ts_reg_info.timestamp_kernel_addr =
+ (u64 *) ts_buff->user_buff_address + data->ts_offset;
+ req_offset_record->ts_reg_info.interrupt = data->interrupt;
+ set_record_cq_info(req_offset_record, data->cq_cb, data->cq_offset,
+ data->target_value);
- dev_dbg(buf->mmg->dev, "Found available node in TS kernel CB %p\n",
- requested_offset_record);
- return 0;
+ *pend = req_offset_record;
+
+ return rc;
+}
+
+static int _hl_interrupt_ts_reg_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
+ struct wait_interrupt_data *data,
+ u32 *status, u64 *timestamp)
+{
+ struct hl_user_pending_interrupt *pend;
+ unsigned long flags;
+ int rc = 0;
+
+ hl_ctx_get(ctx);
+
+ data->cq_cb = hl_cb_get(data->mmg, data->cq_handle);
+ if (!data->cq_cb) {
+ rc = -EINVAL;
+ goto put_ctx;
+ }
+
+ /* Validate the cq offset */
+ if (((u64 *) data->cq_cb->kernel_address + data->cq_offset) >=
+ ((u64 *) data->cq_cb->kernel_address + (data->cq_cb->size / sizeof(u64)))) {
+ rc = -EINVAL;
+ goto put_cq_cb;
+ }
+
+ dev_dbg(hdev->dev, "Timestamp registration: interrupt id: %u, handle: 0x%llx, ts offset: %llu, cq_offset: %llu\n",
+ data->interrupt->interrupt_id, data->ts_handle,
+ data->ts_offset, data->cq_offset);
+
+ data->buf = hl_mmap_mem_buf_get(data->mmg, data->ts_handle);
+ if (!data->buf) {
+ rc = -EINVAL;
+ goto put_cq_cb;
+ }
+
+ spin_lock_irqsave(&data->interrupt->ts_list_lock, flags);
+
+ /* get ts buffer record */
+ rc = ts_get_and_handle_kernel_record(hdev, ctx, data, &flags, &pend);
+ if (rc) {
+ spin_unlock_irqrestore(&data->interrupt->ts_list_lock, flags);
+ goto put_ts_buff;
+ }
+
+ /* We check for completion value as interrupt could have been received
+ * before we add the timestamp node to the ts list.
+ */
+ if (*pend->cq_kernel_addr >= data->target_value) {
+ spin_unlock_irqrestore(&data->interrupt->ts_list_lock, flags);
+
+ dev_dbg(hdev->dev, "Target value already reached release ts record: pend: %p, offset: %llu, interrupt: %u\n",
+ pend, data->ts_offset, data->interrupt->interrupt_id);
+
+ pend->ts_reg_info.in_use = 0;
+ *status = HL_WAIT_CS_STATUS_COMPLETED;
+ *pend->ts_reg_info.timestamp_kernel_addr = ktime_get_ns();
+
+ goto put_ts_buff;
+ }
+
+ list_add_tail(&pend->list_node, &data->interrupt->ts_list_head);
+ spin_unlock_irqrestore(&data->interrupt->ts_list_lock, flags);
+
+ rc = *status = HL_WAIT_CS_STATUS_COMPLETED;
+
+ hl_ctx_put(ctx);
+
+ return rc;
+
+put_ts_buff:
+ hl_mmap_mem_buf_put(data->buf);
+put_cq_cb:
+ hl_cb_put(data->cq_cb);
+put_ctx:
+ hl_ctx_put(ctx);
+
+ return rc;
}
static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
- struct hl_mem_mgr *cb_mmg, struct hl_mem_mgr *mmg,
- u64 timeout_us, u64 cq_counters_handle, u64 cq_counters_offset,
- u64 target_value, struct hl_user_interrupt *interrupt,
- bool register_ts_record, u64 ts_handle, u64 ts_offset,
+ struct wait_interrupt_data *data,
u32 *status, u64 *timestamp)
{
struct hl_user_pending_interrupt *pend;
- struct hl_mmap_mem_buf *buf;
- struct hl_cb *cq_cb;
- unsigned long timeout;
+ unsigned long timeout, flags;
long completion_rc;
int rc = 0;
- timeout = hl_usecs64_to_jiffies(timeout_us);
+ timeout = hl_usecs64_to_jiffies(data->intr_timeout_us);
hl_ctx_get(ctx);
- cq_cb = hl_cb_get(cb_mmg, cq_counters_handle);
- if (!cq_cb) {
+ data->cq_cb = hl_cb_get(data->mmg, data->cq_handle);
+ if (!data->cq_cb) {
rc = -EINVAL;
goto put_ctx;
}
/* Validate the cq offset */
- if (((u64 *) cq_cb->kernel_address + cq_counters_offset) >=
- ((u64 *) cq_cb->kernel_address + (cq_cb->size / sizeof(u64)))) {
+ if (((u64 *) data->cq_cb->kernel_address + data->cq_offset) >=
+ ((u64 *) data->cq_cb->kernel_address + (data->cq_cb->size / sizeof(u64)))) {
rc = -EINVAL;
goto put_cq_cb;
}
- if (register_ts_record) {
- dev_dbg(hdev->dev, "Timestamp registration: interrupt id: %u, ts offset: %llu, cq_offset: %llu\n",
- interrupt->interrupt_id, ts_offset, cq_counters_offset);
- buf = hl_mmap_mem_buf_get(mmg, ts_handle);
- if (!buf) {
- rc = -EINVAL;
- goto put_cq_cb;
- }
-
- /* get ts buffer record */
- rc = ts_buff_get_kernel_ts_record(buf, cq_cb, ts_offset,
- cq_counters_offset, target_value,
- &interrupt->wait_list_lock, &pend);
- if (rc)
- goto put_ts_buff;
- } else {
- pend = kzalloc(sizeof(*pend), GFP_KERNEL);
- if (!pend) {
- rc = -ENOMEM;
- goto put_cq_cb;
- }
- hl_fence_init(&pend->fence, ULONG_MAX);
- pend->cq_kernel_addr = (u64 *) cq_cb->kernel_address + cq_counters_offset;
- pend->cq_target_value = target_value;
+ pend = kzalloc(sizeof(*pend), GFP_KERNEL);
+ if (!pend) {
+ rc = -ENOMEM;
+ goto put_cq_cb;
}
- spin_lock(&interrupt->wait_list_lock);
+ hl_fence_init(&pend->fence, ULONG_MAX);
+ pend->cq_kernel_addr = (u64 *) data->cq_cb->kernel_address + data->cq_offset;
+ pend->cq_target_value = data->target_value;
+ spin_lock_irqsave(&data->interrupt->wait_list_lock, flags);
+
/* We check for completion value as interrupt could have been received
- * before we added the node to the wait list
+ * before we add the wait node to the wait list.
*/
- if (*pend->cq_kernel_addr >= target_value) {
- if (register_ts_record)
- pend->ts_reg_info.in_use = 0;
- spin_unlock(&interrupt->wait_list_lock);
+ if (*pend->cq_kernel_addr >= data->target_value || (!data->intr_timeout_us)) {
+ spin_unlock_irqrestore(&data->interrupt->wait_list_lock, flags);
- *status = HL_WAIT_CS_STATUS_COMPLETED;
+ if (*pend->cq_kernel_addr >= data->target_value)
+ *status = HL_WAIT_CS_STATUS_COMPLETED;
+ else
+ *status = HL_WAIT_CS_STATUS_BUSY;
- if (register_ts_record) {
- *pend->ts_reg_info.timestamp_kernel_addr = ktime_get_ns();
- goto put_ts_buff;
- } else {
- pend->fence.timestamp = ktime_get();
- goto set_timestamp;
- }
- } else if (!timeout_us) {
- spin_unlock(&interrupt->wait_list_lock);
- *status = HL_WAIT_CS_STATUS_BUSY;
pend->fence.timestamp = ktime_get();
goto set_timestamp;
}
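
Both the timestamp-registration path and the plain wait path above re-check the CQ counter while holding the list lock, before queuing the node; an interrupt that fired before the node was added can therefore never be missed. The same check-before-sleeping idea in isolation (illustrative types and fields):

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct my_waiter {
        struct list_head list_node;
    };

    /* Returns true if the target was already reached, in which case the
     * waiter is not queued and the caller can report completion directly.
     */
    static bool queue_or_complete(struct my_waiter *w, struct list_head *head,
                      spinlock_t *lock, u64 *counter, u64 target)
    {
        unsigned long flags;
        bool done;

        spin_lock_irqsave(lock, flags);
        done = (*counter >= target);
        if (!done)
            list_add_tail(&w->list_node, head);
        spin_unlock_irqrestore(lock, flags);

        return done;
    }
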
@@ -3366,55 +3458,38 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
* Note that we cannot have sorted list by target value,
* in order to shorten the list pass loop, since
* same list could have nodes for different cq counter handle.
- * Note:
- * Mark ts buff offset as in use here in the spinlock protection area
- * to avoid getting in the re-use section in ts_buff_get_kernel_ts_record
- * before adding the node to the list. this scenario might happen when
- * multiple threads are racing on same offset and one thread could
- * set the ts buff in ts_buff_get_kernel_ts_record then the other thread
- * takes over and get to ts_buff_get_kernel_ts_record and then we will try
- * to re-use the same ts buff offset, and will try to delete a non existing
- * node from the list.
*/
- if (register_ts_record)
- pend->ts_reg_info.in_use = 1;
-
- list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
- spin_unlock(&interrupt->wait_list_lock);
-
- if (register_ts_record) {
- rc = *status = HL_WAIT_CS_STATUS_COMPLETED;
- goto ts_registration_exit;
- }
+ list_add_tail(&pend->list_node, &data->interrupt->wait_list_head);
+ spin_unlock_irqrestore(&data->interrupt->wait_list_lock, flags);
/* Wait for interrupt handler to signal completion */
completion_rc = wait_for_completion_interruptible_timeout(&pend->fence.completion,
timeout);
if (completion_rc > 0) {
- *status = HL_WAIT_CS_STATUS_COMPLETED;
+ if (pend->fence.error == -EIO) {
+ dev_err_ratelimited(hdev->dev,
+ "interrupt based wait ioctl aborted(error:%d) due to a reset cycle initiated\n",
+ pend->fence.error);
+ rc = -EIO;
+ *status = HL_WAIT_CS_STATUS_ABORTED;
+ } else {
+ *status = HL_WAIT_CS_STATUS_COMPLETED;
+ }
} else {
if (completion_rc == -ERESTARTSYS) {
dev_err_ratelimited(hdev->dev,
"user process got signal while waiting for interrupt ID %d\n",
- interrupt->interrupt_id);
+ data->interrupt->interrupt_id);
rc = -EINTR;
*status = HL_WAIT_CS_STATUS_ABORTED;
} else {
- if (pend->fence.error == -EIO) {
- dev_err_ratelimited(hdev->dev,
- "interrupt based wait ioctl aborted(error:%d) due to a reset cycle initiated\n",
- pend->fence.error);
- rc = -EIO;
- *status = HL_WAIT_CS_STATUS_ABORTED;
- } else {
- /* The wait has timed-out. We don't know anything beyond that
- * because the workload wasn't submitted through the driver.
- * Therefore, from driver's perspective, the workload is still
- * executing.
- */
- rc = 0;
- *status = HL_WAIT_CS_STATUS_BUSY;
- }
+ /* The wait has timed-out. We don't know anything beyond that
+ * because the workload was not submitted through the driver.
+ * Therefore, from driver's perspective, the workload is still
+ * executing.
+ */
+ rc = 0;
+ *status = HL_WAIT_CS_STATUS_BUSY;
}
}
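
The restructured completion handling above folds the -EIO case (fence completed with an error during a reset) into the positive-return branch, because wait_for_completion_interruptible_timeout() returns a positive value whenever the completion was signalled, -ERESTARTSYS when a signal interrupted the wait, and 0 on timeout. A minimal decoding sketch:

    #include <linux/completion.h>
    #include <linux/errno.h>

    /* Decode the three possible outcomes of an interruptible, timed wait. */
    static int wait_and_decode(struct completion *done, unsigned long timeout_jiffies)
    {
        long rc = wait_for_completion_interruptible_timeout(done, timeout_jiffies);

        if (rc > 0)
            return 0;           /* completed; rc is the remaining jiffies */
        if (rc == -ERESTARTSYS)
            return -EINTR;      /* a signal interrupted the wait */
        return -ETIMEDOUT;      /* rc == 0: the wait timed out */
    }
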
@@ -3424,23 +3499,20 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
* for ts record, the node will be deleted in the irq handler after
* we reach the target value.
*/
- spin_lock(&interrupt->wait_list_lock);
- list_del(&pend->wait_list_node);
- spin_unlock(&interrupt->wait_list_lock);
+ spin_lock_irqsave(&data->interrupt->wait_list_lock, flags);
+ list_del(&pend->list_node);
+ spin_unlock_irqrestore(&data->interrupt->wait_list_lock, flags);
set_timestamp:
*timestamp = ktime_to_ns(pend->fence.timestamp);
kfree(pend);
- hl_cb_put(cq_cb);
-ts_registration_exit:
+ hl_cb_put(data->cq_cb);
hl_ctx_put(ctx);
return rc;
-put_ts_buff:
- hl_mmap_mem_buf_put(buf);
put_cq_cb:
- hl_cb_put(cq_cb);
+ hl_cb_put(data->cq_cb);
put_ctx:
hl_ctx_put(ctx);
@@ -3454,7 +3526,7 @@ static int _hl_interrupt_wait_ioctl_user_addr(struct hl_device *hdev, struct hl_
u64 *timestamp)
{
struct hl_user_pending_interrupt *pend;
- unsigned long timeout;
+ unsigned long timeout, flags;
u64 completion_value;
long completion_rc;
int rc = 0;
@@ -3474,9 +3546,9 @@ static int _hl_interrupt_wait_ioctl_user_addr(struct hl_device *hdev, struct hl_
/* Add pending user interrupt to relevant list for the interrupt
* handler to monitor
*/
- spin_lock(&interrupt->wait_list_lock);
- list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
- spin_unlock(&interrupt->wait_list_lock);
+ spin_lock_irqsave(&interrupt->wait_list_lock, flags);
+ list_add_tail(&pend->list_node, &interrupt->wait_list_head);
+ spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
/* We check for completion value as interrupt could have been received
* before we added the node to the wait list
@@ -3507,14 +3579,14 @@ wait_again:
* If comparison fails, keep waiting until timeout expires
*/
if (completion_rc > 0) {
- spin_lock(&interrupt->wait_list_lock);
+ spin_lock_irqsave(&interrupt->wait_list_lock, flags);
/* reinit_completion must be called before we check for user
* completion value, otherwise, if interrupt is received after
* the comparison and before the next wait_for_completion,
* we will reach timeout and fail
*/
reinit_completion(&pend->fence.completion);
- spin_unlock(&interrupt->wait_list_lock);
+ spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 8)) {
dev_err(hdev->dev, "Failed to copy completion value from user\n");
@@ -3551,9 +3623,9 @@ wait_again:
}
remove_pending_user_interrupt:
- spin_lock(&interrupt->wait_list_lock);
- list_del(&pend->wait_list_node);
- spin_unlock(&interrupt->wait_list_lock);
+ spin_lock_irqsave(&interrupt->wait_list_lock, flags);
+ list_del(&pend->list_node);
+ spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
*timestamp = ktime_to_ns(pend->fence.timestamp);
@@ -3611,19 +3683,42 @@ static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
return -EINVAL;
}
- if (args->in.flags & HL_WAIT_CS_FLAGS_INTERRUPT_KERNEL_CQ)
- rc = _hl_interrupt_wait_ioctl(hdev, hpriv->ctx, &hpriv->mem_mgr, &hpriv->mem_mgr,
- args->in.interrupt_timeout_us, args->in.cq_counters_handle,
- args->in.cq_counters_offset,
- args->in.target, interrupt,
- !!(args->in.flags & HL_WAIT_CS_FLAGS_REGISTER_INTERRUPT),
- args->in.timestamp_handle, args->in.timestamp_offset,
- &status, &timestamp);
- else
+ if (args->in.flags & HL_WAIT_CS_FLAGS_INTERRUPT_KERNEL_CQ) {
+ struct wait_interrupt_data wait_intr_data = {0};
+
+ wait_intr_data.interrupt = interrupt;
+ wait_intr_data.mmg = &hpriv->mem_mgr;
+ wait_intr_data.cq_handle = args->in.cq_counters_handle;
+ wait_intr_data.cq_offset = args->in.cq_counters_offset;
+ wait_intr_data.ts_handle = args->in.timestamp_handle;
+ wait_intr_data.ts_offset = args->in.timestamp_offset;
+ wait_intr_data.target_value = args->in.target;
+ wait_intr_data.intr_timeout_us = args->in.interrupt_timeout_us;
+
+ if (args->in.flags & HL_WAIT_CS_FLAGS_REGISTER_INTERRUPT) {
+ /*
+ * Allow only one registration at a time. This is needed to prevent issues
+ * when the same offset is re-used: the registration flow is protected only
+ * by the interrupt lock, and the re-use flow might move a ts node to another
+ * interrupt list, in which case we are not protected.
+ */
+ mutex_lock(&hpriv->ctx->ts_reg_lock);
+
+ rc = _hl_interrupt_ts_reg_ioctl(hdev, hpriv->ctx, &wait_intr_data,
+ &status, &timestamp);
+
+ mutex_unlock(&hpriv->ctx->ts_reg_lock);
+ } else
+ rc = _hl_interrupt_wait_ioctl(hdev, hpriv->ctx, &wait_intr_data,
+ &status, &timestamp);
+ } else {
rc = _hl_interrupt_wait_ioctl_user_addr(hdev, hpriv->ctx,
args->in.interrupt_timeout_us, args->in.addr,
args->in.target, interrupt, &status,
&timestamp);
+ }
+
if (rc)
return rc;
@@ -3638,8 +3733,9 @@ static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
return 0;
}
-int hl_wait_ioctl(struct hl_fpriv *hpriv, void *data)
+int hl_wait_ioctl(struct drm_device *ddev, void *data, struct drm_file *file_priv)
{
+ struct hl_fpriv *hpriv = file_priv->driver_priv;
struct hl_device *hdev = hpriv->hdev;
union hl_wait_cs_args *args = data;
u32 flags = args->in.flags;
diff --git a/drivers/accel/habanalabs/common/context.c b/drivers/accel/habanalabs/common/context.c
index 9c8b1b37b5..b83141f583 100644
--- a/drivers/accel/habanalabs/common/context.c
+++ b/drivers/accel/habanalabs/common/context.c
@@ -102,7 +102,7 @@ static void hl_ctx_fini(struct hl_ctx *ctx)
kfree(ctx->cs_pending);
if (ctx->asid != HL_KERNEL_ASID_ID) {
- dev_dbg(hdev->dev, "closing user context %d\n", ctx->asid);
+ dev_dbg(hdev->dev, "closing user context, asid=%u\n", ctx->asid);
/* The engines are stopped as there is no executing CS, but the
* Coresight might be still working by accessing addresses
@@ -119,6 +119,7 @@ static void hl_ctx_fini(struct hl_ctx *ctx)
hl_vm_ctx_fini(ctx);
hl_asid_free(hdev, ctx->asid);
hl_encaps_sig_mgr_fini(hdev, &ctx->sig_mgr);
+ mutex_destroy(&ctx->ts_reg_lock);
} else {
dev_dbg(hdev->dev, "closing kernel context\n");
hdev->asic_funcs->ctx_fini(ctx);
@@ -198,6 +199,7 @@ out_err:
int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
{
+ char task_comm[TASK_COMM_LEN];
int rc = 0, i;
ctx->hdev = hdev;
@@ -267,7 +269,10 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
hl_encaps_sig_mgr_init(&ctx->sig_mgr);
- dev_dbg(hdev->dev, "create user context %d\n", ctx->asid);
+ mutex_init(&ctx->ts_reg_lock);
+
+ dev_dbg(hdev->dev, "create user context, comm=\"%s\", asid=%u\n",
+ get_task_comm(task_comm, current), ctx->asid);
}
return 0;
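
The context changes above pair the new per-context ts_reg_lock with the usual lifecycle (mutex_init() in hl_ctx_init(), mutex_destroy() in hl_ctx_fini()) and log the opening task's name with get_task_comm(). A small sketch of that pairing, on an illustrative structure:

    #include <linux/mutex.h>
    #include <linux/printk.h>
    #include <linux/sched.h>

    struct my_ctx {
        struct mutex reg_lock;
    };

    static void my_ctx_init(struct my_ctx *ctx)
    {
        char comm[TASK_COMM_LEN];

        mutex_init(&ctx->reg_lock);
        /* copy the current task's name into a local buffer for logging */
        get_task_comm(comm, current);
        pr_debug("context created by \"%s\"\n", comm);
    }

    static void my_ctx_fini(struct my_ctx *ctx)
    {
        /* the mutex must be unlocked and unused by the time we get here */
        mutex_destroy(&ctx->reg_lock);
    }
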
diff --git a/drivers/accel/habanalabs/common/debugfs.c b/drivers/accel/habanalabs/common/debugfs.c
index 9e84a47a21..01f071d525 100644
--- a/drivers/accel/habanalabs/common/debugfs.c
+++ b/drivers/accel/habanalabs/common/debugfs.c
@@ -18,8 +18,6 @@
#define MMU_KBUF_SIZE (MMU_ADDR_BUF_SIZE + MMU_ASID_BUF_SIZE)
#define I2C_MAX_TRANSACTION_LEN 8
-static struct dentry *hl_debug_root;
-
static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
u8 i2c_reg, u8 i2c_len, u64 *val)
{
@@ -1788,20 +1786,14 @@ void hl_debugfs_add_device(struct hl_device *hdev)
{
struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
- dev_entry->root = debugfs_create_dir(dev_name(hdev->dev), hl_debug_root);
+ dev_entry->root = hdev->drm.accel->debugfs_root;
add_files_to_device(hdev, dev_entry, dev_entry->root);
+
if (!hdev->asic_prop.fw_security_enabled)
add_secured_nodes(dev_entry, dev_entry->root);
}
-void hl_debugfs_remove_device(struct hl_device *hdev)
-{
- struct hl_dbg_device_entry *entry = &hdev->hl_debugfs;
-
- debugfs_remove_recursive(entry->root);
-}
-
void hl_debugfs_add_file(struct hl_fpriv *hpriv)
{
struct hl_dbg_device_entry *dev_entry = &hpriv->hdev->hl_debugfs;
@@ -1932,13 +1924,3 @@ void hl_debugfs_set_state_dump(struct hl_device *hdev, char *data,
up_write(&dev_entry->state_dump_sem);
}
-
-void __init hl_debugfs_init(void)
-{
- hl_debug_root = debugfs_create_dir("habanalabs", NULL);
-}
-
-void hl_debugfs_fini(void)
-{
- debugfs_remove_recursive(hl_debug_root);
-}
diff --git a/drivers/accel/habanalabs/common/device.c b/drivers/accel/habanalabs/common/device.c
index b97339d1f7..9290d43745 100644
--- a/drivers/accel/habanalabs/common/device.c
+++ b/drivers/accel/habanalabs/common/device.c
@@ -14,11 +14,14 @@
#include <linux/hwmon.h>
#include <linux/vmalloc.h>
+#include <drm/drm_accel.h>
+#include <drm/drm_drv.h>
+
#include <trace/events/habanalabs.h>
#define HL_RESET_DELAY_USEC 10000 /* 10ms */
-#define HL_DEVICE_RELEASE_WATCHDOG_TIMEOUT_SEC 5
+#define HL_DEVICE_RELEASE_WATCHDOG_TIMEOUT_SEC 30
enum dma_alloc_type {
DMA_ALLOC_COHERENT,
@@ -185,7 +188,36 @@ void hl_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, void *
hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, size, vaddr);
}
-int hl_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir)
+int hl_dma_map_sgtable_caller(struct hl_device *hdev, struct sg_table *sgt,
+ enum dma_data_direction dir, const char *caller)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct scatterlist *sg;
+ int rc, i;
+
+ rc = hdev->asic_funcs->dma_map_sgtable(hdev, sgt, dir);
+ if (rc)
+ return rc;
+
+ if (!trace_habanalabs_dma_map_page_enabled())
+ return 0;
+
+ for_each_sgtable_dma_sg(sgt, sg, i)
+ trace_habanalabs_dma_map_page(hdev->dev,
+ page_to_phys(sg_page(sg)),
+ sg->dma_address - prop->device_dma_offset_for_host_access,
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ sg->dma_length,
+#else
+ sg->length,
+#endif
+ dir, caller);
+
+ return 0;
+}
+
+int hl_asic_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt,
+ enum dma_data_direction dir)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct scatterlist *sg;
@@ -203,7 +235,30 @@ int hl_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_da
return 0;
}
-void hl_dma_unmap_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir)
+void hl_dma_unmap_sgtable_caller(struct hl_device *hdev, struct sg_table *sgt,
+ enum dma_data_direction dir, const char *caller)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct scatterlist *sg;
+ int i;
+
+ hdev->asic_funcs->dma_unmap_sgtable(hdev, sgt, dir);
+
+ if (trace_habanalabs_dma_unmap_page_enabled()) {
+ for_each_sgtable_dma_sg(sgt, sg, i)
+ trace_habanalabs_dma_unmap_page(hdev->dev, page_to_phys(sg_page(sg)),
+ sg->dma_address - prop->device_dma_offset_for_host_access,
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ sg->dma_length,
+#else
+ sg->length,
+#endif
+ dir, caller);
+ }
+}
+
+void hl_asic_dma_unmap_sgtable(struct hl_device *hdev, struct sg_table *sgt,
+ enum dma_data_direction dir)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct scatterlist *sg;
@@ -315,7 +370,9 @@ enum hl_device_status hl_device_status(struct hl_device *hdev)
{
enum hl_device_status status;
- if (hdev->reset_info.in_reset) {
+ if (hdev->device_fini_pending) {
+ status = HL_DEVICE_STATUS_MALFUNCTION;
+ } else if (hdev->reset_info.in_reset) {
if (hdev->reset_info.in_compute_reset)
status = HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE;
else
@@ -343,9 +400,9 @@ bool hl_device_operational(struct hl_device *hdev,
*status = current_status;
switch (current_status) {
+ case HL_DEVICE_STATUS_MALFUNCTION:
case HL_DEVICE_STATUS_IN_RESET:
case HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE:
- case HL_DEVICE_STATUS_MALFUNCTION:
case HL_DEVICE_STATUS_NEEDS_RESET:
return false;
case HL_DEVICE_STATUS_OPERATIONAL:
@@ -406,8 +463,6 @@ static void hpriv_release(struct kref *ref)
hdev->asic_funcs->send_device_activity(hdev, false);
- put_pid(hpriv->taskpid);
-
hl_debugfs_remove_file(hpriv);
mutex_destroy(&hpriv->ctx_lock);
@@ -424,7 +479,7 @@ static void hpriv_release(struct kref *ref)
/* Check the device idle status and reset if not idle.
* Skip it if already in reset, or if device is going to be reset in any case.
*/
- if (!hdev->reset_info.in_reset && !reset_device && hdev->pdev && !hdev->pldm)
+ if (!hdev->reset_info.in_reset && !reset_device && !hdev->pldm)
device_is_idle = hdev->asic_funcs->is_device_idle(hdev, idle_mask,
HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL);
if (!device_is_idle) {
@@ -446,14 +501,18 @@ static void hpriv_release(struct kref *ref)
list_del(&hpriv->dev_node);
mutex_unlock(&hdev->fpriv_list_lock);
+ put_pid(hpriv->taskpid);
+
if (reset_device) {
hl_device_reset(hdev, HL_DRV_RESET_DEV_RELEASE);
} else {
/* Scrubbing is handled within hl_device_reset(), so here need to do it directly */
int rc = hdev->asic_funcs->scrub_device_mem(hdev);
- if (rc)
+ if (rc) {
dev_err(hdev->dev, "failed to scrub memory from hpriv release (%d)\n", rc);
+ hl_device_reset(hdev, HL_DRV_RESET_HARD);
+ }
}
/* Now we can mark the compute_ctx as not active. Even if a reset is running in a different
@@ -516,24 +575,20 @@ static void print_device_in_use_info(struct hl_device *hdev, const char *message
}
/*
- * hl_device_release - release function for habanalabs device
- *
- * @inode: pointer to inode structure
- * @filp: pointer to file structure
+ * hl_device_release() - release function for habanalabs device.
+ * @ddev: pointer to DRM device structure.
+ * @file: pointer to DRM file private data structure.
*
* Called when process closes an habanalabs device
*/
-static int hl_device_release(struct inode *inode, struct file *filp)
+void hl_device_release(struct drm_device *ddev, struct drm_file *file_priv)
{
- struct hl_fpriv *hpriv = filp->private_data;
- struct hl_device *hdev = hpriv->hdev;
-
- filp->private_data = NULL;
+ struct hl_fpriv *hpriv = file_priv->driver_priv;
+ struct hl_device *hdev = to_hl_device(ddev);
if (!hdev) {
pr_crit("Closing FD after device was removed. Memory leak will occur and it is advised to reboot.\n");
put_pid(hpriv->taskpid);
- return 0;
+ return;
}
hl_ctx_mgr_fini(hdev, &hpriv->ctx_mgr);
@@ -551,8 +606,6 @@ static int hl_device_release(struct inode *inode, struct file *filp)
}
hdev->last_open_session_duration_jif = jiffies - hdev->last_successful_open_jif;
-
- return 0;
}
static int hl_device_release_ctrl(struct inode *inode, struct file *filp)
@@ -571,11 +624,6 @@ static int hl_device_release_ctrl(struct inode *inode, struct file *filp)
list_del(&hpriv->dev_node);
mutex_unlock(&hdev->fpriv_ctrl_list_lock);
out:
- /* release the eventfd */
- if (hpriv->notifier_event.eventfd)
- eventfd_ctx_put(hpriv->notifier_event.eventfd);
-
- mutex_destroy(&hpriv->notifier_event.lock);
put_pid(hpriv->taskpid);
kfree(hpriv);
@@ -583,18 +631,8 @@ out:
return 0;
}
-/*
- * hl_mmap - mmap function for habanalabs device
- *
- * @*filp: pointer to file structure
- * @*vma: pointer to vm_area_struct of the process
- *
- * Called when process does an mmap on habanalabs device. Call the relevant mmap
- * function at the end of the common code.
- */
-static int hl_mmap(struct file *filp, struct vm_area_struct *vma)
+static int __hl_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
{
- struct hl_fpriv *hpriv = filp->private_data;
struct hl_device *hdev = hpriv->hdev;
unsigned long vm_pgoff;
@@ -617,14 +655,22 @@ static int hl_mmap(struct file *filp, struct vm_area_struct *vma)
return -EINVAL;
}
-static const struct file_operations hl_ops = {
- .owner = THIS_MODULE,
- .open = hl_device_open,
- .release = hl_device_release,
- .mmap = hl_mmap,
- .unlocked_ioctl = hl_ioctl,
- .compat_ioctl = hl_ioctl
-};
+/*
+ * hl_mmap - mmap function for habanalabs device
+ *
+ * @*filp: pointer to file structure
+ * @*vma: pointer to vm_area_struct of the process
+ *
+ * Called when process does an mmap on habanalabs device. Call the relevant mmap
+ * function at the end of the common code.
+ */
+int hl_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct hl_fpriv *hpriv = file_priv->driver_priv;
+
+ return __hl_mmap(hpriv, vma);
+}
static const struct file_operations hl_ctrl_ops = {
.owner = THIS_MODULE,
@@ -645,14 +691,14 @@ static void device_release_func(struct device *dev)
* @hdev: pointer to habanalabs device structure
* @class: pointer to the class object of the device
* @minor: minor number of the specific device
- * @fpos: file operations to install for this device
+ * @fops: file operations to install for this device
* @name: name of the device as it will appear in the filesystem
* @cdev: pointer to the char device object that will be initialized
* @dev: pointer to the device object that will be initialized
*
* Initialize a cdev and a Linux device for habanalabs's device.
*/
-static int device_init_cdev(struct hl_device *hdev, struct class *class,
+static int device_init_cdev(struct hl_device *hdev, const struct class *class,
int minor, const struct file_operations *fops,
char *name, struct cdev *cdev,
struct device **dev)
@@ -676,23 +722,26 @@ static int device_init_cdev(struct hl_device *hdev, struct class *class,
static int cdev_sysfs_debugfs_add(struct hl_device *hdev)
{
+ const struct class *accel_class = hdev->drm.accel->kdev->class;
+ char name[32];
int rc;
- rc = cdev_device_add(&hdev->cdev, hdev->dev);
- if (rc) {
- dev_err(hdev->dev,
- "failed to add a char device to the system\n");
+ hdev->cdev_idx = hdev->drm.accel->index;
+
+ /* Initialize cdev and device structures for the control device */
+ snprintf(name, sizeof(name), "accel_controlD%d", hdev->cdev_idx);
+ rc = device_init_cdev(hdev, accel_class, hdev->cdev_idx, &hl_ctrl_ops, name,
+ &hdev->cdev_ctrl, &hdev->dev_ctrl);
+ if (rc)
return rc;
- }
rc = cdev_device_add(&hdev->cdev_ctrl, hdev->dev_ctrl);
if (rc) {
- dev_err(hdev->dev,
- "failed to add a control char device to the system\n");
- goto delete_cdev_device;
+ dev_err(hdev->dev_ctrl,
+ "failed to add an accel control char device to the system\n");
+ goto free_ctrl_device;
}
- /* hl_sysfs_init() must be done after adding the device to the system */
rc = hl_sysfs_init(hdev);
if (rc) {
dev_err(hdev->dev, "failed to initialize sysfs\n");
@@ -707,23 +756,19 @@ static int cdev_sysfs_debugfs_add(struct hl_device *hdev)
delete_ctrl_cdev_device:
cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);
-delete_cdev_device:
- cdev_device_del(&hdev->cdev, hdev->dev);
+free_ctrl_device:
+ put_device(hdev->dev_ctrl);
return rc;
}
static void cdev_sysfs_debugfs_remove(struct hl_device *hdev)
{
if (!hdev->cdev_sysfs_debugfs_created)
- goto put_devices;
+ return;
- hl_debugfs_remove_device(hdev);
hl_sysfs_fini(hdev);
- cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);
- cdev_device_del(&hdev->cdev, hdev->dev);
-put_devices:
- put_device(hdev->dev);
+ cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);
put_device(hdev->dev_ctrl);
}
@@ -808,6 +853,10 @@ static int device_early_init(struct hl_device *hdev)
gaudi2_set_asic_funcs(hdev);
strscpy(hdev->asic_name, "GAUDI2B", sizeof(hdev->asic_name));
break;
+ case ASIC_GAUDI2C:
+ gaudi2_set_asic_funcs(hdev);
+ strscpy(hdev->asic_name, "GAUDI2C", sizeof(hdev->asic_name));
+ break;
default:
dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
@@ -996,6 +1044,21 @@ static bool is_pci_link_healthy(struct hl_device *hdev)
return (vendor_id == PCI_VENDOR_ID_HABANALABS);
}
+static int hl_device_eq_heartbeat_check(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+
+ if (!prop->cpucp_info.eq_health_check_supported)
+ return 0;
+
+ if (hdev->eq_heartbeat_received)
+ hdev->eq_heartbeat_received = false;
+ else
+ return -EIO;
+
+ return 0;
+}
+
static void hl_device_heartbeat(struct work_struct *work)
{
struct hl_device *hdev = container_of(work, struct hl_device,
@@ -1003,10 +1066,16 @@ static void hl_device_heartbeat(struct work_struct *work)
struct hl_info_fw_err_info info = {0};
u64 event_mask = HL_NOTIFIER_EVENT_DEVICE_RESET | HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE;
- if (!hl_device_operational(hdev, NULL))
+ /* Start heartbeat checks only after driver has enabled events from FW */
+ if (!hl_device_operational(hdev, NULL) || !hdev->init_done)
goto reschedule;
- if (!hdev->asic_funcs->send_heartbeat(hdev))
+ /*
+ * For the EQ health check, the driver must have received the heartbeat EQ event
+ * in order to validate that the EQ is working.
+ * Reschedule only if the EQ is healthy and the next heartbeat was sent successfully.
+ */
+ if ((!hl_device_eq_heartbeat_check(hdev)) && (!hdev->asic_funcs->send_heartbeat(hdev)))
goto reschedule;
if (hl_device_operational(hdev, NULL))
@@ -1062,7 +1131,15 @@ static int device_late_init(struct hl_device *hdev)
hdev->high_pll = hdev->asic_prop.high_pll;
if (hdev->heartbeat) {
+ /*
+ * Before scheduling the heartbeat, the driver checks whether the EQ event has
+ * been received. For the first schedule the indication is set to true; for the
+ * following ones it will be true only if the EQ event was sent by the FW.
+ */
+ hdev->eq_heartbeat_received = true;
+
INIT_DELAYED_WORK(&hdev->work_heartbeat, hl_device_heartbeat);
+
schedule_delayed_work(&hdev->work_heartbeat,
usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
}
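
The heartbeat rework above adds an EQ health flag: the event-queue handler sets eq_heartbeat_received when the firmware heartbeat event arrives, the periodic worker consumes (clears) it and treats a missing event as a failure, and the flag is seeded to true before the first schedule so the initial check cannot fail spuriously. A simplified sketch of a self-rescheduling heartbeat of this kind (illustrative names, not the driver's structures):

    #include <linux/container_of.h>
    #include <linux/jiffies.h>
    #include <linux/workqueue.h>

    #define MY_HEARTBEAT_PERIOD_MS 1000

    struct my_dev {
        struct delayed_work heartbeat_work;
        bool eq_event_received;     /* set from the event-queue handler */
    };

    static void my_heartbeat(struct work_struct *work)
    {
        struct my_dev *dev = container_of(work, struct my_dev, heartbeat_work.work);

        /* consume the flag; if it was not set since the last run, the EQ is stuck */
        if (!dev->eq_event_received)
            return;                 /* trigger error handling / reset instead */
        dev->eq_event_received = false;

        schedule_delayed_work(&dev->heartbeat_work,
                      msecs_to_jiffies(MY_HEARTBEAT_PERIOD_MS));
    }

    static void my_heartbeat_start(struct my_dev *dev)
    {
        dev->eq_event_received = true;  /* seed so the first check passes */
        INIT_DELAYED_WORK(&dev->heartbeat_work, my_heartbeat);
        schedule_delayed_work(&dev->heartbeat_work,
                      msecs_to_jiffies(MY_HEARTBEAT_PERIOD_MS));
    }
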
@@ -1302,18 +1379,18 @@ disable_device:
static int device_kill_open_processes(struct hl_device *hdev, u32 timeout, bool control_dev)
{
struct task_struct *task = NULL;
- struct list_head *fd_list;
- struct hl_fpriv *hpriv;
- struct mutex *fd_lock;
+ struct list_head *hpriv_list;
+ struct hl_fpriv *hpriv;
+ struct mutex *hpriv_lock;
u32 pending_cnt;
- fd_lock = control_dev ? &hdev->fpriv_ctrl_list_lock : &hdev->fpriv_list_lock;
- fd_list = control_dev ? &hdev->fpriv_ctrl_list : &hdev->fpriv_list;
+ hpriv_lock = control_dev ? &hdev->fpriv_ctrl_list_lock : &hdev->fpriv_list_lock;
+ hpriv_list = control_dev ? &hdev->fpriv_ctrl_list : &hdev->fpriv_list;
/* Giving time for user to close FD, and for processes that are inside
* hl_device_open to finish
*/
- if (!list_empty(fd_list))
+ if (!list_empty(hpriv_list))
ssleep(1);
if (timeout) {
@@ -1329,12 +1406,12 @@ static int device_kill_open_processes(struct hl_device *hdev, u32 timeout, bool
}
}
- mutex_lock(fd_lock);
+ mutex_lock(hpriv_lock);
/* This section must be protected because we are dereferencing
* pointers that are freed if the process exits
*/
- list_for_each_entry(hpriv, fd_list, dev_node) {
+ list_for_each_entry(hpriv, hpriv_list, dev_node) {
task = get_pid_task(hpriv->taskpid, PIDTYPE_PID);
if (task) {
dev_info(hdev->dev, "Killing user process pid=%d\n",
@@ -1344,17 +1421,13 @@ static int device_kill_open_processes(struct hl_device *hdev, u32 timeout, bool
put_task_struct(task);
} else {
- /*
- * If we got here, it means that process was killed from outside the driver
- * right after it started looping on fd_list and before get_pid_task, thus
- * we don't need to kill it.
- */
dev_dbg(hdev->dev,
- "Can't get task struct for user process, assuming process was killed from outside the driver\n");
+ "Can't get task struct for user process %d, process was killed from outside the driver\n",
+ pid_nr(hpriv->taskpid));
}
}
- mutex_unlock(fd_lock);
+ mutex_unlock(hpriv_lock);
/*
* We killed the open users, but that doesn't mean they are closed.
@@ -1366,7 +1439,7 @@ static int device_kill_open_processes(struct hl_device *hdev, u32 timeout, bool
*/
wait_for_processes:
- while ((!list_empty(fd_list)) && (pending_cnt)) {
+ while ((!list_empty(hpriv_list)) && (pending_cnt)) {
dev_dbg(hdev->dev,
"Waiting for all unmap operations to finish before hard reset\n");
@@ -1376,7 +1449,7 @@ wait_for_processes:
}
/* All processes exited successfully */
- if (list_empty(fd_list))
+ if (list_empty(hpriv_list))
return 0;
/* Give up waiting for processes to exit */
@@ -1390,17 +1463,17 @@ wait_for_processes:
static void device_disable_open_processes(struct hl_device *hdev, bool control_dev)
{
- struct list_head *fd_list;
+ struct list_head *hpriv_list;
struct hl_fpriv *hpriv;
- struct mutex *fd_lock;
+ struct mutex *hpriv_lock;
- fd_lock = control_dev ? &hdev->fpriv_ctrl_list_lock : &hdev->fpriv_list_lock;
- fd_list = control_dev ? &hdev->fpriv_ctrl_list : &hdev->fpriv_list;
+ hpriv_lock = control_dev ? &hdev->fpriv_ctrl_list_lock : &hdev->fpriv_list_lock;
+ hpriv_list = control_dev ? &hdev->fpriv_ctrl_list : &hdev->fpriv_list;
- mutex_lock(fd_lock);
- list_for_each_entry(hpriv, fd_list, dev_node)
+ mutex_lock(hpriv_lock);
+ list_for_each_entry(hpriv, hpriv_list, dev_node)
hpriv->hdev = NULL;
- mutex_unlock(fd_lock);
+ mutex_unlock(hpriv_lock);
}
static void send_disable_pci_access(struct hl_device *hdev, u32 flags)
@@ -1916,7 +1989,16 @@ int hl_device_cond_reset(struct hl_device *hdev, u32 flags, u64 event_mask)
}
ctx = hl_get_compute_ctx(hdev);
- if (!ctx || !ctx->hpriv->notifier_event.eventfd)
+ if (!ctx)
+ goto device_reset;
+
+ /*
+ * There is no point in postponing the reset if user is not registered for events.
+ * However if no eventfd_ctx exists but the device release watchdog is already scheduled, it
+ * just implies that user has unregistered as part of handling a previous event. In this
+ * case an immediate reset is not required.
+ */
+ if (!ctx->hpriv->notifier_event.eventfd && !hdev->reset_info.watchdog_active)
goto device_reset;
/* Schedule the device release watchdog work unless reset is already in progress or if the
@@ -1928,8 +2010,10 @@ int hl_device_cond_reset(struct hl_device *hdev, u32 flags, u64 event_mask)
goto device_reset;
}
- if (hdev->reset_info.watchdog_active)
+ if (hdev->reset_info.watchdog_active) {
+ hdev->device_release_watchdog_work.flags |= flags;
goto out;
+ }
hdev->device_release_watchdog_work.flags = flags;
dev_dbg(hdev->dev, "Device is going to be hard-reset in %u sec unless being released\n",
@@ -1990,59 +2074,6 @@ void hl_notifier_event_send_all(struct hl_device *hdev, u64 event_mask)
hl_notifier_event_send(&hpriv->notifier_event, event_mask);
mutex_unlock(&hdev->fpriv_list_lock);
-
- /* control device */
- mutex_lock(&hdev->fpriv_ctrl_list_lock);
-
- list_for_each_entry(hpriv, &hdev->fpriv_ctrl_list, dev_node)
- hl_notifier_event_send(&hpriv->notifier_event, event_mask);
-
- mutex_unlock(&hdev->fpriv_ctrl_list_lock);
-}
-
-static int create_cdev(struct hl_device *hdev)
-{
- char *name;
- int rc;
-
- hdev->cdev_idx = hdev->id / 2;
-
- name = kasprintf(GFP_KERNEL, "hl%d", hdev->cdev_idx);
- if (!name) {
- rc = -ENOMEM;
- goto out_err;
- }
-
- /* Initialize cdev and device structures */
- rc = device_init_cdev(hdev, hdev->hclass, hdev->id, &hl_ops, name,
- &hdev->cdev, &hdev->dev);
-
- kfree(name);
-
- if (rc)
- goto out_err;
-
- name = kasprintf(GFP_KERNEL, "hl_controlD%d", hdev->cdev_idx);
- if (!name) {
- rc = -ENOMEM;
- goto free_dev;
- }
-
- /* Initialize cdev and device structures for control device */
- rc = device_init_cdev(hdev, hdev->hclass, hdev->id_control, &hl_ctrl_ops,
- name, &hdev->cdev_ctrl, &hdev->dev_ctrl);
-
- kfree(name);
-
- if (rc)
- goto free_dev;
-
- return 0;
-
-free_dev:
- put_device(hdev->dev);
-out_err:
- return rc;
}
/*
@@ -2057,16 +2088,14 @@ out_err:
int hl_device_init(struct hl_device *hdev)
{
int i, rc, cq_cnt, user_interrupt_cnt, cq_ready_cnt;
+ struct hl_ts_free_jobs *free_jobs_data;
bool expose_interfaces_on_err = false;
-
- rc = create_cdev(hdev);
- if (rc)
- goto out_disabled;
+ void *p;
/* Initialize ASIC function pointers and perform early init */
rc = device_early_init(hdev);
if (rc)
- goto free_dev;
+ goto out_disabled;
user_interrupt_cnt = hdev->asic_prop.user_dec_intr_count +
hdev->asic_prop.user_interrupt_count;
@@ -2078,15 +2107,43 @@ int hl_device_init(struct hl_device *hdev)
rc = -ENOMEM;
goto early_fini;
}
+
+ /* Timestamp records supported only if CQ supported in device */
+ if (hdev->asic_prop.first_available_cq[0] != USHRT_MAX) {
+ for (i = 0 ; i < user_interrupt_cnt ; i++) {
+ p = vzalloc(TIMESTAMP_FREE_NODES_NUM *
+ sizeof(struct timestamp_reg_free_node));
+ if (!p) {
+ rc = -ENOMEM;
+ goto free_usr_intr_mem;
+ }
+ free_jobs_data = &hdev->user_interrupt[i].ts_free_jobs_data;
+ free_jobs_data->free_nodes_pool = p;
+ free_jobs_data->free_nodes_length = TIMESTAMP_FREE_NODES_NUM;
+ free_jobs_data->next_avail_free_node_idx = 0;
+ }
+ }
+ }
+
+ free_jobs_data = &hdev->common_user_cq_interrupt.ts_free_jobs_data;
+ p = vzalloc(TIMESTAMP_FREE_NODES_NUM *
+ sizeof(struct timestamp_reg_free_node));
+ if (!p) {
+ rc = -ENOMEM;
+ goto free_usr_intr_mem;
}
+ free_jobs_data->free_nodes_pool = p;
+ free_jobs_data->free_nodes_length = TIMESTAMP_FREE_NODES_NUM;
+ free_jobs_data->next_avail_free_node_idx = 0;
+
/*
* Start calling ASIC initialization. First S/W then H/W and finally
* late init
*/
rc = hdev->asic_funcs->sw_init(hdev);
if (rc)
- goto free_usr_intr_mem;
+ goto free_common_usr_intr_mem;
/* initialize completion structure for multi CS wait */
@@ -2253,6 +2310,14 @@ int hl_device_init(struct hl_device *hdev)
* From here there is no need to expose them in case of an error.
*/
expose_interfaces_on_err = false;
+
+ rc = drm_dev_register(&hdev->drm, 0);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to register DRM device, rc %d\n", rc);
+ rc = 0;
+ goto out_disabled;
+ }
+
rc = cdev_sysfs_debugfs_add(hdev);
if (rc) {
dev_err(hdev->dev, "Failed to add char devices and sysfs/debugfs files\n");
@@ -2284,8 +2349,6 @@ int hl_device_init(struct hl_device *hdev)
"Successfully added device %s to habanalabs driver\n",
dev_name(&(hdev)->pdev->dev));
- hdev->init_done = true;
-
/* After initialization is done, we are ready to receive events from
* the F/W. We can't do it before because we will ignore events and if
* those events are fatal, we won't know about it and the device will
@@ -2293,6 +2356,8 @@ int hl_device_init(struct hl_device *hdev)
*/
hdev->asic_funcs->enable_events_from_fw(hdev);
+ hdev->init_done = true;
+
return 0;
cb_pool_fini:
@@ -2317,19 +2382,27 @@ hw_queues_destroy:
hl_hw_queues_destroy(hdev);
sw_fini:
hdev->asic_funcs->sw_fini(hdev);
+free_common_usr_intr_mem:
+ vfree(hdev->common_user_cq_interrupt.ts_free_jobs_data.free_nodes_pool);
free_usr_intr_mem:
- kfree(hdev->user_interrupt);
+ if (user_interrupt_cnt) {
+ for (i = 0 ; i < user_interrupt_cnt ; i++) {
+ if (!hdev->user_interrupt[i].ts_free_jobs_data.free_nodes_pool)
+ break;
+ vfree(hdev->user_interrupt[i].ts_free_jobs_data.free_nodes_pool);
+ }
+ kfree(hdev->user_interrupt);
+ }
early_fini:
device_early_fini(hdev);
-free_dev:
- put_device(hdev->dev_ctrl);
- put_device(hdev->dev);
out_disabled:
hdev->disabled = true;
- if (expose_interfaces_on_err)
+ if (expose_interfaces_on_err) {
+ drm_dev_register(&hdev->drm, 0);
cdev_sysfs_debugfs_add(hdev);
- dev_err(&hdev->pdev->dev,
- "Failed to initialize hl%d. Device %s is NOT usable !\n",
+ }
+
+ pr_err("Failed to initialize accel%d. Device %s is NOT usable!\n",
hdev->cdev_idx, dev_name(&hdev->pdev->dev));
return rc;
@@ -2344,12 +2417,13 @@ out_disabled:
*/
void hl_device_fini(struct hl_device *hdev)
{
+ u32 user_interrupt_cnt;
bool device_in_reset;
ktime_t timeout;
u64 reset_sec;
int i, rc;
- dev_info(hdev->dev, "Removing device\n");
+ dev_info(hdev->dev, "Removing device %s\n", dev_name(&(hdev)->pdev->dev));
hdev->device_fini_pending = 1;
flush_delayed_work(&hdev->device_reset_work.reset_work);
@@ -2425,14 +2499,14 @@ void hl_device_fini(struct hl_device *hdev)
hdev->process_kill_trial_cnt = 0;
rc = device_kill_open_processes(hdev, HL_WAIT_PROCESS_KILL_ON_DEVICE_FINI, false);
if (rc) {
- dev_crit(hdev->dev, "Failed to kill all open processes\n");
+ dev_crit(hdev->dev, "Failed to kill all open processes (%d)\n", rc);
device_disable_open_processes(hdev, false);
}
hdev->process_kill_trial_cnt = 0;
rc = device_kill_open_processes(hdev, 0, true);
if (rc) {
- dev_crit(hdev->dev, "Failed to kill all control device open processes\n");
+ dev_crit(hdev->dev, "Failed to kill all control device open processes (%d)\n", rc);
device_disable_open_processes(hdev, true);
}
@@ -2464,7 +2538,20 @@ void hl_device_fini(struct hl_device *hdev)
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
hl_cq_fini(hdev, &hdev->completion_queue[i]);
kfree(hdev->completion_queue);
- kfree(hdev->user_interrupt);
+
+ user_interrupt_cnt = hdev->asic_prop.user_dec_intr_count +
+ hdev->asic_prop.user_interrupt_count;
+
+ if (user_interrupt_cnt) {
+ if (hdev->asic_prop.first_available_cq[0] != USHRT_MAX) {
+ for (i = 0 ; i < user_interrupt_cnt ; i++)
+ vfree(hdev->user_interrupt[i].ts_free_jobs_data.free_nodes_pool);
+ }
+
+ kfree(hdev->user_interrupt);
+ }
+
+ vfree(hdev->common_user_cq_interrupt.ts_free_jobs_data.free_nodes_pool);
hl_hw_queues_destroy(hdev);
@@ -2475,6 +2562,7 @@ void hl_device_fini(struct hl_device *hdev)
/* Hide devices and sysfs/debugfs files from user */
cdev_sysfs_debugfs_remove(hdev);
+ drm_dev_unregister(&hdev->drm);
hl_debugfs_device_fini(hdev);
@@ -2690,6 +2778,20 @@ void hl_handle_fw_err(struct hl_device *hdev, struct hl_info_fw_err_info *info)
*info->event_mask |= HL_NOTIFIER_EVENT_CRITICL_FW_ERR;
}
+void hl_capture_engine_err(struct hl_device *hdev, u16 engine_id, u16 error_count)
+{
+ struct engine_err_info *info = &hdev->captured_err_info.engine_err;
+
+ /* Capture only the first engine error */
+ if (atomic_cmpxchg(&info->event_detected, 0, 1))
+ return;
+
+ info->event.timestamp = ktime_to_ns(ktime_get());
+ info->event.engine_id = engine_id;
+ info->event.error_count = error_count;
+ info->event_info_available = true;
+}
+
void hl_enable_err_info_capture(struct hl_error_info *captured_err_info)
{
vfree(captured_err_info->page_fault_info.user_mappings);
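
hl_capture_engine_err() above uses atomic_cmpxchg() on a detection flag so only the first reported engine error is recorded even if several engines report concurrently; later reports simply return. The same latch pattern in isolation (illustrative names):

    #include <linux/atomic.h>
    #include <linux/ktime.h>
    #include <linux/types.h>

    struct my_err_info {
        atomic_t detected;      /* 0 = nothing captured yet */
        u64 timestamp_ns;
        u16 engine_id;
    };

    static void my_capture_first_error(struct my_err_info *info, u16 engine_id)
    {
        /* only the caller that flips 0 -> 1 gets to fill in the details */
        if (atomic_cmpxchg(&info->detected, 0, 1))
            return;

        info->timestamp_ns = ktime_to_ns(ktime_get());
        info->engine_id = engine_id;
    }
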
diff --git a/drivers/accel/habanalabs/common/firmware_if.c b/drivers/accel/habanalabs/common/firmware_if.c
index acbc1a6b5c..47e8384134 100644
--- a/drivers/accel/habanalabs/common/firmware_if.c
+++ b/drivers/accel/habanalabs/common/firmware_if.c
@@ -6,7 +6,7 @@
*/
#include "habanalabs.h"
-#include "../include/common/hl_boot_if.h"
+#include <linux/habanalabs/hl_boot_if.h>
#include <linux/firmware.h>
#include <linux/crc32.h>
@@ -724,6 +724,11 @@ static bool fw_report_boot_dev0(struct hl_device *hdev, u32 err_val,
err_exists = true;
}
+ if (err_val & CPU_BOOT_ERR0_TMP_THRESH_INIT_FAIL) {
+ dev_err(hdev->dev, "Device boot error - Failed to set threshold for temperature sensor\n");
+ err_exists = true;
+ }
+
if (err_val & CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL) {
/* Ignore this bit, don't prevent driver loading */
dev_dbg(hdev->dev, "device unusable status is set\n");
@@ -1459,6 +1464,10 @@ static void detect_cpu_boot_status(struct hl_device *hdev, u32 status)
dev_err(hdev->dev,
"Device boot progress - Stuck in preboot after security initialization\n");
break;
+ case CPU_BOOT_STATUS_FW_SHUTDOWN_PREP:
+ dev_err(hdev->dev,
+ "Device boot progress - Stuck in preparation for shutdown\n");
+ break;
default:
dev_err(hdev->dev,
"Device boot progress - Invalid or unexpected status code %d\n", status);
@@ -1469,8 +1478,9 @@ static void detect_cpu_boot_status(struct hl_device *hdev, u32 status)
int hl_fw_wait_preboot_ready(struct hl_device *hdev)
{
struct pre_fw_load_props *pre_fw_load = &hdev->fw_loader.pre_fw_load;
- u32 status;
- int rc;
+ u32 status = 0, timeout;
+ int rc, tries = 1;
+ bool preboot_still_runs;
/* Need to check two possible scenarios:
*
@@ -1480,6 +1490,8 @@ int hl_fw_wait_preboot_ready(struct hl_device *hdev)
* All other status values - for older firmwares where the uboot was
* loaded from the FLASH
*/
+ timeout = pre_fw_load->wait_for_preboot_timeout;
+retry:
rc = hl_poll_timeout(
hdev,
pre_fw_load->cpu_boot_status_reg,
@@ -1488,7 +1500,24 @@ int hl_fw_wait_preboot_ready(struct hl_device *hdev)
(status == CPU_BOOT_STATUS_READY_TO_BOOT) ||
(status == CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT),
hdev->fw_poll_interval_usec,
- pre_fw_load->wait_for_preboot_timeout);
+ timeout);
+ /*
+ * If the F/W reports "security-ready", preboot might take longer.
+ * If the field 'wait_for_preboot_extended_timeout' is non-zero, wait again
+ * with that timeout.
+ */
+ preboot_still_runs = (status == CPU_BOOT_STATUS_SECURITY_READY ||
+ status == CPU_BOOT_STATUS_IN_PREBOOT ||
+ status == CPU_BOOT_STATUS_FW_SHUTDOWN_PREP ||
+ status == CPU_BOOT_STATUS_DRAM_RDY);
+
+ if (rc && tries && preboot_still_runs) {
+ tries--;
+ if (pre_fw_load->wait_for_preboot_extended_timeout) {
+ timeout = pre_fw_load->wait_for_preboot_extended_timeout;
+ goto retry;
+ }
+ }
if (rc) {
detect_cpu_boot_status(hdev, status);
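
hl_fw_wait_preboot_ready() above becomes a two-attempt poll: when the first poll times out but the status register still shows a state in which preboot is legitimately running, it retries once with the longer wait_for_preboot_extended_timeout. A generic sketch of that retry-with-extended-timeout shape, with placeholder helpers that are not driver APIs:

    #include <linux/errno.h>
    #include <linux/types.h>

    /* Placeholder helpers, defined only for the sketch. */
    static int poll_status(u32 *status, u64 timeout_us) { *status = 0; return -ETIMEDOUT; }
    static bool still_in_progress(u32 status) { return status != 0; }

    static int wait_ready(u64 timeout_us, u64 extended_timeout_us)
    {
        u32 status = 0;
        int rc, tries = 1;

    retry:
        rc = poll_status(&status, timeout_us);

        /* Timed out, but the reported state says the work is still running:
         * retry once with the extended timeout if one was provided.
         */
        if (rc && tries && still_in_progress(status) && extended_timeout_us) {
            tries--;
            timeout_us = extended_timeout_us;
            goto retry;
        }

        return rc;
    }
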
@@ -2743,7 +2772,8 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
if (!(hdev->fw_components & FW_TYPE_BOOT_CPU)) {
struct lkd_fw_binning_info *binning_info;
- rc = hl_fw_dynamic_request_descriptor(hdev, fw_loader, 0);
+ rc = hl_fw_dynamic_request_descriptor(hdev, fw_loader,
+ sizeof(struct lkd_msg_comms));
if (rc)
goto protocol_err;
@@ -2777,6 +2807,11 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
hdev->decoder_binning, hdev->rotator_binning);
}
+ if (hdev->asic_prop.support_dynamic_resereved_fw_size) {
+ hdev->asic_prop.reserved_fw_mem_size =
+ le32_to_cpu(fw_loader->dynamic_loader.comm_desc.rsvd_mem_size_mb);
+ }
+
return 0;
}
diff --git a/drivers/accel/habanalabs/common/habanalabs.h b/drivers/accel/habanalabs/common/habanalabs.h
index 2f027d5a82..d0fd77bb6a 100644
--- a/drivers/accel/habanalabs/common/habanalabs.h
+++ b/drivers/accel/habanalabs/common/habanalabs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0
*
- * Copyright 2016-2022 HabanaLabs, Ltd.
+ * Copyright 2016-2023 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
@@ -8,7 +8,7 @@
#ifndef HABANALABSP_H_
#define HABANALABSP_H_
-#include "../include/common/cpucp_if.h"
+#include <linux/habanalabs/cpucp_if.h>
#include "../include/common/qman_if.h"
#include "../include/hw_ip/mmu/mmu_general.h"
#include <uapi/drm/habanalabs_accel.h>
@@ -29,6 +29,9 @@
#include <linux/coresight.h>
#include <linux/dma-buf.h>
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+
#include "security.h"
#define HL_NAME "habanalabs"
@@ -82,8 +85,6 @@ struct hl_fpriv;
#define HL_PCI_ELBI_TIMEOUT_MSEC 10 /* 10ms */
-#define HL_SIM_MAX_TIMEOUT_US 100000000 /* 100s */
-
#define HL_INVALID_QUEUE UINT_MAX
#define HL_COMMON_USER_CQ_INTERRUPT_ID 0xFFF
@@ -103,6 +104,8 @@ struct hl_fpriv;
/* MMU */
#define MMU_HASH_TABLE_BITS 7 /* 1 << 7 buckets */
+#define TIMESTAMP_FREE_NODES_NUM 512
+
/**
* enum hl_mmu_page_table_location - mmu page table location
* @MMU_DR_PGT: page-table is located on device DRAM.
@@ -154,6 +157,11 @@ enum hl_mmu_page_table_location {
#define hl_asic_dma_pool_free(hdev, vaddr, dma_addr) \
hl_asic_dma_pool_free_caller(hdev, vaddr, dma_addr, __func__)
+#define hl_dma_map_sgtable(hdev, sgt, dir) \
+ hl_dma_map_sgtable_caller(hdev, sgt, dir, __func__)
+#define hl_dma_unmap_sgtable(hdev, sgt, dir) \
+ hl_dma_unmap_sgtable_caller(hdev, sgt, dir, __func__)
+
/*
* Reset Flags
*
@@ -545,8 +553,7 @@ struct hl_hints_range {
* allocated with huge pages.
* @hints_dram_reserved_va_range: dram hint addresses reserved range.
* @hints_host_reserved_va_range: host hint addresses reserved range.
- * @hints_host_hpage_reserved_va_range: host huge page hint addresses reserved
- * range.
+ * @hints_host_hpage_reserved_va_range: host huge page hint addresses reserved range.
* @sram_base_address: SRAM physical start address.
* @sram_end_address: SRAM physical end address.
* @sram_user_base_address - SRAM physical start address for user access.
@@ -585,7 +592,7 @@ struct hl_hints_range {
* @mmu_pte_size: PTE size in MMU page tables.
* @mmu_hop_table_size: MMU hop table size.
* @mmu_hop0_tables_total_size: total size of MMU hop0 tables.
- * @dram_page_size: page size for MMU DRAM allocation.
+ * @dram_page_size: The DRAM physical page size.
* @cfg_size: configuration space size on SRAM.
* @sram_size: total size of SRAM.
* @max_asid: maximum number of open contexts (ASIDs).
@@ -641,6 +648,7 @@ struct hl_hints_range {
* @glbl_err_cause_num: global err cause number.
* @hbw_flush_reg: register to read to generate HBW flush. value of 0 means HBW flush is
* not supported.
+ * @reserved_fw_mem_size: size in MB of DRAM memory reserved for FW.
* @collective_first_sob: first sync object available for collective use
* @collective_first_mon: first monitor available for collective use
* @sync_stream_first_sob: first sync object available for sync stream use
@@ -686,9 +694,10 @@ struct hl_hints_range {
* @configurable_stop_on_err: is stop-on-error option configurable via debugfs.
* @set_max_power_on_device_init: true if need to set max power in F/W on device init.
* @supports_user_set_page_size: true if user can set the allocation page size.
- * @dma_mask: the dma mask to be set for this device
+ * @dma_mask: the dma mask to be set for this device.
* @supports_advanced_cpucp_rc: true if new cpucp opcodes are supported.
* @supports_engine_modes: true if changing engines/engine_cores modes is supported.
+ * @support_dynamic_resereved_fw_size: true if a dynamic reserved FW memory size is supported.
*/
struct asic_fixed_properties {
struct hw_queue_properties *hw_queues_props;
@@ -772,6 +781,7 @@ struct asic_fixed_properties {
u32 num_of_special_blocks;
u32 glbl_err_cause_num;
u32 hbw_flush_reg;
+ u32 reserved_fw_mem_size;
u16 collective_first_sob;
u16 collective_first_mon;
u16 sync_stream_first_sob;
@@ -808,6 +818,7 @@ struct asic_fixed_properties {
u8 dma_mask;
u8 supports_advanced_cpucp_rc;
u8 supports_engine_modes;
+ u8 support_dynamic_resereved_fw_size;
};
/**
@@ -1098,19 +1109,41 @@ enum hl_user_interrupt_type {
};
/**
+ * struct hl_ts_free_jobs - holds data related to user interrupt ts free nodes
+ * @free_nodes_pool: pool of nodes to be used for free timestamp jobs
+ * @free_nodes_length: number of nodes in free_nodes_pool
+ * @next_avail_free_node_idx: index of the next free node in the pool
+ *
+ * The free nodes pool must be protected by the user interrupt lock
+ * to avoid a race between different interrupts that are using the same
+ * ts buffer with different offsets.
+ */
+struct hl_ts_free_jobs {
+ struct timestamp_reg_free_node *free_nodes_pool;
+ u32 free_nodes_length;
+ u32 next_avail_free_node_idx;
+};
+
+/**
* struct hl_user_interrupt - holds user interrupt information
* @hdev: pointer to the device structure
+ * @ts_free_jobs_data: timestamp free jobs related data
* @type: user interrupt type
* @wait_list_head: head to the list of user threads pending on this interrupt
+ * @ts_list_head: head to the list of timestamp records
* @wait_list_lock: protects wait_list_head
+ * @ts_list_lock: protects ts_list_head
* @timestamp: last timestamp taken upon interrupt
* @interrupt_id: msix interrupt id
*/
struct hl_user_interrupt {
struct hl_device *hdev;
+ struct hl_ts_free_jobs ts_free_jobs_data;
enum hl_user_interrupt_type type;
struct list_head wait_list_head;
+ struct list_head ts_list_head;
spinlock_t wait_list_lock;
+ spinlock_t ts_list_lock;
ktime_t timestamp;
u32 interrupt_id;
};
@@ -1120,11 +1153,15 @@ struct hl_user_interrupt {
* @free_objects_node: node in the list free_obj_jobs
* @cq_cb: pointer to cq command buffer to be freed
* @buf: pointer to timestamp buffer to be freed
+ * @in_use: indicates whether the node is still in use by the workqueue thread.
+ * @dynamic_alloc: indicates whether the node was allocated dynamically in the interrupt handler
*/
struct timestamp_reg_free_node {
struct list_head free_objects_node;
struct hl_cb *cq_cb;
struct hl_mmap_mem_buf *buf;
+ atomic_t in_use;
+ u8 dynamic_alloc;
};
/* struct timestamp_reg_work_obj - holds the timestamp registration free objects job
@@ -1133,17 +1170,21 @@ struct timestamp_reg_free_node {
* @free_obj: workqueue object to free timestamp registration node objects
* @hdev: pointer to the device structure
* @free_obj_head: list of free jobs nodes (node type timestamp_reg_free_node)
+ * @dynamic_alloc_free_obj_head: list of free jobs nodes which were dynamically allocated in the
+ * interrupt handler.
*/
struct timestamp_reg_work_obj {
struct work_struct free_obj;
struct hl_device *hdev;
struct list_head *free_obj_head;
+ struct list_head *dynamic_alloc_free_obj_head;
};
/* struct timestamp_reg_info - holds the timestamp registration related data.
* @buf: pointer to the timestamp buffer which include both user/kernel buffers.
* relevant only when doing timestamps records registration.
* @cq_cb: pointer to CQ counter CB.
* @interrupt: the interrupt on whose wait list the node hangs.
* @timestamp_kernel_addr: timestamp handle address, where to set timestamp
* relevant only when doing timestamps records
* registration.
@@ -1153,17 +1194,18 @@ struct timestamp_reg_work_obj {
* allocating records dynamically.
*/
struct timestamp_reg_info {
- struct hl_mmap_mem_buf *buf;
- struct hl_cb *cq_cb;
- u64 *timestamp_kernel_addr;
- u8 in_use;
+ struct hl_mmap_mem_buf *buf;
+ struct hl_cb *cq_cb;
+ struct hl_user_interrupt *interrupt;
+ u64 *timestamp_kernel_addr;
+ bool in_use;
};
/**
* struct hl_user_pending_interrupt - holds a context to a user thread
* pending on an interrupt
* @ts_reg_info: holds the timestamps registration nodes info
- * @wait_list_node: node in the list of user threads pending on an interrupt
+ * @list_node: node in the list of user threads pending on an interrupt or timestamp
* @fence: hl fence object for interrupt completion
* @cq_target_value: CQ target value
* @cq_kernel_addr: CQ kernel address, to be used in the cq interrupt
@@ -1171,7 +1213,7 @@ struct timestamp_reg_info {
*/
struct hl_user_pending_interrupt {
struct timestamp_reg_info ts_reg_info;
- struct list_head wait_list_node;
+ struct list_head list_node;
struct hl_fence fence;
u64 cq_target_value;
u64 *cq_kernel_addr;
@@ -1220,6 +1262,7 @@ struct hl_dec {
* @ASIC_GAUDI_SEC: Gaudi secured device (HL-2000).
* @ASIC_GAUDI2: Gaudi2 device.
* @ASIC_GAUDI2B: Gaudi2B device.
+ * @ASIC_GAUDI2C: Gaudi2C device.
*/
enum hl_asic_type {
ASIC_INVALID,
@@ -1228,6 +1271,7 @@ enum hl_asic_type {
ASIC_GAUDI_SEC,
ASIC_GAUDI2,
ASIC_GAUDI2B,
+ ASIC_GAUDI2C,
};
struct hl_cs_parser;
@@ -1370,6 +1414,8 @@ struct dynamic_fw_load_mgr {
* @boot_err0_reg: boot_err0 register address
* @boot_err1_reg: boot_err1 register address
* @wait_for_preboot_timeout: timeout to poll for preboot ready
+ * @wait_for_preboot_extended_timeout: timeout to poll for preboot ready in cases where we know
+ * preboot needs more time.
*/
struct pre_fw_load_props {
u32 cpu_boot_status_reg;
@@ -1378,6 +1424,7 @@ struct pre_fw_load_props {
u32 boot_err0_reg;
u32 boot_err1_reg;
u32 wait_for_preboot_timeout;
+ u32 wait_for_preboot_extended_timeout;
};
/**
@@ -1477,11 +1524,9 @@ struct engines_data {
* @asic_dma_pool_free: free small DMA allocation from pool.
* @cpu_accessible_dma_pool_alloc: allocate CPU PQ packet from DMA pool.
* @cpu_accessible_dma_pool_free: free CPU PQ packet from DMA pool.
- * @asic_dma_unmap_single: unmap a single DMA buffer
- * @asic_dma_map_single: map a single buffer to a DMA
- * @hl_dma_unmap_sgtable: DMA unmap scatter-gather table.
+ * @dma_unmap_sgtable: DMA unmap scatter-gather table.
+ * @dma_map_sgtable: DMA map scatter-gather table.
* @cs_parser: parse Command Submission.
- * @asic_dma_map_sgtable: DMA map scatter-gather table.
* @add_end_of_cb_packets: Add packets to the end of CB, if device requires it.
* @update_eq_ci: update event queue CI.
* @context_switch: called upon ASID context switch.
@@ -1602,18 +1647,11 @@ struct hl_asic_funcs {
size_t size, dma_addr_t *dma_handle);
void (*cpu_accessible_dma_pool_free)(struct hl_device *hdev,
size_t size, void *vaddr);
- void (*asic_dma_unmap_single)(struct hl_device *hdev,
- dma_addr_t dma_addr, int len,
- enum dma_data_direction dir);
- dma_addr_t (*asic_dma_map_single)(struct hl_device *hdev,
- void *addr, int len,
+ void (*dma_unmap_sgtable)(struct hl_device *hdev, struct sg_table *sgt,
enum dma_data_direction dir);
- void (*hl_dma_unmap_sgtable)(struct hl_device *hdev,
- struct sg_table *sgt,
+ int (*dma_map_sgtable)(struct hl_device *hdev, struct sg_table *sgt,
enum dma_data_direction dir);
int (*cs_parser)(struct hl_device *hdev, struct hl_cs_parser *parser);
- int (*asic_dma_map_sgtable)(struct hl_device *hdev, struct sg_table *sgt,
- enum dma_data_direction dir);
void (*add_end_of_cb_packets)(struct hl_device *hdev,
void *kernel_address, u32 len,
u32 original_len,
@@ -1771,16 +1809,19 @@ struct hl_cs_counters_atomic {
* @phys_pg_pack: pointer to physical page pack if the dma-buf was exported
* where virtual memory is supported.
* @memhash_hnode: pointer to the memhash node. this object holds the export count.
- * @device_address: physical address of the device's memory. Relevant only
- * if phys_pg_pack is NULL (dma-buf was exported from address).
- * The total size can be taken from the dmabuf object.
+ * @offset: the offset into the buffer from which the memory is exported.
+ * Relevant only if virtual memory is supported and phys_pg_pack is being used.
+ * @device_phys_addr: physical address of the device's memory. Relevant only
+ * if phys_pg_pack is NULL (dma-buf was exported from address).
+ * The total size can be taken from the dmabuf object.
*/
struct hl_dmabuf_priv {
struct dma_buf *dmabuf;
struct hl_ctx *ctx;
struct hl_vm_phys_pg_pack *phys_pg_pack;
struct hl_vm_hash_node *memhash_hnode;
- uint64_t device_address;
+ u64 offset;
+ u64 device_phys_addr;
};
#define HL_CS_OUTCOME_HISTORY_LEN 256
@@ -1835,6 +1876,7 @@ struct hl_cs_outcome_store {
* @va_range: holds available virtual addresses for host and dram mappings.
* @mem_hash_lock: protects the mem_hash.
* @hw_block_list_lock: protects the HW block memory list.
+ * @ts_reg_lock: timestamp registration ioctls lock.
* @debugfs_list: node in debugfs list of contexts.
* @hw_block_mem_list: list of HW block virtual mapped addresses.
* @cs_counters: context command submission counters.
@@ -1871,6 +1913,7 @@ struct hl_ctx {
struct hl_va_range *va_range[HL_VA_RANGE_TYPE_MAX];
struct mutex mem_hash_lock;
struct mutex hw_block_list_lock;
+ struct mutex ts_reg_lock;
struct list_head debugfs_list;
struct list_head hw_block_mem_list;
struct hl_cs_counters_atomic cs_counters;
@@ -1917,17 +1960,17 @@ struct hl_ctx_mgr {
* @dma_mapped: true if the SG was mapped to DMA addresses, false otherwise.
*/
struct hl_userptr {
- enum vm_type vm_type; /* must be first */
- struct list_head job_node;
- struct page **pages;
- unsigned int npages;
- struct sg_table *sgt;
- enum dma_data_direction dir;
- struct list_head debugfs_list;
- pid_t pid;
- u64 addr;
- u64 size;
- u8 dma_mapped;
+ enum vm_type vm_type; /* must be first */
+ struct list_head job_node;
+ struct page **pages;
+ unsigned int npages;
+ struct sg_table *sgt;
+ enum dma_data_direction dir;
+ struct list_head debugfs_list;
+ pid_t pid;
+ u64 addr;
+ u64 size;
+ u8 dma_mapped;
};
/**
@@ -2148,7 +2191,6 @@ struct hl_vm_hw_block_list_node {
* @pages: the physical page array.
* @npages: num physical pages in the pack.
* @total_size: total size of all the pages in this list.
- * @exported_size: buffer exported size.
* @node: used to attach to deletion list that is used when all the allocations are cleared
* at the teardown of the context.
* @mapping_cnt: number of shared mappings.
@@ -2165,7 +2207,6 @@ struct hl_vm_phys_pg_pack {
u64 *pages;
u64 npages;
u64 total_size;
- u64 exported_size;
struct list_head node;
atomic_t mapping_cnt;
u32 asid;
@@ -2250,7 +2291,7 @@ struct hl_notifier_event {
/**
* struct hl_fpriv - process information stored in FD private data.
* @hdev: habanalabs device structure.
- * @filp: pointer to the given file structure.
+ * @file_priv: pointer to the DRM file private data structure.
* @taskpid: current process ID.
* @ctx: current executing context. TODO: remove for multiple ctx per process
* @ctx_mgr: context manager to handle multiple context for this FD.
@@ -2265,7 +2306,7 @@ struct hl_notifier_event {
*/
struct hl_fpriv {
struct hl_device *hdev;
- struct file *filp;
+ struct drm_file *file_priv;
struct pid *taskpid;
struct hl_ctx *ctx;
struct hl_ctx_mgr ctx_mgr;
@@ -2706,6 +2747,8 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
usr_intr.type = intr_type; \
INIT_LIST_HEAD(&usr_intr.wait_list_head); \
spin_lock_init(&usr_intr.wait_list_lock); \
+ INIT_LIST_HEAD(&usr_intr.ts_list_head); \
+ spin_lock_init(&usr_intr.ts_list_lock); \
})
struct hwmon_chip_info;
@@ -3055,6 +3098,20 @@ struct fw_err_info {
};
/**
+ * struct engine_err_info - engine error information.
+ * @event: holds information on the event.
+ * @event_detected: if set to 1, an engine event was discovered for the
+ * first time after the driver finished booting up.
+ * @event_info_available: indicates that an engine event info is now available.
+ */
+struct engine_err_info {
+ struct hl_info_engine_err_event event;
+ atomic_t event_detected;
+ bool event_info_available;
+};
+
+
+/**
* struct hl_error_info - holds information collected during an error.
* @cs_timeout: CS timeout error information.
* @razwi_info: RAZWI information.
@@ -3062,6 +3119,7 @@ struct fw_err_info {
* @page_fault_info: page fault information.
* @hw_err: (fatal) hardware error information.
* @fw_err: firmware error information.
+ * @engine_err: engine error information.
*/
struct hl_error_info {
struct cs_timeout_info cs_timeout;
@@ -3070,6 +3128,7 @@ struct hl_error_info {
struct page_fault_info page_fault_info;
struct hw_err_info hw_err;
struct fw_err_info fw_err;
+ struct engine_err_info engine_err;
};
/**
@@ -3117,8 +3176,7 @@ struct hl_reset_info {
* (required only for PCI address match mode)
* @pcie_bar: array of available PCIe bars virtual addresses.
* @rmmio: configuration area address on SRAM.
- * @hclass: pointer to the habanalabs class.
- * @cdev: related char device.
+ * @drm: related DRM device.
* @cdev_ctrl: char device for control operations only (INFO IOCTL)
* @dev: related kernel basic device structure.
* @dev_ctrl: related kernel device structure for the control device
@@ -3245,8 +3303,7 @@ struct hl_reset_info {
* @rotator_binning: contains mask of rotators engines that is received from the f/w
* which indicates which rotator engines are binned-out(Gaudi3 and above).
* @id: device minor.
- * @id_control: minor of the control device.
- * @cdev_idx: char device index. Used for setting its name.
+ * @cdev_idx: char device index.
* @cpu_pci_msb_addr: 50-bit extension bits for the device CPU's 40-bit
* addresses.
* @is_in_dram_scrub: true if dram scrub operation is on going.
@@ -3289,6 +3346,7 @@ struct hl_reset_info {
* device.
* @supports_ctx_switch: true if a ctx switch is required upon first submission.
* @support_preboot_binning: true if we support read binning info from preboot.
+ * @eq_heartbeat_received: indication that an EQ heartbeat event has been received from the FW.
* @nic_ports_mask: Controls which NIC ports are enabled. Used only for testing.
* @fw_components: Controls which f/w components to load to the device. There are multiple f/w
* stages and sometimes we want to stop at a certain stage. Used only for testing.
@@ -3308,8 +3366,7 @@ struct hl_device {
u64 pcie_bar_phys[HL_PCI_NUM_BARS];
void __iomem *pcie_bar[HL_PCI_NUM_BARS];
void __iomem *rmmio;
- struct class *hclass;
- struct cdev cdev;
+ struct drm_device drm;
struct cdev cdev_ctrl;
struct device *dev;
struct device *dev_ctrl;
@@ -3418,7 +3475,6 @@ struct hl_device {
u32 device_release_watchdog_timeout_sec;
u32 rotator_binning;
u16 id;
- u16 id_control;
u16 cdev_idx;
u16 cpu_pci_msb_addr;
u8 is_in_dram_scrub;
@@ -3451,6 +3507,7 @@ struct hl_device {
u8 reset_upon_device_release;
u8 supports_ctx_switch;
u8 support_preboot_binning;
+ u8 eq_heartbeat_received;
/* Parameters for bring-up to be upstreamed */
u64 nic_ports_mask;
@@ -3582,6 +3639,11 @@ static inline bool hl_mem_area_inside_range(u64 address, u64 size,
return false;
}
+static inline struct hl_device *to_hl_device(struct drm_device *ddev)
+{
+ return container_of(ddev, struct hl_device, drm);
+}
+
/**
* hl_mem_area_crosses_range() - Checks whether address+size crossing a range.
* @address: The start address of the area we want to validate.
@@ -3611,8 +3673,13 @@ void *hl_asic_dma_pool_zalloc_caller(struct hl_device *hdev, size_t size, gfp_t
dma_addr_t *dma_handle, const char *caller);
void hl_asic_dma_pool_free_caller(struct hl_device *hdev, void *vaddr, dma_addr_t dma_addr,
const char *caller);
-int hl_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir);
-void hl_dma_unmap_sgtable(struct hl_device *hdev, struct sg_table *sgt,
+int hl_dma_map_sgtable_caller(struct hl_device *hdev, struct sg_table *sgt,
+ enum dma_data_direction dir, const char *caller);
+void hl_dma_unmap_sgtable_caller(struct hl_device *hdev, struct sg_table *sgt,
+ enum dma_data_direction dir, const char *caller);
+int hl_asic_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt,
+ enum dma_data_direction dir);
+void hl_asic_dma_unmap_sgtable(struct hl_device *hdev, struct sg_table *sgt,
enum dma_data_direction dir);
int hl_access_sram_dram_region(struct hl_device *hdev, u64 addr, u64 *val,
enum debugfs_access_type acc_type, enum pci_region region_type, bool set_dram_bar);
@@ -3620,7 +3687,12 @@ int hl_access_cfg_region(struct hl_device *hdev, u64 addr, u64 *val,
enum debugfs_access_type acc_type);
int hl_access_dev_mem(struct hl_device *hdev, enum pci_region region_type,
u64 addr, u64 *val, enum debugfs_access_type acc_type);
-int hl_device_open(struct inode *inode, struct file *filp);
+
+int hl_mmap(struct file *filp, struct vm_area_struct *vma);
+
+int hl_device_open(struct drm_device *drm, struct drm_file *file_priv);
+void hl_device_release(struct drm_device *ddev, struct drm_file *file_priv);
+
int hl_device_open_ctrl(struct inode *inode, struct file *filp);
bool hl_device_operational(struct hl_device *hdev,
enum hl_device_status *status);
@@ -3652,8 +3724,9 @@ void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q);
irqreturn_t hl_irq_handler_cq(int irq, void *arg);
irqreturn_t hl_irq_handler_eq(int irq, void *arg);
irqreturn_t hl_irq_handler_dec_abnrm(int irq, void *arg);
-irqreturn_t hl_irq_handler_user_interrupt(int irq, void *arg);
+irqreturn_t hl_irq_user_interrupt_handler(int irq, void *arg);
irqreturn_t hl_irq_user_interrupt_thread_handler(int irq, void *arg);
+irqreturn_t hl_irq_eq_error_interrupt_thread_handler(int irq, void *arg);
u32 hl_cq_inc_ptr(u32 ptr);
int hl_asid_init(struct hl_device *hdev);
@@ -3944,16 +4017,14 @@ void hl_handle_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_
u64 *event_mask);
void hl_handle_critical_hw_err(struct hl_device *hdev, u16 event_id, u64 *event_mask);
void hl_handle_fw_err(struct hl_device *hdev, struct hl_info_fw_err_info *info);
+void hl_capture_engine_err(struct hl_device *hdev, u16 engine_id, u16 error_count);
void hl_enable_err_info_capture(struct hl_error_info *captured_err_info);
#ifdef CONFIG_DEBUG_FS
-void hl_debugfs_init(void);
-void hl_debugfs_fini(void);
int hl_debugfs_device_init(struct hl_device *hdev);
void hl_debugfs_device_fini(struct hl_device *hdev);
void hl_debugfs_add_device(struct hl_device *hdev);
-void hl_debugfs_remove_device(struct hl_device *hdev);
void hl_debugfs_add_file(struct hl_fpriv *hpriv);
void hl_debugfs_remove_file(struct hl_fpriv *hpriv);
void hl_debugfs_add_cb(struct hl_cb *cb);
@@ -3972,14 +4043,6 @@ void hl_debugfs_set_state_dump(struct hl_device *hdev, char *data,
#else
-static inline void __init hl_debugfs_init(void)
-{
-}
-
-static inline void hl_debugfs_fini(void)
-{
-}
-
static inline int hl_debugfs_device_init(struct hl_device *hdev)
{
return 0;
@@ -3993,10 +4056,6 @@ static inline void hl_debugfs_add_device(struct hl_device *hdev)
{
}
-static inline void hl_debugfs_remove_device(struct hl_device *hdev)
-{
-}
-
static inline void hl_debugfs_add_file(struct hl_fpriv *hpriv)
{
}
@@ -4108,11 +4167,12 @@ void hl_ack_pb_single_dcore(struct hl_device *hdev, u32 dcore_offset,
const u32 pb_blocks[], u32 blocks_array_size);
/* IOCTLs */
-long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg);
-int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data);
-int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data);
-int hl_wait_ioctl(struct hl_fpriv *hpriv, void *data);
-int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data);
+int hl_info_ioctl(struct drm_device *ddev, void *data, struct drm_file *file_priv);
+int hl_cb_ioctl(struct drm_device *ddev, void *data, struct drm_file *file_priv);
+int hl_cs_ioctl(struct drm_device *ddev, void *data, struct drm_file *file_priv);
+int hl_wait_ioctl(struct drm_device *ddev, void *data, struct drm_file *file_priv);
+int hl_mem_ioctl(struct drm_device *ddev, void *data, struct drm_file *file_priv);
+int hl_debug_ioctl(struct drm_device *ddev, void *data, struct drm_file *file_priv);
#endif /* HABANALABSP_H_ */
diff --git a/drivers/accel/habanalabs/common/habanalabs_drv.c b/drivers/accel/habanalabs/common/habanalabs_drv.c
index 7263e84c1a..51fb04bbe3 100644
--- a/drivers/accel/habanalabs/common/habanalabs_drv.c
+++ b/drivers/accel/habanalabs/common/habanalabs_drv.c
@@ -14,6 +14,11 @@
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
+#include <linux/version.h>
+
+#include <drm/drm_accel.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_ioctl.h>
#define CREATE_TRACE_POINTS
#include <trace/events/habanalabs.h>
@@ -27,7 +32,6 @@ MODULE_DESCRIPTION(HL_DRIVER_DESC);
MODULE_LICENSE("GPL v2");
static int hl_major;
-static struct class *hl_class;
static DEFINE_IDR(hl_devs_idr);
static DEFINE_MUTEX(hl_devs_idr_lock);
@@ -70,6 +74,42 @@ static const struct pci_device_id ids[] = {
};
MODULE_DEVICE_TABLE(pci, ids);
+static const struct drm_ioctl_desc hl_drm_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(HL_INFO, hl_info_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(HL_CB, hl_cb_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(HL_CS, hl_cs_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(HL_WAIT_CS, hl_wait_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(HL_MEMORY, hl_mem_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(HL_DEBUG, hl_debug_ioctl, 0),
+};
+
+static const struct file_operations hl_fops = {
+ .owner = THIS_MODULE,
+ .open = accel_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .compat_ioctl = drm_compat_ioctl,
+ .llseek = noop_llseek,
+ .mmap = hl_mmap
+};
+
+static const struct drm_driver hl_driver = {
+ .driver_features = DRIVER_COMPUTE_ACCEL,
+
+ .name = HL_NAME,
+ .desc = HL_DRIVER_DESC,
+ .major = LINUX_VERSION_MAJOR,
+ .minor = LINUX_VERSION_PATCHLEVEL,
+ .patchlevel = LINUX_VERSION_SUBLEVEL,
+ .date = "20190505",
+
+ .fops = &hl_fops,
+ .open = hl_device_open,
+ .postclose = hl_device_release,
+ .ioctls = hl_drm_ioctls,
+ .num_ioctls = ARRAY_SIZE(hl_drm_ioctls)
+};
+
/*
* get_asic_type - translate device id to asic type
*
@@ -101,6 +141,9 @@ static enum hl_asic_type get_asic_type(struct hl_device *hdev)
case REV_ID_B:
asic_type = ASIC_GAUDI2B;
break;
+ case REV_ID_C:
+ asic_type = ASIC_GAUDI2C;
+ break;
default:
break;
}
@@ -123,43 +166,28 @@ static bool is_asic_secured(enum hl_asic_type asic_type)
}
/*
- * hl_device_open - open function for habanalabs device
- *
- * @inode: pointer to inode structure
- * @filp: pointer to file structure
+ * hl_device_open() - open function for habanalabs device.
+ * @ddev: pointer to DRM device structure.
+ * @file: pointer to DRM file private data structure.
*
* Called when a process opens a habanalabs device.
*/
-int hl_device_open(struct inode *inode, struct file *filp)
+int hl_device_open(struct drm_device *ddev, struct drm_file *file_priv)
{
+ struct hl_device *hdev = to_hl_device(ddev);
enum hl_device_status status;
- struct hl_device *hdev;
struct hl_fpriv *hpriv;
int rc;
- mutex_lock(&hl_devs_idr_lock);
- hdev = idr_find(&hl_devs_idr, iminor(inode));
- mutex_unlock(&hl_devs_idr_lock);
-
- if (!hdev) {
- pr_err("Couldn't find device %d:%d\n",
- imajor(inode), iminor(inode));
- return -ENXIO;
- }
-
hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
if (!hpriv)
return -ENOMEM;
hpriv->hdev = hdev;
- filp->private_data = hpriv;
- hpriv->filp = filp;
-
mutex_init(&hpriv->notifier_event.lock);
mutex_init(&hpriv->restore_phase_mutex);
mutex_init(&hpriv->ctx_lock);
kref_init(&hpriv->refcount);
- nonseekable_open(inode, filp);
hl_ctx_mgr_init(&hpriv->ctx_mgr);
hl_mem_mgr_init(hpriv->hdev->dev, &hpriv->mem_mgr);
@@ -225,6 +253,9 @@ int hl_device_open(struct inode *inode, struct file *filp)
hdev->last_successful_open_jif = jiffies;
hdev->last_successful_open_ktime = ktime_get();
+ file_priv->driver_priv = hpriv;
+ hpriv->file_priv = file_priv;
+
return 0;
out_err:
@@ -232,7 +263,6 @@ out_err:
hl_mem_mgr_fini(&hpriv->mem_mgr);
hl_mem_mgr_idr_destroy(&hpriv->mem_mgr);
hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr);
- filp->private_data = NULL;
mutex_destroy(&hpriv->ctx_lock);
mutex_destroy(&hpriv->restore_phase_mutex);
mutex_destroy(&hpriv->notifier_event.lock);
@@ -268,9 +298,7 @@ int hl_device_open_ctrl(struct inode *inode, struct file *filp)
*/
hpriv->hdev = hdev;
filp->private_data = hpriv;
- hpriv->filp = filp;
- mutex_init(&hpriv->notifier_event.lock);
nonseekable_open(inode, filp);
hpriv->taskpid = get_task_pid(current, PIDTYPE_PID);
@@ -317,7 +345,6 @@ static void copy_kernel_module_params_to_device(struct hl_device *hdev)
hdev->asic_prop.fw_security_enabled = is_asic_secured(hdev->asic_type);
hdev->major = hl_major;
- hdev->hclass = hl_class;
hdev->memory_scrub = memory_scrub;
hdev->reset_on_lockup = reset_on_lockup;
hdev->boot_error_status_mask = boot_error_status_mask;
@@ -383,6 +410,31 @@ static int fixup_device_params(struct hl_device *hdev)
return 0;
}
+static int allocate_device_id(struct hl_device *hdev)
+{
+ int id;
+
+ mutex_lock(&hl_devs_idr_lock);
+ id = idr_alloc(&hl_devs_idr, hdev, 0, HL_MAX_MINORS, GFP_KERNEL);
+ mutex_unlock(&hl_devs_idr_lock);
+
+ if (id < 0) {
+ if (id == -ENOSPC)
+ pr_err("too many devices in the system\n");
+ return -EBUSY;
+ }
+
+ hdev->id = id;
+
+ /*
+ * Initially set to the internal device ID.
+ * It will be updated later, after the DRM device registration, to hold the minor ID.
+ */
+ hdev->cdev_idx = hdev->id;
+
+ return 0;
+}
+
/**
* create_hdev - create habanalabs device instance
*
@@ -395,27 +447,29 @@ static int fixup_device_params(struct hl_device *hdev)
*/
static int create_hdev(struct hl_device **dev, struct pci_dev *pdev)
{
- int main_id, ctrl_id = 0, rc = 0;
struct hl_device *hdev;
+ int rc;
*dev = NULL;
- hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
- if (!hdev)
- return -ENOMEM;
+ hdev = devm_drm_dev_alloc(&pdev->dev, &hl_driver, struct hl_device, drm);
+ if (IS_ERR(hdev))
+ return PTR_ERR(hdev);
+
+ hdev->dev = hdev->drm.dev;
/* Will be NULL in case of simulator device */
hdev->pdev = pdev;
/* Assign status description string */
- strncpy(hdev->status[HL_DEVICE_STATUS_OPERATIONAL], "operational", HL_STR_MAX);
- strncpy(hdev->status[HL_DEVICE_STATUS_IN_RESET], "in reset", HL_STR_MAX);
- strncpy(hdev->status[HL_DEVICE_STATUS_MALFUNCTION], "disabled", HL_STR_MAX);
- strncpy(hdev->status[HL_DEVICE_STATUS_NEEDS_RESET], "needs reset", HL_STR_MAX);
- strncpy(hdev->status[HL_DEVICE_STATUS_IN_DEVICE_CREATION],
- "in device creation", HL_STR_MAX);
- strncpy(hdev->status[HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE],
- "in reset after device release", HL_STR_MAX);
+ strscpy(hdev->status[HL_DEVICE_STATUS_OPERATIONAL], "operational", HL_STR_MAX);
+ strscpy(hdev->status[HL_DEVICE_STATUS_IN_RESET], "in reset", HL_STR_MAX);
+ strscpy(hdev->status[HL_DEVICE_STATUS_MALFUNCTION], "disabled", HL_STR_MAX);
+ strscpy(hdev->status[HL_DEVICE_STATUS_NEEDS_RESET], "needs reset", HL_STR_MAX);
+ strscpy(hdev->status[HL_DEVICE_STATUS_IN_DEVICE_CREATION],
+ "in device creation", HL_STR_MAX);
+ strscpy(hdev->status[HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE],
+ "in reset after device release", HL_STR_MAX);
/* First, we must find out which ASIC are we handling. This is needed
@@ -425,7 +479,7 @@ static int create_hdev(struct hl_device **dev, struct pci_dev *pdev)
if (hdev->asic_type == ASIC_INVALID) {
dev_err(&pdev->dev, "Unsupported ASIC\n");
rc = -ENODEV;
- goto free_hdev;
+ goto out_err;
}
copy_kernel_module_params_to_device(hdev);
@@ -434,42 +488,15 @@ static int create_hdev(struct hl_device **dev, struct pci_dev *pdev)
fixup_device_params(hdev);
- mutex_lock(&hl_devs_idr_lock);
-
- /* Always save 2 numbers, 1 for main device and 1 for control.
- * They must be consecutive
- */
- main_id = idr_alloc(&hl_devs_idr, hdev, 0, HL_MAX_MINORS, GFP_KERNEL);
-
- if (main_id >= 0)
- ctrl_id = idr_alloc(&hl_devs_idr, hdev, main_id + 1,
- main_id + 2, GFP_KERNEL);
-
- mutex_unlock(&hl_devs_idr_lock);
-
- if ((main_id < 0) || (ctrl_id < 0)) {
- if ((main_id == -ENOSPC) || (ctrl_id == -ENOSPC))
- pr_err("too many devices in the system\n");
-
- if (main_id >= 0) {
- mutex_lock(&hl_devs_idr_lock);
- idr_remove(&hl_devs_idr, main_id);
- mutex_unlock(&hl_devs_idr_lock);
- }
-
- rc = -EBUSY;
- goto free_hdev;
- }
-
- hdev->id = main_id;
- hdev->id_control = ctrl_id;
+ rc = allocate_device_id(hdev);
+ if (rc)
+ goto out_err;
*dev = hdev;
return 0;
-free_hdev:
- kfree(hdev);
+out_err:
return rc;
}
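
create_hdev() above no longer kzalloc()s the device: the DRM core allocates the container via devm_drm_dev_alloc(), with struct drm_device embedded in struct hl_device, and to_hl_device() later recovers the container from a drm_device pointer with container_of(). A user-space model of that embed-and-recover pattern, with stand-in struct names (not the driver's real types):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct drm_dev {		/* stand-in for struct drm_device */
	int minor;
};

struct hl_dev {			/* stand-in for struct hl_device */
	int id;
	struct drm_dev drm;	/* embedded, as in the real driver */
};

/* Stand-in for devm_drm_dev_alloc(): allocate the container and return it. */
static struct hl_dev *dev_alloc(void)
{
	return calloc(1, sizeof(struct hl_dev));
}

static struct hl_dev *to_hl_dev(struct drm_dev *ddev)
{
	return container_of(ddev, struct hl_dev, drm);
}

int main(void)
{
	struct hl_dev *hdev = dev_alloc();

	if (!hdev)
		return 1;
	hdev->id = 7;
	printf("recovered id=%d\n", to_hl_dev(&hdev->drm)->id);
	free(hdev);
	return 0;
}

Because the allocation is device-managed, the explicit kfree() in destroy_hdev() is dropped in the hunk that follows.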
@@ -484,10 +511,8 @@ static void destroy_hdev(struct hl_device *hdev)
/* Remove device from the device list */
mutex_lock(&hl_devs_idr_lock);
idr_remove(&hl_devs_idr, hdev->id);
- idr_remove(&hl_devs_idr, hdev->id_control);
mutex_unlock(&hl_devs_idr_lock);
- kfree(hdev);
}
static int hl_pmops_suspend(struct device *dev)
@@ -691,28 +716,16 @@ static int __init hl_init(void)
hl_major = MAJOR(dev);
- hl_class = class_create(HL_NAME);
- if (IS_ERR(hl_class)) {
- pr_err("failed to allocate class\n");
- rc = PTR_ERR(hl_class);
- goto remove_major;
- }
-
- hl_debugfs_init();
-
rc = pci_register_driver(&hl_pci_driver);
if (rc) {
pr_err("failed to register pci device\n");
- goto remove_debugfs;
+ goto remove_major;
}
pr_debug("driver loaded\n");
return 0;
-remove_debugfs:
- hl_debugfs_fini();
- class_destroy(hl_class);
remove_major:
unregister_chrdev_region(MKDEV(hl_major, 0), HL_MAX_MINORS);
return rc;
@@ -725,14 +738,6 @@ static void __exit hl_exit(void)
{
pci_unregister_driver(&hl_pci_driver);
- /*
- * Removing debugfs must be after all devices or simulator devices
- * have been removed because otherwise we get a bug in the
- * debugfs module for referencing NULL objects
- */
- hl_debugfs_fini();
-
- class_destroy(hl_class);
unregister_chrdev_region(MKDEV(hl_major, 0), HL_MAX_MINORS);
idr_destroy(&hl_devs_idr);
diff --git a/drivers/accel/habanalabs/common/habanalabs_ioctl.c b/drivers/accel/habanalabs/common/habanalabs_ioctl.c
index a7f6c54c12..a7cd625d82 100644
--- a/drivers/accel/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/accel/habanalabs/common/habanalabs_ioctl.c
@@ -17,6 +17,8 @@
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
+#include <asm/msr.h>
+
static u32 hl_debug_struct_size[HL_DEBUG_OP_TIMESTAMP + 1] = {
[HL_DEBUG_OP_ETR] = sizeof(struct hl_debug_params_etr),
[HL_DEBUG_OP_ETF] = sizeof(struct hl_debug_params_etf),
@@ -320,6 +322,7 @@ static int time_sync_info(struct hl_device *hdev, struct hl_info_args *args)
time_sync.device_time = hdev->asic_funcs->get_device_time(hdev);
time_sync.host_time = ktime_get_raw_ns();
+ time_sync.tsc_time = rdtsc();
return copy_to_user(out, &time_sync,
min((size_t) max_size, sizeof(time_sync))) ? -EFAULT : 0;
@@ -875,6 +878,28 @@ static int fw_err_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
return rc ? -EFAULT : 0;
}
+static int engine_err_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+ void __user *user_buf = (void __user *) (uintptr_t) args->return_pointer;
+ struct hl_device *hdev = hpriv->hdev;
+ u32 user_buf_size = args->return_size;
+ struct engine_err_info *info;
+ int rc;
+
+ if (!user_buf)
+ return -EINVAL;
+
+ info = &hdev->captured_err_info.engine_err;
+ if (!info->event_info_available)
+ return 0;
+
+ if (user_buf_size < sizeof(struct hl_info_engine_err_event))
+ return -ENOMEM;
+
+ rc = copy_to_user(user_buf, &info->event, sizeof(struct hl_info_engine_err_event));
+ return rc ? -EFAULT : 0;
+}
+
static int send_fw_generic_request(struct hl_device *hdev, struct hl_info_args *info_args)
{
void __user *buff = (void __user *) (uintptr_t) info_args->return_pointer;
@@ -1001,6 +1026,9 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
case HL_INFO_FW_ERR_EVENT:
return fw_err_info(hpriv, args);
+ case HL_INFO_USER_ENGINE_ERR_EVENT:
+ return engine_err_info(hpriv, args);
+
case HL_INFO_DRAM_USAGE:
return dram_usage_info(hpriv, args);
default:
@@ -1070,20 +1098,34 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
return rc;
}
-static int hl_info_ioctl(struct hl_fpriv *hpriv, void *data)
+int hl_info_ioctl(struct drm_device *ddev, void *data, struct drm_file *file_priv)
{
+ struct hl_fpriv *hpriv = file_priv->driver_priv;
+
return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev);
}
static int hl_info_ioctl_control(struct hl_fpriv *hpriv, void *data)
{
+ struct hl_info_args *args = data;
+
+ switch (args->op) {
+ case HL_INFO_GET_EVENTS:
+ case HL_INFO_UNREGISTER_EVENTFD:
+ case HL_INFO_REGISTER_EVENTFD:
+ return -EOPNOTSUPP;
+ default:
+ break;
+ }
+
return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev_ctrl);
}
-static int hl_debug_ioctl(struct hl_fpriv *hpriv, void *data)
+int hl_debug_ioctl(struct drm_device *ddev, void *data, struct drm_file *file_priv)
{
- struct hl_debug_args *args = data;
+ struct hl_fpriv *hpriv = file_priv->driver_priv;
struct hl_device *hdev = hpriv->hdev;
+ struct hl_debug_args *args = data;
enum hl_device_status status;
int rc = 0;
@@ -1126,25 +1168,15 @@ static int hl_debug_ioctl(struct hl_fpriv *hpriv, void *data)
}
#define HL_IOCTL_DEF(ioctl, _func) \
- [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func}
-
-static const struct hl_ioctl_desc hl_ioctls[] = {
- HL_IOCTL_DEF(HL_IOCTL_INFO, hl_info_ioctl),
- HL_IOCTL_DEF(HL_IOCTL_CB, hl_cb_ioctl),
- HL_IOCTL_DEF(HL_IOCTL_CS, hl_cs_ioctl),
- HL_IOCTL_DEF(HL_IOCTL_WAIT_CS, hl_wait_ioctl),
- HL_IOCTL_DEF(HL_IOCTL_MEMORY, hl_mem_ioctl),
- HL_IOCTL_DEF(HL_IOCTL_DEBUG, hl_debug_ioctl)
-};
+ [_IOC_NR(ioctl) - HL_COMMAND_START] = {.cmd = ioctl, .func = _func}
static const struct hl_ioctl_desc hl_ioctls_control[] = {
- HL_IOCTL_DEF(HL_IOCTL_INFO, hl_info_ioctl_control)
+ HL_IOCTL_DEF(DRM_IOCTL_HL_INFO, hl_info_ioctl_control)
};
-static long _hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg,
- const struct hl_ioctl_desc *ioctl, struct device *dev)
+static long _hl_ioctl(struct hl_fpriv *hpriv, unsigned int cmd, unsigned long arg,
+ const struct hl_ioctl_desc *ioctl, struct device *dev)
{
- struct hl_fpriv *hpriv = filep->private_data;
unsigned int nr = _IOC_NR(cmd);
char stack_kdata[128] = {0};
char *kdata = NULL;
@@ -1194,9 +1226,13 @@ static long _hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg,
retcode = -EFAULT;
out_err:
- if (retcode)
- dev_dbg_ratelimited(dev, "error in ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
- task_pid_nr(current), cmd, nr);
+ if (retcode) {
+ char task_comm[TASK_COMM_LEN];
+
+ dev_dbg_ratelimited(dev,
+ "error in ioctl: pid=%d, comm=\"%s\", cmd=%#010x, nr=%#04x\n",
+ task_pid_nr(current), get_task_comm(task_comm, current), cmd, nr);
+ }
if (kdata != stack_kdata)
kfree(kdata);
@@ -1204,29 +1240,6 @@ out_err:
return retcode;
}
-long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
-{
- struct hl_fpriv *hpriv = filep->private_data;
- struct hl_device *hdev = hpriv->hdev;
- const struct hl_ioctl_desc *ioctl = NULL;
- unsigned int nr = _IOC_NR(cmd);
-
- if (!hdev) {
- pr_err_ratelimited("Sending ioctl after device was removed! Please close FD\n");
- return -ENODEV;
- }
-
- if ((nr >= HL_COMMAND_START) && (nr < HL_COMMAND_END)) {
- ioctl = &hl_ioctls[nr];
- } else {
- dev_dbg_ratelimited(hdev->dev, "invalid ioctl: pid=%d, nr=0x%02x\n",
- task_pid_nr(current), nr);
- return -ENOTTY;
- }
-
- return _hl_ioctl(filep, cmd, arg, ioctl, hdev->dev);
-}
-
long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg)
{
struct hl_fpriv *hpriv = filep->private_data;
@@ -1239,13 +1252,16 @@ long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg)
return -ENODEV;
}
- if (nr == _IOC_NR(HL_IOCTL_INFO)) {
- ioctl = &hl_ioctls_control[nr];
+ if (nr == _IOC_NR(DRM_IOCTL_HL_INFO)) {
+ ioctl = &hl_ioctls_control[nr - HL_COMMAND_START];
} else {
- dev_dbg_ratelimited(hdev->dev_ctrl, "invalid ioctl: pid=%d, nr=0x%02x\n",
- task_pid_nr(current), nr);
+ char task_comm[TASK_COMM_LEN];
+
+ dev_dbg_ratelimited(hdev->dev_ctrl,
+ "invalid ioctl: pid=%d, comm=\"%s\", cmd=%#010x, nr=%#04x\n",
+ task_pid_nr(current), get_task_comm(task_comm, current), cmd, nr);
return -ENOTTY;
}
- return _hl_ioctl(filep, cmd, arg, ioctl, hdev->dev_ctrl);
+ return _hl_ioctl(hpriv, cmd, arg, ioctl, hdev->dev_ctrl);
}
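
With the move to DRM, driver ioctl numbers now start at a command base, so the remaining control-device table is indexed by _IOC_NR(cmd) - HL_COMMAND_START rather than the raw ioctl number. A small stand-alone sketch of that offset-indexed dispatch; the names and numeric values below are illustrative, not the driver's real ones:

#include <stdio.h>

#define CMD_START 0x40			/* e.g. the DRM command base */
#define CMD_INFO  (CMD_START + 1)

struct ioctl_desc {
	unsigned int cmd;
	int (*func)(void *data);
};

static int info_handler(void *data) { (void)data; return 0; }

/* Table indexed by (nr - CMD_START), mirroring the HL_IOCTL_DEF() change. */
static const struct ioctl_desc ioctls[] = {
	[CMD_INFO - CMD_START] = { .cmd = CMD_INFO, .func = info_handler },
};

static int dispatch(unsigned int nr, void *data)
{
	if (nr < CMD_START || nr - CMD_START >= sizeof(ioctls) / sizeof(ioctls[0]))
		return -1;			/* -ENOTTY in the driver */
	if (!ioctls[nr - CMD_START].func)
		return -1;
	return ioctls[nr - CMD_START].func(data);
}

int main(void)
{
	printf("info rc=%d, bogus rc=%d\n", dispatch(CMD_INFO, NULL), dispatch(0x7f, NULL));
	return 0;
}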
diff --git a/drivers/accel/habanalabs/common/irq.c b/drivers/accel/habanalabs/common/irq.c
index b1010d206c..978b7f4d5e 100644
--- a/drivers/accel/habanalabs/common/irq.c
+++ b/drivers/accel/habanalabs/common/irq.c
@@ -204,8 +204,10 @@ static void hl_ts_free_objects(struct work_struct *work)
{
struct timestamp_reg_work_obj *job =
container_of(work, struct timestamp_reg_work_obj, free_obj);
+ struct list_head *dynamic_alloc_free_list_head = job->dynamic_alloc_free_obj_head;
struct timestamp_reg_free_node *free_obj, *temp_free_obj;
struct list_head *free_list_head = job->free_obj_head;
+
struct hl_device *hdev = job->hdev;
list_for_each_entry_safe(free_obj, temp_free_obj, free_list_head, free_objects_node) {
@@ -215,10 +217,28 @@ static void hl_ts_free_objects(struct work_struct *work)
hl_mmap_mem_buf_put(free_obj->buf);
hl_cb_put(free_obj->cq_cb);
- kfree(free_obj);
+ atomic_set(&free_obj->in_use, 0);
}
kfree(free_list_head);
+
+ if (dynamic_alloc_free_list_head) {
+ list_for_each_entry_safe(free_obj, temp_free_obj, dynamic_alloc_free_list_head,
+ free_objects_node) {
+ dev_dbg(hdev->dev,
+ "Dynamic_Alloc list: About to put refcount to buf (%p) cq_cb(%p)\n",
+ free_obj->buf,
+ free_obj->cq_cb);
+
+ hl_mmap_mem_buf_put(free_obj->buf);
+ hl_cb_put(free_obj->cq_cb);
+ list_del(&free_obj->free_objects_node);
+ kfree(free_obj);
+ }
+
+ kfree(dynamic_alloc_free_list_head);
+ }
+
kfree(job);
}
@@ -233,11 +253,18 @@ static void hl_ts_free_objects(struct work_struct *work)
* list to a dedicated workqueue to do the actual put.
*/
static int handle_registration_node(struct hl_device *hdev, struct hl_user_pending_interrupt *pend,
- struct list_head **free_list, ktime_t now)
+ struct list_head **free_list,
+ struct list_head **dynamic_alloc_list,
+ struct hl_user_interrupt *intr)
{
+ struct hl_ts_free_jobs *ts_free_jobs_data;
struct timestamp_reg_free_node *free_node;
+ u32 free_node_index;
u64 timestamp;
+ ts_free_jobs_data = &intr->ts_free_jobs_data;
+ free_node_index = ts_free_jobs_data->next_avail_free_node_idx;
+
if (!(*free_list)) {
/* Alloc/Init the timestamp registration free objects list */
*free_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC);
@@ -247,39 +274,65 @@ static int handle_registration_node(struct hl_device *hdev, struct hl_user_pendi
INIT_LIST_HEAD(*free_list);
}
- free_node = kmalloc(sizeof(*free_node), GFP_ATOMIC);
- if (!free_node)
- return -ENOMEM;
+ free_node = &ts_free_jobs_data->free_nodes_pool[free_node_index];
+ if (atomic_cmpxchg(&free_node->in_use, 0, 1)) {
+ dev_dbg(hdev->dev,
+ "Timestamp free node pool is full, buff: %p, record: %p, irq: %u\n",
+ pend->ts_reg_info.buf,
+ pend,
+ intr->interrupt_id);
- timestamp = ktime_to_ns(now);
+ if (!(*dynamic_alloc_list)) {
+ *dynamic_alloc_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC);
+ if (!(*dynamic_alloc_list))
+ return -ENOMEM;
- *pend->ts_reg_info.timestamp_kernel_addr = timestamp;
+ INIT_LIST_HEAD(*dynamic_alloc_list);
+ }
+
+ free_node = kmalloc(sizeof(struct timestamp_reg_free_node), GFP_ATOMIC);
+ if (!free_node)
+ return -ENOMEM;
+
+ free_node->dynamic_alloc = 1;
+ }
- dev_dbg(hdev->dev, "Timestamp is set to ts cb address (%p), ts: 0x%llx\n",
- pend->ts_reg_info.timestamp_kernel_addr,
- *(u64 *)pend->ts_reg_info.timestamp_kernel_addr);
+ timestamp = ktime_to_ns(intr->timestamp);
- list_del(&pend->wait_list_node);
+ *pend->ts_reg_info.timestamp_kernel_addr = timestamp;
+
+ dev_dbg(hdev->dev, "Irq handle: Timestamp record (%p) ts cb address (%p), interrupt_id: %u\n",
+ pend, pend->ts_reg_info.timestamp_kernel_addr, intr->interrupt_id);
- /* Mark kernel CB node as free */
- pend->ts_reg_info.in_use = 0;
+ list_del(&pend->list_node);
/* Putting the refcount for ts_buff and cq_cb objects will be handled
* in workqueue context, just add job to free_list.
*/
free_node->buf = pend->ts_reg_info.buf;
free_node->cq_cb = pend->ts_reg_info.cq_cb;
- list_add(&free_node->free_objects_node, *free_list);
+
+ if (free_node->dynamic_alloc) {
+ list_add(&free_node->free_objects_node, *dynamic_alloc_list);
+ } else {
+ ts_free_jobs_data->next_avail_free_node_idx =
+ (++free_node_index) % ts_free_jobs_data->free_nodes_length;
+ list_add(&free_node->free_objects_node, *free_list);
+ }
+
+ /* Mark TS record as free */
+ pend->ts_reg_info.in_use = false;
return 0;
}
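
The rewritten handle_registration_node() above replaces a per-event kmalloc() with a pre-allocated pool: a node is claimed by a compare-and-swap on its in_use flag, and only when that slot is still busy does the handler fall back to a GFP_ATOMIC allocation tracked on a separate list. A user-space sketch of the claim-or-fallback pattern using C11 atomics (pool size and types are made up for illustration):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define POOL_LEN 4

struct free_node {
	atomic_int in_use;
	bool dynamic_alloc;
};

static struct free_node pool[POOL_LEN];
static unsigned int next_idx;

/* Claim the next pool slot, or fall back to a heap allocation if it is busy. */
static struct free_node *get_free_node(void)
{
	struct free_node *node = &pool[next_idx];
	int expected = 0;

	if (!atomic_compare_exchange_strong(&node->in_use, &expected, 1)) {
		node = malloc(sizeof(*node));
		if (!node)
			return NULL;
		node->dynamic_alloc = true;	/* freed by the consumer, never returned to the pool */
		return node;
	}

	node->dynamic_alloc = false;
	next_idx = (next_idx + 1) % POOL_LEN;	/* advance only when a pool slot was taken */
	return node;
}

static void put_free_node(struct free_node *node)
{
	if (node->dynamic_alloc)
		free(node);
	else
		atomic_store(&node->in_use, 0);
}

int main(void)
{
	struct free_node *n = get_free_node();

	if (n) {
		printf("dynamic=%d\n", n->dynamic_alloc);
		put_free_node(n);
	}
	return 0;
}

In the driver the "put" side runs in workqueue context (hl_ts_free_objects() above), which is why pooled nodes are released by clearing in_use while dynamically allocated ones are kfree()d.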
-static void handle_user_interrupt(struct hl_device *hdev, struct hl_user_interrupt *intr)
+static void handle_user_interrupt_ts_list(struct hl_device *hdev, struct hl_user_interrupt *intr)
{
+ struct list_head *ts_reg_free_list_head = NULL, *dynamic_alloc_list_head = NULL;
struct hl_user_pending_interrupt *pend, *temp_pend;
- struct list_head *ts_reg_free_list_head = NULL;
struct timestamp_reg_work_obj *job;
bool reg_node_handle_fail = false;
+ unsigned long flags;
int rc;
/* For registration nodes:
@@ -288,36 +341,32 @@ static void handle_user_interrupt(struct hl_device *hdev, struct hl_user_interru
* or in irq handler context at all (since release functions are long and
* might sleep), so we will need to handle that part in workqueue context.
* To avoid handling kmalloc failure which compels us rolling back actions
- * and move nodes hanged on the free list back to the interrupt wait list
+ * and move nodes hanging on the free list back to the interrupt ts list
* we always alloc the job of the WQ at the beginning.
*/
job = kmalloc(sizeof(*job), GFP_ATOMIC);
if (!job)
return;
- spin_lock(&intr->wait_list_lock);
- list_for_each_entry_safe(pend, temp_pend, &intr->wait_list_head, wait_list_node) {
+ spin_lock_irqsave(&intr->ts_list_lock, flags);
+ list_for_each_entry_safe(pend, temp_pend, &intr->ts_list_head, list_node) {
if ((pend->cq_kernel_addr && *(pend->cq_kernel_addr) >= pend->cq_target_value) ||
!pend->cq_kernel_addr) {
- if (pend->ts_reg_info.buf) {
- if (!reg_node_handle_fail) {
- rc = handle_registration_node(hdev, pend,
- &ts_reg_free_list_head, intr->timestamp);
- if (rc)
- reg_node_handle_fail = true;
- }
- } else {
- /* Handle wait target value node */
- pend->fence.timestamp = intr->timestamp;
- complete_all(&pend->fence.completion);
+ if (!reg_node_handle_fail) {
+ rc = handle_registration_node(hdev, pend,
+ &ts_reg_free_list_head,
+ &dynamic_alloc_list_head, intr);
+ if (rc)
+ reg_node_handle_fail = true;
}
}
}
- spin_unlock(&intr->wait_list_lock);
+ spin_unlock_irqrestore(&intr->ts_list_lock, flags);
if (ts_reg_free_list_head) {
INIT_WORK(&job->free_obj, hl_ts_free_objects);
job->free_obj_head = ts_reg_free_list_head;
+ job->dynamic_alloc_free_obj_head = dynamic_alloc_list_head;
job->hdev = hdev;
queue_work(hdev->ts_free_obj_wq, &job->free_obj);
} else {
@@ -325,6 +374,23 @@ static void handle_user_interrupt(struct hl_device *hdev, struct hl_user_interru
}
}
+static void handle_user_interrupt_wait_list(struct hl_device *hdev, struct hl_user_interrupt *intr)
+{
+ struct hl_user_pending_interrupt *pend, *temp_pend;
+ unsigned long flags;
+
+ spin_lock_irqsave(&intr->wait_list_lock, flags);
+ list_for_each_entry_safe(pend, temp_pend, &intr->wait_list_head, list_node) {
+ if ((pend->cq_kernel_addr && *(pend->cq_kernel_addr) >= pend->cq_target_value) ||
+ !pend->cq_kernel_addr) {
+ /* Handle wait target value node */
+ pend->fence.timestamp = intr->timestamp;
+ complete_all(&pend->fence.completion);
+ }
+ }
+ spin_unlock_irqrestore(&intr->wait_list_lock, flags);
+}
+
static void handle_tpc_interrupt(struct hl_device *hdev)
{
u64 event_mask;
@@ -346,19 +412,38 @@ static void handle_unexpected_user_interrupt(struct hl_device *hdev)
}
/**
- * hl_irq_handler_user_interrupt - irq handler for user interrupts
+ * hl_irq_user_interrupt_handler - irq handler for user interrupts.
*
* @irq: irq number
* @arg: pointer to user interrupt structure
- *
*/
-irqreturn_t hl_irq_handler_user_interrupt(int irq, void *arg)
+irqreturn_t hl_irq_user_interrupt_handler(int irq, void *arg)
{
struct hl_user_interrupt *user_int = arg;
+ struct hl_device *hdev = user_int->hdev;
user_int->timestamp = ktime_get();
+ switch (user_int->type) {
+ case HL_USR_INTERRUPT_CQ:
+ /* First handle user waiters threads */
+ handle_user_interrupt_wait_list(hdev, &hdev->common_user_cq_interrupt);
+ handle_user_interrupt_wait_list(hdev, user_int);
- return IRQ_WAKE_THREAD;
+ /* Second handle user timestamp registrations */
+ handle_user_interrupt_ts_list(hdev, &hdev->common_user_cq_interrupt);
+ handle_user_interrupt_ts_list(hdev, user_int);
+ break;
+ case HL_USR_INTERRUPT_DECODER:
+ handle_user_interrupt_wait_list(hdev, &hdev->common_decoder_interrupt);
+
+ /* Handle decoder interrupt registered on this specific irq */
+ handle_user_interrupt_wait_list(hdev, user_int);
+ break;
+ default:
+ break;
+ }
+
+ return IRQ_HANDLED;
}
/**
@@ -374,19 +459,8 @@ irqreturn_t hl_irq_user_interrupt_thread_handler(int irq, void *arg)
struct hl_user_interrupt *user_int = arg;
struct hl_device *hdev = user_int->hdev;
+ user_int->timestamp = ktime_get();
switch (user_int->type) {
- case HL_USR_INTERRUPT_CQ:
- handle_user_interrupt(hdev, &hdev->common_user_cq_interrupt);
-
- /* Handle user cq interrupt registered on this specific irq */
- handle_user_interrupt(hdev, user_int);
- break;
- case HL_USR_INTERRUPT_DECODER:
- handle_user_interrupt(hdev, &hdev->common_decoder_interrupt);
-
- /* Handle decoder interrupt registered on this specific irq */
- handle_user_interrupt(hdev, user_int);
- break;
case HL_USR_INTERRUPT_TPC:
handle_tpc_interrupt(hdev);
break;
@@ -400,6 +474,18 @@ irqreturn_t hl_irq_user_interrupt_thread_handler(int irq, void *arg)
return IRQ_HANDLED;
}
+irqreturn_t hl_irq_eq_error_interrupt_thread_handler(int irq, void *arg)
+{
+ u64 event_mask = HL_NOTIFIER_EVENT_DEVICE_RESET | HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE;
+ struct hl_device *hdev = arg;
+
+ dev_err(hdev->dev, "EQ error interrupt received\n");
+
+ hl_device_cond_reset(hdev, HL_DRV_RESET_HARD, event_mask);
+
+ return IRQ_HANDLED;
+}
+
/**
* hl_irq_handler_eq - irq handler for event queue
*
diff --git a/drivers/accel/habanalabs/common/memory.c b/drivers/accel/habanalabs/common/memory.c
index 4fc72a07d2..0b8689fe0b 100644
--- a/drivers/accel/habanalabs/common/memory.c
+++ b/drivers/accel/habanalabs/common/memory.c
@@ -244,7 +244,7 @@ static int dma_map_host_va(struct hl_device *hdev, u64 addr, u64 size,
*p_userptr = userptr;
- rc = hdev->asic_funcs->asic_dma_map_sgtable(hdev, userptr->sgt, DMA_BIDIRECTIONAL);
+ rc = hl_dma_map_sgtable(hdev, userptr->sgt, DMA_BIDIRECTIONAL);
if (rc) {
dev_err(hdev->dev, "failed to map sgt with DMA region\n");
goto dma_map_err;
@@ -832,7 +832,6 @@ int hl_unreserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
* physical pages
*
* This function does the following:
- * - Pin the physical pages related to the given virtual block.
* - Create a physical page pack from the physical pages related to the given
* virtual block.
*/
@@ -1532,24 +1531,20 @@ static int set_dma_sg(struct scatterlist *sg, u64 bar_address, u64 chunk_size,
}
static struct sg_table *alloc_sgt_from_device_pages(struct hl_device *hdev, u64 *pages, u64 npages,
- u64 page_size, u64 exported_size,
+ u64 page_size, u64 exported_size, u64 offset,
struct device *dev, enum dma_data_direction dir)
{
- u64 chunk_size, bar_address, dma_max_seg_size, cur_size_to_export, cur_npages;
- struct asic_fixed_properties *prop;
- int rc, i, j, nents, cur_page;
+ u64 dma_max_seg_size, curr_page, size, chunk_size, left_size_to_export, left_size_in_page,
+ left_size_in_dma_seg, device_address, bar_address, start_page;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
struct scatterlist *sg;
+ unsigned int nents, i;
struct sg_table *sgt;
+ bool next_sg_entry;
+ int rc;
- prop = &hdev->asic_prop;
-
- dma_max_seg_size = dma_get_max_seg_size(dev);
-
- /* We would like to align the max segment size to PAGE_SIZE, so the
- * SGL will contain aligned addresses that can be easily mapped to
- * an MMU
- */
- dma_max_seg_size = ALIGN_DOWN(dma_max_seg_size, PAGE_SIZE);
+ /* Align max segment size to PAGE_SIZE to fit the minimal IOMMU mapping granularity */
+ dma_max_seg_size = ALIGN_DOWN(dma_get_max_seg_size(dev), PAGE_SIZE);
if (dma_max_seg_size < PAGE_SIZE) {
dev_err_ratelimited(hdev->dev,
"dma_max_seg_size %llu can't be smaller than PAGE_SIZE\n",
@@ -1561,121 +1556,149 @@ static struct sg_table *alloc_sgt_from_device_pages(struct hl_device *hdev, u64
if (!sgt)
return ERR_PTR(-ENOMEM);
- /* remove export size restrictions in case not explicitly defined */
- cur_size_to_export = exported_size ? exported_size : (npages * page_size);
-
- /* If the size of each page is larger than the dma max segment size,
- * then we can't combine pages and the number of entries in the SGL
- * will just be the
- * <number of pages> * <chunks of max segment size in each page>
- */
- if (page_size > dma_max_seg_size) {
- /* we should limit number of pages according to the exported size */
- cur_npages = DIV_ROUND_UP_SECTOR_T(cur_size_to_export, page_size);
- nents = cur_npages * DIV_ROUND_UP_SECTOR_T(page_size, dma_max_seg_size);
- } else {
- cur_npages = npages;
-
- /* Get number of non-contiguous chunks */
- for (i = 1, nents = 1, chunk_size = page_size ; i < cur_npages ; i++) {
- if (pages[i - 1] + page_size != pages[i] ||
- chunk_size + page_size > dma_max_seg_size) {
- nents++;
- chunk_size = page_size;
- continue;
- }
+ /* Use the offset to move to the actual first page that is exported */
+ for (start_page = 0 ; start_page < npages ; ++start_page) {
+ if (offset < page_size)
+ break;
- chunk_size += page_size;
- }
+ /* The offset value was validated so there can't be an underflow */
+ offset -= page_size;
}
- rc = sg_alloc_table(sgt, nents, GFP_KERNEL | __GFP_ZERO);
- if (rc)
- goto error_free;
+ /* Calculate the required number of entries for the SG table */
+ curr_page = start_page;
+ nents = 1;
+ left_size_to_export = exported_size;
+ left_size_in_page = page_size - offset;
+ left_size_in_dma_seg = dma_max_seg_size;
+ next_sg_entry = false;
- cur_page = 0;
+ while (true) {
+ size = min3(left_size_to_export, left_size_in_page, left_size_in_dma_seg);
+ left_size_to_export -= size;
+ left_size_in_page -= size;
+ left_size_in_dma_seg -= size;
- if (page_size > dma_max_seg_size) {
- u64 size_left, cur_device_address = 0;
+ if (!left_size_to_export)
+ break;
- size_left = page_size;
+ if (!left_size_in_page) {
+ /* left_size_to_export is not zero so there must be another page */
+ if (pages[curr_page] + page_size != pages[curr_page + 1])
+ next_sg_entry = true;
- /* Need to split each page into the number of chunks of
- * dma_max_seg_size
- */
- for_each_sgtable_dma_sg(sgt, sg, i) {
- if (size_left == page_size)
- cur_device_address =
- pages[cur_page] - prop->dram_base_address;
- else
- cur_device_address += dma_max_seg_size;
+ ++curr_page;
+ left_size_in_page = page_size;
+ }
- /* make sure not to export over exported size */
- chunk_size = min3(size_left, dma_max_seg_size, cur_size_to_export);
+ if (!left_size_in_dma_seg) {
+ next_sg_entry = true;
+ left_size_in_dma_seg = dma_max_seg_size;
+ }
- bar_address = hdev->dram_pci_bar_start + cur_device_address;
+ if (next_sg_entry) {
+ ++nents;
+ next_sg_entry = false;
+ }
+ }
- rc = set_dma_sg(sg, bar_address, chunk_size, dev, dir);
- if (rc)
- goto error_unmap;
+ rc = sg_alloc_table(sgt, nents, GFP_KERNEL | __GFP_ZERO);
+ if (rc)
+ goto err_free_sgt;
- cur_size_to_export -= chunk_size;
+ /* Prepare the SG table entries */
+ curr_page = start_page;
+ device_address = pages[curr_page] + offset;
+ left_size_to_export = exported_size;
+ left_size_in_page = page_size - offset;
+ left_size_in_dma_seg = dma_max_seg_size;
+ next_sg_entry = false;
- if (size_left > dma_max_seg_size) {
- size_left -= dma_max_seg_size;
- } else {
- cur_page++;
- size_left = page_size;
+ for_each_sgtable_dma_sg(sgt, sg, i) {
+ bar_address = hdev->dram_pci_bar_start + (device_address - prop->dram_base_address);
+ chunk_size = 0;
+
+ for ( ; curr_page < npages ; ++curr_page) {
+ size = min3(left_size_to_export, left_size_in_page, left_size_in_dma_seg);
+ chunk_size += size;
+ left_size_to_export -= size;
+ left_size_in_page -= size;
+ left_size_in_dma_seg -= size;
+
+ if (!left_size_to_export)
+ break;
+
+ if (!left_size_in_page) {
+ /* left_size_to_export is not zero so there must be another page */
+ if (pages[curr_page] + page_size != pages[curr_page + 1]) {
+ device_address = pages[curr_page + 1];
+ next_sg_entry = true;
+ }
+
+ left_size_in_page = page_size;
}
- }
- } else {
- /* Merge pages and put them into the scatterlist */
- for_each_sgtable_dma_sg(sgt, sg, i) {
- chunk_size = page_size;
- for (j = cur_page + 1 ; j < cur_npages ; j++) {
- if (pages[j - 1] + page_size != pages[j] ||
- chunk_size + page_size > dma_max_seg_size)
- break;
-
- chunk_size += page_size;
+
+ if (!left_size_in_dma_seg) {
+ /*
+ * Skip setting a new device address if already moving to a page
+ * which is not contiguous with the current page.
+ */
+ if (!next_sg_entry) {
+ device_address += chunk_size;
+ next_sg_entry = true;
+ }
+
+ left_size_in_dma_seg = dma_max_seg_size;
}
- bar_address = hdev->dram_pci_bar_start +
- (pages[cur_page] - prop->dram_base_address);
+ if (next_sg_entry) {
+ next_sg_entry = false;
+ break;
+ }
+ }
- /* make sure not to export over exported size */
- chunk_size = min(chunk_size, cur_size_to_export);
- rc = set_dma_sg(sg, bar_address, chunk_size, dev, dir);
- if (rc)
- goto error_unmap;
+ rc = set_dma_sg(sg, bar_address, chunk_size, dev, dir);
+ if (rc)
+ goto err_unmap;
+ }
- cur_size_to_export -= chunk_size;
- cur_page = j;
- }
+ /* There should be nothing left to export exactly after looping over all SG elements */
+ if (left_size_to_export) {
+ dev_err(hdev->dev,
+ "left size to export %#llx after initializing %u SG elements\n",
+ left_size_to_export, sgt->nents);
+ rc = -ENOMEM;
+ goto err_unmap;
}
- /* Because we are not going to include a CPU list we want to have some
- * chance that other users will detect this by setting the orig_nents
- * to 0 and using only nents (length of DMA list) when going over the
- * sgl
+ /*
+ * Because we are not going to include a CPU list, we want to have some chance that other
+ * users will detect this when going over SG table, by setting the orig_nents to 0 and using
+ * only nents (length of DMA list).
*/
sgt->orig_nents = 0;
+ dev_dbg(hdev->dev, "prepared SG table with %u entries for importer %s\n",
+ nents, dev_name(dev));
+ for_each_sgtable_dma_sg(sgt, sg, i)
+ dev_dbg(hdev->dev,
+ "SG entry %d: address %#llx, length %#x\n",
+ i, sg_dma_address(sg), sg_dma_len(sg));
+
return sgt;
-error_unmap:
+err_unmap:
for_each_sgtable_dma_sg(sgt, sg, i) {
if (!sg_dma_len(sg))
continue;
- dma_unmap_resource(dev, sg_dma_address(sg),
- sg_dma_len(sg), dir,
+ dma_unmap_resource(dev, sg_dma_address(sg), sg_dma_len(sg), dir,
DMA_ATTR_SKIP_CPU_SYNC);
}
sg_free_table(sgt);
-error_free:
+err_free_sgt:
kfree(sgt);
return ERR_PTR(rc);
}
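
The rewritten alloc_sgt_from_device_pages() above works in two passes: it first counts how many scatterlist entries are needed, then allocates the table with sg_alloc_table() and fills the entries, starting a new entry whenever the device pages stop being contiguous or the importer's DMA segment size limit is exhausted. Below is a minimal, self-contained userspace sketch of the counting pass only; it is an illustration under assumed names (count_sg_entries, the toy values in main), not the driver code, and it assumes the caller guarantees that export_size fits in the supplied pages and that offset < page_size.

#include <stdint.h>
#include <stdio.h>

#define MIN3(a, b, c) ((a) < (b) ? ((a) < (c) ? (a) : (c)) : ((b) < (c) ? (b) : (c)))

static unsigned int count_sg_entries(const uint64_t *pages, uint64_t page_size,
				     uint64_t export_size, uint64_t offset,
				     uint64_t dma_max_seg)
{
	uint64_t left_to_export = export_size, left_in_page = page_size - offset;
	uint64_t left_in_seg = dma_max_seg, curr = 0, size;
	unsigned int nents = 1;
	int new_entry = 0;

	while (left_to_export) {
		size = MIN3(left_to_export, left_in_page, left_in_seg);
		left_to_export -= size;
		left_in_page -= size;
		left_in_seg -= size;

		if (!left_to_export)
			break;

		if (!left_in_page) {
			/* more data remains, so another page must follow */
			if (pages[curr] + page_size != pages[curr + 1])
				new_entry = 1;
			curr++;
			left_in_page = page_size;
		}
		if (!left_in_seg) {
			new_entry = 1;
			left_in_seg = dma_max_seg;
		}
		if (new_entry) {
			nents++;
			new_entry = 0;
		}
	}
	return nents;
}

int main(void)
{
	/* three 2 MiB pages: the first two contiguous, the third one not */
	uint64_t pages[] = { 0x0, 0x200000, 0x800000 };

	printf("%u\n", count_sg_entries(pages, 0x200000, 0x600000, 0, 0x400000));
	return 0;
}

With these toy values the sketch prints 2: one entry covering the two contiguous pages (4 MiB, which also hits the assumed segment limit) and a separate 2 MiB entry for the non-contiguous page, mirroring how the fill pass above later sizes each SG element.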
@@ -1700,6 +1723,7 @@ static int hl_dmabuf_attach(struct dma_buf *dmabuf,
static struct sg_table *hl_map_dmabuf(struct dma_buf_attachment *attachment,
enum dma_data_direction dir)
{
+ u64 *pages, npages, page_size, exported_size, offset;
struct dma_buf *dma_buf = attachment->dmabuf;
struct hl_vm_phys_pg_pack *phys_pg_pack;
struct hl_dmabuf_priv *hl_dmabuf;
@@ -1708,30 +1732,28 @@ static struct sg_table *hl_map_dmabuf(struct dma_buf_attachment *attachment,
hl_dmabuf = dma_buf->priv;
hdev = hl_dmabuf->ctx->hdev;
- phys_pg_pack = hl_dmabuf->phys_pg_pack;
if (!attachment->peer2peer) {
dev_dbg(hdev->dev, "Failed to map dmabuf because p2p is disabled\n");
return ERR_PTR(-EPERM);
}
- if (phys_pg_pack)
- sgt = alloc_sgt_from_device_pages(hdev,
- phys_pg_pack->pages,
- phys_pg_pack->npages,
- phys_pg_pack->page_size,
- phys_pg_pack->exported_size,
- attachment->dev,
- dir);
- else
- sgt = alloc_sgt_from_device_pages(hdev,
- &hl_dmabuf->device_address,
- 1,
- hl_dmabuf->dmabuf->size,
- 0,
- attachment->dev,
- dir);
+ exported_size = hl_dmabuf->dmabuf->size;
+ offset = hl_dmabuf->offset;
+ phys_pg_pack = hl_dmabuf->phys_pg_pack;
+ if (phys_pg_pack) {
+ pages = phys_pg_pack->pages;
+ npages = phys_pg_pack->npages;
+ page_size = phys_pg_pack->page_size;
+ } else {
+ pages = &hl_dmabuf->device_phys_addr;
+ npages = 1;
+ page_size = hl_dmabuf->dmabuf->size;
+ }
+
+ sgt = alloc_sgt_from_device_pages(hdev, pages, npages, page_size, exported_size, offset,
+ attachment->dev, dir);
if (IS_ERR(sgt))
dev_err(hdev->dev, "failed (%ld) to initialize sgt for dmabuf\n", PTR_ERR(sgt));
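
hl_map_dmabuf() above now feeds both export flavours through the same SG-table builder: an MMU-mapped export passes its page array, while a physical-address export is described as a single page covering the whole dmabuf. The following is a small hypothetical sketch of that parameter selection only; all names here are assumptions, not the driver API.

#include <stdint.h>
#include <stdio.h>

struct toy_pg_pack {
	uint64_t *pages;
	uint64_t npages;
	uint64_t page_size;
};

static void select_export_pages(struct toy_pg_pack *pack, uint64_t *single_addr,
				uint64_t dmabuf_size, uint64_t **pages,
				uint64_t *npages, uint64_t *page_size)
{
	if (pack) {
		/* MMU-mapped export: use the page array built when the memory was mapped */
		*pages = pack->pages;
		*npages = pack->npages;
		*page_size = pack->page_size;
	} else {
		/* physical-address export: the whole buffer is one contiguous "page" */
		*pages = single_addr;
		*npages = 1;
		*page_size = dmabuf_size;
	}
}

int main(void)
{
	uint64_t phys_addr = 0x100000, *pages, npages, page_size;

	select_export_pages(NULL, &phys_addr, 0x400000, &pages, &npages, &page_size);
	printf("npages=%llu page_size=%#llx\n", (unsigned long long)npages,
	       (unsigned long long)page_size);
	return 0;
}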
@@ -1818,7 +1840,7 @@ static void hl_release_dmabuf(struct dma_buf *dmabuf)
hl_ctx_put(ctx);
/* Paired with get_file() in export_dmabuf() */
- fput(ctx->hpriv->filp);
+ fput(ctx->hpriv->file_priv->filp);
kfree(hl_dmabuf);
}
@@ -1864,7 +1886,7 @@ static int export_dmabuf(struct hl_ctx *ctx,
* released first and only then the compute device.
* Paired with fput() in hl_release_dmabuf().
*/
- get_file(ctx->hpriv->filp);
+ get_file(ctx->hpriv->file_priv->filp);
*dmabuf_fd = fd;
@@ -1876,22 +1898,29 @@ err_dma_buf_put:
return rc;
}
-static int validate_export_params_common(struct hl_device *hdev, u64 device_addr, u64 size)
+static int validate_export_params_common(struct hl_device *hdev, u64 addr, u64 size, u64 offset)
{
- if (!IS_ALIGNED(device_addr, PAGE_SIZE)) {
+ if (!PAGE_ALIGNED(addr)) {
dev_dbg(hdev->dev,
- "exported device memory address 0x%llx should be aligned to 0x%lx\n",
- device_addr, PAGE_SIZE);
+ "exported device memory address 0x%llx should be aligned to PAGE_SIZE 0x%lx\n",
+ addr, PAGE_SIZE);
return -EINVAL;
}
- if (size < PAGE_SIZE) {
+ if (!size || !PAGE_ALIGNED(size)) {
dev_dbg(hdev->dev,
- "exported device memory size %llu should be equal to or greater than %lu\n",
+ "exported device memory size %llu should be a multiple of PAGE_SIZE %lu\n",
size, PAGE_SIZE);
return -EINVAL;
}
+ if (!PAGE_ALIGNED(offset)) {
+ dev_dbg(hdev->dev,
+ "exported device memory offset %llu should be a multiple of PAGE_SIZE %lu\n",
+ offset, PAGE_SIZE);
+ return -EINVAL;
+ }
+
return 0;
}
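
validate_export_params_common() above now insists that the exported address, size and offset are all PAGE_SIZE aligned and that the size is non-zero, instead of only requiring the size to be at least one page. A toy userspace sketch of the same rule follows; TOY_PAGE_SIZE and export_params_ok are assumed names, not driver code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_PAGE_SIZE 4096ULL
#define TOY_PAGE_ALIGNED(x) (((x) & (TOY_PAGE_SIZE - 1)) == 0)

static bool export_params_ok(uint64_t addr, uint64_t size, uint64_t offset)
{
	/* address, size and offset must be page aligned, and size must be non-zero */
	return TOY_PAGE_ALIGNED(addr) && size && TOY_PAGE_ALIGNED(size) &&
	       TOY_PAGE_ALIGNED(offset);
}

int main(void)
{
	printf("%d\n", export_params_ok(0x1000, 0x2000, 0x1000));	/* 1: all aligned */
	printf("%d\n", export_params_ok(0x1000, 0x1800, 0));		/* 0: size not page aligned */
	return 0;
}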
@@ -1901,13 +1930,13 @@ static int validate_export_params_no_mmu(struct hl_device *hdev, u64 device_addr
u64 bar_address;
int rc;
- rc = validate_export_params_common(hdev, device_addr, size);
+ rc = validate_export_params_common(hdev, device_addr, size, 0);
if (rc)
return rc;
if (device_addr < prop->dram_user_base_address ||
- (device_addr + size) > prop->dram_end_address ||
- (device_addr + size) < device_addr) {
+ (device_addr + size) > prop->dram_end_address ||
+ (device_addr + size) < device_addr) {
dev_dbg(hdev->dev,
"DRAM memory range 0x%llx (+0x%llx) is outside of DRAM boundaries\n",
device_addr, size);
@@ -1934,29 +1963,26 @@ static int validate_export_params(struct hl_device *hdev, u64 device_addr, u64 s
u64 bar_address;
int i, rc;
- rc = validate_export_params_common(hdev, device_addr, size);
+ rc = validate_export_params_common(hdev, device_addr, size, offset);
if (rc)
return rc;
if ((offset + size) > phys_pg_pack->total_size) {
dev_dbg(hdev->dev, "offset %#llx and size %#llx exceed total map size %#llx\n",
- offset, size, phys_pg_pack->total_size);
+ offset, size, phys_pg_pack->total_size);
return -EINVAL;
}
for (i = 0 ; i < phys_pg_pack->npages ; i++) {
-
bar_address = hdev->dram_pci_bar_start +
- (phys_pg_pack->pages[i] - prop->dram_base_address);
+ (phys_pg_pack->pages[i] - prop->dram_base_address);
if ((bar_address + phys_pg_pack->page_size) >
(hdev->dram_pci_bar_start + prop->dram_pci_bar_size) ||
(bar_address + phys_pg_pack->page_size) < bar_address) {
dev_dbg(hdev->dev,
"DRAM memory range 0x%llx (+0x%x) is outside of PCI BAR boundaries\n",
- phys_pg_pack->pages[i],
- phys_pg_pack->page_size);
-
+ phys_pg_pack->pages[i], phys_pg_pack->page_size);
return -EINVAL;
}
}
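
The per-page loop above translates each DRAM physical page to its address inside the PCI BAR and rejects the export if any page falls outside the BAR window or wraps around. Below is a hypothetical standalone sketch of that check with assumed toy values; it is not the driver code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool page_fits_pci_bar(uint64_t page_pa, uint64_t page_size, uint64_t dram_base,
			      uint64_t bar_start, uint64_t bar_size)
{
	/* translate the DRAM physical address to its location inside the PCI BAR */
	uint64_t bar_address = bar_start + (page_pa - dram_base);

	/* the whole page must fit inside the BAR window and must not wrap around */
	return bar_address + page_size <= bar_start + bar_size &&
	       bar_address + page_size >= bar_address;
}

int main(void)
{
	/* assumed toy values: 1 GiB BAR at 0x38000000000, DRAM physical base at 0 */
	printf("%d\n", page_fits_pci_bar(0x100000, 0x200000, 0, 0x38000000000ULL, 1ULL << 30));
	printf("%d\n", page_fits_pci_bar(1ULL << 30, 0x200000, 0, 0x38000000000ULL, 1ULL << 30));
	return 0;
}

The first call prints 1 (the page sits inside the assumed 1 GiB window), the second prints 0 (the page starts at the window's end and spills past it), matching the boundary conditions enforced in validate_export_params() above.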
@@ -2012,7 +2038,6 @@ static int export_dmabuf_from_addr(struct hl_ctx *ctx, u64 addr, u64 size, u64 o
struct asic_fixed_properties *prop;
struct hl_dmabuf_priv *hl_dmabuf;
struct hl_device *hdev;
- u64 export_addr;
int rc;
hdev = ctx->hdev;
@@ -2024,8 +2049,6 @@ static int export_dmabuf_from_addr(struct hl_ctx *ctx, u64 addr, u64 size, u64 o
return -EINVAL;
}
- export_addr = addr + offset;
-
hl_dmabuf = kzalloc(sizeof(*hl_dmabuf), GFP_KERNEL);
if (!hl_dmabuf)
return -ENOMEM;
@@ -2041,20 +2064,20 @@ static int export_dmabuf_from_addr(struct hl_ctx *ctx, u64 addr, u64 size, u64 o
rc = PTR_ERR(phys_pg_pack);
goto dec_memhash_export_cnt;
}
- rc = validate_export_params(hdev, export_addr, size, offset, phys_pg_pack);
+ rc = validate_export_params(hdev, addr, size, offset, phys_pg_pack);
if (rc)
goto dec_memhash_export_cnt;
- phys_pg_pack->exported_size = size;
hl_dmabuf->phys_pg_pack = phys_pg_pack;
hl_dmabuf->memhash_hnode = hnode;
+ hl_dmabuf->offset = offset;
} else {
- rc = validate_export_params_no_mmu(hdev, export_addr, size);
+ rc = validate_export_params_no_mmu(hdev, addr, size);
if (rc)
goto err_free_dmabuf_wrapper;
- }
- hl_dmabuf->device_address = export_addr;
+ hl_dmabuf->device_phys_addr = addr;
+ }
rc = export_dmabuf(ctx, hl_dmabuf, size, flags, dmabuf_fd);
if (rc)
@@ -2171,8 +2194,9 @@ static int allocate_timestamps_buffers(struct hl_fpriv *hpriv, struct hl_mem_in
return 0;
}
-int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
+int hl_mem_ioctl(struct drm_device *ddev, void *data, struct drm_file *file_priv)
{
+ struct hl_fpriv *hpriv = file_priv->driver_priv;
enum hl_device_status status;
union hl_mem_args *args = data;
struct hl_device *hdev = hpriv->hdev;
@@ -2420,7 +2444,7 @@ void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
hl_debugfs_remove_userptr(hdev, userptr);
if (userptr->dma_mapped)
- hdev->asic_funcs->hl_dma_unmap_sgtable(hdev, userptr->sgt, userptr->dir);
+ hl_dma_unmap_sgtable(hdev, userptr->sgt, userptr->dir);
unpin_user_pages_dirty_lock(userptr->pages, userptr->npages, true);
kvfree(userptr->pages);
diff --git a/drivers/accel/habanalabs/common/mmu/mmu.c b/drivers/accel/habanalabs/common/mmu/mmu.c
index b2145716c6..b654302a68 100644
--- a/drivers/accel/habanalabs/common/mmu/mmu.c
+++ b/drivers/accel/habanalabs/common/mmu/mmu.c
@@ -596,6 +596,7 @@ int hl_mmu_if_set_funcs(struct hl_device *hdev)
break;
case ASIC_GAUDI2:
case ASIC_GAUDI2B:
+ case ASIC_GAUDI2C:
/* MMUs in Gaudi2 are always host resident */
hl_mmu_v2_hr_set_funcs(hdev, &hdev->mmu_func[MMU_HR_PGT]);
break;
diff --git a/drivers/accel/habanalabs/common/sysfs.c b/drivers/accel/habanalabs/common/sysfs.c
index 01f89f0293..2786063730 100644
--- a/drivers/accel/habanalabs/common/sysfs.c
+++ b/drivers/accel/habanalabs/common/sysfs.c
@@ -251,6 +251,9 @@ static ssize_t device_type_show(struct device *dev,
case ASIC_GAUDI2B:
str = "GAUDI2B";
break;
+ case ASIC_GAUDI2C:
+ str = "GAUDI2C";
+ break;
default:
dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
hdev->asic_type);