Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_job.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c  287
1 file changed, 287 insertions, 0 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
new file mode 100644
index 000000000..dcfe8a3b0
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -0,0 +1,287 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+
+#include "amdgpu.h"
+#include "amdgpu_trace.h"
+
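+/* drm_sched timeout handler: try a soft recovery of the ring first; if that
+ * fails, log the hang and the offending process, then either trigger a full
+ * GPU reset or suspend the scheduler timeout when recovery is not advisable.
+ */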
+static void amdgpu_job_timedout(struct drm_sched_job *s_job)
+{
+ struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
+ struct amdgpu_job *job = to_amdgpu_job(s_job);
+ struct amdgpu_task_info ti;
+ struct amdgpu_device *adev = ring->adev;
+
+ memset(&ti, 0, sizeof(struct amdgpu_task_info));
+
+ if (amdgpu_gpu_recovery &&
+ amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
+ DRM_ERROR("ring %s timeout, but soft recovered\n",
+ s_job->sched->name);
+ return;
+ }
+
+ amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
+ DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n",
+ job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
+ ring->fence_drv.sync_seq);
+ DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n",
+ ti.process_name, ti.tgid, ti.task_name, ti.pid);
+
+ if (amdgpu_device_should_recover_gpu(ring->adev)) {
+ amdgpu_device_gpu_recover(ring->adev, job);
+ } else {
+ drm_sched_suspend_timeout(&ring->sched);
+ if (amdgpu_sriov_vf(adev))
+ adev->virt.tdr_debug = true;
+ }
+}
+
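+/* Allocate a job with room for num_ibs IBs placed directly behind the job
+ * structure and set up its sync objects.
+ */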
+int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
+ struct amdgpu_job **job, struct amdgpu_vm *vm)
+{
+ size_t size = sizeof(struct amdgpu_job);
+
+ if (num_ibs == 0)
+ return -EINVAL;
+
+ size += sizeof(struct amdgpu_ib) * num_ibs;
+
+ *job = kzalloc(size, GFP_KERNEL);
+ if (!*job)
+ return -ENOMEM;
+
+ /*
+ * Initialize the scheduler to at least some ring so that we always
+ * have a pointer to adev.
+ */
+ (*job)->base.sched = &adev->rings[0]->sched;
+ (*job)->vm = vm;
+ (*job)->ibs = (void *)&(*job)[1];
+ (*job)->num_ibs = num_ibs;
+
+ amdgpu_sync_create(&(*job)->sync);
+ amdgpu_sync_create(&(*job)->sched_sync);
+ (*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
+ (*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;
+
+ return 0;
+}
+
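+/* Allocate a single-IB job without a VM and fetch an IB of the requested
+ * size from the given IB pool.
+ */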
+int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
+ enum amdgpu_ib_pool_type pool_type,
+ struct amdgpu_job **job)
+{
+ int r;
+
+ r = amdgpu_job_alloc(adev, 1, job, NULL);
+ if (r)
+ return r;
+
+ r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
+ if (r)
+ kfree(*job);
+
+ return r;
+}
+
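+/* Free the IBs of a job; the scheduler's finished fence (or the hardware
+ * fence as fallback) keeps the IB memory from being reused too early.
+ */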
+void amdgpu_job_free_resources(struct amdgpu_job *job)
+{
+ struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
+ struct dma_fence *f;
+ unsigned i;
+
+ /* use sched fence if available */
+ f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;
+
+ for (i = 0; i < job->num_ibs; ++i)
+ amdgpu_ib_free(ring->adev, &job->ibs[i], f);
+}
+
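+/* free_job callback: called by the scheduler once it is done with the job. */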
+static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
+{
+ struct amdgpu_job *job = to_amdgpu_job(s_job);
+
+ drm_sched_job_cleanup(s_job);
+
+ dma_fence_put(job->fence);
+ amdgpu_sync_free(&job->sync);
+ amdgpu_sync_free(&job->sched_sync);
+ kfree(job);
+}
+
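+/* Free a job that was never handed over to the scheduler, for example after
+ * direct submission or on an error path.
+ */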
+void amdgpu_job_free(struct amdgpu_job *job)
+{
+ amdgpu_job_free_resources(job);
+
+ dma_fence_put(job->fence);
+ amdgpu_sync_free(&job->sync);
+ amdgpu_sync_free(&job->sched_sync);
+ kfree(job);
+}
+
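+/* Push a job to the scheduler through the given entity and return the
+ * scheduler's finished fence in *f.
+ */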
+int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
+ void *owner, struct dma_fence **f)
+{
+ int r;
+
+ if (!f)
+ return -EINVAL;
+
+ r = drm_sched_job_init(&job->base, entity, owner);
+ if (r)
+ return r;
+
+ *f = dma_fence_get(&job->base.s_fence->finished);
+ amdgpu_job_free_resources(job);
+ drm_sched_entity_push_job(&job->base, entity);
+
+ return 0;
+}
+
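+/* Submit a job directly to the ring, bypassing the scheduler; *fence is the
+ * hardware fence and the job is freed right away on success.
+ */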
+int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
+ struct dma_fence **fence)
+{
+ int r;
+
+ job->base.sched = &ring->sched;
+ r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
+ job->fence = dma_fence_get(*fence);
+ if (r)
+ return r;
+
+ amdgpu_job_free(job);
+ return 0;
+}
+
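+/* dependency callback: return the next fence the job still has to wait for,
+ * grabbing a VM ID once all other dependencies are satisfied.
+ */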
+static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
+ struct drm_sched_entity *s_entity)
+{
+ struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
+ struct amdgpu_job *job = to_amdgpu_job(sched_job);
+ struct amdgpu_vm *vm = job->vm;
+ struct dma_fence *fence;
+ int r;
+
+ fence = amdgpu_sync_get_fence(&job->sync);
+ if (fence && drm_sched_dependency_optimized(fence, s_entity)) {
+ r = amdgpu_sync_fence(&job->sched_sync, fence);
+ if (r)
+ DRM_ERROR("Error adding fence (%d)\n", r);
+ }
+
+ while (fence == NULL && vm && !job->vmid) {
+ r = amdgpu_vmid_grab(vm, ring, &job->sync,
+ &job->base.s_fence->finished,
+ job);
+ if (r)
+ DRM_ERROR("Error getting VM ID (%d)\n", r);
+
+ fence = amdgpu_sync_get_fence(&job->sync);
+ }
+
+ return fence;
+}
+
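+/* run_job callback: submit the job's IBs to the ring and return the hardware
+ * fence; the IBs are skipped when VRAM contents were lost in the meantime.
+ */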
+static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
+{
+ struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
+ struct dma_fence *fence = NULL, *finished;
+ struct amdgpu_job *job;
+ int r = 0;
+
+ job = to_amdgpu_job(sched_job);
+ finished = &job->base.s_fence->finished;
+
+ BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
+
+ trace_amdgpu_sched_run_job(job);
+
+ /* skip the IBs as well if VRAM contents were lost */
+ if (job->vram_lost_counter != atomic_read(&ring->adev->vram_lost_counter))
+ dma_fence_set_error(finished, -ECANCELED);
+
+ if (finished->error < 0) {
+ DRM_INFO("Skip scheduling IBs!\n");
+ } else {
+ r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
+ &fence);
+ if (r)
+ DRM_ERROR("Error scheduling IBs (%d)\n", r);
+ }
+ /* if gpu reset, hw fence will be replaced here */
+ dma_fence_put(job->fence);
+ job->fence = dma_fence_get(fence);
+
+ amdgpu_job_free_resources(job);
+
+ fence = r ? ERR_PTR(r) : fence;
+ return fence;
+}
+
+#define to_drm_sched_job(sched_job) \
+ container_of((sched_job), struct drm_sched_job, queue_node)
+
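+/* Fail (-EHWPOISON) and signal all jobs on this scheduler: first everything
+ * still queued on the entities, then the jobs already pushed to the hardware.
+ */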
+void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
+{
+ struct drm_sched_job *s_job;
+ struct drm_sched_entity *s_entity = NULL;
+ int i;
+
+ /* Signal all jobs not yet scheduled */
+ for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+ struct drm_sched_rq *rq = &sched->sched_rq[i];
+
+ if (!rq)
+ continue;
+
+ spin_lock(&rq->lock);
+ list_for_each_entry(s_entity, &rq->entities, list) {
+ while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
+ struct drm_sched_fence *s_fence = s_job->s_fence;
+
+ dma_fence_signal(&s_fence->scheduled);
+ dma_fence_set_error(&s_fence->finished, -EHWPOISON);
+ dma_fence_signal(&s_fence->finished);
+ }
+ }
+ spin_unlock(&rq->lock);
+ }
+
+ /* Signal all jobs already scheduled to HW */
+ list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
+ struct drm_sched_fence *s_fence = s_job->s_fence;
+
+ dma_fence_set_error(&s_fence->finished, -EHWPOISON);
+ dma_fence_signal(&s_fence->finished);
+ }
+}
+
+const struct drm_sched_backend_ops amdgpu_sched_ops = {
+ .dependency = amdgpu_job_dependency,
+ .run_job = amdgpu_job_run,
+ .timedout_job = amdgpu_job_timedout,
+ .free_job = amdgpu_job_free_cb
+};