path: root/drivers/gpu/drm/scheduler
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit    ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree      b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/gpu/drm/scheduler
parent    Initial commit. (diff)
Adding upstream version 6.6.15. (upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/gpu/drm/scheduler')
-rw-r--r--  drivers/gpu/drm/scheduler/Makefile               |   25
-rw-r--r--  drivers/gpu/drm/scheduler/gpu_scheduler_trace.h  |  114
-rw-r--r--  drivers/gpu/drm/scheduler/sched_entity.c         |  596
-rw-r--r--  drivers/gpu/drm/scheduler/sched_fence.c          |  240
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c           | 1208
5 files changed, 2183 insertions, 0 deletions
diff --git a/drivers/gpu/drm/scheduler/Makefile b/drivers/gpu/drm/scheduler/Makefile
new file mode 100644
index 000000000..538636218
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/Makefile
@@ -0,0 +1,25 @@
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+gpu-sched-y := sched_main.o sched_fence.o sched_entity.o
+
+obj-$(CONFIG_DRM_SCHED) += gpu-sched.o
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
new file mode 100644
index 000000000..3143ecaaf
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _GPU_SCHED_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM gpu_scheduler
+#define TRACE_INCLUDE_FILE gpu_scheduler_trace
+
+DECLARE_EVENT_CLASS(drm_sched_job,
+ TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
+ TP_ARGS(sched_job, entity),
+ TP_STRUCT__entry(
+ __field(struct drm_sched_entity *, entity)
+ __field(struct dma_fence *, fence)
+ __string(name, sched_job->sched->name)
+ __field(uint64_t, id)
+ __field(u32, job_count)
+ __field(int, hw_job_count)
+ ),
+
+ TP_fast_assign(
+ __entry->entity = entity;
+ __entry->id = sched_job->id;
+ __entry->fence = &sched_job->s_fence->finished;
+ __assign_str(name, sched_job->sched->name);
+ __entry->job_count = spsc_queue_count(&entity->job_queue);
+ __entry->hw_job_count = atomic_read(
+ &sched_job->sched->hw_rq_count);
+ ),
+ TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d",
+ __entry->entity, __entry->id,
+ __entry->fence, __get_str(name),
+ __entry->job_count, __entry->hw_job_count)
+);
+
+DEFINE_EVENT(drm_sched_job, drm_sched_job,
+ TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
+ TP_ARGS(sched_job, entity)
+);
+
+DEFINE_EVENT(drm_sched_job, drm_run_job,
+ TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
+ TP_ARGS(sched_job, entity)
+);
+
+TRACE_EVENT(drm_sched_process_job,
+ TP_PROTO(struct drm_sched_fence *fence),
+ TP_ARGS(fence),
+ TP_STRUCT__entry(
+ __field(struct dma_fence *, fence)
+ ),
+
+ TP_fast_assign(
+ __entry->fence = &fence->finished;
+ ),
+ TP_printk("fence=%p signaled", __entry->fence)
+);
+
+TRACE_EVENT(drm_sched_job_wait_dep,
+ TP_PROTO(struct drm_sched_job *sched_job, struct dma_fence *fence),
+ TP_ARGS(sched_job, fence),
+ TP_STRUCT__entry(
+ __string(name, sched_job->sched->name)
+ __field(uint64_t, id)
+ __field(struct dma_fence *, fence)
+ __field(uint64_t, ctx)
+ __field(unsigned, seqno)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, sched_job->sched->name);
+ __entry->id = sched_job->id;
+ __entry->fence = fence;
+ __entry->ctx = fence->context;
+ __entry->seqno = fence->seqno;
+ ),
+ TP_printk("job ring=%s, id=%llu, depends fence=%p, context=%llu, seq=%u",
+ __get_str(name), __entry->id,
+ __entry->fence, __entry->ctx,
+ __entry->seqno)
+);
+
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/scheduler
+#include <trace/define_trace.h>
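
Editor's note: for orientation (not part of the commit above), a minimal hedged sketch of how the tracepoints declared in this header are meant to be consumed. The trace_*() helpers are generated from the events defined above; the example_emit() wrapper is hypothetical.

/* Illustrative sketch of the tracepoint usage pattern (not part of this
 * commit). Exactly one compilation unit per module defines CREATE_TRACE_POINTS
 * before including the header (sched_main.c below does this); callers then
 * invoke the generated trace_*() helpers. example_emit() is hypothetical.
 */
#include <drm/gpu_scheduler.h>

#define CREATE_TRACE_POINTS
#include "gpu_scheduler_trace.h"

static void example_emit(struct drm_sched_job *job,
			 struct drm_sched_entity *entity)
{
	trace_drm_sched_job(job, entity);          /* job pushed to an entity */
	trace_drm_run_job(job, entity);            /* job handed to the hardware */
	trace_drm_sched_process_job(job->s_fence); /* hardware fence signaled */
}
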
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
new file mode 100644
index 000000000..a42763e14
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -0,0 +1,596 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+
+#include "gpu_scheduler_trace.h"
+
+#define to_drm_sched_job(sched_job) \
+ container_of((sched_job), struct drm_sched_job, queue_node)
+
+/**
+ * drm_sched_entity_init - Init a context entity used by the scheduler when
+ * submitting to the HW ring.
+ *
+ * @entity: scheduler entity to init
+ * @priority: priority of the entity
+ * @sched_list: the list of drm scheds on which jobs from this
+ * entity can be submitted
+ * @num_sched_list: number of drm sched in sched_list
+ * @guilty: atomic_t set to 1 when a job on this queue
+ * is found to be guilty causing a timeout
+ *
+ * Note that the &sched_list must have at least one element to schedule the entity.
+ *
+ * For changing @priority later on at runtime see
+ * drm_sched_entity_set_priority(). For changing the set of schedulers
+ * @sched_list at runtime see drm_sched_entity_modify_sched().
+ *
+ * An entity is cleaned up by calling drm_sched_entity_fini(). See also
+ * drm_sched_entity_destroy().
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_sched_entity_init(struct drm_sched_entity *entity,
+ enum drm_sched_priority priority,
+ struct drm_gpu_scheduler **sched_list,
+ unsigned int num_sched_list,
+ atomic_t *guilty)
+{
+ if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
+ return -EINVAL;
+
+ memset(entity, 0, sizeof(struct drm_sched_entity));
+ INIT_LIST_HEAD(&entity->list);
+ entity->rq = NULL;
+ entity->guilty = guilty;
+ entity->num_sched_list = num_sched_list;
+ entity->priority = priority;
+ entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
+ RCU_INIT_POINTER(entity->last_scheduled, NULL);
+ RB_CLEAR_NODE(&entity->rb_tree_node);
+
+ if (num_sched_list)
+ entity->rq = &sched_list[0]->sched_rq[entity->priority];
+
+ init_completion(&entity->entity_idle);
+
+ /* We start in an idle state. */
+ complete_all(&entity->entity_idle);
+
+ spin_lock_init(&entity->rq_lock);
+ spsc_queue_init(&entity->job_queue);
+
+ atomic_set(&entity->fence_seq, 0);
+ entity->fence_context = dma_fence_context_alloc(2);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_sched_entity_init);
+
+/**
+ * drm_sched_entity_modify_sched - Modify sched of an entity
+ * @entity: scheduler entity to modify
+ * @sched_list: the list of new drm scheds which will replace
+ * existing entity->sched_list
+ * @num_sched_list: number of drm sched in sched_list
+ *
+ * Note that this must be called under the same common lock for @entity as
+ * drm_sched_job_arm() and drm_sched_entity_push_job(), or the driver needs to
+ * guarantee through some other means that this is never called while new jobs
+ * can be pushed to @entity.
+ */
+void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
+ struct drm_gpu_scheduler **sched_list,
+ unsigned int num_sched_list)
+{
+ WARN_ON(!num_sched_list || !sched_list);
+
+ entity->sched_list = sched_list;
+ entity->num_sched_list = num_sched_list;
+}
+EXPORT_SYMBOL(drm_sched_entity_modify_sched);
+
+static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
+{
+ rmb(); /* for list_empty to work without lock */
+
+ if (list_empty(&entity->list) ||
+ spsc_queue_count(&entity->job_queue) == 0 ||
+ entity->stopped)
+ return true;
+
+ return false;
+}
+
+/* Return true if entity could provide a job. */
+bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
+{
+ if (spsc_queue_peek(&entity->job_queue) == NULL)
+ return false;
+
+ if (READ_ONCE(entity->dependency))
+ return false;
+
+ return true;
+}
+
+/**
+ * drm_sched_entity_error - return error of last scheduled job
+ * @entity: scheduler entity to check
+ *
+ * Opportunistically return the error of the last scheduled job. Result can
+ * change any time when new jobs are pushed to the hw.
+ */
+int drm_sched_entity_error(struct drm_sched_entity *entity)
+{
+ struct dma_fence *fence;
+ int r;
+
+ rcu_read_lock();
+ fence = rcu_dereference(entity->last_scheduled);
+ r = fence ? fence->error : 0;
+ rcu_read_unlock();
+
+ return r;
+}
+EXPORT_SYMBOL(drm_sched_entity_error);
+
+static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
+{
+ struct drm_sched_job *job = container_of(wrk, typeof(*job), work);
+
+ drm_sched_fence_finished(job->s_fence, -ESRCH);
+ WARN_ON(job->s_fence->parent);
+ job->sched->ops->free_job(job);
+}
+
+/* Signal the scheduler finished fence when the entity in question is killed. */
+static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
+ struct dma_fence_cb *cb)
+{
+ struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
+ finish_cb);
+ unsigned long index;
+
+ dma_fence_put(f);
+
+ /* Wait for all dependencies to avoid data corruptions */
+ xa_for_each(&job->dependencies, index, f) {
+ struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
+
+ if (s_fence && f == &s_fence->scheduled) {
+ /* The dependencies array had a reference on the scheduled
+ * fence, and the finished fence refcount might have
+ * dropped to zero. Use dma_fence_get_rcu() so we get
+ * a NULL fence in that case.
+ */
+ f = dma_fence_get_rcu(&s_fence->finished);
+
+ /* Now that we have a reference on the finished fence,
+ * we can release the reference the dependencies array
+ * had on the scheduled fence.
+ */
+ dma_fence_put(&s_fence->scheduled);
+ }
+
+ xa_erase(&job->dependencies, index);
+ if (f && !dma_fence_add_callback(f, &job->finish_cb,
+ drm_sched_entity_kill_jobs_cb))
+ return;
+
+ dma_fence_put(f);
+ }
+
+ INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
+ schedule_work(&job->work);
+}
+
+/* Remove the entity from the scheduler and kill all pending jobs */
+static void drm_sched_entity_kill(struct drm_sched_entity *entity)
+{
+ struct drm_sched_job *job;
+ struct dma_fence *prev;
+
+ if (!entity->rq)
+ return;
+
+ spin_lock(&entity->rq_lock);
+ entity->stopped = true;
+ drm_sched_rq_remove_entity(entity->rq, entity);
+ spin_unlock(&entity->rq_lock);
+
+ /* Make sure this entity is not used by the scheduler at the moment */
+ wait_for_completion(&entity->entity_idle);
+
+ /* The entity is guaranteed to not be used by the scheduler */
+ prev = rcu_dereference_check(entity->last_scheduled, true);
+ dma_fence_get(prev);
+ while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
+ struct drm_sched_fence *s_fence = job->s_fence;
+
+ dma_fence_get(&s_fence->finished);
+ if (!prev || dma_fence_add_callback(prev, &job->finish_cb,
+ drm_sched_entity_kill_jobs_cb))
+ drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
+
+ prev = &s_fence->finished;
+ }
+ dma_fence_put(prev);
+}
+
+/**
+ * drm_sched_entity_flush - Flush a context entity
+ *
+ * @entity: scheduler entity
+ * @timeout: time to wait for the queue to become empty, in jiffies.
+ *
+ * drm_sched_entity_fini() is split into two functions; this first one does the
+ * waiting, removes the entity from the runqueue and returns an error when the
+ * process was killed.
+ *
+ * Returns the remaining time in jiffies left from the input timeout
+ */
+long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
+{
+ struct drm_gpu_scheduler *sched;
+ struct task_struct *last_user;
+ long ret = timeout;
+
+ if (!entity->rq)
+ return 0;
+
+ sched = entity->rq->sched;
+ /*
+ * The client will not queue more IBs during this fini; consume the existing
+ * queued IBs or discard them on SIGKILL.
+ */
+ if (current->flags & PF_EXITING) {
+ if (timeout)
+ ret = wait_event_timeout(
+ sched->job_scheduled,
+ drm_sched_entity_is_idle(entity),
+ timeout);
+ } else {
+ wait_event_killable(sched->job_scheduled,
+ drm_sched_entity_is_idle(entity));
+ }
+
+ /* For a killed process, disable any further IB enqueues right now */
+ last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
+ if ((!last_user || last_user == current->group_leader) &&
+ (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
+ drm_sched_entity_kill(entity);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_sched_entity_flush);
+
+/**
+ * drm_sched_entity_fini - Destroy a context entity
+ *
+ * @entity: scheduler entity
+ *
+ * Cleans up @entity which has been initialized by drm_sched_entity_init().
+ *
+ * If there are potentially jobs still in flight or getting newly queued,
+ * drm_sched_entity_flush() must be called first. This function then goes over
+ * the entity and signals all jobs with an error code if the process was killed.
+ */
+void drm_sched_entity_fini(struct drm_sched_entity *entity)
+{
+ /*
+ * If consumption of existing IBs wasn't completed, forcefully remove them
+ * here. This also makes sure that the scheduler won't touch this entity any
+ * more.
+ */
+ drm_sched_entity_kill(entity);
+
+ if (entity->dependency) {
+ dma_fence_remove_callback(entity->dependency, &entity->cb);
+ dma_fence_put(entity->dependency);
+ entity->dependency = NULL;
+ }
+
+ dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
+ RCU_INIT_POINTER(entity->last_scheduled, NULL);
+}
+EXPORT_SYMBOL(drm_sched_entity_fini);
+
+/**
+ * drm_sched_entity_destroy - Destroy a context entity
+ * @entity: scheduler entity
+ *
+ * Calls drm_sched_entity_flush() and drm_sched_entity_fini() as a
+ * convenience wrapper.
+ */
+void drm_sched_entity_destroy(struct drm_sched_entity *entity)
+{
+ drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
+ drm_sched_entity_fini(entity);
+}
+EXPORT_SYMBOL(drm_sched_entity_destroy);
+
+/* drm_sched_entity_clear_dep - callback to clear the entity's dependency */
+static void drm_sched_entity_clear_dep(struct dma_fence *f,
+ struct dma_fence_cb *cb)
+{
+ struct drm_sched_entity *entity =
+ container_of(cb, struct drm_sched_entity, cb);
+
+ entity->dependency = NULL;
+ dma_fence_put(f);
+}
+
+/*
+ * drm_sched_entity_wakeup - callback to clear the entity's dependency and
+ * wake up the scheduler
+ */
+static void drm_sched_entity_wakeup(struct dma_fence *f,
+ struct dma_fence_cb *cb)
+{
+ struct drm_sched_entity *entity =
+ container_of(cb, struct drm_sched_entity, cb);
+
+ drm_sched_entity_clear_dep(f, cb);
+ drm_sched_wakeup_if_can_queue(entity->rq->sched);
+}
+
+/**
+ * drm_sched_entity_set_priority - Sets priority of the entity
+ *
+ * @entity: scheduler entity
+ * @priority: scheduler priority
+ *
+ * Update the priority of the runqueues used for the entity.
+ */
+void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
+ enum drm_sched_priority priority)
+{
+ spin_lock(&entity->rq_lock);
+ entity->priority = priority;
+ spin_unlock(&entity->rq_lock);
+}
+EXPORT_SYMBOL(drm_sched_entity_set_priority);
+
+/*
+ * Add a callback to the current dependency of the entity to wake up the
+ * scheduler when the entity becomes available.
+ */
+static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
+{
+ struct drm_gpu_scheduler *sched = entity->rq->sched;
+ struct dma_fence *fence = entity->dependency;
+ struct drm_sched_fence *s_fence;
+
+ if (fence->context == entity->fence_context ||
+ fence->context == entity->fence_context + 1) {
+ /*
+ * Fence is a scheduled/finished fence from a job
+ * which belongs to the same entity; we can ignore
+ * fences from ourselves
+ */
+ dma_fence_put(entity->dependency);
+ return false;
+ }
+
+ s_fence = to_drm_sched_fence(fence);
+ if (!fence->error && s_fence && s_fence->sched == sched &&
+ !test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags)) {
+
+ /*
+ * Fence is from the same scheduler, only need to wait for
+ * it to be scheduled
+ */
+ fence = dma_fence_get(&s_fence->scheduled);
+ dma_fence_put(entity->dependency);
+ entity->dependency = fence;
+ if (!dma_fence_add_callback(fence, &entity->cb,
+ drm_sched_entity_clear_dep))
+ return true;
+
+ /* Ignore it when it is already scheduled */
+ dma_fence_put(fence);
+ return false;
+ }
+
+ if (!dma_fence_add_callback(entity->dependency, &entity->cb,
+ drm_sched_entity_wakeup))
+ return true;
+
+ dma_fence_put(entity->dependency);
+ return false;
+}
+
+static struct dma_fence *
+drm_sched_job_dependency(struct drm_sched_job *job,
+ struct drm_sched_entity *entity)
+{
+ struct dma_fence *f;
+
+ /* We keep the fence around, so we can iterate over all dependencies
+ * in drm_sched_entity_kill_jobs_cb() to ensure all deps are signaled
+ * before killing the job.
+ */
+ f = xa_load(&job->dependencies, job->last_dependency);
+ if (f) {
+ job->last_dependency++;
+ return dma_fence_get(f);
+ }
+
+ if (job->sched->ops->prepare_job)
+ return job->sched->ops->prepare_job(job, entity);
+
+ return NULL;
+}
+
+struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
+{
+ struct drm_sched_job *sched_job;
+
+ sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
+ if (!sched_job)
+ return NULL;
+
+ while ((entity->dependency =
+ drm_sched_job_dependency(sched_job, entity))) {
+ trace_drm_sched_job_wait_dep(sched_job, entity->dependency);
+
+ if (drm_sched_entity_add_dependency_cb(entity))
+ return NULL;
+ }
+
+ /* skip jobs from entity that marked guilty */
+ if (entity->guilty && atomic_read(entity->guilty))
+ dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);
+
+ dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
+ rcu_assign_pointer(entity->last_scheduled,
+ dma_fence_get(&sched_job->s_fence->finished));
+
+ /*
+ * If the queue is empty we allow drm_sched_entity_select_rq() to
+ * locklessly access ->last_scheduled. This only works if we set the
+ * pointer before we dequeue and if we have a write barrier here.
+ */
+ smp_wmb();
+
+ spsc_queue_pop(&entity->job_queue);
+
+ /*
+ * Update the entity's location in the min heap according to
+ * the timestamp of the next job, if any.
+ */
+ if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) {
+ struct drm_sched_job *next;
+
+ next = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
+ if (next)
+ drm_sched_rq_update_fifo(entity, next->submit_ts);
+ }
+
+ /* Jobs and entities might have different lifecycles. Since we're
+ * removing the job from the entity's queue, set the job's entity pointer
+ * to NULL to prevent any future access of the entity through this job.
+ */
+ sched_job->entity = NULL;
+
+ return sched_job;
+}
+
+void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
+{
+ struct dma_fence *fence;
+ struct drm_gpu_scheduler *sched;
+ struct drm_sched_rq *rq;
+
+ /* single possible engine and already selected */
+ if (!entity->sched_list)
+ return;
+
+ /* queue non-empty, stay on the same engine */
+ if (spsc_queue_count(&entity->job_queue))
+ return;
+
+ /*
+ * Only when the queue is empty are we guaranteed that the scheduler
+ * thread cannot change ->last_scheduled. To enforce ordering we need
+ * a read barrier here. See drm_sched_entity_pop_job() for the other
+ * side.
+ */
+ smp_rmb();
+
+ fence = rcu_dereference_check(entity->last_scheduled, true);
+
+ /* stay on the same engine if the previous job hasn't finished */
+ if (fence && !dma_fence_is_signaled(fence))
+ return;
+
+ spin_lock(&entity->rq_lock);
+ sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
+ rq = sched ? &sched->sched_rq[entity->priority] : NULL;
+ if (rq != entity->rq) {
+ drm_sched_rq_remove_entity(entity->rq, entity);
+ entity->rq = rq;
+ }
+ spin_unlock(&entity->rq_lock);
+
+ if (entity->num_sched_list == 1)
+ entity->sched_list = NULL;
+}
+
+/**
+ * drm_sched_entity_push_job - Submit a job to the entity's job queue
+ * @sched_job: job to submit
+ *
+ * Note: To guarantee that the order of insertion into the queue matches the
+ * job's fence sequence number, this function should be called with
+ * drm_sched_job_arm() under a common lock for the struct drm_sched_entity
+ * that was set up for @sched_job in drm_sched_job_init().
+ */
+void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
+{
+ struct drm_sched_entity *entity = sched_job->entity;
+ bool first;
+ ktime_t submit_ts;
+
+ trace_drm_sched_job(sched_job, entity);
+ atomic_inc(entity->rq->sched->score);
+ WRITE_ONCE(entity->last_user, current->group_leader);
+
+ /*
+ * After the sched_job is pushed into the entity queue, it may be
+ * completed and freed up at any time. We can no longer access it.
+ * Make sure to set the submit_ts first, to avoid a race.
+ */
+ sched_job->submit_ts = submit_ts = ktime_get();
+ first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
+
+ /* first job wakes up scheduler */
+ if (first) {
+ /* Add the entity to the run queue */
+ spin_lock(&entity->rq_lock);
+ if (entity->stopped) {
+ spin_unlock(&entity->rq_lock);
+
+ DRM_ERROR("Trying to push to a killed entity\n");
+ return;
+ }
+
+ drm_sched_rq_add_entity(entity->rq, entity);
+ spin_unlock(&entity->rq_lock);
+
+ if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
+ drm_sched_rq_update_fifo(entity, submit_ts);
+
+ drm_sched_wakeup_if_can_queue(entity->rq->sched);
+ }
+}
+EXPORT_SYMBOL(drm_sched_entity_push_job);
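
Editor's note: to tie the entity API above together, a hedged driver-side sketch of the intended lifecycle: initialize the entity once per context, then init/arm/push each job, and destroy the entity on teardown. The drm_sched_* calls are the exported API from this file and sched_main.c; the my_ctx_* wrappers and their error handling are hypothetical, and the caller is assumed to serialize drm_sched_job_arm() and drm_sched_entity_push_job() as their documentation requires.

/* Hypothetical driver-side sketch of the entity/job lifecycle (not part of
 * this commit).
 */
#include <drm/gpu_scheduler.h>

int my_ctx_init(struct drm_sched_entity *entity,
		struct drm_gpu_scheduler *sched)
{
	/* Bind the entity to a single scheduler at normal priority. */
	return drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_NORMAL,
				     &sched, 1, NULL);
}

int my_ctx_submit(struct drm_sched_entity *entity,
		  struct drm_sched_job *job, void *owner)
{
	int ret = drm_sched_job_init(job, entity, owner);

	if (ret)
		return ret;

	drm_sched_job_arm(job);		/* initializes the scheduled/finished fences */
	drm_sched_entity_push_job(job);	/* queues the job and wakes the scheduler */
	return 0;
}

void my_ctx_fini(struct drm_sched_entity *entity)
{
	/* Flushes outstanding jobs, then tears the entity down. */
	drm_sched_entity_destroy(entity);
}
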
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
new file mode 100644
index 000000000..06cedfe4b
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+
+#include <drm/gpu_scheduler.h>
+
+static struct kmem_cache *sched_fence_slab;
+
+static int __init drm_sched_fence_slab_init(void)
+{
+ sched_fence_slab = kmem_cache_create(
+ "drm_sched_fence", sizeof(struct drm_sched_fence), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!sched_fence_slab)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void __exit drm_sched_fence_slab_fini(void)
+{
+ rcu_barrier();
+ kmem_cache_destroy(sched_fence_slab);
+}
+
+static void drm_sched_fence_set_parent(struct drm_sched_fence *s_fence,
+ struct dma_fence *fence)
+{
+ /*
+ * smp_store_release() to ensure another thread racing us
+ * in drm_sched_fence_set_deadline_finished() sees the
+ * fence's parent set before test_bit()
+ */
+ smp_store_release(&s_fence->parent, dma_fence_get(fence));
+ if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT,
+ &s_fence->finished.flags))
+ dma_fence_set_deadline(fence, s_fence->deadline);
+}
+
+void drm_sched_fence_scheduled(struct drm_sched_fence *fence,
+ struct dma_fence *parent)
+{
+ /* Set the parent before signaling the scheduled fence, such that
+ * any waiter expecting the parent to be filled after the job has
+ * been scheduled (which is the case for drivers delegating waits
+ * to some firmware) doesn't have to busy wait for parent to show
+ * up.
+ */
+ if (!IS_ERR_OR_NULL(parent))
+ drm_sched_fence_set_parent(fence, parent);
+
+ dma_fence_signal(&fence->scheduled);
+}
+
+void drm_sched_fence_finished(struct drm_sched_fence *fence, int result)
+{
+ if (result)
+ dma_fence_set_error(&fence->finished, result);
+ dma_fence_signal(&fence->finished);
+}
+
+static const char *drm_sched_fence_get_driver_name(struct dma_fence *fence)
+{
+ return "drm_sched";
+}
+
+static const char *drm_sched_fence_get_timeline_name(struct dma_fence *f)
+{
+ struct drm_sched_fence *fence = to_drm_sched_fence(f);
+ return (const char *)fence->sched->name;
+}
+
+static void drm_sched_fence_free_rcu(struct rcu_head *rcu)
+{
+ struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
+ struct drm_sched_fence *fence = to_drm_sched_fence(f);
+
+ if (!WARN_ON_ONCE(!fence))
+ kmem_cache_free(sched_fence_slab, fence);
+}
+
+/**
+ * drm_sched_fence_free - free up an uninitialized fence
+ *
+ * @fence: fence to free
+ *
+ * Free up the fence memory. Should only be used if drm_sched_fence_init()
+ * has not been called yet.
+ */
+void drm_sched_fence_free(struct drm_sched_fence *fence)
+{
+ /* This function should not be called if the fence has been initialized. */
+ if (!WARN_ON_ONCE(fence->sched))
+ kmem_cache_free(sched_fence_slab, fence);
+}
+
+/**
+ * drm_sched_fence_release_scheduled - callback that fence can be freed
+ *
+ * @f: fence
+ *
+ * This function is called when the reference count becomes zero.
+ * It just RCU schedules freeing up the fence.
+ */
+static void drm_sched_fence_release_scheduled(struct dma_fence *f)
+{
+ struct drm_sched_fence *fence = to_drm_sched_fence(f);
+
+ dma_fence_put(fence->parent);
+ call_rcu(&fence->finished.rcu, drm_sched_fence_free_rcu);
+}
+
+/**
+ * drm_sched_fence_release_finished - drop extra reference
+ *
+ * @f: fence
+ *
+ * Drop the extra reference from the scheduled fence to the base fence.
+ */
+static void drm_sched_fence_release_finished(struct dma_fence *f)
+{
+ struct drm_sched_fence *fence = to_drm_sched_fence(f);
+
+ dma_fence_put(&fence->scheduled);
+}
+
+static void drm_sched_fence_set_deadline_finished(struct dma_fence *f,
+ ktime_t deadline)
+{
+ struct drm_sched_fence *fence = to_drm_sched_fence(f);
+ struct dma_fence *parent;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fence->lock, flags);
+
+ /* If we already have an earlier deadline, keep it: */
+ if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT, &f->flags) &&
+ ktime_before(fence->deadline, deadline)) {
+ spin_unlock_irqrestore(&fence->lock, flags);
+ return;
+ }
+
+ fence->deadline = deadline;
+ set_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT, &f->flags);
+
+ spin_unlock_irqrestore(&fence->lock, flags);
+
+ /*
+ * smp_load_acquire() to ensure that if we are racing another
+ * thread calling drm_sched_fence_set_parent(), that we see
+ * the parent set before it calls test_bit(HAS_DEADLINE_BIT)
+ */
+ parent = smp_load_acquire(&fence->parent);
+ if (parent)
+ dma_fence_set_deadline(parent, deadline);
+}
+
+static const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
+ .get_driver_name = drm_sched_fence_get_driver_name,
+ .get_timeline_name = drm_sched_fence_get_timeline_name,
+ .release = drm_sched_fence_release_scheduled,
+};
+
+static const struct dma_fence_ops drm_sched_fence_ops_finished = {
+ .get_driver_name = drm_sched_fence_get_driver_name,
+ .get_timeline_name = drm_sched_fence_get_timeline_name,
+ .release = drm_sched_fence_release_finished,
+ .set_deadline = drm_sched_fence_set_deadline_finished,
+};
+
+struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
+{
+ if (f->ops == &drm_sched_fence_ops_scheduled)
+ return container_of(f, struct drm_sched_fence, scheduled);
+
+ if (f->ops == &drm_sched_fence_ops_finished)
+ return container_of(f, struct drm_sched_fence, finished);
+
+ return NULL;
+}
+EXPORT_SYMBOL(to_drm_sched_fence);
+
+struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity,
+ void *owner)
+{
+ struct drm_sched_fence *fence = NULL;
+
+ fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
+ if (fence == NULL)
+ return NULL;
+
+ fence->owner = owner;
+ spin_lock_init(&fence->lock);
+
+ return fence;
+}
+
+void drm_sched_fence_init(struct drm_sched_fence *fence,
+ struct drm_sched_entity *entity)
+{
+ unsigned seq;
+
+ fence->sched = entity->rq->sched;
+ seq = atomic_inc_return(&entity->fence_seq);
+ dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled,
+ &fence->lock, entity->fence_context, seq);
+ dma_fence_init(&fence->finished, &drm_sched_fence_ops_finished,
+ &fence->lock, entity->fence_context + 1, seq);
+}
+
+module_init(drm_sched_fence_slab_init);
+module_exit(drm_sched_fence_slab_fini);
+
+MODULE_DESCRIPTION("DRM GPU scheduler");
+MODULE_LICENSE("GPL and additional rights");
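
Editor's note: as a brief illustration of the two-fence model implemented above (each job's drm_sched_fence carries a scheduled fence on entity->fence_context and a finished fence on fence_context + 1), a hedged sketch of how a driver might publish the finished fence for others to wait on. my_attach_finished_fence() is hypothetical and assumes the reservation object is locked with a fence slot already reserved via dma_resv_reserve_fences().

/* Hypothetical sketch only: publishing a job's finished fence. */
#include <drm/gpu_scheduler.h>
#include <linux/dma-resv.h>

static void my_attach_finished_fence(struct drm_sched_job *job,
				     struct dma_resv *resv)
{
	struct drm_sched_fence *s_fence = job->s_fence;

	/* &s_fence->scheduled signals when the job reaches the hardware;
	 * &s_fence->finished signals on completion (or carries an error).
	 * Other users should wait on the finished fence.
	 */
	dma_resv_add_fence(resv, &s_fence->finished, DMA_RESV_USAGE_WRITE);
}
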
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
new file mode 100644
index 000000000..5a3a622fc
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -0,0 +1,1208 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/**
+ * DOC: Overview
+ *
+ * The GPU scheduler provides entities which allow userspace to push jobs
+ * into software queues which are then scheduled on a hardware run queue.
+ * The software queues have a priority among them. The scheduler selects the entities
+ * from the run queue using a FIFO. The scheduler provides dependency handling
+ * features among jobs. The driver is supposed to provide callback functions for
+ * backend operations to the scheduler like submitting a job to hardware run queue,
+ * returning the dependencies of a job etc.
+ *
+ * The organisation of the scheduler is the following:
+ *
+ * 1. Each hw run queue has one scheduler
+ * 2. Each scheduler has multiple run queues with different priorities
+ * (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
+ * 3. Each scheduler run queue has a queue of entities to schedule
+ * 4. Entities themselves maintain a queue of jobs that will be scheduled on
+ * the hardware.
+ *
+ * The jobs in an entity are always scheduled in the order in which they were
+ * pushed.
+ *
+ * Note that once a job has been taken from the entity's queue and pushed to
+ * the hardware, i.e. the pending queue, the entity must not be referenced
+ * anymore through the job's entity pointer.
+ */
+
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include <linux/dma-resv.h>
+#include <uapi/linux/sched/types.h>
+
+#include <drm/drm_print.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_syncobj.h>
+#include <drm/gpu_scheduler.h>
+#include <drm/spsc_queue.h>
+
+#define CREATE_TRACE_POINTS
+#include "gpu_scheduler_trace.h"
+
+#define to_drm_sched_job(sched_job) \
+ container_of((sched_job), struct drm_sched_job, queue_node)
+
+int drm_sched_policy = DRM_SCHED_POLICY_FIFO;
+
+/**
+ * DOC: sched_policy (int)
+ * Used to override the default scheduling policy for entities in a run queue.
+ */
+MODULE_PARM_DESC(sched_policy, "Specify the scheduling policy for entities on a run-queue, " __stringify(DRM_SCHED_POLICY_RR) " = Round Robin, " __stringify(DRM_SCHED_POLICY_FIFO) " = FIFO (default).");
+module_param_named(sched_policy, drm_sched_policy, int, 0444);
+
+static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a,
+ const struct rb_node *b)
+{
+ struct drm_sched_entity *ent_a = rb_entry((a), struct drm_sched_entity, rb_tree_node);
+ struct drm_sched_entity *ent_b = rb_entry((b), struct drm_sched_entity, rb_tree_node);
+
+ return ktime_before(ent_a->oldest_job_waiting, ent_b->oldest_job_waiting);
+}
+
+static inline void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity)
+{
+ struct drm_sched_rq *rq = entity->rq;
+
+ if (!RB_EMPTY_NODE(&entity->rb_tree_node)) {
+ rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
+ RB_CLEAR_NODE(&entity->rb_tree_node);
+ }
+}
+
+void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts)
+{
+ /*
+ * Both locks need to be grabbed, one to protect from entity->rq change
+ * for entity from within concurrent drm_sched_entity_select_rq and the
+ * other to update the rb tree structure.
+ */
+ spin_lock(&entity->rq_lock);
+ spin_lock(&entity->rq->lock);
+
+ drm_sched_rq_remove_fifo_locked(entity);
+
+ entity->oldest_job_waiting = ts;
+
+ rb_add_cached(&entity->rb_tree_node, &entity->rq->rb_tree_root,
+ drm_sched_entity_compare_before);
+
+ spin_unlock(&entity->rq->lock);
+ spin_unlock(&entity->rq_lock);
+}
+
+/**
+ * drm_sched_rq_init - initialize a given run queue struct
+ *
+ * @sched: scheduler instance to associate with this run queue
+ * @rq: scheduler run queue
+ *
+ * Initializes a scheduler runqueue.
+ */
+static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
+ struct drm_sched_rq *rq)
+{
+ spin_lock_init(&rq->lock);
+ INIT_LIST_HEAD(&rq->entities);
+ rq->rb_tree_root = RB_ROOT_CACHED;
+ rq->current_entity = NULL;
+ rq->sched = sched;
+}
+
+/**
+ * drm_sched_rq_add_entity - add an entity
+ *
+ * @rq: scheduler run queue
+ * @entity: scheduler entity
+ *
+ * Adds a scheduler entity to the run queue.
+ */
+void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
+ struct drm_sched_entity *entity)
+{
+ if (!list_empty(&entity->list))
+ return;
+
+ spin_lock(&rq->lock);
+
+ atomic_inc(rq->sched->score);
+ list_add_tail(&entity->list, &rq->entities);
+
+ spin_unlock(&rq->lock);
+}
+
+/**
+ * drm_sched_rq_remove_entity - remove an entity
+ *
+ * @rq: scheduler run queue
+ * @entity: scheduler entity
+ *
+ * Removes a scheduler entity from the run queue.
+ */
+void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
+ struct drm_sched_entity *entity)
+{
+ if (list_empty(&entity->list))
+ return;
+
+ spin_lock(&rq->lock);
+
+ atomic_dec(rq->sched->score);
+ list_del_init(&entity->list);
+
+ if (rq->current_entity == entity)
+ rq->current_entity = NULL;
+
+ if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
+ drm_sched_rq_remove_fifo_locked(entity);
+
+ spin_unlock(&rq->lock);
+}
+
+/**
+ * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
+ *
+ * @rq: scheduler run queue to check.
+ *
+ * Try to find a ready entity, returns NULL if none found.
+ */
+static struct drm_sched_entity *
+drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
+{
+ struct drm_sched_entity *entity;
+
+ spin_lock(&rq->lock);
+
+ entity = rq->current_entity;
+ if (entity) {
+ list_for_each_entry_continue(entity, &rq->entities, list) {
+ if (drm_sched_entity_is_ready(entity)) {
+ rq->current_entity = entity;
+ reinit_completion(&entity->entity_idle);
+ spin_unlock(&rq->lock);
+ return entity;
+ }
+ }
+ }
+
+ list_for_each_entry(entity, &rq->entities, list) {
+
+ if (drm_sched_entity_is_ready(entity)) {
+ rq->current_entity = entity;
+ reinit_completion(&entity->entity_idle);
+ spin_unlock(&rq->lock);
+ return entity;
+ }
+
+ if (entity == rq->current_entity)
+ break;
+ }
+
+ spin_unlock(&rq->lock);
+
+ return NULL;
+}
+
+/**
+ * drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run
+ *
+ * @rq: scheduler run queue to check.
+ *
+ * Find oldest waiting ready entity, returns NULL if none found.
+ */
+static struct drm_sched_entity *
+drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
+{
+ struct rb_node *rb;
+
+ spin_lock(&rq->lock);
+ for (rb = rb_first_cached(&rq->rb_tree_root); rb; rb = rb_next(rb)) {
+ struct drm_sched_entity *entity;
+
+ entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node);
+ if (drm_sched_entity_is_ready(entity)) {
+ rq->current_entity = entity;
+ reinit_completion(&entity->entity_idle);
+ break;
+ }
+ }
+ spin_unlock(&rq->lock);
+
+ return rb ? rb_entry(rb, struct drm_sched_entity, rb_tree_node) : NULL;
+}
+
+/**
+ * drm_sched_job_done - complete a job
+ * @s_job: pointer to the job which is done
+ * @result: the job's completion status (0 or a negative error code)
+ *
+ * Finish the job's fence and wake up the worker thread.
+ */
+static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
+{
+ struct drm_sched_fence *s_fence = s_job->s_fence;
+ struct drm_gpu_scheduler *sched = s_fence->sched;
+
+ atomic_dec(&sched->hw_rq_count);
+ atomic_dec(sched->score);
+
+ trace_drm_sched_process_job(s_fence);
+
+ dma_fence_get(&s_fence->finished);
+ drm_sched_fence_finished(s_fence, result);
+ dma_fence_put(&s_fence->finished);
+ wake_up_interruptible(&sched->wake_up_worker);
+}
+
+/**
+ * drm_sched_job_done_cb - the callback for a done job
+ * @f: fence
+ * @cb: fence callbacks
+ */
+static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+ struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
+
+ drm_sched_job_done(s_job, f->error);
+}
+
+/**
+ * drm_sched_start_timeout - start timeout for reset worker
+ *
+ * @sched: scheduler instance to start the worker for
+ *
+ * Start the timeout for the given scheduler.
+ */
+static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
+{
+ if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
+ !list_empty(&sched->pending_list))
+ queue_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout);
+}
+
+/**
+ * drm_sched_fault - immediately start timeout handler
+ *
+ * @sched: scheduler where the timeout handling should be started.
+ *
+ * Start timeout handling immediately when the driver detects a hardware fault.
+ */
+void drm_sched_fault(struct drm_gpu_scheduler *sched)
+{
+ if (sched->timeout_wq)
+ mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0);
+}
+EXPORT_SYMBOL(drm_sched_fault);
+
+/**
+ * drm_sched_suspend_timeout - Suspend scheduler job timeout
+ *
+ * @sched: scheduler instance for which to suspend the timeout
+ *
+ * Suspend the delayed work timeout for the scheduler. This is done by
+ * modifying the delayed work timeout to an arbitrarily large value,
+ * MAX_SCHEDULE_TIMEOUT in this case.
+ *
+ * Returns the timeout remaining
+ *
+ */
+unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
+{
+ unsigned long sched_timeout, now = jiffies;
+
+ sched_timeout = sched->work_tdr.timer.expires;
+
+ /*
+ * Modify the timeout to an arbitrarily large value. This also prevents the
+ * timeout from being restarted when new submissions arrive.
+ */
+ if (mod_delayed_work(sched->timeout_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
+ && time_after(sched_timeout, now))
+ return sched_timeout - now;
+ else
+ return sched->timeout;
+}
+EXPORT_SYMBOL(drm_sched_suspend_timeout);
+
+/**
+ * drm_sched_resume_timeout - Resume scheduler job timeout
+ *
+ * @sched: scheduler instance for which to resume the timeout
+ * @remaining: remaining timeout
+ *
+ * Resume the delayed work timeout for the scheduler.
+ */
+void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
+ unsigned long remaining)
+{
+ spin_lock(&sched->job_list_lock);
+
+ if (list_empty(&sched->pending_list))
+ cancel_delayed_work(&sched->work_tdr);
+ else
+ mod_delayed_work(sched->timeout_wq, &sched->work_tdr, remaining);
+
+ spin_unlock(&sched->job_list_lock);
+}
+EXPORT_SYMBOL(drm_sched_resume_timeout);
+
+static void drm_sched_job_begin(struct drm_sched_job *s_job)
+{
+ struct drm_gpu_scheduler *sched = s_job->sched;
+
+ spin_lock(&sched->job_list_lock);
+ list_add_tail(&s_job->list, &sched->pending_list);
+ drm_sched_start_timeout(sched);
+ spin_unlock(&sched->job_list_lock);
+}
+
+static void drm_sched_job_timedout(struct work_struct *work)
+{
+ struct drm_gpu_scheduler *sched;
+ struct drm_sched_job *job;
+ enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_NOMINAL;
+
+ sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
+
+ /* Protects against concurrent deletion in drm_sched_get_cleanup_job */
+ spin_lock(&sched->job_list_lock);
+ job = list_first_entry_or_null(&sched->pending_list,
+ struct drm_sched_job, list);
+
+ if (job) {
+ /*
+ * Remove the bad job so it cannot be freed by concurrent
+ * drm_sched_cleanup_jobs. It will be reinserted back after sched->thread
+ * is parked at which point it's safe.
+ */
+ list_del_init(&job->list);
+ spin_unlock(&sched->job_list_lock);
+
+ status = job->sched->ops->timedout_job(job);
+
+ /*
+ * Guilty job did complete and hence needs to be manually removed
+ * See drm_sched_stop doc.
+ */
+ if (sched->free_guilty) {
+ job->sched->ops->free_job(job);
+ sched->free_guilty = false;
+ }
+ } else {
+ spin_unlock(&sched->job_list_lock);
+ }
+
+ if (status != DRM_GPU_SCHED_STAT_ENODEV) {
+ spin_lock(&sched->job_list_lock);
+ drm_sched_start_timeout(sched);
+ spin_unlock(&sched->job_list_lock);
+ }
+}
+
+/**
+ * drm_sched_stop - stop the scheduler
+ *
+ * @sched: scheduler instance
+ * @bad: job which caused the time out
+ *
+ * Stop the scheduler, and also remove and free all completed jobs.
+ * Note: the bad job will not be freed as it might be used later, so it is the
+ * caller's responsibility to release it manually if it is no longer part of
+ * the pending list.
+ *
+ */
+void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
+{
+ struct drm_sched_job *s_job, *tmp;
+
+ kthread_park(sched->thread);
+
+ /*
+ * Reinsert back the bad job here - now it's safe as
+ * drm_sched_get_cleanup_job cannot race against us and release the
+ * bad job at this point - we parked (waited for) any in progress
+ * (earlier) cleanups and drm_sched_get_cleanup_job will not be called
+ * now until the scheduler thread is unparked.
+ */
+ if (bad && bad->sched == sched)
+ /*
+ * Add at the head of the queue to reflect it was the earliest
+ * job extracted.
+ */
+ list_add(&bad->list, &sched->pending_list);
+
+ /*
+ * Iterate the job list from later to earlier and either deactivate the jobs'
+ * HW callbacks or remove them from the pending list if they have already
+ * signaled.
+ * This iteration is thread safe as the sched thread is stopped.
+ */
+ list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
+ list) {
+ if (s_job->s_fence->parent &&
+ dma_fence_remove_callback(s_job->s_fence->parent,
+ &s_job->cb)) {
+ dma_fence_put(s_job->s_fence->parent);
+ s_job->s_fence->parent = NULL;
+ atomic_dec(&sched->hw_rq_count);
+ } else {
+ /*
+ * remove job from pending_list.
+ * Locking here is for concurrent resume timeout
+ */
+ spin_lock(&sched->job_list_lock);
+ list_del_init(&s_job->list);
+ spin_unlock(&sched->job_list_lock);
+
+ /*
+ * Wait for job's HW fence callback to finish using s_job
+ * before releasing it.
+ *
+ * Job is still alive so fence refcount at least 1
+ */
+ dma_fence_wait(&s_job->s_fence->finished, false);
+
+ /*
+ * We must keep bad job alive for later use during
+ * recovery by some of the drivers but leave a hint
+ * that the guilty job must be released.
+ */
+ if (bad != s_job)
+ sched->ops->free_job(s_job);
+ else
+ sched->free_guilty = true;
+ }
+ }
+
+ /*
+ * Stop any pending timer in flight as we rearm it in drm_sched_start(). This
+ * prevents the pending timeout work in progress from firing right away after
+ * this TDR finished and before the newly restarted jobs had a chance to
+ * complete.
+ */
+ cancel_delayed_work(&sched->work_tdr);
+}
+
+EXPORT_SYMBOL(drm_sched_stop);
+
+/**
+ * drm_sched_start - recover jobs after a reset
+ *
+ * @sched: scheduler instance
+ * @full_recovery: proceed with complete sched restart
+ *
+ */
+void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
+{
+ struct drm_sched_job *s_job, *tmp;
+ int r;
+
+ /*
+ * Locking the list is not required here as the sched thread is parked
+ * so no new jobs are being inserted or removed. Also concurrent
+ * GPU recovers can't run in parallel.
+ */
+ list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
+ struct dma_fence *fence = s_job->s_fence->parent;
+
+ atomic_inc(&sched->hw_rq_count);
+
+ if (!full_recovery)
+ continue;
+
+ if (fence) {
+ r = dma_fence_add_callback(fence, &s_job->cb,
+ drm_sched_job_done_cb);
+ if (r == -ENOENT)
+ drm_sched_job_done(s_job, fence->error);
+ else if (r)
+ DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
+ r);
+ } else
+ drm_sched_job_done(s_job, -ECANCELED);
+ }
+
+ if (full_recovery) {
+ spin_lock(&sched->job_list_lock);
+ drm_sched_start_timeout(sched);
+ spin_unlock(&sched->job_list_lock);
+ }
+
+ kthread_unpark(sched->thread);
+}
+EXPORT_SYMBOL(drm_sched_start);
+
+/**
+ * drm_sched_resubmit_jobs - Deprecated, don't use in new code!
+ *
+ * @sched: scheduler instance
+ *
+ * Re-submitting jobs was a concept AMD came up with as a cheap way to
+ * implement recovery after a job timeout.
+ *
+ * This turned out to not work very well. First of all there are many
+ * problems with the dma_fence implementation and requirements. Either the
+ * implementation is risking deadlocks with core memory management or violating
+ * documented implementation details of the dma_fence object.
+ *
+ * Drivers can still save and restore their state for recovery operations, but
+ * we shouldn't make this a general scheduler feature around the dma_fence
+ * interface.
+ */
+void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
+{
+ struct drm_sched_job *s_job, *tmp;
+ uint64_t guilty_context;
+ bool found_guilty = false;
+ struct dma_fence *fence;
+
+ list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
+ struct drm_sched_fence *s_fence = s_job->s_fence;
+
+ if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
+ found_guilty = true;
+ guilty_context = s_job->s_fence->scheduled.context;
+ }
+
+ if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
+ dma_fence_set_error(&s_fence->finished, -ECANCELED);
+
+ fence = sched->ops->run_job(s_job);
+
+ if (IS_ERR_OR_NULL(fence)) {
+ if (IS_ERR(fence))
+ dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
+
+ s_job->s_fence->parent = NULL;
+ } else {
+
+ s_job->s_fence->parent = dma_fence_get(fence);
+
+ /* Drop for original kref_init */
+ dma_fence_put(fence);
+ }
+ }
+}
+EXPORT_SYMBOL(drm_sched_resubmit_jobs);
+
+/**
+ * drm_sched_job_init - init a scheduler job
+ * @job: scheduler job to init
+ * @entity: scheduler entity to use
+ * @owner: job owner for debugging
+ *
+ * Refer to drm_sched_entity_push_job() documentation
+ * for locking considerations.
+ *
+ * Drivers must make sure to call drm_sched_job_cleanup() if this function
+ * returns successfully, even when @job is aborted before drm_sched_job_arm()
+ * is called.
+ *
+ * WARNING: amdgpu abuses &drm_sched.ready to signal when the hardware
+ * has died, which can mean that there's no valid runqueue for an @entity.
+ * This function returns -ENOENT in this case (which probably should be -EIO
+ * as a more meaningful return value).
+ *
+ * Returns 0 for success, negative error code otherwise.
+ */
+int drm_sched_job_init(struct drm_sched_job *job,
+ struct drm_sched_entity *entity,
+ void *owner)
+{
+ if (!entity->rq)
+ return -ENOENT;
+
+ job->entity = entity;
+ job->s_fence = drm_sched_fence_alloc(entity, owner);
+ if (!job->s_fence)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&job->list);
+
+ xa_init_flags(&job->dependencies, XA_FLAGS_ALLOC);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_sched_job_init);
+
+/**
+ * drm_sched_job_arm - arm a scheduler job for execution
+ * @job: scheduler job to arm
+ *
+ * This arms a scheduler job for execution. Specifically it initializes the
+ * &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
+ * or other places that need to track the completion of this job.
+ *
+ * Refer to drm_sched_entity_push_job() documentation for locking
+ * considerations.
+ *
+ * This can only be called if drm_sched_job_init() succeeded.
+ */
+void drm_sched_job_arm(struct drm_sched_job *job)
+{
+ struct drm_gpu_scheduler *sched;
+ struct drm_sched_entity *entity = job->entity;
+
+ BUG_ON(!entity);
+ drm_sched_entity_select_rq(entity);
+ sched = entity->rq->sched;
+
+ job->sched = sched;
+ job->s_priority = entity->rq - sched->sched_rq;
+ job->id = atomic64_inc_return(&sched->job_id_count);
+
+ drm_sched_fence_init(job->s_fence, job->entity);
+}
+EXPORT_SYMBOL(drm_sched_job_arm);
+
+/**
+ * drm_sched_job_add_dependency - adds the fence as a job dependency
+ * @job: scheduler job to add the dependencies to
+ * @fence: the dma_fence to add to the list of dependencies.
+ *
+ * Note that @fence is consumed in both the success and error cases.
+ *
+ * Returns:
+ * 0 on success, or an error on failing to expand the array.
+ */
+int drm_sched_job_add_dependency(struct drm_sched_job *job,
+ struct dma_fence *fence)
+{
+ struct dma_fence *entry;
+ unsigned long index;
+ u32 id = 0;
+ int ret;
+
+ if (!fence)
+ return 0;
+
+ /* Deduplicate if we already depend on a fence from the same context.
+ * This lets the size of the array of deps scale with the number of
+ * engines involved, rather than the number of BOs.
+ */
+ xa_for_each(&job->dependencies, index, entry) {
+ if (entry->context != fence->context)
+ continue;
+
+ if (dma_fence_is_later(fence, entry)) {
+ dma_fence_put(entry);
+ xa_store(&job->dependencies, index, fence, GFP_KERNEL);
+ } else {
+ dma_fence_put(fence);
+ }
+ return 0;
+ }
+
+ ret = xa_alloc(&job->dependencies, &id, fence, xa_limit_32b, GFP_KERNEL);
+ if (ret != 0)
+ dma_fence_put(fence);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_sched_job_add_dependency);
+
+/**
+ * drm_sched_job_add_syncobj_dependency - adds a syncobj's fence as a job dependency
+ * @job: scheduler job to add the dependencies to
+ * @file: drm file private pointer
+ * @handle: syncobj handle to lookup
+ * @point: timeline point
+ *
+ * This adds the fence matching the given syncobj to @job.
+ *
+ * Returns:
+ * 0 on success, or an error on failing to expand the array.
+ */
+int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
+ struct drm_file *file,
+ u32 handle,
+ u32 point)
+{
+ struct dma_fence *fence;
+ int ret;
+
+ ret = drm_syncobj_find_fence(file, handle, point, 0, &fence);
+ if (ret)
+ return ret;
+
+ return drm_sched_job_add_dependency(job, fence);
+}
+EXPORT_SYMBOL(drm_sched_job_add_syncobj_dependency);
+
+/**
+ * drm_sched_job_add_resv_dependencies - add all fences from the resv to the job
+ * @job: scheduler job to add the dependencies to
+ * @resv: the dma_resv object to get the fences from
+ * @usage: the dma_resv_usage to use to filter the fences
+ *
+ * This adds all fences matching the given usage from @resv to @job.
+ * Must be called with the @resv lock held.
+ *
+ * Returns:
+ * 0 on success, or an error on failing to expand the array.
+ */
+int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
+ struct dma_resv *resv,
+ enum dma_resv_usage usage)
+{
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
+ int ret;
+
+ dma_resv_assert_held(resv);
+
+ dma_resv_for_each_fence(&cursor, resv, usage, fence) {
+ /* Make sure to grab an additional ref on the added fence */
+ dma_fence_get(fence);
+ ret = drm_sched_job_add_dependency(job, fence);
+ if (ret) {
+ dma_fence_put(fence);
+ return ret;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(drm_sched_job_add_resv_dependencies);
+
+/**
+ * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job
+ * dependencies
+ * @job: scheduler job to add the dependencies to
+ * @obj: the gem object to add new dependencies from.
+ * @write: whether the job might write the object (so we need to depend on
+ * shared fences in the reservation object).
+ *
+ * This should be called after drm_gem_lock_reservations() on your array of
+ * GEM objects used in the job but before updating the reservations with your
+ * own fences.
+ *
+ * Returns:
+ * 0 on success, or an error on failing to expand the array.
+ */
+int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
+ struct drm_gem_object *obj,
+ bool write)
+{
+ return drm_sched_job_add_resv_dependencies(job, obj->resv,
+ dma_resv_usage_rw(write));
+}
+EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
+
+/**
+ * drm_sched_job_cleanup - clean up scheduler job resources
+ * @job: scheduler job to clean up
+ *
+ * Cleans up the resources allocated with drm_sched_job_init().
+ *
+ * Drivers should call this from their error unwind code if @job is aborted
+ * before drm_sched_job_arm() is called.
+ *
+ * After that point of no return @job is committed to be executed by the
+ * scheduler, and this function should be called from the
+ * &drm_sched_backend_ops.free_job callback.
+ */
+void drm_sched_job_cleanup(struct drm_sched_job *job)
+{
+ struct dma_fence *fence;
+ unsigned long index;
+
+ if (kref_read(&job->s_fence->finished.refcount)) {
+ /* drm_sched_job_arm() has been called */
+ dma_fence_put(&job->s_fence->finished);
+ } else {
+ /* aborted job before committing to run it */
+ drm_sched_fence_free(job->s_fence);
+ }
+
+ job->s_fence = NULL;
+
+ xa_for_each(&job->dependencies, index, fence) {
+ dma_fence_put(fence);
+ }
+ xa_destroy(&job->dependencies);
+}
+EXPORT_SYMBOL(drm_sched_job_cleanup);
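+
+/*
+ * Illustrative sketch of the unwind ordering described above; "my_job" is a
+ * hypothetical driver job embedding a struct drm_sched_job as "base" and
+ * setup_dependencies() is a hypothetical driver helper.
+ *
+ *   ret = drm_sched_job_init(&my_job->base, entity, owner);
+ *   if (ret)
+ *       return ret;
+ *
+ *   ret = setup_dependencies(my_job);
+ *   if (ret) {
+ *       // still before drm_sched_job_arm(), so cleanup is allowed here
+ *       drm_sched_job_cleanup(&my_job->base);
+ *       return ret;
+ *   }
+ *
+ *   drm_sched_job_arm(&my_job->base);        // point of no return
+ *   drm_sched_entity_push_job(&my_job->base);
+ */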
+
+/**
+ * drm_sched_can_queue - Can we queue more to the hardware?
+ * @sched: scheduler instance
+ *
+ * Return true if we can push more jobs to the hw, otherwise false.
+ */
+static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched)
+{
+ return atomic_read(&sched->hw_rq_count) <
+ sched->hw_submission_limit;
+}
+
+/**
+ * drm_sched_wakeup_if_can_queue - Wake up the scheduler
+ * @sched: scheduler instance
+ *
+ * Wake up the scheduler if we can queue jobs.
+ */
+void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched)
+{
+ if (drm_sched_can_queue(sched))
+ wake_up_interruptible(&sched->wake_up_worker);
+}
+
+/**
+ * drm_sched_select_entity - Select next entity to process
+ *
+ * @sched: scheduler instance
+ *
+ * Returns the entity to process or NULL if none are found.
+ */
+static struct drm_sched_entity *
+drm_sched_select_entity(struct drm_gpu_scheduler *sched)
+{
+ struct drm_sched_entity *entity;
+ int i;
+
+ if (!drm_sched_can_queue(sched))
+ return NULL;
+
+ /* Kernel run queue has higher priority than normal run queue */
+ for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+ entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
+ drm_sched_rq_select_entity_fifo(&sched->sched_rq[i]) :
+ drm_sched_rq_select_entity_rr(&sched->sched_rq[i]);
+ if (entity)
+ break;
+ }
+
+ return entity;
+}
+
+/**
+ * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
+ *
+ * @sched: scheduler instance
+ *
+ * Returns the next finished job from the pending list (if there is one)
+ * that is ready to be destroyed.
+ */
+static struct drm_sched_job *
+drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
+{
+ struct drm_sched_job *job, *next;
+
+ spin_lock(&sched->job_list_lock);
+
+ job = list_first_entry_or_null(&sched->pending_list,
+ struct drm_sched_job, list);
+
+ if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
+ /* remove job from pending_list */
+ list_del_init(&job->list);
+
+ /* cancel this job's TO timer */
+ cancel_delayed_work(&sched->work_tdr);
+ /* make the scheduled timestamp more accurate */
+ next = list_first_entry_or_null(&sched->pending_list,
+ typeof(*next), list);
+
+ if (next) {
+ next->s_fence->scheduled.timestamp =
+ dma_fence_timestamp(&job->s_fence->finished);
+ /* start TO timer for next job */
+ drm_sched_start_timeout(sched);
+ }
+ } else {
+ job = NULL;
+ }
+
+ spin_unlock(&sched->job_list_lock);
+
+ return job;
+}
+
+/**
+ * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
+ * @sched_list: list of drm_gpu_schedulers
+ * @num_sched_list: number of drm_gpu_schedulers in the sched_list
+ *
+ * Returns a pointer to the sched with the least load, or NULL if none of the
+ * drm_gpu_schedulers are ready.
+ */
+struct drm_gpu_scheduler *
+drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
+ unsigned int num_sched_list)
+{
+ struct drm_gpu_scheduler *sched, *picked_sched = NULL;
+ int i;
+ unsigned int min_score = UINT_MAX, num_score;
+
+ for (i = 0; i < num_sched_list; ++i) {
+ sched = sched_list[i];
+
+ if (!sched->ready) {
+ DRM_WARN("scheduler %s is not ready, skipping",
+ sched->name);
+ continue;
+ }
+
+ num_score = atomic_read(sched->score);
+ if (num_score < min_score) {
+ min_score = num_score;
+ picked_sched = sched;
+ }
+ }
+
+ return picked_sched;
+}
+EXPORT_SYMBOL(drm_sched_pick_best);
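+
+/*
+ * Illustrative sketch: a driver with several schedulers per engine type can
+ * load-balance new entities by starting them on the least loaded scheduler.
+ * "sched_list" and "num_scheds" are hypothetical driver-side names.
+ *
+ *   struct drm_gpu_scheduler *sched;
+ *
+ *   sched = drm_sched_pick_best(sched_list, num_scheds);
+ *   if (!sched)
+ *       return -ENODEV;   // no scheduler is ready
+ */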
+
+/**
+ * drm_sched_blocked - check if the scheduler is blocked
+ *
+ * @sched: scheduler instance
+ *
+ * Returns true if blocked, otherwise false.
+ */
+static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
+{
+ if (kthread_should_park()) {
+ kthread_parkme();
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * drm_sched_main - main scheduler thread
+ *
+ * @param: scheduler instance
+ *
+ * Returns 0.
+ */
+static int drm_sched_main(void *param)
+{
+ struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
+ int r;
+
+ sched_set_fifo_low(current);
+
+ while (!kthread_should_stop()) {
+ struct drm_sched_entity *entity = NULL;
+ struct drm_sched_fence *s_fence;
+ struct drm_sched_job *sched_job;
+ struct dma_fence *fence;
+ struct drm_sched_job *cleanup_job = NULL;
+
+ wait_event_interruptible(sched->wake_up_worker,
+ (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
+ (!drm_sched_blocked(sched) &&
+ (entity = drm_sched_select_entity(sched))) ||
+ kthread_should_stop());
+
+ if (cleanup_job)
+ sched->ops->free_job(cleanup_job);
+
+ if (!entity)
+ continue;
+
+ sched_job = drm_sched_entity_pop_job(entity);
+
+ if (!sched_job) {
+ complete_all(&entity->entity_idle);
+ continue;
+ }
+
+ s_fence = sched_job->s_fence;
+
+ atomic_inc(&sched->hw_rq_count);
+ drm_sched_job_begin(sched_job);
+
+ trace_drm_run_job(sched_job, entity);
+ fence = sched->ops->run_job(sched_job);
+ complete_all(&entity->entity_idle);
+ drm_sched_fence_scheduled(s_fence, fence);
+
+ if (!IS_ERR_OR_NULL(fence)) {
+ /* Drop for original kref_init of the fence */
+ dma_fence_put(fence);
+
+ r = dma_fence_add_callback(fence, &sched_job->cb,
+ drm_sched_job_done_cb);
+ if (r == -ENOENT)
+ drm_sched_job_done(sched_job, fence->error);
+ else if (r)
+ DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
+ r);
+ } else {
+ drm_sched_job_done(sched_job, IS_ERR(fence) ?
+ PTR_ERR(fence) : 0);
+ }
+
+ wake_up(&sched->job_scheduled);
+ }
+ return 0;
+}
+
+/**
+ * drm_sched_init - Init a gpu scheduler instance
+ *
+ * @sched: scheduler instance
+ * @ops: backend operations for this scheduler
+ * @hw_submission: number of hw submissions that can be in flight
+ * @hang_limit: number of times to allow a job to hang before dropping it
+ * @timeout: timeout value in jiffies for the scheduler
+ * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is
+ * used
+ * @score: optional score atomic shared with other schedulers
+ * @name: name used for debugging
+ * @dev: target &struct device
+ *
+ * Return 0 on success, otherwise an error code.
+ */
+int drm_sched_init(struct drm_gpu_scheduler *sched,
+ const struct drm_sched_backend_ops *ops,
+ unsigned hw_submission, unsigned hang_limit,
+ long timeout, struct workqueue_struct *timeout_wq,
+ atomic_t *score, const char *name, struct device *dev)
+{
+ int i, ret;
+
+ sched->ops = ops;
+ sched->hw_submission_limit = hw_submission;
+ sched->name = name;
+ sched->timeout = timeout;
+ sched->timeout_wq = timeout_wq ? : system_wq;
+ sched->hang_limit = hang_limit;
+ sched->score = score ? score : &sched->_score;
+ sched->dev = dev;
+ for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++)
+ drm_sched_rq_init(sched, &sched->sched_rq[i]);
+
+ init_waitqueue_head(&sched->wake_up_worker);
+ init_waitqueue_head(&sched->job_scheduled);
+ INIT_LIST_HEAD(&sched->pending_list);
+ spin_lock_init(&sched->job_list_lock);
+ atomic_set(&sched->hw_rq_count, 0);
+ INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
+ atomic_set(&sched->_score, 0);
+ atomic64_set(&sched->job_id_count, 0);
+
+ /* Each scheduler will run on a separate kernel thread */
+ sched->thread = kthread_run(drm_sched_main, sched, sched->name);
+ if (IS_ERR(sched->thread)) {
+ ret = PTR_ERR(sched->thread);
+ sched->thread = NULL;
+ DRM_DEV_ERROR(sched->dev, "Failed to create scheduler for %s.\n", name);
+ return ret;
+ }
+
+ sched->ready = true;
+ return 0;
+}
+EXPORT_SYMBOL(drm_sched_init);
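+
+/*
+ * Illustrative sketch: drivers typically call this once per ring/engine at
+ * init time. "ring", "my_sched_ops", "num_hw_submissions", "hang_limit" and
+ * "timeout_ms" are hypothetical driver-side names.
+ *
+ *   ret = drm_sched_init(&ring->sched, &my_sched_ops,
+ *                        num_hw_submissions, hang_limit,
+ *                        msecs_to_jiffies(timeout_ms), NULL,
+ *                        NULL, ring->name, dev);
+ *   if (ret)
+ *       return ret;
+ */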
+
+/**
+ * drm_sched_fini - Destroy a gpu scheduler
+ *
+ * @sched: scheduler instance
+ *
+ * Tears down and cleans up the scheduler.
+ */
+void drm_sched_fini(struct drm_gpu_scheduler *sched)
+{
+ struct drm_sched_entity *s_entity;
+ int i;
+
+ if (sched->thread)
+ kthread_stop(sched->thread);
+
+ for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+ struct drm_sched_rq *rq = &sched->sched_rq[i];
+
+ spin_lock(&rq->lock);
+ list_for_each_entry(s_entity, &rq->entities, list)
+ /*
+ * Prevents reinsertion and marks job_queue as idle;
+ * it will be removed from the rq in
+ * drm_sched_entity_fini() eventually.
+ */
+ s_entity->stopped = true;
+ spin_unlock(&rq->lock);
+ }
+
+ /* Wakeup everyone stuck in drm_sched_entity_flush for this scheduler */
+ wake_up_all(&sched->job_scheduled);
+
+ /* Confirm no work left behind accessing device structures */
+ cancel_delayed_work_sync(&sched->work_tdr);
+
+ sched->ready = false;
+}
+EXPORT_SYMBOL(drm_sched_fini);
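+
+/*
+ * Illustrative teardown-order sketch: entities using this scheduler should be
+ * destroyed (or at least flushed) first, then the scheduler itself, before the
+ * underlying hardware is shut down. "ctx" and "ring" are hypothetical driver
+ * structures.
+ *
+ *   drm_sched_entity_destroy(&ctx->entity);
+ *   drm_sched_fini(&ring->sched);
+ */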
+
+/**
+ * drm_sched_increase_karma - Update sched_entity guilty flag
+ *
+ * @bad: The job guilty of the timeout
+ *
+ * Increments @bad's karma on every hang it causes. If this exceeds the hang
+ * limit of the scheduler, the respective sched entity is marked guilty and
+ * jobs from it will not be scheduled further.
+ */
+void drm_sched_increase_karma(struct drm_sched_job *bad)
+{
+ int i;
+ struct drm_sched_entity *tmp;
+ struct drm_sched_entity *entity;
+ struct drm_gpu_scheduler *sched = bad->sched;
+
+ /* Don't change @bad's karma if it's from the KERNEL RQ, because a GPU hang
+ * can sometimes corrupt kernel jobs (like VM updating jobs), but kernel jobs
+ * are always considered good.
+ */
+ if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
+ atomic_inc(&bad->karma);
+
+ for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
+ i++) {
+ struct drm_sched_rq *rq = &sched->sched_rq[i];
+
+ spin_lock(&rq->lock);
+ list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
+ if (bad->s_fence->scheduled.context ==
+ entity->fence_context) {
+ if (entity->guilty)
+ atomic_set(entity->guilty, 1);
+ break;
+ }
+ }
+ spin_unlock(&rq->lock);
+ if (&entity->list != &rq->entities)
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL(drm_sched_increase_karma);
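+
+/*
+ * Illustrative sketch: drivers usually call this from their
+ * &drm_sched_backend_ops.timedout_job callback before deciding how to
+ * recover; "my_timedout_job" is a hypothetical driver callback.
+ *
+ *   static enum drm_gpu_sched_stat my_timedout_job(struct drm_sched_job *job)
+ *   {
+ *       drm_sched_increase_karma(job);
+ *       // driver-specific reset and resubmit/drop handling goes here
+ *       return DRM_GPU_SCHED_STAT_NOMINAL;
+ *   }
+ */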