ASR_BASE

Change-Id: Icf3719cc0afe3eeb3edc7fa80a2eb5199ca9dda1
diff --git a/marvell/linux/drivers/gpu/drm/scheduler/Makefile b/marvell/linux/drivers/gpu/drm/scheduler/Makefile
new file mode 100644
index 0000000..5386362
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/scheduler/Makefile
@@ -0,0 +1,25 @@
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+gpu-sched-y := sched_main.o sched_fence.o sched_entity.o
+
+obj-$(CONFIG_DRM_SCHED) += gpu-sched.o
diff --git a/marvell/linux/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/marvell/linux/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
new file mode 100644
index 0000000..d790864
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _GPU_SCHED_TRACE_H
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM gpu_scheduler
+#define TRACE_INCLUDE_FILE gpu_scheduler_trace
+
+TRACE_EVENT(drm_sched_job,
+	    TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
+	    TP_ARGS(sched_job, entity),
+	    TP_STRUCT__entry(
+			     __field(struct drm_sched_entity *, entity)
+			     __field(struct dma_fence *, fence)
+			     __field(const char *, name)
+			     __field(uint64_t, id)
+			     __field(u32, job_count)
+			     __field(int, hw_job_count)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->entity = entity;
+			   __entry->id = sched_job->id;
+			   __entry->fence = &sched_job->s_fence->finished;
+			   __entry->name = sched_job->sched->name;
+			   __entry->job_count = spsc_queue_count(&entity->job_queue);
+			   __entry->hw_job_count = atomic_read(
+				   &sched_job->sched->hw_rq_count);
+			   ),
+	    TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d",
+		      __entry->entity, __entry->id,
+		      __entry->fence, __entry->name,
+		      __entry->job_count, __entry->hw_job_count)
+);
+
+TRACE_EVENT(drm_sched_process_job,
+	    TP_PROTO(struct drm_sched_fence *fence),
+	    TP_ARGS(fence),
+	    TP_STRUCT__entry(
+		    __field(struct dma_fence *, fence)
+		    ),
+
+	    TP_fast_assign(
+		    __entry->fence = &fence->finished;
+		    ),
+	    TP_printk("fence=%p signaled", __entry->fence)
+);
+
+TRACE_EVENT(drm_sched_job_wait_dep,
+	    TP_PROTO(struct drm_sched_job *sched_job, struct dma_fence *fence),
+	    TP_ARGS(sched_job, fence),
+	    TP_STRUCT__entry(
+			     __field(const char *, name)
+			     __field(uint64_t, id)
+			     __field(struct dma_fence *, fence)
+			     __field(uint64_t, ctx)
+			     __field(unsigned, seqno)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->name = sched_job->sched->name;
+			   __entry->id = sched_job->id;
+			   __entry->fence = fence;
+			   __entry->ctx = fence->context;
+			   __entry->seqno = fence->seqno;
+			   ),
+	    TP_printk("job ring=%s, id=%llu, depends fence=%p, context=%llu, seq=%u",
+		      __entry->name, __entry->id,
+		      __entry->fence, __entry->ctx,
+		      __entry->seqno)
+);
+
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/scheduler
+#include <trace/define_trace.h>
diff --git a/marvell/linux/drivers/gpu/drm/scheduler/sched_entity.c b/marvell/linux/drivers/gpu/drm/scheduler/sched_entity.c
new file mode 100644
index 0000000..57f9baa
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/scheduler/sched_entity.c
@@ -0,0 +1,548 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/kthread.h>
+#include <linux/slab.h>
+
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+
+#include "gpu_scheduler_trace.h"
+
+#define to_drm_sched_job(sched_job)		\
+		container_of((sched_job), struct drm_sched_job, queue_node)
+
+/**
+ * drm_sched_entity_init - Init a context entity used by the scheduler when
+ * submitting to a HW ring.
+ *
+ * @entity: scheduler entity to init
+ * @rq_list: the list of run queues on which jobs from this
+ *           entity can be submitted
+ * @num_rq_list: number of run queues in rq_list
+ * @guilty: atomic_t set to 1 when a job on this queue
+ *          is found to be guilty causing a timeout
+ *
+ * Note: the rq_list should have at least one element to schedule
+ *       the entity
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_sched_entity_init(struct drm_sched_entity *entity,
+			  struct drm_sched_rq **rq_list,
+			  unsigned int num_rq_list,
+			  atomic_t *guilty)
+{
+	int i;
+
+	if (!(entity && rq_list && (num_rq_list == 0 || rq_list[0])))
+		return -EINVAL;
+
+	memset(entity, 0, sizeof(struct drm_sched_entity));
+	INIT_LIST_HEAD(&entity->list);
+	entity->rq = NULL;
+	entity->guilty = guilty;
+	entity->num_rq_list = num_rq_list;
+	entity->rq_list = kcalloc(num_rq_list, sizeof(struct drm_sched_rq *),
+				GFP_KERNEL);
+	if (!entity->rq_list)
+		return -ENOMEM;
+
+	for (i = 0; i < num_rq_list; ++i)
+		entity->rq_list[i] = rq_list[i];
+
+	if (num_rq_list)
+		entity->rq = rq_list[0];
+
+	entity->last_scheduled = NULL;
+
+	spin_lock_init(&entity->rq_lock);
+	spsc_queue_init(&entity->job_queue);
+
+	atomic_set(&entity->fence_seq, 0);
+	entity->fence_context = dma_fence_context_alloc(2);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_sched_entity_init);
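+
+/*
+ * Illustrative sketch of typical usage (the names "my_sched" and "my_entity"
+ * are placeholders, not part of this scheduler code): a driver with a single
+ * scheduler would usually build the rq_list from that scheduler's priority
+ * run queues.
+ *
+ *	struct drm_sched_rq *rq = &my_sched->sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ *	int r;
+ *
+ *	r = drm_sched_entity_init(&my_entity, &rq, 1, NULL);
+ *	if (r)
+ *		return r;
+ */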
+
+/**
+ * drm_sched_entity_is_idle - Check if entity is idle
+ *
+ * @entity: scheduler entity
+ *
+ * Returns true if the entity does not have any unscheduled jobs.
+ */
+static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
+{
+	rmb(); /* for list_empty to work without lock */
+
+	if (list_empty(&entity->list) ||
+	    spsc_queue_count(&entity->job_queue) == 0)
+		return true;
+
+	return false;
+}
+
+/**
+ * drm_sched_entity_is_ready - Check if entity is ready
+ *
+ * @entity: scheduler entity
+ *
+ * Returns true if the entity can provide a job.
+ */
+bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
+{
+	if (spsc_queue_peek(&entity->job_queue) == NULL)
+		return false;
+
+	if (READ_ONCE(entity->dependency))
+		return false;
+
+	return true;
+}
+
+/**
+ * drm_sched_entity_get_free_sched - Get the rq from rq_list with the least load
+ *
+ * @entity: scheduler entity
+ *
+ * Returns a pointer to the rq with the least load.
+ */
+static struct drm_sched_rq *
+drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
+{
+	struct drm_sched_rq *rq = NULL;
+	unsigned int min_jobs = UINT_MAX, num_jobs;
+	int i;
+
+	for (i = 0; i < entity->num_rq_list; ++i) {
+		struct drm_gpu_scheduler *sched = entity->rq_list[i]->sched;
+
+		if (!entity->rq_list[i]->sched->ready) {
+			DRM_WARN("sched%s is not ready, skipping", sched->name);
+			continue;
+		}
+
+		num_jobs = atomic_read(&sched->num_jobs);
+		if (num_jobs < min_jobs) {
+			min_jobs = num_jobs;
+			rq = entity->rq_list[i];
+		}
+	}
+
+	return rq;
+}
+
+/**
+ * drm_sched_entity_flush - Flush a context entity
+ *
+ * @entity: scheduler entity
+ * @timeout: time to wait, in jiffies, for the queue to become empty
+ *
+ * drm_sched_entity_fini() is split into two functions; this first one does the
+ * waiting, removes the entity from the runqueue and returns an error when the
+ * process was killed.
+ *
+ * Returns the remaining time in jiffies left from the input timeout.
+ */
+long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
+{
+	struct drm_gpu_scheduler *sched;
+	struct task_struct *last_user;
+	long ret = timeout;
+
+	if (!entity->rq)
+		return 0;
+
+	sched = entity->rq->sched;
+	/*
+	 * The client will not queue more IBs during this fini; consume the
+	 * existing queued IBs or discard them on SIGKILL.
+	 */
+	if (current->flags & PF_EXITING) {
+		if (timeout)
+			ret = wait_event_timeout(
+					sched->job_scheduled,
+					drm_sched_entity_is_idle(entity),
+					timeout);
+	} else {
+		wait_event_killable(sched->job_scheduled,
+				    drm_sched_entity_is_idle(entity));
+	}
+
+	/* For killed process disable any more IBs enqueue right now */
+	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
+	if ((!last_user || last_user == current->group_leader) &&
+	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) {
+		spin_lock(&entity->rq_lock);
+		entity->stopped = true;
+		drm_sched_rq_remove_entity(entity->rq, entity);
+		spin_unlock(&entity->rq_lock);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_sched_entity_flush);
+
+/**
+ * drm_sched_entity_kill_jobs_cb - helper for drm_sched_entity_kill_jobs
+ *
+ * @f: signaled fence
+ * @cb: our callback structure
+ *
+ * Signal the scheduler finished fence when the entity in question is killed.
+ */
+static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
+					  struct dma_fence_cb *cb)
+{
+	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
+						 finish_cb);
+
+	drm_sched_fence_finished(job->s_fence);
+	WARN_ON(job->s_fence->parent);
+	job->sched->ops->free_job(job);
+}
+
+/**
+ * drm_sched_entity_kill_jobs - Make sure all remaining jobs are killed
+ *
+ * @entity: entity which is cleaned up
+ *
+ * Makes sure that all remaining jobs in an entity are killed before it is
+ * destroyed.
+ */
+static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
+{
+	struct drm_sched_job *job;
+	struct dma_fence *f;
+	int r;
+
+	while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
+		struct drm_sched_fence *s_fence = job->s_fence;
+
+		/* Wait for all dependencies to avoid data corruptions */
+		while ((f = job->sched->ops->dependency(job, entity)))
+			dma_fence_wait(f, false);
+
+		drm_sched_fence_scheduled(s_fence);
+		dma_fence_set_error(&s_fence->finished, -ESRCH);
+
+		/*
+		 * When the pipe is hung by an older entity, the new entity
+		 * might not even have had a chance to submit its first job
+		 * to HW, and so entity->last_scheduled will remain NULL.
+		 */
+		if (!entity->last_scheduled) {
+			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
+			continue;
+		}
+
+		r = dma_fence_add_callback(entity->last_scheduled,
+					   &job->finish_cb,
+					   drm_sched_entity_kill_jobs_cb);
+		if (r == -ENOENT)
+			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
+		else if (r)
+			DRM_ERROR("fence add callback failed (%d)\n", r);
+	}
+}
+
+/**
+ * drm_sched_entity_fini - Destroy a context entity
+ *
+ * @entity: scheduler entity
+ *
+ * This should be called after drm_sched_entity_flush(). It goes over the
+ * entity and signals all jobs with an error code if the process was killed.
+ *
+ */
+void drm_sched_entity_fini(struct drm_sched_entity *entity)
+{
+	struct drm_gpu_scheduler *sched = NULL;
+
+	if (entity->rq) {
+		sched = entity->rq->sched;
+		drm_sched_rq_remove_entity(entity->rq, entity);
+	}
+
+	/* Consumption of existing IBs wasn't completed. Forcefully
+	 * remove them here.
+	 */
+	if (spsc_queue_count(&entity->job_queue)) {
+		if (sched) {
+			/* Park the kernel thread for a moment to make sure it isn't
+			 * processing our entity.
+			 */
+			kthread_park(sched->thread);
+			kthread_unpark(sched->thread);
+		}
+		if (entity->dependency) {
+			dma_fence_remove_callback(entity->dependency,
+						  &entity->cb);
+			dma_fence_put(entity->dependency);
+			entity->dependency = NULL;
+		}
+
+		drm_sched_entity_kill_jobs(entity);
+	}
+
+	dma_fence_put(entity->last_scheduled);
+	entity->last_scheduled = NULL;
+	kfree(entity->rq_list);
+}
+EXPORT_SYMBOL(drm_sched_entity_fini);
+
+/**
+ * drm_sched_entity_destroy - Destroy a context entity
+ *
+ * @entity: scheduler entity
+ *
+ * Calls drm_sched_entity_flush() and drm_sched_entity_fini().
+ */
+void drm_sched_entity_destroy(struct drm_sched_entity *entity)
+{
+	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
+	drm_sched_entity_fini(entity);
+}
+EXPORT_SYMBOL(drm_sched_entity_destroy);
+
+/**
+ * drm_sched_entity_clear_dep - callback to clear the entity's dependency
+ */
+static void drm_sched_entity_clear_dep(struct dma_fence *f,
+				       struct dma_fence_cb *cb)
+{
+	struct drm_sched_entity *entity =
+		container_of(cb, struct drm_sched_entity, cb);
+
+	entity->dependency = NULL;
+	dma_fence_put(f);
+}
+
+/**
+ * drm_sched_entity_wakeup - callback to clear the entity's dependency and
+ * wake up the scheduler
+ */
+static void drm_sched_entity_wakeup(struct dma_fence *f,
+				    struct dma_fence_cb *cb)
+{
+	struct drm_sched_entity *entity =
+		container_of(cb, struct drm_sched_entity, cb);
+
+	drm_sched_entity_clear_dep(f, cb);
+	drm_sched_wakeup(entity->rq->sched);
+}
+
+/**
+ * drm_sched_entity_set_rq_priority - helper for drm_sched_entity_set_priority
+ */
+static void drm_sched_entity_set_rq_priority(struct drm_sched_rq **rq,
+					     enum drm_sched_priority priority)
+{
+	*rq = &(*rq)->sched->sched_rq[priority];
+}
+
+/**
+ * drm_sched_entity_set_priority - Sets priority of the entity
+ *
+ * @entity: scheduler entity
+ * @priority: scheduler priority
+ *
+ * Update the priority of the runqueues used for the entity.
+ */
+void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
+				   enum drm_sched_priority priority)
+{
+	unsigned int i;
+
+	spin_lock(&entity->rq_lock);
+
+	for (i = 0; i < entity->num_rq_list; ++i)
+		drm_sched_entity_set_rq_priority(&entity->rq_list[i], priority);
+
+	if (entity->rq) {
+		drm_sched_rq_remove_entity(entity->rq, entity);
+		drm_sched_entity_set_rq_priority(&entity->rq, priority);
+		drm_sched_rq_add_entity(entity->rq, entity);
+	}
+
+	spin_unlock(&entity->rq_lock);
+}
+EXPORT_SYMBOL(drm_sched_entity_set_priority);
+
+/**
+ * drm_sched_entity_add_dependency_cb - add callback for the entity's dependency
+ *
+ * @entity: entity with dependency
+ *
+ * Add a callback to the current dependency of the entity to wake up the
+ * scheduler when the entity becomes available.
+ */
+static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
+{
+	struct drm_gpu_scheduler *sched = entity->rq->sched;
+	struct dma_fence *fence = entity->dependency;
+	struct drm_sched_fence *s_fence;
+
+	if (fence->context == entity->fence_context ||
+	    fence->context == entity->fence_context + 1) {
+		/*
+		 * Fence is a scheduled/finished fence from a job
+		 * which belongs to the same entity, we can ignore
+		 * fences from ourself
+		 */
+		dma_fence_put(entity->dependency);
+		return false;
+	}
+
+	s_fence = to_drm_sched_fence(fence);
+	if (s_fence && s_fence->sched == sched) {
+
+		/*
+		 * Fence is from the same scheduler, only need to wait for
+		 * it to be scheduled
+		 */
+		fence = dma_fence_get(&s_fence->scheduled);
+		dma_fence_put(entity->dependency);
+		entity->dependency = fence;
+		if (!dma_fence_add_callback(fence, &entity->cb,
+					    drm_sched_entity_clear_dep))
+			return true;
+
+		/* Ignore it when it is already scheduled */
+		dma_fence_put(fence);
+		return false;
+	}
+
+	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
+				    drm_sched_entity_wakeup))
+		return true;
+
+	dma_fence_put(entity->dependency);
+	return false;
+}
+
+/**
+ * drm_sched_entity_pop_job - get a ready to be scheduled job from the entity
+ *
+ * @entity: entity to get the job from
+ *
+ * Process all dependencies and try to get one job from the entity's queue.
+ */
+struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
+{
+	struct drm_gpu_scheduler *sched = entity->rq->sched;
+	struct drm_sched_job *sched_job;
+
+	sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
+	if (!sched_job)
+		return NULL;
+
+	while ((entity->dependency =
+			sched->ops->dependency(sched_job, entity))) {
+		trace_drm_sched_job_wait_dep(sched_job, entity->dependency);
+
+		if (drm_sched_entity_add_dependency_cb(entity))
+			return NULL;
+	}
+
+	/* skip jobs from an entity that is marked guilty */
+	if (entity->guilty && atomic_read(entity->guilty))
+		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);
+
+	dma_fence_put(entity->last_scheduled);
+	entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished);
+
+	spsc_queue_pop(&entity->job_queue);
+	return sched_job;
+}
+
+/**
+ * drm_sched_entity_select_rq - select a new rq for the entity
+ *
+ * @entity: scheduler entity
+ *
+ * Check all prerequisites and select a new rq for the entity for load
+ * balancing.
+ */
+void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
+{
+	struct dma_fence *fence;
+	struct drm_sched_rq *rq;
+
+	if (spsc_queue_count(&entity->job_queue) || entity->num_rq_list <= 1)
+		return;
+
+	fence = READ_ONCE(entity->last_scheduled);
+	if (fence && !dma_fence_is_signaled(fence))
+		return;
+
+	rq = drm_sched_entity_get_free_sched(entity);
+	if (rq == entity->rq)
+		return;
+
+	spin_lock(&entity->rq_lock);
+	drm_sched_rq_remove_entity(entity->rq, entity);
+	entity->rq = rq;
+	spin_unlock(&entity->rq_lock);
+}
+
+/**
+ * drm_sched_entity_push_job - Submit a job to the entity's job queue
+ *
+ * @sched_job: job to submit
+ * @entity: scheduler entity
+ *
+ * Note: To guarantee that the order of insertion into the queue matches the
+ * order of the jobs' fence sequence numbers, this function should be called
+ * with drm_sched_job_init() under a common lock.
+ */
+void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
+			       struct drm_sched_entity *entity)
+{
+	bool first;
+
+	trace_drm_sched_job(sched_job, entity);
+	atomic_inc(&entity->rq->sched->num_jobs);
+	WRITE_ONCE(entity->last_user, current->group_leader);
+	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
+
+	/* first job wakes up scheduler */
+	if (first) {
+		/* Add the entity to the run queue */
+		spin_lock(&entity->rq_lock);
+		if (entity->stopped) {
+			spin_unlock(&entity->rq_lock);
+
+			DRM_ERROR("Trying to push to a killed entity\n");
+			return;
+		}
+		drm_sched_rq_add_entity(entity->rq, entity);
+		spin_unlock(&entity->rq_lock);
+		drm_sched_wakeup(entity->rq->sched);
+	}
+}
+EXPORT_SYMBOL(drm_sched_entity_push_job);
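+
+/*
+ * Illustrative sketch of typical usage (the lock "my_submit_lock" and the
+ * wrapper struct "my_job" are placeholders, not part of this code): as noted
+ * above, pairing drm_sched_job_init() and drm_sched_entity_push_job() under
+ * one lock keeps the fence sequence numbers in queue order.
+ *
+ *	mutex_lock(&my_submit_lock);
+ *	r = drm_sched_job_init(&my_job->base, &my_entity, my_owner);
+ *	if (!r)
+ *		drm_sched_entity_push_job(&my_job->base, &my_entity);
+ *	mutex_unlock(&my_submit_lock);
+ */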
diff --git a/marvell/linux/drivers/gpu/drm/scheduler/sched_fence.c b/marvell/linux/drivers/gpu/drm/scheduler/sched_fence.c
new file mode 100644
index 0000000..5497740
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/scheduler/sched_fence.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+
+#include <drm/gpu_scheduler.h>
+
+static struct kmem_cache *sched_fence_slab;
+
+static int __init drm_sched_fence_slab_init(void)
+{
+	sched_fence_slab = kmem_cache_create(
+		"drm_sched_fence", sizeof(struct drm_sched_fence), 0,
+		SLAB_HWCACHE_ALIGN, NULL);
+	if (!sched_fence_slab)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void __exit drm_sched_fence_slab_fini(void)
+{
+	rcu_barrier();
+	kmem_cache_destroy(sched_fence_slab);
+}
+
+void drm_sched_fence_scheduled(struct drm_sched_fence *fence)
+{
+	int ret = dma_fence_signal(&fence->scheduled);
+
+	if (!ret)
+		DMA_FENCE_TRACE(&fence->scheduled,
+				"signaled from irq context\n");
+	else
+		DMA_FENCE_TRACE(&fence->scheduled,
+				"was already signaled\n");
+}
+
+void drm_sched_fence_finished(struct drm_sched_fence *fence)
+{
+	int ret = dma_fence_signal(&fence->finished);
+
+	if (!ret)
+		DMA_FENCE_TRACE(&fence->finished,
+				"signaled from irq context\n");
+	else
+		DMA_FENCE_TRACE(&fence->finished,
+				"was already signaled\n");
+}
+
+static const char *drm_sched_fence_get_driver_name(struct dma_fence *fence)
+{
+	return "drm_sched";
+}
+
+static const char *drm_sched_fence_get_timeline_name(struct dma_fence *f)
+{
+	struct drm_sched_fence *fence = to_drm_sched_fence(f);
+	return (const char *)fence->sched->name;
+}
+
+/**
+ * drm_sched_fence_free - free up the fence memory
+ *
+ * @rcu: RCU callback head
+ *
+ * Free up the fence memory after the RCU grace period.
+ */
+static void drm_sched_fence_free(struct rcu_head *rcu)
+{
+	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
+	struct drm_sched_fence *fence = to_drm_sched_fence(f);
+
+	kmem_cache_free(sched_fence_slab, fence);
+}
+
+/**
+ * drm_sched_fence_release_scheduled - callback that fence can be freed
+ *
+ * @f: fence
+ *
+ * This function is called when the reference count becomes zero.
+ * It just RCU schedules freeing up the fence.
+ */
+static void drm_sched_fence_release_scheduled(struct dma_fence *f)
+{
+	struct drm_sched_fence *fence = to_drm_sched_fence(f);
+
+	dma_fence_put(fence->parent);
+	call_rcu(&fence->finished.rcu, drm_sched_fence_free);
+}
+
+/**
+ * drm_sched_fence_release_finished - drop extra reference
+ *
+ * @f: fence
+ *
+ * Drop the extra reference from the scheduled fence to the base fence.
+ */
+static void drm_sched_fence_release_finished(struct dma_fence *f)
+{
+	struct drm_sched_fence *fence = to_drm_sched_fence(f);
+
+	dma_fence_put(&fence->scheduled);
+}
+
+const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
+	.get_driver_name = drm_sched_fence_get_driver_name,
+	.get_timeline_name = drm_sched_fence_get_timeline_name,
+	.release = drm_sched_fence_release_scheduled,
+};
+
+const struct dma_fence_ops drm_sched_fence_ops_finished = {
+	.get_driver_name = drm_sched_fence_get_driver_name,
+	.get_timeline_name = drm_sched_fence_get_timeline_name,
+	.release = drm_sched_fence_release_finished,
+};
+
+struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
+{
+	if (f->ops == &drm_sched_fence_ops_scheduled)
+		return container_of(f, struct drm_sched_fence, scheduled);
+
+	if (f->ops == &drm_sched_fence_ops_finished)
+		return container_of(f, struct drm_sched_fence, finished);
+
+	return NULL;
+}
+EXPORT_SYMBOL(to_drm_sched_fence);
+
+struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity,
+					       void *owner)
+{
+	struct drm_sched_fence *fence = NULL;
+	unsigned seq;
+
+	fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
+	if (fence == NULL)
+		return NULL;
+
+	fence->owner = owner;
+	fence->sched = entity->rq->sched;
+	spin_lock_init(&fence->lock);
+
+	seq = atomic_inc_return(&entity->fence_seq);
+	dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled,
+		       &fence->lock, entity->fence_context, seq);
+	dma_fence_init(&fence->finished, &drm_sched_fence_ops_finished,
+		       &fence->lock, entity->fence_context + 1, seq);
+
+	return fence;
+}
+
+module_init(drm_sched_fence_slab_init);
+module_exit(drm_sched_fence_slab_fini);
+
+MODULE_DESCRIPTION("DRM GPU scheduler");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/marvell/linux/drivers/gpu/drm/scheduler/sched_main.c b/marvell/linux/drivers/gpu/drm/scheduler/sched_main.c
new file mode 100644
index 0000000..3767950
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/scheduler/sched_main.c
@@ -0,0 +1,859 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/**
+ * DOC: Overview
+ *
+ * The GPU scheduler provides entities which allow userspace to push jobs
+ * into software queues which are then scheduled on a hardware run queue.
+ * The software queues have a priority among them. The scheduler selects the entities
+ * from the run queue using a FIFO. The scheduler provides dependency handling
+ * features among jobs. The driver is supposed to provide callback functions for
+ * backend operations to the scheduler like submitting a job to hardware run queue,
+ * returning the dependencies of a job etc.
+ *
+ * The organisation of the scheduler is the following:
+ *
+ * 1. Each hw run queue has one scheduler
+ * 2. Each scheduler has multiple run queues with different priorities
+ *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
+ * 3. Each scheduler run queue has a queue of entities to schedule
+ * 4. Entities themselves maintain a queue of jobs that will be scheduled on
+ *    the hardware.
+ *
+ * The jobs in an entity are always scheduled in the order that they were pushed.
+ */
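+
+/*
+ * Illustrative driver flow (assumed typical usage; names such as "my_ops" are
+ * placeholders): one scheduler is created per hw run queue, each context gets
+ * an entity, and submissions go through job init and push. The driver's
+ * drm_sched_backend_ops provide the dependency(), run_job(), timedout_job()
+ * and free_job() callbacks invoked by this file.
+ *
+ *	drm_sched_init(&sched, &my_ops, hw_submission, hang_limit, timeout, "ring0");
+ *	drm_sched_entity_init(&entity, &rq, 1, NULL);
+ *	drm_sched_job_init(&job, &entity, owner);
+ *	drm_sched_entity_push_job(&job, &entity);
+ */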
+
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <uapi/linux/sched/types.h>
+
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+#include <drm/spsc_queue.h>
+
+#define CREATE_TRACE_POINTS
+#include "gpu_scheduler_trace.h"
+
+#define to_drm_sched_job(sched_job)		\
+		container_of((sched_job), struct drm_sched_job, queue_node)
+
+static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
+
+/**
+ * drm_sched_rq_init - initialize a given run queue struct
+ *
+ * @rq: scheduler run queue
+ *
+ * Initializes a scheduler runqueue.
+ */
+static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
+			      struct drm_sched_rq *rq)
+{
+	spin_lock_init(&rq->lock);
+	INIT_LIST_HEAD(&rq->entities);
+	rq->current_entity = NULL;
+	rq->sched = sched;
+}
+
+/**
+ * drm_sched_rq_add_entity - add an entity
+ *
+ * @rq: scheduler run queue
+ * @entity: scheduler entity
+ *
+ * Adds a scheduler entity to the run queue.
+ */
+void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
+			     struct drm_sched_entity *entity)
+{
+	if (!list_empty(&entity->list))
+		return;
+	spin_lock(&rq->lock);
+	list_add_tail(&entity->list, &rq->entities);
+	spin_unlock(&rq->lock);
+}
+
+/**
+ * drm_sched_rq_remove_entity - remove an entity
+ *
+ * @rq: scheduler run queue
+ * @entity: scheduler entity
+ *
+ * Removes a scheduler entity from the run queue.
+ */
+void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
+				struct drm_sched_entity *entity)
+{
+	if (list_empty(&entity->list))
+		return;
+	spin_lock(&rq->lock);
+	list_del_init(&entity->list);
+	if (rq->current_entity == entity)
+		rq->current_entity = NULL;
+	spin_unlock(&rq->lock);
+}
+
+/**
+ * drm_sched_rq_select_entity - Select an entity which could provide a job to run
+ *
+ * @rq: scheduler run queue to check.
+ *
+ * Try to find a ready entity, returns NULL if none found.
+ */
+static struct drm_sched_entity *
+drm_sched_rq_select_entity(struct drm_sched_rq *rq)
+{
+	struct drm_sched_entity *entity;
+
+	spin_lock(&rq->lock);
+
+	entity = rq->current_entity;
+	if (entity) {
+		list_for_each_entry_continue(entity, &rq->entities, list) {
+			if (drm_sched_entity_is_ready(entity)) {
+				rq->current_entity = entity;
+				spin_unlock(&rq->lock);
+				return entity;
+			}
+		}
+	}
+
+	list_for_each_entry(entity, &rq->entities, list) {
+
+		if (drm_sched_entity_is_ready(entity)) {
+			rq->current_entity = entity;
+			spin_unlock(&rq->lock);
+			return entity;
+		}
+
+		if (entity == rq->current_entity)
+			break;
+	}
+
+	spin_unlock(&rq->lock);
+
+	return NULL;
+}
+
+/**
+ * drm_sched_dependency_optimized - check whether the dependency can be optimized
+ *
+ * @fence: the dependency fence
+ * @entity: the entity which depends on the above fence
+ *
+ * Returns true if the dependency can be optimized and false otherwise
+ */
+bool drm_sched_dependency_optimized(struct dma_fence* fence,
+				    struct drm_sched_entity *entity)
+{
+	struct drm_gpu_scheduler *sched = entity->rq->sched;
+	struct drm_sched_fence *s_fence;
+
+	if (!fence || dma_fence_is_signaled(fence))
+		return false;
+	if (fence->context == entity->fence_context)
+		return true;
+	s_fence = to_drm_sched_fence(fence);
+	if (s_fence && s_fence->sched == sched)
+		return true;
+
+	return false;
+}
+EXPORT_SYMBOL(drm_sched_dependency_optimized);
+
+/**
+ * drm_sched_start_timeout - start timeout for reset worker
+ *
+ * @sched: scheduler instance to start the worker for
+ *
+ * Start the timeout for the given scheduler.
+ */
+static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
+{
+	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
+	    !list_empty(&sched->ring_mirror_list))
+		schedule_delayed_work(&sched->work_tdr, sched->timeout);
+}
+
+/**
+ * drm_sched_fault - immediately start timeout handler
+ *
+ * @sched: scheduler where the timeout handling should be started.
+ *
+ * Start timeout handling immediately when the driver detects a hardware fault.
+ */
+void drm_sched_fault(struct drm_gpu_scheduler *sched)
+{
+	mod_delayed_work(system_wq, &sched->work_tdr, 0);
+}
+EXPORT_SYMBOL(drm_sched_fault);
+
+/**
+ * drm_sched_suspend_timeout - Suspend scheduler job timeout
+ *
+ * @sched: scheduler instance for which to suspend the timeout
+ *
+ * Suspend the delayed work timeout for the scheduler. This is done by
+ * modifying the delayed work timeout to an arbitrarily large value,
+ * MAX_SCHEDULE_TIMEOUT in this case. Note that this function can be
+ * called from an IRQ context.
+ *
+ * Returns the timeout remaining
+ *
+ */
+unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
+{
+	unsigned long sched_timeout, now = jiffies;
+
+	sched_timeout = sched->work_tdr.timer.expires;
+
+	/*
+	 * Modify the timeout to an arbitrarily large value. This also prevents
+	 * the timeout to be restarted when new submissions arrive
+	 */
+	if (mod_delayed_work(system_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
+			&& time_after(sched_timeout, now))
+		return sched_timeout - now;
+	else
+		return sched->timeout;
+}
+EXPORT_SYMBOL(drm_sched_suspend_timeout);
+
+/**
+ * drm_sched_resume_timeout - Resume scheduler job timeout
+ *
+ * @sched: scheduler instance for which to resume the timeout
+ * @remaining: remaining timeout
+ *
+ * Resume the delayed work timeout for the scheduler. Note that
+ * this function can be called from an IRQ context.
+ */
+void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
+		unsigned long remaining)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sched->job_list_lock, flags);
+
+	if (list_empty(&sched->ring_mirror_list))
+		cancel_delayed_work(&sched->work_tdr);
+	else
+		mod_delayed_work(system_wq, &sched->work_tdr, remaining);
+
+	spin_unlock_irqrestore(&sched->job_list_lock, flags);
+}
+EXPORT_SYMBOL(drm_sched_resume_timeout);
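+
+/*
+ * Illustrative sketch of typical usage (assumed; "remaining" is a placeholder
+ * variable): a driver that needs the job timeout paused around its own reset
+ * path pairs the two helpers above.
+ *
+ *	unsigned long remaining = drm_sched_suspend_timeout(&sched);
+ *
+ *	... driver specific reset work ...
+ *
+ *	drm_sched_resume_timeout(&sched, remaining);
+ */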
+
+static void drm_sched_job_begin(struct drm_sched_job *s_job)
+{
+	struct drm_gpu_scheduler *sched = s_job->sched;
+	unsigned long flags;
+
+	spin_lock_irqsave(&sched->job_list_lock, flags);
+	list_add_tail(&s_job->node, &sched->ring_mirror_list);
+	drm_sched_start_timeout(sched);
+	spin_unlock_irqrestore(&sched->job_list_lock, flags);
+}
+
+static void drm_sched_job_timedout(struct work_struct *work)
+{
+	struct drm_gpu_scheduler *sched;
+	struct drm_sched_job *job;
+	unsigned long flags;
+
+	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
+
+	/* Protects against concurrent deletion in drm_sched_get_cleanup_job */
+	spin_lock_irqsave(&sched->job_list_lock, flags);
+	job = list_first_entry_or_null(&sched->ring_mirror_list,
+				       struct drm_sched_job, node);
+
+	if (job) {
+		/*
+		 * Remove the bad job so it cannot be freed by concurrent
+		 * drm_sched_cleanup_jobs. It will be reinserted back after sched->thread
+		 * is parked at which point it's safe.
+		 */
+		list_del_init(&job->node);
+		spin_unlock_irqrestore(&sched->job_list_lock, flags);
+
+		job->sched->ops->timedout_job(job);
+
+		/*
+		 * Guilty job did complete and hence needs to be manually removed
+		 * See drm_sched_stop doc.
+		 */
+		if (sched->free_guilty) {
+			job->sched->ops->free_job(job);
+			sched->free_guilty = false;
+		}
+	} else {
+		spin_unlock_irqrestore(&sched->job_list_lock, flags);
+	}
+
+	spin_lock_irqsave(&sched->job_list_lock, flags);
+	drm_sched_start_timeout(sched);
+	spin_unlock_irqrestore(&sched->job_list_lock, flags);
+}
+
+ /**
+  * drm_sched_increase_karma - Update sched_entity guilty flag
+  *
+  * @bad: The job guilty of time out
+  *
+  * Increment on every hang caused by the 'bad' job. If this exceeds the hang
+  * limit of the scheduler then the respective sched entity is marked guilty and
+  * jobs from it will not be scheduled further
+  */
+void drm_sched_increase_karma(struct drm_sched_job *bad)
+{
+	int i;
+	struct drm_sched_entity *tmp;
+	struct drm_sched_entity *entity;
+	struct drm_gpu_scheduler *sched = bad->sched;
+
+	/* Don't increase @bad's karma if it's from the KERNEL RQ, because a GPU
+	 * hang can sometimes corrupt kernel jobs (like VM updating jobs), but
+	 * keep in mind that kernel jobs are always considered good.
+	 */
+	if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
+		atomic_inc(&bad->karma);
+		for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
+		     i++) {
+			struct drm_sched_rq *rq = &sched->sched_rq[i];
+
+			spin_lock(&rq->lock);
+			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
+				if (bad->s_fence->scheduled.context ==
+				    entity->fence_context) {
+					if (atomic_read(&bad->karma) >
+					    bad->sched->hang_limit)
+						if (entity->guilty)
+							atomic_set(entity->guilty, 1);
+					break;
+				}
+			}
+			spin_unlock(&rq->lock);
+			if (&entity->list != &rq->entities)
+				break;
+		}
+	}
+}
+EXPORT_SYMBOL(drm_sched_increase_karma);
+
+/**
+ * drm_sched_stop - stop the scheduler
+ *
+ * @sched: scheduler instance
+ * @bad: job which caused the time out
+ *
+ * Stop the scheduler and also remove and free all completed jobs.
+ * Note: the bad job will not be freed as it might be used later, so it is the
+ * caller's responsibility to release it manually if it is no longer part of
+ * the mirror list.
+ *
+ */
+void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
+{
+	struct drm_sched_job *s_job, *tmp;
+	unsigned long flags;
+
+	kthread_park(sched->thread);
+
+	/*
+	 * Reinsert back the bad job here - now it's safe as
+	 * drm_sched_get_cleanup_job cannot race against us and release the
+	 * bad job at this point - we parked (waited for) any in progress
+	 * (earlier) cleanups and drm_sched_get_cleanup_job will not be called
+	 * now until the scheduler thread is unparked.
+	 */
+	if (bad && bad->sched == sched)
+		/*
+		 * Add at the head of the queue to reflect it was the earliest
+		 * job extracted.
+		 */
+		list_add(&bad->node, &sched->ring_mirror_list);
+
+	/*
+	 * Iterate the job list from the later to the earlier ones and either
+	 * deactivate their HW callbacks or remove them from the mirror list if
+	 * they have already signaled.
+	 * This iteration is thread safe as the sched thread is stopped.
+	 */
+	list_for_each_entry_safe_reverse(s_job, tmp, &sched->ring_mirror_list, node) {
+		if (s_job->s_fence->parent &&
+		    dma_fence_remove_callback(s_job->s_fence->parent,
+					      &s_job->cb)) {
+			atomic_dec(&sched->hw_rq_count);
+		} else {
+			/*
+			 * remove job from ring_mirror_list.
+			 * Locking here is for concurrent resume timeout
+			 */
+			spin_lock_irqsave(&sched->job_list_lock, flags);
+			list_del_init(&s_job->node);
+			spin_unlock_irqrestore(&sched->job_list_lock, flags);
+
+			/*
+			 * Wait for job's HW fence callback to finish using s_job
+			 * before releasing it.
+			 *
+			 * Job is still alive so fence refcount at least 1
+			 */
+			dma_fence_wait(&s_job->s_fence->finished, false);
+
+			/*
+			 * We must keep bad job alive for later use during
+			 * recovery by some of the drivers but leave a hint
+			 * that the guilty job must be released.
+			 */
+			if (bad != s_job)
+				sched->ops->free_job(s_job);
+			else
+				sched->free_guilty = true;
+		}
+	}
+
+	/*
+	 * Stop the pending timer in flight as we rearm it in drm_sched_start. This
+	 * prevents the pending timeout work in progress from firing right away
+	 * after this TDR finished and before the newly restarted jobs have had a
+	 * chance to complete.
+	 */
+	cancel_delayed_work(&sched->work_tdr);
+}
+EXPORT_SYMBOL(drm_sched_stop);
+
+/**
+ * drm_sched_start - recover jobs after a reset
+ *
+ * @sched: scheduler instance
+ * @full_recovery: proceed with complete sched restart
+ *
+ */
+void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
+{
+	struct drm_sched_job *s_job, *tmp;
+	unsigned long flags;
+	int r;
+
+	/*
+	 * Locking the list is not required here as the sched thread is parked
+	 * so no new jobs are being inserted or removed. Also concurrent
+	 * GPU recovers can't run in parallel.
+	 */
+	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
+		struct dma_fence *fence = s_job->s_fence->parent;
+
+		atomic_inc(&sched->hw_rq_count);
+
+		if (!full_recovery)
+			continue;
+
+		if (fence) {
+			r = dma_fence_add_callback(fence, &s_job->cb,
+						   drm_sched_process_job);
+			if (r == -ENOENT)
+				drm_sched_process_job(fence, &s_job->cb);
+			else if (r)
+				DRM_ERROR("fence add callback failed (%d)\n",
+					  r);
+		} else
+			drm_sched_process_job(NULL, &s_job->cb);
+	}
+
+	if (full_recovery) {
+		spin_lock_irqsave(&sched->job_list_lock, flags);
+		drm_sched_start_timeout(sched);
+		spin_unlock_irqrestore(&sched->job_list_lock, flags);
+	}
+
+	kthread_unpark(sched->thread);
+}
+EXPORT_SYMBOL(drm_sched_start);
+
+/**
+ * drm_sched_resubmit_jobs - helper to relaunch jobs from the mirror ring list
+ *
+ * @sched: scheduler instance
+ *
+ */
+void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
+{
+	struct drm_sched_job *s_job, *tmp;
+	uint64_t guilty_context;
+	bool found_guilty = false;
+	struct dma_fence *fence;
+
+	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
+		struct drm_sched_fence *s_fence = s_job->s_fence;
+
+		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
+			found_guilty = true;
+			guilty_context = s_job->s_fence->scheduled.context;
+		}
+
+		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
+			dma_fence_set_error(&s_fence->finished, -ECANCELED);
+
+		dma_fence_put(s_job->s_fence->parent);
+		fence = sched->ops->run_job(s_job);
+
+		if (IS_ERR_OR_NULL(fence)) {
+			if (IS_ERR(fence))
+				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
+
+			s_job->s_fence->parent = NULL;
+		} else {
+			s_job->s_fence->parent = fence;
+		}
+	}
+}
+EXPORT_SYMBOL(drm_sched_resubmit_jobs);
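+
+/*
+ * Illustrative recovery sketch (assumed typical usage, not mandated by this
+ * code): a timedout_job() callback commonly stops the scheduler, bumps the
+ * karma of the bad job, resets the hw, then resubmits and restarts.
+ *
+ *	drm_sched_stop(&sched, bad_job);
+ *	drm_sched_increase_karma(bad_job);
+ *
+ *	... driver specific hw reset ...
+ *
+ *	drm_sched_resubmit_jobs(&sched);
+ *	drm_sched_start(&sched, true);
+ */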
+
+/**
+ * drm_sched_job_init - init a scheduler job
+ *
+ * @job: scheduler job to init
+ * @entity: scheduler entity to use
+ * @owner: job owner for debugging
+ *
+ * Refer to drm_sched_entity_push_job() documentation
+ * for locking considerations.
+ *
+ * Returns 0 for success, negative error code otherwise.
+ */
+int drm_sched_job_init(struct drm_sched_job *job,
+		       struct drm_sched_entity *entity,
+		       void *owner)
+{
+	struct drm_gpu_scheduler *sched;
+
+	drm_sched_entity_select_rq(entity);
+	if (!entity->rq)
+		return -ENOENT;
+
+	sched = entity->rq->sched;
+
+	job->sched = sched;
+	job->entity = entity;
+	job->s_priority = entity->rq - sched->sched_rq;
+	job->s_fence = drm_sched_fence_create(entity, owner);
+	if (!job->s_fence)
+		return -ENOMEM;
+	job->id = atomic64_inc_return(&sched->job_id_count);
+
+	INIT_LIST_HEAD(&job->node);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_sched_job_init);
+
+/**
+ * drm_sched_job_cleanup - clean up scheduler job resources
+ *
+ * @job: scheduler job to clean up
+ */
+void drm_sched_job_cleanup(struct drm_sched_job *job)
+{
+	dma_fence_put(&job->s_fence->finished);
+	job->s_fence = NULL;
+}
+EXPORT_SYMBOL(drm_sched_job_cleanup);
+
+/**
+ * drm_sched_ready - is the scheduler ready
+ *
+ * @sched: scheduler instance
+ *
+ * Return true if we can push more jobs to the hw, otherwise false.
+ */
+static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
+{
+	return atomic_read(&sched->hw_rq_count) <
+		sched->hw_submission_limit;
+}
+
+/**
+ * drm_sched_wakeup - Wake up the scheduler when it is ready
+ *
+ * @sched: scheduler instance
+ *
+ */
+void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
+{
+	if (drm_sched_ready(sched))
+		wake_up_interruptible(&sched->wake_up_worker);
+}
+
+/**
+ * drm_sched_select_entity - Select next entity to process
+ *
+ * @sched: scheduler instance
+ *
+ * Returns the entity to process or NULL if none are found.
+ */
+static struct drm_sched_entity *
+drm_sched_select_entity(struct drm_gpu_scheduler *sched)
+{
+	struct drm_sched_entity *entity;
+	int i;
+
+	if (!drm_sched_ready(sched))
+		return NULL;
+
+	/* Kernel run queue has higher priority than normal run queue */
+	for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+		entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
+		if (entity)
+			break;
+	}
+
+	return entity;
+}
+
+/**
+ * drm_sched_process_job - process a job
+ *
+ * @f: fence
+ * @cb: fence callbacks
+ *
+ * Called after job has finished execution.
+ */
+static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
+	struct drm_sched_fence *s_fence = s_job->s_fence;
+	struct drm_gpu_scheduler *sched = s_fence->sched;
+
+	atomic_dec(&sched->hw_rq_count);
+	atomic_dec(&sched->num_jobs);
+
+	trace_drm_sched_process_job(s_fence);
+
+	dma_fence_get(&s_fence->finished);
+	drm_sched_fence_finished(s_fence);
+	dma_fence_put(&s_fence->finished);
+	wake_up_interruptible(&sched->wake_up_worker);
+}
+
+/**
+ * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
+ *
+ * @sched: scheduler instance
+ *
+ * Returns the next finished job from the mirror list (if there is one)
+ * ready to be destroyed.
+ */
+static struct drm_sched_job *
+drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
+{
+	struct drm_sched_job *job;
+	unsigned long flags;
+
+	/* Don't destroy jobs while the timeout worker is running */
+	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
+	    !cancel_delayed_work(&sched->work_tdr))
+		return NULL;
+
+	spin_lock_irqsave(&sched->job_list_lock, flags);
+
+	job = list_first_entry_or_null(&sched->ring_mirror_list,
+				       struct drm_sched_job, node);
+
+	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
+		/* remove job from ring_mirror_list */
+		list_del_init(&job->node);
+	} else {
+		job = NULL;
+		/* queue timeout for next job */
+		drm_sched_start_timeout(sched);
+	}
+
+	spin_unlock_irqrestore(&sched->job_list_lock, flags);
+
+	return job;
+}
+
+/**
+ * drm_sched_blocked - check if the scheduler is blocked
+ *
+ * @sched: scheduler instance
+ *
+ * Returns true if blocked, otherwise false.
+ */
+static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
+{
+	if (kthread_should_park()) {
+		kthread_parkme();
+		return true;
+	}
+
+	return false;
+}
+
+/**
+ * drm_sched_main - main scheduler thread
+ *
+ * @param: scheduler instance
+ *
+ * Returns 0.
+ */
+static int drm_sched_main(void *param)
+{
+	struct sched_param sparam = {.sched_priority = 1};
+	struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
+	int r;
+
+	sched_setscheduler(current, SCHED_FIFO, &sparam);
+
+	while (!kthread_should_stop()) {
+		struct drm_sched_entity *entity = NULL;
+		struct drm_sched_fence *s_fence;
+		struct drm_sched_job *sched_job;
+		struct dma_fence *fence;
+		struct drm_sched_job *cleanup_job = NULL;
+
+		wait_event_interruptible(sched->wake_up_worker,
+					 (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
+					 (!drm_sched_blocked(sched) &&
+					  (entity = drm_sched_select_entity(sched))) ||
+					 kthread_should_stop());
+
+		if (cleanup_job) {
+			sched->ops->free_job(cleanup_job);
+			/* queue timeout for next job */
+			drm_sched_start_timeout(sched);
+		}
+
+		if (!entity)
+			continue;
+
+		sched_job = drm_sched_entity_pop_job(entity);
+		if (!sched_job)
+			continue;
+
+		s_fence = sched_job->s_fence;
+
+		atomic_inc(&sched->hw_rq_count);
+		drm_sched_job_begin(sched_job);
+
+		fence = sched->ops->run_job(sched_job);
+		drm_sched_fence_scheduled(s_fence);
+
+		if (!IS_ERR_OR_NULL(fence)) {
+			s_fence->parent = dma_fence_get(fence);
+			r = dma_fence_add_callback(fence, &sched_job->cb,
+						   drm_sched_process_job);
+			if (r == -ENOENT)
+				drm_sched_process_job(fence, &sched_job->cb);
+			else if (r)
+				DRM_ERROR("fence add callback failed (%d)\n",
+					  r);
+			dma_fence_put(fence);
+		} else {
+			if (IS_ERR(fence))
+				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
+
+			drm_sched_process_job(NULL, &sched_job->cb);
+		}
+
+		wake_up(&sched->job_scheduled);
+	}
+	return 0;
+}
+
+/**
+ * drm_sched_init - Init a gpu scheduler instance
+ *
+ * @sched: scheduler instance
+ * @ops: backend operations for this scheduler
+ * @hw_submission: number of hw submissions that can be in flight
+ * @hang_limit: number of times to allow a job to hang before dropping it
+ * @timeout: timeout value in jiffies for the scheduler
+ * @name: name used for debugging
+ *
+ * Return 0 on success, otherwise error code.
+ */
+int drm_sched_init(struct drm_gpu_scheduler *sched,
+		   const struct drm_sched_backend_ops *ops,
+		   unsigned hw_submission,
+		   unsigned hang_limit,
+		   long timeout,
+		   const char *name)
+{
+	int i, ret;
+
+	sched->ops = ops;
+	sched->hw_submission_limit = hw_submission;
+	sched->name = name;
+	sched->timeout = timeout;
+	sched->hang_limit = hang_limit;
+	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++)
+		drm_sched_rq_init(sched, &sched->sched_rq[i]);
+
+	init_waitqueue_head(&sched->wake_up_worker);
+	init_waitqueue_head(&sched->job_scheduled);
+	INIT_LIST_HEAD(&sched->ring_mirror_list);
+	spin_lock_init(&sched->job_list_lock);
+	atomic_set(&sched->hw_rq_count, 0);
+	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
+	atomic_set(&sched->num_jobs, 0);
+	atomic64_set(&sched->job_id_count, 0);
+
+	/* Each scheduler will run on a separate kernel thread */
+	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
+	if (IS_ERR(sched->thread)) {
+		ret = PTR_ERR(sched->thread);
+		sched->thread = NULL;
+		DRM_ERROR("Failed to create scheduler for %s.\n", name);
+		return ret;
+	}
+
+	sched->ready = true;
+	return 0;
+}
+EXPORT_SYMBOL(drm_sched_init);
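+
+/*
+ * Illustrative sketch of typical usage (assumed; "my_ops", the submission
+ * depth of 4, hang limit of 2 and the one second timeout are placeholder
+ * choices): drivers usually call this once per hw ring at load time.
+ *
+ *	r = drm_sched_init(&my_sched, &my_ops, 4, 2,
+ *			   msecs_to_jiffies(1000), "ring0");
+ *	if (r)
+ *		return r;
+ */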
+
+/**
+ * drm_sched_fini - Destroy a gpu scheduler
+ *
+ * @sched: scheduler instance
+ *
+ * Tears down and cleans up the scheduler.
+ */
+void drm_sched_fini(struct drm_gpu_scheduler *sched)
+{
+	if (sched->thread)
+		kthread_stop(sched->thread);
+
+	/* Confirm no work left behind accessing device structures */
+	cancel_delayed_work_sync(&sched->work_tdr);
+
+	sched->ready = false;
+}
+EXPORT_SYMBOL(drm_sched_fini);