Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/2837-drm-move-amd_gpu_scheduler-into-common-location.patch')
-rw-r--r--  meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/2837-drm-move-amd_gpu_scheduler-into-common-location.patch  3734
1 file changed, 3734 insertions, 0 deletions
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/2837-drm-move-amd_gpu_scheduler-into-common-location.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/2837-drm-move-amd_gpu_scheduler-into-common-location.patch
new file mode 100644
index 00000000..4ac30a80
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/2837-drm-move-amd_gpu_scheduler-into-common-location.patch
@@ -0,0 +1,3734 @@
+From a1903701852429ceefd535123dcd15e64c5f67e3 Mon Sep 17 00:00:00 2001
+From: Lucas Stach <l.stach@pengutronix.de>
+Date: Wed, 6 Dec 2017 17:49:39 +0100
+Subject: [PATCH 2837/4131] drm: move amd_gpu_scheduler into common location
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This moves and renames the AMDGPU scheduler to a common location in DRM
+in order to facilitate re-use by other drivers. This is mostly a
+straightforward rename with no code changes.
+
+One notable exception is the function to_drm_sched_fence(), which is no
+longer an inline header function to avoid the need to export the
+drm_sched_fence_ops_scheduled and drm_sched_fence_ops_finished structures.
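
A minimal sketch of what that un-inlining looks like in the new
drivers/gpu/drm/scheduler/sched_fence.c (the file is added further down in
this patch; the exact body there may differ slightly). The two fence ops
tables stay static to the file and only the cast helper is exported:

	struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
	{
		/* Resolve which embedded fence we were handed by comparing ops. */
		if (f->ops == &drm_sched_fence_ops_scheduled)
			return container_of(f, struct drm_sched_fence, scheduled);

		if (f->ops == &drm_sched_fence_ops_finished)
			return container_of(f, struct drm_sched_fence, finished);

		return NULL;	/* not a scheduler fence */
	}
	EXPORT_SYMBOL(to_drm_sched_fence);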
+
+Change-Id: I2d8f033671e43e6ee76a4a0db52a092224a37c4f
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Tested-by: Dieter Nützel <Dieter@nuetzel-hh.de>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/Kconfig | 5 +
+ drivers/gpu/drm/Makefile | 1 +
+ drivers/gpu/drm/amd/amdgpu/Makefile | 5 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 16 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 6 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 38 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 4 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 4 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 20 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 14 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 12 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 20 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 8 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 8 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 4 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 8 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h | 4 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 8 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 14 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 4 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 10 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 7 +-
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 4 +-
+ drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 8 +-
+ drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 8 +-
+ drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h | 60 --
+ drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 745 ------------------------
+ drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 185 ------
+ drivers/gpu/drm/amd/scheduler/sched_fence.c | 173 ------
+ drivers/gpu/drm/amd/scheduler/spsc_queue.h | 121 ----
+ drivers/gpu/drm/scheduler/Makefile | 4 +
+ drivers/gpu/drm/scheduler/gpu_scheduler.c | 744 +++++++++++++++++++++++
+ drivers/gpu/drm/scheduler/sched_fence.c | 187 ++++++
+ include/drm/gpu_scheduler.h | 176 ++++++
+ include/drm/gpu_scheduler_trace.h | 59 ++
+ include/drm/spsc_queue.h | 122 ++++
+ 39 files changed, 1418 insertions(+), 1406 deletions(-)
+ delete mode 100644 drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
+ delete mode 100644 drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+ delete mode 100644 drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+ delete mode 100644 drivers/gpu/drm/amd/scheduler/sched_fence.c
+ delete mode 100644 drivers/gpu/drm/amd/scheduler/spsc_queue.h
+ create mode 100644 drivers/gpu/drm/scheduler/Makefile
+ create mode 100644 drivers/gpu/drm/scheduler/gpu_scheduler.c
+ create mode 100644 drivers/gpu/drm/scheduler/sched_fence.c
+ create mode 100644 include/drm/gpu_scheduler.h
+ create mode 100644 include/drm/gpu_scheduler_trace.h
+ create mode 100644 include/drm/spsc_queue.h
+
+diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
+index e36bb0d..e486c52e9 100644
+--- a/drivers/gpu/drm/Kconfig
++++ b/drivers/gpu/drm/Kconfig
+@@ -149,6 +149,10 @@ config DRM_VM
+ bool
+ depends on DRM && MMU
+
++config DRM_SCHED
++ tristate
++ depends on DRM
++
+ source "drivers/gpu/drm/i2c/Kconfig"
+
+ source "drivers/gpu/drm/arm/Kconfig"
+@@ -179,6 +183,7 @@ config DRM_AMDGPU
+ depends on DRM && PCI && MMU
+ select FW_LOADER
+ select DRM_KMS_HELPER
++ select DRM_SCHED
+ select DRM_TTM
+ select POWER_SUPPLY
+ select HWMON
+diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
+index 0105e9f..2db9906 100644
+--- a/drivers/gpu/drm/Makefile
++++ b/drivers/gpu/drm/Makefile
+@@ -102,3 +102,4 @@ obj-$(CONFIG_DRM_ZTE) += zte/
+ obj-$(CONFIG_DRM_MXSFB) += mxsfb/
+ obj-$(CONFIG_DRM_TINYDRM) += tinydrm/
+ obj-$(CONFIG_DRM_PL111) += pl111/
++obj-$(CONFIG_DRM_SCHED) += scheduler/
+diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
+index 728ab8c..dd784de 100755
+--- a/drivers/gpu/drm/amd/amdgpu/Makefile
++++ b/drivers/gpu/drm/amd/amdgpu/Makefile
+@@ -117,10 +117,7 @@ amdgpu-y += \
+ amdgpu-y += amdgpu_cgs.o
+
+ # GPU scheduler
+-amdgpu-y += \
+- ../scheduler/gpu_scheduler.o \
+- ../scheduler/sched_fence.o \
+- amdgpu_job.o
++amdgpu-y += amdgpu_job.o
+
+ # ACP componet
+ ifneq ($(CONFIG_DRM_AMD_ACP),)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index c32bfc4..092d637 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -45,6 +45,7 @@
+ #include <drm/drmP.h>
+ #include <drm/drm_gem.h>
+ #include <drm/amdgpu_drm.h>
++#include <drm/gpu_scheduler.h>
+
+ #include <kgd_kfd_interface.h>
+ #include "dm_pp_interface.h"
+@@ -69,7 +70,6 @@
+ #include "amdgpu_vcn.h"
+ #include "amdgpu_dm.h"
+ #include "amdgpu_mn.h"
+-#include "gpu_scheduler.h"
+ #include "amdgpu_virt.h"
+ #include "amdgpu_gart.h"
+
+@@ -708,7 +708,7 @@ struct amdgpu_ib {
+ uint32_t flags;
+ };
+
+-extern const struct amd_sched_backend_ops amdgpu_sched_ops;
++extern const struct drm_sched_backend_ops amdgpu_sched_ops;
+
+ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
+ struct amdgpu_job **job, struct amdgpu_vm *vm);
+@@ -718,7 +718,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
+ void amdgpu_job_free_resources(struct amdgpu_job *job);
+ void amdgpu_job_free(struct amdgpu_job *job);
+ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
+- struct amd_sched_entity *entity, void *owner,
++ struct drm_sched_entity *entity, void *owner,
+ struct dma_fence **f);
+
+ /*
+@@ -751,7 +751,7 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
+ struct amdgpu_ctx_ring {
+ uint64_t sequence;
+ struct dma_fence **fences;
+- struct amd_sched_entity entity;
++ struct drm_sched_entity entity;
+ struct list_head sem_dep_list;
+ struct mutex sem_lock;
+ };
+@@ -767,8 +767,8 @@ struct amdgpu_ctx {
+ struct dma_fence **fences;
+ struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
+ bool preamble_presented;
+- enum amd_sched_priority init_priority;
+- enum amd_sched_priority override_priority;
++ enum drm_sched_priority init_priority;
++ enum drm_sched_priority override_priority;
+ struct mutex lock;
+ atomic_t guilty;
+ };
+@@ -788,7 +788,7 @@ int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
+ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+ struct amdgpu_ring *ring, uint64_t seq);
+ void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
+- enum amd_sched_priority priority);
++ enum drm_sched_priority priority);
+
+ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+@@ -1140,7 +1140,7 @@ struct amdgpu_cs_parser {
+ #define AMDGPU_HAVE_CTX_SWITCH (1 << 2) /* bit set means context switch occured */
+
+ struct amdgpu_job {
+- struct amd_sched_job base;
++ struct drm_sched_job base;
+ struct amdgpu_device *adev;
+ struct amdgpu_vm *vm;
+ struct amdgpu_ring *ring;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 073633c..ad1a3d2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -1197,7 +1197,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
+ union drm_amdgpu_cs *cs)
+ {
+ struct amdgpu_ring *ring = p->job->ring;
+- struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
++ struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
+ struct amdgpu_job *job;
+ unsigned i;
+ uint64_t seq;
+@@ -1220,7 +1220,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
+ job = p->job;
+ p->job = NULL;
+
+- r = amd_sched_job_init(&job->base, &ring->sched, entity, p->filp);
++ r = drm_sched_job_init(&job->base, &ring->sched, entity, p->filp);
+ if (r) {
+ amdgpu_job_free(job);
+ amdgpu_mn_unlock(p->mn);
+@@ -1251,7 +1251,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
+ amdgpu_ring_priority_get(job->ring, job->base.s_priority);
+
+ trace_amdgpu_cs_ioctl(job);
+- amd_sched_entity_push_job(&job->base, entity);
++ drm_sched_entity_push_job(&job->base, entity);
+
+ ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
+ amdgpu_mn_unlock(p->mn);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+index 10c1eeb..6008f5a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+@@ -28,10 +28,10 @@
+ #include "amdgpu_sched.h"
+
+ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
+- enum amd_sched_priority priority)
++ enum drm_sched_priority priority)
+ {
+ /* NORMAL and below are accessible by everyone */
+- if (priority <= AMD_SCHED_PRIORITY_NORMAL)
++ if (priority <= DRM_SCHED_PRIORITY_NORMAL)
+ return 0;
+
+ if (capable(CAP_SYS_NICE))
+@@ -44,14 +44,14 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
+ }
+
+ static int amdgpu_ctx_init(struct amdgpu_device *adev,
+- enum amd_sched_priority priority,
++ enum drm_sched_priority priority,
+ struct drm_file *filp,
+ struct amdgpu_ctx *ctx)
+ {
+ unsigned i, j;
+ int r;
+
+- if (priority < 0 || priority >= AMD_SCHED_PRIORITY_MAX)
++ if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
+ return -EINVAL;
+
+ r = amdgpu_ctx_priority_permit(filp, priority);
+@@ -80,19 +80,19 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
+ ctx->reset_counter_query = ctx->reset_counter;
+ ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
+ ctx->init_priority = priority;
+- ctx->override_priority = AMD_SCHED_PRIORITY_UNSET;
++ ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
+
+ /* create context entity for each ring */
+ for (i = 0; i < adev->num_rings; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+- struct amd_sched_rq *rq;
++ struct drm_sched_rq *rq;
+
+ rq = &ring->sched.sched_rq[priority];
+
+ if (ring == &adev->gfx.kiq.ring)
+ continue;
+
+- r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
++ r = drm_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
+ rq, amdgpu_sched_jobs, &ctx->guilty);
+ if (r)
+ goto failed;
+@@ -106,7 +106,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
+
+ failed:
+ for (j = 0; j < i; j++)
+- amd_sched_entity_fini(&adev->rings[j]->sched,
++ drm_sched_entity_fini(&adev->rings[j]->sched,
+ &ctx->rings[j].entity);
+ kfree(ctx->fences);
+ ctx->fences = NULL;
+@@ -143,7 +143,7 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
+ ctx->fences = NULL;
+
+ for (i = 0; i < adev->num_rings; i++)
+- amd_sched_entity_fini(&adev->rings[i]->sched,
++ drm_sched_entity_fini(&adev->rings[i]->sched,
+ &ctx->rings[i].entity);
+
+ amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);
+@@ -154,7 +154,7 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
+ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
+ struct amdgpu_fpriv *fpriv,
+ struct drm_file *filp,
+- enum amd_sched_priority priority,
++ enum drm_sched_priority priority,
+ uint32_t *id)
+ {
+ struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
+@@ -283,7 +283,7 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
+ {
+ int r;
+ uint32_t id;
+- enum amd_sched_priority priority;
++ enum drm_sched_priority priority;
+
+ union drm_amdgpu_ctx *args = data;
+ struct amdgpu_device *adev = dev->dev_private;
+@@ -295,8 +295,8 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
+
+ /* For backwards compatibility reasons, we need to accept
+ * ioctls with garbage in the priority field */
+- if (priority == AMD_SCHED_PRIORITY_INVALID)
+- priority = AMD_SCHED_PRIORITY_NORMAL;
++ if (priority == DRM_SCHED_PRIORITY_INVALID)
++ priority = DRM_SCHED_PRIORITY_NORMAL;
+
+ switch (args->in.op) {
+ case AMDGPU_CTX_OP_ALLOC_CTX:
+@@ -402,18 +402,18 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+ }
+
+ void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
+- enum amd_sched_priority priority)
++ enum drm_sched_priority priority)
+ {
+ int i;
+ struct amdgpu_device *adev = ctx->adev;
+- struct amd_sched_rq *rq;
+- struct amd_sched_entity *entity;
++ struct drm_sched_rq *rq;
++ struct drm_sched_entity *entity;
+ struct amdgpu_ring *ring;
+- enum amd_sched_priority ctx_prio;
++ enum drm_sched_priority ctx_prio;
+
+ ctx->override_priority = priority;
+
+- ctx_prio = (ctx->override_priority == AMD_SCHED_PRIORITY_UNSET) ?
++ ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
+ ctx->init_priority : ctx->override_priority;
+
+ for (i = 0; i < adev->num_rings; i++) {
+@@ -424,7 +424,7 @@ void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
+ if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+ continue;
+
+- amd_sched_entity_set_rq(entity, rq);
++ drm_sched_entity_set_rq(entity, rq);
+ }
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 4fc4439..33ecb9ca 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -3013,7 +3013,7 @@ int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job)
+ continue;
+
+ kthread_park(ring->sched.thread);
+- amd_sched_hw_job_reset(&ring->sched, &job->base);
++ drm_sched_hw_job_reset(&ring->sched, &job->base);
+
+ /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
+ amdgpu_fence_driver_force_completion(ring);
+@@ -3066,7 +3066,7 @@ int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job)
+ if (job && job->ring->idx != i)
+ continue;
+
+- amd_sched_job_recovery(&ring->sched);
++ drm_sched_job_recovery(&ring->sched);
+ kthread_unpark(ring->sched.thread);
+ }
+ } else {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index d2a958a..db27485 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -900,7 +900,7 @@ static int __init amdgpu_init(void)
+ if (r)
+ goto error_fence;
+
+- r = amd_sched_fence_slab_init();
++ r = drm_sched_fence_slab_init();
+ if (r)
+ goto error_sched;
+
+@@ -932,7 +932,7 @@ static void __exit amdgpu_exit(void)
+ pci_unregister_driver(pdriver);
+ amdgpu_unregister_atpx_handler();
+ amdgpu_sync_fini();
+- amd_sched_fence_slab_fini();
++ drm_sched_fence_slab_fini();
+ amdgpu_fence_slab_fini();
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+index d0e5aeb..8d93143 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+@@ -445,7 +445,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
+ */
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ }
+- r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
++ r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
+ num_hw_submission, amdgpu_job_hang_limit,
+ timeout, ring->name);
+ if (r) {
+@@ -503,7 +503,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
+ }
+ amdgpu_irq_put(adev, ring->fence_drv.irq_src,
+ ring->fence_drv.irq_type);
+- amd_sched_fini(&ring->sched);
++ drm_sched_fini(&ring->sched);
+ del_timer_sync(&ring->fence_drv.fallback_timer);
+ for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
+ dma_fence_put(ring->fence_drv.fences[j]);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+index a7ec57b..9ca22a4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+@@ -28,7 +28,7 @@
+ #include "amdgpu.h"
+ #include "amdgpu_trace.h"
+
+-static void amdgpu_job_timedout(struct amd_sched_job *s_job)
++static void amdgpu_job_timedout(struct drm_sched_job *s_job)
+ {
+ struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
+
+@@ -96,7 +96,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
+ amdgpu_ib_free(job->adev, &job->ibs[i], f);
+ }
+
+-static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
++static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
+ {
+ struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
+
+@@ -118,7 +118,7 @@ void amdgpu_job_free(struct amdgpu_job *job)
+ }
+
+ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
+- struct amd_sched_entity *entity, void *owner,
++ struct drm_sched_entity *entity, void *owner,
+ struct dma_fence **f)
+ {
+ int r;
+@@ -127,7 +127,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
+ if (!f)
+ return -EINVAL;
+
+- r = amd_sched_job_init(&job->base, &ring->sched, entity, owner);
++ r = drm_sched_job_init(&job->base, &ring->sched, entity, owner);
+ if (r)
+ return r;
+
+@@ -136,13 +136,13 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
+ *f = dma_fence_get(&job->base.s_fence->finished);
+ amdgpu_job_free_resources(job);
+ amdgpu_ring_priority_get(job->ring, job->base.s_priority);
+- amd_sched_entity_push_job(&job->base, entity);
++ drm_sched_entity_push_job(&job->base, entity);
+
+ return 0;
+ }
+
+-static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job,
+- struct amd_sched_entity *s_entity)
++static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
++ struct drm_sched_entity *s_entity)
+ {
+ struct amdgpu_job *job = to_amdgpu_job(sched_job);
+ struct amdgpu_vm *vm = job->vm;
+@@ -151,7 +151,7 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job,
+ struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync, &explicit);
+
+ if (fence && explicit) {
+- if (amd_sched_dependency_optimized(fence, s_entity)) {
++ if (drm_sched_dependency_optimized(fence, s_entity)) {
+ r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence, false);
+ if (r)
+ DRM_ERROR("Error adding fence to sync (%d)\n", r);
+@@ -173,7 +173,7 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job,
+ return fence;
+ }
+
+-static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
++static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
+ {
+ struct dma_fence *fence = NULL, *finished;
+ struct amdgpu_device *adev;
+@@ -211,7 +211,7 @@ static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
+ return fence;
+ }
+
+-const struct amd_sched_backend_ops amdgpu_sched_ops = {
++const struct drm_sched_backend_ops amdgpu_sched_ops = {
+ .dependency = amdgpu_job_dependency,
+ .run_job = amdgpu_job_run,
+ .timedout_job = amdgpu_job_timedout,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+index a98fbbb..41c75f9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+@@ -164,7 +164,7 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
+ * Release a request for executing at @priority
+ */
+ void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
+- enum amd_sched_priority priority)
++ enum drm_sched_priority priority)
+ {
+ int i;
+
+@@ -175,7 +175,7 @@ void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
+ return;
+
+ /* no need to restore if the job is already at the lowest priority */
+- if (priority == AMD_SCHED_PRIORITY_NORMAL)
++ if (priority == DRM_SCHED_PRIORITY_NORMAL)
+ return;
+
+ mutex_lock(&ring->priority_mutex);
+@@ -184,8 +184,8 @@ void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
+ goto out_unlock;
+
+ /* decay priority to the next level with a job available */
+- for (i = priority; i >= AMD_SCHED_PRIORITY_MIN; i--) {
+- if (i == AMD_SCHED_PRIORITY_NORMAL
++ for (i = priority; i >= DRM_SCHED_PRIORITY_MIN; i--) {
++ if (i == DRM_SCHED_PRIORITY_NORMAL
+ || atomic_read(&ring->num_jobs[i])) {
+ ring->priority = i;
+ ring->funcs->set_priority(ring, i);
+@@ -206,7 +206,7 @@ void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
+ * Request a ring's priority to be raised to @priority (refcounted).
+ */
+ void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
+- enum amd_sched_priority priority)
++ enum drm_sched_priority priority)
+ {
+ if (!ring->funcs->set_priority)
+ return;
+@@ -317,12 +317,12 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
+ }
+
+ ring->max_dw = max_dw;
+- ring->priority = AMD_SCHED_PRIORITY_NORMAL;
++ ring->priority = DRM_SCHED_PRIORITY_NORMAL;
+ mutex_init(&ring->priority_mutex);
+ INIT_LIST_HEAD(&ring->lru_list);
+ amdgpu_ring_lru_touch(adev, ring);
+
+- for (i = 0; i < AMD_SCHED_PRIORITY_MAX; ++i)
++ for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i)
+ atomic_set(&ring->num_jobs[i], 0);
+
+ if (amdgpu_debugfs_ring_init(adev, ring)) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+index 907e1e0..fb04185 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+@@ -25,7 +25,7 @@
+ #define __AMDGPU_RING_H__
+
+ #include <drm/amdgpu_drm.h>
+-#include "gpu_scheduler.h"
++#include <drm/gpu_scheduler.h>
+
+ /* max number of rings */
+ #define AMDGPU_MAX_RINGS 18
+@@ -155,14 +155,14 @@ struct amdgpu_ring_funcs {
+ void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
+ /* priority functions */
+ void (*set_priority) (struct amdgpu_ring *ring,
+- enum amd_sched_priority priority);
++ enum drm_sched_priority priority);
+ };
+
+ struct amdgpu_ring {
+ struct amdgpu_device *adev;
+ const struct amdgpu_ring_funcs *funcs;
+ struct amdgpu_fence_driver fence_drv;
+- struct amd_gpu_scheduler sched;
++ struct drm_gpu_scheduler sched;
+ struct list_head lru_list;
+
+ struct amdgpu_bo *ring_obj;
+@@ -197,7 +197,7 @@ struct amdgpu_ring {
+ unsigned vm_inv_eng;
+ bool has_compute_vm_bug;
+
+- atomic_t num_jobs[AMD_SCHED_PRIORITY_MAX];
++ atomic_t num_jobs[DRM_SCHED_PRIORITY_MAX];
+ struct mutex priority_mutex;
+ /* protected by priority_mutex */
+ int priority;
+@@ -213,9 +213,9 @@ void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
+ void amdgpu_ring_commit(struct amdgpu_ring *ring);
+ void amdgpu_ring_undo(struct amdgpu_ring *ring);
+ void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
+- enum amd_sched_priority priority);
++ enum drm_sched_priority priority);
+ void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
+- enum amd_sched_priority priority);
++ enum drm_sched_priority priority);
+ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
+ unsigned ring_size, struct amdgpu_irq_src *irq_src,
+ unsigned irq_type);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+index 290cc3f..86a0715 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+@@ -29,29 +29,29 @@
+
+ #include "amdgpu_vm.h"
+
+-enum amd_sched_priority amdgpu_to_sched_priority(int amdgpu_priority)
++enum drm_sched_priority amdgpu_to_sched_priority(int amdgpu_priority)
+ {
+ switch (amdgpu_priority) {
+ case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+- return AMD_SCHED_PRIORITY_HIGH_HW;
++ return DRM_SCHED_PRIORITY_HIGH_HW;
+ case AMDGPU_CTX_PRIORITY_HIGH:
+- return AMD_SCHED_PRIORITY_HIGH_SW;
++ return DRM_SCHED_PRIORITY_HIGH_SW;
+ case AMDGPU_CTX_PRIORITY_NORMAL:
+- return AMD_SCHED_PRIORITY_NORMAL;
++ return DRM_SCHED_PRIORITY_NORMAL;
+ case AMDGPU_CTX_PRIORITY_LOW:
+ case AMDGPU_CTX_PRIORITY_VERY_LOW:
+- return AMD_SCHED_PRIORITY_LOW;
++ return DRM_SCHED_PRIORITY_LOW;
+ case AMDGPU_CTX_PRIORITY_UNSET:
+- return AMD_SCHED_PRIORITY_UNSET;
++ return DRM_SCHED_PRIORITY_UNSET;
+ default:
+ WARN(1, "Invalid context priority %d\n", amdgpu_priority);
+- return AMD_SCHED_PRIORITY_INVALID;
++ return DRM_SCHED_PRIORITY_INVALID;
+ }
+ }
+
+ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
+ int fd,
+- enum amd_sched_priority priority)
++ enum drm_sched_priority priority)
+ {
+ struct file *filp = fcheck(fd);
+ struct drm_file *file;
+@@ -86,11 +86,11 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
+ {
+ union drm_amdgpu_sched *args = data;
+ struct amdgpu_device *adev = dev->dev_private;
+- enum amd_sched_priority priority;
++ enum drm_sched_priority priority;
+ int r;
+
+ priority = amdgpu_to_sched_priority(args->in.priority);
+- if (args->in.flags || priority == AMD_SCHED_PRIORITY_INVALID)
++ if (args->in.flags || priority == DRM_SCHED_PRIORITY_INVALID)
+ return -EINVAL;
+
+ switch (args->in.op) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h
+index b28c067..2a1a0c7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h
+@@ -27,7 +27,7 @@
+
+ #include <drm/drmP.h>
+
+-enum amd_sched_priority amdgpu_to_sched_priority(int amdgpu_priority);
++enum drm_sched_priority amdgpu_to_sched_priority(int amdgpu_priority);
+ int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+index 6313edc..10059d9 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+@@ -65,7 +65,7 @@ void amdgpu_sync_create(struct amdgpu_sync *sync)
+ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
+ struct dma_fence *f)
+ {
+- struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
++ struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
+
+ if (s_fence) {
+ struct amdgpu_ring *ring;
+@@ -86,13 +86,13 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
+ */
+ static void *amdgpu_sync_get_owner(struct dma_fence *f)
+ {
+- struct amd_sched_fence *s_fence;
++ struct drm_sched_fence *s_fence;
+ struct amdgpu_amdkfd_fence *kfd_fence;
+
+ if (f == NULL)
+ return AMDGPU_FENCE_OWNER_UNDEFINED;
+
+- s_fence = to_amd_sched_fence(f);
++ s_fence = to_drm_sched_fence(f);
+ if (s_fence)
+ return s_fence->owner;
+
+@@ -262,7 +262,7 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+
+ hash_for_each_safe(sync->fences, i, tmp, e, node) {
+ struct dma_fence *f = e->fence;
+- struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
++ struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
+
+ if (dma_fence_is_signaled(f)) {
+ hash_del(&e->node);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index df20a004..7e5095c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -77,7 +77,7 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
+ {
+ struct drm_global_reference *global_ref;
+ struct amdgpu_ring *ring;
+- struct amd_sched_rq *rq;
++ struct drm_sched_rq *rq;
+ int r;
+
+ adev->mman.mem_global_referenced = false;
+@@ -109,8 +109,8 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
+ mutex_init(&adev->mman.gtt_window_lock);
+
+ ring = adev->mman.buffer_funcs_ring;
+- rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
+- r = amd_sched_entity_init(&ring->sched, &adev->mman.entity,
++ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
++ r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
+ rq, amdgpu_sched_jobs, NULL);
+ if (r) {
+ DRM_ERROR("Failed setting up TTM BO move run queue.\n");
+@@ -132,7 +132,7 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
+ static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
+ {
+ if (adev->mman.mem_global_referenced) {
+- amd_sched_entity_fini(adev->mman.entity.sched,
++ drm_sched_entity_fini(adev->mman.entity.sched,
+ &adev->mman.entity);
+ mutex_destroy(&adev->mman.gtt_window_lock);
+ drm_global_item_unref(&adev->mman.bo_global_ref.ref);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+index bf3bc84..4a8384c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+@@ -25,7 +25,7 @@
+ #define __AMDGPU_TTM_H__
+
+ #include "amdgpu.h"
+-#include "gpu_scheduler.h"
++#include <drm/gpu_scheduler.h>
+
+ #define AMDGPU_PL_GDS (TTM_PL_PRIV + 0)
+ #define AMDGPU_PL_GWS (TTM_PL_PRIV + 1)
+@@ -59,7 +59,7 @@ struct amdgpu_mman {
+
+ struct mutex gtt_window_lock;
+ /* Scheduler entity for buffer moves */
+- struct amd_sched_entity entity;
++ struct drm_sched_entity entity;
+ };
+
+ struct amdgpu_copy_mem {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+index af7e83d..ed73661 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+@@ -116,7 +116,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
+ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
+ {
+ struct amdgpu_ring *ring;
+- struct amd_sched_rq *rq;
++ struct drm_sched_rq *rq;
+ unsigned long bo_size;
+ const char *fw_name;
+ const struct common_firmware_header *hdr;
+@@ -230,8 +230,8 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
+ }
+
+ ring = &adev->uvd.ring;
+- rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
+- r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity,
++ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
++ r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity,
+ rq, amdgpu_sched_jobs, NULL);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up UVD run queue.\n");
+@@ -272,7 +272,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
+ int i;
+ kfree(adev->uvd.saved_bo);
+
+- amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
++ drm_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
+
+ amdgpu_bo_free_kernel(&adev->uvd.vcpu_bo,
+ &adev->uvd.gpu_addr,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+index 845eea9..32ea20b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+@@ -51,8 +51,8 @@ struct amdgpu_uvd {
+ struct amdgpu_irq_src irq;
+ bool address_64_bit;
+ bool use_ctx_buf;
+- struct amd_sched_entity entity;
+- struct amd_sched_entity entity_enc;
++ struct drm_sched_entity entity;
++ struct drm_sched_entity entity_enc;
+ uint32_t srbm_soft_reset;
+ unsigned num_enc_rings;
+ };
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+index a91abfb..5b70088 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+@@ -85,7 +85,7 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work);
+ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
+ {
+ struct amdgpu_ring *ring;
+- struct amd_sched_rq *rq;
++ struct drm_sched_rq *rq;
+ const char *fw_name;
+ const struct common_firmware_header *hdr;
+ unsigned ucode_version, version_major, version_minor, binary_id;
+@@ -174,8 +174,8 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
+ }
+
+ ring = &adev->vce.ring[0];
+- rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
+- r = amd_sched_entity_init(&ring->sched, &adev->vce.entity,
++ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
++ r = drm_sched_entity_init(&ring->sched, &adev->vce.entity,
+ rq, amdgpu_sched_jobs, NULL);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up VCE run queue.\n");
+@@ -207,7 +207,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
+ if (adev->vce.vcpu_bo == NULL)
+ return 0;
+
+- amd_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity);
++ drm_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity);
+
+ amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
+ (void **)&adev->vce.cpu_addr);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+index 5ce54cd..162cae9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+@@ -46,7 +46,7 @@ struct amdgpu_vce {
+ struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS];
+ struct amdgpu_irq_src irq;
+ unsigned harvest_config;
+- struct amd_sched_entity entity;
++ struct drm_sched_entity entity;
+ uint32_t srbm_soft_reset;
+ unsigned num_rings;
+ };
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index c18d6135..bdbdfd9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -51,7 +51,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
+ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
+ {
+ struct amdgpu_ring *ring;
+- struct amd_sched_rq *rq;
++ struct drm_sched_rq *rq;
+ unsigned long bo_size;
+ const char *fw_name;
+ const struct common_firmware_header *hdr;
+@@ -106,8 +106,8 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
+ }
+
+ ring = &adev->vcn.ring_dec;
+- rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
+- r = amd_sched_entity_init(&ring->sched, &adev->vcn.entity_dec,
++ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
++ r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_dec,
+ rq, amdgpu_sched_jobs, NULL);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up VCN dec run queue.\n");
+@@ -115,8 +115,8 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
+ }
+
+ ring = &adev->vcn.ring_enc[0];
+- rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
+- r = amd_sched_entity_init(&ring->sched, &adev->vcn.entity_enc,
++ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
++ r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_enc,
+ rq, amdgpu_sched_jobs, NULL);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up VCN enc run queue.\n");
+@@ -132,9 +132,9 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
+
+ kfree(adev->vcn.saved_bo);
+
+- amd_sched_entity_fini(&adev->vcn.ring_dec.sched, &adev->vcn.entity_dec);
++ drm_sched_entity_fini(&adev->vcn.ring_dec.sched, &adev->vcn.entity_dec);
+
+- amd_sched_entity_fini(&adev->vcn.ring_enc[0].sched, &adev->vcn.entity_enc);
++ drm_sched_entity_fini(&adev->vcn.ring_enc[0].sched, &adev->vcn.entity_enc);
+
+ amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
+ &adev->vcn.gpu_addr,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+index d50ba06..2fd7db8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+@@ -56,8 +56,8 @@ struct amdgpu_vcn {
+ struct amdgpu_ring ring_dec;
+ struct amdgpu_ring ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
+ struct amdgpu_irq_src irq;
+- struct amd_sched_entity entity_dec;
+- struct amd_sched_entity entity_enc;
++ struct drm_sched_entity entity_dec;
++ struct drm_sched_entity entity_enc;
+ unsigned num_enc_rings;
+ };
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index e384ee3c..90a3bdb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -2680,7 +2680,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ AMDGPU_VM_PTE_COUNT(adev) * 8);
+ unsigned ring_instance;
+ struct amdgpu_ring *ring;
+- struct amd_sched_rq *rq;
++ struct drm_sched_rq *rq;
+ int r, i;
+ u64 flags;
+ uint64_t init_pde_value = 0;
+@@ -2700,8 +2700,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
+ ring_instance %= adev->vm_manager.vm_pte_num_rings;
+ ring = adev->vm_manager.vm_pte_rings[ring_instance];
+- rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
+- r = amd_sched_entity_init(&ring->sched, &vm->entity,
++ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
++ r = drm_sched_entity_init(&ring->sched, &vm->entity,
+ rq, amdgpu_sched_jobs, NULL);
+ if (r)
+ return r;
+@@ -2798,7 +2798,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ vm->root.base.bo = NULL;
+
+ error_free_sched_entity:
+- amd_sched_entity_fini(&ring->sched, &vm->entity);
++ drm_sched_entity_fini(&ring->sched, &vm->entity);
+
+ return r;
+ }
+@@ -2879,7 +2879,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+ mutex_unlock(&id_mgr->lock);
+ }
+
+- amd_sched_entity_fini(vm->entity.sched, &vm->entity);
++ drm_sched_entity_fini(vm->entity.sched, &vm->entity);
+
+ if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
+ dev_err(adev->dev, "still active bo inside vm\n");
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+index 693f13f..870341d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+@@ -24,10 +24,11 @@
+ #ifndef __AMDGPU_VM_H__
+ #define __AMDGPU_VM_H__
+
+-#include <linux/rbtree.h>
+ #include <linux/idr.h>
++#include <linux/kfifo.h>
++#include <linux/rbtree.h>
++#include <drm/gpu_scheduler.h>
+
+-#include "gpu_scheduler.h"
+ #include "amdgpu_sync.h"
+ #include "amdgpu_ring.h"
+
+@@ -175,7 +176,7 @@ struct amdgpu_vm {
+ spinlock_t freed_lock;
+
+ /* Scheduler entity for page table updates */
+- struct amd_sched_entity entity;
++ struct drm_sched_entity entity;
+
+ /* client id and PASID (TODO: replace client_id with PASID) */
+ u64 client_id;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index e1d20f9..cfabb4e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -6475,10 +6475,10 @@ static void gfx_v8_0_hqd_set_priority(struct amdgpu_device *adev,
+ mutex_unlock(&adev->srbm_mutex);
+ }
+ static void gfx_v8_0_ring_set_priority_compute(struct amdgpu_ring *ring,
+- enum amd_sched_priority priority)
++ enum drm_sched_priority priority)
+ {
+ struct amdgpu_device *adev = ring->adev;
+- bool acquire = priority == AMD_SCHED_PRIORITY_HIGH_HW;
++ bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW;
+
+ if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
+ return;
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+index 0aa111f..2d4159f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+@@ -412,10 +412,10 @@ static int uvd_v6_0_sw_init(void *handle)
+ return r;
+
+ if (uvd_v6_0_enc_support(adev)) {
+- struct amd_sched_rq *rq;
++ struct drm_sched_rq *rq;
+ ring = &adev->uvd.ring_enc[0];
+- rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
+- r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
++ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
++ r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
+ rq, amdgpu_sched_jobs, NULL);
+ if (r) {
+ DRM_ERROR("Failed setting up UVD ENC run queue.\n");
+@@ -456,7 +456,7 @@ static int uvd_v6_0_sw_fini(void *handle)
+ return r;
+
+ if (uvd_v6_0_enc_support(adev)) {
+- amd_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc);
++ drm_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc);
+
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i)
+ amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+index 660fa41..4166111 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+@@ -385,7 +385,7 @@ static int uvd_v7_0_early_init(void *handle)
+ static int uvd_v7_0_sw_init(void *handle)
+ {
+ struct amdgpu_ring *ring;
+- struct amd_sched_rq *rq;
++ struct drm_sched_rq *rq;
+ int i, r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+@@ -416,8 +416,8 @@ static int uvd_v7_0_sw_init(void *handle)
+ }
+
+ ring = &adev->uvd.ring_enc[0];
+- rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
+- r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
++ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
++ r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
+ rq, amdgpu_sched_jobs, NULL);
+ if (r) {
+ DRM_ERROR("Failed setting up UVD ENC run queue.\n");
+@@ -472,7 +472,7 @@ static int uvd_v7_0_sw_fini(void *handle)
+ if (r)
+ return r;
+
+- amd_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc);
++ drm_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc);
+
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i)
+ amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
+diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
+deleted file mode 100644
+index eebe323..0000000
+--- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
++++ /dev/null
+@@ -1,60 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+-#define _GPU_SCHED_TRACE_H_
+-
+-#include <linux/stringify.h>
+-#include <linux/types.h>
+-#include <linux/tracepoint.h>
+-
+-#include <drm/drmP.h>
+-
+-#undef TRACE_SYSTEM
+-#define TRACE_SYSTEM gpu_sched
+-#define TRACE_INCLUDE_FILE gpu_sched_trace
+-
+-TRACE_EVENT(amd_sched_job,
+- TP_PROTO(struct amd_sched_job *sched_job, struct amd_sched_entity *entity),
+- TP_ARGS(sched_job, entity),
+- TP_STRUCT__entry(
+- __field(struct amd_sched_entity *, entity)
+- __field(struct dma_fence *, fence)
+- __field(const char *, name)
+- __field(uint64_t, id)
+- __field(u32, job_count)
+- __field(int, hw_job_count)
+- ),
+-
+- TP_fast_assign(
+- __entry->entity = entity;
+- __entry->id = sched_job->id;
+- __entry->fence = &sched_job->s_fence->finished;
+- __entry->name = sched_job->sched->name;
+- __entry->job_count = spsc_queue_count(&entity->job_queue);
+- __entry->hw_job_count = atomic_read(
+- &sched_job->sched->hw_rq_count);
+- ),
+- TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d",
+- __entry->entity, __entry->id,
+- __entry->fence, __entry->name,
+- __entry->job_count, __entry->hw_job_count)
+-);
+-
+-TRACE_EVENT(amd_sched_process_job,
+- TP_PROTO(struct amd_sched_fence *fence),
+- TP_ARGS(fence),
+- TP_STRUCT__entry(
+- __field(struct dma_fence *, fence)
+- ),
+-
+- TP_fast_assign(
+- __entry->fence = &fence->finished;
+- ),
+- TP_printk("fence=%p signaled", __entry->fence)
+-);
+-
+-#endif
+-
+-/* This part must be outside protection */
+-#undef TRACE_INCLUDE_PATH
+-#define TRACE_INCLUDE_PATH .
+-#include <trace/define_trace.h>
+diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+deleted file mode 100644
+index a8423c3..0000000
+--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
++++ /dev/null
+@@ -1,745 +0,0 @@
+-/*
+- * Copyright 2015 Advanced Micro Devices, Inc.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included in
+- * all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
+- *
+- *
+- */
+-#include <linux/kthread.h>
+-#include <linux/wait.h>
+-#include <linux/sched.h>
+-#include <uapi/linux/sched/types.h>
+-#include <drm/drmP.h>
+-#include "gpu_scheduler.h"
+-
+-#include "spsc_queue.h"
+-
+-#define CREATE_TRACE_POINTS
+-#include "gpu_sched_trace.h"
+-
+-#define to_amd_sched_job(sched_job) \
+- container_of((sched_job), struct amd_sched_job, queue_node)
+-
+-static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
+-static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
+-static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
+-
+-/* Initialize a given run queue struct */
+-static void amd_sched_rq_init(struct amd_sched_rq *rq)
+-{
+- spin_lock_init(&rq->lock);
+- INIT_LIST_HEAD(&rq->entities);
+- rq->current_entity = NULL;
+-}
+-
+-static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
+- struct amd_sched_entity *entity)
+-{
+- if (!list_empty(&entity->list))
+- return;
+- spin_lock(&rq->lock);
+- list_add_tail(&entity->list, &rq->entities);
+- spin_unlock(&rq->lock);
+-}
+-
+-static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
+- struct amd_sched_entity *entity)
+-{
+- if (list_empty(&entity->list))
+- return;
+- spin_lock(&rq->lock);
+- list_del_init(&entity->list);
+- if (rq->current_entity == entity)
+- rq->current_entity = NULL;
+- spin_unlock(&rq->lock);
+-}
+-
+-/**
+- * Select an entity which could provide a job to run
+- *
+- * @rq The run queue to check.
+- *
+- * Try to find a ready entity, returns NULL if none found.
+- */
+-static struct amd_sched_entity *
+-amd_sched_rq_select_entity(struct amd_sched_rq *rq)
+-{
+- struct amd_sched_entity *entity;
+-
+- spin_lock(&rq->lock);
+-
+- entity = rq->current_entity;
+- if (entity) {
+- list_for_each_entry_continue(entity, &rq->entities, list) {
+- if (amd_sched_entity_is_ready(entity)) {
+- rq->current_entity = entity;
+- spin_unlock(&rq->lock);
+- return entity;
+- }
+- }
+- }
+-
+- list_for_each_entry(entity, &rq->entities, list) {
+-
+- if (amd_sched_entity_is_ready(entity)) {
+- rq->current_entity = entity;
+- spin_unlock(&rq->lock);
+- return entity;
+- }
+-
+- if (entity == rq->current_entity)
+- break;
+- }
+-
+- spin_unlock(&rq->lock);
+-
+- return NULL;
+-}
+-
+-/**
+- * Init a context entity used by scheduler when submit to HW ring.
+- *
+- * @sched The pointer to the scheduler
+- * @entity The pointer to a valid amd_sched_entity
+- * @rq The run queue this entity belongs
+- * @kernel If this is an entity for the kernel
+- * @jobs The max number of jobs in the job queue
+- *
+- * return 0 if succeed. negative error code on failure
+-*/
+-int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
+- struct amd_sched_entity *entity,
+- struct amd_sched_rq *rq,
+- uint32_t jobs, atomic_t *guilty)
+-{
+- if (!(sched && entity && rq))
+- return -EINVAL;
+-
+- memset(entity, 0, sizeof(struct amd_sched_entity));
+- INIT_LIST_HEAD(&entity->list);
+- entity->rq = rq;
+- entity->sched = sched;
+- entity->guilty = guilty;
+-
+- spin_lock_init(&entity->rq_lock);
+- spin_lock_init(&entity->queue_lock);
+- spsc_queue_init(&entity->job_queue);
+-
+- atomic_set(&entity->fence_seq, 0);
+- entity->fence_context = dma_fence_context_alloc(2);
+-
+- return 0;
+-}
+-
+-/**
+- * Query if entity is initialized
+- *
+- * @sched Pointer to scheduler instance
+- * @entity The pointer to a valid scheduler entity
+- *
+- * return true if entity is initialized, false otherwise
+-*/
+-static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
+- struct amd_sched_entity *entity)
+-{
+- return entity->sched == sched &&
+- entity->rq != NULL;
+-}
+-
+-/**
+- * Check if entity is idle
+- *
+- * @entity The pointer to a valid scheduler entity
+- *
+- * Return true if entity don't has any unscheduled jobs.
+- */
+-static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
+-{
+- rmb();
+- if (spsc_queue_peek(&entity->job_queue) == NULL)
+- return true;
+-
+- return false;
+-}
+-
+-/**
+- * Check if entity is ready
+- *
+- * @entity The pointer to a valid scheduler entity
+- *
+- * Return true if entity could provide a job.
+- */
+-static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
+-{
+- if (spsc_queue_peek(&entity->job_queue) == NULL)
+- return false;
+-
+- if (ACCESS_ONCE(entity->dependency))
+- return false;
+-
+- return true;
+-}
+-
+-/**
+- * Destroy a context entity
+- *
+- * @sched Pointer to scheduler instance
+- * @entity The pointer to a valid scheduler entity
+- *
+- * Cleanup and free the allocated resources.
+- */
+-void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
+- struct amd_sched_entity *entity)
+-{
+- int r;
+-
+- if (!amd_sched_entity_is_initialized(sched, entity))
+- return;
+- /**
+- * The client will not queue more IBs during this fini, consume existing
+- * queued IBs or discard them on SIGKILL
+- */
+- if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
+- r = -ERESTARTSYS;
+- else
+- r = wait_event_killable(sched->job_scheduled,
+- amd_sched_entity_is_idle(entity));
+- amd_sched_entity_set_rq(entity, NULL);
+- if (r) {
+- struct amd_sched_job *job;
+-
+- /* Park the kernel for a moment to make sure it isn't processing
+- * our enity.
+- */
+- kthread_park(sched->thread);
+- kthread_unpark(sched->thread);
+- if (entity->dependency) {
+- dma_fence_remove_callback(entity->dependency,
+- &entity->cb);
+- dma_fence_put(entity->dependency);
+- entity->dependency = NULL;
+- }
+-
+- while ((job = to_amd_sched_job(spsc_queue_pop(&entity->job_queue)))) {
+- struct amd_sched_fence *s_fence = job->s_fence;
+-
+- amd_sched_fence_scheduled(s_fence);
+- dma_fence_set_error(&s_fence->finished, -ESRCH);
+- amd_sched_fence_finished(s_fence);
+- WARN_ON(s_fence->parent);
+- dma_fence_put(&s_fence->finished);
+- sched->ops->free_job(job);
+- }
+- }
+-}
+-
+-static void amd_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
+-{
+- struct amd_sched_entity *entity =
+- container_of(cb, struct amd_sched_entity, cb);
+- entity->dependency = NULL;
+- dma_fence_put(f);
+- amd_sched_wakeup(entity->sched);
+-}
+-
+-static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
+-{
+- struct amd_sched_entity *entity =
+- container_of(cb, struct amd_sched_entity, cb);
+- entity->dependency = NULL;
+- dma_fence_put(f);
+-}
+-
+-void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
+- struct amd_sched_rq *rq)
+-{
+- if (entity->rq == rq)
+- return;
+-
+- spin_lock(&entity->rq_lock);
+-
+- if (entity->rq)
+- amd_sched_rq_remove_entity(entity->rq, entity);
+-
+- entity->rq = rq;
+- if (rq)
+- amd_sched_rq_add_entity(rq, entity);
+-
+- spin_unlock(&entity->rq_lock);
+-}
+-
+-bool amd_sched_dependency_optimized(struct dma_fence* fence,
+- struct amd_sched_entity *entity)
+-{
+- struct amd_gpu_scheduler *sched = entity->sched;
+- struct amd_sched_fence *s_fence;
+-
+- if (!fence || dma_fence_is_signaled(fence))
+- return false;
+- if (fence->context == entity->fence_context)
+- return true;
+- s_fence = to_amd_sched_fence(fence);
+- if (s_fence && s_fence->sched == sched)
+- return true;
+-
+- return false;
+-}
+-
+-static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
+-{
+- struct amd_gpu_scheduler *sched = entity->sched;
+- struct dma_fence * fence = entity->dependency;
+- struct amd_sched_fence *s_fence;
+-
+- if (fence->context == entity->fence_context) {
+- /* We can ignore fences from ourself */
+- dma_fence_put(entity->dependency);
+- return false;
+- }
+-
+- s_fence = to_amd_sched_fence(fence);
+- if (s_fence && s_fence->sched == sched) {
+-
+- /*
+- * Fence is from the same scheduler, only need to wait for
+- * it to be scheduled
+- */
+- fence = dma_fence_get(&s_fence->scheduled);
+- dma_fence_put(entity->dependency);
+- entity->dependency = fence;
+- if (!dma_fence_add_callback(fence, &entity->cb,
+- amd_sched_entity_clear_dep))
+- return true;
+-
+- /* Ignore it when it is already scheduled */
+- dma_fence_put(fence);
+- return false;
+- }
+-
+- if (!dma_fence_add_callback(entity->dependency, &entity->cb,
+- amd_sched_entity_wakeup))
+- return true;
+-
+- dma_fence_put(entity->dependency);
+- return false;
+-}
+-
+-static struct amd_sched_job *
+-amd_sched_entity_pop_job(struct amd_sched_entity *entity)
+-{
+- struct amd_gpu_scheduler *sched = entity->sched;
+- struct amd_sched_job *sched_job = to_amd_sched_job(
+- spsc_queue_peek(&entity->job_queue));
+-
+- if (!sched_job)
+- return NULL;
+-
+- while ((entity->dependency = sched->ops->dependency(sched_job, entity)))
+- if (amd_sched_entity_add_dependency_cb(entity))
+- return NULL;
+-
+- /* skip jobs from entity that marked guilty */
+- if (entity->guilty && atomic_read(entity->guilty))
+- dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);
+-
+- spsc_queue_pop(&entity->job_queue);
+- return sched_job;
+-}
+-
+-/**
+- * Submit a job to the job queue
+- *
+- * @sched_job The pointer to job required to submit
+- *
+- * Returns 0 for success, negative error code otherwise.
+- */
+-void amd_sched_entity_push_job(struct amd_sched_job *sched_job,
+- struct amd_sched_entity *entity)
+-{
+- struct amd_gpu_scheduler *sched = sched_job->sched;
+- bool first = false;
+-
+- trace_amd_sched_job(sched_job, entity);
+-
+- spin_lock(&entity->queue_lock);
+- first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
+-
+- spin_unlock(&entity->queue_lock);
+-
+- /* first job wakes up scheduler */
+- if (first) {
+- /* Add the entity to the run queue */
+- spin_lock(&entity->rq_lock);
+- amd_sched_rq_add_entity(entity->rq, entity);
+- spin_unlock(&entity->rq_lock);
+- amd_sched_wakeup(sched);
+- }
+-}
+-
+-/* job_finish is called after hw fence signaled
+- */
+-static void amd_sched_job_finish(struct work_struct *work)
+-{
+- struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
+- finish_work);
+- struct amd_gpu_scheduler *sched = s_job->sched;
+-
+- /* remove job from ring_mirror_list */
+- spin_lock(&sched->job_list_lock);
+- list_del_init(&s_job->node);
+- if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
+- struct amd_sched_job *next;
+-
+- spin_unlock(&sched->job_list_lock);
+- cancel_delayed_work_sync(&s_job->work_tdr);
+- spin_lock(&sched->job_list_lock);
+-
+- /* queue TDR for next job */
+- next = list_first_entry_or_null(&sched->ring_mirror_list,
+- struct amd_sched_job, node);
+-
+- if (next)
+- schedule_delayed_work(&next->work_tdr, sched->timeout);
+- }
+- spin_unlock(&sched->job_list_lock);
+- dma_fence_put(&s_job->s_fence->finished);
+- sched->ops->free_job(s_job);
+-}
+-
+-static void amd_sched_job_finish_cb(struct dma_fence *f,
+- struct dma_fence_cb *cb)
+-{
+- struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
+- finish_cb);
+- schedule_work(&job->finish_work);
+-}
+-
+-static void amd_sched_job_begin(struct amd_sched_job *s_job)
+-{
+- struct amd_gpu_scheduler *sched = s_job->sched;
+-
+- dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb,
+- amd_sched_job_finish_cb);
+-
+- spin_lock(&sched->job_list_lock);
+- list_add_tail(&s_job->node, &sched->ring_mirror_list);
+- if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
+- list_first_entry_or_null(&sched->ring_mirror_list,
+- struct amd_sched_job, node) == s_job)
+- schedule_delayed_work(&s_job->work_tdr, sched->timeout);
+- spin_unlock(&sched->job_list_lock);
+-}
+-
+-static void amd_sched_job_timedout(struct work_struct *work)
+-{
+- struct amd_sched_job *job = container_of(work, struct amd_sched_job,
+- work_tdr.work);
+-
+- job->sched->ops->timedout_job(job);
+-}
+-
+-void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched, struct amd_sched_job *bad)
+-{
+- struct amd_sched_job *s_job;
+- struct amd_sched_entity *entity, *tmp;
+- int i;;
+-
+- spin_lock(&sched->job_list_lock);
+- list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
+- if (s_job->s_fence->parent &&
+- dma_fence_remove_callback(s_job->s_fence->parent,
+- &s_job->s_fence->cb)) {
+- dma_fence_put(s_job->s_fence->parent);
+- s_job->s_fence->parent = NULL;
+- atomic_dec(&sched->hw_rq_count);
+- }
+- }
+- spin_unlock(&sched->job_list_lock);
+-
+- if (bad && bad->s_priority != AMD_SCHED_PRIORITY_KERNEL) {
+- atomic_inc(&bad->karma);
+- /* don't increase @bad's karma if it's from KERNEL RQ,
+- * becuase sometimes GPU hang would cause kernel jobs (like VM updating jobs)
+- * corrupt but keep in mind that kernel jobs always considered good.
+- */
+- for (i = AMD_SCHED_PRIORITY_MIN; i < AMD_SCHED_PRIORITY_KERNEL; i++ ) {
+- struct amd_sched_rq *rq = &sched->sched_rq[i];
+-
+- spin_lock(&rq->lock);
+- list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
+- if (bad->s_fence->scheduled.context == entity->fence_context) {
+- if (atomic_read(&bad->karma) > bad->sched->hang_limit)
+- if (entity->guilty)
+- atomic_set(entity->guilty, 1);
+- break;
+- }
+- }
+- spin_unlock(&rq->lock);
+- if (&entity->list != &rq->entities)
+- break;
+- }
+- }
+-}
+-
+-void amd_sched_job_kickout(struct amd_sched_job *s_job)
+-{
+- struct amd_gpu_scheduler *sched = s_job->sched;
+-
+- spin_lock(&sched->job_list_lock);
+- list_del_init(&s_job->node);
+- spin_unlock(&sched->job_list_lock);
+-}
+-
+-void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
+-{
+- struct amd_sched_job *s_job, *tmp;
+- bool found_guilty = false;
+- int r;
+-
+- spin_lock(&sched->job_list_lock);
+- s_job = list_first_entry_or_null(&sched->ring_mirror_list,
+- struct amd_sched_job, node);
+- if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
+- schedule_delayed_work(&s_job->work_tdr, sched->timeout);
+-
+- list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
+- struct amd_sched_fence *s_fence = s_job->s_fence;
+- struct dma_fence *fence;
+- uint64_t guilty_context;
+-
+- if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
+- found_guilty = true;
+- guilty_context = s_job->s_fence->scheduled.context;
+- }
+-
+- if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
+- dma_fence_set_error(&s_fence->finished, -ECANCELED);
+-
+- spin_unlock(&sched->job_list_lock);
+- fence = sched->ops->run_job(s_job);
+- atomic_inc(&sched->hw_rq_count);
+- if (fence) {
+- s_fence->parent = dma_fence_get(fence);
+- r = dma_fence_add_callback(fence, &s_fence->cb,
+- amd_sched_process_job);
+- if (r == -ENOENT)
+- amd_sched_process_job(fence, &s_fence->cb);
+- else if (r)
+- DRM_ERROR("fence add callback failed (%d)\n",
+- r);
+- dma_fence_put(fence);
+- } else {
+- amd_sched_process_job(NULL, &s_fence->cb);
+- }
+- spin_lock(&sched->job_list_lock);
+- }
+- spin_unlock(&sched->job_list_lock);
+-}
+-
+-/* init a sched_job with basic field */
+-int amd_sched_job_init(struct amd_sched_job *job,
+- struct amd_gpu_scheduler *sched,
+- struct amd_sched_entity *entity,
+- void *owner)
+-{
+- job->sched = sched;
+- job->s_priority = entity->rq - sched->sched_rq;
+- job->s_fence = amd_sched_fence_create(entity, owner);
+- if (!job->s_fence)
+- return -ENOMEM;
+- job->id = atomic64_inc_return(&sched->job_id_count);
+-
+- INIT_WORK(&job->finish_work, amd_sched_job_finish);
+- INIT_LIST_HEAD(&job->node);
+- INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);
+-
+- return 0;
+-}
+-
+-/**
+- * Return ture if we can push more jobs to the hw.
+- */
+-static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
+-{
+- return atomic_read(&sched->hw_rq_count) <
+- sched->hw_submission_limit;
+-}
+-
+-/**
+- * Wake up the scheduler when it is ready
+- */
+-static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
+-{
+- if (amd_sched_ready(sched))
+- wake_up_interruptible(&sched->wake_up_worker);
+-}
+-
+-/**
+- * Select next entity to process
+-*/
+-static struct amd_sched_entity *
+-amd_sched_select_entity(struct amd_gpu_scheduler *sched)
+-{
+- struct amd_sched_entity *entity;
+- int i;
+-
+- if (!amd_sched_ready(sched))
+- return NULL;
+-
+- /* Kernel run queue has higher priority than normal run queue*/
+- for (i = AMD_SCHED_PRIORITY_MAX - 1; i >= AMD_SCHED_PRIORITY_MIN; i--) {
+- entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
+- if (entity)
+- break;
+- }
+-
+- return entity;
+-}
+-
+-static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
+-{
+- struct amd_sched_fence *s_fence =
+- container_of(cb, struct amd_sched_fence, cb);
+- struct amd_gpu_scheduler *sched = s_fence->sched;
+-
+- dma_fence_get(&s_fence->finished);
+- atomic_dec(&sched->hw_rq_count);
+- amd_sched_fence_finished(s_fence);
+-
+- trace_amd_sched_process_job(s_fence);
+- dma_fence_put(&s_fence->finished);
+- wake_up_interruptible(&sched->wake_up_worker);
+-}
+-
+-static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
+-{
+- if (kthread_should_park()) {
+- kthread_parkme();
+- return true;
+- }
+-
+- return false;
+-}
+-
+-static int amd_sched_main(void *param)
+-{
+- struct sched_param sparam = {.sched_priority = 1};
+- struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
+- int r;
+-
+- sched_setscheduler(current, SCHED_FIFO, &sparam);
+-
+- while (!kthread_should_stop()) {
+- struct amd_sched_entity *entity = NULL;
+- struct amd_sched_fence *s_fence;
+- struct amd_sched_job *sched_job;
+- struct dma_fence *fence;
+-
+- wait_event_interruptible(sched->wake_up_worker,
+- (!amd_sched_blocked(sched) &&
+- (entity = amd_sched_select_entity(sched))) ||
+- kthread_should_stop());
+-
+- if (!entity)
+- continue;
+-
+- sched_job = amd_sched_entity_pop_job(entity);
+- if (!sched_job)
+- continue;
+-
+- s_fence = sched_job->s_fence;
+-
+- atomic_inc(&sched->hw_rq_count);
+- amd_sched_job_begin(sched_job);
+-
+- fence = sched->ops->run_job(sched_job);
+- amd_sched_fence_scheduled(s_fence);
+-
+- if (fence) {
+- s_fence->parent = dma_fence_get(fence);
+- r = dma_fence_add_callback(fence, &s_fence->cb,
+- amd_sched_process_job);
+- if (r == -ENOENT)
+- amd_sched_process_job(fence, &s_fence->cb);
+- else if (r)
+- DRM_ERROR("fence add callback failed (%d)\n",
+- r);
+- dma_fence_put(fence);
+- } else {
+- amd_sched_process_job(NULL, &s_fence->cb);
+- }
+-
+- wake_up(&sched->job_scheduled);
+- }
+- return 0;
+-}
+-
+-/**
+- * Init a gpu scheduler instance
+- *
+- * @sched The pointer to the scheduler
+- * @ops The backend operations for this scheduler.
+- * @hw_submissions Number of hw submissions to do.
+- * @name Name used for debugging
+- *
+- * Return 0 on success, otherwise error code.
+-*/
+-int amd_sched_init(struct amd_gpu_scheduler *sched,
+- const struct amd_sched_backend_ops *ops,
+- unsigned hw_submission,
+- unsigned hang_limit,
+- long timeout,
+- const char *name)
+-{
+- int i;
+- sched->ops = ops;
+- sched->hw_submission_limit = hw_submission;
+- sched->name = name;
+- sched->timeout = timeout;
+- sched->hang_limit = hang_limit;
+- for (i = AMD_SCHED_PRIORITY_MIN; i < AMD_SCHED_PRIORITY_MAX; i++)
+- amd_sched_rq_init(&sched->sched_rq[i]);
+-
+- init_waitqueue_head(&sched->wake_up_worker);
+- init_waitqueue_head(&sched->job_scheduled);
+- INIT_LIST_HEAD(&sched->ring_mirror_list);
+- spin_lock_init(&sched->job_list_lock);
+- atomic_set(&sched->hw_rq_count, 0);
+- atomic64_set(&sched->job_id_count, 0);
+-
+- /* Each scheduler will run on a seperate kernel thread */
+- sched->thread = kthread_run(amd_sched_main, sched, sched->name);
+- if (IS_ERR(sched->thread)) {
+- DRM_ERROR("Failed to create scheduler for %s.\n", name);
+- return PTR_ERR(sched->thread);
+- }
+-
+- return 0;
+-}
+-
+-/**
+- * Destroy a gpu scheduler
+- *
+- * @sched The pointer to the scheduler
+- */
+-void amd_sched_fini(struct amd_gpu_scheduler *sched)
+-{
+- if (sched->thread)
+- kthread_stop(sched->thread);
+-}
+diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+deleted file mode 100644
+index 13c8e87..0000000
+--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
++++ /dev/null
+@@ -1,185 +0,0 @@
+-/*
+- * Copyright 2015 Advanced Micro Devices, Inc.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included in
+- * all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
+- *
+- */
+-
+-#ifndef _GPU_SCHEDULER_H_
+-#define _GPU_SCHEDULER_H_
+-
+-#include <linux/kfifo.h>
+-#include "spsc_queue.h"
+-
+-struct amd_gpu_scheduler;
+-struct amd_sched_rq;
+-
+-enum amd_sched_priority {
+- AMD_SCHED_PRIORITY_MIN,
+- AMD_SCHED_PRIORITY_LOW = AMD_SCHED_PRIORITY_MIN,
+- AMD_SCHED_PRIORITY_NORMAL,
+- AMD_SCHED_PRIORITY_HIGH_SW,
+- AMD_SCHED_PRIORITY_HIGH_HW,
+- AMD_SCHED_PRIORITY_KERNEL,
+- AMD_SCHED_PRIORITY_MAX,
+- AMD_SCHED_PRIORITY_INVALID = -1,
+- AMD_SCHED_PRIORITY_UNSET = -2
+-};
+-
+-
+-/**
+- * A scheduler entity is a wrapper around a job queue or a group
+- * of other entities. Entities take turns emitting jobs from their
+- * job queues to corresponding hardware ring based on scheduling
+- * policy.
+-*/
+-struct amd_sched_entity {
+- struct list_head list;
+- struct amd_sched_rq *rq;
+- spinlock_t rq_lock;
+- struct amd_gpu_scheduler *sched;
+-
+- spinlock_t queue_lock;
+- struct spsc_queue job_queue;
+-
+- atomic_t fence_seq;
+- uint64_t fence_context;
+-
+- struct dma_fence *dependency;
+- struct dma_fence_cb cb;
+- atomic_t *guilty; /* points to ctx's guilty */
+-};
+-
+-/**
+- * Run queue is a set of entities scheduling command submissions for
+- * one specific ring. It implements the scheduling policy that selects
+- * the next entity to emit commands from.
+-*/
+-struct amd_sched_rq {
+- spinlock_t lock;
+- struct list_head entities;
+- struct amd_sched_entity *current_entity;
+-};
+-
+-struct amd_sched_fence {
+- struct dma_fence scheduled;
+- struct dma_fence finished;
+- struct dma_fence_cb cb;
+- struct dma_fence *parent;
+- struct amd_gpu_scheduler *sched;
+- spinlock_t lock;
+- void *owner;
+-};
+-
+-struct amd_sched_job {
+- struct spsc_node queue_node;
+- struct amd_gpu_scheduler *sched;
+- struct amd_sched_fence *s_fence;
+- struct dma_fence_cb finish_cb;
+- struct work_struct finish_work;
+- struct list_head node;
+- struct delayed_work work_tdr;
+- uint64_t id;
+- atomic_t karma;
+- enum amd_sched_priority s_priority;
+-};
+-
+-extern const struct dma_fence_ops amd_sched_fence_ops_scheduled;
+-extern const struct dma_fence_ops amd_sched_fence_ops_finished;
+-static inline struct amd_sched_fence *to_amd_sched_fence(struct dma_fence *f)
+-{
+- if (f->ops == &amd_sched_fence_ops_scheduled)
+- return container_of(f, struct amd_sched_fence, scheduled);
+-
+- if (f->ops == &amd_sched_fence_ops_finished)
+- return container_of(f, struct amd_sched_fence, finished);
+-
+- return NULL;
+-}
+-
+-static inline bool amd_sched_invalidate_job(struct amd_sched_job *s_job, int threshold)
+-{
+- return (s_job && atomic_inc_return(&s_job->karma) > threshold);
+-}
+-
+-/**
+- * Define the backend operations called by the scheduler,
+- * these functions should be implemented in driver side
+-*/
+-struct amd_sched_backend_ops {
+- struct dma_fence *(*dependency)(struct amd_sched_job *sched_job,
+- struct amd_sched_entity *s_entity);
+- struct dma_fence *(*run_job)(struct amd_sched_job *sched_job);
+- void (*timedout_job)(struct amd_sched_job *sched_job);
+- void (*free_job)(struct amd_sched_job *sched_job);
+-};
+-
+-/**
+- * One scheduler is implemented for each hardware ring
+-*/
+-struct amd_gpu_scheduler {
+- const struct amd_sched_backend_ops *ops;
+- uint32_t hw_submission_limit;
+- long timeout;
+- const char *name;
+- struct amd_sched_rq sched_rq[AMD_SCHED_PRIORITY_MAX];
+- wait_queue_head_t wake_up_worker;
+- wait_queue_head_t job_scheduled;
+- atomic_t hw_rq_count;
+- atomic64_t job_id_count;
+- struct task_struct *thread;
+- struct list_head ring_mirror_list;
+- spinlock_t job_list_lock;
+- int hang_limit;
+-};
+-
+-int amd_sched_init(struct amd_gpu_scheduler *sched,
+- const struct amd_sched_backend_ops *ops,
+- uint32_t hw_submission, unsigned hang_limit, long timeout, const char *name);
+-void amd_sched_fini(struct amd_gpu_scheduler *sched);
+-
+-int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
+- struct amd_sched_entity *entity,
+- struct amd_sched_rq *rq,
+- uint32_t jobs, atomic_t* guilty);
+-void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
+- struct amd_sched_entity *entity);
+-void amd_sched_entity_push_job(struct amd_sched_job *sched_job,
+- struct amd_sched_entity *entity);
+-void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
+- struct amd_sched_rq *rq);
+-
+-int amd_sched_fence_slab_init(void);
+-void amd_sched_fence_slab_fini(void);
+-
+-struct amd_sched_fence *amd_sched_fence_create(
+- struct amd_sched_entity *s_entity, void *owner);
+-void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
+-void amd_sched_fence_finished(struct amd_sched_fence *fence);
+-int amd_sched_job_init(struct amd_sched_job *job,
+- struct amd_gpu_scheduler *sched,
+- struct amd_sched_entity *entity,
+- void *owner);
+-void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched, struct amd_sched_job *job);
+-void amd_sched_job_recovery(struct amd_gpu_scheduler *sched);
+-bool amd_sched_dependency_optimized(struct dma_fence* fence,
+- struct amd_sched_entity *entity);
+-void amd_sched_job_kickout(struct amd_sched_job *s_job);
+-
+-#endif
+diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
+deleted file mode 100644
+index 33f54d0..0000000
+--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
++++ /dev/null
+@@ -1,173 +0,0 @@
+-/*
+- * Copyright 2015 Advanced Micro Devices, Inc.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included in
+- * all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
+- *
+- *
+- */
+-#include <linux/kthread.h>
+-#include <linux/wait.h>
+-#include <linux/sched.h>
+-#include <drm/drmP.h>
+-#include "gpu_scheduler.h"
+-
+-static struct kmem_cache *sched_fence_slab;
+-
+-int amd_sched_fence_slab_init(void)
+-{
+- sched_fence_slab = kmem_cache_create(
+- "amd_sched_fence", sizeof(struct amd_sched_fence), 0,
+- SLAB_HWCACHE_ALIGN, NULL);
+- if (!sched_fence_slab)
+- return -ENOMEM;
+-
+- return 0;
+-}
+-
+-void amd_sched_fence_slab_fini(void)
+-{
+- rcu_barrier();
+- kmem_cache_destroy(sched_fence_slab);
+-}
+-
+-struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
+- void *owner)
+-{
+- struct amd_sched_fence *fence = NULL;
+- unsigned seq;
+-
+- fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
+- if (fence == NULL)
+- return NULL;
+-
+- fence->owner = owner;
+- fence->sched = entity->sched;
+- spin_lock_init(&fence->lock);
+-
+- seq = atomic_inc_return(&entity->fence_seq);
+- dma_fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled,
+- &fence->lock, entity->fence_context, seq);
+- dma_fence_init(&fence->finished, &amd_sched_fence_ops_finished,
+- &fence->lock, entity->fence_context + 1, seq);
+-
+- return fence;
+-}
+-
+-void amd_sched_fence_scheduled(struct amd_sched_fence *fence)
+-{
+- int ret = dma_fence_signal(&fence->scheduled);
+-
+- if (!ret)
+- DMA_FENCE_TRACE(&fence->scheduled,
+- "signaled from irq context\n");
+- else
+- DMA_FENCE_TRACE(&fence->scheduled,
+- "was already signaled\n");
+-}
+-
+-void amd_sched_fence_finished(struct amd_sched_fence *fence)
+-{
+- int ret = dma_fence_signal(&fence->finished);
+-
+- if (!ret)
+- DMA_FENCE_TRACE(&fence->finished,
+- "signaled from irq context\n");
+- else
+- DMA_FENCE_TRACE(&fence->finished,
+- "was already signaled\n");
+-}
+-
+-static const char *amd_sched_fence_get_driver_name(struct dma_fence *fence)
+-{
+- return "amd_sched";
+-}
+-
+-static const char *amd_sched_fence_get_timeline_name(struct dma_fence *f)
+-{
+- struct amd_sched_fence *fence = to_amd_sched_fence(f);
+- return (const char *)fence->sched->name;
+-}
+-
+-static bool amd_sched_fence_enable_signaling(struct dma_fence *f)
+-{
+- return true;
+-}
+-
+-/**
+- * amd_sched_fence_free - free up the fence memory
+- *
+- * @rcu: RCU callback head
+- *
+- * Free up the fence memory after the RCU grace period.
+- */
+-static void amd_sched_fence_free(struct rcu_head *rcu)
+-{
+- struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
+- struct amd_sched_fence *fence = to_amd_sched_fence(f);
+-
+- dma_fence_put(fence->parent);
+- kmem_cache_free(sched_fence_slab, fence);
+-}
+-
+-/**
+- * amd_sched_fence_release_scheduled - callback that fence can be freed
+- *
+- * @fence: fence
+- *
+- * This function is called when the reference count becomes zero.
+- * It just RCU schedules freeing up the fence.
+- */
+-static void amd_sched_fence_release_scheduled(struct dma_fence *f)
+-{
+- struct amd_sched_fence *fence = to_amd_sched_fence(f);
+-
+- call_rcu(&fence->finished.rcu, amd_sched_fence_free);
+-}
+-
+-/**
+- * amd_sched_fence_release_finished - drop extra reference
+- *
+- * @f: fence
+- *
+- * Drop the extra reference from the scheduled fence to the base fence.
+- */
+-static void amd_sched_fence_release_finished(struct dma_fence *f)
+-{
+- struct amd_sched_fence *fence = to_amd_sched_fence(f);
+-
+- dma_fence_put(&fence->scheduled);
+-}
+-
+-const struct dma_fence_ops amd_sched_fence_ops_scheduled = {
+- .get_driver_name = amd_sched_fence_get_driver_name,
+- .get_timeline_name = amd_sched_fence_get_timeline_name,
+- .enable_signaling = amd_sched_fence_enable_signaling,
+- .signaled = NULL,
+- .wait = dma_fence_default_wait,
+- .release = amd_sched_fence_release_scheduled,
+-};
+-
+-const struct dma_fence_ops amd_sched_fence_ops_finished = {
+- .get_driver_name = amd_sched_fence_get_driver_name,
+- .get_timeline_name = amd_sched_fence_get_timeline_name,
+- .enable_signaling = amd_sched_fence_enable_signaling,
+- .signaled = NULL,
+- .wait = dma_fence_default_wait,
+- .release = amd_sched_fence_release_finished,
+-};
+diff --git a/drivers/gpu/drm/amd/scheduler/spsc_queue.h b/drivers/gpu/drm/amd/scheduler/spsc_queue.h
+deleted file mode 100644
+index 5902f35..0000000
+--- a/drivers/gpu/drm/amd/scheduler/spsc_queue.h
++++ /dev/null
+@@ -1,121 +0,0 @@
+-/*
+- * Copyright 2017 Advanced Micro Devices, Inc.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included in
+- * all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
+- *
+- */
+-
+-#ifndef AMD_SCHEDULER_SPSC_QUEUE_H_
+-#define AMD_SCHEDULER_SPSC_QUEUE_H_
+-
+-#include <linux/atomic.h>
+-
+-/** SPSC lockless queue */
+-
+-struct spsc_node {
+-
+- /* Stores spsc_node* */
+- struct spsc_node *next;
+-};
+-
+-struct spsc_queue {
+-
+- struct spsc_node *head;
+-
+- /* atomic pointer to struct spsc_node* */
+- atomic_long_t tail;
+-
+- atomic_t job_count;
+-};
+-
+-static inline void spsc_queue_init(struct spsc_queue *queue)
+-{
+- queue->head = NULL;
+- atomic_long_set(&queue->tail, (long)&queue->head);
+- atomic_set(&queue->job_count, 0);
+-}
+-
+-static inline struct spsc_node *spsc_queue_peek(struct spsc_queue *queue)
+-{
+- return queue->head;
+-}
+-
+-static inline int spsc_queue_count(struct spsc_queue *queue)
+-{
+- return atomic_read(&queue->job_count);
+-}
+-
+-static inline bool spsc_queue_push(struct spsc_queue *queue, struct spsc_node *node)
+-{
+- struct spsc_node **tail;
+-
+- node->next = NULL;
+-
+- preempt_disable();
+-
+- tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next);
+- WRITE_ONCE(*tail, node);
+- atomic_inc(&queue->job_count);
+-
+- /*
+- * In case of first element verify new node will be visible to the consumer
+- * thread when we ping the kernel thread that there is new work to do.
+- */
+- smp_wmb();
+-
+- preempt_enable();
+-
+- return tail == &queue->head;
+-}
+-
+-
+-static inline struct spsc_node *spsc_queue_pop(struct spsc_queue *queue)
+-{
+- struct spsc_node *next, *node;
+-
+- /* Verify reading from memory and not the cache */
+- smp_rmb();
+-
+- node = READ_ONCE(queue->head);
+-
+- if (!node)
+- return NULL;
+-
+- next = READ_ONCE(node->next);
+- WRITE_ONCE(queue->head, next);
+-
+- if (unlikely(!next)) {
+- /* slowpath for the last element in the queue */
+-
+- if (atomic_long_cmpxchg(&queue->tail,
+- (long)&node->next, (long) &queue->head) != (long)&node->next) {
+- /* Updating tail failed wait for new next to appear */
+- do {
+- smp_rmb();
+- } while (unlikely(!(queue->head = READ_ONCE(node->next))));
+- }
+- }
+-
+- atomic_dec(&queue->job_count);
+- return node;
+-}
+-
+-
+-
+-#endif /* AMD_SCHEDULER_SPSC_QUEUE_H_ */
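The spsc_queue above is the lock-free single-producer/single-consumer queue that backs each scheduler entity's job_queue, and it moves to include/drm/spsc_queue.h later in this patch. A rough, hypothetical usage sketch (struct my_node, my_queue and the wake-up handling are illustrative, not taken from the patch):

#include <linux/kernel.h>
#include <drm/spsc_queue.h>

struct my_node {
	struct spsc_node entry;		/* embedded link, like drm_sched_job.queue_node */
	int payload;
};

static struct spsc_queue my_queue;	/* call spsc_queue_init(&my_queue) before first use */

/* producer side: returns true when the queue was empty, i.e. the consumer may need a wakeup */
static bool my_push(struct my_node *n)
{
	return spsc_queue_push(&my_queue, &n->entry);
}

/* consumer side: pops one node, or NULL when the queue is empty */
static struct my_node *my_pop(void)
{
	struct spsc_node *node = spsc_queue_pop(&my_queue);

	return node ? container_of(node, struct my_node, entry) : NULL;
}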
+diff --git a/drivers/gpu/drm/scheduler/Makefile b/drivers/gpu/drm/scheduler/Makefile
+new file mode 100644
+index 0000000..ed87791
+--- /dev/null
++++ b/drivers/gpu/drm/scheduler/Makefile
+@@ -0,0 +1,4 @@
++ccflags-y := -Iinclude/drm
++gpu-sched-y := gpu_scheduler.o sched_fence.o
++
++obj-$(CONFIG_DRM_SCHED) += gpu-sched.o
+diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
+new file mode 100644
+index 0000000..2c18996
+--- /dev/null
++++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
+@@ -0,0 +1,744 @@
++/*
++ * Copyright 2015 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include <linux/kthread.h>
++#include <linux/wait.h>
++#include <linux/sched.h>
++#include <uapi/linux/sched/types.h>
++#include <drm/drmP.h>
++#include <drm/gpu_scheduler.h>
++#include <drm/spsc_queue.h>
++
++#define CREATE_TRACE_POINTS
++#include <drm/gpu_scheduler_trace.h>
++
++#define to_drm_sched_job(sched_job) \
++ container_of((sched_job), struct drm_sched_job, queue_node)
++
++static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
++static void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
++static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
++
++/* Initialize a given run queue struct */
++static void drm_sched_rq_init(struct drm_sched_rq *rq)
++{
++ spin_lock_init(&rq->lock);
++ INIT_LIST_HEAD(&rq->entities);
++ rq->current_entity = NULL;
++}
++
++static void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
++ struct drm_sched_entity *entity)
++{
++ if (!list_empty(&entity->list))
++ return;
++ spin_lock(&rq->lock);
++ list_add_tail(&entity->list, &rq->entities);
++ spin_unlock(&rq->lock);
++}
++
++static void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
++ struct drm_sched_entity *entity)
++{
++ if (list_empty(&entity->list))
++ return;
++ spin_lock(&rq->lock);
++ list_del_init(&entity->list);
++ if (rq->current_entity == entity)
++ rq->current_entity = NULL;
++ spin_unlock(&rq->lock);
++}
++
++/**
++ * Select an entity which could provide a job to run
++ *
++ * @rq The run queue to check.
++ *
++ * Try to find a ready entity, returns NULL if none found.
++ */
++static struct drm_sched_entity *
++drm_sched_rq_select_entity(struct drm_sched_rq *rq)
++{
++ struct drm_sched_entity *entity;
++
++ spin_lock(&rq->lock);
++
++ entity = rq->current_entity;
++ if (entity) {
++ list_for_each_entry_continue(entity, &rq->entities, list) {
++ if (drm_sched_entity_is_ready(entity)) {
++ rq->current_entity = entity;
++ spin_unlock(&rq->lock);
++ return entity;
++ }
++ }
++ }
++
++ list_for_each_entry(entity, &rq->entities, list) {
++
++ if (drm_sched_entity_is_ready(entity)) {
++ rq->current_entity = entity;
++ spin_unlock(&rq->lock);
++ return entity;
++ }
++
++ if (entity == rq->current_entity)
++ break;
++ }
++
++ spin_unlock(&rq->lock);
++
++ return NULL;
++}
++
++/**
++ * Init a context entity used by the scheduler when submitting to a HW ring.
++ *
++ * @sched The pointer to the scheduler
++ * @entity The pointer to a valid drm_sched_entity
++ * @rq The run queue this entity belongs to
++ * @guilty The pointer to the context's guilty flag, may be NULL
++ * @jobs The max number of jobs in the job queue
++ *
++ * Return 0 on success, negative error code on failure.
++*/
++int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
++ struct drm_sched_entity *entity,
++ struct drm_sched_rq *rq,
++ uint32_t jobs, atomic_t *guilty)
++{
++ if (!(sched && entity && rq))
++ return -EINVAL;
++
++ memset(entity, 0, sizeof(struct drm_sched_entity));
++ INIT_LIST_HEAD(&entity->list);
++ entity->rq = rq;
++ entity->sched = sched;
++ entity->guilty = guilty;
++
++ spin_lock_init(&entity->rq_lock);
++ spin_lock_init(&entity->queue_lock);
++ spsc_queue_init(&entity->job_queue);
++
++ atomic_set(&entity->fence_seq, 0);
++ entity->fence_context = dma_fence_context_alloc(2);
++
++ return 0;
++}
++EXPORT_SYMBOL(drm_sched_entity_init);
++
++/**
++ * Query if entity is initialized
++ *
++ * @sched Pointer to scheduler instance
++ * @entity The pointer to a valid scheduler entity
++ *
++ * return true if entity is initialized, false otherwise
++*/
++static bool drm_sched_entity_is_initialized(struct drm_gpu_scheduler *sched,
++ struct drm_sched_entity *entity)
++{
++ return entity->sched == sched &&
++ entity->rq != NULL;
++}
++
++/**
++ * Check if entity is idle
++ *
++ * @entity The pointer to a valid scheduler entity
++ *
++ * Return true if the entity has no unscheduled jobs.
++ */
++static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
++{
++ rmb();
++ if (spsc_queue_peek(&entity->job_queue) == NULL)
++ return true;
++
++ return false;
++}
++
++/**
++ * Check if entity is ready
++ *
++ * @entity The pointer to a valid scheduler entity
++ *
++ * Return true if entity could provide a job.
++ */
++static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
++{
++ if (spsc_queue_peek(&entity->job_queue) == NULL)
++ return false;
++
++ if (READ_ONCE(entity->dependency))
++ return false;
++
++ return true;
++}
++
++/**
++ * Destroy a context entity
++ *
++ * @sched Pointer to scheduler instance
++ * @entity The pointer to a valid scheduler entity
++ *
++ * Cleanup and free the allocated resources.
++ */
++void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
++ struct drm_sched_entity *entity)
++{
++ int r;
++
++ if (!drm_sched_entity_is_initialized(sched, entity))
++ return;
++	/*
++	 * The client will not queue more IBs during this fini; consume the
++	 * existing queued IBs or discard them on SIGKILL.
++	 */
++ if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
++ r = -ERESTARTSYS;
++ else
++ r = wait_event_killable(sched->job_scheduled,
++ drm_sched_entity_is_idle(entity));
++ drm_sched_entity_set_rq(entity, NULL);
++ if (r) {
++ struct drm_sched_job *job;
++
++		/* Park the kernel thread for a moment to make sure it isn't
++		 * processing our entity.
++		 */
++ kthread_park(sched->thread);
++ kthread_unpark(sched->thread);
++ if (entity->dependency) {
++ dma_fence_remove_callback(entity->dependency,
++ &entity->cb);
++ dma_fence_put(entity->dependency);
++ entity->dependency = NULL;
++ }
++
++ while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
++ struct drm_sched_fence *s_fence = job->s_fence;
++ drm_sched_fence_scheduled(s_fence);
++ dma_fence_set_error(&s_fence->finished, -ESRCH);
++ drm_sched_fence_finished(s_fence);
++ WARN_ON(s_fence->parent);
++ dma_fence_put(&s_fence->finished);
++ sched->ops->free_job(job);
++ }
++ }
++}
++EXPORT_SYMBOL(drm_sched_entity_fini);
++
++static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
++{
++ struct drm_sched_entity *entity =
++ container_of(cb, struct drm_sched_entity, cb);
++ entity->dependency = NULL;
++ dma_fence_put(f);
++ drm_sched_wakeup(entity->sched);
++}
++
++static void drm_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
++{
++ struct drm_sched_entity *entity =
++ container_of(cb, struct drm_sched_entity, cb);
++ entity->dependency = NULL;
++ dma_fence_put(f);
++}
++
++void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
++ struct drm_sched_rq *rq)
++{
++ if (entity->rq == rq)
++ return;
++
++ spin_lock(&entity->rq_lock);
++
++ if (entity->rq)
++ drm_sched_rq_remove_entity(entity->rq, entity);
++
++ entity->rq = rq;
++ if (rq)
++ drm_sched_rq_add_entity(rq, entity);
++
++ spin_unlock(&entity->rq_lock);
++}
++EXPORT_SYMBOL(drm_sched_entity_set_rq);
++
++bool drm_sched_dependency_optimized(struct dma_fence *fence,
++ struct drm_sched_entity *entity)
++{
++ struct drm_gpu_scheduler *sched = entity->sched;
++ struct drm_sched_fence *s_fence;
++
++ if (!fence || dma_fence_is_signaled(fence))
++ return false;
++ if (fence->context == entity->fence_context)
++ return true;
++ s_fence = to_drm_sched_fence(fence);
++ if (s_fence && s_fence->sched == sched)
++ return true;
++
++ return false;
++}
++EXPORT_SYMBOL(drm_sched_dependency_optimized);
++
++static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
++{
++ struct drm_gpu_scheduler *sched = entity->sched;
++	struct dma_fence *fence = entity->dependency;
++ struct drm_sched_fence *s_fence;
++
++ if (fence->context == entity->fence_context) {
++		/* We can ignore fences from ourselves */
++ dma_fence_put(entity->dependency);
++ return false;
++ }
++
++ s_fence = to_drm_sched_fence(fence);
++ if (s_fence && s_fence->sched == sched) {
++
++ /*
++ * Fence is from the same scheduler, only need to wait for
++ * it to be scheduled
++ */
++ fence = dma_fence_get(&s_fence->scheduled);
++ dma_fence_put(entity->dependency);
++ entity->dependency = fence;
++ if (!dma_fence_add_callback(fence, &entity->cb,
++ drm_sched_entity_clear_dep))
++ return true;
++
++ /* Ignore it when it is already scheduled */
++ dma_fence_put(fence);
++ return false;
++ }
++
++ if (!dma_fence_add_callback(entity->dependency, &entity->cb,
++ drm_sched_entity_wakeup))
++ return true;
++
++ dma_fence_put(entity->dependency);
++ return false;
++}
++
++static struct drm_sched_job *
++drm_sched_entity_pop_job(struct drm_sched_entity *entity)
++{
++ struct drm_gpu_scheduler *sched = entity->sched;
++ struct drm_sched_job *sched_job = to_drm_sched_job(
++ spsc_queue_peek(&entity->job_queue));
++
++ if (!sched_job)
++ return NULL;
++
++ while ((entity->dependency = sched->ops->dependency(sched_job, entity)))
++ if (drm_sched_entity_add_dependency_cb(entity))
++ return NULL;
++
++ /* skip jobs from entity that marked guilty */
++ if (entity->guilty && atomic_read(entity->guilty))
++ dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);
++
++ spsc_queue_pop(&entity->job_queue);
++ return sched_job;
++}
++
++/**
++ * Submit a job to the entity's job queue
++ *
++ * @sched_job The pointer to the job to submit
++ *
++ * The first job queued to an idle entity wakes up the scheduler.
++ */
++void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
++ struct drm_sched_entity *entity)
++{
++ struct drm_gpu_scheduler *sched = sched_job->sched;
++ bool first = false;
++
++ trace_drm_sched_job(sched_job, entity);
++
++ spin_lock(&entity->queue_lock);
++ first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
++
++ spin_unlock(&entity->queue_lock);
++
++ /* first job wakes up scheduler */
++ if (first) {
++ /* Add the entity to the run queue */
++ spin_lock(&entity->rq_lock);
++ drm_sched_rq_add_entity(entity->rq, entity);
++ spin_unlock(&entity->rq_lock);
++ drm_sched_wakeup(sched);
++ }
++}
++EXPORT_SYMBOL(drm_sched_entity_push_job);
++
++/* job_finish is called after the hw fence is signaled
++ */
++static void drm_sched_job_finish(struct work_struct *work)
++{
++ struct drm_sched_job *s_job = container_of(work, struct drm_sched_job,
++ finish_work);
++ struct drm_gpu_scheduler *sched = s_job->sched;
++
++ /* remove job from ring_mirror_list */
++ spin_lock(&sched->job_list_lock);
++ list_del_init(&s_job->node);
++ if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
++ struct drm_sched_job *next;
++
++ spin_unlock(&sched->job_list_lock);
++ cancel_delayed_work_sync(&s_job->work_tdr);
++ spin_lock(&sched->job_list_lock);
++
++ /* queue TDR for next job */
++ next = list_first_entry_or_null(&sched->ring_mirror_list,
++ struct drm_sched_job, node);
++
++ if (next)
++ schedule_delayed_work(&next->work_tdr, sched->timeout);
++ }
++ spin_unlock(&sched->job_list_lock);
++ dma_fence_put(&s_job->s_fence->finished);
++ sched->ops->free_job(s_job);
++}
++
++static void drm_sched_job_finish_cb(struct dma_fence *f,
++ struct dma_fence_cb *cb)
++{
++ struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
++ finish_cb);
++ schedule_work(&job->finish_work);
++}
++
++static void drm_sched_job_begin(struct drm_sched_job *s_job)
++{
++ struct drm_gpu_scheduler *sched = s_job->sched;
++
++ dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb,
++ drm_sched_job_finish_cb);
++
++ spin_lock(&sched->job_list_lock);
++ list_add_tail(&s_job->node, &sched->ring_mirror_list);
++ if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
++ list_first_entry_or_null(&sched->ring_mirror_list,
++ struct drm_sched_job, node) == s_job)
++ schedule_delayed_work(&s_job->work_tdr, sched->timeout);
++ spin_unlock(&sched->job_list_lock);
++}
++
++static void drm_sched_job_timedout(struct work_struct *work)
++{
++ struct drm_sched_job *job = container_of(work, struct drm_sched_job,
++ work_tdr.work);
++
++ job->sched->ops->timedout_job(job);
++}
++
++void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
++{
++ struct drm_sched_job *s_job;
++ struct drm_sched_entity *entity, *tmp;
++	int i;
++
++ spin_lock(&sched->job_list_lock);
++ list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
++ if (s_job->s_fence->parent &&
++ dma_fence_remove_callback(s_job->s_fence->parent,
++ &s_job->s_fence->cb)) {
++ dma_fence_put(s_job->s_fence->parent);
++ s_job->s_fence->parent = NULL;
++ atomic_dec(&sched->hw_rq_count);
++ }
++ }
++ spin_unlock(&sched->job_list_lock);
++
++ if (bad && bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
++ atomic_inc(&bad->karma);
++		/* don't increase @bad's karma if it's from the KERNEL RQ,
++		 * because a GPU hang can corrupt kernel jobs (like VM updating
++		 * jobs), but kernel jobs are always considered good.
++		 */
++		for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL; i++) {
++ struct drm_sched_rq *rq = &sched->sched_rq[i];
++
++ spin_lock(&rq->lock);
++ list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
++ if (bad->s_fence->scheduled.context == entity->fence_context) {
++ if (atomic_read(&bad->karma) > bad->sched->hang_limit)
++ if (entity->guilty)
++ atomic_set(entity->guilty, 1);
++ break;
++ }
++ }
++ spin_unlock(&rq->lock);
++ if (&entity->list != &rq->entities)
++ break;
++ }
++ }
++}
++EXPORT_SYMBOL(drm_sched_hw_job_reset);
++
++void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
++{
++ struct drm_sched_job *s_job, *tmp;
++ bool found_guilty = false;
++ int r;
++
++ spin_lock(&sched->job_list_lock);
++ s_job = list_first_entry_or_null(&sched->ring_mirror_list,
++ struct drm_sched_job, node);
++ if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
++ schedule_delayed_work(&s_job->work_tdr, sched->timeout);
++
++ list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
++ struct drm_sched_fence *s_fence = s_job->s_fence;
++ struct dma_fence *fence;
++ uint64_t guilty_context;
++
++ if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
++ found_guilty = true;
++ guilty_context = s_job->s_fence->scheduled.context;
++ }
++
++ if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
++ dma_fence_set_error(&s_fence->finished, -ECANCELED);
++
++ spin_unlock(&sched->job_list_lock);
++ fence = sched->ops->run_job(s_job);
++ atomic_inc(&sched->hw_rq_count);
++ if (fence) {
++ s_fence->parent = dma_fence_get(fence);
++ r = dma_fence_add_callback(fence, &s_fence->cb,
++ drm_sched_process_job);
++ if (r == -ENOENT)
++ drm_sched_process_job(fence, &s_fence->cb);
++ else if (r)
++ DRM_ERROR("fence add callback failed (%d)\n",
++ r);
++ dma_fence_put(fence);
++ } else {
++ drm_sched_process_job(NULL, &s_fence->cb);
++ }
++ spin_lock(&sched->job_list_lock);
++ }
++ spin_unlock(&sched->job_list_lock);
++}
++EXPORT_SYMBOL(drm_sched_job_recovery);
++
++/* init a sched_job with its basic fields */
++int drm_sched_job_init(struct drm_sched_job *job,
++ struct drm_gpu_scheduler *sched,
++ struct drm_sched_entity *entity,
++ void *owner)
++{
++ job->sched = sched;
++ job->s_priority = entity->rq - sched->sched_rq;
++ job->s_fence = drm_sched_fence_create(entity, owner);
++ if (!job->s_fence)
++ return -ENOMEM;
++ job->id = atomic64_inc_return(&sched->job_id_count);
++
++ INIT_WORK(&job->finish_work, drm_sched_job_finish);
++ INIT_LIST_HEAD(&job->node);
++ INIT_DELAYED_WORK(&job->work_tdr, drm_sched_job_timedout);
++
++ return 0;
++}
++EXPORT_SYMBOL(drm_sched_job_init);
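Together with drm_sched_entity_push_job() above, drm_sched_job_init() forms the driver-facing submission path. A minimal sketch of how a driver might wire them up (my_submit() and its calling convention are hypothetical, not part of the patch):

static int my_submit(struct drm_gpu_scheduler *sched,
		     struct drm_sched_entity *entity,
		     struct drm_sched_job *job, void *owner)
{
	int r;

	/* bind the job to the scheduler/entity and create its scheduled/finished fences */
	r = drm_sched_job_init(job, sched, entity, owner);
	if (r)
		return r;

	/* queue it; the first job pushed to an idle entity wakes the scheduler thread */
	drm_sched_entity_push_job(job, entity);
	return 0;
}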
++
++/**
++ * Return true if we can push more jobs to the hw.
++ */
++static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
++{
++ return atomic_read(&sched->hw_rq_count) <
++ sched->hw_submission_limit;
++}
++
++/**
++ * Wake up the scheduler when it is ready
++ */
++static void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
++{
++ if (drm_sched_ready(sched))
++ wake_up_interruptible(&sched->wake_up_worker);
++}
++
++/**
++ * Select next entity to process
++*/
++static struct drm_sched_entity *
++drm_sched_select_entity(struct drm_gpu_scheduler *sched)
++{
++ struct drm_sched_entity *entity;
++ int i;
++
++ if (!drm_sched_ready(sched))
++ return NULL;
++
++	/* Kernel run queue has higher priority than normal run queue */
++ for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
++ entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
++ if (entity)
++ break;
++ }
++
++ return entity;
++}
++
++static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
++{
++ struct drm_sched_fence *s_fence =
++ container_of(cb, struct drm_sched_fence, cb);
++ struct drm_gpu_scheduler *sched = s_fence->sched;
++
++ dma_fence_get(&s_fence->finished);
++ atomic_dec(&sched->hw_rq_count);
++ drm_sched_fence_finished(s_fence);
++
++ trace_drm_sched_process_job(s_fence);
++ dma_fence_put(&s_fence->finished);
++ wake_up_interruptible(&sched->wake_up_worker);
++}
++
++static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
++{
++ if (kthread_should_park()) {
++ kthread_parkme();
++ return true;
++ }
++
++ return false;
++}
++
++static int drm_sched_main(void *param)
++{
++ struct sched_param sparam = {.sched_priority = 1};
++ struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
++ int r;
++
++ sched_setscheduler(current, SCHED_FIFO, &sparam);
++
++ while (!kthread_should_stop()) {
++ struct drm_sched_entity *entity = NULL;
++ struct drm_sched_fence *s_fence;
++ struct drm_sched_job *sched_job;
++ struct dma_fence *fence;
++
++ wait_event_interruptible(sched->wake_up_worker,
++ (!drm_sched_blocked(sched) &&
++ (entity = drm_sched_select_entity(sched))) ||
++ kthread_should_stop());
++
++ if (!entity)
++ continue;
++
++ sched_job = drm_sched_entity_pop_job(entity);
++ if (!sched_job)
++ continue;
++
++ s_fence = sched_job->s_fence;
++
++ atomic_inc(&sched->hw_rq_count);
++ drm_sched_job_begin(sched_job);
++
++ fence = sched->ops->run_job(sched_job);
++ drm_sched_fence_scheduled(s_fence);
++
++ if (fence) {
++ s_fence->parent = dma_fence_get(fence);
++ r = dma_fence_add_callback(fence, &s_fence->cb,
++ drm_sched_process_job);
++ if (r == -ENOENT)
++ drm_sched_process_job(fence, &s_fence->cb);
++ else if (r)
++ DRM_ERROR("fence add callback failed (%d)\n",
++ r);
++ dma_fence_put(fence);
++ } else {
++ drm_sched_process_job(NULL, &s_fence->cb);
++ }
++
++ wake_up(&sched->job_scheduled);
++ }
++ return 0;
++}
++
++/**
++ * Init a gpu scheduler instance
++ *
++ * @sched The pointer to the scheduler
++ * @ops The backend operations for this scheduler.
++ * @hw_submission Number of hw submissions that can be in flight.
++ * @name Name used for debugging
++ *
++ * Return 0 on success, otherwise error code.
++*/
++int drm_sched_init(struct drm_gpu_scheduler *sched,
++ const struct drm_sched_backend_ops *ops,
++ unsigned hw_submission,
++ unsigned hang_limit,
++ long timeout,
++ const char *name)
++{
++ int i;
++ sched->ops = ops;
++ sched->hw_submission_limit = hw_submission;
++ sched->name = name;
++ sched->timeout = timeout;
++ sched->hang_limit = hang_limit;
++ for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++)
++ drm_sched_rq_init(&sched->sched_rq[i]);
++
++ init_waitqueue_head(&sched->wake_up_worker);
++ init_waitqueue_head(&sched->job_scheduled);
++ INIT_LIST_HEAD(&sched->ring_mirror_list);
++ spin_lock_init(&sched->job_list_lock);
++ atomic_set(&sched->hw_rq_count, 0);
++ atomic64_set(&sched->job_id_count, 0);
++
++	/* Each scheduler will run on a separate kernel thread */
++ sched->thread = kthread_run(drm_sched_main, sched, sched->name);
++ if (IS_ERR(sched->thread)) {
++ DRM_ERROR("Failed to create scheduler for %s.\n", name);
++ return PTR_ERR(sched->thread);
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(drm_sched_init);
++
++/**
++ * Destroy a gpu scheduler
++ *
++ * @sched The pointer to the scheduler
++ */
++void drm_sched_fini(struct drm_gpu_scheduler *sched)
++{
++ if (sched->thread)
++ kthread_stop(sched->thread);
++}
++EXPORT_SYMBOL(drm_sched_fini);
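For the bring-up and teardown side, a driver typically embeds one drm_gpu_scheduler per hardware ring plus one or more entities. A hedged sketch, assuming a hypothetical struct my_ring and the my_sched_ops table sketched after the new gpu_scheduler.h header below:

#include <linux/jiffies.h>
#include <drm/gpu_scheduler.h>

extern const struct drm_sched_backend_ops my_sched_ops;	/* see the ops sketch below */

struct my_ring {
	struct drm_gpu_scheduler sched;
	struct drm_sched_entity entity;
	const char *name;
};

static int my_ring_init(struct my_ring *ring)
{
	struct drm_sched_rq *rq;
	int r;

	r = drm_sched_init(&ring->sched, &my_sched_ops,
			   16,				/* hw_submission limit */
			   3,				/* hang_limit */
			   msecs_to_jiffies(10000),	/* job timeout */
			   ring->name);
	if (r)
		return r;

	/* place the entity on the normal-priority run queue of this scheduler */
	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
	return drm_sched_entity_init(&ring->sched, &ring->entity, rq, 32, NULL);
}

static void my_ring_fini(struct my_ring *ring)
{
	drm_sched_entity_fini(&ring->sched, &ring->entity);
	drm_sched_fini(&ring->sched);
}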
+diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
+new file mode 100644
+index 0000000..f6f2955
+--- /dev/null
++++ b/drivers/gpu/drm/scheduler/sched_fence.c
+@@ -0,0 +1,187 @@
++/*
++ * Copyright 2015 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include <linux/kthread.h>
++#include <linux/wait.h>
++#include <linux/sched.h>
++#include <drm/drmP.h>
++#include <drm/gpu_scheduler.h>
++
++static struct kmem_cache *sched_fence_slab;
++
++int drm_sched_fence_slab_init(void)
++{
++ sched_fence_slab = kmem_cache_create(
++ "drm_sched_fence", sizeof(struct drm_sched_fence), 0,
++ SLAB_HWCACHE_ALIGN, NULL);
++ if (!sched_fence_slab)
++ return -ENOMEM;
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(drm_sched_fence_slab_init);
++
++void drm_sched_fence_slab_fini(void)
++{
++ rcu_barrier();
++ kmem_cache_destroy(sched_fence_slab);
++}
++EXPORT_SYMBOL_GPL(drm_sched_fence_slab_fini);
++
++void drm_sched_fence_scheduled(struct drm_sched_fence *fence)
++{
++ int ret = dma_fence_signal(&fence->scheduled);
++
++ if (!ret)
++ DMA_FENCE_TRACE(&fence->scheduled,
++ "signaled from irq context\n");
++ else
++ DMA_FENCE_TRACE(&fence->scheduled,
++ "was already signaled\n");
++}
++
++void drm_sched_fence_finished(struct drm_sched_fence *fence)
++{
++ int ret = dma_fence_signal(&fence->finished);
++
++ if (!ret)
++ DMA_FENCE_TRACE(&fence->finished,
++ "signaled from irq context\n");
++ else
++ DMA_FENCE_TRACE(&fence->finished,
++ "was already signaled\n");
++}
++
++static const char *drm_sched_fence_get_driver_name(struct dma_fence *fence)
++{
++ return "drm_sched";
++}
++
++static const char *drm_sched_fence_get_timeline_name(struct dma_fence *f)
++{
++ struct drm_sched_fence *fence = to_drm_sched_fence(f);
++ return (const char *)fence->sched->name;
++}
++
++static bool drm_sched_fence_enable_signaling(struct dma_fence *f)
++{
++ return true;
++}
++
++/**
++ * drm_sched_fence_free - free up the fence memory
++ *
++ * @rcu: RCU callback head
++ *
++ * Free up the fence memory after the RCU grace period.
++ */
++static void drm_sched_fence_free(struct rcu_head *rcu)
++{
++ struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
++ struct drm_sched_fence *fence = to_drm_sched_fence(f);
++
++ dma_fence_put(fence->parent);
++ kmem_cache_free(sched_fence_slab, fence);
++}
++
++/**
++ * drm_sched_fence_release_scheduled - callback that fence can be freed
++ *
++ * @fence: fence
++ *
++ * This function is called when the reference count becomes zero.
++ * It just RCU schedules freeing up the fence.
++ */
++static void drm_sched_fence_release_scheduled(struct dma_fence *f)
++{
++ struct drm_sched_fence *fence = to_drm_sched_fence(f);
++
++ call_rcu(&fence->finished.rcu, drm_sched_fence_free);
++}
++
++/**
++ * drm_sched_fence_release_finished - drop extra reference
++ *
++ * @f: fence
++ *
++ * Drop the extra reference from the scheduled fence to the base fence.
++ */
++static void drm_sched_fence_release_finished(struct dma_fence *f)
++{
++ struct drm_sched_fence *fence = to_drm_sched_fence(f);
++
++ dma_fence_put(&fence->scheduled);
++}
++
++const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
++ .get_driver_name = drm_sched_fence_get_driver_name,
++ .get_timeline_name = drm_sched_fence_get_timeline_name,
++ .enable_signaling = drm_sched_fence_enable_signaling,
++ .signaled = NULL,
++ .wait = dma_fence_default_wait,
++ .release = drm_sched_fence_release_scheduled,
++};
++
++const struct dma_fence_ops drm_sched_fence_ops_finished = {
++ .get_driver_name = drm_sched_fence_get_driver_name,
++ .get_timeline_name = drm_sched_fence_get_timeline_name,
++ .enable_signaling = drm_sched_fence_enable_signaling,
++ .signaled = NULL,
++ .wait = dma_fence_default_wait,
++ .release = drm_sched_fence_release_finished,
++};
++
++struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
++{
++ if (f->ops == &drm_sched_fence_ops_scheduled)
++ return container_of(f, struct drm_sched_fence, scheduled);
++
++ if (f->ops == &drm_sched_fence_ops_finished)
++ return container_of(f, struct drm_sched_fence, finished);
++
++ return NULL;
++}
++EXPORT_SYMBOL(to_drm_sched_fence);
++
++struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity,
++ void *owner)
++{
++ struct drm_sched_fence *fence = NULL;
++ unsigned seq;
++
++ fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
++ if (fence == NULL)
++ return NULL;
++
++ fence->owner = owner;
++ fence->sched = entity->sched;
++ spin_lock_init(&fence->lock);
++
++ seq = atomic_inc_return(&entity->fence_seq);
++ dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled,
++ &fence->lock, entity->fence_context, seq);
++ dma_fence_init(&fence->finished, &drm_sched_fence_ops_finished,
++ &fence->lock, entity->fence_context + 1, seq);
++
++ return fence;
++}
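The exported to_drm_sched_fence() above lets a driver recognise scheduler fences on arbitrary dma_fence pointers, in the same way drm_sched_dependency_optimized() does in gpu_scheduler.c. A small illustrative helper (my_fence_same_sched() is a hypothetical name):

static bool my_fence_same_sched(struct dma_fence *f,
				struct drm_gpu_scheduler *sched)
{
	struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

	/* NULL means @f was not produced by any drm scheduler */
	return s_fence && s_fence->sched == sched;
}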
+diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
+new file mode 100644
+index 0000000..d29da4c
+--- /dev/null
++++ b/include/drm/gpu_scheduler.h
+@@ -0,0 +1,176 @@
++/*
++ * Copyright 2015 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef _DRM_GPU_SCHEDULER_H_
++#define _DRM_GPU_SCHEDULER_H_
++
++#include <drm/spsc_queue.h>
++#include <linux/dma-fence.h>
++
++struct drm_gpu_scheduler;
++struct drm_sched_rq;
++
++enum drm_sched_priority {
++ DRM_SCHED_PRIORITY_MIN,
++ DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN,
++ DRM_SCHED_PRIORITY_NORMAL,
++ DRM_SCHED_PRIORITY_HIGH_SW,
++ DRM_SCHED_PRIORITY_HIGH_HW,
++ DRM_SCHED_PRIORITY_KERNEL,
++ DRM_SCHED_PRIORITY_MAX,
++ DRM_SCHED_PRIORITY_INVALID = -1,
++ DRM_SCHED_PRIORITY_UNSET = -2
++};
++
++/**
++ * A scheduler entity is a wrapper around a job queue or a group
++ * of other entities. Entities take turns emitting jobs from their
++ * job queues to corresponding hardware ring based on scheduling
++ * policy.
++*/
++struct drm_sched_entity {
++ struct list_head list;
++ struct drm_sched_rq *rq;
++ spinlock_t rq_lock;
++ struct drm_gpu_scheduler *sched;
++
++ spinlock_t queue_lock;
++ struct spsc_queue job_queue;
++
++ atomic_t fence_seq;
++ uint64_t fence_context;
++
++ struct dma_fence *dependency;
++ struct dma_fence_cb cb;
++ atomic_t *guilty; /* points to ctx's guilty */
++};
++
++/**
++ * Run queue is a set of entities scheduling command submissions for
++ * one specific ring. It implements the scheduling policy that selects
++ * the next entity to emit commands from.
++*/
++struct drm_sched_rq {
++ spinlock_t lock;
++ struct list_head entities;
++ struct drm_sched_entity *current_entity;
++};
++
++struct drm_sched_fence {
++ struct dma_fence scheduled;
++ struct dma_fence finished;
++ struct dma_fence_cb cb;
++ struct dma_fence *parent;
++ struct drm_gpu_scheduler *sched;
++ spinlock_t lock;
++ void *owner;
++};
++
++struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
++
++struct drm_sched_job {
++ struct spsc_node queue_node;
++ struct drm_gpu_scheduler *sched;
++ struct drm_sched_fence *s_fence;
++ struct dma_fence_cb finish_cb;
++ struct work_struct finish_work;
++ struct list_head node;
++ struct delayed_work work_tdr;
++ uint64_t id;
++ atomic_t karma;
++ enum drm_sched_priority s_priority;
++};
++
++static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
++ int threshold)
++{
++ return (s_job && atomic_inc_return(&s_job->karma) > threshold);
++}
++
++/**
++ * Define the backend operations called by the scheduler;
++ * these functions should be implemented by the driver.
++*/
++struct drm_sched_backend_ops {
++ struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
++ struct drm_sched_entity *s_entity);
++ struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
++ void (*timedout_job)(struct drm_sched_job *sched_job);
++ void (*free_job)(struct drm_sched_job *sched_job);
++};
++
++/**
++ * One scheduler is implemented for each hardware ring
++*/
++struct drm_gpu_scheduler {
++ const struct drm_sched_backend_ops *ops;
++ uint32_t hw_submission_limit;
++ long timeout;
++ const char *name;
++ struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_MAX];
++ wait_queue_head_t wake_up_worker;
++ wait_queue_head_t job_scheduled;
++ atomic_t hw_rq_count;
++ atomic64_t job_id_count;
++ struct task_struct *thread;
++ struct list_head ring_mirror_list;
++ spinlock_t job_list_lock;
++ int hang_limit;
++};
++
++int drm_sched_init(struct drm_gpu_scheduler *sched,
++ const struct drm_sched_backend_ops *ops,
++ uint32_t hw_submission, unsigned hang_limit, long timeout,
++ const char *name);
++void drm_sched_fini(struct drm_gpu_scheduler *sched);
++
++int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
++ struct drm_sched_entity *entity,
++ struct drm_sched_rq *rq,
++ uint32_t jobs, atomic_t *guilty);
++void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
++ struct drm_sched_entity *entity);
++void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
++ struct drm_sched_entity *entity);
++void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
++ struct drm_sched_rq *rq);
++
++int drm_sched_fence_slab_init(void);
++void drm_sched_fence_slab_fini(void);
++
++struct drm_sched_fence *drm_sched_fence_create(
++ struct drm_sched_entity *s_entity, void *owner);
++void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
++void drm_sched_fence_finished(struct drm_sched_fence *fence);
++int drm_sched_job_init(struct drm_sched_job *job,
++ struct drm_gpu_scheduler *sched,
++ struct drm_sched_entity *entity,
++ void *owner);
++void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
++ struct drm_sched_job *job);
++void drm_sched_job_recovery(struct drm_gpu_scheduler *sched);
++bool drm_sched_dependency_optimized(struct dma_fence *fence,
++ struct drm_sched_entity *entity);
++void drm_sched_job_kickout(struct drm_sched_job *s_job);
++
++#endif
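For orientation, the header above is the whole driver-facing API: a driver supplies a drm_sched_backend_ops, creates one drm_gpu_scheduler per ring with drm_sched_init(), one drm_sched_entity per submitting context with drm_sched_entity_init(), and then feeds jobs through drm_sched_job_init() plus drm_sched_entity_push_job(). The sketch below shows that wiring for a hypothetical "foo" driver; struct foo_job, the foo_* names and the DRM_SCHED_PRIORITY_NORMAL run-queue choice are illustrative assumptions, not part of this patch.

    #include <linux/kernel.h>
    #include <linux/dma-fence.h>
    #include <linux/slab.h>
    #include <drm/gpu_scheduler.h>

    struct foo_job {
    	struct drm_sched_job base;
    	struct dma_fence *hw_fence;	/* signalled by the hardware when the job retires */
    };

    static struct dma_fence *foo_sched_dependency(struct drm_sched_job *sched_job,
    					      struct drm_sched_entity *s_entity)
    {
    	return NULL;	/* nothing to wait for beyond the job's own fences */
    }

    static struct dma_fence *foo_sched_run_job(struct drm_sched_job *sched_job)
    {
    	struct foo_job *job = container_of(sched_job, struct foo_job, base);

    	/* emit the command stream to the ring here, then return the HW fence */
    	return dma_fence_get(job->hw_fence);
    }

    static void foo_sched_timedout_job(struct drm_sched_job *sched_job)
    {
    	/* reset the hung ring, then resubmit via drm_sched_job_recovery() */
    }

    static void foo_sched_free_job(struct drm_sched_job *sched_job)
    {
    	kfree(container_of(sched_job, struct foo_job, base));
    }

    static const struct drm_sched_backend_ops foo_sched_ops = {
    	.dependency	= foo_sched_dependency,
    	.run_job	= foo_sched_run_job,
    	.timedout_job	= foo_sched_timedout_job,
    	.free_job	= foo_sched_free_job,
    };

    /* hand one prepared job to the scheduler on behalf of a context's entity */
    static int foo_submit_job(struct drm_gpu_scheduler *sched,
    			  struct drm_sched_entity *entity,
    			  struct foo_job *job, void *owner)
    {
    	int r = drm_sched_job_init(&job->base, sched, entity, owner);

    	if (r)
    		return r;

    	drm_sched_entity_push_job(&job->base, entity);
    	return 0;
    }

The scheduler and entity would typically be created once at init time, e.g. drm_sched_init(sched, &foo_sched_ops, 16, 3, msecs_to_jiffies(500), "foo-gfx") followed by drm_sched_entity_init(sched, entity, &sched->sched_rq[DRM_SCHED_PRIORITY_NORMAL], 32, NULL); the submission limit, hang limit, timeout and queue depth shown there are placeholder values.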
+diff --git a/include/drm/gpu_scheduler_trace.h b/include/drm/gpu_scheduler_trace.h
+new file mode 100644
+index 0000000..17c2fb7
+--- /dev/null
++++ b/include/drm/gpu_scheduler_trace.h
+@@ -0,0 +1,59 @@
++#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _GPU_SCHED_TRACE_H
++
++#include <linux/stringify.h>
++#include <linux/types.h>
++#include <linux/tracepoint.h>
++
++#include <drm/drmP.h>
++
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM gpu_scheduler
++#define TRACE_INCLUDE_FILE gpu_scheduler_trace
++
++TRACE_EVENT(drm_sched_job,
++ TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
++ TP_ARGS(sched_job, entity),
++ TP_STRUCT__entry(
++ __field(struct drm_sched_entity *, entity)
++ __field(struct dma_fence *, fence)
++ __field(const char *, name)
++ __field(uint64_t, id)
++ __field(u32, job_count)
++ __field(int, hw_job_count)
++ ),
++
++ TP_fast_assign(
++ __entry->entity = entity;
++ __entry->id = sched_job->id;
++ __entry->fence = &sched_job->s_fence->finished;
++ __entry->name = sched_job->sched->name;
++ __entry->job_count = spsc_queue_count(&entity->job_queue);
++ __entry->hw_job_count = atomic_read(
++ &sched_job->sched->hw_rq_count);
++ ),
++ TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d",
++ __entry->entity, __entry->id,
++ __entry->fence, __entry->name,
++ __entry->job_count, __entry->hw_job_count)
++);
++
++TRACE_EVENT(drm_sched_process_job,
++ TP_PROTO(struct drm_sched_fence *fence),
++ TP_ARGS(fence),
++ TP_STRUCT__entry(
++ __field(struct dma_fence *, fence)
++ ),
++
++ TP_fast_assign(
++ __entry->fence = &fence->finished;
++ ),
++ TP_printk("fence=%p signaled", __entry->fence)
++);
++
++#endif
++
++/* This part must be outside protection */
++#undef TRACE_INCLUDE_PATH
++#define TRACE_INCLUDE_PATH .
++#include <trace/define_trace.h>
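These TRACE_EVENT() definitions expand into callable trace_drm_sched_job() and trace_drm_sched_process_job() functions once a single compilation unit defines CREATE_TRACE_POINTS before including the header, which the scheduler core does elsewhere in this patch. The fragment below is only a sketch of that standard tracepoint pattern; the example_* functions are invented for illustration, and the build flags needed to satisfy TRACE_INCLUDE_PATH are assumed to be in place.

    #include <drm/gpu_scheduler.h>

    #define CREATE_TRACE_POINTS
    #include <drm/gpu_scheduler_trace.h>

    static void example_queue_job(struct drm_sched_job *sched_job,
    			      struct drm_sched_entity *entity)
    {
    	/* event appears under events/gpu_scheduler/drm_sched_job in tracefs */
    	trace_drm_sched_job(sched_job, entity);
    }

    static void example_job_done(struct drm_sched_fence *s_fence)
    {
    	/* mirrors the "fence=%p signaled" event defined above */
    	trace_drm_sched_process_job(s_fence);
    }

Any other file that only needs to emit the events includes the header without defining CREATE_TRACE_POINTS, so the event bodies are instantiated exactly once.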
+diff --git a/include/drm/spsc_queue.h b/include/drm/spsc_queue.h
+new file mode 100644
+index 0000000..125f096
+--- /dev/null
++++ b/include/drm/spsc_queue.h
+@@ -0,0 +1,122 @@
++/*
++ * Copyright 2017 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef DRM_SCHEDULER_SPSC_QUEUE_H_
++#define DRM_SCHEDULER_SPSC_QUEUE_H_
++
++#include <linux/atomic.h>
++#include <linux/preempt.h>
++
++/** Lockless single-producer, single-consumer (SPSC) queue */
++
++struct spsc_node {
++
++ /* Stores spsc_node* */
++ struct spsc_node *next;
++};
++
++struct spsc_queue {
++
++ struct spsc_node *head;
++
++ /* atomic pointer to struct spsc_node* */
++ atomic_long_t tail;
++
++ atomic_t job_count;
++};
++
++static inline void spsc_queue_init(struct spsc_queue *queue)
++{
++ queue->head = NULL;
++ atomic_long_set(&queue->tail, (long)&queue->head);
++ atomic_set(&queue->job_count, 0);
++}
++
++static inline struct spsc_node *spsc_queue_peek(struct spsc_queue *queue)
++{
++ return queue->head;
++}
++
++static inline int spsc_queue_count(struct spsc_queue *queue)
++{
++ return atomic_read(&queue->job_count);
++}
++
++static inline bool spsc_queue_push(struct spsc_queue *queue, struct spsc_node *node)
++{
++ struct spsc_node **tail;
++
++ node->next = NULL;
++
++ preempt_disable();
++
++ tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next);
++ WRITE_ONCE(*tail, node);
++ atomic_inc(&queue->job_count);
++
++	/*
++	 * For the first element, make sure the new node is visible to the
++	 * consumer thread before we wake the scheduler thread for new work.
++	 */
++ smp_wmb();
++
++ preempt_enable();
++
++ return tail == &queue->head;
++}
++
++
++static inline struct spsc_node *spsc_queue_pop(struct spsc_queue *queue)
++{
++ struct spsc_node *next, *node;
++
++	/* Pairs with the smp_wmb() in spsc_queue_push() */
++ smp_rmb();
++
++ node = READ_ONCE(queue->head);
++
++ if (!node)
++ return NULL;
++
++ next = READ_ONCE(node->next);
++ WRITE_ONCE(queue->head, next);
++
++ if (unlikely(!next)) {
++ /* slowpath for the last element in the queue */
++
++ if (atomic_long_cmpxchg(&queue->tail,
++ (long)&node->next, (long) &queue->head) != (long)&node->next) {
++			/* Updating tail failed; wait for the new next to appear */
++ do {
++ smp_rmb();
++ } while (unlikely(!(queue->head = READ_ONCE(node->next))));
++ }
++ }
++
++ atomic_dec(&queue->job_count);
++ return node;
++}
++
++
++
++#endif /* DRM_SCHEDULER_SPSC_QUEUE_H_ */
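The queue above is strictly single-producer/single-consumer: in the scheduler, each drm_sched_entity is pushed to from one submitting context and drained by the scheduler thread, and the push return value (true when the queue was empty beforehand) is what tells the producer that the consumer may need waking. A minimal usage sketch follows; the demo_item type and the demo_* function names are invented for illustration.

    #include <linux/kernel.h>
    #include <drm/spsc_queue.h>

    struct demo_item {
    	struct spsc_node node;	/* embedded queue linkage */
    	int payload;
    };

    static struct spsc_queue demo_queue;

    static void demo_init(void)
    {
    	spsc_queue_init(&demo_queue);
    }

    /* producer side: exactly one context may call this */
    static bool demo_push(struct demo_item *item)
    {
    	/* true means the queue was empty, i.e. the consumer may need a wake-up */
    	return spsc_queue_push(&demo_queue, &item->node);
    }

    /* consumer side: exactly one context (e.g. a kthread) may call this */
    static struct demo_item *demo_pop(void)
    {
    	struct spsc_node *node = spsc_queue_pop(&demo_queue);

    	return node ? container_of(node, struct demo_item, node) : NULL;
    }

Elsewhere in this patch, the entity push path uses the push return value in exactly this way, waking the scheduler thread only when a job lands on an otherwise idle entity.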
+--
+2.7.4
+