aboutsummaryrefslogtreecommitdiffstats
path: root/common/recipes-kernel/linux/linux-yocto-4.19.8/0688-drm-amdgpu-Retire-amdgpu_ring.ready-flag-v4.patch
diff options
context:
space:
mode:
Diffstat (limited to 'common/recipes-kernel/linux/linux-yocto-4.19.8/0688-drm-amdgpu-Retire-amdgpu_ring.ready-flag-v4.patch')
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.19.8/0688-drm-amdgpu-Retire-amdgpu_ring.ready-flag-v4.patch1038
1 files changed, 1038 insertions, 0 deletions
diff --git a/common/recipes-kernel/linux/linux-yocto-4.19.8/0688-drm-amdgpu-Retire-amdgpu_ring.ready-flag-v4.patch b/common/recipes-kernel/linux/linux-yocto-4.19.8/0688-drm-amdgpu-Retire-amdgpu_ring.ready-flag-v4.patch
new file mode 100644
index 00000000..f5b970a5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.19.8/0688-drm-amdgpu-Retire-amdgpu_ring.ready-flag-v4.patch
@@ -0,0 +1,1038 @@
+From e3888c0acaf765e4ee5ec3a5cfd4629bcca89791 Mon Sep 17 00:00:00 2001
+From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Date: Fri, 19 Oct 2018 16:22:48 -0400
+Subject: [PATCH 0688/2940] drm/amdgpu: Retire amdgpu_ring.ready flag v4
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Start using drm_gpu_scheduler.ready instead.
+
+v3:
+Add helper function to run ring test and set
+sched.ready flag status accordingly, clean explicit
+sched.ready sets from the IP specific files.
+
+v4: Add kerneldoc and rebase.
+
+Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Chaudhary Amit Kumar <Chaudharyamit.Kumar@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 2 +-
+ .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 6 ++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 18 +++++------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 22 +++++++++++++-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 3 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 12 ++++----
+ drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 16 ++++------
+ drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 16 ++++------
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 29 +++++++-----------
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 30 +++++++------------
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 12 ++++----
+ drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 12 ++++----
+ drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 24 +++++++--------
+ drivers/gpu/drm/amd/amdgpu/si_dma.c | 10 +++----
+ drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 9 ++----
+ drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 9 ++----
+ drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 16 ++++------
+ drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 16 ++++------
+ drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | 6 +---
+ drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 7 +----
+ drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 9 ++----
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 24 +++++----------
+ 26 files changed, 129 insertions(+), 187 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+index a410fcf98ee5..9ef877f0c87d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+@@ -166,7 +166,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
+ KGD_MAX_QUEUES);
+
+ /* remove the KIQ bit as well */
+- if (adev->gfx.kiq.ring.ready)
++ if (adev->gfx.kiq.ring.sched.ready)
+ clear_bit(amdgpu_gfx_queue_to_bit(adev,
+ adev->gfx.kiq.ring.me - 1,
+ adev->gfx.kiq.ring.pipe,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+index 8efedfcb9dfc..818dee5ce500 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+@@ -874,7 +874,7 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
+ if (adev->in_gpu_reset)
+ return -EIO;
+
+- if (ring->ready)
++ if (ring->sched.ready)
+ return invalidate_tlbs_with_kiq(adev, pasid);
+
+ for (vmid = 0; vmid < 16; vmid++) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+index f92bb432e17d..b94065d663d8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+@@ -147,7 +147,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
+ fence_ctx = 0;
+ }
+
+- if (!ring->ready) {
++ if (!ring->sched.ready) {
+ dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
+ return -EINVAL;
+ }
+@@ -359,7 +359,7 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
+ struct amdgpu_ring *ring = adev->rings[i];
+ long tmo;
+
+- if (!ring || !ring->ready)
++ if (!ring || !ring->sched.ready)
+ continue;
+
+ /* skip IB tests for KIQ in general for the below reasons:
+@@ -383,7 +383,7 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
+
+ r = amdgpu_ring_test_ib(ring, tmo);
+ if (r) {
+- ring->ready = false;
++ ring->sched.ready = false;
+
+ if (ring == &adev->gfx.gfx_ring[0]) {
+ /* oh, oh, that's really bad */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index fb019a872733..090a06341e7d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -333,7 +333,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
+ case AMDGPU_HW_IP_GFX:
+ type = AMD_IP_BLOCK_TYPE_GFX;
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++)
+- if (adev->gfx.gfx_ring[i].ready)
++ if (adev->gfx.gfx_ring[i].sched.ready)
+ ++num_rings;
+ ib_start_alignment = 32;
+ ib_size_alignment = 32;
+@@ -341,7 +341,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
+ case AMDGPU_HW_IP_COMPUTE:
+ type = AMD_IP_BLOCK_TYPE_GFX;
+ for (i = 0; i < adev->gfx.num_compute_rings; i++)
+- if (adev->gfx.compute_ring[i].ready)
++ if (adev->gfx.compute_ring[i].sched.ready)
+ ++num_rings;
+ ib_start_alignment = 32;
+ ib_size_alignment = 32;
+@@ -349,7 +349,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
+ case AMDGPU_HW_IP_DMA:
+ type = AMD_IP_BLOCK_TYPE_SDMA;
+ for (i = 0; i < adev->sdma.num_instances; i++)
+- if (adev->sdma.instance[i].ring.ready)
++ if (adev->sdma.instance[i].ring.sched.ready)
+ ++num_rings;
+ ib_start_alignment = 256;
+ ib_size_alignment = 4;
+@@ -360,7 +360,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
+
+- if (adev->uvd.inst[i].ring.ready)
++ if (adev->uvd.inst[i].ring.sched.ready)
+ ++num_rings;
+ }
+ ib_start_alignment = 64;
+@@ -369,7 +369,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
+ case AMDGPU_HW_IP_VCE:
+ type = AMD_IP_BLOCK_TYPE_VCE;
+ for (i = 0; i < adev->vce.num_rings; i++)
+- if (adev->vce.ring[i].ready)
++ if (adev->vce.ring[i].sched.ready)
+ ++num_rings;
+ ib_start_alignment = 4;
+ ib_size_alignment = 1;
+@@ -381,7 +381,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
+ continue;
+
+ for (j = 0; j < adev->uvd.num_enc_rings; j++)
+- if (adev->uvd.inst[i].ring_enc[j].ready)
++ if (adev->uvd.inst[i].ring_enc[j].sched.ready)
+ ++num_rings;
+ }
+ ib_start_alignment = 64;
+@@ -389,7 +389,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
+ break;
+ case AMDGPU_HW_IP_VCN_DEC:
+ type = AMD_IP_BLOCK_TYPE_VCN;
+- if (adev->vcn.ring_dec.ready)
++ if (adev->vcn.ring_dec.sched.ready)
+ ++num_rings;
+ ib_start_alignment = 16;
+ ib_size_alignment = 16;
+@@ -397,14 +397,14 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
+ case AMDGPU_HW_IP_VCN_ENC:
+ type = AMD_IP_BLOCK_TYPE_VCN;
+ for (i = 0; i < adev->vcn.num_enc_rings; i++)
+- if (adev->vcn.ring_enc[i].ready)
++ if (adev->vcn.ring_enc[i].sched.ready)
+ ++num_rings;
+ ib_start_alignment = 64;
+ ib_size_alignment = 1;
+ break;
+ case AMDGPU_HW_IP_VCN_JPEG:
+ type = AMD_IP_BLOCK_TYPE_VCN;
+- if (adev->vcn.ring_jpeg.ready)
++ if (adev->vcn.ring_jpeg.sched.ready)
+ ++num_rings;
+ ib_start_alignment = 16;
+ ib_size_alignment = 16;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index 59cc678de8c1..7235cd0b0fa9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -2129,7 +2129,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+- if (ring && ring->ready)
++ if (ring && ring->sched.ready)
+ amdgpu_fence_wait_empty(ring);
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+index b70e85ec147d..3c89c8aa33d8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+@@ -338,7 +338,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
+ */
+ void amdgpu_ring_fini(struct amdgpu_ring *ring)
+ {
+- ring->ready = false;
++ ring->sched.ready = false;
+
+ /* Not to finish a ring which is not initialized */
+ if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
+@@ -500,3 +500,23 @@ static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring)
+ debugfs_remove(ring->ent);
+ #endif
+ }
++
++/**
++ * amdgpu_ring_test_helper - tests ring and set sched readiness status
++ *
++ * @ring: ring to try the recovery on
++ *
++ * Tests ring and set sched readiness status
++ *
++ * Returns 0 on success, error on failure.
++ */
++int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
++{
++ int r;
++
++ r = amdgpu_ring_test_ring(ring);
++
++ ring->sched.ready = !r;
++
++ return r;
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+index 4caa301ce454..4cdddbc4491b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+@@ -189,7 +189,6 @@ struct amdgpu_ring {
+ uint64_t gpu_addr;
+ uint64_t ptr_mask;
+ uint32_t buf_mask;
+- bool ready;
+ u32 idx;
+ u32 me;
+ u32 pipe;
+@@ -313,4 +312,6 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
+ ring->count_dw -= count_dw;
+ }
+
++int amdgpu_ring_test_helper(struct amdgpu_ring *ring);
++
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index b9d6a214f651..9f7d91b69ccb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -2286,7 +2286,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
+ unsigned i;
+ int r;
+
+- if (direct_submit && !ring->ready) {
++ if (direct_submit && !ring->sched.ready) {
+ DRM_ERROR("Trying to move memory with ring turned off.\n");
+ return -EINVAL;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+index 32eb43d165f2..561406a1cf88 100644
+--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
++++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+@@ -316,8 +316,8 @@ static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
+ WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
+ WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0);
+ }
+- sdma0->ready = false;
+- sdma1->ready = false;
++ sdma0->sched.ready = false;
++ sdma1->sched.ready = false;
+ }
+
+ /**
+@@ -494,18 +494,16 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
+ /* enable DMA IBs */
+ WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
+
+- ring->ready = true;
++ ring->sched.ready = true;
+ }
+
+ cik_sdma_enable(adev, true);
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ ring = &adev->sdma.instance[i].ring;
+- r = amdgpu_ring_test_ring(ring);
+- if (r) {
+- ring->ready = false;
++ r = amdgpu_ring_test_helper(ring);
++ if (r)
+ return r;
+- }
+
+ if (adev->mman.buffer_funcs_ring == ring)
+ amdgpu_ttm_set_buffer_funcs_status(adev, true);
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+index 622dd70f310e..c8f038136af0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+@@ -1950,9 +1950,9 @@ static void gfx_v6_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
+ CP_ME_CNTL__CE_HALT_MASK));
+ WREG32(mmSCRATCH_UMSK, 0);
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++)
+- adev->gfx.gfx_ring[i].ready = false;
++ adev->gfx.gfx_ring[i].sched.ready = false;
+ for (i = 0; i < adev->gfx.num_compute_rings; i++)
+- adev->gfx.compute_ring[i].ready = false;
++ adev->gfx.compute_ring[i].sched.ready = false;
+ }
+ udelay(50);
+ }
+@@ -2124,12 +2124,9 @@ static int gfx_v6_0_cp_gfx_resume(struct amdgpu_device *adev)
+
+ /* start the rings */
+ gfx_v6_0_cp_gfx_start(adev);
+- ring->ready = true;
+- r = amdgpu_ring_test_ring(ring);
+- if (r) {
+- ring->ready = false;
++ r = amdgpu_ring_test_helper(ring);
++ if (r)
+ return r;
+- }
+
+ return 0;
+ }
+@@ -2227,14 +2224,11 @@ static int gfx_v6_0_cp_compute_resume(struct amdgpu_device *adev)
+ WREG32(mmCP_RB2_CNTL, tmp);
+ WREG32(mmCP_RB2_BASE, ring->gpu_addr >> 8);
+
+- adev->gfx.compute_ring[0].ready = false;
+- adev->gfx.compute_ring[1].ready = false;
+
+ for (i = 0; i < 2; i++) {
+- r = amdgpu_ring_test_ring(&adev->gfx.compute_ring[i]);
++ r = amdgpu_ring_test_helper(&adev->gfx.compute_ring[i]);
+ if (r)
+ return r;
+- adev->gfx.compute_ring[i].ready = true;
+ }
+
+ return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+index cfa45d996482..6de6bb18bdfa 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+@@ -2403,7 +2403,7 @@ static void gfx_v7_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
+ } else {
+ WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK));
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++)
+- adev->gfx.gfx_ring[i].ready = false;
++ adev->gfx.gfx_ring[i].sched.ready = false;
+ }
+ udelay(50);
+ }
+@@ -2613,12 +2613,9 @@ static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
+
+ /* start the ring */
+ gfx_v7_0_cp_gfx_start(adev);
+- ring->ready = true;
+- r = amdgpu_ring_test_ring(ring);
+- if (r) {
+- ring->ready = false;
++ r = amdgpu_ring_test_helper(ring);
++ if (r)
+ return r;
+- }
+
+ return 0;
+ }
+@@ -2675,7 +2672,7 @@ static void gfx_v7_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
+ } else {
+ WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
+ for (i = 0; i < adev->gfx.num_compute_rings; i++)
+- adev->gfx.compute_ring[i].ready = false;
++ adev->gfx.compute_ring[i].sched.ready = false;
+ }
+ udelay(50);
+ }
+@@ -3106,10 +3103,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
+
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i];
+- ring->ready = true;
+- r = amdgpu_ring_test_ring(ring);
+- if (r)
+- ring->ready = false;
++ amdgpu_ring_test_helper(ring);
+ }
+
+ return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index 78f1eb2024ce..1b2a05077f4d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -1629,7 +1629,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
+ return 0;
+
+ /* bail if the compute ring is not ready */
+- if (!ring->ready)
++ if (!ring->sched.ready)
+ return 0;
+
+ tmp = RREG32(mmGB_EDC_MODE);
+@@ -4197,7 +4197,7 @@ static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
+ tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
+ tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++)
+- adev->gfx.gfx_ring[i].ready = false;
++ adev->gfx.gfx_ring[i].sched.ready = false;
+ }
+ WREG32(mmCP_ME_CNTL, tmp);
+ udelay(50);
+@@ -4379,10 +4379,8 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
+ /* start the ring */
+ amdgpu_ring_clear_ring(ring);
+ gfx_v8_0_cp_gfx_start(adev);
+- ring->ready = true;
+- r = amdgpu_ring_test_ring(ring);
+- if (r)
+- ring->ready = false;
++ ring->sched.ready = true;
++ r = amdgpu_ring_test_helper(ring);
+
+ return r;
+ }
+@@ -4396,8 +4394,8 @@ static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
+ } else {
+ WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
+ for (i = 0; i < adev->gfx.num_compute_rings; i++)
+- adev->gfx.compute_ring[i].ready = false;
+- adev->gfx.kiq.ring.ready = false;
++ adev->gfx.compute_ring[i].sched.ready = false;
++ adev->gfx.kiq.ring.sched.ready = false;
+ }
+ udelay(50);
+ }
+@@ -4473,11 +4471,9 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
+ amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
+ }
+
+- r = amdgpu_ring_test_ring(kiq_ring);
+- if (r) {
++ r = amdgpu_ring_test_helper(kiq_ring);
++ if (r)
+ DRM_ERROR("KCQ enable failed\n");
+- kiq_ring->ready = false;
+- }
+ return r;
+ }
+
+@@ -4781,7 +4777,7 @@ static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev)
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ ring->mqd_ptr = NULL;
+ amdgpu_bo_unreserve(ring->mqd_obj);
+- ring->ready = true;
++ ring->sched.ready = true;
+ return 0;
+ }
+
+@@ -4818,10 +4814,7 @@ static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev)
+ /* Test KCQs */
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i];
+- ring->ready = true;
+- r = amdgpu_ring_test_ring(ring);
+- if (r)
+- ring->ready = false;
++ r = amdgpu_ring_test_helper(ring);
+ }
+
+ done:
+@@ -4897,7 +4890,7 @@ static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev)
+ amdgpu_ring_write(kiq_ring, 0);
+ amdgpu_ring_write(kiq_ring, 0);
+ }
+- r = amdgpu_ring_test_ring(kiq_ring);
++ r = amdgpu_ring_test_helper(kiq_ring);
+ if (r)
+ DRM_ERROR("KCQ disable failed\n");
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index a19c93d93789..dbae16072ff6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -2555,7 +2555,7 @@ static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
+ tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
+ if (!enable) {
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++)
+- adev->gfx.gfx_ring[i].ready = false;
++ adev->gfx.gfx_ring[i].sched.ready = false;
+ }
+ WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
+ udelay(50);
+@@ -2745,7 +2745,7 @@ static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
+
+ /* start the ring */
+ gfx_v9_0_cp_gfx_start(adev);
+- ring->ready = true;
++ ring->sched.ready = true;
+
+ return 0;
+ }
+@@ -2760,8 +2760,8 @@ static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
+ WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
+ (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
+ for (i = 0; i < adev->gfx.num_compute_rings; i++)
+- adev->gfx.compute_ring[i].ready = false;
+- adev->gfx.kiq.ring.ready = false;
++ adev->gfx.compute_ring[i].sched.ready = false;
++ adev->gfx.kiq.ring.sched.ready = false;
+ }
+ udelay(50);
+ }
+@@ -2884,11 +2884,9 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
+ amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
+ }
+
+- r = amdgpu_ring_test_ring(kiq_ring);
+- if (r) {
++ r = amdgpu_ring_test_helper(kiq_ring);
++ if (r)
+ DRM_ERROR("KCQ enable failed\n");
+- kiq_ring->ready = false;
+- }
+
+ return r;
+ }
+@@ -3267,7 +3265,7 @@ static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ ring->mqd_ptr = NULL;
+ amdgpu_bo_unreserve(ring->mqd_obj);
+- ring->ready = true;
++ ring->sched.ready = true;
+ return 0;
+ }
+
+@@ -3332,19 +3330,13 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
+ return r;
+
+ ring = &adev->gfx.gfx_ring[0];
+- r = amdgpu_ring_test_ring(ring);
+- if (r) {
+- ring->ready = false;
++ r = amdgpu_ring_test_helper(ring);
++ if (r)
+ return r;
+- }
+
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i];
+-
+- ring->ready = true;
+- r = amdgpu_ring_test_ring(ring);
+- if (r)
+- ring->ready = false;
++ amdgpu_ring_test_helper(ring);
+ }
+
+ gfx_v9_0_enable_gui_idle_interrupt(adev, true);
+@@ -3409,7 +3401,7 @@ static int gfx_v9_0_kcq_disable(struct amdgpu_device *adev)
+ amdgpu_ring_write(kiq_ring, 0);
+ amdgpu_ring_write(kiq_ring, 0);
+ }
+- r = amdgpu_ring_test_ring(kiq_ring);
++ r = amdgpu_ring_test_helper(kiq_ring);
+ if (r)
+ DRM_ERROR("KCQ disable failed\n");
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index f35d7a554ad5..56fd3d4ba8dd 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -381,7 +381,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
+ struct amdgpu_vmhub *hub = &adev->vmhub[i];
+ u32 tmp = gmc_v9_0_get_invalidate_req(vmid);
+
+- if (adev->gfx.kiq.ring.ready &&
++ if (adev->gfx.kiq.ring.sched.ready &&
+ (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
+ !adev->in_gpu_reset) {
+ r = amdgpu_kiq_reg_write_reg_wait(adev, hub->vm_inv_eng0_req + eng,
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+index bedbd5f296c5..fa2f6bea1d60 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+@@ -349,8 +349,8 @@ static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
+ WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
+ }
+- sdma0->ready = false;
+- sdma1->ready = false;
++ sdma0->sched.ready = false;
++ sdma1->sched.ready = false;
+ }
+
+ /**
+@@ -471,17 +471,15 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
+ /* enable DMA IBs */
+ WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
+
+- ring->ready = true;
++ ring->sched.ready = true;
+ }
+
+ sdma_v2_4_enable(adev, true);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ ring = &adev->sdma.instance[i].ring;
+- r = amdgpu_ring_test_ring(ring);
+- if (r) {
+- ring->ready = false;
++ r = amdgpu_ring_test_helper(ring);
++ if (r)
+ return r;
+- }
+
+ if (adev->mman.buffer_funcs_ring == ring)
+ amdgpu_ttm_set_buffer_funcs_status(adev, true);
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+index 415968dc6c87..942fe3696ef0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+@@ -523,8 +523,8 @@ static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
+ WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
+ }
+- sdma0->ready = false;
+- sdma1->ready = false;
++ sdma0->sched.ready = false;
++ sdma1->sched.ready = false;
+ }
+
+ /**
+@@ -739,7 +739,7 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
+ /* enable DMA IBs */
+ WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
+
+- ring->ready = true;
++ ring->sched.ready = true;
+ }
+
+ /* unhalt the MEs */
+@@ -749,11 +749,9 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ ring = &adev->sdma.instance[i].ring;
+- r = amdgpu_ring_test_ring(ring);
+- if (r) {
+- ring->ready = false;
++ r = amdgpu_ring_test_helper(ring);
++ if (r)
+ return r;
+- }
+
+ if (adev->mman.buffer_funcs_ring == ring)
+ amdgpu_ttm_set_buffer_funcs_status(adev, true);
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+index 88d93430dfb1..65312897b8ba 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+@@ -634,8 +634,8 @@ static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
+ WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
+ }
+
+- sdma0->ready = false;
+- sdma1->ready = false;
++ sdma0->sched.ready = false;
++ sdma1->sched.ready = false;
+ }
+
+ /**
+@@ -675,8 +675,8 @@ static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
+ WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
+ }
+
+- sdma0->ready = false;
+- sdma1->ready = false;
++ sdma0->sched.ready = false;
++ sdma1->sched.ready = false;
+ }
+
+ /**
+@@ -863,7 +863,7 @@ static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i)
+ /* enable DMA IBs */
+ WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
+
+- ring->ready = true;
++ ring->sched.ready = true;
+ }
+
+ /**
+@@ -956,7 +956,7 @@ static void sdma_v4_0_page_resume(struct amdgpu_device *adev, unsigned int i)
+ /* enable DMA IBs */
+ WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
+
+- ring->ready = true;
++ ring->sched.ready = true;
+ }
+
+ static void
+@@ -1144,20 +1144,16 @@ static int sdma_v4_0_start(struct amdgpu_device *adev)
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ ring = &adev->sdma.instance[i].ring;
+
+- r = amdgpu_ring_test_ring(ring);
+- if (r) {
+- ring->ready = false;
++ r = amdgpu_ring_test_helper(ring);
++ if (r)
+ return r;
+- }
+
+ if (adev->sdma.has_page_queue) {
+ struct amdgpu_ring *page = &adev->sdma.instance[i].page;
+
+- r = amdgpu_ring_test_ring(page);
+- if (r) {
+- page->ready = false;
++ r = amdgpu_ring_test_helper(page);
++ if (r)
+ return r;
+- }
+ }
+
+ if (adev->mman.buffer_funcs_ring == ring)
+diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
+index d9b27d7017dd..05ce1ca4c789 100644
+--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
++++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
+@@ -122,7 +122,7 @@ static void si_dma_stop(struct amdgpu_device *adev)
+
+ if (adev->mman.buffer_funcs_ring == ring)
+ amdgpu_ttm_set_buffer_funcs_status(adev, false);
+- ring->ready = false;
++ ring->sched.ready = false;
+ }
+ }
+
+@@ -175,13 +175,11 @@ static int si_dma_start(struct amdgpu_device *adev)
+ WREG32(DMA_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2);
+ WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);
+
+- ring->ready = true;
++ ring->sched.ready = true;
+
+- r = amdgpu_ring_test_ring(ring);
+- if (r) {
+- ring->ready = false;
++ r = amdgpu_ring_test_helper(ring);
++ if (r)
+ return r;
+- }
+
+ if (adev->mman.buffer_funcs_ring == ring)
+ amdgpu_ttm_set_buffer_funcs_status(adev, true);
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+index 1fc17bf39fed..8cabe982a61d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+@@ -162,12 +162,9 @@ static int uvd_v4_2_hw_init(void *handle)
+ uvd_v4_2_enable_mgcg(adev, true);
+ amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
+
+- ring->ready = true;
+- r = amdgpu_ring_test_ring(ring);
+- if (r) {
+- ring->ready = false;
++ r = amdgpu_ring_test_helper(ring);
++ if (r)
+ goto done;
+- }
+
+ r = amdgpu_ring_alloc(ring, 10);
+ if (r) {
+@@ -218,7 +215,7 @@ static int uvd_v4_2_hw_fini(void *handle)
+ if (RREG32(mmUVD_STATUS) != 0)
+ uvd_v4_2_stop(adev);
+
+- ring->ready = false;
++ ring->sched.ready = false;
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+index fde6ad5ac9ab..56b02ee543f9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+@@ -158,12 +158,9 @@ static int uvd_v5_0_hw_init(void *handle)
+ uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
+ uvd_v5_0_enable_mgcg(adev, true);
+
+- ring->ready = true;
+- r = amdgpu_ring_test_ring(ring);
+- if (r) {
+- ring->ready = false;
++ r = amdgpu_ring_test_helper(ring);
++ if (r)
+ goto done;
+- }
+
+ r = amdgpu_ring_alloc(ring, 10);
+ if (r) {
+@@ -215,7 +212,7 @@ static int uvd_v5_0_hw_fini(void *handle)
+ if (RREG32(mmUVD_STATUS) != 0)
+ uvd_v5_0_stop(adev);
+
+- ring->ready = false;
++ ring->sched.ready = false;
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+index 7a5b40275e8e..3027607a187c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+@@ -476,12 +476,9 @@ static int uvd_v6_0_hw_init(void *handle)
+ uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
+ uvd_v6_0_enable_mgcg(adev, true);
+
+- ring->ready = true;
+- r = amdgpu_ring_test_ring(ring);
+- if (r) {
+- ring->ready = false;
++ r = amdgpu_ring_test_helper(ring);
++ if (r)
+ goto done;
+- }
+
+ r = amdgpu_ring_alloc(ring, 10);
+ if (r) {
+@@ -513,12 +510,9 @@ static int uvd_v6_0_hw_init(void *handle)
+ if (uvd_v6_0_enc_support(adev)) {
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
+ ring = &adev->uvd.inst->ring_enc[i];
+- ring->ready = true;
+- r = amdgpu_ring_test_ring(ring);
+- if (r) {
+- ring->ready = false;
++ r = amdgpu_ring_test_helper(ring);
++ if (r)
+ goto done;
+- }
+ }
+ }
+
+@@ -548,7 +542,7 @@ static int uvd_v6_0_hw_fini(void *handle)
+ if (RREG32(mmUVD_STATUS) != 0)
+ uvd_v6_0_stop(adev);
+
+- ring->ready = false;
++ ring->sched.ready = false;
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+index 58b39afcfb86..76a7fbef532a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+@@ -540,12 +540,9 @@ static int uvd_v7_0_hw_init(void *handle)
+ ring = &adev->uvd.inst[j].ring;
+
+ if (!amdgpu_sriov_vf(adev)) {
+- ring->ready = true;
+- r = amdgpu_ring_test_ring(ring);
+- if (r) {
+- ring->ready = false;
++ r = amdgpu_ring_test_helper(ring);
++ if (r)
+ goto done;
+- }
+
+ r = amdgpu_ring_alloc(ring, 10);
+ if (r) {
+@@ -582,12 +579,9 @@ static int uvd_v7_0_hw_init(void *handle)
+
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
+ ring = &adev->uvd.inst[j].ring_enc[i];
+- ring->ready = true;
+- r = amdgpu_ring_test_ring(ring);
+- if (r) {
+- ring->ready = false;
++ r = amdgpu_ring_test_helper(ring);
++ if (r)
+ goto done;
+- }
+ }
+ }
+ done:
+@@ -619,7 +613,7 @@ static int uvd_v7_0_hw_fini(void *handle)
+ for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
+- adev->uvd.inst[i].ring.ready = false;
++ adev->uvd.inst[i].ring.sched.ready = false;
+ }
+
+ return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+index ea28828360d3..bed78a778e3f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+@@ -463,15 +463,11 @@ static int vce_v2_0_hw_init(void *handle)
+
+ amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
+ vce_v2_0_enable_mgcg(adev, true, false);
+- for (i = 0; i < adev->vce.num_rings; i++)
+- adev->vce.ring[i].ready = false;
+
+ for (i = 0; i < adev->vce.num_rings; i++) {
+- r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
++ r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
+ if (r)
+ return r;
+- else
+- adev->vce.ring[i].ready = true;
+ }
+
+ DRM_INFO("VCE initialized successfully.\n");
+diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+index 6dbd39730070..2b1a5a793942 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+@@ -474,15 +474,10 @@ static int vce_v3_0_hw_init(void *handle)
+
+ amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
+
+- for (i = 0; i < adev->vce.num_rings; i++)
+- adev->vce.ring[i].ready = false;
+-
+ for (i = 0; i < adev->vce.num_rings; i++) {
+- r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
++ r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
+ if (r)
+ return r;
+- else
+- adev->vce.ring[i].ready = true;
+ }
+
+ DRM_INFO("VCE initialized successfully.\n");
+diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+index 1c9471890bf7..65b71fc2f7b9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+@@ -519,15 +519,10 @@ static int vce_v4_0_hw_init(void *handle)
+ if (r)
+ return r;
+
+- for (i = 0; i < adev->vce.num_rings; i++)
+- adev->vce.ring[i].ready = false;
+-
+ for (i = 0; i < adev->vce.num_rings; i++) {
+- r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
++ r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
+ if (r)
+ return r;
+- else
+- adev->vce.ring[i].ready = true;
+ }
+
+ DRM_INFO("VCE initialized successfully.\n");
+@@ -549,7 +544,7 @@ static int vce_v4_0_hw_fini(void *handle)
+ }
+
+ for (i = 0; i < adev->vce.num_rings; i++)
+- adev->vce.ring[i].ready = false;
++ adev->vce.ring[i].sched.ready = false;
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index eae90922fdbe..29628f60d50c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -176,30 +176,22 @@ static int vcn_v1_0_hw_init(void *handle)
+ struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+ int i, r;
+
+- ring->ready = true;
+- r = amdgpu_ring_test_ring(ring);
+- if (r) {
+- ring->ready = false;
++ r = amdgpu_ring_test_helper(ring);
++ if (r)
+ goto done;
+- }
+
+ for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ ring = &adev->vcn.ring_enc[i];
+- ring->ready = true;
+- r = amdgpu_ring_test_ring(ring);
+- if (r) {
+- ring->ready = false;
++ ring->sched.ready = true;
++ r = amdgpu_ring_test_helper(ring);
++ if (r)
+ goto done;
+- }
+ }
+
+ ring = &adev->vcn.ring_jpeg;
+- ring->ready = true;
+- r = amdgpu_ring_test_ring(ring);
+- if (r) {
+- ring->ready = false;
++ r = amdgpu_ring_test_helper(ring);
++ if (r)
+ goto done;
+- }
+
+ done:
+ if (!r)
+@@ -224,7 +216,7 @@ static int vcn_v1_0_hw_fini(void *handle)
+ if (RREG32_SOC15(VCN, 0, mmUVD_STATUS))
+ vcn_v1_0_stop(adev);
+
+- ring->ready = false;
++ ring->sched.ready = false;
+
+ return 0;
+ }
+--
+2.17.1
+