Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2305-drm-amdgpu-gfx10-fix-resume-failure-when-enabling-as.patch')
-rw-r--r--  meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2305-drm-amdgpu-gfx10-fix-resume-failure-when-enabling-as.patch | 62
1 file changed, 62 insertions(+), 0 deletions(-)
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2305-drm-amdgpu-gfx10-fix-resume-failure-when-enabling-as.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2305-drm-amdgpu-gfx10-fix-resume-failure-when-enabling-as.patch
new file mode 100644
index 00000000..41c2fed4
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2305-drm-amdgpu-gfx10-fix-resume-failure-when-enabling-as.patch
@@ -0,0 +1,62 @@
+From c2967b5b81b81b842e67c77b0f0cd4a5a1d648d4 Mon Sep 17 00:00:00 2001
+From: Xiaojie Yuan <xiaojie.yuan@amd.com>
+Date: Mon, 1 Apr 2019 21:44:21 +0800
+Subject: [PATCH 2305/2940] drm/amdgpu/gfx10: fix resume failure when enabling
+ async gfx ring
+
+'adev->in_suspend' code path is missing in gfx_v10_0_gfx_init_queue()
+
+Signed-off-by: Xiaojie Yuan <xiaojie.yuan@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 26 ++++++++++++++------------
+ 1 file changed, 14 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+index 4e7f64d91d12..9d162d269aca 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+@@ -2903,7 +2903,19 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
+ struct amdgpu_device *adev = ring->adev;
+ struct v10_gfx_mqd *mqd = ring->mqd_ptr;
+
+- if (adev->in_gpu_reset) {
++ if (!adev->in_gpu_reset && !adev->in_suspend) {
++ memset((void *)mqd, 0, sizeof(*mqd));
++ mutex_lock(&adev->srbm_mutex);
++ nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
++ gfx_v10_0_gfx_mqd_init(ring);
++#ifdef BRING_UP_DEBUG
++ gfx_v10_0_gfx_queue_init_register(ring);
++#endif
++ nv_grbm_select(adev, 0, 0, 0, 0);
++ mutex_unlock(&adev->srbm_mutex);
++ if (adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS])
++ memcpy(adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS], mqd, sizeof(*mqd));
++ } else if (adev->in_gpu_reset) {
+ /* reset mqd with the backup copy */
+ if (adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS])
+ memcpy(mqd, adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS], sizeof(*mqd));
+@@ -2918,17 +2930,7 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
+ mutex_unlock(&adev->srbm_mutex);
+ #endif
+ } else {
+- memset((void *)mqd, 0, sizeof(*mqd));
+- mutex_lock(&adev->srbm_mutex);
+- nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+- gfx_v10_0_gfx_mqd_init(ring);
+-#ifdef BRING_UP_DEBUG
+- gfx_v10_0_gfx_queue_init_register(ring);
+-#endif
+- nv_grbm_select(adev, 0, 0, 0, 0);
+- mutex_unlock(&adev->srbm_mutex);
+- if (adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS])
+- memcpy(adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS], mqd, sizeof(*mqd));
++ amdgpu_ring_clear_ring(ring);
+ }
+
+ return 0;
+--
+2.17.1
+
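
For reference, below is a condensed sketch of how gfx_v10_0_gfx_init_queue() branches once this patch is applied: the first-init path builds the MQD and saves a backup, the GPU-reset path restores it from that backup, and the resume-from-suspend path (the case the original code missed) keeps the saved MQD and only clears the ring. This is a paraphrase of the hunks above for readability, not verbatim kernel source; the BRING_UP_DEBUG register setup and the reset-path details that fall between the two hunks are elided.

/*
 * Sketch (condensed from the hunks above, not a drop-in replacement
 * for drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c).
 */
static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct v10_gfx_mqd *mqd = ring->mqd_ptr;

	if (!adev->in_gpu_reset && !adev->in_suspend) {
		/* first init: build the MQD under srbm_mutex and keep a backup copy */
		memset((void *)mqd, 0, sizeof(*mqd));
		mutex_lock(&adev->srbm_mutex);
		nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		gfx_v10_0_gfx_mqd_init(ring);
		nv_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
		if (adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS])
			memcpy(adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS],
			       mqd, sizeof(*mqd));
	} else if (adev->in_gpu_reset) {
		/* GPU reset: restore the MQD from the backup, then reset the ring
		 * (code between the two hunks is elided in this sketch) */
		if (adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS])
			memcpy(mqd, adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS],
			       sizeof(*mqd));
		amdgpu_ring_clear_ring(ring);
	} else {
		/* resume from suspend (newly handled): the MQD contents are
		 * preserved, so only the ring buffer needs to be cleared */
		amdgpu_ring_clear_ring(ring);
	}

	return 0;
}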