Diffstat (limited to 'common/recipes-kernel/linux/files/0344-drm-amdgpu-wait-engine-idle-before-vm-flush-for-sdma.patch')
-rw-r--r--  common/recipes-kernel/linux/files/0344-drm-amdgpu-wait-engine-idle-before-vm-flush-for-sdma.patch  98
1 file changed, 98 insertions(+), 0 deletions(-)
diff --git a/common/recipes-kernel/linux/files/0344-drm-amdgpu-wait-engine-idle-before-vm-flush-for-sdma.patch b/common/recipes-kernel/linux/files/0344-drm-amdgpu-wait-engine-idle-before-vm-flush-for-sdma.patch
new file mode 100644
index 00000000..fd3c0095
--- /dev/null
+++ b/common/recipes-kernel/linux/files/0344-drm-amdgpu-wait-engine-idle-before-vm-flush-for-sdma.patch
@@ -0,0 +1,98 @@
+From dd56d4223732ee78c20fbcebbf86e82c354d3483 Mon Sep 17 00:00:00 2001
+From: Chunming Zhou <David1.Zhou@amd.com>
+Date: Wed, 2 Mar 2016 11:30:31 +0800
+Subject: [PATCH 0344/1110] drm/amdgpu: wait engine idle before vm flush for
+ sdma
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/cik_sdma.c  | 14 ++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 15 +++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 15 +++++++++++++++
+ 3 files changed, 44 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+index db10010..f751dc3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
++++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+@@ -829,6 +829,20 @@ static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ {
+ 	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
+ 			  SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */
++	uint32_t seq = ring->fence_drv.sync_seq;
++	uint64_t addr = ring->fence_drv.gpu_addr;
++
++	/* wait for idle */
++	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0,
++				SDMA_POLL_REG_MEM_EXTRA_OP(0) |
++				SDMA_POLL_REG_MEM_EXTRA_FUNC(3) | /* equal */
++				SDMA_POLL_REG_MEM_EXTRA_M));
++	amdgpu_ring_write(ring, addr & 0xfffffffc);
++	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
++	amdgpu_ring_write(ring, seq); /* reference */
++	amdgpu_ring_write(ring, 0xfffffff); /* mask */
++	amdgpu_ring_write(ring, (0xfff << 16) | 4); /* retry count, poll interval */
++
+ 
+ 	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ 	if (vm_id < 8) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+index 1562291..60c2721 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+@@ -885,6 +885,21 @@ static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
+ static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ 					 unsigned vm_id, uint64_t pd_addr)
+ {
++	uint32_t seq = ring->fence_drv.sync_seq;
++	uint64_t addr = ring->fence_drv.gpu_addr;
++
++	/* wait for idle */
++	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
++			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
++			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
++			  SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
++	amdgpu_ring_write(ring, addr & 0xfffffffc);
++	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
++	amdgpu_ring_write(ring, seq); /* reference */
++	amdgpu_ring_write(ring, 0xfffffff); /* mask */
++	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
++			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
++
+ 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+ 			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+ 	if (vm_id < 8) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+index d748a3a..d585ce2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+@@ -1035,6 +1035,21 @@ static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
+ static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ 					 unsigned vm_id, uint64_t pd_addr)
+ {
++	uint32_t seq = ring->fence_drv.sync_seq;
++	uint64_t addr = ring->fence_drv.gpu_addr;
++
++	/* wait for idle */
++	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
++			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
++			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
++			  SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
++	amdgpu_ring_write(ring, addr & 0xfffffffc);
++	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
++	amdgpu_ring_write(ring, seq); /* reference */
++	amdgpu_ring_write(ring, 0xfffffff); /* mask */
++	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
++			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
++
+ 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+ 			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+ 	if (vm_id < 8) {
+--
+2.7.4
+