aboutsummaryrefslogtreecommitdiffstats
path: root/common/recipes-kernel/linux/linux-yocto-4.19.8/1666-drm-amdgpu-new-VM-update-backends.patch
diff options
context:
space:
mode:
Diffstat (limited to 'common/recipes-kernel/linux/linux-yocto-4.19.8/1666-drm-amdgpu-new-VM-update-backends.patch')
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.19.8/1666-drm-amdgpu-new-VM-update-backends.patch520
1 files changed, 520 insertions, 0 deletions
diff --git a/common/recipes-kernel/linux/linux-yocto-4.19.8/1666-drm-amdgpu-new-VM-update-backends.patch b/common/recipes-kernel/linux/linux-yocto-4.19.8/1666-drm-amdgpu-new-VM-update-backends.patch
new file mode 100644
index 00000000..a82fc762
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.19.8/1666-drm-amdgpu-new-VM-update-backends.patch
@@ -0,0 +1,520 @@
+From 8d19c592cacbc608fa56410f8a2f0ffb54073db7 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Mon, 18 Mar 2019 13:16:03 +0100
+Subject: [PATCH 1666/2940] drm/amdgpu: new VM update backends
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Separate out all functions for SDMA and CPU based page table
+updates into separate backends.
+
+This way we can keep most of the complexity of those from the
+core VM code.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Chaudhary Amit Kumar <Chaudharyamit.Kumar@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/Makefile | 3 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 7 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 30 ++-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c | 116 +++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c | 248 ++++++++++++++++++++
+ 5 files changed, 401 insertions(+), 3 deletions(-)
+ create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+ create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
+index d57415bebceb..bdf6f7f2d3a7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/Makefile
++++ b/drivers/gpu/drm/amd/amdgpu/Makefile
+@@ -52,7 +52,8 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
+ amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
+ amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
+ amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
+- amdgpu_sem.o amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o
++ amdgpu_sem.o amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o \
++ amdgpu_vm_cpu.o amdgpu_vm_sdma.o
+
+ # add asic specific block
+ amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index d9596b4655b4..fb3efd94bfd7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -1218,7 +1218,7 @@ static void amdgpu_vm_do_copy_ptes(struct amdgpu_vm_update_params *params,
+ * Returns:
+ * The pointer for the page table entry.
+ */
+-static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
++uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
+ {
+ uint64_t result;
+
+@@ -2968,6 +2968,11 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ vm->use_cpu_for_update ? "CPU" : "SDMA");
+ WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
+ "CPU update of VM recommended only for large BAR system\n");
++
++ if (vm->use_cpu_for_update)
++ vm->update_funcs = &amdgpu_vm_cpu_funcs;
++ else
++ vm->update_funcs = &amdgpu_vm_sdma_funcs;
+ vm->last_update = NULL;
+
+ amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, &bp);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+index d53e9edaa2b9..67c85759e9ee 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+@@ -203,11 +203,21 @@ struct amdgpu_vm_update_params {
+ */
+ uint64_t src;
+
++ /**
++	 * @job: job to be used for hw submission
++ */
++ struct amdgpu_job *job;
++
+ /**
+ * @ib: indirect buffer to fill with commands
+ */
+ struct amdgpu_ib *ib;
+
++ /**
++ * @num_dw_left: number of dw left for the IB
++ */
++ unsigned int num_dw_left;
++
+ /**
+ * @func: Function which actually does the update
+ */
+@@ -217,6 +227,17 @@ struct amdgpu_vm_update_params {
+ uint64_t flags);
+ };
+
++struct amdgpu_vm_update_funcs {
++
++ int (*prepare)(struct amdgpu_vm_update_params *p, void * owner,
++ struct dma_fence *exclusive);
++ int (*update)(struct amdgpu_vm_update_params *p,
++ struct amdgpu_bo *bo, uint64_t pe, uint64_t addr,
++ unsigned count, uint32_t incr, uint64_t flags);
++ int (*commit)(struct amdgpu_vm_update_params *p,
++ struct dma_fence **fence);
++};
++
+ struct amdgpu_vm {
+ /* tree of virtual addresses mapped */
+ struct rb_root_cached va;
+@@ -252,7 +273,10 @@ struct amdgpu_vm {
+ struct amdgpu_vmid *reserved_vmid[AMDGPU_MAX_VMHUBS];
+
+ /* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
+- bool use_cpu_for_update;
++ bool use_cpu_for_update;
++
++ /* Functions to use for VM table updates */
++ const struct amdgpu_vm_update_funcs *update_funcs;
+
+ /* Flag to indicate ATS support from PTE for GFX9 */
+ bool pte_support_ats;
+@@ -319,6 +343,9 @@ struct amdgpu_vm_manager {
+ #define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
+ #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
+
++extern const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs;
++extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs;
++
+ void amdgpu_vm_manager_init(struct amdgpu_device *adev);
+ void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
+
+@@ -347,6 +374,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
+ bool clear);
+ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
+ struct amdgpu_bo *bo, bool evicted);
++uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
+ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo);
+ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+new file mode 100644
+index 000000000000..9d53982021de
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+@@ -0,0 +1,116 @@
++/*
++ * Copyright 2019 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "amdgpu_vm.h"
++#include "amdgpu_object.h"
++#include "amdgpu_trace.h"
++
++/**
++ * amdgpu_vm_cpu_prepare - prepare page table update with the CPU
++ *
++ * @p: see amdgpu_vm_update_params definition
++ * @owner: owner we need to sync to
++ * @exclusive: exclusive move fence we need to sync to
++ *
++ * Returns:
++ * Negative errno, 0 for success.
++ */
++static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, void *owner,
++ struct dma_fence *exclusive)
++{
++ int r;
++
++ /* Wait for PT BOs to be idle. PTs share the same resv. object
++ * as the root PD BO
++ */
++ r = amdgpu_bo_sync_wait(p->vm->root.base.bo, owner, true);
++ if (unlikely(r))
++ return r;
++
++ /* Wait for any BO move to be completed */
++ if (exclusive) {
++ r = dma_fence_wait(exclusive, true);
++ if (unlikely(r))
++ return r;
++ }
++
++ return 0;
++}
++
++/**
++ * amdgpu_vm_cpu_update - helper to update page tables via CPU
++ *
++ * @p: see amdgpu_vm_update_params definition
++ * @bo: PD/PT to update
++ * @pe: kmap addr of the page entry
++ * @addr: dst addr to write into pe
++ * @count: number of page entries to update
++ * @incr: increase next addr by incr bytes
++ * @flags: hw access flags
++ *
++ * Write count number of PT/PD entries directly.
++ */
++static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
++ struct amdgpu_bo *bo, uint64_t pe,
++ uint64_t addr, unsigned count, uint32_t incr,
++ uint64_t flags)
++{
++ unsigned int i;
++ uint64_t value;
++
++ pe += (unsigned long)amdgpu_bo_kptr(bo);
++
++ trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
++
++ for (i = 0; i < count; i++) {
++ value = p->pages_addr ?
++ amdgpu_vm_map_gart(p->pages_addr, addr) :
++ addr;
++ amdgpu_gmc_set_pte_pde(p->adev, (void *)(uintptr_t)pe,
++ i, value, flags);
++ addr += incr;
++ }
++ return 0;
++}
++
++/**
++ * amdgpu_vm_cpu_commit - commit page table update to the HW
++ *
++ * @p: see amdgpu_vm_update_params definition
++ * @fence: unused
++ *
++ * Make sure that the hardware sees the page table updates.
++ */
++static int amdgpu_vm_cpu_commit(struct amdgpu_vm_update_params *p,
++ struct dma_fence **fence)
++{
++ /* Flush HDP */
++ mb();
++ amdgpu_asic_flush_hdp(p->adev, NULL);
++ return 0;
++}
++
++const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs = {
++ .prepare = amdgpu_vm_cpu_prepare,
++ .update = amdgpu_vm_cpu_update,
++ .commit = amdgpu_vm_cpu_commit
++};
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+new file mode 100644
+index 000000000000..e4bacdb44c68
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+@@ -0,0 +1,248 @@
++/*
++ * Copyright 2019 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "amdgpu_vm.h"
++#include "amdgpu_job.h"
++#include "amdgpu_object.h"
++#include "amdgpu_trace.h"
++
++#define AMDGPU_VM_SDMA_MIN_NUM_DW 256u
++#define AMDGPU_VM_SDMA_MAX_NUM_DW (16u * 1024u)
++
++/**
++ * amdgpu_vm_sdma_prepare - prepare SDMA command submission
++ *
++ * @p: see amdgpu_vm_update_params definition
++ * @owner: owner we need to sync to
++ * @exclusive: exclusive move fence we need to sync to
++ *
++ * Returns:
++ * Negative errno, 0 for success.
++ */
++static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
++ void *owner, struct dma_fence *exclusive)
++{
++ struct amdgpu_bo *root = p->vm->root.base.bo;
++ unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
++ int r;
++
++ r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job);
++ if (r)
++ return r;
++
++ r = amdgpu_sync_fence(p->adev, &p->job->sync, exclusive, false);
++ if (r)
++ return r;
++
++ r = amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.resv,
++ owner, false);
++ if (r)
++ return r;
++
++ p->num_dw_left = ndw;
++ p->ib = &p->job->ibs[0];
++ return 0;
++}
++
++/**
++ * amdgpu_vm_sdma_commit - commit SDMA command submission
++ *
++ * @p: see amdgpu_vm_update_params definition
++ * @fence: resulting fence
++ *
++ * Returns:
++ * Negative errno, 0 for success.
++ */
++static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
++ struct dma_fence **fence)
++{
++ struct amdgpu_bo *root = p->vm->root.base.bo;
++ struct amdgpu_ring *ring;
++ struct dma_fence *f;
++ int r;
++
++ ring = container_of(p->vm->entity.rq->sched, struct amdgpu_ring, sched);
++
++ WARN_ON(p->ib->length_dw == 0);
++ amdgpu_ring_pad_ib(ring, p->ib);
++ WARN_ON(p->ib->length_dw > p->num_dw_left);
++ r = amdgpu_job_submit(p->job, &p->vm->entity,
++ AMDGPU_FENCE_OWNER_VM, &f);
++ if (r)
++ goto error;
++
++ amdgpu_bo_fence(root, f, true);
++ if (fence)
++ swap(*fence, f);
++ dma_fence_put(f);
++ return 0;
++
++error:
++ amdgpu_job_free(p->job);
++ return r;
++}
++
++
++/**
++ * amdgpu_vm_sdma_copy_ptes - copy the PTEs from mapping
++ *
++ * @p: see amdgpu_vm_update_params definition
++ * @bo: PD/PT to update
++ * @pe: addr of the page entry
++ * @count: number of page entries to copy
++ *
++ * Traces the parameters and calls the DMA function to copy the PTEs.
++ */
++static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
++ struct amdgpu_bo *bo, uint64_t pe,
++ unsigned count)
++{
++ uint64_t src = p->ib->gpu_addr;
++
++ src += p->num_dw_left * 4;
++
++ pe += amdgpu_bo_gpu_offset(bo);
++ trace_amdgpu_vm_copy_ptes(pe, src, count);
++
++ amdgpu_vm_copy_pte(p->adev, p->ib, pe, src, count);
++}
++
++/**
++ * amdgpu_vm_sdma_set_ptes - helper to call the right asic function
++ *
++ * @p: see amdgpu_vm_update_params definition
++ * @bo: PD/PT to update
++ * @pe: addr of the page entry
++ * @addr: dst addr to write into pe
++ * @count: number of page entries to update
++ * @incr: increase next addr by incr bytes
++ * @flags: hw access flags
++ *
++ * Traces the parameters and calls the right asic functions
++ * to setup the page table using the DMA.
++ */
++static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
++ struct amdgpu_bo *bo, uint64_t pe,
++ uint64_t addr, unsigned count,
++ uint32_t incr, uint64_t flags)
++{
++ pe += amdgpu_bo_gpu_offset(bo);
++ trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
++ if (count < 3) {
++ amdgpu_vm_write_pte(p->adev, p->ib, pe, addr | flags,
++ count, incr);
++ } else {
++ amdgpu_vm_set_pte_pde(p->adev, p->ib, pe, addr,
++ count, incr, flags);
++ }
++}
++
++/**
++ * amdgpu_vm_sdma_update - execute VM update
++ *
++ * @p: see amdgpu_vm_update_params definition
++ * @bo: PD/PT to update
++ * @pe: addr of the page entry
++ * @addr: dst addr to write into pe
++ * @count: number of page entries to update
++ * @incr: increase next addr by incr bytes
++ * @flags: hw access flags
++ *
++ * Reserve space in the IB, setup mapping buffer on demand and write commands to
++ * the IB.
++ */
++static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
++ struct amdgpu_bo *bo, uint64_t pe,
++ uint64_t addr, unsigned count, uint32_t incr,
++ uint64_t flags)
++{
++ unsigned int i, ndw, nptes;
++ uint64_t *pte;
++ int r;
++
++ do {
++ ndw = p->num_dw_left;
++ ndw -= p->ib->length_dw;
++
++ if (ndw < 32) {
++ r = amdgpu_vm_sdma_commit(p, NULL);
++ if (r)
++ return r;
++
++ /* estimate how many dw we need */
++ ndw = 32;
++ if (p->pages_addr)
++ ndw += count * 2;
++ ndw = max(ndw, AMDGPU_VM_SDMA_MIN_NUM_DW);
++ ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);
++
++ r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job);
++ if (r)
++ return r;
++
++ p->num_dw_left = ndw;
++ p->ib = &p->job->ibs[0];
++ }
++
++ if (!p->pages_addr) {
++ /* set page commands needed */
++ if (bo->shadow)
++ amdgpu_vm_sdma_set_ptes(p, bo->shadow, pe, addr,
++ count, incr, flags);
++ amdgpu_vm_sdma_set_ptes(p, bo, pe, addr, count,
++ incr, flags);
++ return 0;
++ }
++
++ /* copy commands needed */
++ ndw -= p->adev->vm_manager.vm_pte_funcs->copy_pte_num_dw *
++ (bo->shadow ? 2 : 1);
++
++ /* for padding */
++ ndw -= 7;
++
++ nptes = min(count, ndw / 2);
++
++ /* Put the PTEs at the end of the IB. */
++ p->num_dw_left -= nptes * 2;
++ pte = (uint64_t *)&(p->ib->ptr[p->num_dw_left]);
++ for (i = 0; i < nptes; ++i, addr += incr) {
++ pte[i] = amdgpu_vm_map_gart(p->pages_addr, addr);
++ pte[i] |= flags;
++ }
++
++ if (bo->shadow)
++ amdgpu_vm_sdma_copy_ptes(p, bo->shadow, pe, nptes);
++ amdgpu_vm_sdma_copy_ptes(p, bo, pe, nptes);
++
++ pe += nptes * 8;
++ count -= nptes;
++ } while (count);
++
++ return 0;
++}
++
++const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs = {
++ .prepare = amdgpu_vm_sdma_prepare,
++ .update = amdgpu_vm_sdma_update,
++ .commit = amdgpu_vm_sdma_commit
++};
+--
+2.17.1
+