From a57d5a6bb769c3bcd51cca8010ac0bbfa85949cc Mon Sep 17 00:00:00 2001
From: Flora Cui
Date: Mon, 10 Oct 2016 17:41:10 +0800
Subject: [PATCH 0779/4131] drm/amd/amdgpu: adapt dgma to the new vram_mgr

Two points are worth noting about this patch:

1. It has logic conflicts with commit 6e97228 ("drm/amdgpu: remove the
   GART copy hack"), so it has been adapted to fit that commit.
2. Patch 12c3bd8 ("drm/amdgpu: update gtt flags for dgma imported bo")
   has been squashed into this one.

Change-Id: Id974e91410107e53a6ae7501855aed31cdf869d4
Signed-off-by: Flora Cui
Reviewed-by: Hawking Zhang

Conflicts:
	drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 35 +++++++++++++++++++++++++---------
 1 file changed, 26 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index f94ca9d..531a2a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1652,7 +1652,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
  * @vm: requested vm
  * @mapping: mapped range and flags to use for the update
  * @flags: HW flags for the mapping
- * @nodes: array of drm_mm_nodes with the MC addresses
+ * @mem: ttm_mem_reg holding the array of drm_mm_nodes with the MC addresses
  * @fence: optional resulting fence
  *
  * Split the mapping into smaller chunks so that each update fits
@@ -1665,9 +1665,10 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 				      struct amdgpu_vm *vm,
 				      struct amdgpu_bo_va_mapping *mapping,
 				      uint64_t flags,
-				      struct drm_mm_node *nodes,
+				      struct ttm_mem_reg *mem,
 				      struct dma_fence **fence)
 {
+	struct drm_mm_node *nodes = mem ? mem->mm_node : NULL;
 	uint64_t pfn, src = 0, start = mapping->start;
 	int r;
 
@@ -1709,17 +1710,33 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 			addr = nodes->start << PAGE_SHIFT;
 			max_entries = (nodes->size - pfn) *
 				(PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+			switch (mem->mem_type) {
+			case AMDGPU_PL_DGMA_IMPORT:
+				pages_addr = (dma_addr_t *)mem->bus.base;
+				addr += adev->mman.bdev.man[mem->mem_type].gpu_offset -
+					adev->mman.bdev.man[TTM_PL_TT].gpu_offset;
+				gtt_flags = flags;
+				/* fall through */
+			case TTM_PL_TT:
+				max_entries = min(max_entries, 16ull * 1024ull);
+				addr = 0;
+				break;
+			case AMDGPU_PL_DGMA:
+				addr += adev->vm_manager.vram_base_offset +
+					adev->mman.bdev.man[mem->mem_type].gpu_offset -
+					adev->mman.bdev.man[TTM_PL_VRAM].gpu_offset;
+				break;
+			case TTM_PL_VRAM:
+				addr += adev->vm_manager.vram_base_offset;
+				break;
+			default:
+				break;
+			}
 		} else {
 			addr = 0;
 			max_entries = S64_MAX;
 		}
 
-		if (pages_addr) {
-			max_entries = min(max_entries, 16ull * 1024ull);
-			addr = 0;
-		} else if (flags & AMDGPU_PTE_VALID) {
-			addr += adev->vm_manager.vram_base_offset;
-		}
 		addr += pfn << PAGE_SHIFT;
 
 		last = min((uint64_t)mapping->last, start + max_entries - 1);
@@ -1797,7 +1814,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 
 	list_for_each_entry(mapping, &bo_va->invalids, list) {
 		r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
-					       mapping, flags, nodes,
+					       mapping, flags, mem,
 					       &bo_va->last_pt_update);
 		if (r)
 			return r;
-- 
2.7.4
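
For context, the switch statement added above selects how a drm_mm_node
start address is turned into the address written into the page tables,
depending on where the BO is placed. The standalone sketch below mimics
that translation; mock_adev, translate, the PL_* enum and the offset
values are hypothetical stand-ins chosen only to make the example
compile and run, not real amdgpu/TTM APIs.

/*
 * Standalone sketch (not part of the patch): mimics the per-placement
 * address translation that the new switch statement performs.  All
 * types and values below are simplified stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

enum mem_type { PL_TT, PL_VRAM, PL_DGMA, PL_DGMA_IMPORT, PL_NUM };

struct mock_adev {
	uint64_t vram_base_offset;	/* MC base of VRAM */
	uint64_t gpu_offset[PL_NUM];	/* per-manager GPU offsets */
};

/* Rebase a drm_mm_node start address for the page-table write. */
static uint64_t translate(const struct mock_adev *adev, enum mem_type type,
			  uint64_t addr)
{
	switch (type) {
	case PL_DGMA_IMPORT:
		/* imported DGMA goes through a DMA-address table, like GTT */
		/* fall through */
	case PL_TT:
		addr = 0;	/* system pages: addr becomes an index later */
		break;
	case PL_DGMA:
		/* DGMA aliases VRAM: rebase against the VRAM manager */
		addr += adev->vram_base_offset +
			adev->gpu_offset[PL_DGMA] -
			adev->gpu_offset[PL_VRAM];
		break;
	case PL_VRAM:
		addr += adev->vram_base_offset;
		break;
	default:
		break;
	}
	return addr;
}

int main(void)
{
	const struct mock_adev adev = {
		.vram_base_offset = 0x100000000ull,
		.gpu_offset = {
			[PL_TT]          = 0x0,
			[PL_VRAM]        = 0x0,
			[PL_DGMA]        = 0x80000000ull,
			[PL_DGMA_IMPORT] = 0xc0000000ull,
		},
	};

	printf("VRAM node 0x1000 -> 0x%llx\n",
	       (unsigned long long)translate(&adev, PL_VRAM, 0x1000));
	printf("DGMA node 0x1000 -> 0x%llx\n",
	       (unsigned long long)translate(&adev, PL_DGMA, 0x1000));
	printf("TT   node 0x1000 -> 0x%llx\n",
	       (unsigned long long)translate(&adev, PL_TT, 0x1000));
	return 0;
}

The real patch additionally routes AMDGPU_PL_DGMA_IMPORT through
pages_addr (taken from mem->bus.base) and propagates the mapping flags
into gtt_flags; the sketch omits those details for brevity.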