From 7d78c8e4a3fe6f5735c815db7d285a803c62d0df Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20K=C3=B6nig?=
Date: Fri, 18 Mar 2016 21:00:35 +0100
Subject: [PATCH 0461/1110] drm/amdgpu: change parameter passing in the VM code
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Make it more flexible by passing src and page addresses directly
instead of the structures that contain them.

Signed-off-by: Christian König
Reviewed-by: Alex Deucher
Signed-off-by: Alex Deucher
Signed-off-by: Kalyan Alle
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 100 ++++++++++++++++++---------------
 1 file changed, 54 insertions(+), 46 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index ac4e092..4db8a2b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -356,8 +356,8 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
  * amdgpu_vm_update_pages - helper to call the right asic function
  *
  * @adev: amdgpu_device pointer
- * @gtt: GART instance to use for mapping
- * @gtt_flags: GTT hw access flags
+ * @src: address where to copy page table entries from
+ * @pages_addr: DMA addresses to use for mapping
  * @ib: indirect buffer to fill with commands
  * @pe: addr of the page entry
  * @addr: dst addr to write into pe
@@ -369,8 +369,8 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
  * to setup the page table using the DMA.
  */
 static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
-				   struct amdgpu_gart *gtt,
-				   uint32_t gtt_flags,
+				   uint64_t src,
+				   dma_addr_t *pages_addr,
 				   struct amdgpu_ib *ib,
 				   uint64_t pe, uint64_t addr,
 				   unsigned count, uint32_t incr,
@@ -378,12 +378,11 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
 {
 	trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
 
-	if ((gtt == &adev->gart) && (flags == gtt_flags)) {
-		uint64_t src = gtt->table_addr + (addr >> 12) * 8;
+	if (src) {
+		src += (addr >> 12) * 8;
 		amdgpu_vm_copy_pte(adev, ib, pe, src, count);
 
-	} else if (gtt) {
-		dma_addr_t *pages_addr = gtt->pages_addr;
+	} else if (pages_addr) {
 		amdgpu_vm_write_pte(adev, ib, pages_addr, pe, addr,
 				    count, incr, flags);
 
@@ -432,7 +431,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
 	if (r)
 		goto error;
-	amdgpu_vm_update_pages(adev, NULL, 0, &job->ibs[0], addr, 0, entries,
+	amdgpu_vm_update_pages(adev, 0, NULL, &job->ibs[0], addr, 0, entries,
 			       0, 0);
 
 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
@@ -538,7 +537,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 		    ((last_pt + incr * count) != pt)) {
 
 			if (count) {
-				amdgpu_vm_update_pages(adev, NULL, 0, ib,
+				amdgpu_vm_update_pages(adev, 0, NULL, ib,
 						       last_pde, last_pt,
 						       count, incr,
 						       AMDGPU_PTE_VALID);
@@ -553,7 +552,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 	}
 
 	if (count)
-		amdgpu_vm_update_pages(adev, NULL, 0, ib, last_pde, last_pt,
+		amdgpu_vm_update_pages(adev, 0, NULL, ib, last_pde, last_pt,
 				       count, incr, AMDGPU_PTE_VALID);
 
 	if (ib->length_dw != 0) {
@@ -584,8 +583,8 @@ error_free:
  * amdgpu_vm_frag_ptes - add fragment information to PTEs
  *
  * @adev: amdgpu_device pointer
- * @gtt: GART instance to use for mapping
- * @gtt_flags: GTT hw mapping flags
+ * @src: address where to copy page table entries from
+ * @pages_addr: DMA addresses to use for mapping
  * @ib: IB for the update
  * @pe_start: first PTE to handle
  * @pe_end: last PTE to handle
  * @addr: addr those PTEs should point to
  * @flags: hw mapping flags
  *
  * Global and local mutex must be locked!
  */
 static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
-				struct amdgpu_gart *gtt,
-				uint32_t gtt_flags,
+				uint64_t src,
+				dma_addr_t *pages_addr,
 				struct amdgpu_ib *ib,
 				uint64_t pe_start, uint64_t pe_end,
 				uint64_t addr, uint32_t flags)
@@ -634,10 +633,11 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
 		return;
 
 	/* system pages are non continuously */
-	if (gtt || !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) {
+	if (src || pages_addr || !(flags & AMDGPU_PTE_VALID) ||
+	    (frag_start >= frag_end)) {
 
 		count = (pe_end - pe_start) / 8;
-		amdgpu_vm_update_pages(adev, gtt, gtt_flags, ib, pe_start,
+		amdgpu_vm_update_pages(adev, src, pages_addr, ib, pe_start,
 				       addr, count, AMDGPU_GPU_PAGE_SIZE, flags);
 		return;
@@ -646,21 +646,21 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
 	/* handle the 4K area at the beginning */
 	if (pe_start != frag_start) {
 		count = (frag_start - pe_start) / 8;
-		amdgpu_vm_update_pages(adev, NULL, 0, ib, pe_start, addr,
+		amdgpu_vm_update_pages(adev, 0, NULL, ib, pe_start, addr,
 				       count, AMDGPU_GPU_PAGE_SIZE, flags);
 		addr += AMDGPU_GPU_PAGE_SIZE * count;
 	}
 
 	/* handle the area in the middle */
 	count = (frag_end - frag_start) / 8;
-	amdgpu_vm_update_pages(adev, NULL, 0, ib, frag_start, addr, count,
+	amdgpu_vm_update_pages(adev, 0, NULL, ib, frag_start, addr, count,
 			       AMDGPU_GPU_PAGE_SIZE, flags | frag_flags);
 
 	/* handle the 4K area at the end */
 	if (frag_end != pe_end) {
 		addr += AMDGPU_GPU_PAGE_SIZE * count;
 		count = (pe_end - frag_end) / 8;
-		amdgpu_vm_update_pages(adev, NULL, 0, ib, frag_end, addr,
+		amdgpu_vm_update_pages(adev, 0, NULL, ib, frag_end, addr,
 				       count, AMDGPU_GPU_PAGE_SIZE, flags);
 	}
 }
@@ -669,8 +669,8 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
  * amdgpu_vm_update_ptes - make sure that page tables are valid
  *
  * @adev: amdgpu_device pointer
- * @gtt: GART instance to use for mapping
- * @gtt_flags: GTT hw mapping flags
+ * @src: address where to copy page table entries from
+ * @pages_addr: DMA addresses to use for mapping
  * @vm: requested vm
  * @start: start of GPU address range
  * @end: end of GPU address range
@@ -680,8 +680,8 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
  * Update the page tables in the range @start - @end.
  */
 static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
-				  struct amdgpu_gart *gtt,
-				  uint32_t gtt_flags,
+				  uint64_t src,
+				  dma_addr_t *pages_addr,
 				  struct amdgpu_vm *vm,
 				  struct amdgpu_ib *ib,
 				  uint64_t start, uint64_t end,
@@ -712,7 +712,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
 
 		if (last_pe_end != pe_start) {
 
-			amdgpu_vm_frag_ptes(adev, gtt, gtt_flags, ib,
+			amdgpu_vm_frag_ptes(adev, src, pages_addr, ib,
 					    last_pe_start, last_pe_end,
 					    last_dst, flags);
 
@@ -727,17 +727,16 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
 		dst += nptes * AMDGPU_GPU_PAGE_SIZE;
 	}
 
-	amdgpu_vm_frag_ptes(adev, gtt, gtt_flags, ib,
-			    last_pe_start, last_pe_end,
-			    last_dst, flags);
+	amdgpu_vm_frag_ptes(adev, src, pages_addr, ib, last_pe_start,
+			    last_pe_end, last_dst, flags);
 }
 
 /**
  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
  *
  * @adev: amdgpu_device pointer
- * @gtt: GART instance to use for mapping
- * @gtt_flags: flags as they are used for GTT
+ * @src: address where to copy page table entries from
+ * @pages_addr: DMA addresses to use for mapping
  * @vm: requested vm
  * @start: start of mapped range
  * @last: last mapped entry
@@ -749,8 +748,8 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
  * Returns 0 for success, -EINVAL for failure.
  */
 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
-				       struct amdgpu_gart *gtt,
-				       uint32_t gtt_flags,
+				       uint64_t src,
+				       dma_addr_t *pages_addr,
 				       struct amdgpu_vm *vm,
 				       uint64_t start, uint64_t last,
 				       uint32_t flags, uint64_t addr,
@@ -781,11 +780,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	/* padding, etc. */
 	ndw = 64;
 
-	if ((gtt == &adev->gart) && (flags == gtt_flags)) {
+	if (src) {
 		/* only copy commands needed */
 		ndw += ncmds * 7;
 
-	} else if (gtt) {
+	} else if (pages_addr) {
 		/* header for write data commands */
 		ndw += ncmds * 4;
 
@@ -815,8 +814,8 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	if (r)
 		goto error_free;
 
-	amdgpu_vm_update_ptes(adev, gtt, gtt_flags, vm, ib, start, last + 1,
-			      addr, flags);
+	amdgpu_vm_update_ptes(adev, src, pages_addr, vm, ib, start,
+			      last + 1, addr, flags);
 
 	amdgpu_ring_pad_ib(ring, ib);
 	WARN_ON(ib->length_dw > ndw);
@@ -858,12 +857,13 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 				      uint32_t gtt_flags,
 				      struct amdgpu_vm *vm,
 				      struct amdgpu_bo_va_mapping *mapping,
-				      uint64_t addr, struct fence **fence)
+				      uint32_t flags, uint64_t addr,
+				      struct fence **fence)
 {
 	const uint64_t max_size = 64ULL * 1024ULL * 1024ULL /
 		AMDGPU_GPU_PAGE_SIZE;
-	uint64_t start = mapping->it.start;
-	uint32_t flags = gtt_flags;
+	uint64_t src = 0, start = mapping->it.start;
+	dma_addr_t *pages_addr = NULL;
 	int r;
 
 	/* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here
@@ -876,10 +876,17 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 	trace_amdgpu_vm_bo_update(mapping);
 
+	if (gtt) {
+		if (flags == gtt_flags)
+			src = adev->gart.table_addr + (addr >> 12) * 8;
+		else
+			pages_addr = &gtt->pages_addr[addr >> 12];
+		addr = 0;
+	}
 	addr += mapping->offset;
 
-	if (!gtt || ((gtt == &adev->gart) && (flags == gtt_flags)))
-		return amdgpu_vm_bo_update_mapping(adev, gtt, gtt_flags, vm,
+	if (!gtt || src)
+		return amdgpu_vm_bo_update_mapping(adev, src, pages_addr, vm,
 						   start, mapping->it.last,
 						   flags, addr, fence);
 
@@ -887,7 +894,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 		uint64_t last;
 
 		last = min((uint64_t)mapping->it.last, start + max_size - 1);
-		r = amdgpu_vm_bo_update_mapping(adev, gtt, gtt_flags, vm,
+		r = amdgpu_vm_bo_update_mapping(adev, src, pages_addr, vm,
 						start, last, flags, addr,
 						fence);
 		if (r)
@@ -919,7 +926,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	struct amdgpu_vm *vm = bo_va->vm;
 	struct amdgpu_bo_va_mapping *mapping;
 	struct amdgpu_gart *gtt = NULL;
-	uint32_t flags;
+	uint32_t gtt_flags, flags;
 	uint64_t addr;
 	int r;
 
@@ -942,6 +949,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	}
 
 	flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
+	gtt_flags = (adev == bo_va->bo->adev) ? flags : 0;
 
 	spin_lock(&vm->status_lock);
 	if (!list_empty(&bo_va->vm_status))
@@ -949,8 +957,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	spin_unlock(&vm->status_lock);
 
 	list_for_each_entry(mapping, &bo_va->invalids, list) {
-		r = amdgpu_vm_bo_split_mapping(adev, gtt, flags, vm, mapping, addr,
-					       &bo_va->last_pt_update);
+		r = amdgpu_vm_bo_split_mapping(adev, gtt, gtt_flags, vm, mapping,
+					       flags, addr, &bo_va->last_pt_update);
 		if (r)
 			return r;
 	}
@@ -996,7 +1004,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
 		list_del(&mapping->list);
 		r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, vm, mapping,
-					       0, NULL);
+					       0, 0, NULL);
 		kfree(mapping);
 		if (r)
 			return r;
-- 
2.7.4
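
The patch boils down to a three-way dispatch in amdgpu_vm_update_pages(): a
non-zero src selects the GART copy path, a non-NULL pages_addr selects the
write path with one DMA-address lookup per page, and neither selects the
direct path for contiguous addresses. The user-space sketch below only
illustrates that dispatch under stated assumptions; it is not kernel code.
The dma_addr_t typedef and the three helpers are hypothetical stand-ins for
the command builders the diff calls (amdgpu_vm_copy_pte(),
amdgpu_vm_write_pte(), and the untouched else branch).

#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;	/* stand-in for the kernel typedef */

/* Hypothetical stand-ins for the real PTE command builders. */
static void copy_ptes(uint64_t pe, uint64_t src, unsigned count)
{
	printf("copy  %u PTEs to 0x%llx from table at 0x%llx\n",
	       count, (unsigned long long)pe, (unsigned long long)src);
}

static void write_ptes(uint64_t pe, const dma_addr_t *pages_addr,
		       uint64_t addr, unsigned count)
{
	(void)pages_addr; (void)addr;
	printf("write %u PTEs to 0x%llx, one DMA lookup per page\n",
	       count, (unsigned long long)pe);
}

static void set_ptes(uint64_t pe, uint64_t addr, unsigned count)
{
	printf("set   %u PTEs to 0x%llx for linear range at 0x%llx\n",
	       count, (unsigned long long)pe, (unsigned long long)addr);
}

/* Mirrors the dispatch order of the patched amdgpu_vm_update_pages(). */
static void update_pages(uint64_t src, dma_addr_t *pages_addr,
			 uint64_t pe, uint64_t addr, unsigned count)
{
	if (src) {
		/* copy path: addr selects 8-byte PTEs inside the source table */
		src += (addr >> 12) * 8;
		copy_ptes(pe, src, count);
	} else if (pages_addr) {
		/* write path: PTEs come from the per-page DMA address array */
		write_ptes(pe, pages_addr, addr, count);
	} else {
		/* direct path: addresses are contiguous, set them as one run */
		set_ptes(pe, addr, count);
	}
}

int main(void)
{
	dma_addr_t dma[4] = { 0x1000, 0x9000, 0x3000, 0x7000 };

	update_pages(0xabc000, NULL, 0x100, 0x2000, 4);	/* copy path   */
	update_pages(0, dma, 0x100, 0, 4);		/* write path  */
	update_pages(0, NULL, 0x100, 0x400000, 4);	/* direct path */
	return 0;
}

In the driver itself the choice between the two values is made one level up:
amdgpu_vm_bo_split_mapping() computes src from adev->gart.table_addr only
when flags == gtt_flags and otherwise points pages_addr into the GART page
array, which replaces the (gtt == &adev->gart) && (flags == gtt_flags) test
this patch removes from the inner update loop.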