From 85e5a9ae464a6f115c03ef50588e5d5a63a770a5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20K=C3=B6nig?=
Date: Fri, 27 Oct 2017 14:17:09 +0200
Subject: [PATCH 2090/4131] drm/amdgpu: nuke amdgpu_ttm_is_bound() v2
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Rename amdgpu_gtt_mgr_is_allocated() to amdgpu_gtt_mgr_has_gart_addr() and
use that instead.

v2: rename the function as well.

Signed-off-by: Christian König
Reviewed-by: Chunming Zhou

Conflicts:
	drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
	drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h

Change-Id: I8024b4da505f6566126ebe7e5baca86a1ab5f604
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c |  6 +++---
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c  |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.h  |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c     | 24 +++++++++---------------
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h     |  3 +--
 5 files changed, 15 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 287f97a..f6c4af9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -85,13 +85,13 @@ static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
 }
 
 /**
- * amdgpu_gtt_mgr_is_allocated - Check if mem has address space
+ * amdgpu_gtt_mgr_has_gart_addr - Check if mem has address space
  *
  * @mem: the mem object to check
  *
  * Check if a mem object has already address space allocated.
  */
-bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem)
+bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem)
 {
 	struct drm_mm_node *node = mem->mm_node;
 
@@ -120,7 +120,7 @@ static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
 	unsigned long fpfn, lpfn;
 	int r;
 
-	if (amdgpu_gtt_mgr_is_allocated(mem))
+	if (amdgpu_gtt_mgr_has_gart_addr(mem))
 		return 0;
 
 	if (place)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 2678270..2bddb7a 100755
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1025,7 +1025,7 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
 {
 	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
 	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
-		     !amdgpu_ttm_is_bound(bo->tbo.ttm));
+		     !amdgpu_gtt_mgr_has_gart_addr(&bo->tbo.mem));
 	WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
 		     !bo->pin_count);
 	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 5ffa548..d235aca 100755
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -194,7 +194,7 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
 static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo)
 {
 	switch (bo->tbo.mem.mem_type) {
-	case TTM_PL_TT: return amdgpu_ttm_is_bound(bo->tbo.ttm);
+	case TTM_PL_TT: return amdgpu_gtt_mgr_has_gart_addr(&bo->tbo.mem);
 	case TTM_PL_VRAM:
 	case AMDGPU_PL_DGMA:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 885dbf6..cb4a8c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -325,8 +325,7 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
 {
 	uint64_t addr = 0;
 
-	if (mem->mem_type != TTM_PL_TT ||
-	    amdgpu_gtt_mgr_is_allocated(mem)) {
+	if (mem->mem_type != TTM_PL_TT || amdgpu_gtt_mgr_has_gart_addr(mem)) {
 		addr = mm_node->start << PAGE_SHIFT;
 		addr += bo->bdev->man[mem->mem_type].gpu_offset;
 	}
@@ -412,7 +411,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
 		 * dst to window 1
 		 */
 		if (src->mem->mem_type == TTM_PL_TT &&
-		    !amdgpu_gtt_mgr_is_allocated(src->mem)) {
+		    !amdgpu_gtt_mgr_has_gart_addr(src->mem)) {
 			r = amdgpu_map_buffer(src->bo, src->mem,
 					PFN_UP(cur_size + src_page_offset),
 					src_node_start, 0, ring,
@@ -426,7 +425,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
 		}
 
 		if (dst->mem->mem_type == TTM_PL_TT &&
-		    !amdgpu_gtt_mgr_is_allocated(dst->mem)) {
+		    !amdgpu_gtt_mgr_has_gart_addr(dst->mem)) {
 			r = amdgpu_map_buffer(dst->bo, dst->mem,
 					PFN_UP(cur_size + dst_page_offset),
 					dst_node_start, 1, ring,
@@ -927,8 +926,10 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
 	    bo_mem->mem_type == AMDGPU_PL_OA)
 		return -EINVAL;
 
-	if (!amdgpu_gtt_mgr_is_allocated(bo_mem))
+	if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
+		gtt->offset = AMDGPU_BO_INVALID_OFFSET;
 		return 0;
+	}
 
 	spin_lock(&gtt->adev->gtt_list_lock);
 	flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
@@ -948,24 +949,17 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
 	return r;
 }
 
-bool amdgpu_ttm_is_bound(struct ttm_tt *ttm)
-{
-	struct amdgpu_ttm_tt *gtt = (void *)ttm;
-
-	return gtt && !list_empty(&gtt->list);
-}
-
 int amdgpu_ttm_bind(struct ttm_buffer_object *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
-	struct ttm_tt *ttm = bo->ttm;
 	struct ttm_mem_reg tmp;
 	struct ttm_placement placement;
 	struct ttm_place placements;
 	int r;
 
-	if (!ttm || amdgpu_ttm_is_bound(ttm))
+	if (bo->mem.mem_type != TTM_PL_TT ||
+	    amdgpu_gtt_mgr_has_gart_addr(&bo->mem))
 		return 0;
 
 	tmp = bo->mem;
@@ -1026,7 +1020,7 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
 	if (gtt->userptr)
 		amdgpu_ttm_tt_unpin_userptr(ttm);
 
-	if (!amdgpu_ttm_is_bound(ttm))
+	if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
 		return 0;
 
 	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 7349ee5..af4b76a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -71,7 +71,7 @@ struct amdgpu_copy_mem {
 extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
 extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func;
 
-bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem);
+bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem);
 uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
 
 u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo);
@@ -97,7 +97,6 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
 int amdgpu_bo_mmap(struct file *filp, struct vm_area_struct *vma,
 		   struct ttm_bo_device *bdev);
-bool amdgpu_ttm_is_bound(struct ttm_tt *ttm);
 int amdgpu_ttm_bind(struct ttm_buffer_object *bo);
 int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
 
-- 
2.7.4