path: root/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3727-drm-ttm-use-the-operation-context-inside-TTM.patch
Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3727-drm-ttm-use-the-operation-context-inside-TTM.patch')
-rw-r--r-- meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3727-drm-ttm-use-the-operation-context-inside-TTM.patch | 229
1 file changed, 229 insertions, 0 deletions
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3727-drm-ttm-use-the-operation-context-inside-TTM.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3727-drm-ttm-use-the-operation-context-inside-TTM.patch
new file mode 100644
index 00000000..fbe797ca
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3727-drm-ttm-use-the-operation-context-inside-TTM.patch
@@ -0,0 +1,229 @@
+From 991993c88bfd4a469ffc1ee6ad1d2f48628a5023 Mon Sep 17 00:00:00 2001
+From: Christian König <christian.koenig@amd.com>
+Date: Wed, 12 Apr 2017 16:48:39 +0200
+Subject: [PATCH 3727/4131] drm/ttm: use the operation context inside TTM
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Instead of passing down the parameters manually to every function.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Tested-by: Dieter Nützel <Dieter@nuetzel-hh.de>
+Tested-by: Michel Dänzer <michel.daenzer@amd.com>
+Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/ttm/ttm_bo.c | 70 +++++++++++++++++++------------------------
+ 1 file changed, 31 insertions(+), 39 deletions(-)
+
+diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
+index 124081e..a8928ceb 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo.c
++++ b/drivers/gpu/drm/ttm/ttm_bo.c
+@@ -267,9 +267,8 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
+ }
+
+ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
+- struct ttm_mem_reg *mem,
+- bool evict, bool interruptible,
+- bool no_wait_gpu)
++ struct ttm_mem_reg *mem, bool evict,
++ struct ttm_operation_ctx *ctx)
+ {
+ struct ttm_bo_device *bdev = bo->bdev;
+ bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
+@@ -323,12 +322,14 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
+
+ if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
+ !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
+- ret = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, mem);
++ ret = ttm_bo_move_ttm(bo, ctx->interruptible,
++ ctx->no_wait_gpu, mem);
+ else if (bdev->driver->move)
+- ret = bdev->driver->move(bo, evict, interruptible,
+- no_wait_gpu, mem);
++ ret = bdev->driver->move(bo, evict, ctx->interruptible,
++ ctx->no_wait_gpu, mem);
+ else
+- ret = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, mem);
++ ret = ttm_bo_move_memcpy(bo, ctx->interruptible,
++ ctx->no_wait_gpu, mem);
+
+ if (ret) {
+ if (bdev->driver->move_notify) {
+@@ -670,10 +671,9 @@ void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
+ }
+ EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
+
+-static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
+- bool no_wait_gpu)
++static int ttm_bo_evict(struct ttm_buffer_object *bo,
++ struct ttm_operation_ctx *ctx)
+ {
+- struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_mem_reg evict_mem;
+ struct ttm_placement placement;
+@@ -689,7 +689,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
+ placement.num_placement = 0;
+ placement.num_busy_placement = 0;
+ bdev->driver->evict_flags(bo, &placement);
+- ret = ttm_bo_mem_space(bo, &placement, &evict_mem, &ctx);
++ ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
+ if (ret) {
+ if (ret != -ERESTARTSYS) {
+ pr_err("Failed to find memory space for buffer 0x%p eviction\n",
+@@ -699,8 +699,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
+ goto out;
+ }
+
+- ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
+- interruptible, no_wait_gpu);
++ ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, ctx);
+ if (unlikely(ret)) {
+ if (ret != -ERESTARTSYS)
+ pr_err("Buffer eviction failed\n");
+@@ -729,8 +728,7 @@ EXPORT_SYMBOL(ttm_bo_eviction_valuable);
+ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
+ uint32_t mem_type,
+ const struct ttm_place *place,
+- bool interruptible,
+- bool no_wait_gpu)
++ struct ttm_operation_ctx *ctx)
+ {
+ struct ttm_bo_global *glob = bdev->glob;
+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+@@ -767,8 +765,8 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
+ kref_get(&bo->list_kref);
+
+ if (!list_empty(&bo->ddestroy)) {
+- ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
+- no_wait_gpu);
++ ret = ttm_bo_cleanup_refs_and_unlock(bo, ctx->interruptible,
++ ctx->no_wait_gpu);
+ kref_put(&bo->list_kref, ttm_bo_release_list);
+ return ret;
+ }
+@@ -777,8 +775,8 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
+ spin_unlock(&glob->lru_lock);
+
+ BUG_ON(ret != 0);
+-
+- ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
++
++ ret = ttm_bo_evict(bo, ctx);
+ ttm_bo_unreserve(bo);
+
+ kref_put(&bo->list_kref, ttm_bo_release_list);
+@@ -830,8 +828,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
+ uint32_t mem_type,
+ const struct ttm_place *place,
+ struct ttm_mem_reg *mem,
+- bool interruptible,
+- bool no_wait_gpu)
++ struct ttm_operation_ctx *ctx)
+ {
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+@@ -843,8 +840,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
+ return ret;
+ if (mem->mm_node)
+ break;
+- ret = ttm_mem_evict_first(bdev, mem_type, place,
+- interruptible, no_wait_gpu);
++ ret = ttm_mem_evict_first(bdev, mem_type, place, ctx);
+ if (unlikely(ret != 0))
+ return ret;
+ } while (1);
+@@ -1000,10 +996,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
+ mem->mm_node = NULL;
+ return 0;
+ }
+-
+- ret = ttm_bo_mem_force_space(bo, mem_type, place, mem,
+- ctx->interruptible,
+- ctx->no_wait_gpu);
++
++ ret = ttm_bo_mem_force_space(bo, mem_type, place, mem, ctx);
+ if (ret == 0 && mem->mm_node) {
+ mem->placement = cur_flags;
+ return 0;
+@@ -1023,10 +1017,8 @@ EXPORT_SYMBOL(ttm_bo_mem_space);
+
+ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+- bool interruptible,
+- bool no_wait_gpu)
++ struct ttm_operation_ctx *ctx)
+ {
+- struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
+ int ret = 0;
+ struct ttm_mem_reg mem;
+
+@@ -1040,11 +1032,10 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
+ /*
+ * Determine where to move the buffer.
+ */
+- ret = ttm_bo_mem_space(bo, placement, &mem, &ctx);
++ ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
+ if (ret)
+ goto out_unlock;
+- ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible,
+- no_wait_gpu);
++ ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx);
+ out_unlock:
+ if (ret && mem.mm_node)
+ ttm_bo_mem_put(bo, &mem);
+@@ -1106,8 +1097,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
+ * Check whether we need to move buffer.
+ */
+ if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
+- ret = ttm_bo_move_buffer(bo, placement, ctx->interruptible,
+- ctx->no_wait_gpu);
++ ret = ttm_bo_move_buffer(bo, placement, ctx);
+ if (ret)
+ return ret;
+ } else {
+@@ -1335,6 +1325,7 @@ EXPORT_SYMBOL(ttm_bo_create);
+ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
+ unsigned mem_type)
+ {
++ struct ttm_operation_ctx ctx = { false, false };
+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+ struct ttm_bo_global *glob = bdev->glob;
+ struct dma_fence *fence;
+@@ -1349,7 +1340,8 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
+ for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
+ while (!list_empty(&man->lru[i])) {
+ spin_unlock(&glob->lru_lock);
+- ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false);
++ ret = ttm_mem_evict_first(bdev, mem_type,
++ NULL, &ctx);
+ if (ret)
+ return ret;
+ spin_lock(&glob->lru_lock);
+@@ -1749,15 +1741,15 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
+
+ if (bo->mem.mem_type != TTM_PL_SYSTEM ||
+ bo->ttm->caching_state != tt_cached) {
++ struct ttm_operation_ctx ctx = { false, false };
+ struct ttm_mem_reg evict_mem;
+
+ evict_mem = bo->mem;
+ evict_mem.mm_node = NULL;
+ evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
+ evict_mem.mem_type = TTM_PL_SYSTEM;
+-
+- ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
+- false, false);
++
++ ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx);
+ if (unlikely(ret != 0))
+ goto out;
+ }
+--
+2.7.4
+
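
For reference, a minimal standalone C sketch (not taken from the patch or the kernel sources) of the pattern this commit applies: helpers receive a struct ttm_operation_ctx pointer and read ctx->interruptible / ctx->no_wait_gpu, instead of having the two bools passed down individually. The evict_old()/evict_new() helpers are illustrative only, and the ttm_operation_ctx here is reduced to just the two fields the patch reads.

/* Illustrative sketch of the "pass one context instead of many flags" refactor. */
#include <stdbool.h>
#include <stdio.h>

/* Mirrors only the two fields this patch reads from struct ttm_operation_ctx. */
struct ttm_operation_ctx {
	bool interruptible;
	bool no_wait_gpu;
};

/* Old style: every helper takes the per-operation flags individually. */
static int evict_old(int bo, bool interruptible, bool no_wait_gpu)
{
	printf("old: bo=%d interruptible=%d no_wait_gpu=%d\n",
	       bo, interruptible, no_wait_gpu);
	return 0;
}

/* New style: the helper takes the whole context and reads what it needs. */
static int evict_new(int bo, struct ttm_operation_ctx *ctx)
{
	printf("new: bo=%d interruptible=%d no_wait_gpu=%d\n",
	       bo, ctx->interruptible, ctx->no_wait_gpu);
	return 0;
}

int main(void)
{
	struct ttm_operation_ctx ctx = { true, false };

	evict_old(1, ctx.interruptible, ctx.no_wait_gpu);
	evict_new(1, &ctx);	/* adding another flag later only touches ctx */
	return 0;
}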