Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2092-drm-amd-amdgpu-Bail-out-of-BO-node-creation-if-not-e.patch')
-rw-r--r--  meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2092-drm-amd-amdgpu-Bail-out-of-BO-node-creation-if-not-e.patch  92
1 file changed, 92 insertions, 0 deletions
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2092-drm-amd-amdgpu-Bail-out-of-BO-node-creation-if-not-e.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2092-drm-amd-amdgpu-Bail-out-of-BO-node-creation-if-not-e.patch
new file mode 100644
index 00000000..32803a50
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2092-drm-amd-amdgpu-Bail-out-of-BO-node-creation-if-not-e.patch
@@ -0,0 +1,92 @@
+From 3628643115a6ede3dbfd526d91ae98fdc758e64e Mon Sep 17 00:00:00 2001
+From: Tom St Denis <tom.stdenis@amd.com>
+Date: Mon, 10 Jun 2019 09:53:58 -0400
+Subject: [PATCH 2092/2940] drm/amd/amdgpu: Bail out of BO node creation if not
+ enough VRAM (v3)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+(v2): Return 0 and set mem->mm_node to NULL.
+(v3): Use atomic64_add_return instead.
+
+Signed-off-by: Tom St Denis <tom.stdenis@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Chaudhary Amit Kumar <Chaudharyamit.Kumar@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 17 ++++++++++++-----
+ 1 file changed, 12 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+index 8aea2f21b202..c963ad86072e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+@@ -276,7 +276,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
+ struct drm_mm_node *nodes;
+ enum drm_mm_insert_mode mode;
+ unsigned long lpfn, num_nodes, pages_per_node, pages_left;
+- uint64_t usage = 0, vis_usage = 0;
++ uint64_t vis_usage = 0;
+ unsigned i;
+ int r;
+
+@@ -284,6 +284,13 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
+ if (!lpfn)
+ lpfn = man->size;
+
++ /* bail out quickly if there's likely not enough VRAM for this BO */
++ if (atomic64_add_return(mem->num_pages << PAGE_SHIFT, &mgr->usage) > adev->gmc.mc_vram_size) {
++ atomic64_sub(mem->num_pages << PAGE_SHIFT, &mgr->usage);
++ mem->mm_node = NULL;
++ return 0;
++ }
++
+ if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
+ pages_per_node = ~0ul;
+ num_nodes = 1;
+@@ -300,8 +307,10 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
+
+ nodes = kvmalloc_array((uint32_t)num_nodes, sizeof(*nodes),
+ GFP_KERNEL | __GFP_ZERO);
+- if (!nodes)
++ if (!nodes) {
++ atomic64_sub(mem->num_pages << PAGE_SHIFT, &mgr->usage);
+ return -ENOMEM;
++ }
+
+ mode = DRM_MM_INSERT_BEST;
+ if (place->flags & TTM_PL_FLAG_TOPDOWN)
+@@ -321,7 +330,6 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
+ if (unlikely(r))
+ break;
+
+- usage += nodes[i].size << PAGE_SHIFT;
+ vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
+ amdgpu_vram_mgr_virt_start(mem, &nodes[i]);
+ pages_left -= pages;
+@@ -341,14 +349,12 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
+ if (unlikely(r))
+ goto error;
+
+- usage += nodes[i].size << PAGE_SHIFT;
+ vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
+ amdgpu_vram_mgr_virt_start(mem, &nodes[i]);
+ pages_left -= pages;
+ }
+ spin_unlock(&mgr->lock);
+
+- atomic64_add(usage, &mgr->usage);
+ atomic64_add(vis_usage, &mgr->vis_usage);
+
+ mem->mm_node = nodes;
+@@ -359,6 +365,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
+ while (i--)
+ drm_mm_remove_node(&nodes[i]);
+ spin_unlock(&mgr->lock);
++ atomic64_sub(mem->num_pages << PAGE_SHIFT, &mgr->usage);
+
+ kvfree(nodes);
+ return r == -ENOSPC ? 0 : r;
+--
+2.17.1
+
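Editor's note (not part of the patch): the hunks above amount to a reserve-then-rollback accounting scheme. The requested size is optimistically added to mgr->usage with atomic64_add_return(); if the new total exceeds adev->gmc.mc_vram_size, or a later allocation step fails, the same amount is subtracted again with atomic64_sub(). The following is a minimal userspace C sketch of that idea under assumed names (vram_mgr, vram_reserve, vram_release are invented for illustration, and C11 atomics stand in for the kernel's atomic64_* helpers).

/*
 * Userspace sketch of the reserve-then-rollback accounting pattern used
 * by the patch: optimistically add the request to the shared counter,
 * then roll it back if the total would exceed capacity.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct vram_mgr {
    _Atomic uint64_t usage;   /* bytes currently reserved           */
    uint64_t         size;    /* total capacity (cf. mc_vram_size)  */
};

/* Try to reserve `bytes`; mirrors the atomic64_add_return() early bail. */
static bool vram_reserve(struct vram_mgr *mgr, uint64_t bytes)
{
    uint64_t newtotal = atomic_fetch_add(&mgr->usage, bytes) + bytes;

    if (newtotal > mgr->size) {
        /* Over capacity: undo the optimistic add, as the patch does. */
        atomic_fetch_sub(&mgr->usage, bytes);
        return false;
    }
    return true;
}

/* Release a previously successful reservation. */
static void vram_release(struct vram_mgr *mgr, uint64_t bytes)
{
    atomic_fetch_sub(&mgr->usage, bytes);
}

int main(void)
{
    struct vram_mgr mgr = { .usage = 0, .size = 8ull << 30 }; /* 8 GiB */

    printf("6 GiB: %s\n", vram_reserve(&mgr, 6ull << 30) ? "ok" : "rejected");
    printf("4 GiB: %s\n", vram_reserve(&mgr, 4ull << 30) ? "ok" : "rejected");
    vram_release(&mgr, 6ull << 30);
    printf("4 GiB: %s\n", vram_reserve(&mgr, 4ull << 30) ? "ok" : "rejected");
    return 0;
}

The add-then-check ordering means concurrent reservations can never both slip under the limit, at the cost of a brief over-count that is corrected by the subtraction on the failure path; this is the same trade-off the patch accepts for mgr->usage.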