aboutsummaryrefslogtreecommitdiffstats
path: root/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/2242-resolved-the-compilation-issues-v4.patch
diff options
context:
space:
mode:
Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/2242-resolved-the-compilation-issues-v4.patch')
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/2242-resolved-the-compilation-issues-v4.patch568
1 files changed, 568 insertions, 0 deletions
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/2242-resolved-the-compilation-issues-v4.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/2242-resolved-the-compilation-issues-v4.patch
new file mode 100644
index 00000000..d3439aed
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/2242-resolved-the-compilation-issues-v4.patch
@@ -0,0 +1,568 @@
+From fdf58520b35add7fa51b185409f8a4d28f47a845 Mon Sep 17 00:00:00 2001
+From: Kalyan Alle <kalyan.alle@amd.com>
+Date: Tue, 24 Apr 2018 14:22:06 +0530
+Subject: [PATCH 2242/4131] resolved the compilation issues v4
+
+This patch resolves the compilation issues thrown while
+porting
+till commit : 741e46c01f96030cacade134982aab2a1604a7a1
+branch: amd-18.10, repo: brahma/ec/linux
+
+Signed-off-by: kalyan alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 22 ++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 3 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 12 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 184 +++++++++++++++--------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 6 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 3 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 3 +-
+ include/drm/ttm/ttm_bo_api.h | 15 +++
+ include/uapi/drm/amdgpu_drm.h | 4 +
+ 10 files changed, 136 insertions(+), 118 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 35818a3..8b724b3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -353,7 +353,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
+ struct amdgpu_bo *bo)
+ {
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+- struct ttm_operation_ctx ctx = { true, false };
++ u64 initial_bytes_moved, bytes_moved;
+ uint32_t domain;
+ int r;
+
+@@ -383,13 +383,16 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
+
+ retry:
+ amdgpu_ttm_placement_from_domain(bo, domain);
+- r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
++ initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
++ r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
++ bytes_moved = atomic64_read(&adev->num_bytes_moved) -
++ initial_bytes_moved;
++ p->bytes_moved += bytes_moved;
+
+- p->bytes_moved += ctx.bytes_moved;
+ if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
+ bo->tbo.mem.mem_type == TTM_PL_VRAM &&
+ bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT)
+- p->bytes_moved_vis += ctx.bytes_moved;
++ p->bytes_moved_vis += bytes_moved;
+
+ if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
+ domain = bo->allowed_domains;
+@@ -404,7 +407,6 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
+ struct amdgpu_bo *validated)
+ {
+ uint32_t domain = validated->allowed_domains;
+- struct ttm_operation_ctx ctx = { true, false };
+ int r;
+
+ if (!p->evictable)
+@@ -446,7 +448,7 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
+ bo->tbo.mem.mem_type == TTM_PL_VRAM &&
+ bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT;
+ initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
+- r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
++ r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+ bytes_moved = atomic64_read(&adev->num_bytes_moved) -
+ initial_bytes_moved;
+ p->bytes_moved += bytes_moved;
+@@ -485,7 +487,6 @@ static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
+ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
+ struct list_head *validated)
+ {
+- struct ttm_operation_ctx ctx = { true, false };
+ struct amdgpu_bo_list_entry *lobj;
+ int r;
+
+@@ -503,7 +504,8 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
+ lobj->user_pages) {
+ amdgpu_ttm_placement_from_domain(bo,
+ AMDGPU_GEM_DOMAIN_CPU);
+- r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
++ r = ttm_bo_validate(&bo->tbo, &bo->placement, true,
++ false);
+ if (r)
+ return r;
+ amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
+@@ -1621,7 +1623,6 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
+ struct amdgpu_bo_va_mapping **map)
+ {
+ struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
+- struct ttm_operation_ctx ctx = { false, false };
+ struct amdgpu_vm *vm = &fpriv->vm;
+ struct amdgpu_bo_va_mapping *mapping;
+ int r;
+@@ -1642,7 +1643,8 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
+ if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
+ (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains);
+- r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
++ r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, false,
++ false);
+ if (r)
+ return r;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 0f9c756..808ca47 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -692,7 +692,7 @@ int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev)
+
+ ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
+ r = ttm_bo_mem_space(&bo->tbo, &bo->placement,
+- &bo->tbo.mem, &ctx);
++ &bo->tbo.mem, false, false);
+ if (r)
+ goto error_pin;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+index f653e3a..c9149a1f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+@@ -397,7 +397,6 @@ int amdgpu_gem_find_bo_by_cpu_mapping_ioctl(struct drm_device *dev, void *data,
+ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+ {
+- struct ttm_operation_ctx ctx = { true, false };
+ struct amdgpu_device *adev = dev->dev_private;
+ struct drm_amdgpu_gem_userptr *args = data;
+ struct drm_gem_object *gobj;
+@@ -451,7 +450,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
+ goto free_pages;
+
+ amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+- r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
++ r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+ amdgpu_bo_unreserve(bo);
+ if (r)
+ goto free_pages;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index 0c1da87..510d322 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -590,7 +590,6 @@ int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
+
+ int amdgpu_bo_validate(struct amdgpu_bo *bo)
+ {
+- struct ttm_operation_ctx ctx = { false, false };
+ uint32_t domain;
+ int r;
+
+@@ -601,7 +600,7 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo)
+
+ retry:
+ amdgpu_ttm_placement_from_domain(bo, domain);
+- r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
++ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
+ domain = bo->allowed_domains;
+ goto retry;
+@@ -712,7 +711,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
+ u64 *gpu_addr)
+ {
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+- struct ttm_operation_ctx ctx = { false, false };
+ int r, i;
+
+ if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
+@@ -767,7 +765,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
+ bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
+ }
+
+- r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
++ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ if (unlikely(r)) {
+ dev_err(adev->dev, "%p pin failed\n", bo);
+ goto error;
+@@ -803,7 +801,6 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
+ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
+ {
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+- struct ttm_operation_ctx ctx = { false, false };
+ int r, i;
+
+ if (!bo->pin_count) {
+@@ -817,7 +814,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
+ bo->placements[i].lpfn = 0;
+ bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
+ }
+- r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
++ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ if (unlikely(r)) {
+ dev_err(adev->dev, "%p validate failed for unpin\n", bo);
+ goto error;
+@@ -988,7 +985,6 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
+ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+ {
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+- struct ttm_operation_ctx ctx = { false, false };
+ struct amdgpu_bo *abo;
+ unsigned long offset, size;
+ int r;
+@@ -1022,7 +1018,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+ abo->placement.num_busy_placement = 1;
+ abo->placement.busy_placement = &abo->placements[1];
+
+- r = ttm_bo_validate(bo, &abo->placement, &ctx);
++ r = ttm_bo_validate(bo, &abo->placement, false, false);
+ if (unlikely(r != 0))
+ return r;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 60f8842..a05451a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -509,92 +509,97 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
+ return r;
+ }
+
+-static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
+- struct ttm_operation_ctx *ctx,
+- struct ttm_mem_reg *new_mem)
+-{
+- struct amdgpu_device *adev;
+- struct ttm_mem_reg *old_mem = &bo->mem;
+- struct ttm_mem_reg tmp_mem;
+- struct ttm_place placements;
+- struct ttm_placement placement;
+- int r;
+-
+- adev = amdgpu_ttm_adev(bo->bdev);
+- tmp_mem = *new_mem;
+- tmp_mem.mm_node = NULL;
+- placement.num_placement = 1;
+- placement.placement = &placements;
+- placement.num_busy_placement = 1;
+- placement.busy_placement = &placements;
+- placements.fpfn = 0;
+- placements.lpfn = 0;
+- placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+- r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
+- if (unlikely(r)) {
+- return r;
+- }
+-
+- r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
+- if (unlikely(r)) {
+- goto out_cleanup;
+- }
+-
+- r = ttm_tt_bind(bo->ttm, &tmp_mem);
+- if (unlikely(r)) {
+- goto out_cleanup;
+- }
+- r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, &tmp_mem, old_mem);
+- if (unlikely(r)) {
+- goto out_cleanup;
+- }
+- r = ttm_bo_move_ttm(bo, ctx->interruptible, ctx->no_wait_gpu, new_mem);
++static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
++ bool evict, bool interruptible,
++ bool no_wait_gpu,
++ struct ttm_mem_reg *new_mem)
++{
++ struct amdgpu_device *adev;
++ struct ttm_mem_reg *old_mem = &bo->mem;
++ struct ttm_mem_reg tmp_mem;
++ struct ttm_place placements;
++ struct ttm_placement placement;
++ int r;
++
++ adev = amdgpu_ttm_adev(bo->bdev);
++ tmp_mem = *new_mem;
++ tmp_mem.mm_node = NULL;
++ placement.num_placement = 1;
++ placement.placement = &placements;
++ placement.num_busy_placement = 1;
++ placement.busy_placement = &placements;
++ placements.fpfn = 0;
++ placements.lpfn = 0;
++ placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
++ r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
++ interruptible, no_wait_gpu);
++ if (unlikely(r)) {
++ return r;
++ }
++
++ r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
++ if (unlikely(r)) {
++ goto out_cleanup;
++ }
++
++ r = ttm_tt_bind(bo->ttm, &tmp_mem);
++ if (unlikely(r)) {
++ goto out_cleanup;
++ }
++ r = amdgpu_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
++ if (unlikely(r)) {
++ goto out_cleanup;
++ }
++ r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, new_mem);
+ out_cleanup:
+- ttm_bo_mem_put(bo, &tmp_mem);
+- return r;
+-}
+-
+-static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
+- struct ttm_operation_ctx *ctx,
+- struct ttm_mem_reg *new_mem)
+-{
+- struct amdgpu_device *adev;
+- struct ttm_mem_reg *old_mem = &bo->mem;
+- struct ttm_mem_reg tmp_mem;
+- struct ttm_placement placement;
+- struct ttm_place placements;
+- int r;
+-
+- adev = amdgpu_ttm_adev(bo->bdev);
+- tmp_mem = *new_mem;
+- tmp_mem.mm_node = NULL;
+- placement.num_placement = 1;
+- placement.placement = &placements;
+- placement.num_busy_placement = 1;
+- placement.busy_placement = &placements;
+- placements.fpfn = 0;
+- placements.lpfn = 0;
+- placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+- r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
+- if (unlikely(r)) {
+- return r;
+- }
+- r = ttm_bo_move_ttm(bo, ctx->interruptible, ctx->no_wait_gpu, &tmp_mem);
+- if (unlikely(r)) {
+- goto out_cleanup;
+- }
+- r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, new_mem, old_mem);
+- if (unlikely(r)) {
+- goto out_cleanup;
+- }
++ ttm_bo_mem_put(bo, &tmp_mem);
++ return r;
++}
++
++static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
++ bool evict, bool interruptible,
++ bool no_wait_gpu,
++ struct ttm_mem_reg *new_mem)
++{
++ struct amdgpu_device *adev;
++ struct ttm_mem_reg *old_mem = &bo->mem;
++ struct ttm_mem_reg tmp_mem;
++ struct ttm_placement placement;
++ struct ttm_place placements;
++ int r;
++
++ adev = amdgpu_ttm_adev(bo->bdev);
++ tmp_mem = *new_mem;
++ tmp_mem.mm_node = NULL;
++ placement.num_placement = 1;
++ placement.placement = &placements;
++ placement.num_busy_placement = 1;
++ placement.busy_placement = &placements;
++ placements.fpfn = 0;
++ placements.lpfn = 0;
++ placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
++ r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
++ interruptible, no_wait_gpu);
++ if (unlikely(r)) {
++ return r;
++ }
++ r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, &tmp_mem);
++ if (unlikely(r)) {
++ goto out_cleanup;
++ }
++ r = amdgpu_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
++ if (unlikely(r)) {
++ goto out_cleanup;
++ }
+ out_cleanup:
+- ttm_bo_mem_put(bo, &tmp_mem);
+- return r;
++ ttm_bo_mem_put(bo, &tmp_mem);
++ return r;
+ }
+
+-static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
+- struct ttm_operation_ctx *ctx,
+- struct ttm_mem_reg *new_mem)
++static int amdgpu_bo_move(struct ttm_buffer_object *bo,
++ bool evict, bool interruptible,
++ bool no_wait_gpu,
++ struct ttm_mem_reg *new_mem)
+ {
+ struct amdgpu_device *adev;
+ struct amdgpu_bo *abo;
+@@ -633,19 +638,21 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
+
+ if (old_mem->mem_type == TTM_PL_VRAM &&
+ new_mem->mem_type == TTM_PL_SYSTEM) {
+- r = amdgpu_move_vram_ram(bo, evict, ctx, new_mem);
++ r = amdgpu_move_vram_ram(bo, evict, interruptible,
++ no_wait_gpu, new_mem);
+ } else if (old_mem->mem_type == TTM_PL_SYSTEM &&
+ new_mem->mem_type == TTM_PL_VRAM) {
+- r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem);
++ r = amdgpu_move_ram_vram(bo, evict, interruptible,
++ no_wait_gpu, new_mem);
+ } else {
+- r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu,
++ r = amdgpu_move_blit(bo, evict, no_wait_gpu,
+ new_mem, old_mem);
+ }
+
+ if (r) {
+ memcpy:
+- r = ttm_bo_move_memcpy(bo, ctx->interruptible,
+- ctx->no_wait_gpu, new_mem);
++ r = ttm_bo_move_memcpy(bo, interruptible,
++ no_wait_gpu, new_mem);
+ if (r) {
+ return r;
+ }
+@@ -939,7 +946,6 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
+ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
+ {
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+- struct ttm_operation_ctx ctx = { false, false };
+ struct amdgpu_ttm_tt *gtt = (void*)bo->ttm;
+ struct ttm_mem_reg tmp;
+
+@@ -963,7 +969,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
+ placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
+ TTM_PL_FLAG_TT;
+
+- r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
++ r = ttm_bo_mem_space(bo, &placement, &tmp, false, false);
+ if (unlikely(r))
+ return r;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+index 67f1e97..af7e83d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+@@ -411,7 +411,6 @@ static u64 amdgpu_uvd_get_addr_from_ctx(struct amdgpu_uvd_cs_ctx *ctx)
+ */
+ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
+ {
+- struct ttm_operation_ctx tctx = { false, false };
+ struct amdgpu_bo_va_mapping *mapping;
+ struct amdgpu_bo *bo;
+ uint32_t cmd;
+@@ -434,7 +433,7 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
+ }
+ amdgpu_uvd_force_into_uvd_segment(bo);
+
+- r = ttm_bo_validate(&bo->tbo, &bo->placement, &tctx);
++ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ }
+
+ return r;
+@@ -953,7 +952,6 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
+ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
+ bool direct, struct dma_fence **fence)
+ {
+- struct ttm_operation_ctx ctx = { true, false };
+ struct ttm_validate_buffer tv;
+ struct ww_acquire_ctx ticket;
+ struct list_head head;
+@@ -980,7 +978,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
+ amdgpu_uvd_force_into_uvd_segment(bo);
+ }
+
+- r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
++ r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+ if (r)
+ goto err;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+index ba6d846..a91abfb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+@@ -558,7 +558,6 @@ static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p, uint32_t ib_idx,
+ int lo, int hi, unsigned size, int32_t index)
+ {
+ int64_t offset = ((uint64_t)size) * ((int64_t)index);
+- struct ttm_operation_ctx ctx = { false, false };
+ struct amdgpu_bo_va_mapping *mapping;
+ unsigned i, fpfn, lpfn;
+ struct amdgpu_bo *bo;
+@@ -588,7 +587,7 @@ static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p, uint32_t ib_idx,
+ bo->placements[i].lpfn = bo->placements[i].fpfn ?
+ min(bo->placements[i].fpfn, lpfn) : lpfn;
+ }
+- return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
++ return ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ }
+
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index 3b1ef8b..e34b07f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -278,7 +278,6 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
+ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
+ bool direct, struct dma_fence **fence)
+ {
+- struct ttm_operation_ctx ctx = { true, false };
+ struct ttm_validate_buffer tv;
+ struct ww_acquire_ctx ticket;
+ struct list_head head;
+@@ -299,7 +298,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *b
+ if (r)
+ return r;
+
+- r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
++ r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+ if (r)
+ goto err;
+
+diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
+index a29c078..ae5048b 100644
+--- a/include/drm/ttm/ttm_bo_api.h
++++ b/include/drm/ttm/ttm_bo_api.h
+@@ -261,6 +261,21 @@ struct ttm_bo_kmap_obj {
+ };
+
+ /**
++ * struct ttm_operation_ctx
++ *
++ * @interruptible: Sleep interruptible if sleeping.
++ * @no_wait_gpu: Return immediately if the GPU is busy.
++ *
++ * Context for TTM operations like changing buffer placement or general memory
++ * allocation.
++ */
++struct ttm_operation_ctx {
++ bool interruptible;
++ bool no_wait_gpu;
++ uint64_t bytes_moved;
++};
++
++/**
+ * ttm_bo_reference - reference a struct ttm_buffer_object
+ *
+ * @bo: The buffer object.
+diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
+index 58dae78..c9aed87 100644
+--- a/include/uapi/drm/amdgpu_drm.h
++++ b/include/uapi/drm/amdgpu_drm.h
+@@ -961,6 +961,10 @@ struct drm_amdgpu_info_device {
+ __u32 _pad1;
+ /* always on cu bitmap */
+ __u32 cu_ao_bitmap[4][4];
++ /** Starting high virtual address for UMDs. */
++ __u64 high_va_offset;
++ /** The maximum high virtual address */
++ __u64 high_va_max;
+ };
+
+ struct drm_amdgpu_info_hw_ip {
+--
+2.7.4
+