Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3035-drm-amdgpu-rename-vm_id-to-vmid.patch')
-rw-r--r-- meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3035-drm-amdgpu-rename-vm_id-to-vmid.patch | 1466
1 file changed, 1466 insertions(+), 0 deletions(-)
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3035-drm-amdgpu-rename-vm_id-to-vmid.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3035-drm-amdgpu-rename-vm_id-to-vmid.patch
new file mode 100644
index 00000000..3462ef5d
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3035-drm-amdgpu-rename-vm_id-to-vmid.patch
@@ -0,0 +1,1466 @@
+From 4103be8e591ad3c043de9918dbee6aec9d785f1a Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Mon, 18 Dec 2017 17:08:25 +0100
+Subject: [PATCH 3035/4131] drm/amdgpu: rename vm_id to vmid
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+sed -i "s/vm_id/vmid/g" drivers/gpu/drm/amd/amdgpu/*.c
+sed -i "s/vm_id/vmid/g" drivers/gpu/drm/amd/amdgpu/*.h
+
+Change-Id: I510c322499e64a9b950440ff4683a942864cb450
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 6 ++---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 8 +++----
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 6 ++---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h | 4 ++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 4 ++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 28 +++++++++++------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 14 ++++++------
+ drivers/gpu/drm/amd/amdgpu/cik_ih.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 14 ++++++------
+ drivers/gpu/drm/amd/amdgpu/cz_ih.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 14 ++++++------
+ drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 18 +++++++--------
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 18 +++++++--------
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 18 +++++++--------
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 14 ++++++------
+ drivers/gpu/drm/amd/amdgpu/iceland_ih.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 16 ++++++-------
+ drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 16 ++++++-------
+ drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 18 +++++++--------
+ drivers/gpu/drm/amd/amdgpu/si_dma.c | 16 ++++++-------
+ drivers/gpu/drm/amd/amdgpu/si_ih.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 26 ++++++++++-----------
+ drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 36 +++++++++++++++---------------
+ drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 10 ++++-----
+ drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 18 +++++++--------
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 36 +++++++++++++++---------------
+ drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 4 ++--
+ 34 files changed, 189 insertions(+), 195 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 09ec133..2a5155a 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -354,7 +354,7 @@ struct amdgpu_gart_funcs {
+ /* get the pde for a given mc addr */
+ void (*get_vm_pde)(struct amdgpu_device *adev, int level,
+ u64 *dst, u64 *flags);
+- uint32_t (*get_invalidate_req)(unsigned int vm_id);
++ uint32_t (*get_invalidate_req)(unsigned int vmid);
+ };
+
+ /* provided by the ih block */
+@@ -1148,7 +1148,7 @@ struct amdgpu_job {
+ void *owner;
+ uint64_t fence_ctx; /* the fence_context this job uses */
+ bool vm_needs_flush;
+- unsigned vm_id;
++ unsigned vmid;
+ uint64_t vm_pd_addr;
+ uint32_t gds_base, gds_size;
+ uint32_t gws_base, gws_size;
+@@ -1919,7 +1919,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
+ #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
+ #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
+ #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
+-#define amdgpu_ring_emit_ib(r, ib, vm_id, c) (r)->funcs->emit_ib((r), (ib), (vm_id), (c))
++#define amdgpu_ring_emit_ib(r, ib, vmid, c) (r)->funcs->emit_ib((r), (ib), (vmid), (c))
+ #define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
+ #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
+ #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+index e8910de..f88e8d9 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+@@ -280,7 +280,7 @@ int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
+ ib->ptr = ib_cmd;
+ ib->length_dw = ib_len;
+ /* This works for NO_HWS. TODO: need to handle without knowing VMID */
+- job->vm_id = vmid;
++ job->vmid = vmid;
+
+ ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);
+ if (ret) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+index 6506451..e0b9200 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+@@ -150,7 +150,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
+ return -EINVAL;
+ }
+
+- if (vm && !job->vm_id) {
++ if (vm && !job->vmid) {
+ dev_err(adev->dev, "VM IB without ID\n");
+ return -EINVAL;
+ }
+@@ -219,7 +219,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
+ !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
+ continue;
+
+- amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0,
++ amdgpu_ring_emit_ib(ring, ib, job ? job->vmid : 0,
+ need_ctx_switch);
+ need_ctx_switch = false;
+ }
+@@ -237,8 +237,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
+ r = amdgpu_fence_emit(ring, f);
+ if (r) {
+ dev_err(adev->dev, "failed to emit fence (%d)\n", r);
+- if (job && job->vm_id)
+- amdgpu_vmid_reset(adev, ring->funcs->vmhub, job->vm_id);
++ if (job && job->vmid)
++ amdgpu_vmid_reset(adev, ring->funcs->vmhub, job->vmid);
+ amdgpu_ring_undo(ring);
+ return r;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+index 7900bb7..9f938cb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+@@ -154,7 +154,7 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
+ dma_fence_put(id->last_flush);
+ id->last_flush = NULL;
+ }
+- job->vm_id = id - id_mgr->ids;
++ job->vmid = id - id_mgr->ids;
+ trace_amdgpu_vm_grab_id(vm, ring, job);
+ out:
+ return r;
+@@ -305,7 +305,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+ no_flush_needed:
+ list_move_tail(&id->list, &id_mgr->ids_lru);
+
+- job->vm_id = id - id_mgr->ids;
++ job->vmid = id - id_mgr->ids;
+ trace_amdgpu_vm_grab_id(vm, ring, job);
+
+ error:
+@@ -364,7 +364,7 @@ void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
+ * amdgpu_vmid_reset - reset VMID to zero
+ *
+ * @adev: amdgpu device structure
+- * @vm_id: vmid number to use
++ * @vmid: vmid number to use
+ *
+ * Reset saved GDW, GWS and OA to force switch on next flush.
+ */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+index ada89358..29cf109 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+@@ -105,8 +105,8 @@ struct amdgpu_iv_entry {
+ unsigned client_id;
+ unsigned src_id;
+ unsigned ring_id;
+- unsigned vm_id;
+- unsigned vm_id_src;
++ unsigned vmid;
++ unsigned vmid_src;
+ uint64_t timestamp;
+ unsigned timestamp_src;
+ unsigned pas_id;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+index 2e2341a..dfef4db 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+@@ -158,7 +158,7 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
+ }
+ }
+
+- while (fence == NULL && vm && !job->vm_id) {
++ while (fence == NULL && vm && !job->vmid) {
+ struct amdgpu_ring *ring = job->ring;
+
+ r = amdgpu_vmid_grab(vm, ring, &job->sync,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+index 154237cb..8651966 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+@@ -122,11 +122,11 @@ struct amdgpu_ring_funcs {
+ /* command emit functions */
+ void (*emit_ib)(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib,
+- unsigned vm_id, bool ctx_switch);
++ unsigned vmid, bool ctx_switch);
+ void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
+ uint64_t seq, unsigned flags);
+ void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
+- void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
++ void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vmid,
+ uint64_t pd_addr);
+ void (*emit_hdp_flush)(struct amdgpu_ring *ring);
+ void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+index f337c316..1013ce9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+@@ -60,8 +60,8 @@ TRACE_EVENT(amdgpu_iv,
+ __field(unsigned, client_id)
+ __field(unsigned, src_id)
+ __field(unsigned, ring_id)
+- __field(unsigned, vm_id)
+- __field(unsigned, vm_id_src)
++ __field(unsigned, vmid)
++ __field(unsigned, vmid_src)
+ __field(uint64_t, timestamp)
+ __field(unsigned, timestamp_src)
+ __field(unsigned, pas_id)
+@@ -71,8 +71,8 @@ TRACE_EVENT(amdgpu_iv,
+ __entry->client_id = iv->client_id;
+ __entry->src_id = iv->src_id;
+ __entry->ring_id = iv->ring_id;
+- __entry->vm_id = iv->vm_id;
+- __entry->vm_id_src = iv->vm_id_src;
++ __entry->vmid = iv->vmid;
++ __entry->vmid_src = iv->vmid_src;
+ __entry->timestamp = iv->timestamp;
+ __entry->timestamp_src = iv->timestamp_src;
+ __entry->pas_id = iv->pas_id;
+@@ -81,9 +81,9 @@ TRACE_EVENT(amdgpu_iv,
+ __entry->src_data[2] = iv->src_data[2];
+ __entry->src_data[3] = iv->src_data[3];
+ ),
+- TP_printk("client_id:%u src_id:%u ring:%u vm_id:%u timestamp: %llu pas_id:%u src_data: %08x %08x %08x %08x\n",
++ TP_printk("client_id:%u src_id:%u ring:%u vmid:%u timestamp: %llu pas_id:%u src_data: %08x %08x %08x %08x\n",
+ __entry->client_id, __entry->src_id,
+- __entry->ring_id, __entry->vm_id,
++ __entry->ring_id, __entry->vmid,
+ __entry->timestamp, __entry->pas_id,
+ __entry->src_data[0], __entry->src_data[1],
+ __entry->src_data[2], __entry->src_data[3])
+@@ -197,7 +197,7 @@ TRACE_EVENT(amdgpu_vm_grab_id,
+ TP_STRUCT__entry(
+ __field(struct amdgpu_vm *, vm)
+ __field(u32, ring)
+- __field(u32, vm_id)
++ __field(u32, vmid)
+ __field(u32, vm_hub)
+ __field(u64, pd_addr)
+ __field(u32, needs_flush)
+@@ -206,13 +206,13 @@ TRACE_EVENT(amdgpu_vm_grab_id,
+ TP_fast_assign(
+ __entry->vm = vm;
+ __entry->ring = ring->idx;
+- __entry->vm_id = job->vm_id;
++ __entry->vmid = job->vmid;
+ __entry->vm_hub = ring->funcs->vmhub,
+ __entry->pd_addr = job->vm_pd_addr;
+ __entry->needs_flush = job->vm_needs_flush;
+ ),
+ TP_printk("vm=%p, ring=%u, id=%u, hub=%u, pd_addr=%010Lx needs_flush=%u",
+- __entry->vm, __entry->ring, __entry->vm_id,
++ __entry->vm, __entry->ring, __entry->vmid,
+ __entry->vm_hub, __entry->pd_addr, __entry->needs_flush)
+ );
+
+@@ -335,24 +335,24 @@ TRACE_EVENT(amdgpu_vm_copy_ptes,
+ );
+
+ TRACE_EVENT(amdgpu_vm_flush,
+- TP_PROTO(struct amdgpu_ring *ring, unsigned vm_id,
++ TP_PROTO(struct amdgpu_ring *ring, unsigned vmid,
+ uint64_t pd_addr),
+- TP_ARGS(ring, vm_id, pd_addr),
++ TP_ARGS(ring, vmid, pd_addr),
+ TP_STRUCT__entry(
+ __field(u32, ring)
+- __field(u32, vm_id)
++ __field(u32, vmid)
+ __field(u32, vm_hub)
+ __field(u64, pd_addr)
+ ),
+
+ TP_fast_assign(
+ __entry->ring = ring->idx;
+- __entry->vm_id = vm_id;
++ __entry->vmid = vmid;
+ __entry->vm_hub = ring->funcs->vmhub;
+ __entry->pd_addr = pd_addr;
+ ),
+ TP_printk("ring=%u, id=%u, hub=%u, pd_addr=%010Lx",
+- __entry->ring, __entry->vm_id,
++ __entry->ring, __entry->vmid,
+ __entry->vm_hub,__entry->pd_addr)
+ );
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+index ffa66a2..ed74df5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+@@ -990,7 +990,7 @@ int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx)
+ *
+ */
+ void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
+- unsigned vm_id, bool ctx_switch)
++ unsigned vmid, bool ctx_switch)
+ {
+ amdgpu_ring_write(ring, VCE_CMD_IB);
+ amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+index 162cae9..0fd378a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+@@ -63,7 +63,7 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
+ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
+ int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx);
+ void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
+- unsigned vm_id, bool ctx_switch);
++ unsigned vmid, bool ctx_switch);
+ void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+ unsigned flags);
+ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index a2b7100..00146a1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -451,9 +451,9 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
+ bool gds_switch_needed;
+ bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
+
+- if (job->vm_id == 0)
++ if (job->vmid == 0)
+ return false;
+- id = &id_mgr->ids[job->vm_id];
++ id = &id_mgr->ids[job->vmid];
+ gds_switch_needed = ring->funcs->emit_gds_switch && (
+ id->gds_base != job->gds_base ||
+ id->gds_size != job->gds_size ||
+@@ -477,7 +477,7 @@ static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev)
+ * amdgpu_vm_flush - hardware flush the vm
+ *
+ * @ring: ring to use for flush
+- * @vm_id: vmid number to use
++ * @vmid: vmid number to use
+ * @pd_addr: address of the page directory
+ *
+ * Emit a VM flush when it is necessary.
+@@ -487,7 +487,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
+ struct amdgpu_device *adev = ring->adev;
+ unsigned vmhub = ring->funcs->vmhub;
+ struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+- struct amdgpu_vmid *id = &id_mgr->ids[job->vm_id];
++ struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
+ bool gds_switch_needed = ring->funcs->emit_gds_switch && (
+ id->gds_base != job->gds_base ||
+ id->gds_size != job->gds_size ||
+@@ -516,8 +516,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
+ if (ring->funcs->emit_vm_flush && vm_flush_needed) {
+ struct dma_fence *fence;
+
+- trace_amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr);
+- amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);
++ trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
++ amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
+
+ r = amdgpu_fence_emit(ring, &fence);
+ if (r)
+@@ -537,7 +537,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
+ id->gws_size = job->gws_size;
+ id->oa_base = job->oa_base;
+ id->oa_size = job->oa_size;
+- amdgpu_ring_emit_gds_switch(ring, job->vm_id, job->gds_base,
++ amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
+ job->gds_size, job->gws_base,
+ job->gws_size, job->oa_base,
+ job->oa_size);
+diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+index a870b35..d5a05c1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+@@ -280,7 +280,7 @@ static void cik_ih_decode_iv(struct amdgpu_device *adev,
+ entry->src_id = dw[0] & 0xff;
+ entry->src_data[0] = dw[1] & 0xfffffff;
+ entry->ring_id = dw[2] & 0xff;
+- entry->vm_id = (dw[2] >> 8) & 0xff;
++ entry->vmid = (dw[2] >> 8) & 0xff;
+ entry->pas_id = (dw[2] >> 16) & 0xffff;
+
+ /* wptr/rptr are in bytes! */
+diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+index 375ef27..0066da3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
++++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+@@ -221,9 +221,9 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+ */
+ static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib,
+- unsigned vm_id, bool ctx_switch)
++ unsigned vmid, bool ctx_switch)
+ {
+- u32 extra_bits = vm_id & 0xf;
++ u32 extra_bits = vmid & 0xf;
+
+ /* IB packet must end on a 8 DW boundary */
+ cik_sdma_ring_insert_nop(ring, (12 - (lower_32_bits(ring->wptr) & 7)) % 8);
+@@ -880,23 +880,23 @@ static void cik_sdma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+ * using sDMA (CIK).
+ */
+ static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
+- unsigned vm_id, uint64_t pd_addr)
++ unsigned vmid, uint64_t pd_addr)
+ {
+ u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
+ SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */
+
+ amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+- if (vm_id < 8) {
+- amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
++ if (vmid < 8) {
++ amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
+ } else {
+- amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
++ amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8));
+ }
+ amdgpu_ring_write(ring, pd_addr >> 12);
+
+ /* flush TLB */
+ amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
+- amdgpu_ring_write(ring, 1 << vm_id);
++ amdgpu_ring_write(ring, 1 << vmid);
+
+ amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
+ amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
+diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+index fa61d64..f576e9c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+@@ -259,7 +259,7 @@ static void cz_ih_decode_iv(struct amdgpu_device *adev,
+ entry->src_id = dw[0] & 0xff;
+ entry->src_data[0] = dw[1] & 0xfffffff;
+ entry->ring_id = dw[2] & 0xff;
+- entry->vm_id = (dw[2] >> 8) & 0xff;
++ entry->vmid = (dw[2] >> 8) & 0xff;
+ entry->pas_id = (dw[2] >> 16) & 0xffff;
+
+ /* wptr/rptr are in bytes! */
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+index edef17d..9870d83 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+@@ -1874,7 +1874,7 @@ static void gfx_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
+
+ static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib,
+- unsigned vm_id, bool ctx_switch)
++ unsigned vmid, bool ctx_switch)
+ {
+ u32 header, control = 0;
+
+@@ -1889,7 +1889,7 @@ static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
+ else
+ header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
+
+- control |= ib->length_dw | (vm_id << 24);
++ control |= ib->length_dw | (vmid << 24);
+
+ amdgpu_ring_write(ring, header);
+ amdgpu_ring_write(ring,
+@@ -2354,7 +2354,7 @@ static void gfx_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+ }
+
+ static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+- unsigned vm_id, uint64_t pd_addr)
++ unsigned vmid, uint64_t pd_addr)
+ {
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
+
+@@ -2362,10 +2362,10 @@ static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
+ WRITE_DATA_DST_SEL(0)));
+- if (vm_id < 8) {
+- amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id ));
++ if (vmid < 8) {
++ amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid ));
+ } else {
+- amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vm_id - 8)));
++ amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8)));
+ }
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, pd_addr >> 12);
+@@ -2376,7 +2376,7 @@ static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ WRITE_DATA_DST_SEL(0)));
+ amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
+ amdgpu_ring_write(ring, 0);
+- amdgpu_ring_write(ring, 1 << vm_id);
++ amdgpu_ring_write(ring, 1 << vmid);
+
+ /* wait for the invalidate to complete */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+index 73de583..a8a06b3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+@@ -2254,7 +2254,7 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
+ */
+ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib,
+- unsigned vm_id, bool ctx_switch)
++ unsigned vmid, bool ctx_switch)
+ {
+ u32 header, control = 0;
+
+@@ -2269,7 +2269,7 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
+ else
+ header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
+
+- control |= ib->length_dw | (vm_id << 24);
++ control |= ib->length_dw | (vmid << 24);
+
+ amdgpu_ring_write(ring, header);
+ amdgpu_ring_write(ring,
+@@ -2283,9 +2283,9 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
+
+ static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib,
+- unsigned vm_id, bool ctx_switch)
++ unsigned vmid, bool ctx_switch)
+ {
+- u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
++ u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+ amdgpu_ring_write(ring,
+@@ -3239,19 +3239,19 @@ static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+ * using the CP (CIK).
+ */
+ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+- unsigned vm_id, uint64_t pd_addr)
++ unsigned vmid, uint64_t pd_addr)
+ {
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
+ WRITE_DATA_DST_SEL(0)));
+- if (vm_id < 8) {
++ if (vmid < 8) {
+ amdgpu_ring_write(ring,
+- (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
++ (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
+ } else {
+ amdgpu_ring_write(ring,
+- (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
++ (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8));
+ }
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, pd_addr >> 12);
+@@ -3262,7 +3262,7 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ WRITE_DATA_DST_SEL(0)));
+ amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
+ amdgpu_ring_write(ring, 0);
+- amdgpu_ring_write(ring, 1 << vm_id);
++ amdgpu_ring_write(ring, 1 << vmid);
+
+ /* wait for the invalidate to complete */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index c0877e8..3a2011f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -6248,7 +6248,7 @@ static void gfx_v8_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+
+ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib,
+- unsigned vm_id, bool ctx_switch)
++ unsigned vmid, bool ctx_switch)
+ {
+ u32 header, control = 0;
+
+@@ -6257,7 +6257,7 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
+ else
+ header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
+
+- control |= ib->length_dw | (vm_id << 24);
++ control |= ib->length_dw | (vmid << 24);
+
+ if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
+ control |= INDIRECT_BUFFER_PRE_ENB(1);
+@@ -6278,9 +6278,9 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
+
+ static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib,
+- unsigned vm_id, bool ctx_switch)
++ unsigned vmid, bool ctx_switch)
+ {
+- u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
++ u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+ amdgpu_ring_write(ring,
+@@ -6331,7 +6331,7 @@ static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+ }
+
+ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+- unsigned vm_id, uint64_t pd_addr)
++ unsigned vmid, uint64_t pd_addr)
+ {
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
+
+@@ -6339,12 +6339,12 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
+ WRITE_DATA_DST_SEL(0)) |
+ WR_CONFIRM);
+- if (vm_id < 8) {
++ if (vmid < 8) {
+ amdgpu_ring_write(ring,
+- (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
++ (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
+ } else {
+ amdgpu_ring_write(ring,
+- (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
++ (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8));
+ }
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, pd_addr >> 12);
+@@ -6356,7 +6356,7 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ WRITE_DATA_DST_SEL(0)));
+ amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
+ amdgpu_ring_write(ring, 0);
+- amdgpu_ring_write(ring, 1 << vm_id);
++ amdgpu_ring_write(ring, 1 << vmid);
+
+ /* wait for the invalidate to complete */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index b9f9a36..840c70b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -3600,7 +3600,7 @@ static void gfx_v9_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+
+ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib,
+- unsigned vm_id, bool ctx_switch)
++ unsigned vmid, bool ctx_switch)
+ {
+ u32 header, control = 0;
+
+@@ -3609,7 +3609,7 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
+ else
+ header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
+
+- control |= ib->length_dw | (vm_id << 24);
++ control |= ib->length_dw | (vmid << 24);
+
+ if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
+ control |= INDIRECT_BUFFER_PRE_ENB(1);
+@@ -3631,9 +3631,9 @@ BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
+
+ static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib,
+- unsigned vm_id, bool ctx_switch)
++ unsigned vmid, bool ctx_switch)
+ {
+- u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
++ u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+ BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
+@@ -3689,11 +3689,11 @@ static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+ }
+
+ static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+- unsigned vm_id, uint64_t pd_addr)
++ unsigned vmid, uint64_t pd_addr)
+ {
+ struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
+- uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
++ uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
+ uint64_t flags = AMDGPU_PTE_VALID;
+ unsigned eng = ring->vm_inv_eng;
+
+@@ -3701,11 +3701,11 @@ static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ pd_addr |= flags;
+
+ gfx_v9_0_write_data_to_reg(ring, usepfp, true,
+- hub->ctx0_ptb_addr_lo32 + (2 * vm_id),
++ hub->ctx0_ptb_addr_lo32 + (2 * vmid),
+ lower_32_bits(pd_addr));
+
+ gfx_v9_0_write_data_to_reg(ring, usepfp, true,
+- hub->ctx0_ptb_addr_hi32 + (2 * vm_id),
++ hub->ctx0_ptb_addr_hi32 + (2 * vmid),
+ upper_32_bits(pd_addr));
+
+ gfx_v9_0_write_data_to_reg(ring, usepfp, true,
+@@ -3713,7 +3713,7 @@ static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+
+ /* wait for the invalidate to complete */
+ gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, hub->vm_inv_eng0_ack +
+- eng, 0, 1 << vm_id, 1 << vm_id, 0x20);
++ eng, 0, 1 << vmid, 1 << vmid, 0x20);
+
+ /* compute doesn't have PFP */
+ if (usepfp) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index efc7ca7..040cde8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -248,7 +248,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+ {
+- struct amdgpu_vmhub *hub = &adev->vmhub[entry->vm_id_src];
++ struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
+ uint32_t status = 0;
+ u64 addr;
+
+@@ -262,9 +262,9 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
+
+ if (printk_ratelimit()) {
+ dev_err(adev->dev,
+- "[%s] VMC page fault (src_id:%u ring:%u vm_id:%u pas_id:%u)\n",
+- entry->vm_id_src ? "mmhub" : "gfxhub",
+- entry->src_id, entry->ring_id, entry->vm_id,
++ "[%s] VMC page fault (src_id:%u ring:%u vmid:%u pas_id:%u)\n",
++ entry->vmid_src ? "mmhub" : "gfxhub",
++ entry->src_id, entry->ring_id, entry->vmid,
+ entry->pas_id);
+ dev_err(adev->dev, " at page 0x%016llx from %d\n",
+ addr, entry->client_id);
+@@ -288,13 +288,13 @@ static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
+ adev->mc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
+ }
+
+-static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vm_id)
++static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid)
+ {
+ u32 req = 0;
+
+- /* invalidate using legacy mode on vm_id*/
++ /* invalidate using legacy mode on vmid*/
+ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
+- PER_VMID_INVALIDATE_REQ, 1 << vm_id);
++ PER_VMID_INVALIDATE_REQ, 1 << vmid);
+ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
+ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
+ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
+diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+index bd592cb..c4e4be3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+@@ -259,7 +259,7 @@ static void iceland_ih_decode_iv(struct amdgpu_device *adev,
+ entry->src_id = dw[0] & 0xff;
+ entry->src_data[0] = dw[1] & 0xfffffff;
+ entry->ring_id = dw[2] & 0xff;
+- entry->vm_id = (dw[2] >> 8) & 0xff;
++ entry->vmid = (dw[2] >> 8) & 0xff;
+ entry->pas_id = (dw[2] >> 16) & 0xffff;
+
+ /* wptr/rptr are in bytes! */
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+index b069b90..bd844ed 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+@@ -246,15 +246,13 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+ */
+ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib,
+- unsigned vm_id, bool ctx_switch)
++ unsigned vmid, bool ctx_switch)
+ {
+- u32 vmid = vm_id & 0xf;
+-
+ /* IB packet must end on a 8 DW boundary */
+ sdma_v2_4_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
+
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
+- SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
++ SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
+ /* base must be 32 byte aligned */
+ amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+@@ -861,14 +859,14 @@ static void sdma_v2_4_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+ * using sDMA (VI).
+ */
+ static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
+- unsigned vm_id, uint64_t pd_addr)
++ unsigned vmid, uint64_t pd_addr)
+ {
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+ SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+- if (vm_id < 8) {
+- amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
++ if (vmid < 8) {
++ amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
+ } else {
+- amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
++ amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8));
+ }
+ amdgpu_ring_write(ring, pd_addr >> 12);
+
+@@ -876,7 +874,7 @@ static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+ SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+ amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
+- amdgpu_ring_write(ring, 1 << vm_id);
++ amdgpu_ring_write(ring, 1 << vmid);
+
+ /* wait for flush */
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+index c3e0e61..fa63c56 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+@@ -417,15 +417,13 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+ */
+ static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib,
+- unsigned vm_id, bool ctx_switch)
++ unsigned vmid, bool ctx_switch)
+ {
+- u32 vmid = vm_id & 0xf;
+-
+ /* IB packet must end on a 8 DW boundary */
+ sdma_v3_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
+
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
+- SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
++ SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
+ /* base must be 32 byte aligned */
+ amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+@@ -1127,14 +1125,14 @@ static void sdma_v3_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+ * using sDMA (VI).
+ */
+ static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+- unsigned vm_id, uint64_t pd_addr)
++ unsigned vmid, uint64_t pd_addr)
+ {
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+ SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+- if (vm_id < 8) {
+- amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
++ if (vmid < 8) {
++ amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
+ } else {
+- amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
++ amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8));
+ }
+ amdgpu_ring_write(ring, pd_addr >> 12);
+
+@@ -1142,7 +1140,7 @@ static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+ SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+ amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
+- amdgpu_ring_write(ring, 1 << vm_id);
++ amdgpu_ring_write(ring, 1 << vmid);
+
+ /* wait for flush */
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+index 43c0f7b..3dc9e59 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+@@ -330,15 +330,13 @@ static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+ */
+ static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib,
+- unsigned vm_id, bool ctx_switch)
++ unsigned vmid, bool ctx_switch)
+ {
+- u32 vmid = vm_id & 0xf;
+-
+ /* IB packet must end on a 8 DW boundary */
+ sdma_v4_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
+
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
+- SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
++ SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
+ /* base must be 32 byte aligned */
+ amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+@@ -1135,10 +1133,10 @@ static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+ * using sDMA (VEGA10).
+ */
+ static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+- unsigned vm_id, uint64_t pd_addr)
++ unsigned vmid, uint64_t pd_addr)
+ {
+ struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+- uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
++ uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
+ uint64_t flags = AMDGPU_PTE_VALID;
+ unsigned eng = ring->vm_inv_eng;
+
+@@ -1147,12 +1145,12 @@ static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+ SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+- amdgpu_ring_write(ring, hub->ctx0_ptb_addr_lo32 + vm_id * 2);
++ amdgpu_ring_write(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2);
+ amdgpu_ring_write(ring, lower_32_bits(pd_addr));
+
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+ SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+- amdgpu_ring_write(ring, hub->ctx0_ptb_addr_hi32 + vm_id * 2);
++ amdgpu_ring_write(ring, hub->ctx0_ptb_addr_hi32 + vmid * 2);
+ amdgpu_ring_write(ring, upper_32_bits(pd_addr));
+
+ /* flush TLB */
+@@ -1167,8 +1165,8 @@ static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
+ amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
+ amdgpu_ring_write(ring, 0);
+- amdgpu_ring_write(ring, 1 << vm_id); /* reference */
+- amdgpu_ring_write(ring, 1 << vm_id); /* mask */
++ amdgpu_ring_write(ring, 1 << vmid); /* reference */
++ amdgpu_ring_write(ring, 1 << vmid); /* mask */
+ amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
+ SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
+index 9adca5d..9a29c13 100644
+--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
++++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
+@@ -61,14 +61,14 @@ static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
+
+ static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib,
+- unsigned vm_id, bool ctx_switch)
++ unsigned vmid, bool ctx_switch)
+ {
+ /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+ * Pad as necessary with NOPs.
+ */
+ while ((lower_32_bits(ring->wptr) & 7) != 5)
+ amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
+- amdgpu_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vm_id, 0));
++ amdgpu_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vmid, 0));
+ amdgpu_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+ amdgpu_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+
+@@ -473,25 +473,25 @@ static void si_dma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+ * using sDMA (VI).
+ */
+ static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
+- unsigned vm_id, uint64_t pd_addr)
++ unsigned vmid, uint64_t pd_addr)
+ {
+ amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+- if (vm_id < 8)
+- amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
++ if (vmid < 8)
++ amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
+ else
+- amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vm_id - 8)));
++ amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8)));
+ amdgpu_ring_write(ring, pd_addr >> 12);
+
+ /* bits 0-7 are the VM contexts0-7 */
+ amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+ amdgpu_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST));
+- amdgpu_ring_write(ring, 1 << vm_id);
++ amdgpu_ring_write(ring, 1 << vmid);
+
+ /* wait for invalidate to complete */
+ amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
+ amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
+ amdgpu_ring_write(ring, 0xff << 16); /* retry */
+- amdgpu_ring_write(ring, 1 << vm_id); /* mask */
++ amdgpu_ring_write(ring, 1 << vmid); /* mask */
+ amdgpu_ring_write(ring, 0); /* value */
+ amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
+index d2c6b80..60dad63 100644
+--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
+@@ -146,7 +146,7 @@ static void si_ih_decode_iv(struct amdgpu_device *adev,
+ entry->src_id = dw[0] & 0xff;
+ entry->src_data[0] = dw[1] & 0xfffffff;
+ entry->ring_id = dw[2] & 0xff;
+- entry->vm_id = (dw[2] >> 8) & 0xff;
++ entry->vmid = (dw[2] >> 8) & 0xff;
+
+ adev->irq.ih.rptr += 16;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+index aa4e320..5995ffc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+@@ -270,7 +270,7 @@ static void tonga_ih_decode_iv(struct amdgpu_device *adev,
+ entry->src_id = dw[0] & 0xff;
+ entry->src_data[0] = dw[1] & 0xfffffff;
+ entry->ring_id = dw[2] & 0xff;
+- entry->vm_id = (dw[2] >> 8) & 0xff;
++ entry->vmid = (dw[2] >> 8) & 0xff;
+ entry->pas_id = (dw[2] >> 16) & 0xffff;
+
+ /* wptr/rptr are in bytes! */
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+index b13ae34..8ab10c2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+@@ -541,7 +541,7 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
+ */
+ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib,
+- unsigned vm_id, bool ctx_switch)
++ unsigned vmid, bool ctx_switch)
+ {
+ amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
+ amdgpu_ring_write(ring, ib->gpu_addr);
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+index a4b0f1d..c1fe30c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+@@ -556,7 +556,7 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
+ */
+ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib,
+- unsigned vm_id, bool ctx_switch)
++ unsigned vmid, bool ctx_switch)
+ {
+ amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
+ amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+index 2d4159f..b31c333 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+@@ -1028,10 +1028,10 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
+ */
+ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib,
+- unsigned vm_id, bool ctx_switch)
++ unsigned vmid, bool ctx_switch)
+ {
+ amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
+- amdgpu_ring_write(ring, vm_id);
++ amdgpu_ring_write(ring, vmid);
+
+ amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
+ amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
+@@ -1050,24 +1050,24 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
+ * Write enc ring commands to execute the indirect buffer
+ */
+ static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
+- struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
++ struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+ {
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
+- amdgpu_ring_write(ring, vm_id);
++ amdgpu_ring_write(ring, vmid);
+ amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+ amdgpu_ring_write(ring, ib->length_dw);
+ }
+
+ static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+- unsigned vm_id, uint64_t pd_addr)
++ unsigned vmid, uint64_t pd_addr)
+ {
+ uint32_t reg;
+
+- if (vm_id < 8)
+- reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id;
++ if (vmid < 8)
++ reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
+ else
+- reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8;
++ reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
+
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
+ amdgpu_ring_write(ring, reg << 2);
+@@ -1079,7 +1079,7 @@ static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
+ amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
+- amdgpu_ring_write(ring, 1 << vm_id);
++ amdgpu_ring_write(ring, 1 << vmid);
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
+ amdgpu_ring_write(ring, 0x8);
+
+@@ -1088,7 +1088,7 @@ static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
+- amdgpu_ring_write(ring, 1 << vm_id); /* mask */
++ amdgpu_ring_write(ring, 1 << vmid); /* mask */
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
+ amdgpu_ring_write(ring, 0xC);
+ }
+@@ -1127,14 +1127,14 @@ static void uvd_v6_0_enc_ring_insert_end(struct amdgpu_ring *ring)
+ }
+
+ static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
+- unsigned int vm_id, uint64_t pd_addr)
++ unsigned int vmid, uint64_t pd_addr)
+ {
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_UPDATE_PTB);
+- amdgpu_ring_write(ring, vm_id);
++ amdgpu_ring_write(ring, vmid);
+ amdgpu_ring_write(ring, pd_addr >> 12);
+
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_FLUSH_TLB);
+- amdgpu_ring_write(ring, vm_id);
++ amdgpu_ring_write(ring, vmid);
+ }
+
+ static bool uvd_v6_0_is_idle(void *handle)
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+index 4ec4447..6b95f4f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+@@ -1218,13 +1218,13 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
+ */
+ static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib,
+- unsigned vm_id, bool ctx_switch)
++ unsigned vmid, bool ctx_switch)
+ {
+ struct amdgpu_device *adev = ring->adev;
+
+ amdgpu_ring_write(ring,
+ PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
+- amdgpu_ring_write(ring, vm_id);
++ amdgpu_ring_write(ring, vmid);
+
+ amdgpu_ring_write(ring,
+ PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
+@@ -1246,10 +1246,10 @@ static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
+ * Write enc ring commands to execute the indirect buffer
+ */
+ static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
+- struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
++ struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+ {
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
+- amdgpu_ring_write(ring, vm_id);
++ amdgpu_ring_write(ring, vmid);
+ amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+ amdgpu_ring_write(ring, ib->length_dw);
+@@ -1291,10 +1291,10 @@ static void uvd_v7_0_vm_reg_wait(struct amdgpu_ring *ring,
+ }
+
+ static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+- unsigned vm_id, uint64_t pd_addr)
++ unsigned vmid, uint64_t pd_addr)
+ {
+ struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+- uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
++ uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
+ uint64_t flags = AMDGPU_PTE_VALID;
+ unsigned eng = ring->vm_inv_eng;
+ uint32_t data0, data1, mask;
+@@ -1302,15 +1302,15 @@ static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
+ pd_addr |= flags;
+
+- data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
++ data0 = (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2;
+ data1 = upper_32_bits(pd_addr);
+ uvd_v7_0_vm_reg_write(ring, data0, data1);
+
+- data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
++ data0 = (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2;
+ data1 = lower_32_bits(pd_addr);
+ uvd_v7_0_vm_reg_write(ring, data0, data1);
+
+- data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
++ data0 = (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2;
+ data1 = lower_32_bits(pd_addr);
+ mask = 0xffffffff;
+ uvd_v7_0_vm_reg_wait(ring, data0, data1, mask);
+@@ -1322,8 +1322,8 @@ static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+
+ /* wait for flush */
+ data0 = (hub->vm_inv_eng0_ack + eng) << 2;
+- data1 = 1 << vm_id;
+- mask = 1 << vm_id;
++ data1 = 1 << vmid;
++ mask = 1 << vmid;
+ uvd_v7_0_vm_reg_wait(ring, data0, data1, mask);
+ }
+
+@@ -1343,10 +1343,10 @@ static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
+ }
+
+ static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
+- unsigned int vm_id, uint64_t pd_addr)
++ unsigned int vmid, uint64_t pd_addr)
+ {
+ struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+- uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
++ uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
+ uint64_t flags = AMDGPU_PTE_VALID;
+ unsigned eng = ring->vm_inv_eng;
+
+@@ -1354,15 +1354,15 @@ static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ pd_addr |= flags;
+
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
+- amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
++ amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2);
+ amdgpu_ring_write(ring, upper_32_bits(pd_addr));
+
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
+- amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
++ amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2);
+ amdgpu_ring_write(ring, lower_32_bits(pd_addr));
+
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
+- amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
++ amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2);
+ amdgpu_ring_write(ring, 0xffffffff);
+ amdgpu_ring_write(ring, lower_32_bits(pd_addr));
+
+@@ -1374,8 +1374,8 @@ static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ /* wait for flush */
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
+ amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
+- amdgpu_ring_write(ring, 1 << vm_id);
+- amdgpu_ring_write(ring, 1 << vm_id);
++ amdgpu_ring_write(ring, 1 << vmid);
++ amdgpu_ring_write(ring, 1 << vmid);
+ }
+
+ #if 0
+diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+index 242dfb1..bb5ec09 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+@@ -834,24 +834,24 @@ static void vce_v3_0_get_clockgating_state(void *handle, u32 *flags)
+ }
+
+ static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
+- struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
++ struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+ {
+ amdgpu_ring_write(ring, VCE_CMD_IB_VM);
+- amdgpu_ring_write(ring, vm_id);
++ amdgpu_ring_write(ring, vmid);
+ amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+ amdgpu_ring_write(ring, ib->length_dw);
+ }
+
+ static void vce_v3_0_emit_vm_flush(struct amdgpu_ring *ring,
+- unsigned int vm_id, uint64_t pd_addr)
++ unsigned int vmid, uint64_t pd_addr)
+ {
+ amdgpu_ring_write(ring, VCE_CMD_UPDATE_PTB);
+- amdgpu_ring_write(ring, vm_id);
++ amdgpu_ring_write(ring, vmid);
+ amdgpu_ring_write(ring, pd_addr >> 12);
+
+ amdgpu_ring_write(ring, VCE_CMD_FLUSH_TLB);
+- amdgpu_ring_write(ring, vm_id);
++ amdgpu_ring_write(ring, vmid);
+ amdgpu_ring_write(ring, VCE_CMD_END);
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+index 308949d..7cf2eef 100755
+--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+@@ -938,10 +938,10 @@ static int vce_v4_0_set_powergating_state(void *handle,
+ #endif
+
+ static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
+- struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
++ struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+ {
+ amdgpu_ring_write(ring, VCE_CMD_IB_VM);
+- amdgpu_ring_write(ring, vm_id);
++ amdgpu_ring_write(ring, vmid);
+ amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+ amdgpu_ring_write(ring, ib->length_dw);
+@@ -965,10 +965,10 @@ static void vce_v4_0_ring_insert_end(struct amdgpu_ring *ring)
+ }
+
+ static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
+- unsigned int vm_id, uint64_t pd_addr)
++ unsigned int vmid, uint64_t pd_addr)
+ {
+ struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+- uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
++ uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
+ uint64_t flags = AMDGPU_PTE_VALID;
+ unsigned eng = ring->vm_inv_eng;
+
+@@ -976,15 +976,15 @@ static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
+ pd_addr |= flags;
+
+ amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
+- amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
++ amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2);
+ amdgpu_ring_write(ring, upper_32_bits(pd_addr));
+
+ amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
+- amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
++ amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2);
+ amdgpu_ring_write(ring, lower_32_bits(pd_addr));
+
+ amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
+- amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
++ amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2);
+ amdgpu_ring_write(ring, 0xffffffff);
+ amdgpu_ring_write(ring, lower_32_bits(pd_addr));
+
+@@ -996,8 +996,8 @@ static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
+ /* wait for flush */
+ amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
+ amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
+- amdgpu_ring_write(ring, 1 << vm_id);
+- amdgpu_ring_write(ring, 1 << vm_id);
++ amdgpu_ring_write(ring, 1 << vmid);
++ amdgpu_ring_write(ring, 1 << vmid);
+ }
+
+ static int vce_v4_0_set_interrupt_state(struct amdgpu_device *adev,
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index c76e51c..23dbb1e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -855,13 +855,13 @@ static void vcn_v1_0_dec_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+ */
+ static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib,
+- unsigned vm_id, bool ctx_switch)
++ unsigned vmid, bool ctx_switch)
+ {
+ struct amdgpu_device *adev = ring->adev;
+
+ amdgpu_ring_write(ring,
+ PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
+- amdgpu_ring_write(ring, vm_id);
++ amdgpu_ring_write(ring, vmid);
+
+ amdgpu_ring_write(ring,
+ PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
+@@ -910,10 +910,10 @@ static void vcn_v1_0_dec_vm_reg_wait(struct amdgpu_ring *ring,
+ }
+
+ static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
+- unsigned vm_id, uint64_t pd_addr)
++ unsigned vmid, uint64_t pd_addr)
+ {
+ struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+- uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
++ uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
+ uint64_t flags = AMDGPU_PTE_VALID;
+ unsigned eng = ring->vm_inv_eng;
+ uint32_t data0, data1, mask;
+@@ -921,15 +921,15 @@ static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
+ pd_addr |= flags;
+
+- data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
++ data0 = (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2;
+ data1 = upper_32_bits(pd_addr);
+ vcn_v1_0_dec_vm_reg_write(ring, data0, data1);
+
+- data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
++ data0 = (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2;
+ data1 = lower_32_bits(pd_addr);
+ vcn_v1_0_dec_vm_reg_write(ring, data0, data1);
+
+- data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
++ data0 = (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2;
+ data1 = lower_32_bits(pd_addr);
+ mask = 0xffffffff;
+ vcn_v1_0_dec_vm_reg_wait(ring, data0, data1, mask);
+@@ -941,8 +941,8 @@ static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
+
+ /* wait for flush */
+ data0 = (hub->vm_inv_eng0_ack + eng) << 2;
+- data1 = 1 << vm_id;
+- mask = 1 << vm_id;
++ data1 = 1 << vmid;
++ mask = 1 << vmid;
+ vcn_v1_0_dec_vm_reg_wait(ring, data0, data1, mask);
+ }
+
+@@ -1033,20 +1033,20 @@ static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring)
+ * Write enc ring commands to execute the indirect buffer
+ */
+ static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
+- struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
++ struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+ {
+ amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
+- amdgpu_ring_write(ring, vm_id);
++ amdgpu_ring_write(ring, vmid);
+ amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+ amdgpu_ring_write(ring, ib->length_dw);
+ }
+
+ static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
+- unsigned int vm_id, uint64_t pd_addr)
++ unsigned int vmid, uint64_t pd_addr)
+ {
+ struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+- uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
++ uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
+ uint64_t flags = AMDGPU_PTE_VALID;
+ unsigned eng = ring->vm_inv_eng;
+
+@@ -1055,17 +1055,17 @@ static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
+
+ amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
+ amdgpu_ring_write(ring,
+- (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
++ (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2);
+ amdgpu_ring_write(ring, upper_32_bits(pd_addr));
+
+ amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
+ amdgpu_ring_write(ring,
+- (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
++ (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2);
+ amdgpu_ring_write(ring, lower_32_bits(pd_addr));
+
+ amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
+ amdgpu_ring_write(ring,
+- (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
++ (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2);
+ amdgpu_ring_write(ring, 0xffffffff);
+ amdgpu_ring_write(ring, lower_32_bits(pd_addr));
+
+@@ -1077,8 +1077,8 @@ static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ /* wait for flush */
+ amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
+ amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
+- amdgpu_ring_write(ring, 1 << vm_id);
+- amdgpu_ring_write(ring, 1 << vm_id);
++ amdgpu_ring_write(ring, 1 << vmid);
++ amdgpu_ring_write(ring, 1 << vmid);
+ }
+
+ static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
+diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+index e1d7dae..b69ceaf 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+@@ -327,8 +327,8 @@ static void vega10_ih_decode_iv(struct amdgpu_device *adev,
+ entry->client_id = dw[0] & 0xff;
+ entry->src_id = (dw[0] >> 8) & 0xff;
+ entry->ring_id = (dw[0] >> 16) & 0xff;
+- entry->vm_id = (dw[0] >> 24) & 0xf;
+- entry->vm_id_src = (dw[0] >> 31);
++ entry->vmid = (dw[0] >> 24) & 0xf;
++ entry->vmid_src = (dw[0] >> 31);
+ entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32);
+ entry->timestamp_src = dw[2] >> 31;
+ entry->pas_id = dw[3] & 0xffff;
+--
+2.7.4
+