Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3033-Revert-drm-amdgpu-separate-VMID-and-PASID-handling.patch')
-rw-r--r--  meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3033-Revert-drm-amdgpu-separate-VMID-and-PASID-handling.patch  1335
1 file changed, 1335 insertions(+), 0 deletions(-)
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3033-Revert-drm-amdgpu-separate-VMID-and-PASID-handling.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3033-Revert-drm-amdgpu-separate-VMID-and-PASID-handling.patch
new file mode 100644
index 00000000..3bb155da
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3033-Revert-drm-amdgpu-separate-VMID-and-PASID-handling.patch
@@ -0,0 +1,1335 @@
+From e646a9d271fdc6c6ea1c5fac4c7744e4f497954b Mon Sep 17 00:00:00 2001
+From: Kevin Wang <Kevin1.Wang@amd.com>
+Date: Fri, 29 Dec 2017 15:23:42 +0800
+Subject: [PATCH 3033/4131] Revert "drm/amdgpu: separate VMID and PASID
+ handling"
+
+This reverts commit 50ca178008bdc043cd47265162cf1f2ea5eeb994.
+
+Reason: Re-cherry-pick this commit to fix a compile error
+
+Change-Id: I341bbd9c5729694edf1b4818372db9761669931b
+Signed-off-by: Kevin Wang <Kevin1.Wang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/Makefile | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 4 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 4 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 3 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 463 ----------------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h | 91 -----
+ drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 6 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 428 +++++++++++++++++++-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 44 +-
+ drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 2 +-
+ 13 files changed, 470 insertions(+), 583 deletions(-)
+ delete mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+ delete mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
+index b209618..da46ce1 100755
+--- a/drivers/gpu/drm/amd/amdgpu/Makefile
++++ b/drivers/gpu/drm/amd/amdgpu/Makefile
+@@ -33,7 +33,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
+ amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
+ amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
+ amdgpu_queue_mgr.o amdgpu_vf_error.o amdgpu_sched.o amdgpu_sem.o amdgpu_amdkfd_fence.o \
+- amdgpu_debugfs.o amdgpu_ids.o
++ amdgpu_debugfs.o
+
+ # add asic specific block
+ amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+index 0aef5dc..c12803d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+@@ -192,8 +192,8 @@ static const struct kfd2kgd_calls kfd2kgd = {
+ .destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
+ .get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
+ .open_graphic_handle = open_graphic_handle,
+- .alloc_pasid = amdgpu_pasid_alloc,
+- .free_pasid = amdgpu_pasid_free,
++ .alloc_pasid = amdgpu_vm_alloc_pasid,
++ .free_pasid = amdgpu_vm_free_pasid,
+ .program_sh_mem_settings = kgd_program_sh_mem_settings,
+ .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+ .init_pipeline = kgd_init_pipeline,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+index a560f9a..83f9196 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+@@ -166,8 +166,8 @@ static const struct kfd2kgd_calls kfd2kgd = {
+ .destroy_process_gpumem = destroy_process_gpumem,
+ .get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
+ .open_graphic_handle = open_graphic_handle,
+- .alloc_pasid = amdgpu_pasid_alloc,
+- .free_pasid = amdgpu_pasid_free,
++ .alloc_pasid = amdgpu_vm_alloc_pasid,
++ .free_pasid = amdgpu_vm_free_pasid,
+ .program_sh_mem_settings = kgd_program_sh_mem_settings,
+ .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+ .init_pipeline = kgd_init_pipeline,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+index 6506451..1643897 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+@@ -238,7 +238,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
+ if (r) {
+ dev_err(adev->dev, "failed to emit fence (%d)\n", r);
+ if (job && job->vm_id)
+- amdgpu_vmid_reset(adev, ring->funcs->vmhub, job->vm_id);
++ amdgpu_vm_reset_id(adev, ring->funcs->vmhub,
++ job->vm_id);
+ amdgpu_ring_undo(ring);
+ return r;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+deleted file mode 100644
+index 797f4e9..0000000
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
++++ /dev/null
+@@ -1,463 +0,0 @@
+-/*
+- * Copyright 2017 Advanced Micro Devices, Inc.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included in
+- * all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
+- *
+- */
+-#include "amdgpu_ids.h"
+-
+-#include <linux/idr.h>
+-#include <linux/dma-fence-array.h>
+-#include <drm/drmP.h>
+-
+-#include "amdgpu.h"
+-#include "amdgpu_trace.h"
+-
+-/*
+- * PASID manager
+- *
+- * PASIDs are global address space identifiers that can be shared
+- * between the GPU, an IOMMU and the driver. VMs on different devices
+- * may use the same PASID if they share the same address
+- * space. Therefore PASIDs are allocated using a global IDA. VMs are
+- * looked up from the PASID per amdgpu_device.
+- */
+-#if !defined(OS_NAME_RHEL_6)
+-static DEFINE_IDA(amdgpu_vm_pasid_ida);
+-#else
+-static DEFINE_IDA2(amdgpu_vm_pasid_ida);
+-#endif
+-
+-/**
+- * amdgpu_pasid_alloc - Allocate a PASID
+- * @bits: Maximum width of the PASID in bits, must be at least 1
+- *
+- * Allocates a PASID of the given width while keeping smaller PASIDs
+- * available if possible.
+- *
+- * Returns a positive integer on success. Returns %-EINVAL if bits==0.
+- * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
+- * memory allocation failure.
+- */
+-int amdgpu_pasid_alloc(unsigned int bits)
+-{
+- int pasid = -EINVAL;
+-
+- for (bits = min(bits, 31U); bits > 0; bits--) {
+- pasid = ida_simple_get(&amdgpu_pasid_ida,
+- 1U << (bits - 1), 1U << bits,
+- GFP_KERNEL);
+- if (pasid != -ENOSPC)
+- break;
+- }
+-
+- return pasid;
+-}
+-
+-/**
+- * amdgpu_pasid_free - Free a PASID
+- * @pasid: PASID to free
+- */
+-void amdgpu_pasid_free(unsigned int pasid)
+-{
+- ida_simple_remove(&amdgpu_pasid_ida, pasid);
+-}
+-
+-/*
+- * VMID manager
+- *
+- * VMIDs are a per VMHUB identifier for page tables handling.
+- */
+-
+-/**
+- * amdgpu_vmid_had_gpu_reset - check if reset occured since last use
+- *
+- * @adev: amdgpu_device pointer
+- * @id: VMID structure
+- *
+- * Check if GPU reset occured since last use of the VMID.
+- */
+-bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
+- struct amdgpu_vmid *id)
+-{
+- return id->current_gpu_reset_count !=
+- atomic_read(&adev->gpu_reset_counter);
+-}
+-
+-/* idr_mgr->lock must be held */
+-static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
+- struct amdgpu_ring *ring,
+- struct amdgpu_sync *sync,
+- struct dma_fence *fence,
+- struct amdgpu_job *job)
+-{
+- struct amdgpu_device *adev = ring->adev;
+- unsigned vmhub = ring->funcs->vmhub;
+- uint64_t fence_context = adev->fence_context + ring->idx;
+- struct amdgpu_vmid *id = vm->reserved_vmid[vmhub];
+- struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+- struct dma_fence *updates = sync->last_vm_update;
+- int r = 0;
+- struct dma_fence *flushed, *tmp;
+- bool needs_flush = vm->use_cpu_for_update;
+-
+- flushed = id->flushed_updates;
+- if ((amdgpu_vmid_had_gpu_reset(adev, id)) ||
+- (atomic64_read(&id->owner) != vm->client_id) ||
+- (job->vm_pd_addr != id->pd_gpu_addr) ||
+- (updates && (!flushed || updates->context != flushed->context ||
+- dma_fence_is_later(updates, flushed))) ||
+- (!id->last_flush || (id->last_flush->context != fence_context &&
+- !dma_fence_is_signaled(id->last_flush)))) {
+- needs_flush = true;
+- /* to prevent one context starved by another context */
+- id->pd_gpu_addr = 0;
+- tmp = amdgpu_sync_peek_fence(&id->active, ring);
+- if (tmp) {
+- r = amdgpu_sync_fence(adev, sync, tmp, false);
+- return r;
+- }
+- }
+-
+- /* Good we can use this VMID. Remember this submission as
+- * user of the VMID.
+- */
+- r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
+- if (r)
+- goto out;
+-
+- if (updates && (!flushed || updates->context != flushed->context ||
+- dma_fence_is_later(updates, flushed))) {
+- dma_fence_put(id->flushed_updates);
+- id->flushed_updates = dma_fence_get(updates);
+- }
+- id->pd_gpu_addr = job->vm_pd_addr;
+- atomic64_set(&id->owner, vm->client_id);
+- job->vm_needs_flush = needs_flush;
+- if (needs_flush) {
+- dma_fence_put(id->last_flush);
+- id->last_flush = NULL;
+- }
+- job->vm_id = id - id_mgr->ids;
+- trace_amdgpu_vm_grab_id(vm, ring, job);
+-out:
+- return r;
+-}
+-
+-/**
+- * amdgpu_vm_grab_id - allocate the next free VMID
+- *
+- * @vm: vm to allocate id for
+- * @ring: ring we want to submit job to
+- * @sync: sync object where we add dependencies
+- * @fence: fence protecting ID from reuse
+- *
+- * Allocate an id for the vm, adding fences to the sync obj as necessary.
+- */
+-int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+- struct amdgpu_sync *sync, struct dma_fence *fence,
+- struct amdgpu_job *job)
+-{
+- struct amdgpu_device *adev = ring->adev;
+- unsigned vmhub = ring->funcs->vmhub;
+- struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+- uint64_t fence_context = adev->fence_context + ring->idx;
+- struct dma_fence *updates = sync->last_vm_update;
+- struct amdgpu_vmid *id, *idle;
+- struct dma_fence **fences;
+- unsigned i;
+- int r = 0;
+-
+- mutex_lock(&id_mgr->lock);
+- if (vm->reserved_vmid[vmhub]) {
+- r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync, fence, job);
+- mutex_unlock(&id_mgr->lock);
+- return r;
+- }
+- fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
+- if (!fences) {
+- mutex_unlock(&id_mgr->lock);
+- return -ENOMEM;
+- }
+- /* Check if we have an idle VMID */
+- i = 0;
+- list_for_each_entry(idle, &id_mgr->ids_lru, list) {
+- fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
+- if (!fences[i])
+- break;
+- ++i;
+- }
+-
+- /* If we can't find a idle VMID to use, wait till one becomes available */
+- if (&idle->list == &id_mgr->ids_lru) {
+- u64 fence_context = adev->vm_manager.fence_context + ring->idx;
+- unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
+- struct dma_fence_array *array;
+- unsigned j;
+-
+- for (j = 0; j < i; ++j)
+- dma_fence_get(fences[j]);
+-
+- array = dma_fence_array_create(i, fences, fence_context,
+- seqno, true);
+- if (!array) {
+- for (j = 0; j < i; ++j)
+- dma_fence_put(fences[j]);
+- kfree(fences);
+- r = -ENOMEM;
+- goto error;
+- }
+-
+-
+- r = amdgpu_sync_fence(ring->adev, sync, &array->base, false);
+- dma_fence_put(&array->base);
+- if (r)
+- goto error;
+-
+- mutex_unlock(&id_mgr->lock);
+- return 0;
+-
+- }
+- kfree(fences);
+-
+- job->vm_needs_flush = vm->use_cpu_for_update;
+- /* Check if we can use a VMID already assigned to this VM */
+- list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {
+- struct dma_fence *flushed;
+- bool needs_flush = vm->use_cpu_for_update;
+-
+- /* Check all the prerequisites to using this VMID */
+- if (amdgpu_vmid_had_gpu_reset(adev, id))
+- continue;
+-
+- if (atomic64_read(&id->owner) != vm->client_id)
+- continue;
+-
+- if (job->vm_pd_addr != id->pd_gpu_addr)
+- continue;
+-
+- if (!id->last_flush ||
+- (id->last_flush->context != fence_context &&
+- !dma_fence_is_signaled(id->last_flush)))
+- needs_flush = true;
+-
+- flushed = id->flushed_updates;
+- if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
+- needs_flush = true;
+-
+- /* Concurrent flushes are only possible starting with Vega10 */
+- if (adev->asic_type < CHIP_VEGA10 && needs_flush)
+- continue;
+-
+- /* Good we can use this VMID. Remember this submission as
+- * user of the VMID.
+- */
+- r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
+- if (r)
+- goto error;
+-
+- if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
+- dma_fence_put(id->flushed_updates);
+- id->flushed_updates = dma_fence_get(updates);
+- }
+-
+- if (needs_flush)
+- goto needs_flush;
+- else
+- goto no_flush_needed;
+-
+- };
+-
+- /* Still no ID to use? Then use the idle one found earlier */
+- id = idle;
+-
+- /* Remember this submission as user of the VMID */
+- r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
+- if (r)
+- goto error;
+-
+- id->pd_gpu_addr = job->vm_pd_addr;
+- dma_fence_put(id->flushed_updates);
+- id->flushed_updates = dma_fence_get(updates);
+- atomic64_set(&id->owner, vm->client_id);
+-
+-needs_flush:
+- job->vm_needs_flush = true;
+- dma_fence_put(id->last_flush);
+- id->last_flush = NULL;
+-
+-no_flush_needed:
+- list_move_tail(&id->list, &id_mgr->ids_lru);
+-
+- job->vm_id = id - id_mgr->ids;
+- trace_amdgpu_vm_grab_id(vm, ring, job);
+-
+-error:
+- mutex_unlock(&id_mgr->lock);
+- return r;
+-}
+-
+-int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
+- struct amdgpu_vm *vm,
+- unsigned vmhub)
+-{
+- struct amdgpu_vmid_mgr *id_mgr;
+- struct amdgpu_vmid *idle;
+- int r = 0;
+-
+- id_mgr = &adev->vm_manager.id_mgr[vmhub];
+- mutex_lock(&id_mgr->lock);
+- if (vm->reserved_vmid[vmhub])
+- goto unlock;
+- if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
+- AMDGPU_VM_MAX_RESERVED_VMID) {
+- DRM_ERROR("Over limitation of reserved vmid\n");
+- atomic_dec(&id_mgr->reserved_vmid_num);
+- r = -EINVAL;
+- goto unlock;
+- }
+- /* Select the first entry VMID */
+- idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
+- list_del_init(&idle->list);
+- vm->reserved_vmid[vmhub] = idle;
+- mutex_unlock(&id_mgr->lock);
+-
+- return 0;
+-unlock:
+- mutex_unlock(&id_mgr->lock);
+- return r;
+-}
+-
+-void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
+- struct amdgpu_vm *vm,
+- unsigned vmhub)
+-{
+- struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+-
+- mutex_lock(&id_mgr->lock);
+- if (vm->reserved_vmid[vmhub]) {
+- list_add(&vm->reserved_vmid[vmhub]->list,
+- &id_mgr->ids_lru);
+- vm->reserved_vmid[vmhub] = NULL;
+- atomic_dec(&id_mgr->reserved_vmid_num);
+- }
+- mutex_unlock(&id_mgr->lock);
+-}
+-
+-/**
+- * amdgpu_vmid_reset - reset VMID to zero
+- *
+- * @adev: amdgpu device structure
+- * @vm_id: vmid number to use
+- *
+- * Reset saved GDW, GWS and OA to force switch on next flush.
+- */
+-void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
+- unsigned vmid)
+-{
+- struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+- struct amdgpu_vmid *id = &id_mgr->ids[vmid];
+-
+- atomic64_set(&id->owner, 0);
+- id->gds_base = 0;
+- id->gds_size = 0;
+- id->gws_base = 0;
+- id->gws_size = 0;
+- id->oa_base = 0;
+- id->oa_size = 0;
+-}
+-
+-/**
+- * amdgpu_vmid_reset_all - reset VMID to zero
+- *
+- * @adev: amdgpu device structure
+- *
+- * Reset VMID to force flush on next use
+- */
+-void amdgpu_vmid_reset_all(struct amdgpu_device *adev)
+-{
+- unsigned i, j;
+-
+- for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
+- struct amdgpu_vmid_mgr *id_mgr =
+- &adev->vm_manager.id_mgr[i];
+-
+- for (j = 1; j < id_mgr->num_ids; ++j)
+- amdgpu_vmid_reset(adev, i, j);
+- }
+-}
+-
+-/**
+- * amdgpu_vmid_mgr_init - init the VMID manager
+- *
+- * @adev: amdgpu_device pointer
+- *
+- * Initialize the VM manager structures
+- */
+-void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
+-{
+- unsigned i, j;
+-
+- for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
+- struct amdgpu_vmid_mgr *id_mgr =
+- &adev->vm_manager.id_mgr[i];
+-
+- mutex_init(&id_mgr->lock);
+- INIT_LIST_HEAD(&id_mgr->ids_lru);
+- atomic_set(&id_mgr->reserved_vmid_num, 0);
+-
+- /* skip over VMID 0, since it is the system VM */
+- for (j = 1; j < id_mgr->num_ids; ++j) {
+- amdgpu_vmid_reset(adev, i, j);
+- amdgpu_sync_create(&id_mgr->ids[i].active);
+- list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
+- }
+- }
+-
+- adev->vm_manager.fence_context =
+- dma_fence_context_alloc(AMDGPU_MAX_RINGS);
+- for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+- adev->vm_manager.seqno[i] = 0;
+-}
+-
+-/**
+- * amdgpu_vmid_mgr_fini - cleanup VM manager
+- *
+- * @adev: amdgpu_device pointer
+- *
+- * Cleanup the VM manager and free resources.
+- */
+-void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
+-{
+- unsigned i, j;
+-
+- for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
+- struct amdgpu_vmid_mgr *id_mgr =
+- &adev->vm_manager.id_mgr[i];
+-
+- mutex_destroy(&id_mgr->lock);
+- for (j = 0; j < AMDGPU_NUM_VMID; ++j) {
+- struct amdgpu_vmid *id = &id_mgr->ids[j];
+-
+- amdgpu_sync_free(&id->active);
+- dma_fence_put(id->flushed_updates);
+- dma_fence_put(id->last_flush);
+- }
+- }
+-}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+deleted file mode 100644
+index ad931fa..0000000
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
++++ /dev/null
+@@ -1,91 +0,0 @@
+-/*
+- * Copyright 2017 Advanced Micro Devices, Inc.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included in
+- * all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
+- *
+- */
+-#ifndef __AMDGPU_IDS_H__
+-#define __AMDGPU_IDS_H__
+-
+-#include <linux/types.h>
+-#include <linux/mutex.h>
+-#include <linux/list.h>
+-#include <linux/dma-fence.h>
+-
+-#include "amdgpu_sync.h"
+-
+-/* maximum number of VMIDs */
+-#define AMDGPU_NUM_VMID 16
+-
+-struct amdgpu_device;
+-struct amdgpu_vm;
+-struct amdgpu_ring;
+-struct amdgpu_sync;
+-struct amdgpu_job;
+-
+-struct amdgpu_vmid {
+- struct list_head list;
+- struct amdgpu_sync active;
+- struct dma_fence *last_flush;
+- atomic64_t owner;
+-
+- uint64_t pd_gpu_addr;
+- /* last flushed PD/PT update */
+- struct dma_fence *flushed_updates;
+-
+- uint32_t current_gpu_reset_count;
+-
+- uint32_t gds_base;
+- uint32_t gds_size;
+- uint32_t gws_base;
+- uint32_t gws_size;
+- uint32_t oa_base;
+- uint32_t oa_size;
+-};
+-
+-struct amdgpu_vmid_mgr {
+- struct mutex lock;
+- unsigned num_ids;
+- struct list_head ids_lru;
+- struct amdgpu_vmid ids[AMDGPU_NUM_VMID];
+- atomic_t reserved_vmid_num;
+-};
+-
+-int amdgpu_pasid_alloc(unsigned int bits);
+-void amdgpu_pasid_free(unsigned int pasid);
+-
+-bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
+- struct amdgpu_vmid *id);
+-int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
+- struct amdgpu_vm *vm,
+- unsigned vmhub);
+-void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
+- struct amdgpu_vm *vm,
+- unsigned vmhub);
+-int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+- struct amdgpu_sync *sync, struct dma_fence *fence,
+- struct amdgpu_job *job);
+-void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
+- unsigned vmid);
+-void amdgpu_vmid_reset_all(struct amdgpu_device *adev);
+-
+-void amdgpu_vmid_mgr_init(struct amdgpu_device *adev);
+-void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev);
+-
+-#endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+index 2e2341a..a872504 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+@@ -161,9 +161,9 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
+ while (fence == NULL && vm && !job->vm_id) {
+ struct amdgpu_ring *ring = job->ring;
+
+- r = amdgpu_vmid_grab(vm, ring, &job->sync,
+- &job->base.s_fence->finished,
+- job);
++ r = amdgpu_vm_grab_id(vm, ring, &job->sync,
++ &job->base.s_fence->finished,
++ job);
+ if (r)
+ DRM_ERROR("Error getting VM ID (%d)\n", r);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 9f16308..ee5a935 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -34,6 +34,56 @@
+ #include "amdgpu_trace.h"
+
+ /*
++ * PASID manager
++ *
++ * PASIDs are global address space identifiers that can be shared
++ * between the GPU, an IOMMU and the driver. VMs on different devices
++ * may use the same PASID if they share the same address
++ * space. Therefore PASIDs are allocated using a global IDA. VMs are
++ * looked up from the PASID per amdgpu_device.
++ */
++#if !defined(OS_NAME_RHEL_6)
++static DEFINE_IDA(amdgpu_vm_pasid_ida);
++#else
++static DEFINE_IDA2(amdgpu_vm_pasid_ida);
++#endif
++
++/**
++ * amdgpu_vm_alloc_pasid - Allocate a PASID
++ * @bits: Maximum width of the PASID in bits, must be at least 1
++ *
++ * Allocates a PASID of the given width while keeping smaller PASIDs
++ * available if possible.
++ *
++ * Returns a positive integer on success. Returns %-EINVAL if bits==0.
++ * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
++ * memory allocation failure.
++ */
++int amdgpu_vm_alloc_pasid(unsigned int bits)
++{
++ int pasid = -EINVAL;
++
++ for (bits = min(bits, 31U); bits > 0; bits--) {
++ pasid = ida_simple_get(&amdgpu_vm_pasid_ida,
++ 1U << (bits - 1), 1U << bits,
++ GFP_KERNEL);
++ if (pasid != -ENOSPC)
++ break;
++ }
++
++ return pasid;
++}
++
++/**
++ * amdgpu_vm_free_pasid - Free a PASID
++ * @pasid: PASID to free
++ */
++void amdgpu_vm_free_pasid(unsigned int pasid)
++{
++ ida_simple_remove(&amdgpu_vm_pasid_ida, pasid);
++}
++
++/*
+ * GPUVM
+ * GPUVM is similar to the legacy gart on older asics, however
+ * rather than there being a single global gart table
+@@ -407,6 +457,286 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
+ }
+
+ /**
++ * amdgpu_vm_had_gpu_reset - check if reset occured since last use
++ *
++ * @adev: amdgpu_device pointer
++ * @id: VMID structure
++ *
++ * Check if GPU reset occured since last use of the VMID.
++ */
++static bool amdgpu_vm_had_gpu_reset(struct amdgpu_device *adev,
++ struct amdgpu_vm_id *id)
++{
++ return id->current_gpu_reset_count !=
++ atomic_read(&adev->gpu_reset_counter);
++}
++
++static bool amdgpu_vm_reserved_vmid_ready(struct amdgpu_vm *vm, unsigned vmhub)
++{
++ return !!vm->reserved_vmid[vmhub];
++}
++
++/* idr_mgr->lock must be held */
++static int amdgpu_vm_grab_reserved_vmid_locked(struct amdgpu_vm *vm,
++ struct amdgpu_ring *ring,
++ struct amdgpu_sync *sync,
++ struct dma_fence *fence,
++ struct amdgpu_job *job)
++{
++ struct amdgpu_device *adev = ring->adev;
++ unsigned vmhub = ring->funcs->vmhub;
++ uint64_t fence_context = adev->fence_context + ring->idx;
++ struct amdgpu_vm_id *id = vm->reserved_vmid[vmhub];
++ struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
++ struct dma_fence *updates = sync->last_vm_update;
++ int r = 0;
++ struct dma_fence *flushed, *tmp;
++ bool needs_flush = vm->use_cpu_for_update;
++
++ flushed = id->flushed_updates;
++ if ((amdgpu_vm_had_gpu_reset(adev, id)) ||
++ (atomic64_read(&id->owner) != vm->client_id) ||
++ (job->vm_pd_addr != id->pd_gpu_addr) ||
++ (updates && (!flushed || updates->context != flushed->context ||
++ dma_fence_is_later(updates, flushed))) ||
++ (!id->last_flush || (id->last_flush->context != fence_context &&
++ !dma_fence_is_signaled(id->last_flush)))) {
++ needs_flush = true;
++ /* to prevent one context starved by another context */
++ id->pd_gpu_addr = 0;
++ tmp = amdgpu_sync_peek_fence(&id->active, ring);
++ if (tmp) {
++ r = amdgpu_sync_fence(adev, sync, tmp, false);
++ return r;
++ }
++ }
++
++ /* Good we can use this VMID. Remember this submission as
++ * user of the VMID.
++ */
++ r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
++ if (r)
++ goto out;
++
++ if (updates && (!flushed || updates->context != flushed->context ||
++ dma_fence_is_later(updates, flushed))) {
++ dma_fence_put(id->flushed_updates);
++ id->flushed_updates = dma_fence_get(updates);
++ }
++ id->pd_gpu_addr = job->vm_pd_addr;
++ atomic64_set(&id->owner, vm->client_id);
++ job->vm_needs_flush = needs_flush;
++ if (needs_flush) {
++ dma_fence_put(id->last_flush);
++ id->last_flush = NULL;
++ }
++ job->vm_id = id - id_mgr->ids;
++ trace_amdgpu_vm_grab_id(vm, ring, job);
++out:
++ return r;
++}
++
++/**
++ * amdgpu_vm_grab_id - allocate the next free VMID
++ *
++ * @vm: vm to allocate id for
++ * @ring: ring we want to submit job to
++ * @sync: sync object where we add dependencies
++ * @fence: fence protecting ID from reuse
++ *
++ * Allocate an id for the vm, adding fences to the sync obj as necessary.
++ */
++int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
++ struct amdgpu_sync *sync, struct dma_fence *fence,
++ struct amdgpu_job *job)
++{
++ struct amdgpu_device *adev = ring->adev;
++ unsigned vmhub = ring->funcs->vmhub;
++ struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
++ uint64_t fence_context = adev->fence_context + ring->idx;
++ struct dma_fence *updates = sync->last_vm_update;
++ struct amdgpu_vm_id *id, *idle;
++ struct dma_fence **fences;
++ unsigned i;
++ int r = 0;
++
++ mutex_lock(&id_mgr->lock);
++ if (amdgpu_vm_reserved_vmid_ready(vm, vmhub)) {
++ r = amdgpu_vm_grab_reserved_vmid_locked(vm, ring, sync, fence, job);
++ mutex_unlock(&id_mgr->lock);
++ return r;
++ }
++ fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
++ if (!fences) {
++ mutex_unlock(&id_mgr->lock);
++ return -ENOMEM;
++ }
++ /* Check if we have an idle VMID */
++ i = 0;
++ list_for_each_entry(idle, &id_mgr->ids_lru, list) {
++ fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
++ if (!fences[i])
++ break;
++ ++i;
++ }
++
++ /* If we can't find a idle VMID to use, wait till one becomes available */
++ if (&idle->list == &id_mgr->ids_lru) {
++ u64 fence_context = adev->vm_manager.fence_context + ring->idx;
++ unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
++ struct dma_fence_array *array;
++ unsigned j;
++
++ for (j = 0; j < i; ++j)
++ dma_fence_get(fences[j]);
++
++ array = dma_fence_array_create(i, fences, fence_context,
++ seqno, true);
++ if (!array) {
++ for (j = 0; j < i; ++j)
++ dma_fence_put(fences[j]);
++ kfree(fences);
++ r = -ENOMEM;
++ goto error;
++ }
++
++
++ r = amdgpu_sync_fence(ring->adev, sync, &array->base, false);
++ dma_fence_put(&array->base);
++ if (r)
++ goto error;
++
++ mutex_unlock(&id_mgr->lock);
++ return 0;
++
++ }
++ kfree(fences);
++
++ job->vm_needs_flush = vm->use_cpu_for_update;
++ /* Check if we can use a VMID already assigned to this VM */
++ list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {
++ struct dma_fence *flushed;
++ bool needs_flush = vm->use_cpu_for_update;
++
++ /* Check all the prerequisites to using this VMID */
++ if (amdgpu_vm_had_gpu_reset(adev, id))
++ continue;
++
++ if (atomic64_read(&id->owner) != vm->client_id)
++ continue;
++
++ if (job->vm_pd_addr != id->pd_gpu_addr)
++ continue;
++
++ if (!id->last_flush ||
++ (id->last_flush->context != fence_context &&
++ !dma_fence_is_signaled(id->last_flush)))
++ needs_flush = true;
++
++ flushed = id->flushed_updates;
++ if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
++ needs_flush = true;
++
++ /* Concurrent flushes are only possible starting with Vega10 */
++ if (adev->asic_type < CHIP_VEGA10 && needs_flush)
++ continue;
++
++ /* Good we can use this VMID. Remember this submission as
++ * user of the VMID.
++ */
++ r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
++ if (r)
++ goto error;
++
++ if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
++ dma_fence_put(id->flushed_updates);
++ id->flushed_updates = dma_fence_get(updates);
++ }
++
++ if (needs_flush)
++ goto needs_flush;
++ else
++ goto no_flush_needed;
++
++ };
++
++ /* Still no ID to use? Then use the idle one found earlier */
++ id = idle;
++
++ /* Remember this submission as user of the VMID */
++ r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
++ if (r)
++ goto error;
++
++ id->pd_gpu_addr = job->vm_pd_addr;
++ dma_fence_put(id->flushed_updates);
++ id->flushed_updates = dma_fence_get(updates);
++ atomic64_set(&id->owner, vm->client_id);
++
++needs_flush:
++ job->vm_needs_flush = true;
++ dma_fence_put(id->last_flush);
++ id->last_flush = NULL;
++
++no_flush_needed:
++ list_move_tail(&id->list, &id_mgr->ids_lru);
++
++ job->vm_id = id - id_mgr->ids;
++ trace_amdgpu_vm_grab_id(vm, ring, job);
++
++error:
++ mutex_unlock(&id_mgr->lock);
++ return r;
++}
++
++static void amdgpu_vm_free_reserved_vmid(struct amdgpu_device *adev,
++ struct amdgpu_vm *vm,
++ unsigned vmhub)
++{
++ struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
++
++ mutex_lock(&id_mgr->lock);
++ if (vm->reserved_vmid[vmhub]) {
++ list_add(&vm->reserved_vmid[vmhub]->list,
++ &id_mgr->ids_lru);
++ vm->reserved_vmid[vmhub] = NULL;
++ atomic_dec(&id_mgr->reserved_vmid_num);
++ }
++ mutex_unlock(&id_mgr->lock);
++}
++
++static int amdgpu_vm_alloc_reserved_vmid(struct amdgpu_device *adev,
++ struct amdgpu_vm *vm,
++ unsigned vmhub)
++{
++ struct amdgpu_vm_id_manager *id_mgr;
++ struct amdgpu_vm_id *idle;
++ int r = 0;
++
++ id_mgr = &adev->vm_manager.id_mgr[vmhub];
++ mutex_lock(&id_mgr->lock);
++ if (vm->reserved_vmid[vmhub])
++ goto unlock;
++ if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
++ AMDGPU_VM_MAX_RESERVED_VMID) {
++ DRM_ERROR("Over limitation of reserved vmid\n");
++ atomic_dec(&id_mgr->reserved_vmid_num);
++ r = -EINVAL;
++ goto unlock;
++ }
++ /* Select the first entry VMID */
++ idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vm_id, list);
++ list_del_init(&idle->list);
++ vm->reserved_vmid[vmhub] = idle;
++ mutex_unlock(&id_mgr->lock);
++
++ return 0;
++unlock:
++ mutex_unlock(&id_mgr->lock);
++ return r;
++}
++
++/**
+ * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
+ *
+ * @adev: amdgpu_device pointer
+@@ -446,8 +776,8 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
+ {
+ struct amdgpu_device *adev = ring->adev;
+ unsigned vmhub = ring->funcs->vmhub;
+- struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+- struct amdgpu_vmid *id;
++ struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
++ struct amdgpu_vm_id *id;
+ bool gds_switch_needed;
+ bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
+
+@@ -462,7 +792,7 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
+ id->oa_base != job->oa_base ||
+ id->oa_size != job->oa_size);
+
+- if (amdgpu_vmid_had_gpu_reset(adev, id))
++ if (amdgpu_vm_had_gpu_reset(adev, id))
+ return true;
+
+ return vm_flush_needed || gds_switch_needed;
+@@ -486,8 +816,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
+ {
+ struct amdgpu_device *adev = ring->adev;
+ unsigned vmhub = ring->funcs->vmhub;
+- struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+- struct amdgpu_vmid *id = &id_mgr->ids[job->vm_id];
++ struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
++ struct amdgpu_vm_id *id = &id_mgr->ids[job->vm_id];
+ bool gds_switch_needed = ring->funcs->emit_gds_switch && (
+ id->gds_base != job->gds_base ||
+ id->gds_size != job->gds_size ||
+@@ -499,7 +829,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
+ unsigned patch_offset = 0;
+ int r;
+
+- if (amdgpu_vmid_had_gpu_reset(adev, id)) {
++ if (amdgpu_vm_had_gpu_reset(adev, id)) {
+ gds_switch_needed = true;
+ vm_flush_needed = true;
+ }
+@@ -555,6 +885,49 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
+ }
+
+ /**
++ * amdgpu_vm_reset_id - reset VMID to zero
++ *
++ * @adev: amdgpu device structure
++ * @vm_id: vmid number to use
++ *
++ * Reset saved GDW, GWS and OA to force switch on next flush.
++ */
++void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
++ unsigned vmid)
++{
++ struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
++ struct amdgpu_vm_id *id = &id_mgr->ids[vmid];
++
++ atomic64_set(&id->owner, 0);
++ id->gds_base = 0;
++ id->gds_size = 0;
++ id->gws_base = 0;
++ id->gws_size = 0;
++ id->oa_base = 0;
++ id->oa_size = 0;
++}
++
++/**
++ * amdgpu_vm_reset_all_id - reset VMID to zero
++ *
++ * @adev: amdgpu device structure
++ *
++ * Reset VMID to force flush on next use
++ */
++void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev)
++{
++ unsigned i, j;
++
++ for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
++ struct amdgpu_vm_id_manager *id_mgr =
++ &adev->vm_manager.id_mgr[i];
++
++ for (j = 1; j < id_mgr->num_ids; ++j)
++ amdgpu_vm_reset_id(adev, i, j);
++ }
++}
++
++/**
+ * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
+ *
+ * @vm: requested vm
+@@ -2521,7 +2894,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+ amdgpu_bo_unref(&root);
+ dma_fence_put(vm->last_update);
+ for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
+- amdgpu_vmid_free_reserved(adev, vm, i);
++ amdgpu_vm_free_reserved_vmid(adev, vm, i);
+ }
+
+ /**
+@@ -2563,10 +2936,24 @@ bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
+ */
+ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
+ {
+- unsigned i;
+-
+- amdgpu_vmid_mgr_init(adev);
++ unsigned i, j;
++
++ for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
++ struct amdgpu_vm_id_manager *id_mgr =
++ &adev->vm_manager.id_mgr[i];
++
++ mutex_init(&id_mgr->lock);
++ INIT_LIST_HEAD(&id_mgr->ids_lru);
++ atomic_set(&id_mgr->reserved_vmid_num, 0);
+
++ /* skip over VMID 0, since it is the system VM */
++ for (j = 1; j < id_mgr->num_ids; ++j) {
++ amdgpu_vm_reset_id(adev, i, j);
++ amdgpu_sync_create(&id_mgr->ids[i].active);
++ list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
++ }
++ }
++
+ adev->vm_manager.fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+ adev->vm_manager.seqno[i] = 0;
+@@ -2606,10 +2993,24 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
+ */
+ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
+ {
++ unsigned i, j;
++
+ WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
+ idr_destroy(&adev->vm_manager.pasid_idr);
+
+- amdgpu_vmid_mgr_fini(adev);
++ for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
++ struct amdgpu_vm_id_manager *id_mgr =
++ &adev->vm_manager.id_mgr[i];
++
++ mutex_destroy(&id_mgr->lock);
++ for (j = 0; j < AMDGPU_NUM_VM; ++j) {
++ struct amdgpu_vm_id *id = &id_mgr->ids[j];
++
++ amdgpu_sync_free(&id->active);
++ dma_fence_put(id->flushed_updates);
++ dma_fence_put(id->last_flush);
++ }
++ }
+ }
+
+ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+@@ -2622,12 +3023,13 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ switch (args->in.op) {
+ case AMDGPU_VM_OP_RESERVE_VMID:
+ /* current, we only have requirement to reserve vmid from gfxhub */
+- r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
++ r = amdgpu_vm_alloc_reserved_vmid(adev, &fpriv->vm,
++ AMDGPU_GFXHUB);
+ if (r)
+ return r;
+ break;
+ case AMDGPU_VM_OP_UNRESERVE_VMID:
+- amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
++ amdgpu_vm_free_reserved_vmid(adev, &fpriv->vm, AMDGPU_GFXHUB);
+ break;
+ default:
+ return -EINVAL;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+index 71f4ac2..189b5c7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+@@ -35,7 +35,6 @@
+
+ #include "amdgpu_sync.h"
+ #include "amdgpu_ring.h"
+-#include "amdgpu_ids.h"
+
+ struct amdgpu_bo_va;
+ struct amdgpu_job;
+@@ -45,6 +44,9 @@ struct amdgpu_bo_list_entry;
+ * GPUVM handling
+ */
+
++/* maximum number of VMIDs */
++#define AMDGPU_NUM_VM 16
++
+ /* Maximum number of PTEs the hardware can write with one command */
+ #define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF
+
+@@ -199,7 +201,7 @@ struct amdgpu_vm {
+ u64 client_id;
+ unsigned int pasid;
+ /* dedicated to vm */
+- struct amdgpu_vmid *reserved_vmid[AMDGPU_MAX_VMHUBS];
++ struct amdgpu_vm_id *reserved_vmid[AMDGPU_MAX_VMHUBS];
+
+ /* Whether this is a Compute or GFX Context */
+ int vm_context;
+@@ -217,9 +219,37 @@ struct amdgpu_vm {
+ unsigned int fault_credit;
+ };
+
++struct amdgpu_vm_id {
++ struct list_head list;
++ struct amdgpu_sync active;
++ struct dma_fence *last_flush;
++ atomic64_t owner;
++
++ uint64_t pd_gpu_addr;
++ /* last flushed PD/PT update */
++ struct dma_fence *flushed_updates;
++
++ uint32_t current_gpu_reset_count;
++
++ uint32_t gds_base;
++ uint32_t gds_size;
++ uint32_t gws_base;
++ uint32_t gws_size;
++ uint32_t oa_base;
++ uint32_t oa_size;
++};
++
++struct amdgpu_vm_id_manager {
++ struct mutex lock;
++ unsigned num_ids;
++ struct list_head ids_lru;
++ struct amdgpu_vm_id ids[AMDGPU_NUM_VM];
++ atomic_t reserved_vmid_num;
++};
++
+ struct amdgpu_vm_manager {
+ /* Handling of VMIDs */
+- struct amdgpu_vmid_mgr id_mgr[AMDGPU_MAX_VMHUBS];
++ struct amdgpu_vm_id_manager id_mgr[AMDGPU_MAX_VMHUBS];
+
+ /* Handling of VM fences */
+ u64 fence_context;
+@@ -259,6 +289,8 @@ struct amdgpu_vm_manager {
+ spinlock_t pasid_lock;
+ };
+
++int amdgpu_vm_alloc_pasid(unsigned int bits);
++void amdgpu_vm_free_pasid(unsigned int pasid);
+ void amdgpu_vm_manager_init(struct amdgpu_device *adev);
+ void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
+ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+@@ -276,7 +308,13 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ uint64_t saddr, uint64_t size);
++int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
++ struct amdgpu_sync *sync, struct dma_fence *fence,
++ struct amdgpu_job *job);
+ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
++void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
++ unsigned vmid);
++void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev);
+ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm);
+ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+index 86ba1e7..14360d4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+@@ -958,7 +958,7 @@ static int gmc_v6_0_resume(void *handle)
+ if (r)
+ return r;
+
+- amdgpu_vmid_reset_all(adev);
++ amdgpu_vm_reset_all_ids(adev);
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+index bda5f72..c28195c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+@@ -1119,7 +1119,7 @@ static int gmc_v7_0_resume(void *handle)
+ if (r)
+ return r;
+
+- amdgpu_vmid_reset_all(adev);
++ amdgpu_vm_reset_all_ids(adev);
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+index ec7ce94..26ef404 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+@@ -1228,7 +1228,7 @@ static int gmc_v8_0_resume(void *handle)
+ if (r)
+ return r;
+
+- amdgpu_vmid_reset_all(adev);
++ amdgpu_vm_reset_all_ids(adev);
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index efc7ca7..8b823b6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -1061,7 +1061,7 @@ static int gmc_v9_0_resume(void *handle)
+ if (r)
+ return r;
+
+- amdgpu_vmid_reset_all(adev);
++ amdgpu_vm_reset_all_ids(adev);
+
+ return 0;
+ }
+--
+2.7.4
+