Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/2971-drm-amdgpu-use-consistent-naming-for-static-funcs-in.patch')
-rw-r--r-- meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/2971-drm-amdgpu-use-consistent-naming-for-static-funcs-in.patch | 695
1 file changed, 695 insertions(+), 0 deletions(-)
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/2971-drm-amdgpu-use-consistent-naming-for-static-funcs-in.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/2971-drm-amdgpu-use-consistent-naming-for-static-funcs-in.patch
new file mode 100644
index 00000000..33d49020
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/2971-drm-amdgpu-use-consistent-naming-for-static-funcs-in.patch
@@ -0,0 +1,695 @@
+From af1f686450018efc852ddee84dd8495306f2b902 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 14 Dec 2017 15:02:39 -0500
+Subject: [PATCH 2971/4131] drm/amdgpu: use consistent naming for static funcs
+ in amdgpu_device.c
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+For consistency, prefix the static functions with device_, or with
+device_ip_ for those which deal with IP blocks.
+
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 190 +++++++++++++++--------------
+ 1 file changed, 97 insertions(+), 93 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 89e2c1d..f0f1b16 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -332,7 +332,7 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
+ BUG();
+ }
+
+-static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
++static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
+ {
+ return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+@@ -341,7 +341,7 @@ static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
+ (void **)&adev->vram_scratch.ptr);
+ }
+
+-static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
++static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
+ {
+ amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
+ }
+@@ -391,14 +391,14 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev)
+ * GPU doorbell aperture helpers function.
+ */
+ /**
+- * amdgpu_doorbell_init - Init doorbell driver information.
++ * amdgpu_device_doorbell_init - Init doorbell driver information.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Init doorbell driver information (CIK)
+ * Returns 0 on success, error on failure.
+ */
+-static int amdgpu_doorbell_init(struct amdgpu_device *adev)
++static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
+ {
+ /* No doorbell on SI hardware generation */
+ if (adev->asic_type < CHIP_BONAIRE) {
+@@ -431,13 +431,13 @@ static int amdgpu_doorbell_init(struct amdgpu_device *adev)
+ }
+
+ /**
+- * amdgpu_doorbell_fini - Tear down doorbell driver information.
++ * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Tear down doorbell driver information (CIK)
+ */
+-static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
++static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
+ {
+ iounmap(adev->doorbell.ptr);
+ adev->doorbell.ptr = NULL;
+@@ -477,20 +477,20 @@ void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
+ }
+
+ /*
+- * amdgpu_wb_*()
++ * amdgpu_device_wb_*()
+ * Writeback is the method by which the GPU updates special pages in memory
+ * with the status of certain GPU events (fences, ring pointers,etc.).
+ */
+
+ /**
+- * amdgpu_wb_fini - Disable Writeback and free memory
++ * amdgpu_device_wb_fini - Disable Writeback and free memory
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Disables Writeback and frees the Writeback memory (all asics).
+ * Used at driver shutdown.
+ */
+-static void amdgpu_wb_fini(struct amdgpu_device *adev)
++static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
+ {
+ if (adev->wb.wb_obj) {
+ amdgpu_bo_free_kernel(&adev->wb.wb_obj,
+@@ -501,7 +501,7 @@ static void amdgpu_wb_fini(struct amdgpu_device *adev)
+ }
+
+ /**
+- * amdgpu_wb_init- Init Writeback driver info and allocate memory
++ * amdgpu_device_wb_init- Init Writeback driver info and allocate memory
+ *
+ * @adev: amdgpu_device pointer
+ *
+@@ -509,7 +509,7 @@ static void amdgpu_wb_fini(struct amdgpu_device *adev)
+ * Used at driver startup.
+ * Returns 0 on success or an -error on failure.
+ */
+-static int amdgpu_wb_init(struct amdgpu_device *adev)
++static int amdgpu_device_wb_init(struct amdgpu_device *adev)
+ {
+ int r;
+
+@@ -772,7 +772,7 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
+ cmd & ~PCI_COMMAND_MEMORY);
+
+ /* Free the VRAM and doorbell BAR, we most likely need to move both. */
+- amdgpu_doorbell_fini(adev);
++ amdgpu_device_doorbell_fini(adev);
+ if (adev->asic_type >= CHIP_BONAIRE)
+ pci_release_resource(adev->pdev, 2);
+
+@@ -789,7 +789,7 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
+ /* When the doorbell or fb BAR isn't available we have no chance of
+ * using the device.
+ */
+- r = amdgpu_doorbell_init(adev);
++ r = amdgpu_device_doorbell_init(adev);
+ if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
+ return -ENODEV;
+
+@@ -903,7 +903,7 @@ void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
+
+ /* if we get transitioned to only one device, take VGA back */
+ /**
+- * amdgpu_vga_set_decode - enable/disable vga decode
++ * amdgpu_device_vga_set_decode - enable/disable vga decode
+ *
+ * @cookie: amdgpu_device pointer
+ * @state: enable/disable vga decode
+@@ -911,7 +911,7 @@ void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
+ * Enable/disable vga decode (all asics).
+ * Returns VGA resource flags.
+ */
+-static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
++static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
+ {
+ struct amdgpu_device *adev = cookie;
+ amdgpu_asic_set_vga_state(adev, state);
+@@ -922,7 +922,7 @@ static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
+ return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+ }
+
+-static void amdgpu_check_block_size(struct amdgpu_device *adev)
++static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
+ {
+ /* defines number of bits in page table versus page directory,
+ * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
+@@ -937,7 +937,7 @@ static void amdgpu_check_block_size(struct amdgpu_device *adev)
+ }
+ }
+
+-static void amdgpu_check_vm_size(struct amdgpu_device *adev)
++static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
+ {
+ /* no need to check the default value */
+ if (amdgpu_vm_size == -1)
+@@ -951,14 +951,14 @@ static void amdgpu_check_vm_size(struct amdgpu_device *adev)
+ }
+
+ /**
+- * amdgpu_check_arguments - validate module params
++ * amdgpu_device_check_arguments - validate module params
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Validates certain module parameters and updates
+ * the associated values used by the driver (all asics).
+ */
+-static void amdgpu_check_arguments(struct amdgpu_device *adev)
++static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
+ {
+ struct sysinfo si;
+ int phys_ram_gb, amdgpu_vm_size_aligned;
+@@ -1015,9 +1015,9 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
+ amdgpu_vm_fragment_size = -1;
+ }
+
+- amdgpu_check_vm_size(adev);
++ amdgpu_device_check_vm_size(adev);
+
+- amdgpu_check_block_size(adev);
++ amdgpu_device_check_block_size(adev);
+
+ if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
+ !is_power_of_2(amdgpu_vram_page_split))) {
+@@ -1388,7 +1388,7 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
+ return err;
+ }
+
+-static int amdgpu_early_init(struct amdgpu_device *adev)
++static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
+ {
+ int i, r;
+
+@@ -1497,7 +1497,7 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
+ return 0;
+ }
+
+-static int amdgpu_init(struct amdgpu_device *adev)
++static int amdgpu_device_ip_init(struct amdgpu_device *adev)
+ {
+ int i, r;
+
+@@ -1513,7 +1513,7 @@ static int amdgpu_init(struct amdgpu_device *adev)
+ adev->ip_blocks[i].status.sw = true;
+ /* need to do gmc hw init early so we can allocate gpu mem */
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
+- r = amdgpu_vram_scratch_init(adev);
++ r = amdgpu_device_vram_scratch_init(adev);
+ if (r) {
+ DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
+ return r;
+@@ -1523,9 +1523,9 @@ static int amdgpu_init(struct amdgpu_device *adev)
+ DRM_ERROR("hw_init %d failed %d\n", i, r);
+ return r;
+ }
+- r = amdgpu_wb_init(adev);
++ r = amdgpu_device_wb_init(adev);
+ if (r) {
+- DRM_ERROR("amdgpu_wb_init failed %d\n", r);
++ DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
+ return r;
+ }
+ adev->ip_blocks[i].status.hw = true;
+@@ -1564,18 +1564,18 @@ static int amdgpu_init(struct amdgpu_device *adev)
+ return 0;
+ }
+
+-static void amdgpu_fill_reset_magic(struct amdgpu_device *adev)
++static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
+ {
+ memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
+ }
+
+-static bool amdgpu_check_vram_lost(struct amdgpu_device *adev)
++static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
+ {
+ return !!memcmp(adev->gart.ptr, adev->reset_magic,
+ AMDGPU_RESET_MAGIC_NUM);
+ }
+
+-static int amdgpu_late_set_cg_state(struct amdgpu_device *adev)
++static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
+ {
+ int i = 0, r;
+
+@@ -1598,7 +1598,7 @@ static int amdgpu_late_set_cg_state(struct amdgpu_device *adev)
+ return 0;
+ }
+
+-static int amdgpu_late_init(struct amdgpu_device *adev)
++static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
+ {
+ int i = 0, r;
+
+@@ -1619,12 +1619,12 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
+ mod_delayed_work(system_wq, &adev->late_init_work,
+ msecs_to_jiffies(AMDGPU_RESUME_MS));
+
+- amdgpu_fill_reset_magic(adev);
++ amdgpu_device_fill_reset_magic(adev);
+
+ return 0;
+ }
+
+-static int amdgpu_fini(struct amdgpu_device *adev)
++static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
+ {
+ int i, r;
+
+@@ -1658,8 +1658,8 @@ static int amdgpu_fini(struct amdgpu_device *adev)
+ continue;
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
+ amdgpu_free_static_csa(adev);
+- amdgpu_wb_fini(adev);
+- amdgpu_vram_scratch_fini(adev);
++ amdgpu_device_wb_fini(adev);
++ amdgpu_device_vram_scratch_fini(adev);
+ }
+
+ if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
+@@ -1715,11 +1715,11 @@ static int amdgpu_fini(struct amdgpu_device *adev)
+ return 0;
+ }
+
+-static void amdgpu_late_init_func_handler(struct work_struct *work)
++static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
+ {
+ struct amdgpu_device *adev =
+ container_of(work, struct amdgpu_device, late_init_work.work);
+- amdgpu_late_set_cg_state(adev);
++ amdgpu_device_ip_late_set_cg_state(adev);
+ }
+
+ int amdgpu_suspend(struct amdgpu_device *adev)
+@@ -1763,7 +1763,7 @@ int amdgpu_suspend(struct amdgpu_device *adev)
+ return 0;
+ }
+
+-static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
++static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
+ {
+ int i, r;
+
+@@ -1792,7 +1792,7 @@ static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
+ return 0;
+ }
+
+-static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
++static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
+ {
+ int i, r;
+
+@@ -1825,7 +1825,7 @@ static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
+ return 0;
+ }
+
+-static int amdgpu_resume_phase1(struct amdgpu_device *adev)
++static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
+ {
+ int i, r;
+
+@@ -1848,7 +1848,7 @@ static int amdgpu_resume_phase1(struct amdgpu_device *adev)
+ return 0;
+ }
+
+-static int amdgpu_resume_phase2(struct amdgpu_device *adev)
++static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
+ {
+ int i, r;
+
+@@ -1870,14 +1870,14 @@ static int amdgpu_resume_phase2(struct amdgpu_device *adev)
+ return 0;
+ }
+
+-static int amdgpu_resume(struct amdgpu_device *adev)
++static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
+ {
+ int r;
+
+- r = amdgpu_resume_phase1(adev);
++ r = amdgpu_device_ip_resume_phase1(adev);
+ if (r)
+ return r;
+- r = amdgpu_resume_phase2(adev);
++ r = amdgpu_device_ip_resume_phase2(adev);
+
+ return r;
+ }
+@@ -2014,7 +2014,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
+ hash_init(adev->mn_hash);
+ mutex_init(&adev->lock_reset);
+
+- amdgpu_check_arguments(adev);
++ amdgpu_device_check_arguments(adev);
+
+ spin_lock_init(&adev->mmio_idx_lock);
+ spin_lock_init(&adev->smc_idx_lock);
+@@ -2033,7 +2033,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
+ INIT_LIST_HEAD(&adev->ring_lru_list);
+ spin_lock_init(&adev->ring_lru_list_lock);
+
+- INIT_DELAYED_WORK(&adev->late_init_work, amdgpu_late_init_func_handler);
++ INIT_DELAYED_WORK(&adev->late_init_work,
++ amdgpu_device_ip_late_init_func_handler);
+
+ /* Registers mapping */
+ /* TODO: block userspace mapping of io register */
+@@ -2053,7 +2054,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
+ DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
+
+ /* doorbell bar mapping */
+- amdgpu_doorbell_init(adev);
++ amdgpu_device_doorbell_init(adev);
+
+ /* io port mapping */
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+@@ -2067,14 +2068,14 @@ int amdgpu_device_init(struct amdgpu_device *adev,
+ DRM_INFO("PCI I/O BAR is not found.\n");
+
+ /* early init functions */
+- r = amdgpu_early_init(adev);
++ r = amdgpu_device_ip_early_init(adev);
+ if (r)
+ return r;
+
+ /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
+ /* this will fail for cards that aren't VGA class devices, just
+ * ignore it */
+- vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
++ vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
+
+ if (amdgpu_device_is_px(ddev))
+ runtime = true;
+@@ -2147,7 +2148,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
+ /* init the mode config */
+ drm_mode_config_init(adev->ddev);
+
+- r = amdgpu_init(adev);
++ r = amdgpu_device_ip_init(adev);
+ if (r) {
+ /* failed in exclusive mode due to timeout */
+ if (amdgpu_sriov_vf(adev) &&
+@@ -2158,9 +2159,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
+ r = -EAGAIN;
+ goto failed;
+ }
+- dev_err(adev->dev, "amdgpu_init failed\n");
++ dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
+- amdgpu_fini(adev);
++ amdgpu_device_ip_fini(adev);
+ goto failed;
+ }
+
+@@ -2228,9 +2229,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
+ /* enable clockgating, etc. after ib tests, etc. since some blocks require
+ * explicit gating rather than handling it automatically.
+ */
+- r = amdgpu_late_init(adev);
++ r = amdgpu_device_ip_late_init(adev);
+ if (r) {
+- dev_err(adev->dev, "amdgpu_late_init failed\n");
++ dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
+ goto failed;
+ }
+@@ -2265,7 +2266,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
+ amdgpu_ib_pool_fini(adev);
+ amdgpu_fence_driver_fini(adev);
+ amdgpu_fbdev_fini(adev);
+- r = amdgpu_fini(adev);
++ r = amdgpu_device_ip_fini(adev);
+ if (adev->firmware.gpu_info_fw) {
+ release_firmware(adev->firmware.gpu_info_fw);
+ adev->firmware.gpu_info_fw = NULL;
+@@ -2291,7 +2292,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
+ iounmap(adev->rmmio);
+ adev->rmmio = NULL;
+
+- amdgpu_doorbell_fini(adev);
++ amdgpu_device_doorbell_fini(adev);
+ amdgpu_pm_sysfs_fini(adev);
+ amdgpu_debugfs_regs_cleanup(adev);
+ }
+@@ -2436,9 +2437,9 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
+ DRM_ERROR("amdgpu asic init failed\n");
+ }
+
+- r = amdgpu_resume(adev);
++ r = amdgpu_device_ip_resume(adev);
+ if (r) {
+- DRM_ERROR("amdgpu_resume failed (%d).\n", r);
++ DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
+ goto unlock;
+ }
+
+@@ -2450,7 +2451,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
+ DRM_ERROR("ib ring test failed (%d).\n", r);
+ }
+
+- r = amdgpu_late_init(adev);
++ r = amdgpu_device_ip_late_init(adev);
+ if (r)
+ goto unlock;
+
+@@ -2530,7 +2531,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
+ return r;
+ }
+
+-static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
++static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
+ {
+ int i;
+ bool asic_hang = false;
+@@ -2552,7 +2553,7 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
+ return asic_hang;
+ }
+
+-static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
++static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
+ {
+ int i, r = 0;
+
+@@ -2570,7 +2571,7 @@ static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
+ return 0;
+ }
+
+-static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
++static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
+ {
+ int i;
+
+@@ -2591,7 +2592,7 @@ static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
+ return false;
+ }
+
+-static int amdgpu_soft_reset(struct amdgpu_device *adev)
++static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
+ {
+ int i, r = 0;
+
+@@ -2609,7 +2610,7 @@ static int amdgpu_soft_reset(struct amdgpu_device *adev)
+ return 0;
+ }
+
+-static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
++static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
+ {
+ int i, r = 0;
+
+@@ -2634,10 +2635,10 @@ bool amdgpu_need_backup(struct amdgpu_device *adev)
+ return amdgpu_gpu_recovery;
+ }
+
+-static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
+- struct amdgpu_ring *ring,
+- struct amdgpu_bo *bo,
+- struct dma_fence **fence)
++static int amdgpu_device_recover_vram_from_shadow(struct amdgpu_device *adev,
++ struct amdgpu_ring *ring,
++ struct amdgpu_bo *bo,
++ struct dma_fence **fence)
+ {
+ uint32_t domain;
+ int r;
+@@ -2670,7 +2671,7 @@ static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
+ }
+
+ /*
+- * amdgpu_reset - reset ASIC/GPU for bare-metal or passthrough
++ * amdgpu_device_reset - reset ASIC/GPU for bare-metal or passthrough
+ *
+ * @adev: amdgpu device pointer
+ * @reset_flags: output param tells caller the reset result
+@@ -2678,18 +2679,19 @@ static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
+ * attempt to do soft-reset or full-reset and reinitialize Asic
+ * return 0 means successed otherwise failed
+ */
+-static int amdgpu_reset(struct amdgpu_device *adev, uint64_t* reset_flags)
++static int amdgpu_device_reset(struct amdgpu_device *adev,
++ uint64_t* reset_flags)
+ {
+ bool need_full_reset, vram_lost = 0;
+ int r;
+
+- need_full_reset = amdgpu_need_full_reset(adev);
++ need_full_reset = amdgpu_device_ip_need_full_reset(adev);
+
+ if (!need_full_reset) {
+- amdgpu_pre_soft_reset(adev);
+- r = amdgpu_soft_reset(adev);
+- amdgpu_post_soft_reset(adev);
+- if (r || amdgpu_check_soft_reset(adev)) {
++ amdgpu_device_ip_pre_soft_reset(adev);
++ r = amdgpu_device_ip_soft_reset(adev);
++ amdgpu_device_ip_post_soft_reset(adev);
++ if (r || amdgpu_device_ip_check_soft_reset(adev)) {
+ DRM_INFO("soft reset failed, will fallback to full reset!\n");
+ need_full_reset = true;
+ }
+@@ -2706,27 +2708,27 @@ static int amdgpu_reset(struct amdgpu_device *adev, uint64_t* reset_flags)
+
+ if (!r) {
+ dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
+- r = amdgpu_resume_phase1(adev);
++ r = amdgpu_device_ip_resume_phase1(adev);
+ if (r)
+ goto out;
+
+- vram_lost = amdgpu_check_vram_lost(adev);
++ vram_lost = amdgpu_device_check_vram_lost(adev);
+ if (vram_lost) {
+ DRM_ERROR("VRAM is lost!\n");
+ atomic_inc(&adev->vram_lost_counter);
+ }
+
+- r = amdgpu_gtt_mgr_recover(
+- &adev->mman.bdev.man[TTM_PL_TT]);
++ r = amdgpu_gtt_mgr_recover(
++ &adev->mman.bdev.man[TTM_PL_TT]);
+ if (r)
+ goto out;
+
+- r = amdgpu_resume_phase2(adev);
++ r = amdgpu_device_ip_resume_phase2(adev);
+ if (r)
+ goto out;
+
+ if (vram_lost)
+- amdgpu_fill_reset_magic(adev);
++ amdgpu_device_fill_reset_magic(adev);
+ }
+ }
+
+@@ -2754,7 +2756,7 @@ static int amdgpu_reset(struct amdgpu_device *adev, uint64_t* reset_flags)
+ }
+
+ /*
+- * amdgpu_reset_sriov - reset ASIC for SR-IOV vf
++ * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
+ *
+ * @adev: amdgpu device pointer
+ * @reset_flags: output param tells caller the reset result
+@@ -2762,7 +2764,9 @@ static int amdgpu_reset(struct amdgpu_device *adev, uint64_t* reset_flags)
+ * do VF FLR and reinitialize Asic
+ * return 0 means successed otherwise failed
+ */
+-static int amdgpu_reset_sriov(struct amdgpu_device *adev, uint64_t *reset_flags, bool from_hypervisor)
++static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
++ uint64_t *reset_flags,
++ bool from_hypervisor)
+ {
+ int r;
+
+@@ -2774,15 +2778,15 @@ static int amdgpu_reset_sriov(struct amdgpu_device *adev, uint64_t *reset_flags,
+ return r;
+
+ /* Resume IP prior to SMC */
+- r = amdgpu_sriov_reinit_early(adev);
++ r = amdgpu_device_ip_reinit_early_sriov(adev);
+ if (r)
+ goto error;
+
+ /* we need recover gart prior to run SMC/CP/SDMA resume */
+- amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
++ amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
+
+ /* now we are okay to resume SMC/CP/SDMA */
+- r = amdgpu_sriov_reinit_late(adev);
++ r = amdgpu_device_ip_reinit_late_sriov(adev);
+ if (r)
+ goto error;
+
+@@ -2796,10 +2800,10 @@ static int amdgpu_reset_sriov(struct amdgpu_device *adev, uint64_t *reset_flags,
+ amdgpu_virt_release_full_gpu(adev, true);
+
+ if (reset_flags) {
+- if (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
+- (*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST;
+- atomic_inc(&adev->vram_lost_counter);
+- }
++ if (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
++ (*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST;
++ atomic_inc(&adev->vram_lost_counter);
++ }
+
+ /* VF FLR or hotlink reset is always full-reset */
+ (*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET;
+@@ -2824,7 +2828,7 @@ int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job, bool
+ uint64_t reset_flags = 0;
+ int i, r, resched;
+
+- if (!amdgpu_check_soft_reset(adev)) {
++ if (!amdgpu_device_ip_check_soft_reset(adev)) {
+ DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
+ return 0;
+ }
+@@ -2866,9 +2870,9 @@ int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job, bool
+ }
+
+ if (amdgpu_sriov_vf(adev))
+- r = amdgpu_reset_sriov(adev, &reset_flags, job ? false : true);
++ r = amdgpu_device_reset_sriov(adev, &reset_flags, job ? false : true);
+ else
+- r = amdgpu_reset(adev, &reset_flags);
++ r = amdgpu_device_reset(adev, &reset_flags);
+
+ if (!r) {
+ if (((reset_flags & AMDGPU_RESET_INFO_FULLRESET) && !(adev->flags & AMD_IS_APU)) ||
+@@ -2881,7 +2885,7 @@ int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job, bool
+ mutex_lock(&adev->shadow_list_lock);
+ list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
+ next = NULL;
+- amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
++ amdgpu_device_recover_vram_from_shadow(adev, ring, bo, &next);
+ if (fence) {
+ r = dma_fence_wait(fence, false);
+ if (r) {
+--
+2.7.4
+