Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/3656-drm-amdgpu-switch-to-new-amdgpu_nbio-structure.patch')
-rw-r--r-- meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/3656-drm-amdgpu-switch-to-new-amdgpu_nbio-structure.patch | 955
1 file changed, 955 insertions, 0 deletions
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/3656-drm-amdgpu-switch-to-new-amdgpu_nbio-structure.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/3656-drm-amdgpu-switch-to-new-amdgpu_nbio-structure.patch
new file mode 100644
index 00000000..f4c83659
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/3656-drm-amdgpu-switch-to-new-amdgpu_nbio-structure.patch
@@ -0,0 +1,955 @@
+From dea9d05135ec1a2e0e0f4a4ed29a4c3a0a1382a7 Mon Sep 17 00:00:00 2001
+From: Hawking Zhang <Hawking.Zhang@amd.com>
+Date: Fri, 23 Aug 2019 19:39:18 +0800
+Subject: [PATCH 3656/4256] drm/amdgpu: switch to new amdgpu_nbio structure
+
+no functional change, just switch to new structures
+
+Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 63 ++--------------
+ drivers/gpu/drm/amd/amdgpu/df_v3_6.c | 16 ++---
+ drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 14 ++--
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 6 +-
+ drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 6 +-
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 4 +-
+ drivers/gpu/drm/amd/amdgpu/navi10_ih.c | 4 +-
+ drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c | 1 -
+ drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h | 1 +
+ drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c | 3 +-
+ drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h | 1 +
+ drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c | 1 -
+ drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h | 1 +
+ drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 3 +-
+ drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h | 1 +
+ drivers/gpu/drm/amd/amdgpu/nv.c | 34 ++++-----
+ drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 6 +-
+ drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 8 +--
+ drivers/gpu/drm/amd/amdgpu/soc15.c | 71 ++++++++++---------
+ drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 2 +-
+ drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 2 +-
+ .../drm/amd/powerplay/smumgr/smu10_smumgr.c | 2 +-
+ .../drm/amd/powerplay/smumgr/vega10_smumgr.c | 2 +-
+ .../drm/amd/powerplay/smumgr/vega12_smumgr.c | 2 +-
+ .../drm/amd/powerplay/smumgr/vega20_smumgr.c | 4 +-
+ 27 files changed, 108 insertions(+), 154 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 02bd4e99906f..178039b28651 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -74,6 +74,7 @@
+ #include "amdgpu_gmc.h"
+ #include "amdgpu_gfx.h"
+ #include "amdgpu_sdma.h"
++#include "amdgpu_nbio.h"
+ #include "amdgpu_dm.h"
+ #include "amdgpu_virt.h"
+ #include "amdgpu_csa.h"
+@@ -660,69 +661,11 @@ typedef void (*amdgpu_wreg64_t)(struct amdgpu_device*, uint32_t, uint64_t);
+ typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
+ typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
+
+-
+-/*
+- * amdgpu nbio functions
+- *
+- */
+-struct nbio_hdp_flush_reg {
+- u32 ref_and_mask_cp0;
+- u32 ref_and_mask_cp1;
+- u32 ref_and_mask_cp2;
+- u32 ref_and_mask_cp3;
+- u32 ref_and_mask_cp4;
+- u32 ref_and_mask_cp5;
+- u32 ref_and_mask_cp6;
+- u32 ref_and_mask_cp7;
+- u32 ref_and_mask_cp8;
+- u32 ref_and_mask_cp9;
+- u32 ref_and_mask_sdma0;
+- u32 ref_and_mask_sdma1;
+- u32 ref_and_mask_sdma2;
+- u32 ref_and_mask_sdma3;
+- u32 ref_and_mask_sdma4;
+- u32 ref_and_mask_sdma5;
+- u32 ref_and_mask_sdma6;
+- u32 ref_and_mask_sdma7;
+-};
+-
+ struct amdgpu_mmio_remap {
+ u32 reg_offset;
+ resource_size_t bus_addr;
+ };
+
+-struct amdgpu_nbio_funcs {
+- const struct nbio_hdp_flush_reg *hdp_flush_reg;
+- u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev);
+- u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev);
+- u32 (*get_pcie_index_offset)(struct amdgpu_device *adev);
+- u32 (*get_pcie_data_offset)(struct amdgpu_device *adev);
+- u32 (*get_rev_id)(struct amdgpu_device *adev);
+- void (*mc_access_enable)(struct amdgpu_device *adev, bool enable);
+- void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
+- u32 (*get_memsize)(struct amdgpu_device *adev);
+- void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
+- bool use_doorbell, int doorbell_index, int doorbell_size);
+- void (*vcn_doorbell_range)(struct amdgpu_device *adev, bool use_doorbell,
+- int doorbell_index, int instance);
+- void (*enable_doorbell_aperture)(struct amdgpu_device *adev,
+- bool enable);
+- void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev,
+- bool enable);
+- void (*ih_doorbell_range)(struct amdgpu_device *adev,
+- bool use_doorbell, int doorbell_index);
+- void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
+- bool enable);
+- void (*update_medium_grain_light_sleep)(struct amdgpu_device *adev,
+- bool enable);
+- void (*get_clockgating_state)(struct amdgpu_device *adev,
+- u32 *flags);
+- void (*ih_control)(struct amdgpu_device *adev);
+- void (*init_registers)(struct amdgpu_device *adev);
+- void (*detect_hw_virt)(struct amdgpu_device *adev);
+- void (*remap_hdp_registers)(struct amdgpu_device *adev);
+-};
+-
+ struct amdgpu_df_funcs {
+ void (*sw_init)(struct amdgpu_device *adev);
+ void (*enable_broadcast_mode)(struct amdgpu_device *adev,
+@@ -962,6 +905,9 @@ struct amdgpu_device {
+ u32 cg_flags;
+ u32 pg_flags;
+
++ /* nbio */
++ struct amdgpu_nbio nbio;
++
+ /* gfx */
+ struct amdgpu_gfx gfx;
+
+@@ -1015,7 +961,6 @@ struct amdgpu_device {
+ /* soc15 register offset based on ip, instance and segment */
+ uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
+
+- const struct amdgpu_nbio_funcs *nbio_funcs;
+ const struct amdgpu_df_funcs *df_funcs;
+ const struct amdgpu_mmhub_funcs *mmhub_funcs;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+index 47ba0b31a8a4..7f7896a69d53 100644
+--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
++++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+@@ -99,8 +99,8 @@ static uint64_t df_v3_6_get_fica(struct amdgpu_device *adev,
+ unsigned long flags, address, data;
+ uint32_t ficadl_val, ficadh_val;
+
+- address = adev->nbio_funcs->get_pcie_index_offset(adev);
+- data = adev->nbio_funcs->get_pcie_data_offset(adev);
++ address = adev->nbio.funcs->get_pcie_index_offset(adev);
++ data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
+@@ -122,8 +122,8 @@ static void df_v3_6_set_fica(struct amdgpu_device *adev, uint32_t ficaa_val,
+ {
+ unsigned long flags, address, data;
+
+- address = adev->nbio_funcs->get_pcie_index_offset(adev);
+- data = adev->nbio_funcs->get_pcie_data_offset(adev);
++ address = adev->nbio.funcs->get_pcie_index_offset(adev);
++ data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
+@@ -150,8 +150,8 @@ static void df_v3_6_perfmon_rreg(struct amdgpu_device *adev,
+ {
+ unsigned long flags, address, data;
+
+- address = adev->nbio_funcs->get_pcie_index_offset(adev);
+- data = adev->nbio_funcs->get_pcie_data_offset(adev);
++ address = adev->nbio.funcs->get_pcie_index_offset(adev);
++ data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(address, lo_addr);
+@@ -172,8 +172,8 @@ static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr,
+ {
+ unsigned long flags, address, data;
+
+- address = adev->nbio_funcs->get_pcie_index_offset(adev);
+- data = adev->nbio_funcs->get_pcie_data_offset(adev);
++ address = adev->nbio.funcs->get_pcie_index_offset(adev);
++ data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(address, lo_addr);
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+index c3b48ac398a5..082a0b3298a9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+@@ -2409,7 +2409,7 @@ static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
+ }
+
+ if (amdgpu_emu_mode == 1)
+- adev->nbio_funcs->hdp_flush(adev, NULL);
++ adev->nbio.funcs->hdp_flush(adev, NULL);
+
+ tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL);
+ tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
+@@ -2479,7 +2479,7 @@ static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev)
+ }
+
+ if (amdgpu_emu_mode == 1)
+- adev->nbio_funcs->hdp_flush(adev, NULL);
++ adev->nbio.funcs->hdp_flush(adev, NULL);
+
+ tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL);
+ tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, VMID, 0);
+@@ -2548,7 +2548,7 @@ static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
+ }
+
+ if (amdgpu_emu_mode == 1)
+- adev->nbio_funcs->hdp_flush(adev, NULL);
++ adev->nbio.funcs->hdp_flush(adev, NULL);
+
+ tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL);
+ tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
+@@ -2869,7 +2869,7 @@ static int gfx_v10_0_cp_compute_load_microcode(struct amdgpu_device *adev)
+ }
+
+ if (amdgpu_emu_mode == 1)
+- adev->nbio_funcs->hdp_flush(adev, NULL);
++ adev->nbio.funcs->hdp_flush(adev, NULL);
+
+ tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL);
+ tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
+@@ -4323,7 +4323,7 @@ static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+ {
+ struct amdgpu_device *adev = ring->adev;
+ u32 ref_and_mask, reg_mem_engine;
+- const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
++ const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
+ switch (ring->me) {
+@@ -4343,8 +4343,8 @@ static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+ }
+
+ gfx_v10_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
+- adev->nbio_funcs->get_hdp_flush_req_offset(adev),
+- adev->nbio_funcs->get_hdp_flush_done_offset(adev),
++ adev->nbio.funcs->get_hdp_flush_req_offset(adev),
++ adev->nbio.funcs->get_hdp_flush_done_offset(adev),
+ ref_and_mask, ref_and_mask, 0x20);
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index fd7947ef4c24..78fea99c0d0a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -4970,7 +4970,7 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+ {
+ struct amdgpu_device *adev = ring->adev;
+ u32 ref_and_mask, reg_mem_engine;
+- const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
++ const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
+ switch (ring->me) {
+@@ -4990,8 +4990,8 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+ }
+
+ gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
+- adev->nbio_funcs->get_hdp_flush_req_offset(adev),
+- adev->nbio_funcs->get_hdp_flush_done_offset(adev),
++ adev->nbio.funcs->get_hdp_flush_req_offset(adev),
++ adev->nbio.funcs->get_hdp_flush_done_offset(adev),
+ ref_and_mask, ref_and_mask, 0x20);
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+index 56f76a1f32ee..46efd4d17a34 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+@@ -277,7 +277,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
+ int r;
+
+ /* flush hdp cache */
+- adev->nbio_funcs->hdp_flush(adev, NULL);
++ adev->nbio.funcs->hdp_flush(adev, NULL);
+
+ mutex_lock(&adev->mman.gtt_window_lock);
+
+@@ -557,7 +557,7 @@ static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
+
+ /* size in MB on si */
+ adev->gmc.mc_vram_size =
+- adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
++ adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
+ adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
+ adev->gmc.visible_vram_size = adev->gmc.aper_size;
+
+@@ -811,7 +811,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
+ WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
+
+ /* Flush HDP after it is initialized */
+- adev->nbio_funcs->hdp_flush(adev, NULL);
++ adev->nbio.funcs->hdp_flush(adev, NULL);
+
+ value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
+ false : true;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index a22fbb8fe1a5..b97ea92bda51 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -992,7 +992,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
+
+ /* size in MB on si */
+ adev->gmc.mc_vram_size =
+- adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
++ adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
+ adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
+
+ if (!(adev->flags & AMD_IS_APU)) {
+@@ -1370,7 +1370,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
+ WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));
+
+ /* After HDP is initialized, flush HDP.*/
+- adev->nbio_funcs->hdp_flush(adev, NULL);
++ adev->nbio.funcs->hdp_flush(adev, NULL);
+
+ if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
+ value = false;
+diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+index e963746be11c..7dc94e730efb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+@@ -116,7 +116,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
+ /* disable irqs */
+ navi10_ih_disable_interrupts(adev);
+
+- adev->nbio_funcs->ih_control(adev);
++ adev->nbio.funcs->ih_control(adev);
+
+ /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, ih->gpu_addr >> 8);
+@@ -161,7 +161,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
+ }
+ WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, ih_doorbell_rtpr);
+
+- adev->nbio_funcs->ih_doorbell_range(adev, ih->use_doorbell,
++ adev->nbio.funcs->ih_doorbell_range(adev, ih->use_doorbell,
+ ih->doorbell_index);
+
+ tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+index f5611c479e28..a5fa741e4aff 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+@@ -311,7 +311,6 @@ static void nbio_v2_3_init_registers(struct amdgpu_device *adev)
+ }
+
+ const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
+- .hdp_flush_reg = &nbio_v2_3_hdp_flush_reg,
+ .get_hdp_flush_req_offset = nbio_v2_3_get_hdp_flush_req_offset,
+ .get_hdp_flush_done_offset = nbio_v2_3_get_hdp_flush_done_offset,
+ .get_pcie_index_offset = nbio_v2_3_get_pcie_index_offset,
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h
+index 5ae52085f6b7..a43b60acf7f6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h
+@@ -26,6 +26,7 @@
+
+ #include "soc15_common.h"
+
++extern const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg;
+ extern const struct amdgpu_nbio_funcs nbio_v2_3_funcs;
+
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+index 6590143c3f75..635d9e1fc0a3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+@@ -226,7 +226,7 @@ static u32 nbio_v6_1_get_pcie_data_offset(struct amdgpu_device *adev)
+ return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
+ }
+
+-static const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
++const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
+ .ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK,
+ .ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK,
+ .ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK,
+@@ -277,7 +277,6 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
+ }
+
+ const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
+- .hdp_flush_reg = &nbio_v6_1_hdp_flush_reg,
+ .get_hdp_flush_req_offset = nbio_v6_1_get_hdp_flush_req_offset,
+ .get_hdp_flush_done_offset = nbio_v6_1_get_hdp_flush_done_offset,
+ .get_pcie_index_offset = nbio_v6_1_get_pcie_index_offset,
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
+index 0743a6f016f3..6dc743b73218 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
+@@ -26,6 +26,7 @@
+
+ #include "soc15_common.h"
+
++extern const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg;
+ extern const struct amdgpu_nbio_funcs nbio_v6_1_funcs;
+
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+index 6f3b55d0aa3c..c8eadaa17e95 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+@@ -308,7 +308,6 @@ static void nbio_v7_0_init_registers(struct amdgpu_device *adev)
+ }
+
+ const struct amdgpu_nbio_funcs nbio_v7_0_funcs = {
+- .hdp_flush_reg = &nbio_v7_0_hdp_flush_reg,
+ .get_hdp_flush_req_offset = nbio_v7_0_get_hdp_flush_req_offset,
+ .get_hdp_flush_done_offset = nbio_v7_0_get_hdp_flush_done_offset,
+ .get_pcie_index_offset = nbio_v7_0_get_pcie_index_offset,
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
+index 508d549c5029..e7aefb252550 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
+@@ -26,6 +26,7 @@
+
+ #include "soc15_common.h"
+
++extern const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg;
+ extern const struct amdgpu_nbio_funcs nbio_v7_0_funcs;
+
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+index 910fffced43b..c416ab8ab1c3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+@@ -266,7 +266,7 @@ static u32 nbio_v7_4_get_pcie_data_offset(struct amdgpu_device *adev)
+ return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
+ }
+
+-static const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
++const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
+ .ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK,
+ .ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK,
+ .ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK,
+@@ -316,7 +316,6 @@ static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
+ }
+
+ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
+- .hdp_flush_reg = &nbio_v7_4_hdp_flush_reg,
+ .get_hdp_flush_req_offset = nbio_v7_4_get_hdp_flush_req_offset,
+ .get_hdp_flush_done_offset = nbio_v7_4_get_hdp_flush_done_offset,
+ .get_pcie_index_offset = nbio_v7_4_get_pcie_index_offset,
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
+index c442865bac4f..b1ac82872752 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
+@@ -26,6 +26,7 @@
+
+ #include "soc15_common.h"
+
++extern const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg;
+ extern const struct amdgpu_nbio_funcs nbio_v7_4_funcs;
+
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
+index 323af1ecfe9c..585fc7dce39d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nv.c
++++ b/drivers/gpu/drm/amd/amdgpu/nv.c
+@@ -45,6 +45,7 @@
+ #include "gmc_v10_0.h"
+ #include "gfxhub_v2_0.h"
+ #include "mmhub_v2_0.h"
++#include "nbio_v2_3.h"
+ #include "nv.h"
+ #include "navi10_ih.h"
+ #include "gfx_v10_0.h"
+@@ -62,8 +63,8 @@ static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
+ {
+ unsigned long flags, address, data;
+ u32 r;
+- address = adev->nbio_funcs->get_pcie_index_offset(adev);
+- data = adev->nbio_funcs->get_pcie_data_offset(adev);
++ address = adev->nbio.funcs->get_pcie_index_offset(adev);
++ data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(address, reg);
+@@ -77,8 +78,8 @@ static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+ {
+ unsigned long flags, address, data;
+
+- address = adev->nbio_funcs->get_pcie_index_offset(adev);
+- data = adev->nbio_funcs->get_pcie_data_offset(adev);
++ address = adev->nbio.funcs->get_pcie_index_offset(adev);
++ data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(address, reg);
+@@ -118,7 +119,7 @@ static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+
+ static u32 nv_get_config_memsize(struct amdgpu_device *adev)
+ {
+- return adev->nbio_funcs->get_memsize(adev);
++ return adev->nbio.funcs->get_memsize(adev);
+ }
+
+ static u32 nv_get_xclk(struct amdgpu_device *adev)
+@@ -278,7 +279,7 @@ static int nv_asic_mode1_reset(struct amdgpu_device *adev)
+
+ /* wait for asic to come out of reset */
+ for (i = 0; i < adev->usec_timeout; i++) {
+- u32 memsize = adev->nbio_funcs->get_memsize(adev);
++ u32 memsize = adev->nbio.funcs->get_memsize(adev);
+
+ if (memsize != 0xffffffff)
+ break;
+@@ -365,8 +366,8 @@ static void nv_program_aspm(struct amdgpu_device *adev)
+ static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
+ bool enable)
+ {
+- adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
+- adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
++ adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
++ adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
+ }
+
+ static const struct amdgpu_ip_block_version nv_common_ip_block =
+@@ -420,9 +421,10 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
+ if (r)
+ return r;
+
+- adev->nbio_funcs = &nbio_v2_3_funcs;
++ adev->nbio.funcs = &nbio_v2_3_funcs;
++ adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
+
+- adev->nbio_funcs->detect_hw_virt(adev);
++ adev->nbio.funcs->detect_hw_virt(adev);
+
+ switch (adev->asic_type) {
+ case CHIP_NAVI10:
+@@ -477,12 +479,12 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
+
+ static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
+ {
+- return adev->nbio_funcs->get_rev_id(adev);
++ return adev->nbio.funcs->get_rev_id(adev);
+ }
+
+ static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
+ {
+- adev->nbio_funcs->hdp_flush(adev, ring);
++ adev->nbio.funcs->hdp_flush(adev, ring);
+ }
+
+ static void nv_invalidate_hdp(struct amdgpu_device *adev,
+@@ -689,7 +691,7 @@ static int nv_common_hw_init(void *handle)
+ /* enable aspm */
+ nv_program_aspm(adev);
+ /* setup nbio registers */
+- adev->nbio_funcs->init_registers(adev);
++ adev->nbio.funcs->init_registers(adev);
+ /* enable the doorbell aperture */
+ nv_enable_doorbell_aperture(adev, true);
+
+@@ -851,9 +853,9 @@ static int nv_common_set_clockgating_state(void *handle,
+ case CHIP_NAVI10:
+ case CHIP_NAVI14:
+ case CHIP_NAVI12:
+- adev->nbio_funcs->update_medium_grain_clock_gating(adev,
++ adev->nbio.funcs->update_medium_grain_clock_gating(adev,
+ state == AMD_CG_STATE_GATE ? true : false);
+- adev->nbio_funcs->update_medium_grain_light_sleep(adev,
++ adev->nbio.funcs->update_medium_grain_light_sleep(adev,
+ state == AMD_CG_STATE_GATE ? true : false);
+ nv_update_hdp_mem_power_gating(adev,
+ state == AMD_CG_STATE_GATE ? true : false);
+@@ -881,7 +883,7 @@ static void nv_common_get_clockgating_state(void *handle, u32 *flags)
+ if (amdgpu_sriov_vf(adev))
+ *flags = 0;
+
+- adev->nbio_funcs->get_clockgating_state(adev, flags);
++ adev->nbio.funcs->get_clockgating_state(adev, flags);
+
+ /* AMD_CG_SUPPORT_HDP_MGCG */
+ tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+index b41e21e67791..5e5b6a3cda2c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+@@ -743,13 +743,13 @@ static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+ {
+ struct amdgpu_device *adev = ring->adev;
+ u32 ref_and_mask = 0;
+- const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
++ const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
+
+ ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
+
+ sdma_v4_0_wait_reg_mem(ring, 0, 1,
+- adev->nbio_funcs->get_hdp_flush_done_offset(adev),
+- adev->nbio_funcs->get_hdp_flush_req_offset(adev),
++ adev->nbio.funcs->get_hdp_flush_done_offset(adev),
++ adev->nbio.funcs->get_hdp_flush_req_offset(adev),
+ ref_and_mask, ref_and_mask, 10);
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+index 89174e778d2f..ad5c3566337c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+@@ -403,7 +403,7 @@ static void sdma_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+ {
+ struct amdgpu_device *adev = ring->adev;
+ u32 ref_and_mask = 0;
+- const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
++ const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
+
+ if (ring->me == 0)
+ ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0;
+@@ -413,8 +413,8 @@ static void sdma_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
+ SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
+ SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
+- amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_done_offset(adev)) << 2);
+- amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_req_offset(adev)) << 2);
++ amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
++ amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
+ amdgpu_ring_write(ring, ref_and_mask); /* reference */
+ amdgpu_ring_write(ring, ref_and_mask); /* mask */
+ amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
+@@ -680,7 +680,7 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
+
+- adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
++ adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
+ ring->doorbell_index, 20);
+
+ if (amdgpu_sriov_vf(adev))
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 98cdf3eccaec..ae25b0928f3f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -57,6 +57,9 @@
+ #include "mmhub_v1_0.h"
+ #include "df_v1_7.h"
+ #include "df_v3_6.h"
++#include "nbio_v6_1.h"
++#include "nbio_v7_0.h"
++#include "nbio_v7_4.h"
+ #include "vega10_ih.h"
+ #include "sdma_v4_0.h"
+ #include "uvd_v7_0.h"
+@@ -90,8 +93,8 @@ static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
+ {
+ unsigned long flags, address, data;
+ u32 r;
+- address = adev->nbio_funcs->get_pcie_index_offset(adev);
+- data = adev->nbio_funcs->get_pcie_data_offset(adev);
++ address = adev->nbio.funcs->get_pcie_index_offset(adev);
++ data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(address, reg);
+@@ -105,8 +108,8 @@ static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+ {
+ unsigned long flags, address, data;
+
+- address = adev->nbio_funcs->get_pcie_index_offset(adev);
+- data = adev->nbio_funcs->get_pcie_data_offset(adev);
++ address = adev->nbio.funcs->get_pcie_index_offset(adev);
++ data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(address, reg);
+@@ -120,8 +123,8 @@ static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
+ {
+ unsigned long flags, address, data;
+ u64 r;
+- address = adev->nbio_funcs->get_pcie_index_offset(adev);
+- data = adev->nbio_funcs->get_pcie_data_offset(adev);
++ address = adev->nbio.funcs->get_pcie_index_offset(adev);
++ data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ /* read low 32 bit */
+@@ -141,8 +144,8 @@ static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
+ {
+ unsigned long flags, address, data;
+
+- address = adev->nbio_funcs->get_pcie_index_offset(adev);
+- data = adev->nbio_funcs->get_pcie_data_offset(adev);
++ address = adev->nbio.funcs->get_pcie_index_offset(adev);
++ data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ /* write low 32 bit */
+@@ -261,7 +264,7 @@ static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+
+ static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
+ {
+- return adev->nbio_funcs->get_memsize(adev);
++ return adev->nbio.funcs->get_memsize(adev);
+ }
+
+ static u32 soc15_get_xclk(struct amdgpu_device *adev)
+@@ -460,7 +463,7 @@ static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
+
+ /* wait for asic to come out of reset */
+ for (i = 0; i < adev->usec_timeout; i++) {
+- u32 memsize = adev->nbio_funcs->get_memsize(adev);
++ u32 memsize = adev->nbio.funcs->get_memsize(adev);
+
+ if (memsize != 0xffffffff)
+ break;
+@@ -623,8 +626,8 @@ static void soc15_program_aspm(struct amdgpu_device *adev)
+ static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
+ bool enable)
+ {
+- adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
+- adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
++ adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
++ adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
+ }
+
+ static const struct amdgpu_ip_block_version vega10_common_ip_block =
+@@ -638,7 +641,7 @@ static const struct amdgpu_ip_block_version vega10_common_ip_block =
+
+ static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
+ {
+- return adev->nbio_funcs->get_rev_id(adev);
++ return adev->nbio.funcs->get_rev_id(adev);
+ }
+
+ int soc15_set_ip_blocks(struct amdgpu_device *adev)
+@@ -664,13 +667,17 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
+ if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
+ adev->gmc.xgmi.supported = true;
+
+- if (adev->flags & AMD_IS_APU)
+- adev->nbio_funcs = &nbio_v7_0_funcs;
+- else if (adev->asic_type == CHIP_VEGA20 ||
+- adev->asic_type == CHIP_ARCTURUS)
+- adev->nbio_funcs = &nbio_v7_4_funcs;
+- else
+- adev->nbio_funcs = &nbio_v6_1_funcs;
++ if (adev->flags & AMD_IS_APU) {
++ adev->nbio.funcs = &nbio_v7_0_funcs;
++ adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
++ } else if (adev->asic_type == CHIP_VEGA20 ||
++ adev->asic_type == CHIP_ARCTURUS) {
++ adev->nbio.funcs = &nbio_v7_4_funcs;
++ adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
++ } else {
++ adev->nbio.funcs = &nbio_v6_1_funcs;
++ adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
++ }
+
+ if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
+ adev->df_funcs = &df_v3_6_funcs;
+@@ -678,7 +685,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
+ adev->df_funcs = &df_v1_7_funcs;
+
+ adev->rev_id = soc15_get_rev_id(adev);
+- adev->nbio_funcs->detect_hw_virt(adev);
++ adev->nbio.funcs->detect_hw_virt(adev);
+
+ if (amdgpu_sriov_vf(adev))
+ adev->virt.ops = &xgpu_ai_virt_ops;
+@@ -784,7 +791,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
+
+ static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
+ {
+- adev->nbio_funcs->hdp_flush(adev, ring);
++ adev->nbio.funcs->hdp_flush(adev, ring);
+ }
+
+ static void soc15_invalidate_hdp(struct amdgpu_device *adev,
+@@ -1240,12 +1247,12 @@ static void soc15_doorbell_range_init(struct amdgpu_device *adev)
+ if (!amdgpu_sriov_vf(adev)) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ ring = &adev->sdma.instance[i].ring;
+- adev->nbio_funcs->sdma_doorbell_range(adev, i,
++ adev->nbio.funcs->sdma_doorbell_range(adev, i,
+ ring->use_doorbell, ring->doorbell_index,
+ adev->doorbell_index.sdma_doorbell_range);
+ }
+
+- adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
++ adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
+ adev->irq.ih.doorbell_index);
+ }
+ }
+@@ -1259,13 +1266,13 @@ static int soc15_common_hw_init(void *handle)
+ /* enable aspm */
+ soc15_program_aspm(adev);
+ /* setup nbio registers */
+- adev->nbio_funcs->init_registers(adev);
++ adev->nbio.funcs->init_registers(adev);
+ /* remap HDP registers to a hole in mmio space,
+ * for the purpose of expose those registers
+ * to process space
+ */
+- if (adev->nbio_funcs->remap_hdp_registers)
+- adev->nbio_funcs->remap_hdp_registers(adev);
++ if (adev->nbio.funcs->remap_hdp_registers)
++ adev->nbio.funcs->remap_hdp_registers(adev);
+
+ /* enable the doorbell aperture */
+ soc15_enable_doorbell_aperture(adev, true);
+@@ -1428,9 +1435,9 @@ static int soc15_common_set_clockgating_state(void *handle,
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
+ case CHIP_VEGA20:
+- adev->nbio_funcs->update_medium_grain_clock_gating(adev,
++ adev->nbio.funcs->update_medium_grain_clock_gating(adev,
+ state == AMD_CG_STATE_GATE ? true : false);
+- adev->nbio_funcs->update_medium_grain_light_sleep(adev,
++ adev->nbio.funcs->update_medium_grain_light_sleep(adev,
+ state == AMD_CG_STATE_GATE ? true : false);
+ soc15_update_hdp_light_sleep(adev,
+ state == AMD_CG_STATE_GATE ? true : false);
+@@ -1445,9 +1452,9 @@ static int soc15_common_set_clockgating_state(void *handle,
+ break;
+ case CHIP_RAVEN:
+ case CHIP_RENOIR:
+- adev->nbio_funcs->update_medium_grain_clock_gating(adev,
++ adev->nbio.funcs->update_medium_grain_clock_gating(adev,
+ state == AMD_CG_STATE_GATE ? true : false);
+- adev->nbio_funcs->update_medium_grain_light_sleep(adev,
++ adev->nbio.funcs->update_medium_grain_light_sleep(adev,
+ state == AMD_CG_STATE_GATE ? true : false);
+ soc15_update_hdp_light_sleep(adev,
+ state == AMD_CG_STATE_GATE ? true : false);
+@@ -1476,7 +1483,7 @@ static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
+ if (amdgpu_sriov_vf(adev))
+ *flags = 0;
+
+- adev->nbio_funcs->get_clockgating_state(adev, flags);
++ adev->nbio.funcs->get_clockgating_state(adev, flags);
+
+ /* AMD_CG_SUPPORT_HDP_LS */
+ data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+index 7528b1b562e1..5a590064bfff 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+@@ -244,7 +244,7 @@ static int vcn_v2_0_hw_init(void *handle)
+ struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
+ int i, r;
+
+- adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell,
++ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ ring->doorbell_index, 0);
+
+ ring->sched.ready = true;
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+index 0c84dbc6a62d..247cf7e71e1b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+@@ -255,7 +255,7 @@ static int vcn_v2_5_hw_init(void *handle)
+ continue;
+ ring = &adev->vcn.inst[j].ring_dec;
+
+- adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell,
++ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ ring->doorbell_index, j);
+
+ r = amdgpu_ring_test_ring(ring);
+diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+index f19268aea38d..d92ff25f1ed3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+@@ -224,7 +224,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
+ /* disable irqs */
+ vega10_ih_disable_interrupts(adev);
+
+- adev->nbio_funcs->ih_control(adev);
++ adev->nbio.funcs->ih_control(adev);
+
+ ih = &adev->irq.ih;
+ /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
+diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+index a65c9297e7bd..797441894c2f 100644
+--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+@@ -460,7 +460,7 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
+ return ret;
+
+ /* flush hdp cache */
+- adev->nbio_funcs->hdp_flush(adev, NULL);
++ adev->nbio.funcs->hdp_flush(adev, NULL);
+
+ if (!drv2smu)
+ memcpy(table_data, table->cpu_addr, table->size);
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+index 59b11ac5b53b..c2131e930051 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+@@ -135,7 +135,7 @@ static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
+ priv->smu_tables.entry[table_id].table_id);
+
+ /* flush hdp cache */
+- adev->nbio_funcs->hdp_flush(adev, NULL);
++ adev->nbio.funcs->hdp_flush(adev, NULL);
+
+ memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table,
+ priv->smu_tables.entry[table_id].size);
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+index 8e07fc1fb9ce..eb024fe606f2 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+@@ -56,7 +56,7 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
+ priv->smu_tables.entry[table_id].table_id);
+
+ /* flush hdp cache */
+- adev->nbio_funcs->hdp_flush(adev, NULL);
++ adev->nbio.funcs->hdp_flush(adev, NULL);
+
+ memcpy(table, priv->smu_tables.entry[table_id].table,
+ priv->smu_tables.entry[table_id].size);
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
+index c11dae720a35..cd7058b04f5e 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
+@@ -66,7 +66,7 @@ static int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
+ return -EINVAL);
+
+ /* flush hdp cache */
+- adev->nbio_funcs->hdp_flush(adev, NULL);
++ adev->nbio.funcs->hdp_flush(adev, NULL);
+
+ memcpy(table, priv->smu_tables.entry[table_id].table,
+ priv->smu_tables.entry[table_id].size);
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+index b9089c6bea85..f604612f411f 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+@@ -189,7 +189,7 @@ static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
+ return ret);
+
+ /* flush hdp cache */
+- adev->nbio_funcs->hdp_flush(adev, NULL);
++ adev->nbio.funcs->hdp_flush(adev, NULL);
+
+ memcpy(table, priv->smu_tables.entry[table_id].table,
+ priv->smu_tables.entry[table_id].size);
+@@ -290,7 +290,7 @@ int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
+ return ret);
+
+ /* flush hdp cache */
+- adev->nbio_funcs->hdp_flush(adev, NULL);
++ adev->nbio.funcs->hdp_flush(adev, NULL);
+
+ memcpy(table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table,
+ priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);
+--
+2.17.1
+
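
Note: the change above is purely mechanical; the per-ASIC NBIO function table moves from the bare adev->nbio_funcs pointer into an adev->nbio container that also carries the HDP flush register bank. The actual definition lives in the new amdgpu_nbio.h, which this patch does not touch, so the sketch below is only an inference from the call sites visible in the hunks; field order and any additional members are assumptions.

/*
 * Illustrative sketch only -- inferred from the call sites in this patch,
 * not copied from amdgpu_nbio.h (which is not part of this diff).
 */
struct amdgpu_nbio {
	const struct nbio_hdp_flush_reg *hdp_flush_reg; /* e.g. &nbio_v7_4_hdp_flush_reg */
	const struct amdgpu_nbio_funcs *funcs;          /* e.g. &nbio_v7_4_funcs */
};

/*
 * Old access pattern:  adev->nbio_funcs->hdp_flush(adev, NULL);
 * New access pattern:  adev->nbio.funcs->hdp_flush(adev, NULL);
 * HDP flush masks:     adev->nbio.hdp_flush_reg->ref_and_mask_sdma0;
 */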