Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/3223-drm-amdgpu-cleanup-vega10-SRIOV-code-path.patch')
-rw-r--r--  meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/3223-drm-amdgpu-cleanup-vega10-SRIOV-code-path.patch | 442
1 file changed, 442 insertions(+), 0 deletions(-)
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/3223-drm-amdgpu-cleanup-vega10-SRIOV-code-path.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/3223-drm-amdgpu-cleanup-vega10-SRIOV-code-path.patch
new file mode 100644
index 00000000..04395d5c
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/3223-drm-amdgpu-cleanup-vega10-SRIOV-code-path.patch
@@ -0,0 +1,442 @@
+From a9c3a46b977e371701962f0117242daeb01c8e3a Mon Sep 17 00:00:00 2001
+From: Monk Liu <Monk.Liu@amd.com>
+Date: Tue, 30 Jul 2019 17:21:19 +0800
+Subject: [PATCH 3223/4256] drm/amdgpu: cleanup vega10 SRIOV code path
+
+we can drop all those unnecessary helper functions under
+SRIOV for vega10 since:
+1) the PSP L1 policy is force-enabled under SRIOV
+2) the original logic always set all the flags, which made
+ it a dummy step
+
+besides,
+1) the ih_doorbell_range programming should also be skipped
+for VEGA10 SRIOV.
+2) the gfx_common registers should also be skipped
+for VEGA10 SRIOV.
+
+Signed-off-by: Monk Liu <Monk.Liu@amd.com>
+Reviewed-by: Emily Deng <Emily.Deng@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 --
+ drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 45 ----------------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 13 -------
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 17 ++++----
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 10 ++---
+ drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 15 --------
+ drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 17 ++++----
+ drivers/gpu/drm/amd/amdgpu/soc15.c | 11 ++----
+ drivers/gpu/drm/amd/amdgpu/soc15_common.h | 5 ++-
+ drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 18 ++++-----
+ 11 files changed, 38 insertions(+), 118 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 5e44ddeb21db..111d3fc175d8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1643,9 +1643,6 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
+ r = amdgpu_virt_request_full_gpu(adev, true);
+ if (r)
+ return -EAGAIN;
+-
+- /* query the reg access mode at the very beginning */
+- amdgpu_virt_init_reg_access_mode(adev);
+ }
+
+ adev->pm.pp_feature = amdgpu_pp_feature_mask;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+index 1d68729a9a6b..f04eb1a64271 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+@@ -426,48 +426,3 @@ uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest)
+
+ return clk;
+ }
+-
+-void amdgpu_virt_init_reg_access_mode(struct amdgpu_device *adev)
+-{
+- struct amdgpu_virt *virt = &adev->virt;
+-
+- if (virt->ops && virt->ops->init_reg_access_mode)
+- virt->ops->init_reg_access_mode(adev);
+-}
+-
+-bool amdgpu_virt_support_psp_prg_ih_reg(struct amdgpu_device *adev)
+-{
+- bool ret = false;
+- struct amdgpu_virt *virt = &adev->virt;
+-
+- if (amdgpu_sriov_vf(adev)
+- && (virt->reg_access_mode & AMDGPU_VIRT_REG_ACCESS_PSP_PRG_IH))
+- ret = true;
+-
+- return ret;
+-}
+-
+-bool amdgpu_virt_support_rlc_prg_reg(struct amdgpu_device *adev)
+-{
+- bool ret = false;
+- struct amdgpu_virt *virt = &adev->virt;
+-
+- if (amdgpu_sriov_vf(adev)
+- && (virt->reg_access_mode & AMDGPU_VIRT_REG_ACCESS_RLC)
+- && !(amdgpu_sriov_runtime(adev)))
+- ret = true;
+-
+- return ret;
+-}
+-
+-bool amdgpu_virt_support_skip_setting(struct amdgpu_device *adev)
+-{
+- bool ret = false;
+- struct amdgpu_virt *virt = &adev->virt;
+-
+- if (amdgpu_sriov_vf(adev)
+- && (virt->reg_access_mode & AMDGPU_VIRT_REG_SKIP_SEETING))
+- ret = true;
+-
+- return ret;
+-}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+index f5107731e9c4..b0b2bdc750df 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+@@ -48,12 +48,6 @@ struct amdgpu_vf_error_buffer {
+ uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE];
+ };
+
+-/* According to the fw feature, some new reg access modes are supported */
+-#define AMDGPU_VIRT_REG_ACCESS_LEGACY (1 << 0) /* directly mmio */
+-#define AMDGPU_VIRT_REG_ACCESS_PSP_PRG_IH (1 << 1) /* by PSP */
+-#define AMDGPU_VIRT_REG_ACCESS_RLC (1 << 2) /* by RLC */
+-#define AMDGPU_VIRT_REG_SKIP_SEETING (1 << 3) /* Skip setting reg */
+-
+ /**
+ * struct amdgpu_virt_ops - amdgpu device virt operations
+ */
+@@ -65,7 +59,6 @@ struct amdgpu_virt_ops {
+ void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
+ int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf);
+ int (*force_dpm_level)(struct amdgpu_device *adev, u32 level);
+- void (*init_reg_access_mode)(struct amdgpu_device *adev);
+ };
+
+ /*
+@@ -315,10 +308,4 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
+ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
+ uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest);
+ uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest);
+-
+-void amdgpu_virt_init_reg_access_mode(struct amdgpu_device *adev);
+-bool amdgpu_virt_support_psp_prg_ih_reg(struct amdgpu_device *adev);
+-bool amdgpu_virt_support_rlc_prg_reg(struct amdgpu_device *adev);
+-bool amdgpu_virt_support_skip_setting(struct amdgpu_device *adev);
+-
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index baccfc8f624c..1ba428d5c2e1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -711,14 +711,12 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
+ {
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+- if (!amdgpu_virt_support_skip_setting(adev)) {
+- soc15_program_register_sequence(adev,
+- golden_settings_gc_9_0,
+- ARRAY_SIZE(golden_settings_gc_9_0));
+- soc15_program_register_sequence(adev,
+- golden_settings_gc_9_0_vg10,
+- ARRAY_SIZE(golden_settings_gc_9_0_vg10));
+- }
++ soc15_program_register_sequence(adev,
++ golden_settings_gc_9_0,
++ ARRAY_SIZE(golden_settings_gc_9_0));
++ soc15_program_register_sequence(adev,
++ golden_settings_gc_9_0_vg10,
++ ARRAY_SIZE(golden_settings_gc_9_0_vg10));
+ break;
+ case CHIP_VEGA12:
+ soc15_program_register_sequence(adev,
+@@ -3808,7 +3806,8 @@ static int gfx_v9_0_hw_init(void *handle)
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+- gfx_v9_0_init_golden_registers(adev);
++ if (!amdgpu_sriov_vf(adev))
++ gfx_v9_0_init_golden_registers(adev);
+
+ gfx_v9_0_constants_init(adev);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index 76769e35a774..7f4da9254dfb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -1194,7 +1194,7 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
+
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+- if (amdgpu_virt_support_skip_setting(adev))
++ if (amdgpu_sriov_vf(adev))
+ break;
+ /* fall through */
+ case CHIP_VEGA20:
+diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+index e3fc03b6a618..df0117df45a9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+@@ -111,7 +111,7 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
+ WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
+
+- if (amdgpu_virt_support_skip_setting(adev))
++ if (amdgpu_sriov_vf(adev))
+ return;
+
+ /* Set default page address. */
+@@ -159,7 +159,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
+ {
+ uint32_t tmp;
+
+- if (amdgpu_virt_support_skip_setting(adev))
++ if (amdgpu_sriov_vf(adev))
+ return;
+
+ /* Setup L2 cache */
+@@ -208,7 +208,7 @@ static void mmhub_v1_0_enable_system_domain(struct amdgpu_device *adev)
+
+ static void mmhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev)
+ {
+- if (amdgpu_virt_support_skip_setting(adev))
++ if (amdgpu_sriov_vf(adev))
+ return;
+
+ WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
+@@ -348,7 +348,7 @@ void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
+ 0);
+ WREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);
+
+- if (!amdgpu_virt_support_skip_setting(adev)) {
++ if (!amdgpu_sriov_vf(adev)) {
+ /* Setup L2 cache */
+ tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
+@@ -367,7 +367,7 @@ void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
+ {
+ u32 tmp;
+
+- if (amdgpu_virt_support_skip_setting(adev))
++ if (amdgpu_sriov_vf(adev))
+ return;
+
+ tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
+diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+index 235548c0b41f..cc5bf595f9b1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
++++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+@@ -449,20 +449,6 @@ void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
+ amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
+ }
+
+-static void xgpu_ai_init_reg_access_mode(struct amdgpu_device *adev)
+-{
+- adev->virt.reg_access_mode = AMDGPU_VIRT_REG_ACCESS_LEGACY;
+-
+- /* Enable L1 security reg access mode by defaul, as non-security VF
+- * will no longer be supported.
+- */
+- adev->virt.reg_access_mode |= AMDGPU_VIRT_REG_ACCESS_RLC;
+-
+- adev->virt.reg_access_mode |= AMDGPU_VIRT_REG_ACCESS_PSP_PRG_IH;
+-
+- adev->virt.reg_access_mode |= AMDGPU_VIRT_REG_SKIP_SEETING;
+-}
+-
+ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
+ .req_full_gpu = xgpu_ai_request_full_gpu_access,
+ .rel_full_gpu = xgpu_ai_release_full_gpu_access,
+@@ -471,5 +457,4 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
+ .trans_msg = xgpu_ai_mailbox_trans_msg,
+ .get_pp_clk = xgpu_ai_get_pp_clk,
+ .force_dpm_level = xgpu_ai_force_dpm_level,
+- .init_reg_access_mode = xgpu_ai_init_reg_access_mode,
+ };
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+index bf9365c6e9b7..93cd0a64eca0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+@@ -327,14 +327,12 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
+ {
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+- if (!amdgpu_virt_support_skip_setting(adev)) {
+- soc15_program_register_sequence(adev,
+- golden_settings_sdma_4,
+- ARRAY_SIZE(golden_settings_sdma_4));
+- soc15_program_register_sequence(adev,
+- golden_settings_sdma_vg10,
+- ARRAY_SIZE(golden_settings_sdma_vg10));
+- }
++ soc15_program_register_sequence(adev,
++ golden_settings_sdma_4,
++ ARRAY_SIZE(golden_settings_sdma_4));
++ soc15_program_register_sequence(adev,
++ golden_settings_sdma_vg10,
++ ARRAY_SIZE(golden_settings_sdma_vg10));
+ break;
+ case CHIP_VEGA12:
+ soc15_program_register_sequence(adev,
+@@ -1832,7 +1830,8 @@ static int sdma_v4_0_hw_init(void *handle)
+ adev->powerplay.pp_funcs->set_powergating_by_smu)
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false);
+
+- sdma_v4_0_init_golden_registers(adev);
++ if (!amdgpu_sriov_vf(adev))
++ sdma_v4_0_init_golden_registers(adev);
+
+ r = sdma_v4_0_start(adev);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 214fc9d880e5..dc553978d23a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -1122,21 +1122,18 @@ static void soc15_doorbell_range_init(struct amdgpu_device *adev)
+ int i;
+ struct amdgpu_ring *ring;
+
+- /* Two reasons to skip
+- * 1, Host driver already programmed them
+- * 2, To avoid registers program violations in SR-IOV
+- */
+- if (!amdgpu_virt_support_skip_setting(adev)) {
++ /* sdma/ih doorbell range are programed by hypervisor */
++ if (!amdgpu_sriov_vf(adev)) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ ring = &adev->sdma.instance[i].ring;
+ adev->nbio_funcs->sdma_doorbell_range(adev, i,
+ ring->use_doorbell, ring->doorbell_index,
+ adev->doorbell_index.sdma_doorbell_range);
+ }
+- }
+
+- adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
++ adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
+ adev->irq.ih.doorbell_index);
++ }
+ }
+
+ static int soc15_common_hw_init(void *handle)
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+index 47f74dab365d..839f186e1182 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
++++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+@@ -69,9 +69,10 @@
+ } \
+ } while (0)
+
++#define AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(a) (amdgpu_sriov_vf((a)) && !amdgpu_sriov_runtime((a)))
+ #define WREG32_RLC(reg, value) \
+ do { \
+- if (amdgpu_virt_support_rlc_prg_reg(adev)) { \
++ if (AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(adev)) { \
+ uint32_t i = 0; \
+ uint32_t retries = 50000; \
+ uint32_t r0 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0; \
+@@ -96,7 +97,7 @@
+ #define WREG32_SOC15_RLC_SHADOW(ip, inst, reg, value) \
+ do { \
+ uint32_t target_reg = adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg;\
+- if (amdgpu_virt_support_rlc_prg_reg(adev)) { \
++ if (AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(adev)) { \
+ uint32_t r2 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2; \
+ uint32_t r3 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3; \
+ uint32_t grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL; \
+diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+index ee9cd8579038..a55525abb73c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+@@ -48,7 +48,7 @@ static void vega10_ih_enable_interrupts(struct amdgpu_device *adev)
+
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
+- if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
++ if (amdgpu_sriov_vf(adev)) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
+ DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
+ return;
+@@ -62,7 +62,7 @@ static void vega10_ih_enable_interrupts(struct amdgpu_device *adev)
+ ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
+ RB_ENABLE, 1);
+- if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
++ if (amdgpu_sriov_vf(adev)) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
+ ih_rb_cntl)) {
+ DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
+@@ -78,7 +78,7 @@ static void vega10_ih_enable_interrupts(struct amdgpu_device *adev)
+ ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
+ RB_ENABLE, 1);
+- if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
++ if (amdgpu_sriov_vf(adev)) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
+ ih_rb_cntl)) {
+ DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
+@@ -104,7 +104,7 @@ static void vega10_ih_disable_interrupts(struct amdgpu_device *adev)
+
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
+- if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
++ if (amdgpu_sriov_vf(adev)) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
+ DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
+ return;
+@@ -123,7 +123,7 @@ static void vega10_ih_disable_interrupts(struct amdgpu_device *adev)
+ ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
+ RB_ENABLE, 0);
+- if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
++ if (amdgpu_sriov_vf(adev)) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
+ ih_rb_cntl)) {
+ DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
+@@ -143,7 +143,7 @@ static void vega10_ih_disable_interrupts(struct amdgpu_device *adev)
+ ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
+ RB_ENABLE, 0);
+- if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
++ if (amdgpu_sriov_vf(adev)) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
+ ih_rb_cntl)) {
+ DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
+@@ -236,7 +236,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM,
+ !!adev->irq.msi_enabled);
+
+- if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
++ if (amdgpu_sriov_vf(adev)) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
+ DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
+ return -ETIMEDOUT;
+@@ -279,7 +279,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
+ WPTR_OVERFLOW_ENABLE, 0);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ RB_FULL_DRAIN_ENABLE, 1);
+- if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
++ if (amdgpu_sriov_vf(adev)) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
+ ih_rb_cntl)) {
+ DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
+@@ -306,7 +306,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
+ ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+ ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
+
+- if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
++ if (amdgpu_sriov_vf(adev)) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
+ ih_rb_cntl)) {
+ DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
+--
+2.17.1
+