Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2943-drm-amdgpu-add-VCN2.5-VCPU-start-and-stop-2-7-patch.patch')
-rw-r--r-- | meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2943-drm-amdgpu-add-VCN2.5-VCPU-start-and-stop-2-7-patch.patch | 509 |
1 file changed, 509 insertions, 0 deletions
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2943-drm-amdgpu-add-VCN2.5-VCPU-start-and-stop-2-7-patch.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2943-drm-amdgpu-add-VCN2.5-VCPU-start-and-stop-2-7-patch.patch
new file mode 100644
index 00000000..cb792cfa
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2943-drm-amdgpu-add-VCN2.5-VCPU-start-and-stop-2-7-patch.patch
@@ -0,0 +1,509 @@
+From bf87a480b2c9a253abbd2ccd0e09b1654653a902 Mon Sep 17 00:00:00 2001
+From: Leo Liu <leo.liu@amd.com>
+Date: Mon, 15 Apr 2019 12:41:09 -0400
+Subject: [PATCH 2943/4256] drm/amdgpu: add VCN2.5 VCPU start and stop 2/7
+ patch
+
+HW engine initialization and suspend sequences.
+
+Signed-off-by: Leo Liu <leo.liu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 467 +++++++++++++++++++++++++-
+ 1 file changed, 466 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+index 0f553563ceb9..b6e72fff94f5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+@@ -307,6 +307,446 @@ static int vcn_v2_5_resume(void *handle)
+ 	return r;
+ }
+ 
++/**
++ * vcn_v2_5_mc_resume - memory controller programming
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Let the VCN memory controller know its offsets
++ */
++static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
++{
++	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
++	uint32_t offset;
++
++	/* cache window 0: fw */
++	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
++		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
++			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
++		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
++			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
++		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
++		offset = 0;
++	} else {
++		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
++			lower_32_bits(adev->vcn.gpu_addr));
++		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
++			upper_32_bits(adev->vcn.gpu_addr));
++		offset = size;
++		/* No signed header for now from firmware
++		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
++			AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
++		*/
++		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
++	}
++	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);
++
++	/* cache window 1: stack */
++	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
++		lower_32_bits(adev->vcn.gpu_addr + offset));
++	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
++		upper_32_bits(adev->vcn.gpu_addr + offset));
++	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
++	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
++
++	/* cache window 2: context */
++	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
++		lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
++	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
++		upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
++	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
++	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
++}
++
++/**
++ * vcn_v2_5_disable_clock_gating - disable VCN clock gating
++ *
++ * @adev: amdgpu_device pointer
++ * @sw: enable SW clock gating
++ *
++ * Disable clock gating for VCN block
++ */
++static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev)
++{
++	uint32_t data;
++	int ret = 0;
++
++	/* UVD disable CGC */
++	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
++	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
++		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
++	else
++		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
++	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
++	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
++	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
++
++	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_GATE);
++	data &= ~(UVD_CGC_GATE__SYS_MASK
++		| UVD_CGC_GATE__UDEC_MASK
++		| UVD_CGC_GATE__MPEG2_MASK
++		| UVD_CGC_GATE__REGS_MASK
++		| UVD_CGC_GATE__RBC_MASK
++		| UVD_CGC_GATE__LMI_MC_MASK
++		| UVD_CGC_GATE__LMI_UMC_MASK
++		| UVD_CGC_GATE__IDCT_MASK
++		| UVD_CGC_GATE__MPRD_MASK
++		| UVD_CGC_GATE__MPC_MASK
++		| UVD_CGC_GATE__LBSI_MASK
++		| UVD_CGC_GATE__LRBBM_MASK
++		| UVD_CGC_GATE__UDEC_RE_MASK
++		| UVD_CGC_GATE__UDEC_CM_MASK
++		| UVD_CGC_GATE__UDEC_IT_MASK
++		| UVD_CGC_GATE__UDEC_DB_MASK
++		| UVD_CGC_GATE__UDEC_MP_MASK
++		| UVD_CGC_GATE__WCB_MASK
++		| UVD_CGC_GATE__VCPU_MASK
++		| UVD_CGC_GATE__MMSCH_MASK);
++
++	WREG32_SOC15(VCN, 0, mmUVD_CGC_GATE, data);
++
++	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, ret);
++
++	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
++	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
++		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
++		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
++		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
++		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
++		| UVD_CGC_CTRL__SYS_MODE_MASK
++		| UVD_CGC_CTRL__UDEC_MODE_MASK
++		| UVD_CGC_CTRL__MPEG2_MODE_MASK
++		| UVD_CGC_CTRL__REGS_MODE_MASK
++		| UVD_CGC_CTRL__RBC_MODE_MASK
++		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
++		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
++		| UVD_CGC_CTRL__IDCT_MODE_MASK
++		| UVD_CGC_CTRL__MPRD_MODE_MASK
++		| UVD_CGC_CTRL__MPC_MODE_MASK
++		| UVD_CGC_CTRL__LBSI_MODE_MASK
++		| UVD_CGC_CTRL__LRBBM_MODE_MASK
++		| UVD_CGC_CTRL__WCB_MODE_MASK
++		| UVD_CGC_CTRL__VCPU_MODE_MASK
++		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
++	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
++
++	/* turn on */
++	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE);
++	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
++		| UVD_SUVD_CGC_GATE__SIT_MASK
++		| UVD_SUVD_CGC_GATE__SMP_MASK
++		| UVD_SUVD_CGC_GATE__SCM_MASK
++		| UVD_SUVD_CGC_GATE__SDB_MASK
++		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
++		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
++		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
++		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
++		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
++		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
++		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
++		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
++		| UVD_SUVD_CGC_GATE__SCLR_MASK
++		| UVD_SUVD_CGC_GATE__UVD_SC_MASK
++		| UVD_SUVD_CGC_GATE__ENT_MASK
++		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
++		| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
++		| UVD_SUVD_CGC_GATE__SITE_MASK
++		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
++		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
++		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
++		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
++		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
++	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE, data);
++
++	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
++	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
++		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
++		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
++		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
++		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
++		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
++		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
++		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
++		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
++		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
++	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
++}
++
++/**
++ * vcn_v2_5_enable_clock_gating - enable VCN clock gating
++ *
++ * @adev: amdgpu_device pointer
++ * @sw: enable SW clock gating
++ *
++ * Enable clock gating for VCN block
++ */
++static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev)
++{
++	uint32_t data = 0;
++
++	/* enable UVD CGC */
++	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
++	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
++		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
++	else
++		data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
++	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
++	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
++	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
++
++	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
++	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
++		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
++		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
++		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
++		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
++		| UVD_CGC_CTRL__SYS_MODE_MASK
++		| UVD_CGC_CTRL__UDEC_MODE_MASK
++		| UVD_CGC_CTRL__MPEG2_MODE_MASK
++		| UVD_CGC_CTRL__REGS_MODE_MASK
++		| UVD_CGC_CTRL__RBC_MODE_MASK
++		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
++		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
++		| UVD_CGC_CTRL__IDCT_MODE_MASK
++		| UVD_CGC_CTRL__MPRD_MODE_MASK
++		| UVD_CGC_CTRL__MPC_MODE_MASK
++		| UVD_CGC_CTRL__LBSI_MODE_MASK
++		| UVD_CGC_CTRL__LRBBM_MODE_MASK
++		| UVD_CGC_CTRL__WCB_MODE_MASK
++		| UVD_CGC_CTRL__VCPU_MODE_MASK);
++	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
++
++	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
++	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
++		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
++		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
++		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
++		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
++		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
++		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
++		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
++		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
++		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
++	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
++}
++
++static int vcn_v2_5_start(struct amdgpu_device *adev)
++{
++	struct amdgpu_ring *ring = &adev->vcn.ring_dec;
++	uint32_t rb_bufsz, tmp;
++	int i, j, r;
++
++	/* disable register anti-hang mechanism */
++	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
++		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
++
++	/* set uvd status busy */
++	tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
++	WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);
++
++	/* SW clock gating */
++	vcn_v2_5_disable_clock_gating(adev);
++
++	/* enable VCPU clock */
++	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL),
++		UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
++
++	/* disable master interrupt */
++	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
++		~UVD_MASTINT_EN__VCPU_EN_MASK);
++
++	/* setup mmUVD_LMI_CTRL */
++	tmp = RREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL);
++	tmp &= ~0xff;
++	WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL, tmp | 0x8|
++		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
++		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
++		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
++		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
++
++	/* setup mmUVD_MPC_CNTL */
++	tmp = RREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL);
++	tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
++	tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
++	WREG32_SOC15(VCN, 0, mmUVD_MPC_CNTL, tmp);
++
++	/* setup UVD_MPC_SET_MUXA0 */
++	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0,
++		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
++		(0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
++		(0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
++		(0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
++
++	/* setup UVD_MPC_SET_MUXB0 */
++	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0,
++		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
++		(0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
++		(0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
++		(0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
++
++	/* setup mmUVD_MPC_SET_MUX */
++	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX,
++		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
++		(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
++		(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
++
++	vcn_v2_5_mc_resume(adev);
++
++	/* VCN global tiling registers */
++	WREG32_SOC15(UVD, 0, mmUVD_GFX8_ADDR_CONFIG,
++		adev->gfx.config.gb_addr_config);
++	WREG32_SOC15(UVD, 0, mmUVD_GFX8_ADDR_CONFIG,
++		adev->gfx.config.gb_addr_config);
++
++	/* enable LMI MC and UMC channels */
++	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
++		~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
++
++	/* unblock VCPU register access */
++	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_ARB_CTRL), 0,
++		~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
++
++	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0,
++		~UVD_VCPU_CNTL__BLK_RST_MASK);
++
++	for (i = 0; i < 10; ++i) {
++		uint32_t status;
++
++		for (j = 0; j < 100; ++j) {
++			status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
++			if (status & 2)
++				break;
++			if (amdgpu_emu_mode == 1)
++				msleep(500);
++			else
++				mdelay(10);
++		}
++		r = 0;
++		if (status & 2)
++			break;
++
++		DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
++		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL),
++			UVD_VCPU_CNTL__BLK_RST_MASK,
++			~UVD_VCPU_CNTL__BLK_RST_MASK);
++		mdelay(10);
++		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0,
++			~UVD_VCPU_CNTL__BLK_RST_MASK);
++
++		mdelay(10);
++		r = -1;
++	}
++
++	if (r) {
++		DRM_ERROR("VCN decode not responding, giving up!!!\n");
++		return r;
++	}
++
++	/* enable master interrupt */
++	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
++		UVD_MASTINT_EN__VCPU_EN_MASK,
++		~UVD_MASTINT_EN__VCPU_EN_MASK);
++
++	/* clear the busy bit of VCN_STATUS */
++	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0,
++		~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
++
++	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_VMID, 0);
++
++	/* force RBC into idle state */
++	rb_bufsz = order_base_2(ring->ring_size);
++	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
++	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
++	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
++	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
++	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
++	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
++
++	/* program the RB_BASE for ring buffer */
++	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
++		lower_32_bits(ring->gpu_addr));
++	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
++		upper_32_bits(ring->gpu_addr));
++
++	/* Initialize the ring buffer's read and write pointers */
++	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);
++
++	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
++	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
++		lower_32_bits(ring->wptr));
++	ring = &adev->vcn.ring_enc[0];
++	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
++	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
++	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
++	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
++	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
++
++	ring = &adev->vcn.ring_enc[1];
++	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
++	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
++	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
++	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
++	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
++
++	return r;
++}
++
++static int vcn_v2_5_stop(struct amdgpu_device *adev)
++{
++	uint32_t tmp;
++	int r;
++
++	/* wait for vcn idle */
++	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, r);
++	if (r)
++		return r;
++
++	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
++		UVD_LMI_STATUS__READ_CLEAN_MASK |
++		UVD_LMI_STATUS__WRITE_CLEAN_MASK |
++		UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
++	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_LMI_STATUS, tmp, tmp, r);
++	if (r)
++		return r;
++
++	/* block LMI UMC channel */
++	tmp = RREG32_SOC15(VCN, 0, mmUVD_LMI_CTRL2);
++	tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
++	WREG32_SOC15(VCN, 0, mmUVD_LMI_CTRL2, tmp);
++
++	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK|
++		UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
++	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_LMI_STATUS, tmp, tmp, r);
++	if (r)
++		return r;
++
++	/* block VCPU register access */
++	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_ARB_CTRL),
++		UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
++		~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
++
++	/* reset VCPU */
++	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL),
++		UVD_VCPU_CNTL__BLK_RST_MASK,
++		~UVD_VCPU_CNTL__BLK_RST_MASK);
++
++	/* disable VCPU clock */
++	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0,
++		~(UVD_VCPU_CNTL__CLK_EN_MASK));
++
++	/* clear status */
++	WREG32_SOC15(VCN, 0, mmUVD_STATUS, 0);
++
++	vcn_v2_5_enable_clock_gating(adev);
++
++	/* enable register anti-hang mechanism */
++	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
++		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
++		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
++
++	return 0;
++}
++
+ static bool vcn_v2_5_is_idle(void *handle)
+ {
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+@@ -328,13 +768,38 @@ static int vcn_v2_5_wait_for_idle(void *handle)
+ static int vcn_v2_5_set_clockgating_state(void *handle,
+ 	enum amd_clockgating_state state)
+ {
++	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
++	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
++
++	if (enable) {
++		if (vcn_v2_5_is_idle(handle))
++			return -EBUSY;
++		vcn_v2_5_enable_clock_gating(adev);
++	} else {
++		vcn_v2_5_disable_clock_gating(adev);
++	}
++
+ 	return 0;
+ }
+ 
+ static int vcn_v2_5_set_powergating_state(void *handle,
+ 	enum amd_powergating_state state)
+ {
+-	return 0;
++	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
++	int ret;
++
++	if(state == adev->vcn.cur_state)
++		return 0;
++
++	if (state == AMD_PG_STATE_GATE)
++		ret = vcn_v2_5_stop(adev);
++	else
++		ret = vcn_v2_5_start(adev);
++
++	if(!ret)
++		adev->vcn.cur_state = state;
++
++	return ret;
+ }
+ 
+ static int vcn_v2_5_set_interrupt_state(struct amdgpu_device *adev,
+-- 
+2.17.1
+