diff options
Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux-4.19/linux-yocto-4.19.8/0972-drm-amd-powerplay-add-Vega20-LCLK-DPM-level-setting-.patch')
-rw-r--r-- | meta-amd-bsp/recipes-kernel/linux-4.19/linux-yocto-4.19.8/0972-drm-amd-powerplay-add-Vega20-LCLK-DPM-level-setting-.patch | 98 |
1 file changed, 98 insertions, 0 deletions
diff --git a/meta-amd-bsp/recipes-kernel/linux-4.19/linux-yocto-4.19.8/0972-drm-amd-powerplay-add-Vega20-LCLK-DPM-level-setting-.patch b/meta-amd-bsp/recipes-kernel/linux-4.19/linux-yocto-4.19.8/0972-drm-amd-powerplay-add-Vega20-LCLK-DPM-level-setting-.patch
new file mode 100644
index 00000000..383b6f9b
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux-4.19/linux-yocto-4.19.8/0972-drm-amd-powerplay-add-Vega20-LCLK-DPM-level-setting-.patch
@@ -0,0 +1,98 @@
+From 533956049895ff1ba8a7efc313f1fd9e31e4d138 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Tue, 18 Dec 2018 19:06:53 +0800
+Subject: [PATCH 0972/2940] drm/amd/powerplay: add Vega20 LCLK DPM level
+ setting support
+
+Support manual LCLK DPM level switch on Vega20.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
+Reviewed-by: Feifei Xu <Feifei.Xu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 43 +++++++++++++++++++
+ 1 file changed, 43 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+index 2e99ecf4ab76..26154f9b2178 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+@@ -49,6 +49,10 @@
+ #include "soc15_common.h"
+ #include "smuio/smuio_9_0_offset.h"
+ #include "smuio/smuio_9_0_sh_mask.h"
++#include "nbio/nbio_7_4_sh_mask.h"
++
++#define smnPCIE_LC_SPEED_CNTL 0x11140290
++#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288
+
+ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
+ {
+@@ -2282,6 +2286,18 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
+ break;
+
+ case PP_PCIE:
++ soft_min_level = mask ? (ffs(mask) - 1) : 0;
++ soft_max_level = mask ? (fls(mask) - 1) : 0;
++ if (soft_min_level >= NUM_LINK_LEVELS ||
++ soft_max_level >= NUM_LINK_LEVELS)
++ return -EINVAL;
++
++ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_SetMinLinkDpmByIndex, soft_min_level);
++ PP_ASSERT_WITH_CODE(!ret,
++ "Failed to set min link dpm level!",
++ return ret);
++
+ break;
+
+ default:
+@@ -2758,9 +2774,14 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
+ data->od8_settings.od8_settings_array;
+ OverDriveTable_t *od_table =
+ &(data->smc_state_table.overdrive_table);
++ struct phm_ppt_v3_information *pptable_information =
++ (struct phm_ppt_v3_information *)hwmgr->pptable;
++ PPTable_t *pptable = (PPTable_t *)pptable_information->smc_pptable;
++ struct amdgpu_device *adev = hwmgr->adev;
+ struct pp_clock_levels_with_latency clocks;
+ int i, now, size = 0;
+ int ret = 0;
++ uint32_t gen_speed, lane_width;
+
+ switch (type) {
+ case PP_SCLK:
+@@ -2798,6 +2819,28 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
+ break;
+
+ case PP_PCIE:
++ gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
++ PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
++ >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
++ lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
++ PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
++ >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
++ for (i = 0; i < NUM_LINK_LEVELS; i++)
++ size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
++ (pptable->PcieGenSpeed[i] == 0) ? "2.5GT/s," :
++ (pptable->PcieGenSpeed[i] == 1) ? "5.0GT/s," :
++ (pptable->PcieGenSpeed[i] == 2) ? "8.0GT/s," :
++ (pptable->PcieGenSpeed[i] == 3) ? "16.0GT/s," : "",
++ (pptable->PcieLaneCount[i] == 1) ? "x1" :
++ (pptable->PcieLaneCount[i] == 2) ? "x2" :
++ (pptable->PcieLaneCount[i] == 3) ? "x4" :
++ (pptable->PcieLaneCount[i] == 4) ? "x8" :
++ (pptable->PcieLaneCount[i] == 5) ? "x12" :
++ (pptable->PcieLaneCount[i] == 6) ? "x16" : "",
++ pptable->LclkFreq[i],
++ (gen_speed == pptable->PcieGenSpeed[i]) &&
++ (lane_width == pptable->PcieLaneCount[i]) ?
++ "*" : "");
+ break;
+
+ case OD_SCLK:
+--
+2.17.1
+