Diffstat (limited to 'common/recipes-kernel/linux/linux-yocto-4.19.8/1493-drm-amd-powerplay-support-sysfs-to-set-get-pcie.patch')
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.19.8/1493-drm-amd-powerplay-support-sysfs-to-set-get-pcie.patch  94
1 file changed, 94 insertions(+), 0 deletions(-)
diff --git a/common/recipes-kernel/linux/linux-yocto-4.19.8/1493-drm-amd-powerplay-support-sysfs-to-set-get-pcie.patch b/common/recipes-kernel/linux/linux-yocto-4.19.8/1493-drm-amd-powerplay-support-sysfs-to-set-get-pcie.patch
new file mode 100644
index 00000000..588f6ca4
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.19.8/1493-drm-amd-powerplay-support-sysfs-to-set-get-pcie.patch
@@ -0,0 +1,94 @@
+From 385bbc4e526d2b86cd15f4523eb6c66004c5afc6 Mon Sep 17 00:00:00 2001
+From: Likun Gao <Likun.Gao@amd.com>
+Date: Thu, 21 Feb 2019 11:09:31 +0800
+Subject: [PATCH 1493/2940] drm/amd/powerplay: support sysfs to set/get pcie
+
+Add a sysfs interface to set and get PCIe info for the SMU.
+
+Signed-off-by: Likun Gao <Likun.Gao@amd.com>
+Reviewed-by: Gui Chengming <Jack.Gui@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 38 ++++++++++++++++++++++
+ 1 file changed, 38 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+index fa65b7509bc4..e6db56d158eb 100644
+--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
++++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+@@ -35,6 +35,10 @@
+ #include "vega20_ppt.h"
+ #include "vega20_pptable.h"
+ #include "vega20_ppsmc.h"
++#include "nbio/nbio_7_4_sh_mask.h"
++
++#define smnPCIE_LC_SPEED_CNTL 0x11140290
++#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288
+
+ #define MSG_MAP(msg, index) \
+ [SMU_MSG_##msg] = index
+@@ -718,6 +722,8 @@ static int vega20_print_clk_levels(struct smu_context *smu,
+ {
+ int i, now, size = 0;
+ int ret = 0;
++ uint32_t gen_speed, lane_width;
++ struct amdgpu_device *adev = smu->adev;
+ struct pp_clock_levels_with_latency clocks;
+ struct vega20_single_dpm_table *single_dpm_table;
+ struct smu_table_context *table_context = &smu->smu_table;
+@@ -727,6 +733,7 @@ static int vega20_print_clk_levels(struct smu_context *smu,
+ (struct vega20_od8_settings *)table_context->od8_settings;
+ OverDriveTable_t *od_table =
+ (OverDriveTable_t *)(table_context->overdrive_table);
++ PPTable_t *pptable = (PPTable_t *)table_context->driver_pptable;
+
+ dpm_table = smu_dpm->dpm_context;
+
+@@ -830,6 +837,28 @@ static int vega20_print_clk_levels(struct smu_context *smu,
+ break;
+
+ case PP_PCIE:
++ gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
++ PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
++ >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
++ lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
++ PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
++ >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
++ for (i = 0; i < NUM_LINK_LEVELS; i++)
++ size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
++ (pptable->PcieGenSpeed[i] == 0) ? "2.5GT/s," :
++ (pptable->PcieGenSpeed[i] == 1) ? "5.0GT/s," :
++ (pptable->PcieGenSpeed[i] == 2) ? "8.0GT/s," :
++ (pptable->PcieGenSpeed[i] == 3) ? "16.0GT/s," : "",
++ (pptable->PcieLaneCount[i] == 1) ? "x1" :
++ (pptable->PcieLaneCount[i] == 2) ? "x2" :
++ (pptable->PcieLaneCount[i] == 3) ? "x4" :
++ (pptable->PcieLaneCount[i] == 4) ? "x8" :
++ (pptable->PcieLaneCount[i] == 5) ? "x12" :
++ (pptable->PcieLaneCount[i] == 6) ? "x16" : "",
++ pptable->LclkFreq[i],
++ (gen_speed == pptable->PcieGenSpeed[i]) &&
++ (lane_width == pptable->PcieLaneCount[i]) ?
++ "*" : "");
+ break;
+
+ case OD_SCLK:
+@@ -1170,6 +1199,15 @@ static int vega20_force_clk_levels(struct smu_context *smu,
+ break;
+
+ case PP_PCIE:
++ if (soft_min_level >= NUM_LINK_LEVELS ||
++ soft_max_level >= NUM_LINK_LEVELS)
++ return -EINVAL;
++
++ ret = smu_send_smc_msg_with_param(smu,
++ SMU_MSG_SetMinLinkDpmByIndex, soft_min_level);
++ if (ret)
++ pr_err("Failed to set min link dpm level!\n");
++
+ break;
+
+ default:
+--
+2.17.1
+
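
For reference, the sysfs node these hunks back is amdgpu's existing pp_dpm_pcie file. Below is a minimal user-space sketch of reading and setting PCIe DPM levels through it; the card0 path and the chosen level index are assumptions for illustration, and a write is only honored after power_dpm_force_performance_level has been set to "manual".

/*
 * Minimal user-space sketch (assumptions: the GPU is card0 and root
 * privileges are available) that reads the pp_dpm_pcie levels this
 * patch implements for vega20, then requests link level 1.
 */
#include <stdio.h>
#include <stdlib.h>

#define PP_DPM_PCIE "/sys/class/drm/card0/device/pp_dpm_pcie"

int main(void)
{
	char line[128];
	FILE *f = fopen(PP_DPM_PCIE, "r");

	if (!f) {
		perror("fopen " PP_DPM_PCIE);
		return EXIT_FAILURE;
	}

	/*
	 * vega20_print_clk_levels() formats one line per link level,
	 * e.g. "1: 16.0GT/s, x16 1000Mhz *", where '*' marks the level
	 * matching the current gen speed and lane width read back from
	 * the PCIE_LC_SPEED_CNTL/PCIE_LC_LINK_WIDTH_CNTL registers.
	 */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);

	/*
	 * Writing an index back reaches vega20_force_clk_levels(),
	 * which sends SMU_MSG_SetMinLinkDpmByIndex for that level.
	 */
	f = fopen(PP_DPM_PCIE, "w");
	if (!f) {
		perror("fopen " PP_DPM_PCIE " for write");
		return EXIT_FAILURE;
	}
	fputs("1", f);
	fclose(f);

	return EXIT_SUCCESS;
}

The read path needs no privileges; on the write path the index is checked against NUM_LINK_LEVELS and rejected with -EINVAL when out of range, as the second hunk above shows.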