Diffstat (limited to 'drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c')
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 256
1 file changed, 212 insertions(+), 44 deletions(-)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index f005a90c35af..e1a5ee911dbb 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -43,6 +43,7 @@
#include "smu_v13_0_5_ppt.h"
#include "smu_v13_0_6_ppt.h"
#include "smu_v13_0_7_ppt.h"
+#include "smu_v14_0_0_ppt.h"
#include "amd_pcie.h"
/*
@@ -215,6 +216,20 @@ static int smu_set_gfx_imu_enable(struct smu_context *smu)
return smu_set_gfx_power_up_by_imu(smu);
}
+static bool is_vcn_enabled(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_VCN ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_JPEG) &&
+ !adev->ip_blocks[i].status.valid)
+ return false;
+ }
+
+ return true;
+}
+
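
[Editor's note] The new is_vcn_enabled() helper reports VCN/JPEG as unavailable as soon as any VCN or JPEG IP block was registered but never validated during init (for instance when blocks are masked out). A minimal userspace model of that scan, using simplified stand-in types rather than the real amdgpu structures:

    #include <stdbool.h>
    #include <stdio.h>

    enum ip_type { IP_GFX, IP_VCN, IP_JPEG };

    struct ip_block {
        enum ip_type type;
        bool valid; /* false if the block was skipped during init */
    };

    static bool is_vcn_enabled(const struct ip_block *blocks, int n)
    {
        for (int i = 0; i < n; i++) {
            if ((blocks[i].type == IP_VCN || blocks[i].type == IP_JPEG) &&
                !blocks[i].valid)
                return false;
        }
        return true;
    }

    int main(void)
    {
        const struct ip_block blocks[] = {
            { IP_GFX, true }, { IP_VCN, true }, { IP_JPEG, false },
        };
        printf("%d\n", is_vcn_enabled(blocks, 3)); /* 0: JPEG was skipped */
        return 0;
    }
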
static int smu_dpm_set_vcn_enable(struct smu_context *smu,
bool enable)
{
@@ -222,6 +237,12 @@ static int smu_dpm_set_vcn_enable(struct smu_context *smu,
struct smu_power_gate *power_gate = &smu_power->power_gate;
int ret = 0;
+ /*
+ * Don't power on VCN/JPEG when they are skipped.
+ */
+ if (!is_vcn_enabled(smu->adev))
+ return 0;
+
if (!smu->ppt_funcs->dpm_set_vcn_enable)
return 0;
@@ -242,6 +263,9 @@ static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
struct smu_power_gate *power_gate = &smu_power->power_gate;
int ret = 0;
+ if (!is_vcn_enabled(smu->adev))
+ return 0;
+
if (!smu->ppt_funcs->dpm_set_jpeg_enable)
return 0;
@@ -255,6 +279,49 @@ static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
return ret;
}
+static int smu_dpm_set_vpe_enable(struct smu_context *smu,
+ bool enable)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+ struct smu_power_gate *power_gate = &smu_power->power_gate;
+ int ret = 0;
+
+ if (!smu->ppt_funcs->dpm_set_vpe_enable)
+ return 0;
+
+ if (atomic_read(&power_gate->vpe_gated) ^ enable)
+ return 0;
+
+ ret = smu->ppt_funcs->dpm_set_vpe_enable(smu, enable);
+ if (!ret)
+ atomic_set(&power_gate->vpe_gated, !enable);
+
+ return ret;
+}
+
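
[Editor's note] The early return above relies on a small idiom: power_gate->vpe_gated stores the inverse of the enabled state, so XOR-ing it with the requested enable is non-zero exactly when the block is already where the caller wants it. A standalone sketch of that bookkeeping (atomic_t modeled as a plain int, the PMFW call replaced by a printf):

    #include <stdbool.h>
    #include <stdio.h>

    static int vpe_gated = 1; /* 1 = powered down (gated), matching the init value */

    static int dpm_set_vpe_enable(bool enable)
    {
        /* gated is the inverse of enabled, so gated ^ enable != 0
         * means the block is already in the requested state */
        if (vpe_gated ^ enable)
            return 0;

        printf("PMFW: power %s VPE\n", enable ? "up" : "down");
        vpe_gated = !enable; /* only flip the cached state on success */
        return 0;
    }

    int main(void)
    {
        dpm_set_vpe_enable(true);  /* sends power-up */
        dpm_set_vpe_enable(true);  /* no-op, already enabled */
        dpm_set_vpe_enable(false); /* sends power-down */
        return 0;
    }
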
+static int smu_dpm_set_umsch_mm_enable(struct smu_context *smu,
+ bool enable)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+ struct smu_power_gate *power_gate = &smu_power->power_gate;
+ int ret = 0;
+
+ if (!smu->adev->enable_umsch_mm)
+ return 0;
+
+ if (!smu->ppt_funcs->dpm_set_umsch_mm_enable)
+ return 0;
+
+ if (atomic_read(&power_gate->umsch_mm_gated) ^ enable)
+ return 0;
+
+ ret = smu->ppt_funcs->dpm_set_umsch_mm_enable(smu, enable);
+ if (!ret)
+ atomic_set(&power_gate->umsch_mm_gated, !enable);
+
+ return ret;
+}
+
/**
* smu_dpm_set_power_gate - power gate/ungate the specific IP block
*
@@ -313,6 +380,12 @@ static int smu_dpm_set_power_gate(void *handle,
dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
gate ? "gate" : "ungate");
break;
+ case AMD_IP_BLOCK_TYPE_VPE:
+ ret = smu_dpm_set_vpe_enable(smu, !gate);
+ if (ret)
+ dev_err(smu->adev->dev, "Failed to power %s VPE!\n",
+ gate ? "gate" : "ungate");
+ break;
default:
dev_err(smu->adev->dev, "Unsupported block type!\n");
return -EINVAL;
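
[Editor's note] For reference, the dispatcher's polarity: callers pass gate (true = power down), while the per-block helpers take enable, hence the !gate above. A compact userspace model of the new VPE branch:

    #include <stdbool.h>
    #include <stdio.h>

    enum block_type { BLOCK_VCN, BLOCK_JPEG, BLOCK_VPE };

    static int set_vpe_enable(bool enable)
    {
        printf("VPE %s\n", enable ? "powered up" : "powered down");
        return 0;
    }

    static int set_power_gate(enum block_type type, bool gate)
    {
        switch (type) {
        case BLOCK_VPE:
            return set_vpe_enable(!gate); /* gate=true means power down */
        default:
            return -22; /* -EINVAL for unsupported block types */
        }
    }

    int main(void)
    {
        set_power_gate(BLOCK_VPE, false); /* ungate: power up */
        set_power_gate(BLOCK_VPE, true);  /* gate: power down */
        return 0;
    }
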
@@ -462,7 +535,7 @@ bool is_support_sw_smu(struct amdgpu_device *adev)
if (adev->asic_type == CHIP_VEGA20)
return false;
- if (adev->ip_versions[MP1_HWIP][0] >= IP_VERSION(11, 0, 0))
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) >= IP_VERSION(11, 0, 0))
return true;
return false;
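
[Editor's note] The amdgpu_ip_version() accessor returns the same packed value the old adev->ip_versions[MP1_HWIP][0] lookup did, so ordered comparisons like the one above keep working. A sketch of the packing; the shifts match the usual upstream IP_VERSION() definition but are restated here from memory, so treat them as an assumption:

    #include <stdint.h>
    #include <stdio.h>

    #define IP_VERSION(maj, min, rev) \
        (((uint32_t)(maj) << 16) | ((uint32_t)(min) << 8) | (uint32_t)(rev))

    int main(void)
    {
        /* packed values order naturally, so ">= IP_VERSION(11, 0, 0)"
         * matches every MP1 version from 11.0.0 upward */
        printf("%d\n", IP_VERSION(13, 0, 7) >= IP_VERSION(11, 0, 0)); /* 1 */
        printf("%d\n", IP_VERSION(9, 4, 2) >= IP_VERSION(11, 0, 0));  /* 0 */
        return 0;
    }
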
@@ -580,7 +653,7 @@ static int smu_set_funcs(struct amdgpu_device *adev)
if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
smu->od_enabled = true;
- switch (adev->ip_versions[MP1_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 5):
case IP_VERSION(11, 0, 9):
@@ -637,6 +710,9 @@ static int smu_set_funcs(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 7):
smu_v13_0_7_set_ppt_funcs(smu);
break;
+ case IP_VERSION(14, 0, 0):
+ smu_v14_0_0_set_ppt_funcs(smu);
+ break;
default:
return -EINVAL;
}
@@ -657,7 +733,7 @@ static int smu_early_init(void *handle)
smu->adev = adev;
smu->pm_enabled = !!amdgpu_dpm;
smu->is_apu = false;
- smu->smu_baco.state = SMU_BACO_STATE_EXIT;
+ smu->smu_baco.state = SMU_BACO_STATE_NONE;
smu->smu_baco.platform_support = false;
smu->user_dpm_profile.fan_mode = -1;
@@ -752,8 +828,8 @@ static int smu_late_init(void *handle)
}
}
- if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 1)) ||
- (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 3)))
+ if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 1)) ||
+ (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 3)))
return 0;
if (!amdgpu_sriov_vf(adev) || smu->od_enabled) {
@@ -773,7 +849,8 @@ static int smu_late_init(void *handle)
ret = smu_get_asic_power_limits(smu,
&smu->current_power_limit,
&smu->default_power_limit,
- &smu->max_power_limit);
+ &smu->max_power_limit,
+ &smu->min_power_limit);
if (ret) {
dev_err(adev->dev, "Failed to get asic power limits!\n");
return ret;
@@ -1107,6 +1184,21 @@ static void smu_swctf_delayed_work_handler(struct work_struct *work)
orderly_poweroff(true);
}
+static void smu_init_xgmi_plpd_mode(struct smu_context *smu)
+{
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 2)) {
+ smu->plpd_mode = XGMI_PLPD_DEFAULT;
+ return;
+ }
+
+ /* PMFW puts PLPD into the default policy after enabling the feature */
+ if (smu_feature_is_enabled(smu,
+ SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT))
+ smu->plpd_mode = XGMI_PLPD_DEFAULT;
+ else
+ smu->plpd_mode = XGMI_PLPD_NONE;
+}
+
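
[Editor's note] The bring-up decision above is a two-step default: MP1 11.0.2 parts always start in the default PLPD policy, and everything else mirrors the XGMI per-link power-down feature bit reported by PMFW. A standalone model with stand-in names:

    #include <stdbool.h>
    #include <stdio.h>

    enum plpd_mode { PLPD_NONE, PLPD_DEFAULT };

    static enum plpd_mode init_plpd_mode(bool is_mp1_v11_0_2, bool plpd_feature_on)
    {
        if (is_mp1_v11_0_2)
            return PLPD_DEFAULT;
        /* PMFW leaves PLPD in the default policy once the feature is enabled */
        return plpd_feature_on ? PLPD_DEFAULT : PLPD_NONE;
    }

    int main(void)
    {
        printf("%d\n", init_plpd_mode(false, true));  /* 1: PLPD_DEFAULT */
        printf("%d\n", init_plpd_mode(false, false)); /* 0: PLPD_NONE */
        return 0;
    }
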
static int smu_sw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1127,6 +1219,8 @@ static int smu_sw_init(void *handle)
atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
+ atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
+ atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);
smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
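
[Editor's note] Context for the lines above (workload_prority, misspelled, is the actual field name in struct smu_context): every power profile owns one bit in workload_mask, indexed by its priority, and the boot-up default starts selected. A small sketch of that bookkeeping:

    #include <stdint.h>
    #include <stdio.h>

    enum { PROFILE_BOOTUP_DEFAULT, PROFILE_FULLSCREEN_3D, PROFILE_COUNT };

    int main(void)
    {
        uint32_t priority[PROFILE_COUNT] = { 0, 1 };
        uint32_t workload_mask = 1u << priority[PROFILE_BOOTUP_DEFAULT];

        /* selecting another profile sets its bit alongside the default */
        workload_mask |= 1u << priority[PROFILE_FULLSCREEN_3D];
        printf("mask = 0x%x\n", workload_mask); /* mask = 0x3 */
        return 0;
    }
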
@@ -1232,11 +1326,11 @@ static int smu_smc_hw_setup(struct smu_context *smu)
{
struct smu_feature *feature = &smu->smu_feature;
struct amdgpu_device *adev = smu->adev;
- uint32_t pcie_gen = 0, pcie_width = 0;
+ uint8_t pcie_gen = 0, pcie_width = 0;
uint64_t features_supported;
int ret = 0;
- switch (adev->ip_versions[MP1_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(11, 0, 7):
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 5, 0):
@@ -1338,6 +1432,8 @@ static int smu_smc_hw_setup(struct smu_context *smu)
return ret;
}
+ smu_init_xgmi_plpd_mode(smu);
+
ret = smu_feature_get_enabled_mask(smu, &features_supported);
if (ret) {
dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
@@ -1426,7 +1522,7 @@ static int smu_start_smc_engine(struct smu_context *smu)
int ret = 0;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
- if (adev->ip_versions[MP1_HWIP][0] < IP_VERSION(11, 0, 0)) {
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) < IP_VERSION(11, 0, 0)) {
if (smu->ppt_funcs->load_microcode) {
ret = smu->ppt_funcs->load_microcode(smu);
if (ret)
@@ -1477,6 +1573,8 @@ static int smu_hw_init(void *handle)
return ret;
smu_dpm_set_vcn_enable(smu, true);
smu_dpm_set_jpeg_enable(smu, true);
+ smu_dpm_set_vpe_enable(smu, true);
+ smu_dpm_set_umsch_mm_enable(smu, true);
smu_set_gfx_cgpg(smu, true);
}
@@ -1526,7 +1624,7 @@ static int smu_disable_dpms(struct smu_context *smu)
* For SMU 13.0.0 and 13.0.7, PMFW will handle the DPM features(disablement or others)
* properly on suspend/reset/unload. Driver involvement may cause some unexpected issues.
*/
- switch (adev->ip_versions[MP1_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(13, 0, 0):
case IP_VERSION(13, 0, 7):
case IP_VERSION(13, 0, 10):
@@ -1547,7 +1645,7 @@ static int smu_disable_dpms(struct smu_context *smu)
* properly.
*/
if (smu->uploading_custom_pp_table) {
- switch (adev->ip_versions[MP1_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 5):
case IP_VERSION(11, 0, 9):
@@ -1567,7 +1665,7 @@ static int smu_disable_dpms(struct smu_context *smu)
* on BACO in. Driver involvement is unnecessary.
*/
if (use_baco) {
- switch (adev->ip_versions[MP1_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(11, 0, 7):
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 5):
@@ -1580,13 +1678,14 @@ static int smu_disable_dpms(struct smu_context *smu)
}
/*
- * For SMU 13.0.4/11, PMFW will handle the features disablement properly
+ * For SMU 13.0.4/11 and 14.0.0, PMFW will handle the feature disablement properly
* for gpu reset and S0i3 cases. Driver involvement is unnecessary.
*/
if (amdgpu_in_reset(adev) || adev->in_s0ix) {
- switch (adev->ip_versions[MP1_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(13, 0, 4):
case IP_VERSION(13, 0, 11):
+ case IP_VERSION(14, 0, 0):
return 0;
default:
break;
@@ -1611,7 +1710,18 @@ static int smu_disable_dpms(struct smu_context *smu)
}
}
- if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) &&
+ /* Notify SMU that RLC is going to be off and stop RLC/SMU interaction;
+ * otherwise SMU will hang while interacting with RLC if RLC is halted.
+ * This is a workaround for the Vangogh ASIC SMU hang issue.
+ */
+ ret = smu_notify_rlc_state(smu, false);
+ if (ret) {
+ dev_err(adev->dev, "Failed to notify RLC state!\n");
+ return ret;
+ }
+
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2) &&
+ !((adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs) &&
!amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->stop)
adev->gfx.rlc.funcs->stop(adev);
@@ -1643,16 +1753,39 @@ static int smu_smc_hw_cleanup(struct smu_context *smu)
return 0;
}
+static int smu_reset_mp1_state(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
+ if ((!adev->in_runpm) && (!adev->in_suspend) &&
+ (!amdgpu_in_reset(adev)))
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
+ case IP_VERSION(13, 0, 0):
+ case IP_VERSION(13, 0, 7):
+ case IP_VERSION(13, 0, 10):
+ ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
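
[Editor's note] The guard at the top of smu_reset_mp1_state() means the UNLOAD message is only sent on a genuine driver unload: runtime PM, system suspend, and GPU reset all skip it. A trivial model of the predicate:

    #include <stdbool.h>
    #include <stdio.h>

    static bool should_send_unload(bool in_runpm, bool in_suspend, bool in_reset)
    {
        return !in_runpm && !in_suspend && !in_reset;
    }

    int main(void)
    {
        printf("%d\n", should_send_unload(false, false, false)); /* 1: module unload */
        printf("%d\n", should_send_unload(false, true, false));  /* 0: suspend path */
        return 0;
    }
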
static int smu_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
smu_dpm_set_vcn_enable(smu, false);
smu_dpm_set_jpeg_enable(smu, false);
+ smu_dpm_set_vpe_enable(smu, false);
+ smu_dpm_set_umsch_mm_enable(smu, false);
adev->vcn.cur_state = AMD_PG_STATE_GATE;
adev->jpeg.cur_state = AMD_PG_STATE_GATE;
@@ -1662,7 +1795,15 @@ static int smu_hw_fini(void *handle)
adev->pm.dpm_enabled = false;
- return smu_smc_hw_cleanup(smu);
+ ret = smu_smc_hw_cleanup(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_reset_mp1_state(smu);
+ if (ret)
+ return ret;
+
+ return 0;
}
static void smu_late_fini(void *handle)
@@ -2138,23 +2279,6 @@ static int smu_set_df_cstate(void *handle,
return ret;
}
-int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
-{
- int ret = 0;
-
- if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
- return -EOPNOTSUPP;
-
- if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
- return 0;
-
- ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
- if (ret)
- dev_err(smu->adev->dev, "[AllowXgmiPowerDown] failed!\n");
-
- return ret;
-}
-
int smu_write_watermarks_table(struct smu_context *smu)
{
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2241,6 +2365,14 @@ const struct amdgpu_ip_block_version smu_v13_0_ip_block = {
.funcs = &smu_ip_funcs,
};
+const struct amdgpu_ip_block_version smu_v14_0_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 14,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &smu_ip_funcs,
+};
+
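
[Editor's note] Version tables like smu_v14_0_ip_block are consumed by the IP discovery code, which picks one per detected MP1 version and hands it to amdgpu_device_ip_block_add(). A standalone model of that selection (struct and helper names are simplified stand-ins, not the real amdgpu API):

    #include <stdio.h>

    struct ip_block_version {
        int major, minor, rev;
    };

    static const struct ip_block_version smu_v13_0 = { 13, 0, 0 };
    static const struct ip_block_version smu_v14_0 = { 14, 0, 0 };

    static void ip_block_add(const struct ip_block_version *v)
    {
        printf("registered SMC IP block v%d.%d.%d\n", v->major, v->minor, v->rev);
    }

    int main(void)
    {
        int mp1_major = 14; /* as reported by IP discovery */

        ip_block_add(mp1_major >= 14 ? &smu_v14_0 : &smu_v13_0);
        return 0;
    }
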
static int smu_load_microcode(void *handle)
{
struct smu_context *smu = handle;
@@ -2343,7 +2475,6 @@ int smu_get_power_limit(void *handle,
break;
default:
return -EOPNOTSUPP;
- break;
}
switch (pp_limit_level) {
@@ -2357,9 +2488,10 @@ int smu_get_power_limit(void *handle,
limit_level = SMU_PPT_LIMIT_MAX;
break;
case PP_PWR_LIMIT_MIN:
+ limit_level = SMU_PPT_LIMIT_MIN;
+ break;
default:
return -EOPNOTSUPP;
- break;
}
if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
@@ -2368,7 +2500,7 @@ int smu_get_power_limit(void *handle,
} else {
switch (limit_level) {
case SMU_PPT_LIMIT_CURRENT:
- switch (adev->ip_versions[MP1_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(13, 0, 2):
case IP_VERSION(11, 0, 7):
case IP_VERSION(11, 0, 11):
@@ -2376,8 +2508,7 @@ int smu_get_power_limit(void *handle,
case IP_VERSION(11, 0, 13):
ret = smu_get_asic_power_limits(smu,
&smu->current_power_limit,
- NULL,
- NULL);
+ NULL, NULL, NULL);
break;
default:
break;
@@ -2390,8 +2521,11 @@ int smu_get_power_limit(void *handle,
case SMU_PPT_LIMIT_MAX:
*limit = smu->max_power_limit;
break;
- default:
+ case SMU_PPT_LIMIT_MIN:
+ *limit = smu->min_power_limit;
break;
+ default:
+ return -EINVAL;
}
}
@@ -2412,10 +2546,10 @@ static int smu_set_power_limit(void *handle, uint32_t limit)
if (smu->ppt_funcs->set_power_limit)
return smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
- if (limit > smu->max_power_limit) {
+ if ((limit > smu->max_power_limit) || (limit < smu->min_power_limit)) {
dev_err(smu->adev->dev,
- "New power limit (%d) is over the max allowed %d\n",
- limit, smu->max_power_limit);
+ "New power limit (%d) is out of range [%d,%d]\n",
+ limit, smu->min_power_limit, smu->max_power_limit);
return -EINVAL;
}
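
[Editor's note] With a minimum limit now tracked, validation becomes a range check rather than a one-sided cap. A userspace model of the new behavior:

    #include <stdint.h>
    #include <stdio.h>

    static int set_power_limit(uint32_t limit, uint32_t min, uint32_t max)
    {
        if (limit > max || limit < min) {
            fprintf(stderr, "New power limit (%u) is out of range [%u,%u]\n",
                    limit, min, max);
            return -22; /* -EINVAL */
        }
        return 0; /* would be forwarded to PMFW here */
    }

    int main(void)
    {
        printf("%d\n", set_power_limit(150, 100, 300)); /* 0 */
        printf("%d\n", set_power_limit(50, 100, 300));  /* -22 */
        return 0;
    }
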
@@ -2481,6 +2615,16 @@ static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type)
clk_type = SMU_OD_VDDGFX_OFFSET; break;
case OD_CCLK:
clk_type = SMU_OD_CCLK; break;
+ case OD_FAN_CURVE:
+ clk_type = SMU_OD_FAN_CURVE; break;
+ case OD_ACOUSTIC_LIMIT:
+ clk_type = SMU_OD_ACOUSTIC_LIMIT; break;
+ case OD_ACOUSTIC_TARGET:
+ clk_type = SMU_OD_ACOUSTIC_TARGET; break;
+ case OD_FAN_TARGET_TEMPERATURE:
+ clk_type = SMU_OD_FAN_TARGET_TEMPERATURE; break;
+ case OD_FAN_MINIMUM_PWM:
+ clk_type = SMU_OD_FAN_MINIMUM_PWM; break;
default:
clk_type = SMU_CLK_COUNT; break;
}
@@ -2614,7 +2758,7 @@ unlock:
static int smu_get_apu_thermal_limit(void *handle, uint32_t *limit)
{
- int ret = -EINVAL;
+ int ret = -EOPNOTSUPP;
struct smu_context *smu = handle;
if (smu->ppt_funcs && smu->ppt_funcs->get_apu_thermal_limit)
@@ -2625,7 +2769,7 @@ static int smu_get_apu_thermal_limit(void *handle, uint32_t *limit)
static int smu_set_apu_thermal_limit(void *handle, uint32_t limit)
{
- int ret = -EINVAL;
+ int ret = -EOPNOTSUPP;
struct smu_context *smu = handle;
if (smu->ppt_funcs && smu->ppt_funcs->set_apu_thermal_limit)
@@ -3124,6 +3268,30 @@ static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)
return 0;
}
+int smu_set_xgmi_plpd_mode(struct smu_context *smu,
+ enum pp_xgmi_plpd_mode mode)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return ret;
+
+ /* PLPD is not supported if the current mode is NONE */
+ if (smu->plpd_mode == XGMI_PLPD_NONE)
+ return ret;
+
+ if (smu->plpd_mode == mode)
+ return 0;
+
+ if (smu->ppt_funcs && smu->ppt_funcs->select_xgmi_plpd_policy)
+ ret = smu->ppt_funcs->select_xgmi_plpd_policy(smu, mode);
+
+ if (!ret)
+ smu->plpd_mode = mode;
+
+ return ret;
+}
+
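
[Editor's note] smu_set_xgmi_plpd_mode() is a cache-and-forward setter: it refuses when PLPD was disabled at init, skips the firmware round-trip when the requested mode matches the cached one, and only updates the cache on success. A standalone model of that flow (stand-in names, firmware call replaced by a printf):

    #include <stdio.h>

    enum plpd_mode { PLPD_NONE, PLPD_DEFAULT, PLPD_OPTIMIZED };

    static enum plpd_mode cur_mode = PLPD_DEFAULT;

    static int select_plpd_policy(enum plpd_mode mode)
    {
        printf("programming PLPD policy %d\n", mode);
        return 0; /* pretend PMFW accepted it */
    }

    static int set_plpd_mode(enum plpd_mode mode)
    {
        if (cur_mode == PLPD_NONE)
            return -95; /* -EOPNOTSUPP: disabled at init */
        if (cur_mode == mode)
            return 0;   /* no-op, nothing sent to firmware */

        int ret = select_plpd_policy(mode);
        if (!ret)
            cur_mode = mode; /* cache only on success */
        return ret;
    }

    int main(void)
    {
        set_plpd_mode(PLPD_OPTIMIZED); /* programs firmware */
        set_plpd_mode(PLPD_OPTIMIZED); /* cached: nothing sent */
        return 0;
    }
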
static const struct amd_pm_funcs swsmu_pm_funcs = {
/* export for sysfs */
.set_fan_control_mode = smu_set_fan_control_mode,