Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3798-drm-amd-pp-Move-helper-functions-to-smu_help.c.patch')
-rw-r--r--  meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3798-drm-amd-pp-Move-helper-functions-to-smu_help.c.patch  1491
1 file changed, 1491 insertions, 0 deletions
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3798-drm-amd-pp-Move-helper-functions-to-smu_help.c.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3798-drm-amd-pp-Move-helper-functions-to-smu_help.c.patch
new file mode 100644
index 00000000..75b506fb
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3798-drm-amd-pp-Move-helper-functions-to-smu_help.c.patch
@@ -0,0 +1,1491 @@
+From 5c33b3c53c5187a71354936e49a41a60c49a9d96 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Fri, 9 Mar 2018 19:52:26 +0800
+Subject: [PATCH 3798/4131] drm/amd/pp: Move helper functions to smu_help.c
+
+Change-Id: I4d17ce9acda546d49a797f34a9d6815b6f41bb06
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/Makefile | 2 +-
+ drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 513 +---------------------
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c | 536 +++++++++++++++++++++++
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h | 180 ++++++++
+ drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 155 +------
+ 5 files changed, 723 insertions(+), 663 deletions(-)
+ create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
+ create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+index efbdd3e..d319c36 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+@@ -11,7 +11,7 @@ HARDWARE_MGR = hwmgr.o processpptables.o \
+ smu7_clockpowergating.o \
+ vega10_processpptables.o vega10_hwmgr.o vega10_powertune.o \
+ vega10_thermal.o smu10_hwmgr.o pp_psm.o\
+- pp_overdriver.o
++ pp_overdriver.o smu_helper.o
+
+ AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+index 3432dc0..5563b65 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+@@ -30,8 +30,6 @@
+ #include <drm/amdgpu_drm.h>
+ #include "power_state.h"
+ #include "hwmgr.h"
+-#include "pppcielanes.h"
+-#include "ppatomctrl.h"
+ #include "ppsmc.h"
+ #include "amd_acpi.h"
+ #include "pp_psm.h"
+@@ -45,7 +43,11 @@ extern const struct pp_smumgr_func polaris10_smu_funcs;
+ extern const struct pp_smumgr_func vega10_smu_funcs;
+ extern const struct pp_smumgr_func smu10_smu_funcs;
+
++extern int smu7_init_function_pointers(struct pp_hwmgr *hwmgr);
+ extern int cz_init_function_pointers(struct pp_hwmgr *hwmgr);
++extern int vega10_hwmgr_init(struct pp_hwmgr *hwmgr);
++extern int smu10_init_function_pointers(struct pp_hwmgr *hwmgr);
++
+ static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr);
+ static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr);
+ static int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr);
+@@ -54,32 +56,6 @@ static int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr);
+ static int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr);
+ static int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr);
+
+-uint8_t convert_to_vid(uint16_t vddc)
+-{
+- return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
+-}
+-
+-uint16_t convert_to_vddc(uint8_t vid)
+-{
+- return (uint16_t) ((6200 - (vid * 25)) / VOLTAGE_SCALE);
+-}
+-
+-uint32_t phm_set_field_to_u32(u32 offset, u32 original_data, u32 field, u32 size)
+-{
+- u32 mask = 0;
+- u32 shift = 0;
+-
+- shift = (offset % 4) << 3;
+- if (size == sizeof(uint8_t))
+- mask = 0xFF << shift;
+- else if (size == sizeof(uint16_t))
+- mask = 0xFFFF << shift;
+-
+- original_data &= ~mask;
+- original_data |= (field << shift);
+- return original_data;
+-}
+-
+ static int phm_thermal_l2h_irq(void *private_data,
+ unsigned src_id, const uint32_t *iv_entry)
+ {
+@@ -432,468 +408,6 @@ int hwmgr_handle_task(struct pp_instance *handle, enum amd_pp_task task_id,
+ }
+ return ret;
+ }
+-/**
+- * Returns once the part of the register indicated by the mask has
+- * reached the given value.
+- */
+-int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
+- uint32_t value, uint32_t mask)
+-{
+- uint32_t i;
+- uint32_t cur_value;
+-
+- if (hwmgr == NULL || hwmgr->device == NULL) {
+- pr_err("Invalid Hardware Manager!");
+- return -EINVAL;
+- }
+-
+- for (i = 0; i < hwmgr->usec_timeout; i++) {
+- cur_value = cgs_read_register(hwmgr->device, index);
+- if ((cur_value & mask) == (value & mask))
+- break;
+- udelay(1);
+- }
+-
+- /* timeout means wrong logic*/
+- if (i == hwmgr->usec_timeout)
+- return -1;
+- return 0;
+-}
+-
+-
+-/**
+- * Returns once the part of the register indicated by the mask has
+- * reached the given value.The indirect space is described by giving
+- * the memory-mapped index of the indirect index register.
+- */
+-int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
+- uint32_t indirect_port,
+- uint32_t index,
+- uint32_t value,
+- uint32_t mask)
+-{
+- if (hwmgr == NULL || hwmgr->device == NULL) {
+- pr_err("Invalid Hardware Manager!");
+- return -EINVAL;
+- }
+-
+- cgs_write_register(hwmgr->device, indirect_port, index);
+- return phm_wait_on_register(hwmgr, indirect_port + 1, mask, value);
+-}
+-
+-int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
+- uint32_t index,
+- uint32_t value, uint32_t mask)
+-{
+- uint32_t i;
+- uint32_t cur_value;
+-
+- if (hwmgr == NULL || hwmgr->device == NULL)
+- return -EINVAL;
+-
+- for (i = 0; i < hwmgr->usec_timeout; i++) {
+- cur_value = cgs_read_register(hwmgr->device,
+- index);
+- if ((cur_value & mask) != (value & mask))
+- break;
+- udelay(1);
+- }
+-
+- /* timeout means wrong logic */
+- if (i == hwmgr->usec_timeout)
+- return -ETIME;
+- return 0;
+-}
+-
+-int phm_wait_for_indirect_register_unequal(struct pp_hwmgr *hwmgr,
+- uint32_t indirect_port,
+- uint32_t index,
+- uint32_t value,
+- uint32_t mask)
+-{
+- if (hwmgr == NULL || hwmgr->device == NULL)
+- return -EINVAL;
+-
+- cgs_write_register(hwmgr->device, indirect_port, index);
+- return phm_wait_for_register_unequal(hwmgr, indirect_port + 1,
+- value, mask);
+-}
+-
+-bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr)
+-{
+- return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDPowerGating);
+-}
+-
+-bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr)
+-{
+- return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEPowerGating);
+-}
+-
+-
+-int phm_trim_voltage_table(struct pp_atomctrl_voltage_table *vol_table)
+-{
+- uint32_t i, j;
+- uint16_t vvalue;
+- bool found = false;
+- struct pp_atomctrl_voltage_table *table;
+-
+- PP_ASSERT_WITH_CODE((NULL != vol_table),
+- "Voltage Table empty.", return -EINVAL);
+-
+- table = kzalloc(sizeof(struct pp_atomctrl_voltage_table),
+- GFP_KERNEL);
+-
+- if (NULL == table)
+- return -EINVAL;
+-
+- table->mask_low = vol_table->mask_low;
+- table->phase_delay = vol_table->phase_delay;
+-
+- for (i = 0; i < vol_table->count; i++) {
+- vvalue = vol_table->entries[i].value;
+- found = false;
+-
+- for (j = 0; j < table->count; j++) {
+- if (vvalue == table->entries[j].value) {
+- found = true;
+- break;
+- }
+- }
+-
+- if (!found) {
+- table->entries[table->count].value = vvalue;
+- table->entries[table->count].smio_low =
+- vol_table->entries[i].smio_low;
+- table->count++;
+- }
+- }
+-
+- memcpy(vol_table, table, sizeof(struct pp_atomctrl_voltage_table));
+- kfree(table);
+- table = NULL;
+- return 0;
+-}
+-
+-int phm_get_svi2_mvdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
+- phm_ppt_v1_clock_voltage_dependency_table *dep_table)
+-{
+- uint32_t i;
+- int result;
+-
+- PP_ASSERT_WITH_CODE((0 != dep_table->count),
+- "Voltage Dependency Table empty.", return -EINVAL);
+-
+- PP_ASSERT_WITH_CODE((NULL != vol_table),
+- "vol_table empty.", return -EINVAL);
+-
+- vol_table->mask_low = 0;
+- vol_table->phase_delay = 0;
+- vol_table->count = dep_table->count;
+-
+- for (i = 0; i < dep_table->count; i++) {
+- vol_table->entries[i].value = dep_table->entries[i].mvdd;
+- vol_table->entries[i].smio_low = 0;
+- }
+-
+- result = phm_trim_voltage_table(vol_table);
+- PP_ASSERT_WITH_CODE((0 == result),
+- "Failed to trim MVDD table.", return result);
+-
+- return 0;
+-}
+-
+-int phm_get_svi2_vddci_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
+- phm_ppt_v1_clock_voltage_dependency_table *dep_table)
+-{
+- uint32_t i;
+- int result;
+-
+- PP_ASSERT_WITH_CODE((0 != dep_table->count),
+- "Voltage Dependency Table empty.", return -EINVAL);
+-
+- PP_ASSERT_WITH_CODE((NULL != vol_table),
+- "vol_table empty.", return -EINVAL);
+-
+- vol_table->mask_low = 0;
+- vol_table->phase_delay = 0;
+- vol_table->count = dep_table->count;
+-
+- for (i = 0; i < dep_table->count; i++) {
+- vol_table->entries[i].value = dep_table->entries[i].vddci;
+- vol_table->entries[i].smio_low = 0;
+- }
+-
+- result = phm_trim_voltage_table(vol_table);
+- PP_ASSERT_WITH_CODE((0 == result),
+- "Failed to trim VDDCI table.", return result);
+-
+- return 0;
+-}
+-
+-int phm_get_svi2_vdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
+- phm_ppt_v1_voltage_lookup_table *lookup_table)
+-{
+- int i = 0;
+-
+- PP_ASSERT_WITH_CODE((0 != lookup_table->count),
+- "Voltage Lookup Table empty.", return -EINVAL);
+-
+- PP_ASSERT_WITH_CODE((NULL != vol_table),
+- "vol_table empty.", return -EINVAL);
+-
+- vol_table->mask_low = 0;
+- vol_table->phase_delay = 0;
+-
+- vol_table->count = lookup_table->count;
+-
+- for (i = 0; i < vol_table->count; i++) {
+- vol_table->entries[i].value = lookup_table->entries[i].us_vdd;
+- vol_table->entries[i].smio_low = 0;
+- }
+-
+- return 0;
+-}
+-
+-void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps,
+- struct pp_atomctrl_voltage_table *vol_table)
+-{
+- unsigned int i, diff;
+-
+- if (vol_table->count <= max_vol_steps)
+- return;
+-
+- diff = vol_table->count - max_vol_steps;
+-
+- for (i = 0; i < max_vol_steps; i++)
+- vol_table->entries[i] = vol_table->entries[i + diff];
+-
+- vol_table->count = max_vol_steps;
+-
+- return;
+-}
+-
+-int phm_reset_single_dpm_table(void *table,
+- uint32_t count, int max)
+-{
+- int i;
+-
+- struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
+-
+- dpm_table->count = count > max ? max : count;
+-
+- for (i = 0; i < dpm_table->count; i++)
+- dpm_table->dpm_level[i].enabled = false;
+-
+- return 0;
+-}
+-
+-void phm_setup_pcie_table_entry(
+- void *table,
+- uint32_t index, uint32_t pcie_gen,
+- uint32_t pcie_lanes)
+-{
+- struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
+- dpm_table->dpm_level[index].value = pcie_gen;
+- dpm_table->dpm_level[index].param1 = pcie_lanes;
+- dpm_table->dpm_level[index].enabled = 1;
+-}
+-
+-int32_t phm_get_dpm_level_enable_mask_value(void *table)
+-{
+- int32_t i;
+- int32_t mask = 0;
+- struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
+-
+- for (i = dpm_table->count; i > 0; i--) {
+- mask = mask << 1;
+- if (dpm_table->dpm_level[i - 1].enabled)
+- mask |= 0x1;
+- else
+- mask &= 0xFFFFFFFE;
+- }
+-
+- return mask;
+-}
+-
+-uint8_t phm_get_voltage_index(
+- struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage)
+-{
+- uint8_t count = (uint8_t) (lookup_table->count);
+- uint8_t i;
+-
+- PP_ASSERT_WITH_CODE((NULL != lookup_table),
+- "Lookup Table empty.", return 0);
+- PP_ASSERT_WITH_CODE((0 != count),
+- "Lookup Table empty.", return 0);
+-
+- for (i = 0; i < lookup_table->count; i++) {
+- /* find first voltage equal or bigger than requested */
+- if (lookup_table->entries[i].us_vdd >= voltage)
+- return i;
+- }
+- /* voltage is bigger than max voltage in the table */
+- return i - 1;
+-}
+-
+-uint8_t phm_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
+- uint32_t voltage)
+-{
+- uint8_t count = (uint8_t) (voltage_table->count);
+- uint8_t i = 0;
+-
+- PP_ASSERT_WITH_CODE((NULL != voltage_table),
+- "Voltage Table empty.", return 0;);
+- PP_ASSERT_WITH_CODE((0 != count),
+- "Voltage Table empty.", return 0;);
+-
+- for (i = 0; i < count; i++) {
+- /* find first voltage bigger than requested */
+- if (voltage_table->entries[i].value >= voltage)
+- return i;
+- }
+-
+- /* voltage is bigger than max voltage in the table */
+- return i - 1;
+-}
+-
+-uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci)
+-{
+- uint32_t i;
+-
+- for (i = 0; i < vddci_table->count; i++) {
+- if (vddci_table->entries[i].value >= vddci)
+- return vddci_table->entries[i].value;
+- }
+-
+- pr_debug("vddci is larger than max value in vddci_table\n");
+- return vddci_table->entries[i-1].value;
+-}
+-
+-int phm_find_boot_level(void *table,
+- uint32_t value, uint32_t *boot_level)
+-{
+- int result = -EINVAL;
+- uint32_t i;
+- struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
+-
+- for (i = 0; i < dpm_table->count; i++) {
+- if (value == dpm_table->dpm_level[i].value) {
+- *boot_level = i;
+- result = 0;
+- }
+- }
+-
+- return result;
+-}
+-
+-int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
+- phm_ppt_v1_voltage_lookup_table *lookup_table,
+- uint16_t virtual_voltage_id, int32_t *sclk)
+-{
+- uint8_t entry_id;
+- uint8_t voltage_id;
+- struct phm_ppt_v1_information *table_info =
+- (struct phm_ppt_v1_information *)(hwmgr->pptable);
+-
+- PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL);
+-
+- /* search for leakage voltage ID 0xff01 ~ 0xff08 and sckl */
+- for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) {
+- voltage_id = table_info->vdd_dep_on_sclk->entries[entry_id].vddInd;
+- if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
+- break;
+- }
+-
+- if (entry_id >= table_info->vdd_dep_on_sclk->count) {
+- pr_debug("Can't find requested voltage id in vdd_dep_on_sclk table\n");
+- return -EINVAL;
+- }
+-
+- *sclk = table_info->vdd_dep_on_sclk->entries[entry_id].clk;
+-
+- return 0;
+-}
+-
+-/**
+- * Initialize Dynamic State Adjustment Rule Settings
+- *
+- * @param hwmgr the address of the powerplay hardware manager.
+- */
+-int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr)
+-{
+- uint32_t table_size;
+- struct phm_clock_voltage_dependency_table *table_clk_vlt;
+- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
+-
+- /* initialize vddc_dep_on_dal_pwrl table */
+- table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record);
+- table_clk_vlt = kzalloc(table_size, GFP_KERNEL);
+-
+- if (NULL == table_clk_vlt) {
+- pr_err("Can not allocate space for vddc_dep_on_dal_pwrl! \n");
+- return -ENOMEM;
+- } else {
+- table_clk_vlt->count = 4;
+- table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW;
+- table_clk_vlt->entries[0].v = 0;
+- table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW;
+- table_clk_vlt->entries[1].v = 720;
+- table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL;
+- table_clk_vlt->entries[2].v = 810;
+- table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
+- table_clk_vlt->entries[3].v = 900;
+- if (pptable_info != NULL)
+- pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
+- hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
+- }
+-
+- return 0;
+-}
+-
+-uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask)
+-{
+- uint32_t level = 0;
+-
+- while (0 == (mask & (1 << level)))
+- level++;
+-
+- return level;
+-}
+-
+-void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
+-{
+- struct phm_ppt_v1_information *table_info =
+- (struct phm_ppt_v1_information *)hwmgr->pptable;
+- struct phm_clock_voltage_dependency_table *table =
+- table_info->vddc_dep_on_dal_pwrl;
+- struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table;
+- enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level;
+- uint32_t req_vddc = 0, req_volt, i;
+-
+- if (!table || table->count <= 0
+- || dal_power_level < PP_DAL_POWERLEVEL_ULTRALOW
+- || dal_power_level > PP_DAL_POWERLEVEL_PERFORMANCE)
+- return;
+-
+- for (i = 0; i < table->count; i++) {
+- if (dal_power_level == table->entries[i].clk) {
+- req_vddc = table->entries[i].v;
+- break;
+- }
+- }
+-
+- vddc_table = table_info->vdd_dep_on_sclk;
+- for (i = 0; i < vddc_table->count; i++) {
+- if (req_vddc <= vddc_table->entries[i].vddc) {
+- req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE);
+- smum_send_msg_to_smc_with_parameter(hwmgr,
+- PPSMC_MSG_VddC_Request, req_volt);
+- return;
+- }
+- }
+- pr_err("DAL requested level can not"
+- " found a available voltage in VDDC DPM Table \n");
+-}
+
+ void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr)
+ {
+@@ -954,25 +468,6 @@ int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr)
+ return 0;
+ }
+
+-int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
+- uint32_t sclk, uint16_t id, uint16_t *voltage)
+-{
+- uint32_t vol;
+- int ret = 0;
+-
+- if (hwmgr->chip_id < CHIP_TONGA) {
+- ret = atomctrl_get_voltage_evv(hwmgr, id, voltage);
+- } else if (hwmgr->chip_id < CHIP_POLARIS10) {
+- ret = atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
+- if (*voltage >= 2000 || *voltage == 0)
+- *voltage = 1150;
+- } else {
+- ret = atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, voltage_type, sclk, id, &vol);
+- *voltage = (uint16_t)(vol/100);
+- }
+- return ret;
+-}
+-
+ int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
+ {
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
+new file mode 100644
+index 0000000..e11daf5
+--- /dev/null
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
+@@ -0,0 +1,536 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++#include "hwmgr.h"
++#include "pp_debug.h"
++#include "ppatomctrl.h"
++#include "ppsmc.h"
++
++uint8_t convert_to_vid(uint16_t vddc)
++{
++ return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
++}
++
++uint16_t convert_to_vddc(uint8_t vid)
++{
++ return (uint16_t) ((6200 - (vid * 25)) / VOLTAGE_SCALE);
++}
++
++uint32_t phm_set_field_to_u32(u32 offset, u32 original_data, u32 field, u32 size)
++{
++ u32 mask = 0;
++ u32 shift = 0;
++
++ shift = (offset % 4) << 3;
++ if (size == sizeof(uint8_t))
++ mask = 0xFF << shift;
++ else if (size == sizeof(uint16_t))
++ mask = 0xFFFF << shift;
++
++ original_data &= ~mask;
++ original_data |= (field << shift);
++ return original_data;
++}
++
++/**
++ * Returns once the part of the register indicated by the mask has
++ * reached the given value.
++ */
++int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
++ uint32_t value, uint32_t mask)
++{
++ uint32_t i;
++ uint32_t cur_value;
++
++ if (hwmgr == NULL || hwmgr->device == NULL) {
++ pr_err("Invalid Hardware Manager!");
++ return -EINVAL;
++ }
++
++ for (i = 0; i < hwmgr->usec_timeout; i++) {
++ cur_value = cgs_read_register(hwmgr->device, index);
++ if ((cur_value & mask) == (value & mask))
++ break;
++ udelay(1);
++ }
++
++ /* timeout means wrong logic*/
++ if (i == hwmgr->usec_timeout)
++ return -1;
++ return 0;
++}
++
++
++/**
++ * Returns once the part of the register indicated by the mask has
++ * reached the given value.The indirect space is described by giving
++ * the memory-mapped index of the indirect index register.
++ */
++int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
++ uint32_t indirect_port,
++ uint32_t index,
++ uint32_t value,
++ uint32_t mask)
++{
++ if (hwmgr == NULL || hwmgr->device == NULL) {
++ pr_err("Invalid Hardware Manager!");
++ return -EINVAL;
++ }
++
++ cgs_write_register(hwmgr->device, indirect_port, index);
++ return phm_wait_on_register(hwmgr, indirect_port + 1, mask, value);
++}
++
++int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
++ uint32_t index,
++ uint32_t value, uint32_t mask)
++{
++ uint32_t i;
++ uint32_t cur_value;
++
++ if (hwmgr == NULL || hwmgr->device == NULL)
++ return -EINVAL;
++
++ for (i = 0; i < hwmgr->usec_timeout; i++) {
++ cur_value = cgs_read_register(hwmgr->device,
++ index);
++ if ((cur_value & mask) != (value & mask))
++ break;
++ udelay(1);
++ }
++
++ /* timeout means wrong logic */
++ if (i == hwmgr->usec_timeout)
++ return -ETIME;
++ return 0;
++}
++
++int phm_wait_for_indirect_register_unequal(struct pp_hwmgr *hwmgr,
++ uint32_t indirect_port,
++ uint32_t index,
++ uint32_t value,
++ uint32_t mask)
++{
++ if (hwmgr == NULL || hwmgr->device == NULL)
++ return -EINVAL;
++
++ cgs_write_register(hwmgr->device, indirect_port, index);
++ return phm_wait_for_register_unequal(hwmgr, indirect_port + 1,
++ value, mask);
++}
++
++bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr)
++{
++ return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDPowerGating);
++}
++
++bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr)
++{
++ return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEPowerGating);
++}
++
++
++int phm_trim_voltage_table(struct pp_atomctrl_voltage_table *vol_table)
++{
++ uint32_t i, j;
++ uint16_t vvalue;
++ bool found = false;
++ struct pp_atomctrl_voltage_table *table;
++
++ PP_ASSERT_WITH_CODE((NULL != vol_table),
++ "Voltage Table empty.", return -EINVAL);
++
++ table = kzalloc(sizeof(struct pp_atomctrl_voltage_table),
++ GFP_KERNEL);
++
++ if (NULL == table)
++ return -EINVAL;
++
++ table->mask_low = vol_table->mask_low;
++ table->phase_delay = vol_table->phase_delay;
++
++ for (i = 0; i < vol_table->count; i++) {
++ vvalue = vol_table->entries[i].value;
++ found = false;
++
++ for (j = 0; j < table->count; j++) {
++ if (vvalue == table->entries[j].value) {
++ found = true;
++ break;
++ }
++ }
++
++ if (!found) {
++ table->entries[table->count].value = vvalue;
++ table->entries[table->count].smio_low =
++ vol_table->entries[i].smio_low;
++ table->count++;
++ }
++ }
++
++ memcpy(vol_table, table, sizeof(struct pp_atomctrl_voltage_table));
++ kfree(table);
++ table = NULL;
++ return 0;
++}
++
++int phm_get_svi2_mvdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
++ phm_ppt_v1_clock_voltage_dependency_table *dep_table)
++{
++ uint32_t i;
++ int result;
++
++ PP_ASSERT_WITH_CODE((0 != dep_table->count),
++ "Voltage Dependency Table empty.", return -EINVAL);
++
++ PP_ASSERT_WITH_CODE((NULL != vol_table),
++ "vol_table empty.", return -EINVAL);
++
++ vol_table->mask_low = 0;
++ vol_table->phase_delay = 0;
++ vol_table->count = dep_table->count;
++
++ for (i = 0; i < dep_table->count; i++) {
++ vol_table->entries[i].value = dep_table->entries[i].mvdd;
++ vol_table->entries[i].smio_low = 0;
++ }
++
++ result = phm_trim_voltage_table(vol_table);
++ PP_ASSERT_WITH_CODE((0 == result),
++ "Failed to trim MVDD table.", return result);
++
++ return 0;
++}
++
++int phm_get_svi2_vddci_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
++ phm_ppt_v1_clock_voltage_dependency_table *dep_table)
++{
++ uint32_t i;
++ int result;
++
++ PP_ASSERT_WITH_CODE((0 != dep_table->count),
++ "Voltage Dependency Table empty.", return -EINVAL);
++
++ PP_ASSERT_WITH_CODE((NULL != vol_table),
++ "vol_table empty.", return -EINVAL);
++
++ vol_table->mask_low = 0;
++ vol_table->phase_delay = 0;
++ vol_table->count = dep_table->count;
++
++ for (i = 0; i < dep_table->count; i++) {
++ vol_table->entries[i].value = dep_table->entries[i].vddci;
++ vol_table->entries[i].smio_low = 0;
++ }
++
++ result = phm_trim_voltage_table(vol_table);
++ PP_ASSERT_WITH_CODE((0 == result),
++ "Failed to trim VDDCI table.", return result);
++
++ return 0;
++}
++
++int phm_get_svi2_vdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
++ phm_ppt_v1_voltage_lookup_table *lookup_table)
++{
++ int i = 0;
++
++ PP_ASSERT_WITH_CODE((0 != lookup_table->count),
++ "Voltage Lookup Table empty.", return -EINVAL);
++
++ PP_ASSERT_WITH_CODE((NULL != vol_table),
++ "vol_table empty.", return -EINVAL);
++
++ vol_table->mask_low = 0;
++ vol_table->phase_delay = 0;
++
++ vol_table->count = lookup_table->count;
++
++ for (i = 0; i < vol_table->count; i++) {
++ vol_table->entries[i].value = lookup_table->entries[i].us_vdd;
++ vol_table->entries[i].smio_low = 0;
++ }
++
++ return 0;
++}
++
++void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps,
++ struct pp_atomctrl_voltage_table *vol_table)
++{
++ unsigned int i, diff;
++
++ if (vol_table->count <= max_vol_steps)
++ return;
++
++ diff = vol_table->count - max_vol_steps;
++
++ for (i = 0; i < max_vol_steps; i++)
++ vol_table->entries[i] = vol_table->entries[i + diff];
++
++ vol_table->count = max_vol_steps;
++
++ return;
++}
++
++int phm_reset_single_dpm_table(void *table,
++ uint32_t count, int max)
++{
++ int i;
++
++ struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
++
++ dpm_table->count = count > max ? max : count;
++
++ for (i = 0; i < dpm_table->count; i++)
++ dpm_table->dpm_level[i].enabled = false;
++
++ return 0;
++}
++
++void phm_setup_pcie_table_entry(
++ void *table,
++ uint32_t index, uint32_t pcie_gen,
++ uint32_t pcie_lanes)
++{
++ struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
++ dpm_table->dpm_level[index].value = pcie_gen;
++ dpm_table->dpm_level[index].param1 = pcie_lanes;
++ dpm_table->dpm_level[index].enabled = 1;
++}
++
++int32_t phm_get_dpm_level_enable_mask_value(void *table)
++{
++ int32_t i;
++ int32_t mask = 0;
++ struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
++
++ for (i = dpm_table->count; i > 0; i--) {
++ mask = mask << 1;
++ if (dpm_table->dpm_level[i - 1].enabled)
++ mask |= 0x1;
++ else
++ mask &= 0xFFFFFFFE;
++ }
++
++ return mask;
++}
++
++uint8_t phm_get_voltage_index(
++ struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage)
++{
++ uint8_t count = (uint8_t) (lookup_table->count);
++ uint8_t i;
++
++ PP_ASSERT_WITH_CODE((NULL != lookup_table),
++ "Lookup Table empty.", return 0);
++ PP_ASSERT_WITH_CODE((0 != count),
++ "Lookup Table empty.", return 0);
++
++ for (i = 0; i < lookup_table->count; i++) {
++ /* find first voltage equal or bigger than requested */
++ if (lookup_table->entries[i].us_vdd >= voltage)
++ return i;
++ }
++ /* voltage is bigger than max voltage in the table */
++ return i - 1;
++}
++
++uint8_t phm_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
++ uint32_t voltage)
++{
++ uint8_t count = (uint8_t) (voltage_table->count);
++ uint8_t i = 0;
++
++ PP_ASSERT_WITH_CODE((NULL != voltage_table),
++ "Voltage Table empty.", return 0;);
++ PP_ASSERT_WITH_CODE((0 != count),
++ "Voltage Table empty.", return 0;);
++
++ for (i = 0; i < count; i++) {
++ /* find first voltage bigger than requested */
++ if (voltage_table->entries[i].value >= voltage)
++ return i;
++ }
++
++ /* voltage is bigger than max voltage in the table */
++ return i - 1;
++}
++
++uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci)
++{
++ uint32_t i;
++
++ for (i = 0; i < vddci_table->count; i++) {
++ if (vddci_table->entries[i].value >= vddci)
++ return vddci_table->entries[i].value;
++ }
++
++ pr_debug("vddci is larger than max value in vddci_table\n");
++ return vddci_table->entries[i-1].value;
++}
++
++int phm_find_boot_level(void *table,
++ uint32_t value, uint32_t *boot_level)
++{
++ int result = -EINVAL;
++ uint32_t i;
++ struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
++
++ for (i = 0; i < dpm_table->count; i++) {
++ if (value == dpm_table->dpm_level[i].value) {
++ *boot_level = i;
++ result = 0;
++ }
++ }
++
++ return result;
++}
++
++int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
++ phm_ppt_v1_voltage_lookup_table *lookup_table,
++ uint16_t virtual_voltage_id, int32_t *sclk)
++{
++ uint8_t entry_id;
++ uint8_t voltage_id;
++ struct phm_ppt_v1_information *table_info =
++ (struct phm_ppt_v1_information *)(hwmgr->pptable);
++
++ PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL);
++
++ /* search for leakage voltage ID 0xff01 ~ 0xff08 and sckl */
++ for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) {
++ voltage_id = table_info->vdd_dep_on_sclk->entries[entry_id].vddInd;
++ if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
++ break;
++ }
++
++ if (entry_id >= table_info->vdd_dep_on_sclk->count) {
++ pr_debug("Can't find requested voltage id in vdd_dep_on_sclk table\n");
++ return -EINVAL;
++ }
++
++ *sclk = table_info->vdd_dep_on_sclk->entries[entry_id].clk;
++
++ return 0;
++}
++
++/**
++ * Initialize Dynamic State Adjustment Rule Settings
++ *
++ * @param hwmgr the address of the powerplay hardware manager.
++ */
++int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr)
++{
++ uint32_t table_size;
++ struct phm_clock_voltage_dependency_table *table_clk_vlt;
++ struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
++
++ /* initialize vddc_dep_on_dal_pwrl table */
++ table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record);
++ table_clk_vlt = kzalloc(table_size, GFP_KERNEL);
++
++ if (NULL == table_clk_vlt) {
++ pr_err("Can not allocate space for vddc_dep_on_dal_pwrl! \n");
++ return -ENOMEM;
++ } else {
++ table_clk_vlt->count = 4;
++ table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW;
++ table_clk_vlt->entries[0].v = 0;
++ table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW;
++ table_clk_vlt->entries[1].v = 720;
++ table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL;
++ table_clk_vlt->entries[2].v = 810;
++ table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
++ table_clk_vlt->entries[3].v = 900;
++ if (pptable_info != NULL)
++ pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
++ hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
++ }
++
++ return 0;
++}
++
++uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask)
++{
++ uint32_t level = 0;
++
++ while (0 == (mask & (1 << level)))
++ level++;
++
++ return level;
++}
++
++void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
++{
++ struct phm_ppt_v1_information *table_info =
++ (struct phm_ppt_v1_information *)hwmgr->pptable;
++ struct phm_clock_voltage_dependency_table *table =
++ table_info->vddc_dep_on_dal_pwrl;
++ struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table;
++ enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level;
++ uint32_t req_vddc = 0, req_volt, i;
++
++ if (!table || table->count <= 0
++ || dal_power_level < PP_DAL_POWERLEVEL_ULTRALOW
++ || dal_power_level > PP_DAL_POWERLEVEL_PERFORMANCE)
++ return;
++
++ for (i = 0; i < table->count; i++) {
++ if (dal_power_level == table->entries[i].clk) {
++ req_vddc = table->entries[i].v;
++ break;
++ }
++ }
++
++ vddc_table = table_info->vdd_dep_on_sclk;
++ for (i = 0; i < vddc_table->count; i++) {
++ if (req_vddc <= vddc_table->entries[i].vddc) {
++ req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE);
++ smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_VddC_Request, req_volt);
++ return;
++ }
++ }
++ pr_err("DAL requested level can not"
++ " found a available voltage in VDDC DPM Table \n");
++}
++
++int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
++ uint32_t sclk, uint16_t id, uint16_t *voltage)
++{
++ uint32_t vol;
++ int ret = 0;
++
++ if (hwmgr->chip_id < CHIP_TONGA) {
++ ret = atomctrl_get_voltage_evv(hwmgr, id, voltage);
++ } else if (hwmgr->chip_id < CHIP_POLARIS10) {
++ ret = atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
++ if (*voltage >= 2000 || *voltage == 0)
++ *voltage = 1150;
++ } else {
++ ret = atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, voltage_type, sclk, id, &vol);
++ *voltage = (uint16_t)(vol/100);
++ }
++ return ret;
++}
++
++
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
+new file mode 100644
+index 0000000..a1a4913
+--- /dev/null
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
+@@ -0,0 +1,180 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++#ifndef _SMU_HELPER_H_
++#define _SMU_HELPER_H_
++
++struct pp_atomctrl_voltage_table;
++struct pp_hwmgr;
++struct phm_ppt_v1_voltage_lookup_table;
++
++extern int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
++ uint32_t index,
++ uint32_t value, uint32_t mask);
++extern int phm_wait_for_indirect_register_unequal(
++ struct pp_hwmgr *hwmgr,
++ uint32_t indirect_port, uint32_t index,
++ uint32_t value, uint32_t mask);
++
++
++extern bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr);
++extern bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr);
++extern bool phm_cf_want_microcode_fan_ctrl(struct pp_hwmgr *hwmgr);
++
++extern int phm_trim_voltage_table(struct pp_atomctrl_voltage_table *vol_table);
++extern int phm_get_svi2_mvdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table, phm_ppt_v1_clock_voltage_dependency_table *dep_table);
++extern int phm_get_svi2_vddci_voltage_table(struct pp_atomctrl_voltage_table *vol_table, phm_ppt_v1_clock_voltage_dependency_table *dep_table);
++extern int phm_get_svi2_vdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table, phm_ppt_v1_voltage_lookup_table *lookup_table);
++extern void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps, struct pp_atomctrl_voltage_table *vol_table);
++extern int phm_reset_single_dpm_table(void *table, uint32_t count, int max);
++extern void phm_setup_pcie_table_entry(void *table, uint32_t index, uint32_t pcie_gen, uint32_t pcie_lanes);
++extern int32_t phm_get_dpm_level_enable_mask_value(void *table);
++extern uint8_t phm_get_voltage_id(struct pp_atomctrl_voltage_table *voltage_table,
++ uint32_t voltage);
++extern uint8_t phm_get_voltage_index(struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage);
++extern uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci);
++extern int phm_find_boot_level(void *table, uint32_t value, uint32_t *boot_level);
++extern int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr, phm_ppt_v1_voltage_lookup_table *lookup_table,
++ uint16_t virtual_voltage_id, int32_t *sclk);
++extern int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr);
++extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask);
++extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr);
++
++extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
++ uint32_t sclk, uint16_t id, uint16_t *voltage);
++
++extern uint32_t phm_set_field_to_u32(u32 offset, u32 original_data, u32 field, u32 size);
++
++extern int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
++ uint32_t value, uint32_t mask);
++
++extern int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
++ uint32_t indirect_port,
++ uint32_t index,
++ uint32_t value,
++ uint32_t mask);
++
++#define PHM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
++#define PHM_FIELD_MASK(reg, field) reg##__##field##_MASK
++
++#define PHM_SET_FIELD(origval, reg, field, fieldval) \
++ (((origval) & ~PHM_FIELD_MASK(reg, field)) | \
++ (PHM_FIELD_MASK(reg, field) & ((fieldval) << PHM_FIELD_SHIFT(reg, field))))
++
++#define PHM_GET_FIELD(value, reg, field) \
++ (((value) & PHM_FIELD_MASK(reg, field)) >> \
++ PHM_FIELD_SHIFT(reg, field))
++
++
++/* Operations on named fields. */
++
++#define PHM_READ_FIELD(device, reg, field) \
++ PHM_GET_FIELD(cgs_read_register(device, mm##reg), reg, field)
++
++#define PHM_READ_INDIRECT_FIELD(device, port, reg, field) \
++ PHM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
++ reg, field)
++
++#define PHM_READ_VFPF_INDIRECT_FIELD(device, port, reg, field) \
++ PHM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
++ reg, field)
++
++#define PHM_WRITE_FIELD(device, reg, field, fieldval) \
++ cgs_write_register(device, mm##reg, PHM_SET_FIELD( \
++ cgs_read_register(device, mm##reg), reg, field, fieldval))
++
++#define PHM_WRITE_INDIRECT_FIELD(device, port, reg, field, fieldval) \
++ cgs_write_ind_register(device, port, ix##reg, \
++ PHM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
++ reg, field, fieldval))
++
++#define PHM_WRITE_VFPF_INDIRECT_FIELD(device, port, reg, field, fieldval) \
++ cgs_write_ind_register(device, port, ix##reg, \
++ PHM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
++ reg, field, fieldval))
++
++#define PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask) \
++ phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX, index, value, mask)
++
++
++#define PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \
++ PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
++
++#define PHM_WAIT_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \
++ PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) \
++ << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
++
++#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, index, value, mask) \
++ phm_wait_for_indirect_register_unequal(hwmgr, \
++ mm##port##_INDEX, index, value, mask)
++
++#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \
++ PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
++
++#define PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \
++ PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, \
++ (fieldval) << PHM_FIELD_SHIFT(reg, field), \
++ PHM_FIELD_MASK(reg, field) )
++
++
++#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \
++ port, index, value, mask) \
++ phm_wait_for_indirect_register_unequal(hwmgr, \
++ mm##port##_INDEX_11, index, value, mask)
++
++#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \
++ PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
++
++#define PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \
++ PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, \
++ (fieldval) << PHM_FIELD_SHIFT(reg, field), \
++ PHM_FIELD_MASK(reg, field))
++
++
++#define PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, \
++ port, index, value, mask) \
++ phm_wait_on_indirect_register(hwmgr, \
++ mm##port##_INDEX_11, index, value, mask)
++
++#define PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \
++ PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
++
++#define PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \
++ PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, \
++ (fieldval) << PHM_FIELD_SHIFT(reg, field), \
++ PHM_FIELD_MASK(reg, field))
++
++#define PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \
++ index, value, mask) \
++ phm_wait_for_register_unequal(hwmgr, \
++ index, value, mask)
++
++#define PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, value, mask) \
++ PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \
++ mm##reg, value, mask)
++
++#define PHM_WAIT_FIELD_UNEQUAL(hwmgr, reg, field, fieldval) \
++ PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, \
++ (fieldval) << PHM_FIELD_SHIFT(reg, field), \
++ PHM_FIELD_MASK(reg, field))
++
++#endif /* _SMU_HELPER_H_ */
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+index 9bdad48..2e2e4d0 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+@@ -32,7 +32,7 @@
+ #include "ppatomctrl.h"
+ #include "hwmgr_ppt.h"
+ #include "power_state.h"
+-#include "cgs_linux.h"
++#include "smu_helper.h"
+
+ struct pp_instance;
+ struct pp_hwmgr;
+@@ -777,160 +777,9 @@ extern int hwmgr_hw_resume(struct pp_instance *handle);
+ extern int hwmgr_handle_task(struct pp_instance *handle,
+ enum amd_pp_task task_id,
+ enum amd_pm_state_type *user_state);
+-extern int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
+- uint32_t value, uint32_t mask);
+-
+-extern int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
+- uint32_t indirect_port,
+- uint32_t index,
+- uint32_t value,
+- uint32_t mask);
+-
+-extern int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
+- uint32_t index,
+- uint32_t value, uint32_t mask);
+-extern int phm_wait_for_indirect_register_unequal(
+- struct pp_hwmgr *hwmgr,
+- uint32_t indirect_port, uint32_t index,
+- uint32_t value, uint32_t mask);
+-
+-
+-extern bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr);
+-extern bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr);
+-extern bool phm_cf_want_microcode_fan_ctrl(struct pp_hwmgr *hwmgr);
+-
+-extern int phm_trim_voltage_table(struct pp_atomctrl_voltage_table *vol_table);
+-extern int phm_get_svi2_mvdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table, phm_ppt_v1_clock_voltage_dependency_table *dep_table);
+-extern int phm_get_svi2_vddci_voltage_table(struct pp_atomctrl_voltage_table *vol_table, phm_ppt_v1_clock_voltage_dependency_table *dep_table);
+-extern int phm_get_svi2_vdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table, phm_ppt_v1_voltage_lookup_table *lookup_table);
+-extern void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps, struct pp_atomctrl_voltage_table *vol_table);
+-extern int phm_reset_single_dpm_table(void *table, uint32_t count, int max);
+-extern void phm_setup_pcie_table_entry(void *table, uint32_t index, uint32_t pcie_gen, uint32_t pcie_lanes);
+-extern int32_t phm_get_dpm_level_enable_mask_value(void *table);
+-extern uint8_t phm_get_voltage_id(struct pp_atomctrl_voltage_table *voltage_table,
+- uint32_t voltage);
+-extern uint8_t phm_get_voltage_index(struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage);
+-extern uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci);
+-extern int phm_find_boot_level(void *table, uint32_t value, uint32_t *boot_level);
+-extern int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr, phm_ppt_v1_voltage_lookup_table *lookup_table,
+- uint16_t virtual_voltage_id, int32_t *sclk);
+-extern int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr);
+-extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask);
+-extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr);
+-
+-extern int smu7_init_function_pointers(struct pp_hwmgr *hwmgr);
+-extern int vega10_hwmgr_init(struct pp_hwmgr *hwmgr);
+-extern int smu10_init_function_pointers(struct pp_hwmgr *hwmgr);
+-
+-extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
+- uint32_t sclk, uint16_t id, uint16_t *voltage);
+-
+-extern uint32_t phm_set_field_to_u32(u32 offset, u32 original_data, u32 field, u32 size);
+
+-#define PHM_ENTIRE_REGISTER_MASK 0xFFFFFFFFU
+-
+-#define PHM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
+-#define PHM_FIELD_MASK(reg, field) reg##__##field##_MASK
+-
+-#define PHM_SET_FIELD(origval, reg, field, fieldval) \
+- (((origval) & ~PHM_FIELD_MASK(reg, field)) | \
+- (PHM_FIELD_MASK(reg, field) & ((fieldval) << PHM_FIELD_SHIFT(reg, field))))
+-
+-#define PHM_GET_FIELD(value, reg, field) \
+- (((value) & PHM_FIELD_MASK(reg, field)) >> \
+- PHM_FIELD_SHIFT(reg, field))
+-
+-
+-/* Operations on named fields. */
+-
+-#define PHM_READ_FIELD(device, reg, field) \
+- PHM_GET_FIELD(cgs_read_register(device, mm##reg), reg, field)
+-
+-#define PHM_READ_INDIRECT_FIELD(device, port, reg, field) \
+- PHM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
+- reg, field)
+-
+-#define PHM_READ_VFPF_INDIRECT_FIELD(device, port, reg, field) \
+- PHM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
+- reg, field)
+-
+-#define PHM_WRITE_FIELD(device, reg, field, fieldval) \
+- cgs_write_register(device, mm##reg, PHM_SET_FIELD( \
+- cgs_read_register(device, mm##reg), reg, field, fieldval))
+-
+-#define PHM_WRITE_INDIRECT_FIELD(device, port, reg, field, fieldval) \
+- cgs_write_ind_register(device, port, ix##reg, \
+- PHM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
+- reg, field, fieldval))
+-
+-#define PHM_WRITE_VFPF_INDIRECT_FIELD(device, port, reg, field, fieldval) \
+- cgs_write_ind_register(device, port, ix##reg, \
+- PHM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
+- reg, field, fieldval))
+-
+-#define PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask) \
+- phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX, index, value, mask)
+-
+-
+-#define PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \
+- PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
+
+-#define PHM_WAIT_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \
+- PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) \
+- << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
+-
+-#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, index, value, mask) \
+- phm_wait_for_indirect_register_unequal(hwmgr, \
+- mm##port##_INDEX, index, value, mask)
+-
+-#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \
+- PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
+-
+-#define PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \
+- PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, \
+- (fieldval) << PHM_FIELD_SHIFT(reg, field), \
+- PHM_FIELD_MASK(reg, field) )
+-
+-
+-#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \
+- port, index, value, mask) \
+- phm_wait_for_indirect_register_unequal(hwmgr, \
+- mm##port##_INDEX_11, index, value, mask)
+-
+-#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \
+- PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
+-
+-#define PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \
+- PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, \
+- (fieldval) << PHM_FIELD_SHIFT(reg, field), \
+- PHM_FIELD_MASK(reg, field))
+-
+-
+-#define PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, \
+- port, index, value, mask) \
+- phm_wait_on_indirect_register(hwmgr, \
+- mm##port##_INDEX_11, index, value, mask)
+-
+-#define PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \
+- PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
+-
+-#define PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \
+- PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, \
+- (fieldval) << PHM_FIELD_SHIFT(reg, field), \
+- PHM_FIELD_MASK(reg, field))
+-
+-#define PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \
+- index, value, mask) \
+- phm_wait_for_register_unequal(hwmgr, \
+- index, value, mask)
+-
+-#define PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, value, mask) \
+- PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \
+- mm##reg, value, mask)
++#define PHM_ENTIRE_REGISTER_MASK 0xFFFFFFFFU
+
+-#define PHM_WAIT_FIELD_UNEQUAL(hwmgr, reg, field, fieldval) \
+- PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, \
+- (fieldval) << PHM_FIELD_SHIFT(reg, field), \
+- PHM_FIELD_MASK(reg, field))
+
+ #endif /* _HWMGR_H_ */
+--
+2.7.4
+