Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3801-drm-amdgpu-Remove-wrapper-layer-of-smu-ip-functions.patch')
-rw-r--r-- meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3801-drm-amdgpu-Remove-wrapper-layer-of-smu-ip-functions.patch | 2537
1 file changed, 2537 insertions(+), 0 deletions(-)
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3801-drm-amdgpu-Remove-wrapper-layer-of-smu-ip-functions.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3801-drm-amdgpu-Remove-wrapper-layer-of-smu-ip-functions.patch
new file mode 100644
index 00000000..18694900
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3801-drm-amdgpu-Remove-wrapper-layer-of-smu-ip-functions.patch
@@ -0,0 +1,2537 @@
+From 16f5d991070b75722df3935e945039f213cb896d Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Mon, 12 Mar 2018 19:52:23 +0800
+Subject: [PATCH 3801/4131] drm/amdgpu: Remove wrapper layer of smu ip
+ functions
+
+1. Delete amdgpu_powerplay.c, which only wrapped the SMU IP functions.
+2. Delete struct pp_instance.
+3. Make struct pp_hwmgr the SMU HW handle.
+
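+For illustration only, a minimal user-space sketch of the resulting layout.
+The types below are simplified stand-ins, not the real kernel definitions;
+the point is that adev->powerplay.pp_handle now refers to the hwmgr
+directly, with no struct pp_instance in between:
+
+    /* sketch.c - simplified model of the new handle layout */
+    #include <stdio.h>
+    #include <stdlib.h>
+
+    struct pp_hwmgr {               /* stand-in for the real pp_hwmgr   */
+            int pm_en;
+    };
+
+    struct amd_powerplay {          /* mirrors the slimmed-down struct  */
+            void *pp_handle;        /* now points at a struct pp_hwmgr  */
+            const void *pp_funcs;   /* amd_pm_funcs in the kernel       */
+    };
+
+    struct amdgpu_device {          /* stand-in for the real adev       */
+            struct amd_powerplay powerplay;
+    };
+
+    /* After this patch, amd_powerplay_create() allocates the hwmgr
+     * itself and stores it as the handle. */
+    static int amd_powerplay_create(struct amdgpu_device *adev)
+    {
+            struct pp_hwmgr *hwmgr = calloc(1, sizeof(*hwmgr));
+
+            if (!hwmgr)
+                    return -1;
+            hwmgr->pm_en = 1;
+            adev->powerplay.pp_handle = hwmgr;
+            return 0;
+    }
+
+    int main(void)
+    {
+            struct amdgpu_device adev = { { 0 } };
+            struct pp_hwmgr *hwmgr;
+
+            if (amd_powerplay_create(&adev))
+                    return 1;
+            hwmgr = adev.powerplay.pp_handle;
+            printf("pp_handle is the hwmgr itself, pm_en=%d\n", hwmgr->pm_en);
+            free(hwmgr);
+            return 0;
+    }
+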
+Change-Id: Ib475acf85798de98080b902baed28fcf55649b82
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/Makefile | 3 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 -
+ drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c | 285 -----------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h | 33 --
+ drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 16 +-
+ drivers/gpu/drm/amd/amdgpu/cik.c | 15 +-
+ drivers/gpu/drm/amd/amdgpu/cik_dpm.h | 7 +-
+ drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 16 +-
+ drivers/gpu/drm/amd/amdgpu/si.c | 8 +-
+ drivers/gpu/drm/amd/amdgpu/si_dpm.c | 16 +-
+ drivers/gpu/drm/amd/amdgpu/si_dpm.h | 3 +-
+ drivers/gpu/drm/amd/amdgpu/soc15.c | 5 +-
+ drivers/gpu/drm/amd/amdgpu/vi.c | 13 +-
+ drivers/gpu/drm/amd/include/kgd_pp_interface.h | 3 +-
+ drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 611 ++++++++++--------------
+ drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 50 +-
+ drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 16 +-
+ drivers/gpu/drm/amd/powerplay/inc/pp_instance.h | 36 --
+ 19 files changed, 336 insertions(+), 804 deletions(-)
+ delete mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+ delete mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h
+ delete mode 100644 drivers/gpu/drm/amd/powerplay/inc/pp_instance.h
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
+index 296f967..e2e9f7a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/Makefile
++++ b/drivers/gpu/drm/amd/amdgpu/Makefile
+@@ -66,8 +66,7 @@ amdgpu-y += \
+
+ # add SMC block
+ amdgpu-y += \
+- amdgpu_dpm.o \
+- amdgpu_powerplay.o
++ amdgpu_dpm.o
+
+ # add DCE block
+ amdgpu-y += \
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 05a2c77..5f547a8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1452,9 +1452,7 @@ enum amd_hw_ip_block_type {
+ #define HWIP_MAX_INSTANCE 6
+
+ struct amd_powerplay {
+- struct cgs_device *cgs_device;
+ void *pp_handle;
+- const struct amd_ip_funcs *ip_funcs;
+ const struct amd_pm_funcs *pp_funcs;
+ };
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index 5db811d..2e40a5f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -1154,7 +1154,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
+ umode_t effective_mode = attr->mode;
+
+ /* handle non-powerplay limitations */
+- if (!adev->powerplay.cgs_device) {
++ if (!adev->powerplay.pp_handle) {
+ /* Skip fan attributes if fan is not present */
+ if (adev->pm.no_fan &&
+ (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+deleted file mode 100644
+index 825c9b9..0000000
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
++++ /dev/null
+@@ -1,285 +0,0 @@
+-/*
+- * Copyright 2015 Advanced Micro Devices, Inc.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included in
+- * all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
+- *
+- * Authors: AMD
+- *
+- */
+-#include "atom.h"
+-#include "amdgpu.h"
+-#include "amd_shared.h"
+-#include <linux/module.h>
+-#include <linux/moduleparam.h>
+-#include "amdgpu_pm.h"
+-#include <drm/amdgpu_drm.h>
+-#include "amdgpu_powerplay.h"
+-#include "si_dpm.h"
+-#include "cik_dpm.h"
+-#include "vi_dpm.h"
+-
+-static int amdgpu_pp_early_init(void *handle)
+-{
+- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+- struct amd_powerplay *amd_pp;
+- int ret = 0;
+-
+- amd_pp = &(adev->powerplay);
+- amd_pp->pp_handle = (void *)adev;
+-
+- switch (adev->asic_type) {
+- case CHIP_POLARIS11:
+- case CHIP_POLARIS10:
+- case CHIP_POLARIS12:
+- case CHIP_TONGA:
+- case CHIP_FIJI:
+- case CHIP_TOPAZ:
+- case CHIP_CARRIZO:
+- case CHIP_STONEY:
+- case CHIP_VEGA10:
+- case CHIP_RAVEN:
+- amd_pp->cgs_device = amdgpu_cgs_create_device(adev);
+- amd_pp->ip_funcs = &pp_ip_funcs;
+- amd_pp->pp_funcs = &pp_dpm_funcs;
+- break;
+- /* These chips don't have powerplay implemenations */
+-#ifdef CONFIG_DRM_AMDGPU_SI
+- case CHIP_TAHITI:
+- case CHIP_PITCAIRN:
+- case CHIP_VERDE:
+- case CHIP_OLAND:
+- case CHIP_HAINAN:
+- amd_pp->ip_funcs = &si_dpm_ip_funcs;
+- amd_pp->pp_funcs = &si_dpm_funcs;
+- break;
+-#endif
+-#ifdef CONFIG_DRM_AMDGPU_CIK
+- case CHIP_BONAIRE:
+- case CHIP_HAWAII:
+- if (amdgpu_dpm == -1) {
+- amd_pp->ip_funcs = &ci_dpm_ip_funcs;
+- amd_pp->pp_funcs = &ci_dpm_funcs;
+- } else {
+- amd_pp->cgs_device = amdgpu_cgs_create_device(adev);
+- amd_pp->ip_funcs = &pp_ip_funcs;
+- amd_pp->pp_funcs = &pp_dpm_funcs;
+- }
+- break;
+- case CHIP_KABINI:
+- case CHIP_MULLINS:
+- case CHIP_KAVERI:
+- amd_pp->ip_funcs = &kv_dpm_ip_funcs;
+- amd_pp->pp_funcs = &kv_dpm_funcs;
+- break;
+-#endif
+- default:
+- ret = -EINVAL;
+- break;
+- }
+-
+- if (adev->powerplay.ip_funcs->early_init)
+- ret = adev->powerplay.ip_funcs->early_init(adev);
+-
+- return ret;
+-}
+-
+-
+-static int amdgpu_pp_late_init(void *handle)
+-{
+- int ret = 0;
+- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+-
+- if (adev->powerplay.ip_funcs->late_init)
+- ret = adev->powerplay.ip_funcs->late_init(
+- adev->powerplay.pp_handle);
+-
+- return ret;
+-}
+-
+-static int amdgpu_pp_sw_init(void *handle)
+-{
+- int ret = 0;
+- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+-
+- if (adev->powerplay.ip_funcs->sw_init)
+- ret = adev->powerplay.ip_funcs->sw_init(
+- adev->powerplay.pp_handle);
+-
+- return ret;
+-}
+-
+-static int amdgpu_pp_sw_fini(void *handle)
+-{
+- int ret = 0;
+- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+-
+- if (adev->powerplay.ip_funcs->sw_fini)
+- ret = adev->powerplay.ip_funcs->sw_fini(
+- adev->powerplay.pp_handle);
+- if (ret)
+- return ret;
+-
+- return ret;
+-}
+-
+-static int amdgpu_pp_hw_init(void *handle)
+-{
+- int ret = 0;
+- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+-
+- if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
+- amdgpu_ucode_init_bo(adev);
+-
+- if (adev->powerplay.ip_funcs->hw_init)
+- ret = adev->powerplay.ip_funcs->hw_init(
+- adev->powerplay.pp_handle);
+-
+- return ret;
+-}
+-
+-static int amdgpu_pp_hw_fini(void *handle)
+-{
+- int ret = 0;
+- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+-
+- if (adev->powerplay.ip_funcs->hw_fini)
+- ret = adev->powerplay.ip_funcs->hw_fini(
+- adev->powerplay.pp_handle);
+-
+- return ret;
+-}
+-
+-static void amdgpu_pp_late_fini(void *handle)
+-{
+- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+-
+- if (adev->powerplay.ip_funcs->late_fini)
+- adev->powerplay.ip_funcs->late_fini(
+- adev->powerplay.pp_handle);
+-
+- if (adev->powerplay.cgs_device)
+- amdgpu_cgs_destroy_device(adev->powerplay.cgs_device);
+-}
+-
+-static int amdgpu_pp_suspend(void *handle)
+-{
+- int ret = 0;
+- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+-
+- if (adev->powerplay.ip_funcs->suspend)
+- ret = adev->powerplay.ip_funcs->suspend(
+- adev->powerplay.pp_handle);
+- return ret;
+-}
+-
+-static int amdgpu_pp_resume(void *handle)
+-{
+- int ret = 0;
+- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+-
+- if (adev->powerplay.ip_funcs->resume)
+- ret = adev->powerplay.ip_funcs->resume(
+- adev->powerplay.pp_handle);
+- return ret;
+-}
+-
+-static int amdgpu_pp_set_clockgating_state(void *handle,
+- enum amd_clockgating_state state)
+-{
+- int ret = 0;
+- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+-
+- if (adev->powerplay.ip_funcs->set_clockgating_state)
+- ret = adev->powerplay.ip_funcs->set_clockgating_state(
+- adev->powerplay.pp_handle, state);
+- return ret;
+-}
+-
+-static int amdgpu_pp_set_powergating_state(void *handle,
+- enum amd_powergating_state state)
+-{
+- int ret = 0;
+- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+-
+- if (adev->powerplay.ip_funcs->set_powergating_state)
+- ret = adev->powerplay.ip_funcs->set_powergating_state(
+- adev->powerplay.pp_handle, state);
+- return ret;
+-}
+-
+-
+-static bool amdgpu_pp_is_idle(void *handle)
+-{
+- bool ret = true;
+- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+-
+- if (adev->powerplay.ip_funcs->is_idle)
+- ret = adev->powerplay.ip_funcs->is_idle(
+- adev->powerplay.pp_handle);
+- return ret;
+-}
+-
+-static int amdgpu_pp_wait_for_idle(void *handle)
+-{
+- int ret = 0;
+- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+-
+- if (adev->powerplay.ip_funcs->wait_for_idle)
+- ret = adev->powerplay.ip_funcs->wait_for_idle(
+- adev->powerplay.pp_handle);
+- return ret;
+-}
+-
+-static int amdgpu_pp_soft_reset(void *handle)
+-{
+- int ret = 0;
+- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+-
+- if (adev->powerplay.ip_funcs->soft_reset)
+- ret = adev->powerplay.ip_funcs->soft_reset(
+- adev->powerplay.pp_handle);
+- return ret;
+-}
+-
+-static const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
+- .name = "amdgpu_powerplay",
+- .early_init = amdgpu_pp_early_init,
+- .late_init = amdgpu_pp_late_init,
+- .sw_init = amdgpu_pp_sw_init,
+- .sw_fini = amdgpu_pp_sw_fini,
+- .hw_init = amdgpu_pp_hw_init,
+- .hw_fini = amdgpu_pp_hw_fini,
+- .late_fini = amdgpu_pp_late_fini,
+- .suspend = amdgpu_pp_suspend,
+- .resume = amdgpu_pp_resume,
+- .is_idle = amdgpu_pp_is_idle,
+- .wait_for_idle = amdgpu_pp_wait_for_idle,
+- .soft_reset = amdgpu_pp_soft_reset,
+- .set_clockgating_state = amdgpu_pp_set_clockgating_state,
+- .set_powergating_state = amdgpu_pp_set_powergating_state,
+-};
+-
+-const struct amdgpu_ip_block_version amdgpu_pp_ip_block =
+-{
+- .type = AMD_IP_BLOCK_TYPE_SMC,
+- .major = 1,
+- .minor = 0,
+- .rev = 0,
+- .funcs = &amdgpu_pp_ip_funcs,
+-};
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h
+deleted file mode 100644
+index c0c4bfd..0000000
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h
++++ /dev/null
+@@ -1,33 +0,0 @@
+-/*
+- * Copyright 2015 Advanced Micro Devices, Inc.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included in
+- * all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
+- *
+- * Authors: AMD
+- *
+- */
+-
+-#ifndef __AMDGPU_POWERPLAY_H__
+-#define __AMDGPU_POWERPLAY_H__
+-
+-#include "amd_shared.h"
+-
+-extern const struct amdgpu_ip_block_version amdgpu_pp_ip_block;
+-
+-#endif /* __AMDGPU_POWERPLAY_H__ */
+diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+index ddb814f..98d1dd2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+@@ -65,6 +65,8 @@ MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");
+ #define VOLTAGE_VID_OFFSET_SCALE1 625
+ #define VOLTAGE_VID_OFFSET_SCALE2 100
+
++static const struct amd_pm_funcs ci_dpm_funcs;
++
+ static const struct ci_pt_defaults defaults_hawaii_xt =
+ {
+ 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
+@@ -6241,6 +6243,7 @@ static int ci_dpm_early_init(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
++ adev->powerplay.pp_funcs = &ci_dpm_funcs;
+ ci_dpm_set_irq_funcs(adev);
+
+ return 0;
+@@ -6760,7 +6763,7 @@ static int ci_dpm_read_sensor(void *handle, int idx,
+ }
+ }
+
+-const struct amd_ip_funcs ci_dpm_ip_funcs = {
++static const struct amd_ip_funcs ci_dpm_ip_funcs = {
+ .name = "ci_dpm",
+ .early_init = ci_dpm_early_init,
+ .late_init = ci_dpm_late_init,
+@@ -6777,7 +6780,16 @@ const struct amd_ip_funcs ci_dpm_ip_funcs = {
+ .set_powergating_state = ci_dpm_set_powergating_state,
+ };
+
+-const struct amd_pm_funcs ci_dpm_funcs = {
++const struct amdgpu_ip_block_version ci_smu_ip_block =
++{
++ .type = AMD_IP_BLOCK_TYPE_SMC,
++ .major = 7,
++ .minor = 0,
++ .rev = 0,
++ .funcs = &ci_dpm_ip_funcs,
++};
++
++static const struct amd_pm_funcs ci_dpm_funcs = {
+ .pre_set_power_state = &ci_dpm_pre_set_power_state,
+ .set_power_state = &ci_dpm_set_power_state,
+ .post_set_power_state = &ci_dpm_post_set_power_state,
+diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
+index 71b3562..0df2203 100644
+--- a/drivers/gpu/drm/amd/amdgpu/cik.c
++++ b/drivers/gpu/drm/amd/amdgpu/cik.c
+@@ -67,7 +67,6 @@
+
+ #include "amdgpu_dm.h"
+ #include "amdgpu_amdkfd.h"
+-#include "amdgpu_powerplay.h"
+ #include "dce_virtual.h"
+
+ /*
+@@ -1996,7 +1995,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
+ amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
+- amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
++ if (amdgpu_dpm == -1)
++ amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
++ else
++ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+ #if defined(CONFIG_DRM_AMD_DC)
+@@ -2014,7 +2016,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
+ amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
+- amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
++ if (amdgpu_dpm == -1)
++ amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
++ else
++ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+ #if defined(CONFIG_DRM_AMD_DC)
+@@ -2032,7 +2037,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
+ amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
+- amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
++ amdgpu_device_ip_block_add(adev, &kv_smu_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+ #if defined(CONFIG_DRM_AMD_DC)
+@@ -2051,7 +2056,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
+ amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
+- amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
++ amdgpu_device_ip_block_add(adev, &kv_smu_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+ #if defined(CONFIG_DRM_AMD_DC)
+diff --git a/drivers/gpu/drm/amd/amdgpu/cik_dpm.h b/drivers/gpu/drm/amd/amdgpu/cik_dpm.h
+index c7b4349..2a08661 100644
+--- a/drivers/gpu/drm/amd/amdgpu/cik_dpm.h
++++ b/drivers/gpu/drm/amd/amdgpu/cik_dpm.h
+@@ -24,8 +24,7 @@
+ #ifndef __CIK_DPM_H__
+ #define __CIK_DPM_H__
+
+-extern const struct amd_ip_funcs ci_dpm_ip_funcs;
+-extern const struct amd_ip_funcs kv_dpm_ip_funcs;
+-extern const struct amd_pm_funcs ci_dpm_funcs;
+-extern const struct amd_pm_funcs kv_dpm_funcs;
++extern const struct amdgpu_ip_block_version ci_smu_ip_block;
++extern const struct amdgpu_ip_block_version kv_smu_ip_block;
++
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+index 8766681..81babe0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+@@ -42,6 +42,8 @@
+ #define KV_MINIMUM_ENGINE_CLOCK 800
+ #define SMC_RAM_END 0x40000
+
++static const struct amd_pm_funcs kv_dpm_funcs;
++
+ static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev);
+ static int kv_enable_nb_dpm(struct amdgpu_device *adev,
+ bool enable);
+@@ -2960,6 +2962,7 @@ static int kv_dpm_early_init(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
++ adev->powerplay.pp_funcs = &kv_dpm_funcs;
+ kv_dpm_set_irq_funcs(adev);
+
+ return 0;
+@@ -3301,7 +3304,7 @@ static int kv_dpm_read_sensor(void *handle, int idx,
+ }
+ }
+
+-const struct amd_ip_funcs kv_dpm_ip_funcs = {
++static const struct amd_ip_funcs kv_dpm_ip_funcs = {
+ .name = "kv_dpm",
+ .early_init = kv_dpm_early_init,
+ .late_init = kv_dpm_late_init,
+@@ -3318,7 +3321,16 @@ const struct amd_ip_funcs kv_dpm_ip_funcs = {
+ .set_powergating_state = kv_dpm_set_powergating_state,
+ };
+
+-const struct amd_pm_funcs kv_dpm_funcs = {
++const struct amdgpu_ip_block_version kv_smu_ip_block =
++{
++ .type = AMD_IP_BLOCK_TYPE_SMC,
++ .major = 1,
++ .minor = 0,
++ .rev = 0,
++ .funcs = &kv_dpm_ip_funcs,
++};
++
++static const struct amd_pm_funcs kv_dpm_funcs = {
+ .pre_set_power_state = &kv_dpm_pre_set_power_state,
+ .set_power_state = &kv_dpm_set_power_state,
+ .post_set_power_state = &kv_dpm_post_set_power_state,
+diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
+index f606b71..a763ac0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/si.c
++++ b/drivers/gpu/drm/amd/amdgpu/si.c
+@@ -32,7 +32,7 @@
+ #include "amdgpu_vce.h"
+ #include "atom.h"
+ #include "amd_pcie.h"
+-#include "amdgpu_powerplay.h"
++#include "si_dpm.h"
+ #include "sid.h"
+ #include "si_ih.h"
+ #include "gfx_v6_0.h"
+@@ -2050,7 +2050,7 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
+ amdgpu_device_ip_block_add(adev, &si_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
+- amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
++ amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+@@ -2064,7 +2064,7 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
+ amdgpu_device_ip_block_add(adev, &si_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
+- amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
++ amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+@@ -2078,7 +2078,7 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
+ amdgpu_device_ip_block_add(adev, &si_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
+- amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
++ amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);
+diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+index 424f17a0..7c2deae 100644
+--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+@@ -67,6 +67,8 @@ MODULE_FIRMWARE("radeon/hainan_smc.bin");
+ MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
+ MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
+
++static const struct amd_pm_funcs si_dpm_funcs;
++
+ union power_info {
+ struct _ATOM_POWERPLAY_INFO info;
+ struct _ATOM_POWERPLAY_INFO_V2 info_2;
+@@ -7914,6 +7916,7 @@ static int si_dpm_early_init(void *handle)
+
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
++ adev->powerplay.pp_funcs = &si_dpm_funcs;
+ si_dpm_set_irq_funcs(adev);
+ return 0;
+ }
+@@ -8014,7 +8017,7 @@ static int si_dpm_read_sensor(void *handle, int idx,
+ }
+ }
+
+-const struct amd_ip_funcs si_dpm_ip_funcs = {
++static const struct amd_ip_funcs si_dpm_ip_funcs = {
+ .name = "si_dpm",
+ .early_init = si_dpm_early_init,
+ .late_init = si_dpm_late_init,
+@@ -8031,7 +8034,16 @@ const struct amd_ip_funcs si_dpm_ip_funcs = {
+ .set_powergating_state = si_dpm_set_powergating_state,
+ };
+
+-const struct amd_pm_funcs si_dpm_funcs = {
++const struct amdgpu_ip_block_version si_smu_ip_block =
++{
++ .type = AMD_IP_BLOCK_TYPE_SMC,
++ .major = 6,
++ .minor = 0,
++ .rev = 0,
++ .funcs = &si_dpm_ip_funcs,
++};
++
++static const struct amd_pm_funcs si_dpm_funcs = {
+ .pre_set_power_state = &si_dpm_pre_set_power_state,
+ .set_power_state = &si_dpm_set_power_state,
+ .post_set_power_state = &si_dpm_post_set_power_state,
+diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.h b/drivers/gpu/drm/amd/amdgpu/si_dpm.h
+index 9fe343d..6b7d292 100644
+--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.h
++++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.h
+@@ -245,8 +245,7 @@ enum si_display_gap
+ SI_PM_DISPLAY_GAP_IGNORE = 3,
+ };
+
+-extern const struct amd_ip_funcs si_dpm_ip_funcs;
+-extern const struct amd_pm_funcs si_dpm_funcs;
++extern const struct amdgpu_ip_block_version si_smu_ip_block;
+
+ struct ni_leakage_coeffients
+ {
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 28b0dbf..c6e8573 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -57,7 +57,6 @@
+ #include "uvd_v7_0.h"
+ #include "vce_v4_0.h"
+ #include "vcn_v1_0.h"
+-#include "amdgpu_powerplay.h"
+ #include "dce_virtual.h"
+ #include "mxgpu_ai.h"
+
+@@ -533,7 +532,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
+ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
+ if (!amdgpu_sriov_vf(adev))
+- amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
++ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+ #if defined(CONFIG_DRM_AMD_DC)
+@@ -552,7 +551,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
+ amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
+- amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
++ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+ #if defined(CONFIG_DRM_AMD_DC)
+diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
+index 3a73d01..d710bd5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vi.c
++++ b/drivers/gpu/drm/amd/amdgpu/vi.c
+@@ -71,7 +71,6 @@
+ #include "uvd_v5_0.h"
+ #include "uvd_v6_0.h"
+ #include "vce_v3_0.h"
+-#include "amdgpu_powerplay.h"
+ #if defined(CONFIG_DRM_AMD_ACP)
+ #include "amdgpu_acp.h"
+ #endif
+@@ -1554,7 +1553,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
+ amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
+ amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
+- amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
++ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
+@@ -1564,7 +1563,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
+ amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
+ amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
+- amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
++ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+ #if defined(CONFIG_DRM_AMD_DC)
+@@ -1584,7 +1583,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
+ amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
+- amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
++ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+ #if defined(CONFIG_DRM_AMD_DC)
+@@ -1606,7 +1605,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
+ amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
+ amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
+- amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
++ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+ #if defined(CONFIG_DRM_AMD_DC)
+@@ -1624,7 +1623,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
+ amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
+- amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
++ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+ #if defined(CONFIG_DRM_AMD_DC)
+@@ -1645,7 +1644,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
+ amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
+- amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
++ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+ #if defined(CONFIG_DRM_AMD_DC)
+diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+index 15bd0f9..5c840c0 100644
+--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
++++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+@@ -24,8 +24,7 @@
+ #ifndef __KGD_PP_INTERFACE_H__
+ #define __KGD_PP_INTERFACE_H__
+
+-extern const struct amd_ip_funcs pp_ip_funcs;
+-extern const struct amd_pm_funcs pp_dpm_funcs;
++extern const struct amdgpu_ip_block_version pp_smu_ip_block;
+
+ struct amd_vce_state {
+ /* vce clocks */
+diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+index 4550f09..ed8eb0d 100644
+--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
++++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+@@ -27,7 +27,6 @@
+ #include <linux/slab.h>
+ #include "amd_shared.h"
+ #include "amd_powerplay.h"
+-#include "pp_instance.h"
+ #include "power_state.h"
+ #include "amdgpu.h"
+ #include "hwmgr.h"
+@@ -37,18 +36,14 @@
+ static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
+ enum amd_pm_state_type *user_state);
+
+-static inline int pp_check(struct pp_instance *handle)
+-{
+- if (handle == NULL)
+- return -EINVAL;
++static const struct amd_pm_funcs pp_dpm_funcs;
+
+- if (handle->hwmgr == NULL || handle->hwmgr->smumgr_funcs == NULL)
++static inline int pp_check(struct pp_hwmgr *hwmgr)
++{
++ if (hwmgr == NULL || hwmgr->smumgr_funcs == NULL)
+ return -EINVAL;
+
+- if (handle->pm_en == 0)
+- return PP_DPM_DISABLED;
+-
+- if (handle->hwmgr->hwmgr_func == NULL)
++ if (hwmgr->pm_en == 0 || hwmgr->hwmgr_func == NULL)
+ return PP_DPM_DISABLED;
+
+ return 0;
+@@ -56,54 +51,52 @@ static inline int pp_check(struct pp_instance *handle)
+
+ static int amd_powerplay_create(struct amdgpu_device *adev)
+ {
+- struct pp_instance *instance;
++ struct pp_hwmgr *hwmgr;
+
+ if (adev == NULL)
+ return -EINVAL;
+
+- instance = kzalloc(sizeof(struct pp_instance), GFP_KERNEL);
+- if (instance == NULL)
++ hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
++ if (hwmgr == NULL)
+ return -ENOMEM;
+
+- instance->parent = adev;
+- instance->pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false;
+- instance->device = adev->powerplay.cgs_device;
+- mutex_init(&instance->pp_lock);
+- adev->powerplay.pp_handle = instance;
+-
++ hwmgr->adev = adev;
++ hwmgr->pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false;
++ hwmgr->device = amdgpu_cgs_create_device(adev);
++ mutex_init(&hwmgr->smu_lock);
++ hwmgr->chip_family = adev->family;
++ hwmgr->chip_id = adev->asic_type;
++ hwmgr->feature_mask = amdgpu_pp_feature_mask;
++ adev->powerplay.pp_handle = hwmgr;
++ adev->powerplay.pp_funcs = &pp_dpm_funcs;
+ return 0;
+ }
+
+
+-static int amd_powerplay_destroy(void *handle)
++static int amd_powerplay_destroy(struct amdgpu_device *adev)
+ {
+- struct pp_instance *instance = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+
+- kfree(instance->hwmgr->hardcode_pp_table);
+- instance->hwmgr->hardcode_pp_table = NULL;
++ kfree(hwmgr->hardcode_pp_table);
++ hwmgr->hardcode_pp_table = NULL;
+
+- kfree(instance->hwmgr);
+- instance->hwmgr = NULL;
++ kfree(hwmgr);
++ hwmgr = NULL;
+
+- kfree(instance);
+- instance = NULL;
+ return 0;
+ }
+
+ static int pp_early_init(void *handle)
+ {
+ int ret;
+- struct pp_instance *pp_handle = NULL;
+- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
++ struct amdgpu_device *adev = handle;
+
+ ret = amd_powerplay_create(adev);
+
+ if (ret != 0)
+ return ret;
+
+- pp_handle = adev->powerplay.pp_handle;
+-
+- ret = hwmgr_early_init(pp_handle);
++ ret = hwmgr_early_init(adev->powerplay.pp_handle);
+ if (ret)
+ return -EINVAL;
+
+@@ -112,15 +105,13 @@ static int pp_early_init(void *handle)
+
+ static int pp_sw_init(void *handle)
+ {
+- struct pp_hwmgr *hwmgr;
++ struct amdgpu_device *adev = handle;
++ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+ int ret = 0;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+
+ if (ret >= 0) {
+- hwmgr = pp_handle->hwmgr;
+-
+ if (hwmgr->smumgr_funcs->smu_init == NULL)
+ return -EINVAL;
+
+@@ -128,55 +119,53 @@ static int pp_sw_init(void *handle)
+
+ pr_debug("amdgpu: powerplay sw initialized\n");
+ }
++
+ return ret;
+ }
+
+ static int pp_sw_fini(void *handle)
+ {
+- struct pp_hwmgr *hwmgr;
++ struct amdgpu_device *adev = handle;
++ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+ int ret = 0;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+ if (ret >= 0) {
+- hwmgr = pp_handle->hwmgr;
+-
+- if (hwmgr->smumgr_funcs->smu_fini == NULL)
+- return -EINVAL;
+-
+- ret = hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr);
++ if (hwmgr->smumgr_funcs->smu_fini != NULL)
++ hwmgr->smumgr_funcs->smu_fini(hwmgr);
+ }
+- return ret;
++ return 0;
+ }
+
+ static int pp_hw_init(void *handle)
+ {
+ int ret = 0;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+- struct pp_hwmgr *hwmgr;
++ struct amdgpu_device *adev = handle;
++ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+
+- ret = pp_check(pp_handle);
++ if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
++ amdgpu_ucode_init_bo(adev);
+
+- if (ret >= 0) {
+- hwmgr = pp_handle->hwmgr;
++ ret = pp_check(hwmgr);
+
++ if (ret >= 0) {
+ if (hwmgr->smumgr_funcs->start_smu == NULL)
+ return -EINVAL;
+
+- if(hwmgr->smumgr_funcs->start_smu(pp_handle->hwmgr)) {
++ if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
+ pr_err("smc start failed\n");
+- hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr);
++ hwmgr->smumgr_funcs->smu_fini(hwmgr);
+ return -EINVAL;
+ }
+ if (ret == PP_DPM_DISABLED)
+ goto exit;
+- ret = hwmgr_hw_init(pp_handle);
++ ret = hwmgr_hw_init(hwmgr);
+ if (ret)
+ goto exit;
+ }
+ return ret;
+ exit:
+- pp_handle->pm_en = 0;
++ hwmgr->pm_en = 0;
+ cgs_notify_dpm_enabled(hwmgr->device, false);
+ return 0;
+
+@@ -184,24 +173,27 @@ static int pp_hw_init(void *handle)
+
+ static int pp_hw_fini(void *handle)
+ {
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct amdgpu_device *adev = handle;
++ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+ int ret = 0;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+ if (ret == 0)
+- hwmgr_hw_fini(pp_handle);
++ hwmgr_hw_fini(hwmgr);
+
+ return 0;
+ }
+
+ static int pp_late_init(void *handle)
+ {
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct amdgpu_device *adev = handle;
++ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+ int ret = 0;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
++
+ if (ret == 0)
+- pp_dpm_dispatch_tasks(pp_handle,
++ pp_dpm_dispatch_tasks(hwmgr,
+ AMD_PP_TASK_COMPLETE_INIT, NULL);
+
+ return 0;
+@@ -231,17 +223,15 @@ static int pp_sw_reset(void *handle)
+ static int pp_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+ {
+- struct pp_hwmgr *hwmgr;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct amdgpu_device *adev = handle;
++ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+ int ret = 0;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+
+ if (ret)
+ return ret;
+
+- hwmgr = pp_handle->hwmgr;
+-
+ if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+@@ -254,44 +244,43 @@ static int pp_set_powergating_state(void *handle,
+
+ static int pp_suspend(void *handle)
+ {
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct amdgpu_device *adev = handle;
++ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+ int ret = 0;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+ if (ret == 0)
+- hwmgr_hw_suspend(pp_handle);
++ hwmgr_hw_suspend(hwmgr);
+ return 0;
+ }
+
+ static int pp_resume(void *handle)
+ {
+- struct pp_hwmgr *hwmgr;
++ struct amdgpu_device *adev = handle;
++ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+ int ret;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+
+ if (ret < 0)
+ return ret;
+
+- hwmgr = pp_handle->hwmgr;
+-
+ if (hwmgr->smumgr_funcs->start_smu == NULL)
+ return -EINVAL;
+
+- if (hwmgr->smumgr_funcs->start_smu(pp_handle->hwmgr)) {
++ if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
+ pr_err("smc start failed\n");
+- hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr);
++ hwmgr->smumgr_funcs->smu_fini(hwmgr);
+ return -EINVAL;
+ }
+
+ if (ret == PP_DPM_DISABLED)
+ return 0;
+
+- return hwmgr_hw_resume(pp_handle);
++ return hwmgr_hw_resume(hwmgr);
+ }
+
+-const struct amd_ip_funcs pp_ip_funcs = {
++static const struct amd_ip_funcs pp_ip_funcs = {
+ .name = "powerplay",
+ .early_init = pp_early_init,
+ .late_init = pp_late_init,
+@@ -309,6 +298,15 @@ const struct amd_ip_funcs pp_ip_funcs = {
+ .set_powergating_state = pp_set_powergating_state,
+ };
+
++const struct amdgpu_ip_block_version pp_smu_ip_block =
++{
++ .type = AMD_IP_BLOCK_TYPE_SMC,
++ .major = 1,
++ .minor = 0,
++ .rev = 0,
++ .funcs = &pp_ip_funcs,
++};
++
+ static int pp_dpm_load_fw(void *handle)
+ {
+ return 0;
+@@ -321,17 +319,14 @@ static int pp_dpm_fw_loading_complete(void *handle)
+
+ static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
+ {
+- struct pp_hwmgr *hwmgr;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+
+ if (ret)
+ return ret;
+
+- hwmgr = pp_handle->hwmgr;
+-
+ if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+@@ -379,25 +374,22 @@ static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
+ static int pp_dpm_force_performance_level(void *handle,
+ enum amd_dpm_forced_level level)
+ {
+- struct pp_hwmgr *hwmgr;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+
+ if (ret)
+ return ret;
+
+- hwmgr = pp_handle->hwmgr;
+-
+ if (level == hwmgr->dpm_level)
+ return 0;
+
+- mutex_lock(&pp_handle->pp_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ pp_dpm_en_umd_pstate(hwmgr, &level);
+ hwmgr->request_dpm_level = level;
+- hwmgr_handle_task(pp_handle, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
+- mutex_unlock(&pp_handle->pp_lock);
++ hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
++ mutex_unlock(&hwmgr->smu_lock);
+
+ return 0;
+ }
+@@ -405,152 +397,135 @@ static int pp_dpm_force_performance_level(void *handle,
+ static enum amd_dpm_forced_level pp_dpm_get_performance_level(
+ void *handle)
+ {
+- struct pp_hwmgr *hwmgr;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+ enum amd_dpm_forced_level level;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+
+ if (ret)
+ return ret;
+
+- hwmgr = pp_handle->hwmgr;
+- mutex_lock(&pp_handle->pp_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ level = hwmgr->dpm_level;
+- mutex_unlock(&pp_handle->pp_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return level;
+ }
+
+ static uint32_t pp_dpm_get_sclk(void *handle, bool low)
+ {
+- struct pp_hwmgr *hwmgr;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+ uint32_t clk = 0;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+
+ if (ret)
+ return ret;
+
+- hwmgr = pp_handle->hwmgr;
+-
+ if (hwmgr->hwmgr_func->get_sclk == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+ }
+- mutex_lock(&pp_handle->pp_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
+- mutex_unlock(&pp_handle->pp_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return clk;
+ }
+
+ static uint32_t pp_dpm_get_mclk(void *handle, bool low)
+ {
+- struct pp_hwmgr *hwmgr;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+ uint32_t clk = 0;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+
+ if (ret)
+ return ret;
+
+- hwmgr = pp_handle->hwmgr;
+-
+ if (hwmgr->hwmgr_func->get_mclk == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+ }
+- mutex_lock(&pp_handle->pp_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
+- mutex_unlock(&pp_handle->pp_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return clk;
+ }
+
+ static void pp_dpm_powergate_vce(void *handle, bool gate)
+ {
+- struct pp_hwmgr *hwmgr;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+
+ if (ret)
+ return;
+
+- hwmgr = pp_handle->hwmgr;
+-
+ if (hwmgr->hwmgr_func->powergate_vce == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return;
+ }
+- mutex_lock(&pp_handle->pp_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
+- mutex_unlock(&pp_handle->pp_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ }
+
+ static void pp_dpm_powergate_uvd(void *handle, bool gate)
+ {
+- struct pp_hwmgr *hwmgr;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+
+ if (ret)
+ return;
+
+- hwmgr = pp_handle->hwmgr;
+-
+ if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return;
+ }
+- mutex_lock(&pp_handle->pp_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
+- mutex_unlock(&pp_handle->pp_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ }
+
+ static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
+ enum amd_pm_state_type *user_state)
+ {
+ int ret = 0;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+
+ if (ret)
+ return ret;
+
+- mutex_lock(&pp_handle->pp_lock);
+- ret = hwmgr_handle_task(pp_handle, task_id, user_state);
+- mutex_unlock(&pp_handle->pp_lock);
++ mutex_lock(&hwmgr->smu_lock);
++ ret = hwmgr_handle_task(hwmgr, task_id, user_state);
++ mutex_unlock(&hwmgr->smu_lock);
+
+ return ret;
+ }
+
+ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
+ {
+- struct pp_hwmgr *hwmgr;
++ struct pp_hwmgr *hwmgr = handle;
+ struct pp_power_state *state;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ int ret = 0;
+ enum amd_pm_state_type pm_type;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+
+ if (ret)
+ return ret;
+
+- hwmgr = pp_handle->hwmgr;
+-
+ if (hwmgr->current_ps == NULL)
+ return -EINVAL;
+
+- mutex_lock(&pp_handle->pp_lock);
++ mutex_lock(&hwmgr->smu_lock);
+
+ state = hwmgr->current_ps;
+
+@@ -571,147 +546,129 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
+ pm_type = POWER_STATE_TYPE_DEFAULT;
+ break;
+ }
+- mutex_unlock(&pp_handle->pp_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+
+ return pm_type;
+ }
+
+ static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
+ {
+- struct pp_hwmgr *hwmgr;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+
+ if (ret)
+ return;
+
+- hwmgr = pp_handle->hwmgr;
+-
+ if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return;
+ }
+- mutex_lock(&pp_handle->pp_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
+- mutex_unlock(&pp_handle->pp_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ }
+
+ static uint32_t pp_dpm_get_fan_control_mode(void *handle)
+ {
+- struct pp_hwmgr *hwmgr;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+ uint32_t mode = 0;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+
+ if (ret)
+ return ret;
+
+- hwmgr = pp_handle->hwmgr;
+-
+ if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+ }
+- mutex_lock(&pp_handle->pp_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
+- mutex_unlock(&pp_handle->pp_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return mode;
+ }
+
+ static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
+ {
+- struct pp_hwmgr *hwmgr;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+
+ if (ret)
+ return ret;
+
+- hwmgr = pp_handle->hwmgr;
+-
+ if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+ }
+- mutex_lock(&pp_handle->pp_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ ret = hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent);
+- mutex_unlock(&pp_handle->pp_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+ static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
+ {
+- struct pp_hwmgr *hwmgr;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+
+ if (ret)
+ return ret;
+
+- hwmgr = pp_handle->hwmgr;
+-
+ if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+ }
+
+- mutex_lock(&pp_handle->pp_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ ret = hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
+- mutex_unlock(&pp_handle->pp_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+ static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
+ {
+- struct pp_hwmgr *hwmgr;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+
+ if (ret)
+ return ret;
+
+- hwmgr = pp_handle->hwmgr;
+-
+ if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
+ return -EINVAL;
+
+- mutex_lock(&pp_handle->pp_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ ret = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
+- mutex_unlock(&pp_handle->pp_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+ static int pp_dpm_get_pp_num_states(void *handle,
+ struct pp_states_info *data)
+ {
+- struct pp_hwmgr *hwmgr;
++ struct pp_hwmgr *hwmgr = handle;
+ int i;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ int ret = 0;
+
+ memset(data, 0, sizeof(*data));
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+
+ if (ret)
+ return ret;
+
+- hwmgr = pp_handle->hwmgr;
+-
+ if (hwmgr->ps == NULL)
+ return -EINVAL;
+
+- mutex_lock(&pp_handle->pp_lock);
++ mutex_lock(&hwmgr->smu_lock);
+
+ data->nums = hwmgr->num_ps;
+
+@@ -735,73 +692,68 @@ static int pp_dpm_get_pp_num_states(void *handle,
+ data->states[i] = POWER_STATE_TYPE_DEFAULT;
+ }
+ }
+- mutex_unlock(&pp_handle->pp_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return 0;
+ }
+
+ static int pp_dpm_get_pp_table(void *handle, char **table)
+ {
+- struct pp_hwmgr *hwmgr;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+ int size = 0;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+
+ if (ret)
+ return ret;
+
+- hwmgr = pp_handle->hwmgr;
+-
+ if (!hwmgr->soft_pp_table)
+ return -EINVAL;
+
+- mutex_lock(&pp_handle->pp_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ *table = (char *)hwmgr->soft_pp_table;
+ size = hwmgr->soft_pp_table_size;
+- mutex_unlock(&pp_handle->pp_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return size;
+ }
+
+ static int amd_powerplay_reset(void *handle)
+ {
+- struct pp_instance *instance = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+ int ret;
+
+- ret = pp_check(instance);
++ ret = pp_check(hwmgr);
+ if (ret)
+ return ret;
+
+- ret = pp_hw_fini(instance);
++ ret = pp_hw_fini(hwmgr);
+ if (ret)
+ return ret;
+
+- ret = hwmgr_hw_init(instance);
++ ret = hwmgr_hw_init(hwmgr);
+ if (ret)
+ return ret;
+
+- return hwmgr_handle_task(instance, AMD_PP_TASK_COMPLETE_INIT, NULL);
++ return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
+ }
+
+ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
+ {
+- struct pp_hwmgr *hwmgr;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+
+ if (ret)
+ return ret;
+
+- hwmgr = pp_handle->hwmgr;
+- mutex_lock(&pp_handle->pp_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ if (!hwmgr->hardcode_pp_table) {
+ hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
+ hwmgr->soft_pp_table_size,
+ GFP_KERNEL);
+ if (!hwmgr->hardcode_pp_table) {
+- mutex_unlock(&pp_handle->pp_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return -ENOMEM;
+ }
+ }
+@@ -809,7 +761,7 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
+ memcpy(hwmgr->hardcode_pp_table, buf, size);
+
+ hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;
+- mutex_unlock(&pp_handle->pp_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+
+ ret = amd_powerplay_reset(handle);
+ if (ret)
+@@ -827,163 +779,142 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
+ static int pp_dpm_force_clock_level(void *handle,
+ enum pp_clock_type type, uint32_t mask)
+ {
+- struct pp_hwmgr *hwmgr;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+
+ if (ret)
+ return ret;
+
+- hwmgr = pp_handle->hwmgr;
+-
+ if (hwmgr->hwmgr_func->force_clock_level == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+ }
+- mutex_lock(&pp_handle->pp_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
+ ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
+ else
+ ret = -EINVAL;
+- mutex_unlock(&pp_handle->pp_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+ static int pp_dpm_print_clock_levels(void *handle,
+ enum pp_clock_type type, char *buf)
+ {
+- struct pp_hwmgr *hwmgr;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+
+ if (ret)
+ return ret;
+
+- hwmgr = pp_handle->hwmgr;
+-
+ if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+ }
+- mutex_lock(&pp_handle->pp_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
+- mutex_unlock(&pp_handle->pp_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+ static int pp_dpm_get_sclk_od(void *handle)
+ {
+- struct pp_hwmgr *hwmgr;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+
+ if (ret)
+ return ret;
+
+- hwmgr = pp_handle->hwmgr;
+-
+ if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+ }
+- mutex_lock(&pp_handle->pp_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr);
+- mutex_unlock(&pp_handle->pp_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+ static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
+ {
+- struct pp_hwmgr *hwmgr;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+
+ if (ret)
+ return ret;
+
+- hwmgr = pp_handle->hwmgr;
+-
+ if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+ }
+
+- mutex_lock(&pp_handle->pp_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
+- mutex_unlock(&pp_handle->pp_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+ static int pp_dpm_get_mclk_od(void *handle)
+ {
+- struct pp_hwmgr *hwmgr;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+
+ if (ret)
+ return ret;
+
+- hwmgr = pp_handle->hwmgr;
+-
+ if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+ }
+- mutex_lock(&pp_handle->pp_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr);
+- mutex_unlock(&pp_handle->pp_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+ static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
+ {
+- struct pp_hwmgr *hwmgr;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+
+ if (ret)
+ return ret;
+
+- hwmgr = pp_handle->hwmgr;
+-
+ if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+ }
+- mutex_lock(&pp_handle->pp_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ ret = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
+- mutex_unlock(&pp_handle->pp_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+ static int pp_dpm_read_sensor(void *handle, int idx,
+ void *value, int *size)
+ {
+- struct pp_hwmgr *hwmgr;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+ if (ret)
+ return ret;
+
+ if (value == NULL)
+ return -EINVAL;
+
+- hwmgr = pp_handle->hwmgr;
+-
+ switch (idx) {
+ case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
+ *((uint32_t *)value) = hwmgr->pstate_sclk;
+@@ -992,9 +923,9 @@ static int pp_dpm_read_sensor(void *handle, int idx,
+ *((uint32_t *)value) = hwmgr->pstate_mclk;
+ return 0;
+ default:
+- mutex_lock(&pp_handle->pp_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
+- mutex_unlock(&pp_handle->pp_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+ }
+@@ -1002,17 +933,14 @@ static int pp_dpm_read_sensor(void *handle, int idx,
+ static struct amd_vce_state*
+ pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
+ {
+- struct pp_hwmgr *hwmgr;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+
+ if (ret)
+ return NULL;
+
+- hwmgr = pp_handle->hwmgr;
+-
+ if (hwmgr && idx < hwmgr->num_vce_state_tables)
+ return &hwmgr->vce_states[idx];
+ return NULL;
+@@ -1020,14 +948,11 @@ pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
+
+ static int pp_get_power_profile_mode(void *handle, char *buf)
+ {
+- struct pp_hwmgr *hwmgr;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+
+- if (!buf || pp_check(pp_handle))
++ if (!buf || pp_check(hwmgr))
+ return -EINVAL;
+
+- hwmgr = pp_handle->hwmgr;
+-
+ if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return snprintf(buf, PAGE_SIZE, "\n");
+@@ -1038,36 +963,30 @@ static int pp_get_power_profile_mode(void *handle, char *buf)
+
+ static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
+ {
+- struct pp_hwmgr *hwmgr;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+ int ret = -EINVAL;
+
+- if (pp_check(pp_handle))
++ if (pp_check(hwmgr))
+ return -EINVAL;
+
+- hwmgr = pp_handle->hwmgr;
+-
+ if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return -EINVAL;
+ }
+- mutex_lock(&pp_handle->pp_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
+ ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
+- mutex_unlock(&pp_handle->pp_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+ static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size)
+ {
+- struct pp_hwmgr *hwmgr;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+
+- if (pp_check(pp_handle))
++ if (pp_check(hwmgr))
+ return -EINVAL;
+
+- hwmgr = pp_handle->hwmgr;
+-
+ if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return -EINVAL;
+@@ -1079,16 +998,13 @@ static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint3
+ static int pp_dpm_switch_power_profile(void *handle,
+ enum PP_SMC_POWER_PROFILE type, bool en)
+ {
+- struct pp_hwmgr *hwmgr;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+ long workload;
+ uint32_t index;
+
+- if (pp_check(pp_handle))
++ if (pp_check(hwmgr))
+ return -EINVAL;
+
+- hwmgr = pp_handle->hwmgr;
+-
+ if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return -EINVAL;
+@@ -1097,7 +1013,7 @@ static int pp_dpm_switch_power_profile(void *handle,
+ if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
+ return -EINVAL;
+
+- mutex_lock(&pp_handle->pp_lock);
++ mutex_lock(&hwmgr->smu_lock);
+
+ if (!en) {
+ hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
+@@ -1113,7 +1029,7 @@ static int pp_dpm_switch_power_profile(void *handle,
+
+ if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+ hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
+- mutex_unlock(&pp_handle->pp_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+
+ return 0;
+ }
+@@ -1125,46 +1041,40 @@ static int pp_dpm_notify_smu_memory_info(void *handle,
+ uint32_t mc_addr_hi,
+ uint32_t size)
+ {
+- struct pp_hwmgr *hwmgr;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+
+ if (ret)
+ return ret;
+
+- hwmgr = pp_handle->hwmgr;
+-
+ if (hwmgr->hwmgr_func->notify_cac_buffer_info == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return -EINVAL;
+ }
+
+- mutex_lock(&pp_handle->pp_lock);
++ mutex_lock(&hwmgr->smu_lock);
+
+ ret = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr, virtual_addr_low,
+ virtual_addr_hi, mc_addr_low, mc_addr_hi,
+ size);
+
+- mutex_unlock(&pp_handle->pp_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+
+ return ret;
+ }
+
+ static int pp_set_power_limit(void *handle, uint32_t limit)
+ {
+- struct pp_hwmgr *hwmgr;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+
+ if (ret)
+ return ret;
+
+- hwmgr = pp_handle->hwmgr;
+-
+ if (hwmgr->hwmgr_func->set_power_limit == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return -EINVAL;
+@@ -1176,20 +1086,19 @@ static int pp_set_power_limit(void *handle, uint32_t limit)
+ if (limit > hwmgr->default_power_limit)
+ return -EINVAL;
+
+- mutex_lock(&pp_handle->pp_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
+ hwmgr->power_limit = limit;
+- mutex_unlock(&pp_handle->pp_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+ static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
+ {
+- struct pp_hwmgr *hwmgr;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+
+ if (ret)
+ return ret;
+@@ -1197,16 +1106,14 @@ static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
+ if (limit == NULL)
+ return -EINVAL;
+
+- hwmgr = pp_handle->hwmgr;
+-
+- mutex_lock(&pp_handle->pp_lock);
++ mutex_lock(&hwmgr->smu_lock);
+
+ if (default_limit)
+ *limit = hwmgr->default_power_limit;
+ else
+ *limit = hwmgr->power_limit;
+
+- mutex_unlock(&pp_handle->pp_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+
+ return ret;
+ }
+@@ -1214,42 +1121,37 @@ static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
+ static int pp_display_configuration_change(void *handle,
+ const struct amd_pp_display_configuration *display_config)
+ {
+- struct pp_hwmgr *hwmgr;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+
+ if (ret)
+ return ret;
+
+- hwmgr = pp_handle->hwmgr;
+- mutex_lock(&pp_handle->pp_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ phm_store_dal_configuration_data(hwmgr, display_config);
+- mutex_unlock(&pp_handle->pp_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return 0;
+ }
+
+ static int pp_get_display_power_level(void *handle,
+ struct amd_pp_simple_clock_info *output)
+ {
+- struct pp_hwmgr *hwmgr;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+
+ if (ret)
+ return ret;
+
+- hwmgr = pp_handle->hwmgr;
+-
+ if (output == NULL)
+ return -EINVAL;
+
+- mutex_lock(&pp_handle->pp_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ ret = phm_get_dal_power_level(hwmgr, output);
+- mutex_unlock(&pp_handle->pp_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+@@ -1258,18 +1160,15 @@ static int pp_get_current_clocks(void *handle,
+ {
+ struct amd_pp_simple_clock_info simple_clocks;
+ struct pp_clock_info hw_clocks;
+- struct pp_hwmgr *hwmgr;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+
+ if (ret)
+ return ret;
+
+- hwmgr = pp_handle->hwmgr;
+-
+- mutex_lock(&pp_handle->pp_lock);
++ mutex_lock(&hwmgr->smu_lock);
+
+ phm_get_dal_power_level(hwmgr, &simple_clocks);
+
+@@ -1283,7 +1182,7 @@ static int pp_get_current_clocks(void *handle,
+
+ if (ret) {
+ pr_info("Error in phm_get_clock_info \n");
+- mutex_unlock(&pp_handle->pp_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return -EINVAL;
+ }
+
+@@ -1303,29 +1202,26 @@ static int pp_get_current_clocks(void *handle,
+ clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
+ clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
+ }
+- mutex_unlock(&pp_handle->pp_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return 0;
+ }
+
+ static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
+ {
+- struct pp_hwmgr *hwmgr;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+
+ if (ret)
+ return ret;
+
+- hwmgr = pp_handle->hwmgr;
+-
+ if (clocks == NULL)
+ return -EINVAL;
+
+- mutex_lock(&pp_handle->pp_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ ret = phm_get_clock_by_type(hwmgr, type, clocks);
+- mutex_unlock(&pp_handle->pp_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+@@ -1333,21 +1229,19 @@ static int pp_get_clock_by_type_with_latency(void *handle,
+ enum amd_pp_clock_type type,
+ struct pp_clock_levels_with_latency *clocks)
+ {
+- struct pp_hwmgr *hwmgr;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+ if (ret)
+ return ret;
+
+ if (!clocks)
+ return -EINVAL;
+
+- mutex_lock(&pp_handle->pp_lock);
+- hwmgr = ((struct pp_instance *)handle)->hwmgr;
++ mutex_lock(&hwmgr->smu_lock);
+ ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
+- mutex_unlock(&pp_handle->pp_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+@@ -1355,47 +1249,41 @@ static int pp_get_clock_by_type_with_voltage(void *handle,
+ enum amd_pp_clock_type type,
+ struct pp_clock_levels_with_voltage *clocks)
+ {
+- struct pp_hwmgr *hwmgr;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+ if (ret)
+ return ret;
+
+ if (!clocks)
+ return -EINVAL;
+
+- hwmgr = ((struct pp_instance *)handle)->hwmgr;
+-
+- mutex_lock(&pp_handle->pp_lock);
++ mutex_lock(&hwmgr->smu_lock);
+
+ ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
+
+- mutex_unlock(&pp_handle->pp_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+ static int pp_set_watermarks_for_clocks_ranges(void *handle,
+ struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+ {
+- struct pp_hwmgr *hwmgr;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+ if (ret)
+ return ret;
+
+ if (!wm_with_clock_ranges)
+ return -EINVAL;
+
+- hwmgr = ((struct pp_instance *)handle)->hwmgr;
+-
+- mutex_lock(&pp_handle->pp_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
+ wm_with_clock_ranges);
+- mutex_unlock(&pp_handle->pp_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+
+ return ret;
+ }
+@@ -1403,22 +1291,19 @@ static int pp_set_watermarks_for_clocks_ranges(void *handle,
+ static int pp_display_clock_voltage_request(void *handle,
+ struct pp_display_clock_request *clock)
+ {
+- struct pp_hwmgr *hwmgr;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+ if (ret)
+ return ret;
+
+ if (!clock)
+ return -EINVAL;
+
+- hwmgr = ((struct pp_instance *)handle)->hwmgr;
+-
+- mutex_lock(&pp_handle->pp_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ ret = phm_display_clock_voltage_request(hwmgr, clock);
+- mutex_unlock(&pp_handle->pp_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+
+ return ret;
+ }
+@@ -1426,43 +1311,37 @@ static int pp_display_clock_voltage_request(void *handle,
+ static int pp_get_display_mode_validation_clocks(void *handle,
+ struct amd_pp_simple_clock_info *clocks)
+ {
+- struct pp_hwmgr *hwmgr;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+
+ if (ret)
+ return ret;
+
+- hwmgr = pp_handle->hwmgr;
+-
+ if (clocks == NULL)
+ return -EINVAL;
+
+- mutex_lock(&pp_handle->pp_lock);
++ mutex_lock(&hwmgr->smu_lock);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
+ ret = phm_get_max_high_clocks(hwmgr, clocks);
+
+- mutex_unlock(&pp_handle->pp_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+
+ return ret;
+ }
+
+ static int pp_set_mmhub_powergating_by_smu(void *handle)
+ {
+- struct pp_hwmgr *hwmgr;
+- struct pp_instance *pp_handle = (struct pp_instance *)handle;
++ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+- ret = pp_check(pp_handle);
++ ret = pp_check(hwmgr);
+
+ if (ret)
+ return ret;
+
+- hwmgr = pp_handle->hwmgr;
+-
+ if (hwmgr->hwmgr_func->set_mmhub_powergating_by_smu == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+@@ -1471,7 +1350,7 @@ static int pp_set_mmhub_powergating_by_smu(void *handle)
+ return hwmgr->hwmgr_func->set_mmhub_powergating_by_smu(hwmgr);
+ }
+
+-const struct amd_pm_funcs pp_dpm_funcs = {
++static const struct amd_pm_funcs pp_dpm_funcs = {
+ .load_firmware = pp_dpm_load_fw,
+ .wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
+ .force_performance_level = pp_dpm_force_performance_level,
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+index 5563b65..238dd59 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+@@ -116,23 +116,11 @@ static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr)
+ hwmgr->workload_setting[4] = PP_SMC_POWER_PROFILE_COMPUTE;
+ }
+
+-int hwmgr_early_init(struct pp_instance *handle)
++int hwmgr_early_init(struct pp_hwmgr *hwmgr)
+ {
+- struct pp_hwmgr *hwmgr;
+-
+- if (handle == NULL)
++ if (hwmgr == NULL)
+ return -EINVAL;
+
+- hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
+- if (hwmgr == NULL)
+- return -ENOMEM;
+-
+- handle->hwmgr = hwmgr;
+- hwmgr->adev = handle->parent;
+- hwmgr->device = handle->device;
+- hwmgr->chip_family = ((struct amdgpu_device *)handle->parent)->family;
+- hwmgr->chip_id = ((struct amdgpu_device *)handle->parent)->asic_type;
+- hwmgr->feature_mask = amdgpu_pp_feature_mask;
+ hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
+ hwmgr->power_source = PP_PowerSource_AC;
+ hwmgr->pp_table_version = PP_TABLE_V1;
+@@ -220,16 +208,13 @@ int hwmgr_early_init(struct pp_instance *handle)
+ return 0;
+ }
+
+-int hwmgr_hw_init(struct pp_instance *handle)
++int hwmgr_hw_init(struct pp_hwmgr *hwmgr)
+ {
+- struct pp_hwmgr *hwmgr;
+ int ret = 0;
+
+- if (handle == NULL)
++ if (hwmgr == NULL)
+ return -EINVAL;
+
+- hwmgr = handle->hwmgr;
+-
+ if (hwmgr->pptable_func == NULL ||
+ hwmgr->pptable_func->pptable_init == NULL ||
+ hwmgr->hwmgr_func->backend_init == NULL)
+@@ -275,15 +260,11 @@ int hwmgr_hw_init(struct pp_instance *handle)
+ return ret;
+ }
+
+-int hwmgr_hw_fini(struct pp_instance *handle)
++int hwmgr_hw_fini(struct pp_hwmgr *hwmgr)
+ {
+- struct pp_hwmgr *hwmgr;
+-
+- if (handle == NULL || handle->hwmgr == NULL)
++ if (hwmgr == NULL)
+ return -EINVAL;
+
+- hwmgr = handle->hwmgr;
+-
+ phm_stop_thermal_controller(hwmgr);
+ psm_set_boot_states(hwmgr);
+ psm_adjust_power_state_dynamic(hwmgr, false, NULL);
+@@ -297,15 +278,13 @@ int hwmgr_hw_fini(struct pp_instance *handle)
+ return psm_fini_power_state_table(hwmgr);
+ }
+
+-int hwmgr_hw_suspend(struct pp_instance *handle)
++int hwmgr_hw_suspend(struct pp_hwmgr *hwmgr)
+ {
+- struct pp_hwmgr *hwmgr;
+ int ret = 0;
+
+- if (handle == NULL || handle->hwmgr == NULL)
++ if (hwmgr == NULL)
+ return -EINVAL;
+
+- hwmgr = handle->hwmgr;
+ phm_disable_smc_firmware_ctf(hwmgr);
+ ret = psm_set_boot_states(hwmgr);
+ if (ret)
+@@ -318,15 +297,13 @@ int hwmgr_hw_suspend(struct pp_instance *handle)
+ return ret;
+ }
+
+-int hwmgr_hw_resume(struct pp_instance *handle)
++int hwmgr_hw_resume(struct pp_hwmgr *hwmgr)
+ {
+- struct pp_hwmgr *hwmgr;
+ int ret = 0;
+
+- if (handle == NULL || handle->hwmgr == NULL)
++ if (hwmgr == NULL)
+ return -EINVAL;
+
+- hwmgr = handle->hwmgr;
+ ret = phm_setup_asic(hwmgr);
+ if (ret)
+ return ret;
+@@ -361,17 +338,14 @@ static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type state)
+ }
+ }
+
+-int hwmgr_handle_task(struct pp_instance *handle, enum amd_pp_task task_id,
++int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id,
+ enum amd_pm_state_type *user_state)
+ {
+ int ret = 0;
+- struct pp_hwmgr *hwmgr;
+
+- if (handle == NULL || handle->hwmgr == NULL)
++ if (hwmgr == NULL)
+ return -EINVAL;
+
+- hwmgr = handle->hwmgr;
+-
+ switch (task_id) {
+ case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
+ ret = phm_set_cpu_power_state(hwmgr);
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+index 2e2e4d0..85b46ad 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+@@ -25,7 +25,6 @@
+
+ #include <linux/seq_file.h>
+ #include "amd_powerplay.h"
+-#include "pp_instance.h"
+ #include "hardwaremanager.h"
+ #include "pp_power_source.h"
+ #include "hwmgr_ppt.h"
+@@ -34,7 +33,6 @@
+ #include "power_state.h"
+ #include "smu_helper.h"
+
+-struct pp_instance;
+ struct pp_hwmgr;
+ struct phm_fan_speed_info;
+ struct pp_atomctrl_voltage_table;
+@@ -703,6 +701,8 @@ struct pp_hwmgr {
+ uint32_t chip_family;
+ uint32_t chip_id;
+ uint32_t smu_version;
++ bool pm_en;
++ struct mutex smu_lock;
+
+ uint32_t pp_table_version;
+ void *device;
+@@ -769,12 +769,12 @@ struct cgs_irq_src_funcs {
+ cgs_irq_handler_func_t handler;
+ };
+
+-extern int hwmgr_early_init(struct pp_instance *handle);
+-extern int hwmgr_hw_init(struct pp_instance *handle);
+-extern int hwmgr_hw_fini(struct pp_instance *handle);
+-extern int hwmgr_hw_suspend(struct pp_instance *handle);
+-extern int hwmgr_hw_resume(struct pp_instance *handle);
+-extern int hwmgr_handle_task(struct pp_instance *handle,
++extern int hwmgr_early_init(struct pp_hwmgr *hwmgr);
++extern int hwmgr_hw_init(struct pp_hwmgr *hwmgr);
++extern int hwmgr_hw_fini(struct pp_hwmgr *hwmgr);
++extern int hwmgr_hw_suspend(struct pp_hwmgr *hwmgr);
++extern int hwmgr_hw_resume(struct pp_hwmgr *hwmgr);
++extern int hwmgr_handle_task(struct pp_hwmgr *hwmgr,
+ enum amd_pp_task task_id,
+ enum amd_pm_state_type *user_state);
+
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h b/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h
+deleted file mode 100644
+index 6c2fa33..0000000
+--- a/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h
++++ /dev/null
+@@ -1,36 +0,0 @@
+-/*
+- * Copyright 2015 Advanced Micro Devices, Inc.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included in
+- * all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
+- *
+- */
+-#ifndef _PP_INSTANCE_H_
+-#define _PP_INSTANCE_H_
+-
+-struct pp_hwmgr;
+-
+-struct pp_instance {
+- void *parent; /* e.g. amdgpu_device */
+- void *device; /* e.g. cgs_device */
+- bool pm_en;
+- struct pp_hwmgr *hwmgr;
+- struct mutex pp_lock;
+-};
+-
+-#endif
+--
+2.7.4
+