aboutsummaryrefslogtreecommitdiffstats
path: root/common/recipes-kernel/linux/linux-yocto-4.19.8/1743-drm-amdgpu-support-dpm-level-modification-under-virt.patch
diff options
context:
space:
mode:
Diffstat (limited to 'common/recipes-kernel/linux/linux-yocto-4.19.8/1743-drm-amdgpu-support-dpm-level-modification-under-virt.patch')
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.19.8/1743-drm-amdgpu-support-dpm-level-modification-under-virt.patch315
1 files changed, 315 insertions, 0 deletions
diff --git a/common/recipes-kernel/linux/linux-yocto-4.19.8/1743-drm-amdgpu-support-dpm-level-modification-under-virt.patch b/common/recipes-kernel/linux/linux-yocto-4.19.8/1743-drm-amdgpu-support-dpm-level-modification-under-virt.patch
new file mode 100644
index 00000000..9a505a6e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.19.8/1743-drm-amdgpu-support-dpm-level-modification-under-virt.patch
@@ -0,0 +1,315 @@
+From 88901b2eb8f24eda08d5eb7630fe96b23a4f4c96 Mon Sep 17 00:00:00 2001
+From: Yintian Tao <yttao@amd.com>
+Date: Tue, 9 Apr 2019 20:33:20 +0800
+Subject: [PATCH 1743/2940] drm/amdgpu: support dpm level modification under
+ virtualization v3
+
+Under vega10 virtualization, the smu ip block will not be added.
+Therefore, we need to add pp clk query and force dpm level functions
+to amdgpu_virt_ops to support the feature.
+
+v2: add get_pp_clk existence check and use kzalloc to allocate buf
+
+v3: return -ENOMEM for allocation failure and correct the coding style
+
+Change-Id: I713419c57b854082f6f739f1d32a055c7115e620
+Signed-off-by: Yintian Tao <yttao@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 4 ++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 16 +++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 49 ++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 11 +++
+ drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 78 ++++++++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h | 6 ++
+ 7 files changed, 165 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 20e2cc8d7a04..698663b1ef2e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -2520,6 +2520,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
+ mutex_init(&adev->virt.vf_errors.lock);
+ hash_init(adev->mn_hash);
+ mutex_init(&adev->lock_reset);
++ mutex_init(&adev->virt.dpm_mutex);
+
+ amdgpu_device_check_arguments(adev);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index aaa728ebd378..d4f822307bc2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -710,6 +710,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ if (adev->pm.dpm_enabled) {
+ dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
+ dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
++ } else if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
++ adev->virt.ops->get_pp_clk) {
++ dev_info.max_engine_clock = amdgpu_virt_get_sclk(adev, false) * 10;
++ dev_info.max_memory_clock = amdgpu_virt_get_mclk(adev, false) * 10;
+ } else {
+ dev_info.max_engine_clock = adev->clock.default_sclk * 10;
+ dev_info.max_memory_clock = adev->clock.default_mclk * 10;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index 88362019d1dd..4b7a076eea9c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -327,6 +327,18 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
+ goto fail;
+ }
+
++ if (amdgpu_sriov_vf(adev)) {
++ if (amdgim_is_hwperf(adev) &&
++ adev->virt.ops->force_dpm_level) {
++ mutex_lock(&adev->pm.mutex);
++ adev->virt.ops->force_dpm_level(adev, level);
++ mutex_unlock(&adev->pm.mutex);
++ return count;
++ } else {
++ return -EINVAL;
++ }
++ }
++
+ if (current_level == level)
+ return count;
+
+@@ -790,6 +802,10 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
++ if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
++ adev->virt.ops->get_pp_clk)
++ return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
++
+ if (is_support_sw_smu(adev))
+ return smu_print_clk_levels(&adev->smu, PP_SCLK, buf);
+ else if (adev->powerplay.pp_funcs->print_clock_levels)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+index 053513f5ae37..36bb4a0ae1bb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+@@ -335,4 +335,53 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
+ }
+ }
+
++static uint32_t parse_clk(char *buf, bool min)
++{
++ char *ptr = buf;
++ uint32_t clk = 0;
++
++ do {
++ ptr = strchr(ptr, ':');
++ if (!ptr)
++ break;
++ ptr+=2;
++ clk = simple_strtoul(ptr, NULL, 10);
++ } while (!min);
++
++ return clk * 100;
++}
++
++uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest)
++{
++ char *buf = NULL;
++ uint32_t clk = 0;
++
++ buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
++ if (!buf)
++ return -ENOMEM;
++
++ adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
++ clk = parse_clk(buf, lowest);
++
++ kfree(buf);
++
++ return clk;
++}
++
++uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest)
++{
++ char *buf = NULL;
++ uint32_t clk = 0;
++
++ buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
++ if (!buf)
++ return -ENOMEM;
++
++ adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf);
++ clk = parse_clk(buf, lowest);
++
++ kfree(buf);
++
++ return clk;
++}
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+index 9187dda3b537..d87c98b9263c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+@@ -57,6 +57,8 @@ struct amdgpu_virt_ops {
+ int (*reset_gpu)(struct amdgpu_device *adev);
+ int (*wait_reset)(struct amdgpu_device *adev);
+ void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
++ int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf);
++ int (*force_dpm_level)(struct amdgpu_device *adev, u32 level);
+ };
+
+ /*
+@@ -83,6 +85,8 @@ enum AMDGIM_FEATURE_FLAG {
+ AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2,
+ /* VRAM LOST by GIM */
+ AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
++ /* HW PERF SIM in GIM */
++ AMDGIM_FEATURE_HW_PERF_SIMULATION = (1 << 3),
+ };
+
+ struct amd_sriov_msg_pf2vf_info_header {
+@@ -252,6 +256,8 @@ struct amdgpu_virt {
+ struct amdgpu_vf_error_buffer vf_errors;
+ struct amdgpu_virt_fw_reserve fw_reserve;
+ uint32_t gim_feature;
++ /* protect DPM events to GIM */
++ struct mutex dpm_mutex;
+ };
+
+ #define amdgpu_sriov_enabled(adev) \
+@@ -278,6 +284,9 @@ static inline bool is_virtual_machine(void)
+ #endif
+ }
+
++#define amdgim_is_hwperf(adev) \
++ ((adev)->virt.gim_feature & AMDGIM_FEATURE_HW_PERF_SIMULATION)
++
+ bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
+ void amdgpu_virt_init_setting(struct amdgpu_device *adev);
+ uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
+@@ -292,5 +301,7 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
+ unsigned int key,
+ unsigned int chksum);
+ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
++uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest);
++uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest);
+
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+index 73851ebb3833..8dbad496b29f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
++++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+@@ -157,6 +157,82 @@ static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev,
+ xgpu_ai_mailbox_set_valid(adev, false);
+ }
+
++static int xgpu_ai_get_pp_clk(struct amdgpu_device *adev, u32 type, char *buf)
++{
++ int r = 0;
++ u32 req, val, size;
++
++ if (!amdgim_is_hwperf(adev) || buf == NULL)
++ return -EBADRQC;
++
++ switch(type) {
++ case PP_SCLK:
++ req = IDH_IRQ_GET_PP_SCLK;
++ break;
++ case PP_MCLK:
++ req = IDH_IRQ_GET_PP_MCLK;
++ break;
++ default:
++ return -EBADRQC;
++ }
++
++ mutex_lock(&adev->virt.dpm_mutex);
++
++ xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);
++
++ r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
++ if (!r && adev->fw_vram_usage.va != NULL) {
++ val = RREG32_NO_KIQ(
++ SOC15_REG_OFFSET(NBIO, 0,
++ mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1));
++ size = strnlen((((char *)adev->virt.fw_reserve.p_pf2vf) +
++ val), PAGE_SIZE);
++
++ if (size < PAGE_SIZE)
++ strcpy(buf,((char *)adev->virt.fw_reserve.p_pf2vf + val));
++ else
++ size = 0;
++
++ r = size;
++ goto out;
++ }
++
++ r = xgpu_ai_poll_msg(adev, IDH_FAIL);
++ if(r)
++ pr_info("%s DPM request failed",
++ (type == PP_SCLK)? "SCLK" : "MCLK");
++
++out:
++ mutex_unlock(&adev->virt.dpm_mutex);
++ return r;
++}
++
++static int xgpu_ai_force_dpm_level(struct amdgpu_device *adev, u32 level)
++{
++ int r = 0;
++ u32 req = IDH_IRQ_FORCE_DPM_LEVEL;
++
++ if (!amdgim_is_hwperf(adev))
++ return -EBADRQC;
++
++ mutex_lock(&adev->virt.dpm_mutex);
++ xgpu_ai_mailbox_trans_msg(adev, req, level, 0, 0);
++
++ r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
++ if (!r)
++ goto out;
++
++ r = xgpu_ai_poll_msg(adev, IDH_FAIL);
++ if (!r)
++ pr_info("DPM request failed");
++ else
++ pr_info("Mailbox is broken");
++
++out:
++ mutex_unlock(&adev->virt.dpm_mutex);
++ return r;
++}
++
+ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
+ enum idh_request req)
+ {
+@@ -375,4 +451,6 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
+ .reset_gpu = xgpu_ai_request_reset,
+ .wait_reset = NULL,
+ .trans_msg = xgpu_ai_mailbox_trans_msg,
++ .get_pp_clk = xgpu_ai_get_pp_clk,
++ .force_dpm_level = xgpu_ai_force_dpm_level,
+ };
+diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+index b4a9ceea334b..39d151b79153 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
++++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+@@ -35,6 +35,10 @@ enum idh_request {
+ IDH_REL_GPU_FINI_ACCESS,
+ IDH_REQ_GPU_RESET_ACCESS,
+
++ IDH_IRQ_FORCE_DPM_LEVEL = 10,
++ IDH_IRQ_GET_PP_SCLK,
++ IDH_IRQ_GET_PP_MCLK,
++
+ IDH_LOG_VF_ERROR = 200,
+ };
+
+@@ -43,6 +47,8 @@ enum idh_event {
+ IDH_READY_TO_ACCESS_GPU,
+ IDH_FLR_NOTIFICATION,
+ IDH_FLR_NOTIFICATION_CMPL,
++ IDH_SUCCESS,
++ IDH_FAIL,
+ IDH_EVENT_MAX
+ };
+
+--
+2.17.1
+