Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/4221-drm-amdgpu-revert-calling-smu-msg-in-df-callbacks.patch')
-rw-r--r-- meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/4221-drm-amdgpu-revert-calling-smu-msg-in-df-callbacks.patch | 133
1 file changed, 133 insertions(+), 0 deletions(-)
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/4221-drm-amdgpu-revert-calling-smu-msg-in-df-callbacks.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/4221-drm-amdgpu-revert-calling-smu-msg-in-df-callbacks.patch
new file mode 100644
index 00000000..b8ea18d8
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/4221-drm-amdgpu-revert-calling-smu-msg-in-df-callbacks.patch
@@ -0,0 +1,133 @@
+From ded09343b5c1253376dca4acb97fa56e9eab2749 Mon Sep 17 00:00:00 2001
+From: Jonathan Kim <jonathan.kim@amd.com>
+Date: Fri, 18 Oct 2019 15:26:05 -0400
+Subject: [PATCH 4221/4736] drm/amdgpu: revert calling smu msg in df callbacks
+
+Revert the following changes:
+commit 7dd2eb31fcd5 ("drm/amdgpu: fix compiler warnings for df perfmons")
+commit 54275cd1649f ("drm/amdgpu: disable c-states on xgmi perfmons")
+
+Perf events use spin-locks. Embedded SMU messages have potentially long
+response times and can deadlock the system.
+
+Change-Id: Ic36c35a62dec116d0a2f5b69c22af4d414458679
+Signed-off-by: Jonathan Kim <jonathan.kim@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/df_v3_6.c | 38 ++--------------------------
+ 1 file changed, 2 insertions(+), 36 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+index 766615f8c0ba..72bfefdbfa65 100644
+--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
++++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+@@ -93,21 +93,6 @@ const struct attribute_group *df_v3_6_attr_groups[] = {
+ NULL
+ };
+
+-static int df_v3_6_set_df_cstate(struct amdgpu_device *adev, int allow)
+-{
+- int r = 0;
+-
+- if (is_support_sw_smu(adev)) {
+- r = smu_set_df_cstate(&adev->smu, allow);
+- } else if (adev->powerplay.pp_funcs
+- && adev->powerplay.pp_funcs->set_df_cstate) {
+- r = adev->powerplay.pp_funcs->set_df_cstate(
+- adev->powerplay.pp_handle, allow);
+- }
+-
+- return r;
+-}
+-
+ static uint64_t df_v3_6_get_fica(struct amdgpu_device *adev,
+ uint32_t ficaa_val)
+ {
+@@ -117,9 +102,6 @@ static uint64_t df_v3_6_get_fica(struct amdgpu_device *adev,
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
+- if (df_v3_6_set_df_cstate(adev, DF_CSTATE_DISALLOW))
+- return 0xFFFFFFFFFFFFFFFF;
+-
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
+ WREG32(data, ficaa_val);
+@@ -132,8 +114,6 @@ static uint64_t df_v3_6_get_fica(struct amdgpu_device *adev,
+
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+
+- df_v3_6_set_df_cstate(adev, DF_CSTATE_ALLOW);
+-
+ return (((ficadh_val & 0xFFFFFFFFFFFFFFFF) << 32) | ficadl_val);
+ }
+
+@@ -145,9 +125,6 @@ static void df_v3_6_set_fica(struct amdgpu_device *adev, uint32_t ficaa_val,
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
+- if (df_v3_6_set_df_cstate(adev, DF_CSTATE_DISALLOW))
+- return;
+-
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
+ WREG32(data, ficaa_val);
+@@ -157,9 +134,8 @@ static void df_v3_6_set_fica(struct amdgpu_device *adev, uint32_t ficaa_val,
+
+ WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessDataHi3);
+ WREG32(data, ficadh_val);
+- spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+
+- df_v3_6_set_df_cstate(adev, DF_CSTATE_ALLOW);
++ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+ }
+
+ /*
+@@ -177,17 +153,12 @@ static void df_v3_6_perfmon_rreg(struct amdgpu_device *adev,
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
+- if (df_v3_6_set_df_cstate(adev, DF_CSTATE_DISALLOW))
+- return;
+-
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(address, lo_addr);
+ *lo_val = RREG32(data);
+ WREG32(address, hi_addr);
+ *hi_val = RREG32(data);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+-
+- df_v3_6_set_df_cstate(adev, DF_CSTATE_ALLOW);
+ }
+
+ /*
+@@ -204,17 +175,12 @@ static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr,
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
+- if (df_v3_6_set_df_cstate(adev, DF_CSTATE_DISALLOW))
+- return;
+-
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(address, lo_addr);
+ WREG32(data, lo_val);
+ WREG32(address, hi_addr);
+ WREG32(data, hi_val);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+-
+- df_v3_6_set_df_cstate(adev, DF_CSTATE_ALLOW);
+ }
+
+ /* get the number of df counters available */
+@@ -547,7 +513,7 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
+ uint64_t config,
+ uint64_t *count)
+ {
+- uint32_t lo_base_addr, hi_base_addr, lo_val = 0, hi_val = 0;
++ uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
+ *count = 0;
+
+ switch (adev->asic_type) {
+--
+2.17.1
+
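
The hazard behind the revert, for context: the DF perfmon register accessors in this file are reached from perf event callbacks, which the kernel can invoke with spin-locks held and interrupts disabled, so embedding an SMU request with a long or unbounded response time in that path can stall or deadlock the machine. Below is a minimal illustrative sketch of that pattern, not part of the patch; the names (toy_lock, slow_smu_request, toy_pmu_read) are hypothetical stand-ins for the driver code.

/* Illustrative sketch only -- hypothetical names, not driver code. */
#include <linux/spinlock.h>
#include <linux/delay.h>

static DEFINE_SPINLOCK(toy_lock);

/* Stand-in for an SMU message: the firmware response may take a long
 * time, and the real helpers may sleep while waiting for it. */
static int slow_smu_request(void)
{
	msleep(100);
	return 0;
}

/* Stand-in for a perf read/start/stop callback. */
static void toy_pmu_read(void)
{
	unsigned long flags;

	spin_lock_irqsave(&toy_lock, flags);
	/* BUG: waiting on (or sleeping for) a slow firmware reply while a
	 * spin-lock is held with IRQs off can hang or deadlock the system;
	 * this is the pattern the revert removes from the DF callbacks. */
	slow_smu_request();
	spin_unlock_irqrestore(&toy_lock, flags);
}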