Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-5.15/0063-perf-x86-amd-Add-AMD-branch-sampling-period-adjustme.patch')
-rw-r--r-- meta-amd-bsp/recipes-kernel/linux/linux-yocto-5.15/0063-perf-x86-amd-Add-AMD-branch-sampling-period-adjustme.patch | 79
1 file changed, 79 insertions, 0 deletions
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-5.15/0063-perf-x86-amd-Add-AMD-branch-sampling-period-adjustme.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-5.15/0063-perf-x86-amd-Add-AMD-branch-sampling-period-adjustme.patch
new file mode 100644
index 00000000..aa553a1c
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-5.15/0063-perf-x86-amd-Add-AMD-branch-sampling-period-adjustme.patch
@@ -0,0 +1,79 @@
+From c9c86880f212bdb168b231fb770e2f4d05a84cc9 Mon Sep 17 00:00:00 2001
+From: Stephane Eranian <eranian@google.com>
+Date: Tue, 22 Mar 2022 15:15:10 -0700
+Subject: [PATCH 63/86] perf/x86/amd: Add AMD branch sampling period adjustment
+
+commit ba2fe7500845a30fc845a72081999cf632051862 upstream
+
+Add code to adjust the sampling event period when used with the Branch
+Sampling feature (BRS). Given the depth of the BRS (16), the period is
+reduced by that depth such that in the best case scenario, BRS saturates at
+the desired sampling period. In practice, though, the processor may execute
+more branches. Given a desired period P and a depth D, the kernel programs
+the actual period at P - D. After P occurrences of the sampling event, the
+counter overflows. It then may take X branches (skid) before the NMI is
+caught and held by the hardware and BRS activates. Then, after D branches,
+BRS saturates and the NMI is delivered. With no skid, the effective period
+would be (P - D) + D = P. In practice, however, it will likely be (P - D) +
+X + D. There is no way to eliminate X or predict X.
+
+Signed-off-by: Stephane Eranian <eranian@google.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lore.kernel.org/r/20220322221517.2510440-7-eranian@google.com
+Signed-off-by: Zhaolong Zhang <zhaolong.zhang@windriver.com>
+---
+ arch/x86/events/core.c | 7 +++++++
+ arch/x86/events/perf_event.h | 12 ++++++++++++
+ 2 files changed, 19 insertions(+)
+
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index ddbfbf304b2d..91ed45c98353 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -1370,6 +1370,13 @@ int x86_perf_event_set_period(struct perf_event *event)
+ x86_pmu.set_topdown_event_period)
+ return x86_pmu.set_topdown_event_period(event);
+
++ /*
++ * decrease period by the depth of the BRS feature to get
++ * the last N taken branches and approximate the desired period
++ */
++ if (has_branch_stack(event))
++ period = amd_brs_adjust_period(period);
++
+ /*
+ * If we are way outside a reasonable range then just skip forward:
+ */
+diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
+index ec7eb3fedbce..19a254cf8c7d 100644
+--- a/arch/x86/events/perf_event.h
++++ b/arch/x86/events/perf_event.h
+@@ -1261,6 +1261,14 @@ static inline bool amd_brs_active(void)
+ return cpuc->brs_active;
+ }
+
++static inline s64 amd_brs_adjust_period(s64 period)
++{
++ if (period > x86_pmu.lbr_nr)
++ return period - x86_pmu.lbr_nr;
++
++ return period;
++}
++
+ #else /* CONFIG_CPU_SUP_AMD */
+
+ static inline int amd_pmu_init(void)
+@@ -1285,6 +1293,10 @@ static inline void amd_brs_disable_all(void)
+ {
+ }
+
++static inline s64 amd_brs_adjust_period(s64 period)
++{
++ return period;
++}
+ #endif /* CONFIG_CPU_SUP_AMD */
+
+ static inline int is_pebs_pt(struct perf_event *event)
+--
+2.37.3
+
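A minimal user-space sketch of the period adjustment described in the commit
message above, not part of the patch itself: brs_adjust_period() mirrors the
shape of amd_brs_adjust_period() from perf_event.h, with a fixed BRS_DEPTH of
16 standing in for x86_pmu.lbr_nr; the desired period P and skid X below are
arbitrary values chosen only to illustrate the arithmetic.

/* brs_period_sketch.c - illustrative only; build with: gcc -o brs_period_sketch brs_period_sketch.c */
#include <stdio.h>

#define BRS_DEPTH 16	/* BRS depth (D); stands in for x86_pmu.lbr_nr */

/* Same shape as amd_brs_adjust_period() in the patch: shrink the period
 * by the BRS depth, but only when the period is larger than the depth. */
static long long brs_adjust_period(long long period)
{
	if (period > BRS_DEPTH)
		return period - BRS_DEPTH;

	return period;
}

int main(void)
{
	long long P = 100000;		/* desired sampling period */
	long long D = BRS_DEPTH;	/* BRS depth */
	long long X = 37;		/* hypothetical NMI skid, illustration only */
	long long programmed = brs_adjust_period(P);

	printf("programmed period       : %lld  (P - D)\n", programmed);
	printf("effective, no skid      : %lld  ((P - D) + D = P)\n", programmed + D);
	printf("effective, skid X = %lld : %lld  ((P - D) + X + D)\n", X, programmed + X + D);

	return 0;
}

With these numbers the kernel would program 99984 into the counter; the
effective period is exactly P = 100000 when there is no skid, and 100037 once
the illustrative 37-branch skid is added, matching the (P - D) + X + D formula
from the commit message.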