Diffstat (limited to 'arch/x86/events/perf_event.h')
-rw-r--r--  arch/x86/events/perf_event.h  80
1 file changed, 77 insertions, 3 deletions
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 834e53e538d4..b958801b9919 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -78,6 +78,32 @@ static inline bool constraint_match(struct event_constraint *c, u64 ecode)
#define PERF_X86_EVENT_LARGE_PEBS 0x0400 /* use large PEBS */
#define PERF_X86_EVENT_PEBS_VIA_PT 0x0800 /* use PT buffer for PEBS */
#define PERF_X86_EVENT_PAIR 0x1000 /* Large Increment per Cycle */
+#define PERF_X86_EVENT_LBR_SELECT 0x2000 /* Save/Restore MSR_LBR_SELECT */
+#define PERF_X86_EVENT_TOPDOWN 0x4000 /* Count Topdown slots/metrics events */
+
+static inline bool is_topdown_count(struct perf_event *event)
+{
+ return event->hw.flags & PERF_X86_EVENT_TOPDOWN;
+}
+
+static inline bool is_metric_event(struct perf_event *event)
+{
+ u64 config = event->attr.config;
+
+ return ((config & ARCH_PERFMON_EVENTSEL_EVENT) == 0) &&
+ ((config & INTEL_ARCH_EVENT_MASK) >= INTEL_TD_METRIC_RETIRING) &&
+ ((config & INTEL_ARCH_EVENT_MASK) <= INTEL_TD_METRIC_MAX);
+}
+
+static inline bool is_slots_event(struct perf_event *event)
+{
+ return (event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_TD_SLOTS;
+}
+
+static inline bool is_topdown_event(struct perf_event *event)
+{
+ return is_metric_event(event) || is_slots_event(event);
+}
struct amd_nb {
int nb_id; /* NorthBridge id */
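
Editor's note: the four helpers added above classify an event purely by its config bits: a metric event has an event select of zero and a umask inside the topdown-metric range, while the slots event matches INTEL_TD_SLOTS exactly. The standalone sketch below mirrors that classification; the numeric constants are written out by hand to match the arch/x86 headers and should be treated as assumptions, not authoritative values.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed values mirroring the x86 perf headers; verify before relying on them. */
#define ARCH_PERFMON_EVENTSEL_EVENT	0x000000FFULL	/* event-select field */
#define INTEL_ARCH_EVENT_MASK		0x0000FFFFULL	/* event select + umask */
#define INTEL_TD_SLOTS			0x0400ULL	/* TOPDOWN.SLOTS pseudo event */
#define INTEL_TD_METRIC_RETIRING	0x8000ULL	/* first metric pseudo event */
#define INTEL_TD_METRIC_MAX		0x8300ULL	/* last metric pseudo event (Ice Lake) */

static bool is_metric_config(uint64_t config)
{
	/* Metric events: event select 0, umask within the metric range. */
	return ((config & ARCH_PERFMON_EVENTSEL_EVENT) == 0) &&
	       ((config & INTEL_ARCH_EVENT_MASK) >= INTEL_TD_METRIC_RETIRING) &&
	       ((config & INTEL_ARCH_EVENT_MASK) <= INTEL_TD_METRIC_MAX);
}

static bool is_slots_config(uint64_t config)
{
	return (config & INTEL_ARCH_EVENT_MASK) == INTEL_TD_SLOTS;
}

int main(void)
{
	printf("0x8000 -> metric=%d slots=%d\n", is_metric_config(0x8000), is_slots_config(0x8000));
	printf("0x0400 -> metric=%d slots=%d\n", is_metric_config(0x0400), is_slots_config(0x0400));
	printf("0x412e -> metric=%d slots=%d\n", is_metric_config(0x412e), is_slots_config(0x412e));
	return 0;
}
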
@@ -106,7 +132,7 @@ struct amd_nb {
PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \
PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR | \
PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER | \
- PERF_SAMPLE_PERIOD)
+ PERF_SAMPLE_PERIOD | PERF_SAMPLE_CODE_PAGE_SIZE)
#define PEBS_GP_REGS \
((1ULL << PERF_REG_X86_AX) | \
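
Editor's note: adding PERF_SAMPLE_CODE_PAGE_SIZE to the large-PEBS flag set means a sampling event that requests the code page size no longer forfeits the multi-entry (large) PEBS optimization. From user space the bit is simply set in sample_type; the minimal sketch below assumes uapi headers new enough to define PERF_SAMPLE_CODE_PAGE_SIZE and a CPU/kernel that accept precise (PEBS) sampling.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

/* glibc provides no wrapper for perf_event_open, so call the syscall directly. */
static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100003;
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_PERIOD |
			   PERF_SAMPLE_CODE_PAGE_SIZE;
	attr.precise_ip = 2;		/* request PEBS so large PEBS is in play */
	attr.exclude_kernel = 1;

	fd = perf_event_open(&attr, 0 /* current thread */, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	close(fd);
	return 0;
}
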
@@ -198,6 +224,8 @@ struct cpu_hw_events {
they've never been enabled yet */
int n_txn; /* the # last events in the below arrays;
added in the current transaction */
+ int n_txn_pair;
+ int n_txn_metric;
int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
u64 tags[X86_PMC_IDX_MAX];
@@ -237,6 +265,7 @@ struct cpu_hw_events {
u64 br_sel;
struct x86_perf_task_context *last_task_ctx;
int last_log_id;
+ int lbr_select;
/*
* Intel host/guest exclude bits
@@ -268,11 +297,18 @@ struct cpu_hw_events {
u64 tfa_shadow;
/*
+ * Perf Metrics
+ */
+ /* number of accepted metrics events */
+ int n_metric;
+
+ /*
* AMD specific bits
*/
struct amd_nb *amd_nb;
/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
u64 perf_ctr_virt_mask;
+ int n_pair; /* Large increment events */
void *kfree_on_online[X86_PERF_KFREE_MAX];
};
@@ -358,6 +394,19 @@ struct cpu_hw_events {
EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS)
/*
+ * The special metric counters do not actually exist. They are calculated from
+ * the combination of FxCtr3 and MSR_PERF_METRICS.
+ *
+ * The special metric counters are mapped to a dummy offset for the scheduler.
+ * Sharing the same metric between multiple users without multiplexing is not
+ * allowed, even though the hardware supports that in principle.
+ */
+
+#define METRIC_EVENT_CONSTRAINT(c, n) \
+ EVENT_CONSTRAINT(c, (1ULL << (INTEL_PMC_IDX_METRIC_BASE + n)), \
+ INTEL_ARCH_EVENT_MASK)
+
+/*
* Constraint on the Event code + UMask
*/
#define INTEL_UEVENT_CONSTRAINT(c, n) \
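
Editor's note: METRIC_EVENT_CONSTRAINT pins each metric event to a dummy counter index above the fixed-counter range, so the generic scheduler can treat the metrics like ordinary counters without ever programming real hardware for them. The sketch below only illustrates the resulting constraint masks; it assumes INTEL_PMC_IDX_METRIC_BASE is the fixed-counter base (32) plus 16, i.e. bit 48, as in the Ice Lake topdown patches, and that four metrics exist.

#include <stdint.h>
#include <stdio.h>

/* Assumed index layout; check against arch/x86/include/asm/perf_event.h. */
#define INTEL_PMC_IDX_FIXED		32
#define INTEL_PMC_IDX_METRIC_BASE	(INTEL_PMC_IDX_FIXED + 16)	/* bit 48 */

int main(void)
{
	int n;

	/* One dummy scheduler bit per metric (retiring, bad spec, FE bound, BE bound). */
	for (n = 0; n < 4; n++) {
		uint64_t idxmsk = 1ULL << (INTEL_PMC_IDX_METRIC_BASE + n);

		printf("metric %d -> pseudo counter %d, idxmsk 0x%016llx\n",
		       n, INTEL_PMC_IDX_METRIC_BASE + n, (unsigned long long)idxmsk);
	}
	return 0;
}
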
@@ -519,7 +568,7 @@ union perf_capabilities {
*/
u64 full_width_write:1;
u64 pebs_baseline:1;
- u64 pebs_metrics_available:1;
+ u64 perf_metrics:1;
u64 pebs_output_pt_available:1;
};
u64 capabilities;
@@ -684,9 +733,16 @@ struct x86_pmu {
atomic_t lbr_exclusive[x86_lbr_exclusive_max];
/*
+ * Intel perf metrics
+ */
+ u64 (*update_topdown_event)(struct perf_event *event);
+ int (*set_topdown_event_period)(struct perf_event *event);
+
+ /*
* AMD bits
*/
unsigned int amd_nb_constraints : 1;
+ u64 perf_ctr_pair_en;
/*
* Extra registers for events
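
Editor's note: the two new x86_pmu hooks keep the PERF_METRICS handling model-specific; generic code only checks whether a hook is populated and dispatches through it for topdown events. The standalone sketch below models that dispatch with stand-in types. The hook names follow the Ice Lake implementation, but the bodies and the event type are purely illustrative.

#include <stdint.h>
#include <stdio.h>

struct fake_event {
	uint64_t config;
};

/* Mirrors the two optional callbacks added to struct x86_pmu. */
struct fake_x86_pmu {
	uint64_t (*update_topdown_event)(struct fake_event *event);
	int (*set_topdown_event_period)(struct fake_event *event);
};

static uint64_t icl_update_topdown_event(struct fake_event *event)
{
	/* The real Ice Lake callback reads FxCtr3 and MSR_PERF_METRICS here. */
	(void)event;
	return 0;
}

static int icl_set_topdown_event_period(struct fake_event *event)
{
	/* The real callback re-arms the SLOTS counter for the next period. */
	(void)event;
	return 0;
}

static struct fake_x86_pmu x86_pmu = {
	.update_topdown_event	  = icl_update_topdown_event,
	.set_topdown_event_period = icl_set_topdown_event_period,
};

int main(void)
{
	struct fake_event ev = { .config = 0x8000 };	/* a metric pseudo event */

	/* Generic code dispatches only when the model provides the hook. */
	if (x86_pmu.set_topdown_event_period)
		x86_pmu.set_topdown_event_period(&ev);
	if (x86_pmu.update_topdown_event)
		printf("count delta: %llu\n",
		       (unsigned long long)x86_pmu.update_topdown_event(&ev));
	return 0;
}
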
@@ -711,6 +767,7 @@ struct x86_perf_task_context {
u64 lbr_from[MAX_LBR_ENTRIES];
u64 lbr_to[MAX_LBR_ENTRIES];
u64 lbr_info[MAX_LBR_ENTRIES];
+ u64 lbr_sel;
int tos;
int valid_lbrs;
int lbr_callstack_users;
@@ -832,6 +889,11 @@ int x86_pmu_hw_config(struct perf_event *event);
void x86_pmu_disable_all(void);
+static inline bool is_counter_pair(struct hw_perf_event *hwc)
+{
+ return hwc->flags & PERF_X86_EVENT_PAIR;
+}
+
static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
u64 enable_mask)
{
@@ -839,6 +901,14 @@ static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
if (hwc->extra_reg.reg)
wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
+
+ /*
+ * Add an enabled Merge event on the next counter if a large increment
+ * event is being enabled on this counter.
+ */
+ if (is_counter_pair(hwc))
+ wrmsrl(x86_pmu_config_addr(hwc->idx + 1), x86_pmu.perf_ctr_pair_en);
+
wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
}
@@ -855,7 +925,10 @@ static inline void x86_pmu_disable_event(struct perf_event *event)
u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);
struct hw_perf_event *hwc = &event->hw;
- wrmsrl(hwc->config_base, hwc->config);
+ wrmsrl(hwc->config_base, hwc->config & ~disable_mask);
+
+ if (is_counter_pair(hwc))
+ wrmsrl(x86_pmu_config_addr(hwc->idx + 1), 0);
}
void x86_pmu_enable_event(struct perf_event *event);
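
Editor's note: for a Large Increment per Cycle event the enable path programs the next (odd) control register with the Merge event before enabling the event itself, and the disable path clears that neighbour again. The standalone sketch below models this even/odd pairing; it stores register values in an array instead of issuing wrmsrl(), and MERGE_CONFIG is an arbitrary placeholder for x86_pmu.perf_ctr_pair_en rather than the real AMD encoding.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_COUNTERS		6
#define EVENTSEL_ENABLE		(1ULL << 22)	/* counter-enable bit in the event select */
#define MERGE_CONFIG		0x1000ULL	/* placeholder for perf_ctr_pair_en */

/* Simulated per-counter event-select registers (the real code writes MSRs). */
static uint64_t evtsel[NUM_COUNTERS];

static void enable_event(int idx, uint64_t config, bool paired)
{
	/* Program the Merge event on the next counter first, as in the patch. */
	if (paired)
		evtsel[idx + 1] = MERGE_CONFIG;
	evtsel[idx] = config | EVENTSEL_ENABLE;
}

static void disable_event(int idx, uint64_t config, bool paired)
{
	evtsel[idx] = config;			/* enable bit left clear */
	if (paired)
		evtsel[idx + 1] = 0;		/* release the paired counter */
}

int main(void)
{
	enable_event(2, 0x76, true);		/* paired event occupying counters 2 and 3 */
	printf("enabled:  ctl2=0x%llx ctl3=0x%llx\n",
	       (unsigned long long)evtsel[2], (unsigned long long)evtsel[3]);

	disable_event(2, 0x76, true);
	printf("disabled: ctl2=0x%llx ctl3=0x%llx\n",
	       (unsigned long long)evtsel[2], (unsigned long long)evtsel[3]);
	return 0;
}
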
@@ -964,6 +1037,7 @@ void release_ds_buffers(void);
void reserve_ds_buffers(void);
extern struct event_constraint bts_constraint;
+extern struct event_constraint vlbr_constraint;
void intel_pmu_enable_bts(u64 config);