1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/perf_event.h>
3 #include <linux/types.h>
4
5 #include <asm/perf_event.h>
6 #include <asm/msr.h>
7 #include <asm/insn.h>
8
9 #include "../perf_event.h"
10
11 static const enum {
12 LBR_EIP_FLAGS = 1,
13 LBR_TSX = 2,
14 } lbr_desc[LBR_FORMAT_MAX_KNOWN + 1] = {
15 [LBR_FORMAT_EIP_FLAGS] = LBR_EIP_FLAGS,
16 [LBR_FORMAT_EIP_FLAGS2] = LBR_EIP_FLAGS | LBR_TSX,
17 };
18
19 /*
20 * Intel LBR_SELECT bits
21 * Intel Vol3a, April 2011, Section 16.7 Table 16-10
22 *
23 * Hardware branch filter (not available on all CPUs)
24 */
25 #define LBR_KERNEL_BIT 0 /* do not capture at ring0 */
26 #define LBR_USER_BIT 1 /* do not capture at ring > 0 */
27 #define LBR_JCC_BIT 2 /* do not capture conditional branches */
28 #define LBR_REL_CALL_BIT 3 /* do not capture relative calls */
29 #define LBR_IND_CALL_BIT 4 /* do not capture indirect calls */
30 #define LBR_RETURN_BIT 5 /* do not capture near returns */
31 #define LBR_IND_JMP_BIT 6 /* do not capture indirect jumps */
32 #define LBR_REL_JMP_BIT 7 /* do not capture relative jumps */
33 #define LBR_FAR_BIT 8 /* do not capture far branches */
34 #define LBR_CALL_STACK_BIT 9 /* enable call stack */
35
36 /*
37 * Following bit only exists in Linux; we mask it out before writing it to
38 * the actual MSR. But it helps the constraint perf code to understand
39 * that this is a separate configuration.
40 */
41 #define LBR_NO_INFO_BIT 63 /* don't read LBR_INFO. */
42
43 #define LBR_KERNEL (1 << LBR_KERNEL_BIT)
44 #define LBR_USER (1 << LBR_USER_BIT)
45 #define LBR_JCC (1 << LBR_JCC_BIT)
46 #define LBR_REL_CALL (1 << LBR_REL_CALL_BIT)
47 #define LBR_IND_CALL (1 << LBR_IND_CALL_BIT)
48 #define LBR_RETURN (1 << LBR_RETURN_BIT)
49 #define LBR_REL_JMP (1 << LBR_REL_JMP_BIT)
50 #define LBR_IND_JMP (1 << LBR_IND_JMP_BIT)
51 #define LBR_FAR (1 << LBR_FAR_BIT)
52 #define LBR_CALL_STACK (1 << LBR_CALL_STACK_BIT)
53 #define LBR_NO_INFO (1ULL << LBR_NO_INFO_BIT)
54
55 #define LBR_PLM (LBR_KERNEL | LBR_USER)
56
57 #define LBR_SEL_MASK 0x3ff /* valid bits in LBR_SELECT */
58 #define LBR_NOT_SUPP -1 /* LBR filter not supported */
59 #define LBR_IGN 0 /* ignored */
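/*
 * For example (illustrative values): a config of LBR_KERNEL | LBR_JCC |
 * LBR_NO_INFO has bit 63 set, which is not a valid LBR_SELECT bit.
 * Masking with LBR_SEL_MASK (as __intel_pmu_lbr_enable() does via
 * x86_pmu.lbr_sel_mask) strips it before the MSR write:
 *
 *	((LBR_KERNEL | LBR_JCC | LBR_NO_INFO) & LBR_SEL_MASK)
 *		== (LBR_KERNEL | LBR_JCC)
 */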
60
61 #define LBR_ANY \
62 (LBR_JCC |\
63 LBR_REL_CALL |\
64 LBR_IND_CALL |\
65 LBR_RETURN |\
66 LBR_REL_JMP |\
67 LBR_IND_JMP |\
68 LBR_FAR)
69
70 #define LBR_FROM_FLAG_MISPRED BIT_ULL(63)
71 #define LBR_FROM_FLAG_IN_TX BIT_ULL(62)
72 #define LBR_FROM_FLAG_ABORT BIT_ULL(61)
73
74 #define LBR_FROM_SIGNEXT_2MSB (BIT_ULL(60) | BIT_ULL(59))
75
76 /*
 * x86 control flow change classification
 * x86 control flow changes include branches, interrupts, traps, faults
79 */
80 enum {
81 X86_BR_NONE = 0, /* unknown */
82
83 X86_BR_USER = 1 << 0, /* branch target is user */
84 X86_BR_KERNEL = 1 << 1, /* branch target is kernel */
85
86 X86_BR_CALL = 1 << 2, /* call */
87 X86_BR_RET = 1 << 3, /* return */
88 X86_BR_SYSCALL = 1 << 4, /* syscall */
89 X86_BR_SYSRET = 1 << 5, /* syscall return */
90 X86_BR_INT = 1 << 6, /* sw interrupt */
91 X86_BR_IRET = 1 << 7, /* return from interrupt */
92 X86_BR_JCC = 1 << 8, /* conditional */
93 X86_BR_JMP = 1 << 9, /* jump */
94 X86_BR_IRQ = 1 << 10,/* hw interrupt or trap or fault */
95 X86_BR_IND_CALL = 1 << 11,/* indirect calls */
96 X86_BR_ABORT = 1 << 12,/* transaction abort */
97 X86_BR_IN_TX = 1 << 13,/* in transaction */
98 X86_BR_NO_TX = 1 << 14,/* not in transaction */
99 X86_BR_ZERO_CALL = 1 << 15,/* zero length call */
100 X86_BR_CALL_STACK = 1 << 16,/* call stack */
101 X86_BR_IND_JMP = 1 << 17,/* indirect jump */
102
103 X86_BR_TYPE_SAVE = 1 << 18,/* indicate to save branch type */
104
105 };
106
107 #define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL)
108 #define X86_BR_ANYTX (X86_BR_NO_TX | X86_BR_IN_TX)
109
110 #define X86_BR_ANY \
111 (X86_BR_CALL |\
112 X86_BR_RET |\
113 X86_BR_SYSCALL |\
114 X86_BR_SYSRET |\
115 X86_BR_INT |\
116 X86_BR_IRET |\
117 X86_BR_JCC |\
118 X86_BR_JMP |\
119 X86_BR_IRQ |\
120 X86_BR_ABORT |\
121 X86_BR_IND_CALL |\
122 X86_BR_IND_JMP |\
123 X86_BR_ZERO_CALL)
124
125 #define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY)
126
127 #define X86_BR_ANY_CALL \
128 (X86_BR_CALL |\
129 X86_BR_IND_CALL |\
130 X86_BR_ZERO_CALL |\
131 X86_BR_SYSCALL |\
132 X86_BR_IRQ |\
133 X86_BR_INT)
134
135 /*
136 * Intel LBR_CTL bits
137 *
138 * Hardware branch filter for Arch LBR
139 */
140 #define ARCH_LBR_KERNEL_BIT 1 /* capture at ring0 */
141 #define ARCH_LBR_USER_BIT 2 /* capture at ring > 0 */
142 #define ARCH_LBR_CALL_STACK_BIT 3 /* enable call stack */
143 #define ARCH_LBR_JCC_BIT 16 /* capture conditional branches */
144 #define ARCH_LBR_REL_JMP_BIT 17 /* capture relative jumps */
145 #define ARCH_LBR_IND_JMP_BIT 18 /* capture indirect jumps */
146 #define ARCH_LBR_REL_CALL_BIT 19 /* capture relative calls */
147 #define ARCH_LBR_IND_CALL_BIT 20 /* capture indirect calls */
148 #define ARCH_LBR_RETURN_BIT 21 /* capture near returns */
149 #define ARCH_LBR_OTHER_BRANCH_BIT 22 /* capture other branches */
150
151 #define ARCH_LBR_KERNEL (1ULL << ARCH_LBR_KERNEL_BIT)
152 #define ARCH_LBR_USER (1ULL << ARCH_LBR_USER_BIT)
153 #define ARCH_LBR_CALL_STACK (1ULL << ARCH_LBR_CALL_STACK_BIT)
154 #define ARCH_LBR_JCC (1ULL << ARCH_LBR_JCC_BIT)
155 #define ARCH_LBR_REL_JMP (1ULL << ARCH_LBR_REL_JMP_BIT)
156 #define ARCH_LBR_IND_JMP (1ULL << ARCH_LBR_IND_JMP_BIT)
157 #define ARCH_LBR_REL_CALL (1ULL << ARCH_LBR_REL_CALL_BIT)
158 #define ARCH_LBR_IND_CALL (1ULL << ARCH_LBR_IND_CALL_BIT)
159 #define ARCH_LBR_RETURN (1ULL << ARCH_LBR_RETURN_BIT)
160 #define ARCH_LBR_OTHER_BRANCH (1ULL << ARCH_LBR_OTHER_BRANCH_BIT)
161
162 #define ARCH_LBR_ANY \
163 (ARCH_LBR_JCC |\
164 ARCH_LBR_REL_JMP |\
165 ARCH_LBR_IND_JMP |\
166 ARCH_LBR_REL_CALL |\
167 ARCH_LBR_IND_CALL |\
168 ARCH_LBR_RETURN |\
169 ARCH_LBR_OTHER_BRANCH)
170
171 #define ARCH_LBR_CTL_MASK 0x7f000e
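/*
 * The mask above is the OR of all the filter bits defined here:
 *
 *	ARCH_LBR_KERNEL | ARCH_LBR_USER | ARCH_LBR_CALL_STACK	= 0x00000e
 *	ARCH_LBR_JCC ... ARCH_LBR_OTHER_BRANCH (bits 16-22)	= 0x7f0000
 *							total	  0x7f000e
 */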
172
173 static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc);
174
static __always_inline bool is_lbr_call_stack_bit_set(u64 config)
176 {
177 if (static_cpu_has(X86_FEATURE_ARCH_LBR))
178 return !!(config & ARCH_LBR_CALL_STACK);
179
180 return !!(config & LBR_CALL_STACK);
181 }
182
183 /*
 * We only support LBR implementations that have FREEZE_LBRS_ON_PMI,
 * otherwise it becomes near impossible to get a reliable stack.
186 */
187
static void __intel_pmu_lbr_enable(bool pmi)
189 {
190 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
191 u64 debugctl, lbr_select = 0, orig_debugctl;
192
193 /*
194 * No need to unfreeze manually, as v4 can do that as part
195 * of the GLOBAL_STATUS ack.
196 */
197 if (pmi && x86_pmu.version >= 4)
198 return;
199
200 /*
201 * No need to reprogram LBR_SELECT in a PMI, as it
202 * did not change.
203 */
204 if (cpuc->lbr_sel)
205 lbr_select = cpuc->lbr_sel->config & x86_pmu.lbr_sel_mask;
206 if (!static_cpu_has(X86_FEATURE_ARCH_LBR) && !pmi && cpuc->lbr_sel)
207 wrmsrl(MSR_LBR_SELECT, lbr_select);
208
209 rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
210 orig_debugctl = debugctl;
211
212 if (!static_cpu_has(X86_FEATURE_ARCH_LBR))
213 debugctl |= DEBUGCTLMSR_LBR;
214 /*
215 * LBR callstack does not work well with FREEZE_LBRS_ON_PMI.
216 * If FREEZE_LBRS_ON_PMI is set, PMI near call/return instructions
217 * may cause superfluous increase/decrease of LBR_TOS.
218 */
219 if (is_lbr_call_stack_bit_set(lbr_select))
220 debugctl &= ~DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
221 else
222 debugctl |= DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
223
224 if (orig_debugctl != debugctl)
225 wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
226
227 if (static_cpu_has(X86_FEATURE_ARCH_LBR))
228 wrmsrl(MSR_ARCH_LBR_CTL, lbr_select | ARCH_LBR_CTL_LBREN);
229 }
230
void intel_pmu_lbr_reset_32(void)
232 {
233 int i;
234
235 for (i = 0; i < x86_pmu.lbr_nr; i++)
236 wrmsrl(x86_pmu.lbr_from + i, 0);
237 }
238
void intel_pmu_lbr_reset_64(void)
240 {
241 int i;
242
243 for (i = 0; i < x86_pmu.lbr_nr; i++) {
244 wrmsrl(x86_pmu.lbr_from + i, 0);
245 wrmsrl(x86_pmu.lbr_to + i, 0);
246 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
247 wrmsrl(x86_pmu.lbr_info + i, 0);
248 }
249 }
250
static void intel_pmu_arch_lbr_reset(void)
252 {
253 /* Write to ARCH_LBR_DEPTH MSR, all LBR entries are reset to 0 */
254 wrmsrl(MSR_ARCH_LBR_DEPTH, x86_pmu.lbr_nr);
255 }
256
void intel_pmu_lbr_reset(void)
258 {
259 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
260
261 if (!x86_pmu.lbr_nr)
262 return;
263
264 x86_pmu.lbr_reset();
265
266 cpuc->last_task_ctx = NULL;
267 cpuc->last_log_id = 0;
268 if (!static_cpu_has(X86_FEATURE_ARCH_LBR) && cpuc->lbr_select)
269 wrmsrl(MSR_LBR_SELECT, 0);
270 }
271
272 /*
273 * TOS = most recently recorded branch
274 */
static inline u64 intel_pmu_lbr_tos(void)
276 {
277 u64 tos;
278
279 rdmsrl(x86_pmu.lbr_tos, tos);
280 return tos;
281 }
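/*
 * The LBR MSRs form a ring buffer indexed backwards from TOS, so the
 * readers below walk it as (tos - i) & mask with mask = lbr_nr - 1.
 * For example (illustrative numbers), with 32 LBRs and tos == 3 the most
 * recent branches live at indices 3, 2, 1, 0, 31, 30, ...
 */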
282
283 enum {
284 LBR_NONE,
285 LBR_VALID,
286 };
287
288 /*
289 * For formats with LBR_TSX flags (e.g. LBR_FORMAT_EIP_FLAGS2), bits 61:62 in
290 * MSR_LAST_BRANCH_FROM_x are the TSX flags when TSX is supported, but when
291 * TSX is not supported they have no consistent behavior:
292 *
293 * - For wrmsr(), bits 61:62 are considered part of the sign extension.
294 * - For HW updates (branch captures) bits 61:62 are always OFF and are not
295 * part of the sign extension.
296 *
297 * Therefore, if:
298 *
299 * 1) LBR has TSX format
300 * 2) CPU has no TSX support enabled
301 *
302 * ... then any value passed to wrmsr() must be sign extended to 63 bits and any
303 * value from rdmsr() must be converted to have a 61 bits sign extension,
304 * ignoring the TSX flags.
305 */
static inline bool lbr_from_signext_quirk_needed(void)
307 {
308 int lbr_format = x86_pmu.intel_cap.lbr_format;
309 bool tsx_support = boot_cpu_has(X86_FEATURE_HLE) ||
310 boot_cpu_has(X86_FEATURE_RTM);
311
312 return !tsx_support && (lbr_desc[lbr_format] & LBR_TSX);
313 }
314
315 static DEFINE_STATIC_KEY_FALSE(lbr_from_quirk_key);
316
317 /* If quirk is enabled, ensure sign extension is 63 bits: */
inline u64 lbr_from_signext_quirk_wr(u64 val)
319 {
320 if (static_branch_unlikely(&lbr_from_quirk_key)) {
321 /*
322 * Sign extend into bits 61:62 while preserving bit 63.
323 *
324 * Quirk is enabled when TSX is disabled. Therefore TSX bits
325 * in val are always OFF and must be changed to be sign
326 * extension bits. Since bits 59:60 are guaranteed to be
327 * part of the sign extension bits, we can just copy them
328 * to 61:62.
329 */
330 val |= (LBR_FROM_SIGNEXT_2MSB & val) << 2;
331 }
332 return val;
333 }
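/*
 * Worked example (illustrative value): a sign-extended kernel FROM
 * address with the mispredict flag set is read back from hardware as
 *
 *	0x9fff880012345678	(bit 63 = mispred, bits 62:61 forced to 0)
 *
 * Copying bits 60:59 (both 1, part of the sign extension) up by two
 * restores the sign extension in bits 62:61, i.e. 0xffff880012345678,
 * which wrmsr() accepts on parts without TSX.
 */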
334
335 /*
336 * If quirk is needed, ensure sign extension is 61 bits:
337 */
static u64 lbr_from_signext_quirk_rd(u64 val)
339 {
340 if (static_branch_unlikely(&lbr_from_quirk_key)) {
341 /*
342 * Quirk is on when TSX is not enabled. Therefore TSX
343 * flags must be read as OFF.
344 */
345 val &= ~(LBR_FROM_FLAG_IN_TX | LBR_FROM_FLAG_ABORT);
346 }
347 return val;
348 }
349
static __always_inline void wrlbr_from(unsigned int idx, u64 val)
351 {
352 val = lbr_from_signext_quirk_wr(val);
353 wrmsrl(x86_pmu.lbr_from + idx, val);
354 }
355
static __always_inline void wrlbr_to(unsigned int idx, u64 val)
357 {
358 wrmsrl(x86_pmu.lbr_to + idx, val);
359 }
360
static __always_inline void wrlbr_info(unsigned int idx, u64 val)
362 {
363 wrmsrl(x86_pmu.lbr_info + idx, val);
364 }
365
static __always_inline u64 rdlbr_from(unsigned int idx, struct lbr_entry *lbr)
367 {
368 u64 val;
369
370 if (lbr)
371 return lbr->from;
372
373 rdmsrl(x86_pmu.lbr_from + idx, val);
374
375 return lbr_from_signext_quirk_rd(val);
376 }
377
static __always_inline u64 rdlbr_to(unsigned int idx, struct lbr_entry *lbr)
379 {
380 u64 val;
381
382 if (lbr)
383 return lbr->to;
384
385 rdmsrl(x86_pmu.lbr_to + idx, val);
386
387 return val;
388 }
389
static __always_inline u64 rdlbr_info(unsigned int idx, struct lbr_entry *lbr)
391 {
392 u64 val;
393
394 if (lbr)
395 return lbr->info;
396
397 rdmsrl(x86_pmu.lbr_info + idx, val);
398
399 return val;
400 }
401
402 static inline void
wrlbr_all(struct lbr_entry *lbr, unsigned int idx, bool need_info)
404 {
405 wrlbr_from(idx, lbr->from);
406 wrlbr_to(idx, lbr->to);
407 if (need_info)
408 wrlbr_info(idx, lbr->info);
409 }
410
411 static inline bool
rdlbr_all(struct lbr_entry *lbr, unsigned int idx, bool need_info)
413 {
414 u64 from = rdlbr_from(idx, NULL);
415
416 /* Don't read invalid entry */
417 if (!from)
418 return false;
419
420 lbr->from = from;
421 lbr->to = rdlbr_to(idx, NULL);
422 if (need_info)
423 lbr->info = rdlbr_info(idx, NULL);
424
425 return true;
426 }
427
void intel_pmu_lbr_restore(void *ctx)
429 {
430 bool need_info = x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO;
431 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
432 struct x86_perf_task_context *task_ctx = ctx;
433 int i;
434 unsigned lbr_idx, mask;
435 u64 tos = task_ctx->tos;
436
437 mask = x86_pmu.lbr_nr - 1;
438 for (i = 0; i < task_ctx->valid_lbrs; i++) {
439 lbr_idx = (tos - i) & mask;
440 wrlbr_all(&task_ctx->lbr[i], lbr_idx, need_info);
441 }
442
443 for (; i < x86_pmu.lbr_nr; i++) {
444 lbr_idx = (tos - i) & mask;
445 wrlbr_from(lbr_idx, 0);
446 wrlbr_to(lbr_idx, 0);
447 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
448 wrlbr_info(lbr_idx, 0);
449 }
450
451 wrmsrl(x86_pmu.lbr_tos, tos);
452
453 if (cpuc->lbr_select)
454 wrmsrl(MSR_LBR_SELECT, task_ctx->lbr_sel);
455 }
456
static void intel_pmu_arch_lbr_restore(void *ctx)
458 {
459 struct x86_perf_task_context_arch_lbr *task_ctx = ctx;
460 struct lbr_entry *entries = task_ctx->entries;
461 int i;
462
463 /* Fast reset the LBRs before restore if the call stack is not full. */
464 if (!entries[x86_pmu.lbr_nr - 1].from)
465 intel_pmu_arch_lbr_reset();
466
467 for (i = 0; i < x86_pmu.lbr_nr; i++) {
468 if (!entries[i].from)
469 break;
470 wrlbr_all(&entries[i], i, true);
471 }
472 }
473
474 /*
475 * Restore the Architecture LBR state from the xsave area in the perf
476 * context data for the task via the XRSTORS instruction.
477 */
static void intel_pmu_arch_lbr_xrstors(void *ctx)
479 {
480 struct x86_perf_task_context_arch_lbr_xsave *task_ctx = ctx;
481
482 xrstors(&task_ctx->xsave, XFEATURE_MASK_LBR);
483 }
484
static __always_inline bool lbr_is_reset_in_cstate(void *ctx)
486 {
487 if (static_cpu_has(X86_FEATURE_ARCH_LBR))
488 return x86_pmu.lbr_deep_c_reset && !rdlbr_from(0, NULL);
489
490 return !rdlbr_from(((struct x86_perf_task_context *)ctx)->tos, NULL);
491 }
492
static void __intel_pmu_lbr_restore(void *ctx)
494 {
495 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
496
497 if (task_context_opt(ctx)->lbr_callstack_users == 0 ||
498 task_context_opt(ctx)->lbr_stack_state == LBR_NONE) {
499 intel_pmu_lbr_reset();
500 return;
501 }
502
503 /*
 * Do not restore the LBR registers if:
 * - no one else touched them, and
 * - they were not cleared in C-state
507 */
508 if ((ctx == cpuc->last_task_ctx) &&
509 (task_context_opt(ctx)->log_id == cpuc->last_log_id) &&
510 !lbr_is_reset_in_cstate(ctx)) {
511 task_context_opt(ctx)->lbr_stack_state = LBR_NONE;
512 return;
513 }
514
515 x86_pmu.lbr_restore(ctx);
516
517 task_context_opt(ctx)->lbr_stack_state = LBR_NONE;
518 }
519
void intel_pmu_lbr_save(void *ctx)
521 {
522 bool need_info = x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO;
523 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
524 struct x86_perf_task_context *task_ctx = ctx;
525 unsigned lbr_idx, mask;
526 u64 tos;
527 int i;
528
529 mask = x86_pmu.lbr_nr - 1;
530 tos = intel_pmu_lbr_tos();
531 for (i = 0; i < x86_pmu.lbr_nr; i++) {
532 lbr_idx = (tos - i) & mask;
533 if (!rdlbr_all(&task_ctx->lbr[i], lbr_idx, need_info))
534 break;
535 }
536 task_ctx->valid_lbrs = i;
537 task_ctx->tos = tos;
538
539 if (cpuc->lbr_select)
540 rdmsrl(MSR_LBR_SELECT, task_ctx->lbr_sel);
541 }
542
static void intel_pmu_arch_lbr_save(void *ctx)
544 {
545 struct x86_perf_task_context_arch_lbr *task_ctx = ctx;
546 struct lbr_entry *entries = task_ctx->entries;
547 int i;
548
549 for (i = 0; i < x86_pmu.lbr_nr; i++) {
550 if (!rdlbr_all(&entries[i], i, true))
551 break;
552 }
553
554 /* LBR call stack is not full. Reset is required in restore. */
555 if (i < x86_pmu.lbr_nr)
556 entries[x86_pmu.lbr_nr - 1].from = 0;
557 }
558
559 /*
560 * Save the Architecture LBR state to the xsave area in the perf
561 * context data for the task via the XSAVES instruction.
562 */
static void intel_pmu_arch_lbr_xsaves(void *ctx)
564 {
565 struct x86_perf_task_context_arch_lbr_xsave *task_ctx = ctx;
566
567 xsaves(&task_ctx->xsave, XFEATURE_MASK_LBR);
568 }
569
static void __intel_pmu_lbr_save(void *ctx)
571 {
572 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
573
574 if (task_context_opt(ctx)->lbr_callstack_users == 0) {
575 task_context_opt(ctx)->lbr_stack_state = LBR_NONE;
576 return;
577 }
578
579 x86_pmu.lbr_save(ctx);
580
581 task_context_opt(ctx)->lbr_stack_state = LBR_VALID;
582
583 cpuc->last_task_ctx = ctx;
584 cpuc->last_log_id = ++task_context_opt(ctx)->log_id;
585 }
586
void intel_pmu_lbr_swap_task_ctx(struct perf_event_context *prev,
				 struct perf_event_context *next)
589 {
590 void *prev_ctx_data, *next_ctx_data;
591
592 swap(prev->task_ctx_data, next->task_ctx_data);
593
594 /*
595 * Architecture specific synchronization makes sense in
596 * case both prev->task_ctx_data and next->task_ctx_data
597 * pointers are allocated.
598 */
599
600 prev_ctx_data = next->task_ctx_data;
601 next_ctx_data = prev->task_ctx_data;
602
603 if (!prev_ctx_data || !next_ctx_data)
604 return;
605
606 swap(task_context_opt(prev_ctx_data)->lbr_callstack_users,
607 task_context_opt(next_ctx_data)->lbr_callstack_users);
608 }
609
void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
611 {
612 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
613 void *task_ctx;
614
615 if (!cpuc->lbr_users)
616 return;
617
618 /*
619 * If LBR callstack feature is enabled and the stack was saved when
620 * the task was scheduled out, restore the stack. Otherwise flush
621 * the LBR stack.
622 */
623 task_ctx = ctx ? ctx->task_ctx_data : NULL;
624 if (task_ctx) {
625 if (sched_in)
626 __intel_pmu_lbr_restore(task_ctx);
627 else
628 __intel_pmu_lbr_save(task_ctx);
629 return;
630 }
631
632 /*
633 * Since a context switch can flip the address space and LBR entries
634 * are not tagged with an identifier, we need to wipe the LBR, even for
635 * per-cpu events. You simply cannot resolve the branches from the old
636 * address space.
637 */
638 if (sched_in)
639 intel_pmu_lbr_reset();
640 }
641
static inline bool branch_user_callstack(unsigned br_sel)
643 {
644 return (br_sel & X86_BR_USER) && (br_sel & X86_BR_CALL_STACK);
645 }
646
void intel_pmu_lbr_add(struct perf_event *event)
648 {
649 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
650
651 if (!x86_pmu.lbr_nr)
652 return;
653
654 if (event->hw.flags & PERF_X86_EVENT_LBR_SELECT)
655 cpuc->lbr_select = 1;
656
657 cpuc->br_sel = event->hw.branch_reg.reg;
658
659 if (branch_user_callstack(cpuc->br_sel) && event->ctx->task_ctx_data)
660 task_context_opt(event->ctx->task_ctx_data)->lbr_callstack_users++;
661
662 /*
663 * Request pmu::sched_task() callback, which will fire inside the
664 * regular perf event scheduling, so that call will:
665 *
666 * - restore or wipe; when LBR-callstack,
667 * - wipe; otherwise,
668 *
669 * when this is from __perf_event_task_sched_in().
670 *
671 * However, if this is from perf_install_in_context(), no such callback
672 * will follow and we'll need to reset the LBR here if this is the
673 * first LBR event.
674 *
675 * The problem is, we cannot tell these cases apart... but we can
676 * exclude the biggest chunk of cases by looking at
677 * event->total_time_running. An event that has accrued runtime cannot
678 * be 'new'. Conversely, a new event can get installed through the
679 * context switch path for the first time.
680 */
681 if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0)
682 cpuc->lbr_pebs_users++;
683 perf_sched_cb_inc(event->ctx->pmu);
684 if (!cpuc->lbr_users++ && !event->total_time_running)
685 intel_pmu_lbr_reset();
686 }
687
void release_lbr_buffers(void)
689 {
690 struct kmem_cache *kmem_cache;
691 struct cpu_hw_events *cpuc;
692 int cpu;
693
694 if (!static_cpu_has(X86_FEATURE_ARCH_LBR))
695 return;
696
697 for_each_possible_cpu(cpu) {
698 cpuc = per_cpu_ptr(&cpu_hw_events, cpu);
699 kmem_cache = x86_get_pmu(cpu)->task_ctx_cache;
700 if (kmem_cache && cpuc->lbr_xsave) {
701 kmem_cache_free(kmem_cache, cpuc->lbr_xsave);
702 cpuc->lbr_xsave = NULL;
703 }
704 }
705 }
706
void reserve_lbr_buffers(void)
708 {
709 struct kmem_cache *kmem_cache;
710 struct cpu_hw_events *cpuc;
711 int cpu;
712
713 if (!static_cpu_has(X86_FEATURE_ARCH_LBR))
714 return;
715
716 for_each_possible_cpu(cpu) {
717 cpuc = per_cpu_ptr(&cpu_hw_events, cpu);
718 kmem_cache = x86_get_pmu(cpu)->task_ctx_cache;
719 if (!kmem_cache || cpuc->lbr_xsave)
720 continue;
721
722 cpuc->lbr_xsave = kmem_cache_alloc_node(kmem_cache,
723 GFP_KERNEL | __GFP_ZERO,
724 cpu_to_node(cpu));
725 }
726 }
727
void intel_pmu_lbr_del(struct perf_event *event)
729 {
730 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
731
732 if (!x86_pmu.lbr_nr)
733 return;
734
735 if (branch_user_callstack(cpuc->br_sel) &&
736 event->ctx->task_ctx_data)
737 task_context_opt(event->ctx->task_ctx_data)->lbr_callstack_users--;
738
739 if (event->hw.flags & PERF_X86_EVENT_LBR_SELECT)
740 cpuc->lbr_select = 0;
741
742 if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0)
743 cpuc->lbr_pebs_users--;
744 cpuc->lbr_users--;
745 WARN_ON_ONCE(cpuc->lbr_users < 0);
746 WARN_ON_ONCE(cpuc->lbr_pebs_users < 0);
747 perf_sched_cb_dec(event->ctx->pmu);
748 }
749
static inline bool vlbr_exclude_host(void)
751 {
752 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
753
754 return test_bit(INTEL_PMC_IDX_FIXED_VLBR,
755 (unsigned long *)&cpuc->intel_ctrl_guest_mask);
756 }
757
void intel_pmu_lbr_enable_all(bool pmi)
759 {
760 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
761
762 if (cpuc->lbr_users && !vlbr_exclude_host())
763 __intel_pmu_lbr_enable(pmi);
764 }
765
void intel_pmu_lbr_disable_all(void)
767 {
768 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
769
770 if (cpuc->lbr_users && !vlbr_exclude_host()) {
771 if (static_cpu_has(X86_FEATURE_ARCH_LBR))
772 return __intel_pmu_arch_lbr_disable();
773
774 __intel_pmu_lbr_disable();
775 }
776 }
777
void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
779 {
780 unsigned long mask = x86_pmu.lbr_nr - 1;
781 u64 tos = intel_pmu_lbr_tos();
782 int i;
783
784 for (i = 0; i < x86_pmu.lbr_nr; i++) {
785 unsigned long lbr_idx = (tos - i) & mask;
786 union {
787 struct {
788 u32 from;
789 u32 to;
790 };
791 u64 lbr;
792 } msr_lastbranch;
793
794 rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);
795
796 cpuc->lbr_entries[i].from = msr_lastbranch.from;
797 cpuc->lbr_entries[i].to = msr_lastbranch.to;
798 cpuc->lbr_entries[i].mispred = 0;
799 cpuc->lbr_entries[i].predicted = 0;
800 cpuc->lbr_entries[i].in_tx = 0;
801 cpuc->lbr_entries[i].abort = 0;
802 cpuc->lbr_entries[i].cycles = 0;
803 cpuc->lbr_entries[i].type = 0;
804 cpuc->lbr_entries[i].reserved = 0;
805 }
806 cpuc->lbr_stack.nr = i;
807 cpuc->lbr_stack.hw_idx = tos;
808 }
809
810 /*
811 * Due to lack of segmentation in Linux the effective address (offset)
812 * is the same as the linear address, allowing us to merge the LIP and EIP
813 * LBR formats.
814 */
void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
816 {
817 bool need_info = false, call_stack = false;
818 unsigned long mask = x86_pmu.lbr_nr - 1;
819 int lbr_format = x86_pmu.intel_cap.lbr_format;
820 u64 tos = intel_pmu_lbr_tos();
821 int i;
822 int out = 0;
823 int num = x86_pmu.lbr_nr;
824
825 if (cpuc->lbr_sel) {
826 need_info = !(cpuc->lbr_sel->config & LBR_NO_INFO);
827 if (cpuc->lbr_sel->config & LBR_CALL_STACK)
828 call_stack = true;
829 }
830
831 for (i = 0; i < num; i++) {
832 unsigned long lbr_idx = (tos - i) & mask;
833 u64 from, to, mis = 0, pred = 0, in_tx = 0, abort = 0;
834 int skip = 0;
835 u16 cycles = 0;
836 int lbr_flags = lbr_desc[lbr_format];
837
838 from = rdlbr_from(lbr_idx, NULL);
839 to = rdlbr_to(lbr_idx, NULL);
840
841 /*
842 * Read LBR call stack entries
843 * until invalid entry (0s) is detected.
844 */
845 if (call_stack && !from)
846 break;
847
848 if (lbr_format == LBR_FORMAT_INFO && need_info) {
849 u64 info;
850
851 info = rdlbr_info(lbr_idx, NULL);
852 mis = !!(info & LBR_INFO_MISPRED);
853 pred = !mis;
854 in_tx = !!(info & LBR_INFO_IN_TX);
855 abort = !!(info & LBR_INFO_ABORT);
856 cycles = (info & LBR_INFO_CYCLES);
857 }
858
859 if (lbr_format == LBR_FORMAT_TIME) {
860 mis = !!(from & LBR_FROM_FLAG_MISPRED);
861 pred = !mis;
862 skip = 1;
863 cycles = ((to >> 48) & LBR_INFO_CYCLES);
864
865 to = (u64)((((s64)to) << 16) >> 16);
866 }
867
868 if (lbr_flags & LBR_EIP_FLAGS) {
869 mis = !!(from & LBR_FROM_FLAG_MISPRED);
870 pred = !mis;
871 skip = 1;
872 }
873 if (lbr_flags & LBR_TSX) {
874 in_tx = !!(from & LBR_FROM_FLAG_IN_TX);
875 abort = !!(from & LBR_FROM_FLAG_ABORT);
876 skip = 3;
877 }
878 from = (u64)((((s64)from) << skip) >> skip);
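		/*
		 * The shift pair above sign-extends the address after
		 * dropping the flag bits: skip = 1 for the EIP_FLAGS format
		 * strips the mispredict bit, skip = 3 for the TSX format
		 * also strips in_tx/abort from bits 62:61.
		 */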
879
880 /*
881 * Some CPUs report duplicated abort records,
882 * with the second entry not having an abort bit set.
883 * Skip them here. This loop runs backwards,
884 * so we need to undo the previous record.
885 * If the abort just happened outside the window
886 * the extra entry cannot be removed.
887 */
888 if (abort && x86_pmu.lbr_double_abort && out > 0)
889 out--;
890
891 cpuc->lbr_entries[out].from = from;
892 cpuc->lbr_entries[out].to = to;
893 cpuc->lbr_entries[out].mispred = mis;
894 cpuc->lbr_entries[out].predicted = pred;
895 cpuc->lbr_entries[out].in_tx = in_tx;
896 cpuc->lbr_entries[out].abort = abort;
897 cpuc->lbr_entries[out].cycles = cycles;
898 cpuc->lbr_entries[out].type = 0;
899 cpuc->lbr_entries[out].reserved = 0;
900 out++;
901 }
902 cpuc->lbr_stack.nr = out;
903 cpuc->lbr_stack.hw_idx = tos;
904 }
905
static __always_inline int get_lbr_br_type(u64 info)
907 {
908 if (!static_cpu_has(X86_FEATURE_ARCH_LBR) || !x86_pmu.lbr_br_type)
909 return 0;
910
911 return (info & LBR_INFO_BR_TYPE) >> LBR_INFO_BR_TYPE_OFFSET;
912 }
913
static __always_inline bool get_lbr_mispred(u64 info)
915 {
916 if (static_cpu_has(X86_FEATURE_ARCH_LBR) && !x86_pmu.lbr_mispred)
917 return 0;
918
919 return !!(info & LBR_INFO_MISPRED);
920 }
921
static __always_inline bool get_lbr_predicted(u64 info)
923 {
924 if (static_cpu_has(X86_FEATURE_ARCH_LBR) && !x86_pmu.lbr_mispred)
925 return 0;
926
927 return !(info & LBR_INFO_MISPRED);
928 }
929
static __always_inline u16 get_lbr_cycles(u64 info)
931 {
932 if (static_cpu_has(X86_FEATURE_ARCH_LBR) &&
933 !(x86_pmu.lbr_timed_lbr && info & LBR_INFO_CYC_CNT_VALID))
934 return 0;
935
936 return info & LBR_INFO_CYCLES;
937 }
938
static void intel_pmu_store_lbr(struct cpu_hw_events *cpuc,
				struct lbr_entry *entries)
941 {
942 struct perf_branch_entry *e;
943 struct lbr_entry *lbr;
944 u64 from, to, info;
945 int i;
946
947 for (i = 0; i < x86_pmu.lbr_nr; i++) {
948 lbr = entries ? &entries[i] : NULL;
949 e = &cpuc->lbr_entries[i];
950
951 from = rdlbr_from(i, lbr);
952 /*
953 * Read LBR entries until invalid entry (0s) is detected.
954 */
955 if (!from)
956 break;
957
958 to = rdlbr_to(i, lbr);
959 info = rdlbr_info(i, lbr);
960
961 e->from = from;
962 e->to = to;
963 e->mispred = get_lbr_mispred(info);
964 e->predicted = get_lbr_predicted(info);
965 e->in_tx = !!(info & LBR_INFO_IN_TX);
966 e->abort = !!(info & LBR_INFO_ABORT);
967 e->cycles = get_lbr_cycles(info);
968 e->type = get_lbr_br_type(info);
969 e->reserved = 0;
970 }
971
972 cpuc->lbr_stack.nr = i;
973 }
974
static void intel_pmu_arch_lbr_read(struct cpu_hw_events *cpuc)
976 {
977 intel_pmu_store_lbr(cpuc, NULL);
978 }
979
static void intel_pmu_arch_lbr_read_xsave(struct cpu_hw_events *cpuc)
981 {
982 struct x86_perf_task_context_arch_lbr_xsave *xsave = cpuc->lbr_xsave;
983
984 if (!xsave) {
985 intel_pmu_store_lbr(cpuc, NULL);
986 return;
987 }
988 xsaves(&xsave->xsave, XFEATURE_MASK_LBR);
989
990 intel_pmu_store_lbr(cpuc, xsave->lbr.entries);
991 }
992
void intel_pmu_lbr_read(void)
994 {
995 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
996
997 /*
998 * Don't read when all LBRs users are using adaptive PEBS.
999 *
1000 * This could be smarter and actually check the event,
1001 * but this simple approach seems to work for now.
1002 */
1003 if (!cpuc->lbr_users || vlbr_exclude_host() ||
1004 cpuc->lbr_users == cpuc->lbr_pebs_users)
1005 return;
1006
1007 x86_pmu.lbr_read(cpuc);
1008
1009 intel_pmu_lbr_filter(cpuc);
1010 }
1011
1012 /*
1013 * SW filter is used:
1014 * - in case there is no HW filter
1015 * - in case the HW filter has errata or limitations
1016 */
static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
1018 {
1019 u64 br_type = event->attr.branch_sample_type;
1020 int mask = 0;
1021
1022 if (br_type & PERF_SAMPLE_BRANCH_USER)
1023 mask |= X86_BR_USER;
1024
1025 if (br_type & PERF_SAMPLE_BRANCH_KERNEL)
1026 mask |= X86_BR_KERNEL;
1027
1028 /* we ignore BRANCH_HV here */
1029
1030 if (br_type & PERF_SAMPLE_BRANCH_ANY)
1031 mask |= X86_BR_ANY;
1032
1033 if (br_type & PERF_SAMPLE_BRANCH_ANY_CALL)
1034 mask |= X86_BR_ANY_CALL;
1035
1036 if (br_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
1037 mask |= X86_BR_RET | X86_BR_IRET | X86_BR_SYSRET;
1038
1039 if (br_type & PERF_SAMPLE_BRANCH_IND_CALL)
1040 mask |= X86_BR_IND_CALL;
1041
1042 if (br_type & PERF_SAMPLE_BRANCH_ABORT_TX)
1043 mask |= X86_BR_ABORT;
1044
1045 if (br_type & PERF_SAMPLE_BRANCH_IN_TX)
1046 mask |= X86_BR_IN_TX;
1047
1048 if (br_type & PERF_SAMPLE_BRANCH_NO_TX)
1049 mask |= X86_BR_NO_TX;
1050
1051 if (br_type & PERF_SAMPLE_BRANCH_COND)
1052 mask |= X86_BR_JCC;
1053
1054 if (br_type & PERF_SAMPLE_BRANCH_CALL_STACK) {
1055 if (!x86_pmu_has_lbr_callstack())
1056 return -EOPNOTSUPP;
1057 if (mask & ~(X86_BR_USER | X86_BR_KERNEL))
1058 return -EINVAL;
1059 mask |= X86_BR_CALL | X86_BR_IND_CALL | X86_BR_RET |
1060 X86_BR_CALL_STACK;
1061 }
1062
1063 if (br_type & PERF_SAMPLE_BRANCH_IND_JUMP)
1064 mask |= X86_BR_IND_JMP;
1065
1066 if (br_type & PERF_SAMPLE_BRANCH_CALL)
1067 mask |= X86_BR_CALL | X86_BR_ZERO_CALL;
1068
1069 if (br_type & PERF_SAMPLE_BRANCH_TYPE_SAVE)
1070 mask |= X86_BR_TYPE_SAVE;
1071
1072 /*
 * Stash the actual user request into reg; it may
 * be used by fixup code for some CPUs.
1075 */
1076 event->hw.branch_reg.reg = mask;
1077 return 0;
1078 }
1079
1080 /*
1081 * setup the HW LBR filter
1082 * Used only when available, may not be enough to disambiguate
1083 * all branches, may need the help of the SW filter
1084 */
static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
1086 {
1087 struct hw_perf_event_extra *reg;
1088 u64 br_type = event->attr.branch_sample_type;
1089 u64 mask = 0, v;
1090 int i;
1091
1092 for (i = 0; i < PERF_SAMPLE_BRANCH_MAX_SHIFT; i++) {
1093 if (!(br_type & (1ULL << i)))
1094 continue;
1095
1096 v = x86_pmu.lbr_sel_map[i];
1097 if (v == LBR_NOT_SUPP)
1098 return -EOPNOTSUPP;
1099
1100 if (v != LBR_IGN)
1101 mask |= v;
1102 }
1103
1104 reg = &event->hw.branch_reg;
1105 reg->idx = EXTRA_REG_LBR;
1106
1107 if (static_cpu_has(X86_FEATURE_ARCH_LBR)) {
1108 reg->config = mask;
1109 return 0;
1110 }
1111
1112 /*
1113 * The first 9 bits (LBR_SEL_MASK) in LBR_SELECT operate
1114 * in suppress mode. So LBR_SELECT should be set to
1115 * (~mask & LBR_SEL_MASK) | (mask & ~LBR_SEL_MASK)
1116 * But the 10th bit LBR_CALL_STACK does not operate
1117 * in suppress mode.
1118 */
1119 reg->config = mask ^ (x86_pmu.lbr_sel_mask & ~LBR_CALL_STACK);
1120
1121 if ((br_type & PERF_SAMPLE_BRANCH_NO_CYCLES) &&
1122 (br_type & PERF_SAMPLE_BRANCH_NO_FLAGS) &&
1123 (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO))
1124 reg->config |= LBR_NO_INFO;
1125
1126 return 0;
1127 }
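/*
 * Worked example (illustrative request): sampling only conditional
 * branches in both rings gives mask = LBR_USER | LBR_KERNEL | LBR_JCC
 * (0x7). XORing with the suppress-mode bits (LBR_SEL_MASK & ~LBR_CALL_STACK
 * == 0x1ff) yields 0x1f8: ring0/ring3 capture stays enabled, conditional
 * branches stay enabled, and every other branch class is suppressed.
 */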
1128
int intel_pmu_setup_lbr_filter(struct perf_event *event)
1130 {
1131 int ret = 0;
1132
1133 /*
1134 * no LBR on this PMU
1135 */
1136 if (!x86_pmu.lbr_nr)
1137 return -EOPNOTSUPP;
1138
1139 /*
1140 * setup SW LBR filter
1141 */
1142 ret = intel_pmu_setup_sw_lbr_filter(event);
1143 if (ret)
1144 return ret;
1145
1146 /*
1147 * setup HW LBR filter, if any
1148 */
1149 if (x86_pmu.lbr_sel_map)
1150 ret = intel_pmu_setup_hw_lbr_filter(event);
1151
1152 return ret;
1153 }
1154
1155 /*
 * Return the type of control flow change at address "from".
 * The instruction is not necessarily a branch (e.g. in case of an interrupt).
1158 *
1159 * The branch type returned also includes the priv level of the
1160 * target of the control flow change (X86_BR_USER, X86_BR_KERNEL).
1161 *
1162 * If a branch type is unknown OR the instruction cannot be
1163 * decoded (e.g., text page not present), then X86_BR_NONE is
1164 * returned.
1165 */
static int branch_type(unsigned long from, unsigned long to, int abort)
1167 {
1168 struct insn insn;
1169 void *addr;
1170 int bytes_read, bytes_left;
1171 int ret = X86_BR_NONE;
1172 int ext, to_plm, from_plm;
1173 u8 buf[MAX_INSN_SIZE];
1174 int is64 = 0;
1175
1176 to_plm = kernel_ip(to) ? X86_BR_KERNEL : X86_BR_USER;
1177 from_plm = kernel_ip(from) ? X86_BR_KERNEL : X86_BR_USER;
1178
1179 /*
 * May be zero if the LBR did not fill up after a reset by the time
 * we get a PMU interrupt.
1182 */
1183 if (from == 0 || to == 0)
1184 return X86_BR_NONE;
1185
1186 if (abort)
1187 return X86_BR_ABORT | to_plm;
1188
1189 if (from_plm == X86_BR_USER) {
1190 /*
1191 * can happen if measuring at the user level only
1192 * and we interrupt in a kernel thread, e.g., idle.
1193 */
1194 if (!current->mm)
1195 return X86_BR_NONE;
1196
1197 /* may fail if text not present */
1198 bytes_left = copy_from_user_nmi(buf, (void __user *)from,
1199 MAX_INSN_SIZE);
1200 bytes_read = MAX_INSN_SIZE - bytes_left;
1201 if (!bytes_read)
1202 return X86_BR_NONE;
1203
1204 addr = buf;
1205 } else {
1206 /*
1207 * The LBR logs any address in the IP, even if the IP just
1208 * faulted. This means userspace can control the from address.
1209 * Ensure we don't blindly read any address by validating it is
1210 * a known text address.
1211 */
1212 if (kernel_text_address(from)) {
1213 addr = (void *)from;
1214 /*
1215 * Assume we can get the maximum possible size
1216 * when grabbing kernel data. This is not
1217 * _strictly_ true since we could possibly be
1218 * executing up next to a memory hole, but
1219 * it is very unlikely to be a problem.
1220 */
1221 bytes_read = MAX_INSN_SIZE;
1222 } else {
1223 return X86_BR_NONE;
1224 }
1225 }
1226
1227 /*
1228 * decoder needs to know the ABI especially
1229 * on 64-bit systems running 32-bit apps
1230 */
1231 #ifdef CONFIG_X86_64
1232 is64 = kernel_ip((unsigned long)addr) || any_64bit_mode(current_pt_regs());
1233 #endif
1234 insn_init(&insn, addr, bytes_read, is64);
1235 if (insn_get_opcode(&insn))
1236 return X86_BR_ABORT;
1237
1238 switch (insn.opcode.bytes[0]) {
1239 case 0xf:
1240 switch (insn.opcode.bytes[1]) {
1241 case 0x05: /* syscall */
1242 case 0x34: /* sysenter */
1243 ret = X86_BR_SYSCALL;
1244 break;
1245 case 0x07: /* sysret */
1246 case 0x35: /* sysexit */
1247 ret = X86_BR_SYSRET;
1248 break;
1249 case 0x80 ... 0x8f: /* conditional */
1250 ret = X86_BR_JCC;
1251 break;
1252 default:
1253 ret = X86_BR_NONE;
1254 }
1255 break;
1256 case 0x70 ... 0x7f: /* conditional */
1257 ret = X86_BR_JCC;
1258 break;
1259 case 0xc2: /* near ret */
1260 case 0xc3: /* near ret */
1261 case 0xca: /* far ret */
1262 case 0xcb: /* far ret */
1263 ret = X86_BR_RET;
1264 break;
1265 case 0xcf: /* iret */
1266 ret = X86_BR_IRET;
1267 break;
1268 case 0xcc ... 0xce: /* int */
1269 ret = X86_BR_INT;
1270 break;
1271 case 0xe8: /* call near rel */
1272 if (insn_get_immediate(&insn) || insn.immediate1.value == 0) {
1273 /* zero length call */
1274 ret = X86_BR_ZERO_CALL;
1275 break;
1276 }
1277 fallthrough;
1278 case 0x9a: /* call far absolute */
1279 ret = X86_BR_CALL;
1280 break;
1281 case 0xe0 ... 0xe3: /* loop jmp */
1282 ret = X86_BR_JCC;
1283 break;
1284 case 0xe9 ... 0xeb: /* jmp */
1285 ret = X86_BR_JMP;
1286 break;
1287 case 0xff: /* call near absolute, call far absolute ind */
1288 if (insn_get_modrm(&insn))
1289 return X86_BR_ABORT;
1290
1291 ext = (insn.modrm.bytes[0] >> 3) & 0x7;
1292 switch (ext) {
1293 case 2: /* near ind call */
1294 case 3: /* far ind call */
1295 ret = X86_BR_IND_CALL;
1296 break;
1297 case 4:
1298 case 5:
1299 ret = X86_BR_IND_JMP;
1300 break;
1301 }
1302 break;
1303 default:
1304 ret = X86_BR_NONE;
1305 }
1306 /*
 * Interrupts, traps, faults (and thus ring transitions) may
 * occur on any instruction. Thus, to classify them correctly,
 * we need to first look at the from and to priv levels. If they
 * are different and to is in the kernel, then it indicates
 * a ring transition. If the from instruction is not a ring
 * transition instr (syscall, sysenter, int), then it means
 * it was an irq, trap or fault.
 *
 * We have no way of detecting kernel-to-kernel faults.
1316 */
1317 if (from_plm == X86_BR_USER && to_plm == X86_BR_KERNEL
1318 && ret != X86_BR_SYSCALL && ret != X86_BR_INT)
1319 ret = X86_BR_IRQ;
1320
1321 /*
1322 * branch priv level determined by target as
1323 * is done by HW when LBR_SELECT is implemented
1324 */
1325 if (ret != X86_BR_NONE)
1326 ret |= to_plm;
1327
1328 return ret;
1329 }
1330
1331 #define X86_BR_TYPE_MAP_MAX 16
1332
1333 static int branch_map[X86_BR_TYPE_MAP_MAX] = {
1334 PERF_BR_CALL, /* X86_BR_CALL */
1335 PERF_BR_RET, /* X86_BR_RET */
1336 PERF_BR_SYSCALL, /* X86_BR_SYSCALL */
1337 PERF_BR_SYSRET, /* X86_BR_SYSRET */
1338 PERF_BR_UNKNOWN, /* X86_BR_INT */
1339 PERF_BR_UNKNOWN, /* X86_BR_IRET */
1340 PERF_BR_COND, /* X86_BR_JCC */
1341 PERF_BR_UNCOND, /* X86_BR_JMP */
1342 PERF_BR_UNKNOWN, /* X86_BR_IRQ */
1343 PERF_BR_IND_CALL, /* X86_BR_IND_CALL */
1344 PERF_BR_UNKNOWN, /* X86_BR_ABORT */
1345 PERF_BR_UNKNOWN, /* X86_BR_IN_TX */
1346 PERF_BR_UNKNOWN, /* X86_BR_NO_TX */
1347 PERF_BR_CALL, /* X86_BR_ZERO_CALL */
1348 PERF_BR_UNKNOWN, /* X86_BR_CALL_STACK */
1349 PERF_BR_IND, /* X86_BR_IND_JMP */
1350 };
1351
1352 static int
common_branch_type(int type)
1354 {
1355 int i;
1356
1357 type >>= 2; /* skip X86_BR_USER and X86_BR_KERNEL */
1358
1359 if (type) {
1360 i = __ffs(type);
1361 if (i < X86_BR_TYPE_MAP_MAX)
1362 return branch_map[i];
1363 }
1364
1365 return PERF_BR_UNKNOWN;
1366 }
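/*
 * For example, a filtered entry of type X86_BR_JCC | X86_BR_KERNEL
 * (bits 8 and 1) becomes bit 6 after the shift, so __ffs() selects
 * branch_map[6] == PERF_BR_COND.
 */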
1367
1368 enum {
1369 ARCH_LBR_BR_TYPE_JCC = 0,
1370 ARCH_LBR_BR_TYPE_NEAR_IND_JMP = 1,
1371 ARCH_LBR_BR_TYPE_NEAR_REL_JMP = 2,
1372 ARCH_LBR_BR_TYPE_NEAR_IND_CALL = 3,
1373 ARCH_LBR_BR_TYPE_NEAR_REL_CALL = 4,
1374 ARCH_LBR_BR_TYPE_NEAR_RET = 5,
1375 ARCH_LBR_BR_TYPE_KNOWN_MAX = ARCH_LBR_BR_TYPE_NEAR_RET,
1376
1377 ARCH_LBR_BR_TYPE_MAP_MAX = 16,
1378 };
1379
1380 static const int arch_lbr_br_type_map[ARCH_LBR_BR_TYPE_MAP_MAX] = {
1381 [ARCH_LBR_BR_TYPE_JCC] = X86_BR_JCC,
1382 [ARCH_LBR_BR_TYPE_NEAR_IND_JMP] = X86_BR_IND_JMP,
1383 [ARCH_LBR_BR_TYPE_NEAR_REL_JMP] = X86_BR_JMP,
1384 [ARCH_LBR_BR_TYPE_NEAR_IND_CALL] = X86_BR_IND_CALL,
1385 [ARCH_LBR_BR_TYPE_NEAR_REL_CALL] = X86_BR_CALL,
1386 [ARCH_LBR_BR_TYPE_NEAR_RET] = X86_BR_RET,
1387 };
1388
1389 /*
1390 * implement actual branch filter based on user demand.
1391 * Hardware may not exactly satisfy that request, thus
1392 * we need to inspect opcodes. Mismatched branches are
1393 * discarded. Therefore, the number of branches returned
1394 * in PERF_SAMPLE_BRANCH_STACK sample may vary.
1395 */
1396 static void
intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
1398 {
1399 u64 from, to;
1400 int br_sel = cpuc->br_sel;
1401 int i, j, type, to_plm;
1402 bool compress = false;
1403
1404 /* if sampling all branches, then nothing to filter */
1405 if (((br_sel & X86_BR_ALL) == X86_BR_ALL) &&
1406 ((br_sel & X86_BR_TYPE_SAVE) != X86_BR_TYPE_SAVE))
1407 return;
1408
1409 for (i = 0; i < cpuc->lbr_stack.nr; i++) {
1410
1411 from = cpuc->lbr_entries[i].from;
1412 to = cpuc->lbr_entries[i].to;
1413 type = cpuc->lbr_entries[i].type;
1414
1415 /*
1416 * Parse the branch type recorded in LBR_x_INFO MSR.
 * OTHER_BRANCH decoding is not supported for now; the
 * OTHER_BRANCH branch type still relies on software decoding.
1419 */
1420 if (static_cpu_has(X86_FEATURE_ARCH_LBR) &&
1421 type <= ARCH_LBR_BR_TYPE_KNOWN_MAX) {
1422 to_plm = kernel_ip(to) ? X86_BR_KERNEL : X86_BR_USER;
1423 type = arch_lbr_br_type_map[type] | to_plm;
1424 } else
1425 type = branch_type(from, to, cpuc->lbr_entries[i].abort);
1426 if (type != X86_BR_NONE && (br_sel & X86_BR_ANYTX)) {
1427 if (cpuc->lbr_entries[i].in_tx)
1428 type |= X86_BR_IN_TX;
1429 else
1430 type |= X86_BR_NO_TX;
1431 }
1432
1433 /* if type does not correspond, then discard */
1434 if (type == X86_BR_NONE || (br_sel & type) != type) {
1435 cpuc->lbr_entries[i].from = 0;
1436 compress = true;
1437 }
1438
1439 if ((br_sel & X86_BR_TYPE_SAVE) == X86_BR_TYPE_SAVE)
1440 cpuc->lbr_entries[i].type = common_branch_type(type);
1441 }
1442
1443 if (!compress)
1444 return;
1445
1446 /* remove all entries with from=0 */
1447 for (i = 0; i < cpuc->lbr_stack.nr; ) {
1448 if (!cpuc->lbr_entries[i].from) {
1449 j = i;
1450 while (++j < cpuc->lbr_stack.nr)
1451 cpuc->lbr_entries[j-1] = cpuc->lbr_entries[j];
1452 cpuc->lbr_stack.nr--;
1453 if (!cpuc->lbr_entries[i].from)
1454 continue;
1455 }
1456 i++;
1457 }
1458 }
1459
void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr)
1461 {
1462 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1463
1464 /* Cannot get TOS for large PEBS and Arch LBR */
1465 if (static_cpu_has(X86_FEATURE_ARCH_LBR) ||
1466 (cpuc->n_pebs == cpuc->n_large_pebs))
1467 cpuc->lbr_stack.hw_idx = -1ULL;
1468 else
1469 cpuc->lbr_stack.hw_idx = intel_pmu_lbr_tos();
1470
1471 intel_pmu_store_lbr(cpuc, lbr);
1472 intel_pmu_lbr_filter(cpuc);
1473 }
1474
1475 /*
1476 * Map interface branch filters onto LBR filters
1477 */
1478 static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
1479 [PERF_SAMPLE_BRANCH_ANY_SHIFT] = LBR_ANY,
1480 [PERF_SAMPLE_BRANCH_USER_SHIFT] = LBR_USER,
1481 [PERF_SAMPLE_BRANCH_KERNEL_SHIFT] = LBR_KERNEL,
1482 [PERF_SAMPLE_BRANCH_HV_SHIFT] = LBR_IGN,
1483 [PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT] = LBR_RETURN | LBR_REL_JMP
1484 | LBR_IND_JMP | LBR_FAR,
1485 /*
1486 * NHM/WSM erratum: must include REL_JMP+IND_JMP to get CALL branches
1487 */
1488 [PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] =
1489 LBR_REL_CALL | LBR_IND_CALL | LBR_REL_JMP | LBR_IND_JMP | LBR_FAR,
1490 /*
1491 * NHM/WSM erratum: must include IND_JMP to capture IND_CALL
1492 */
1493 [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_IND_CALL | LBR_IND_JMP,
1494 [PERF_SAMPLE_BRANCH_COND_SHIFT] = LBR_JCC,
1495 [PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT] = LBR_IND_JMP,
1496 };
1497
1498 static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
1499 [PERF_SAMPLE_BRANCH_ANY_SHIFT] = LBR_ANY,
1500 [PERF_SAMPLE_BRANCH_USER_SHIFT] = LBR_USER,
1501 [PERF_SAMPLE_BRANCH_KERNEL_SHIFT] = LBR_KERNEL,
1502 [PERF_SAMPLE_BRANCH_HV_SHIFT] = LBR_IGN,
1503 [PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT] = LBR_RETURN | LBR_FAR,
1504 [PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] = LBR_REL_CALL | LBR_IND_CALL
1505 | LBR_FAR,
1506 [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_IND_CALL,
1507 [PERF_SAMPLE_BRANCH_COND_SHIFT] = LBR_JCC,
1508 [PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT] = LBR_IND_JMP,
1509 [PERF_SAMPLE_BRANCH_CALL_SHIFT] = LBR_REL_CALL,
1510 };
1511
1512 static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
1513 [PERF_SAMPLE_BRANCH_ANY_SHIFT] = LBR_ANY,
1514 [PERF_SAMPLE_BRANCH_USER_SHIFT] = LBR_USER,
1515 [PERF_SAMPLE_BRANCH_KERNEL_SHIFT] = LBR_KERNEL,
1516 [PERF_SAMPLE_BRANCH_HV_SHIFT] = LBR_IGN,
1517 [PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT] = LBR_RETURN | LBR_FAR,
1518 [PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] = LBR_REL_CALL | LBR_IND_CALL
1519 | LBR_FAR,
1520 [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_IND_CALL,
1521 [PERF_SAMPLE_BRANCH_COND_SHIFT] = LBR_JCC,
1522 [PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] = LBR_REL_CALL | LBR_IND_CALL
1523 | LBR_RETURN | LBR_CALL_STACK,
1524 [PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT] = LBR_IND_JMP,
1525 [PERF_SAMPLE_BRANCH_CALL_SHIFT] = LBR_REL_CALL,
1526 };
1527
1528 static int arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
1529 [PERF_SAMPLE_BRANCH_ANY_SHIFT] = ARCH_LBR_ANY,
1530 [PERF_SAMPLE_BRANCH_USER_SHIFT] = ARCH_LBR_USER,
1531 [PERF_SAMPLE_BRANCH_KERNEL_SHIFT] = ARCH_LBR_KERNEL,
1532 [PERF_SAMPLE_BRANCH_HV_SHIFT] = LBR_IGN,
1533 [PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT] = ARCH_LBR_RETURN |
1534 ARCH_LBR_OTHER_BRANCH,
1535 [PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] = ARCH_LBR_REL_CALL |
1536 ARCH_LBR_IND_CALL |
1537 ARCH_LBR_OTHER_BRANCH,
1538 [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = ARCH_LBR_IND_CALL,
1539 [PERF_SAMPLE_BRANCH_COND_SHIFT] = ARCH_LBR_JCC,
1540 [PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] = ARCH_LBR_REL_CALL |
1541 ARCH_LBR_IND_CALL |
1542 ARCH_LBR_RETURN |
1543 ARCH_LBR_CALL_STACK,
1544 [PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT] = ARCH_LBR_IND_JMP,
1545 [PERF_SAMPLE_BRANCH_CALL_SHIFT] = ARCH_LBR_REL_CALL,
1546 };
1547
1548 /* core */
void __init intel_pmu_lbr_init_core(void)
1550 {
1551 x86_pmu.lbr_nr = 4;
1552 x86_pmu.lbr_tos = MSR_LBR_TOS;
1553 x86_pmu.lbr_from = MSR_LBR_CORE_FROM;
1554 x86_pmu.lbr_to = MSR_LBR_CORE_TO;
1555
1556 /*
1557 * SW branch filter usage:
1558 * - compensate for lack of HW filter
1559 */
1560 }
1561
1562 /* nehalem/westmere */
void __init intel_pmu_lbr_init_nhm(void)
1564 {
1565 x86_pmu.lbr_nr = 16;
1566 x86_pmu.lbr_tos = MSR_LBR_TOS;
1567 x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
1568 x86_pmu.lbr_to = MSR_LBR_NHM_TO;
1569
1570 x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1571 x86_pmu.lbr_sel_map = nhm_lbr_sel_map;
1572
1573 /*
1574 * SW branch filter usage:
1575 * - workaround LBR_SEL errata (see above)
1576 * - support syscall, sysret capture.
1577 * That requires LBR_FAR but that means far
1578 * jmp need to be filtered out
1579 */
1580 }
1581
1582 /* sandy bridge */
void __init intel_pmu_lbr_init_snb(void)
1584 {
1585 x86_pmu.lbr_nr = 16;
1586 x86_pmu.lbr_tos = MSR_LBR_TOS;
1587 x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
1588 x86_pmu.lbr_to = MSR_LBR_NHM_TO;
1589
1590 x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1591 x86_pmu.lbr_sel_map = snb_lbr_sel_map;
1592
1593 /*
1594 * SW branch filter usage:
1595 * - support syscall, sysret capture.
1596 * That requires LBR_FAR but that means far
1597 * jmp need to be filtered out
1598 */
1599 }
1600
1601 static inline struct kmem_cache *
create_lbr_kmem_cache(size_t size, size_t align)
1603 {
1604 return kmem_cache_create("x86_lbr", size, align, 0, NULL);
1605 }
1606
1607 /* haswell */
void intel_pmu_lbr_init_hsw(void)
1609 {
1610 size_t size = sizeof(struct x86_perf_task_context);
1611
1612 x86_pmu.lbr_nr = 16;
1613 x86_pmu.lbr_tos = MSR_LBR_TOS;
1614 x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
1615 x86_pmu.lbr_to = MSR_LBR_NHM_TO;
1616
1617 x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1618 x86_pmu.lbr_sel_map = hsw_lbr_sel_map;
1619
1620 x86_get_pmu(smp_processor_id())->task_ctx_cache = create_lbr_kmem_cache(size, 0);
1621
1622 if (lbr_from_signext_quirk_needed())
1623 static_branch_enable(&lbr_from_quirk_key);
1624 }
1625
1626 /* skylake */
__init void intel_pmu_lbr_init_skl(void)
1628 {
1629 size_t size = sizeof(struct x86_perf_task_context);
1630
1631 x86_pmu.lbr_nr = 32;
1632 x86_pmu.lbr_tos = MSR_LBR_TOS;
1633 x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
1634 x86_pmu.lbr_to = MSR_LBR_NHM_TO;
1635 x86_pmu.lbr_info = MSR_LBR_INFO_0;
1636
1637 x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1638 x86_pmu.lbr_sel_map = hsw_lbr_sel_map;
1639
1640 x86_get_pmu(smp_processor_id())->task_ctx_cache = create_lbr_kmem_cache(size, 0);
1641
1642 /*
1643 * SW branch filter usage:
1644 * - support syscall, sysret capture.
1645 * That requires LBR_FAR but that means far
1646 * jmp need to be filtered out
1647 */
1648 }
1649
1650 /* atom */
void __init intel_pmu_lbr_init_atom(void)
1652 {
1653 /*
 * Only models starting at stepping 10 seem
 * to have an operational LBR which can freeze
 * on PMU interrupt.
1657 */
1658 if (boot_cpu_data.x86_model == 28
1659 && boot_cpu_data.x86_stepping < 10) {
1660 pr_cont("LBR disabled due to erratum");
1661 return;
1662 }
1663
1664 x86_pmu.lbr_nr = 8;
1665 x86_pmu.lbr_tos = MSR_LBR_TOS;
1666 x86_pmu.lbr_from = MSR_LBR_CORE_FROM;
1667 x86_pmu.lbr_to = MSR_LBR_CORE_TO;
1668
1669 /*
1670 * SW branch filter usage:
1671 * - compensate for lack of HW filter
1672 */
1673 }
1674
1675 /* slm */
void __init intel_pmu_lbr_init_slm(void)
1677 {
1678 x86_pmu.lbr_nr = 8;
1679 x86_pmu.lbr_tos = MSR_LBR_TOS;
1680 x86_pmu.lbr_from = MSR_LBR_CORE_FROM;
1681 x86_pmu.lbr_to = MSR_LBR_CORE_TO;
1682
1683 x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1684 x86_pmu.lbr_sel_map = nhm_lbr_sel_map;
1685
1686 /*
1687 * SW branch filter usage:
1688 * - compensate for lack of HW filter
1689 */
1690 pr_cont("8-deep LBR, ");
1691 }
1692
1693 /* Knights Landing */
void intel_pmu_lbr_init_knl(void)
1695 {
1696 x86_pmu.lbr_nr = 8;
1697 x86_pmu.lbr_tos = MSR_LBR_TOS;
1698 x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
1699 x86_pmu.lbr_to = MSR_LBR_NHM_TO;
1700
1701 x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1702 x86_pmu.lbr_sel_map = snb_lbr_sel_map;
1703
1704 /* Knights Landing does have MISPREDICT bit */
1705 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_LIP)
1706 x86_pmu.intel_cap.lbr_format = LBR_FORMAT_EIP_FLAGS;
1707 }
1708
1709 /*
1710 * LBR state size is variable based on the max number of registers.
1711 * This calculates the expected state size, which should match
1712 * what the hardware enumerates for the size of XFEATURE_LBR.
1713 */
static inline unsigned int get_lbr_state_size(void)
1715 {
1716 return sizeof(struct arch_lbr_state) +
1717 x86_pmu.lbr_nr * sizeof(struct lbr_entry);
1718 }
1719
static bool is_arch_lbr_xsave_available(void)
1721 {
1722 if (!boot_cpu_has(X86_FEATURE_XSAVES))
1723 return false;
1724
1725 /*
1726 * Check the LBR state with the corresponding software structure.
1727 * Disable LBR XSAVES support if the size doesn't match.
1728 */
1729 if (WARN_ON(xfeature_size(XFEATURE_LBR) != get_lbr_state_size()))
1730 return false;
1731
1732 return true;
1733 }
1734
void __init intel_pmu_arch_lbr_init(void)
1736 {
1737 struct pmu *pmu = x86_get_pmu(smp_processor_id());
1738 union cpuid28_eax eax;
1739 union cpuid28_ebx ebx;
1740 union cpuid28_ecx ecx;
1741 unsigned int unused_edx;
1742 bool arch_lbr_xsave;
1743 size_t size;
1744 u64 lbr_nr;
1745
1746 /* Arch LBR Capabilities */
1747 cpuid(28, &eax.full, &ebx.full, &ecx.full, &unused_edx);
1748
1749 lbr_nr = fls(eax.split.lbr_depth_mask) * 8;
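	/*
	 * CPUID.1CH:EAX[7:0] is a bitmap of supported depths in multiples
	 * of eight, so e.g. (illustrative value) a mask of 0x7 means depths
	 * 8/16/24 are supported and fls() * 8 picks the deepest, 24.
	 */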
1750 if (!lbr_nr)
1751 goto clear_arch_lbr;
1752
1753 /* Apply the max depth of Arch LBR */
1754 if (wrmsrl_safe(MSR_ARCH_LBR_DEPTH, lbr_nr))
1755 goto clear_arch_lbr;
1756
1757 x86_pmu.lbr_depth_mask = eax.split.lbr_depth_mask;
1758 x86_pmu.lbr_deep_c_reset = eax.split.lbr_deep_c_reset;
1759 x86_pmu.lbr_lip = eax.split.lbr_lip;
1760 x86_pmu.lbr_cpl = ebx.split.lbr_cpl;
1761 x86_pmu.lbr_filter = ebx.split.lbr_filter;
1762 x86_pmu.lbr_call_stack = ebx.split.lbr_call_stack;
1763 x86_pmu.lbr_mispred = ecx.split.lbr_mispred;
1764 x86_pmu.lbr_timed_lbr = ecx.split.lbr_timed_lbr;
1765 x86_pmu.lbr_br_type = ecx.split.lbr_br_type;
1766 x86_pmu.lbr_nr = lbr_nr;
1767
1768
1769 arch_lbr_xsave = is_arch_lbr_xsave_available();
1770 if (arch_lbr_xsave) {
1771 size = sizeof(struct x86_perf_task_context_arch_lbr_xsave) +
1772 get_lbr_state_size();
1773 pmu->task_ctx_cache = create_lbr_kmem_cache(size,
1774 XSAVE_ALIGNMENT);
1775 }
1776
1777 if (!pmu->task_ctx_cache) {
1778 arch_lbr_xsave = false;
1779
1780 size = sizeof(struct x86_perf_task_context_arch_lbr) +
1781 lbr_nr * sizeof(struct lbr_entry);
1782 pmu->task_ctx_cache = create_lbr_kmem_cache(size, 0);
1783 }
1784
1785 x86_pmu.lbr_from = MSR_ARCH_LBR_FROM_0;
1786 x86_pmu.lbr_to = MSR_ARCH_LBR_TO_0;
1787 x86_pmu.lbr_info = MSR_ARCH_LBR_INFO_0;
1788
1789 /* LBR callstack requires both CPL and Branch Filtering support */
1790 if (!x86_pmu.lbr_cpl ||
1791 !x86_pmu.lbr_filter ||
1792 !x86_pmu.lbr_call_stack)
1793 arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] = LBR_NOT_SUPP;
1794
1795 if (!x86_pmu.lbr_cpl) {
1796 arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_USER_SHIFT] = LBR_NOT_SUPP;
1797 arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_KERNEL_SHIFT] = LBR_NOT_SUPP;
1798 } else if (!x86_pmu.lbr_filter) {
1799 arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_ANY_SHIFT] = LBR_NOT_SUPP;
1800 arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT] = LBR_NOT_SUPP;
1801 arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] = LBR_NOT_SUPP;
1802 arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_NOT_SUPP;
1803 arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_COND_SHIFT] = LBR_NOT_SUPP;
1804 arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT] = LBR_NOT_SUPP;
1805 arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_CALL_SHIFT] = LBR_NOT_SUPP;
1806 }
1807
1808 x86_pmu.lbr_ctl_mask = ARCH_LBR_CTL_MASK;
1809 x86_pmu.lbr_ctl_map = arch_lbr_ctl_map;
1810
1811 if (!x86_pmu.lbr_cpl && !x86_pmu.lbr_filter)
1812 x86_pmu.lbr_ctl_map = NULL;
1813
1814 x86_pmu.lbr_reset = intel_pmu_arch_lbr_reset;
1815 if (arch_lbr_xsave) {
1816 x86_pmu.lbr_save = intel_pmu_arch_lbr_xsaves;
1817 x86_pmu.lbr_restore = intel_pmu_arch_lbr_xrstors;
1818 x86_pmu.lbr_read = intel_pmu_arch_lbr_read_xsave;
1819 pr_cont("XSAVE ");
1820 } else {
1821 x86_pmu.lbr_save = intel_pmu_arch_lbr_save;
1822 x86_pmu.lbr_restore = intel_pmu_arch_lbr_restore;
1823 x86_pmu.lbr_read = intel_pmu_arch_lbr_read;
1824 }
1825
1826 pr_cont("Architectural LBR, ");
1827
1828 return;
1829
1830 clear_arch_lbr:
1831 clear_cpu_cap(&boot_cpu_data, X86_FEATURE_ARCH_LBR);
1832 }
1833
1834 /**
1835 * x86_perf_get_lbr - get the LBR records information
1836 *
1837 * @lbr: the caller's memory to store the LBR records information
1838 *
1839 * Returns: 0 indicates the LBR info has been successfully obtained
1840 */
int x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
1842 {
1843 int lbr_fmt = x86_pmu.intel_cap.lbr_format;
1844
1845 lbr->nr = x86_pmu.lbr_nr;
1846 lbr->from = x86_pmu.lbr_from;
1847 lbr->to = x86_pmu.lbr_to;
1848 lbr->info = (lbr_fmt == LBR_FORMAT_INFO) ? x86_pmu.lbr_info : 0;
1849
1850 return 0;
1851 }
1852 EXPORT_SYMBOL_GPL(x86_perf_get_lbr);
1853
1854 struct event_constraint vlbr_constraint =
1855 __EVENT_CONSTRAINT(INTEL_FIXED_VLBR_EVENT, (1ULL << INTEL_PMC_IDX_FIXED_VLBR),
1856 FIXED_EVENT_FLAGS, 1, 0, PERF_X86_EVENT_LBR_SELECT);
1857