// SPDX-License-Identifier: GPL-2.0
#include <linux/hw_breakpoint.h>
#include <linux/err.h>
#include <linux/zalloc.h>
#include <dirent.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/param.h>
#include "term.h"
#include "build-id.h"
#include "evlist.h"
#include "evsel.h"
#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include "parse-events.h"
#include <subcmd/exec-cmd.h>
#include "string2.h"
#include "strlist.h"
#include "bpf-loader.h"
#include "debug.h"
#include <api/fs/tracing_path.h>
#include <perf/cpumap.h>
#include "parse-events-bison.h"
#define YY_EXTRA_TYPE void*
#include "parse-events-flex.h"
#include "pmu.h"
#include "thread_map.h"
#include "probe-file.h"
#include "asm/bug.h"
#include "util/parse-branch-options.h"
#include "metricgroup.h"
#include "util/evsel_config.h"
#include "util/event.h"
#include "util/pfm.h"
#include "util/parse-events-hybrid.h"
#include "util/pmu-hybrid.h"
#include "perf.h"

#define MAX_NAME_LEN 100

#ifdef PARSER_DEBUG
extern int parse_events_debug;
#endif
int parse_events_parse(void *parse_state, void *scanner);
static int get_config_terms(struct list_head *head_config,
			    struct list_head *head_terms __maybe_unused);
static int parse_events__with_hybrid_pmu(struct parse_events_state *parse_state,
					 const char *str, char *pmu_name,
					 struct list_head *list);

static struct perf_pmu_event_symbol *perf_pmu_events_list;
/*
 * The variable indicates the number of supported pmu event symbols.
 * 0 means not initialized and ready to init
 * -1 means failed to init, don't try anymore
 * >0 is the number of supported pmu event symbols
 */
static int perf_pmu_events_list_num;

struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = {
		.symbol = "cpu-cycles",
		.alias  = "cycles",
	},
	[PERF_COUNT_HW_INSTRUCTIONS] = {
		.symbol = "instructions",
		.alias  = "",
	},
	[PERF_COUNT_HW_CACHE_REFERENCES] = {
		.symbol = "cache-references",
		.alias  = "",
	},
	[PERF_COUNT_HW_CACHE_MISSES] = {
		.symbol = "cache-misses",
		.alias  = "",
	},
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = {
		.symbol = "branch-instructions",
		.alias  = "branches",
	},
	[PERF_COUNT_HW_BRANCH_MISSES] = {
		.symbol = "branch-misses",
		.alias  = "",
	},
	[PERF_COUNT_HW_BUS_CYCLES] = {
		.symbol = "bus-cycles",
		.alias  = "",
	},
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = {
		.symbol = "stalled-cycles-frontend",
		.alias  = "idle-cycles-frontend",
	},
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = {
		.symbol = "stalled-cycles-backend",
		.alias  = "idle-cycles-backend",
	},
	[PERF_COUNT_HW_REF_CPU_CYCLES] = {
		.symbol = "ref-cycles",
		.alias  = "",
	},
};

struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
	[PERF_COUNT_SW_CPU_CLOCK] = {
		.symbol = "cpu-clock",
		.alias  = "",
	},
	[PERF_COUNT_SW_TASK_CLOCK] = {
		.symbol = "task-clock",
		.alias  = "",
	},
	[PERF_COUNT_SW_PAGE_FAULTS] = {
		.symbol = "page-faults",
		.alias  = "faults",
	},
	[PERF_COUNT_SW_CONTEXT_SWITCHES] = {
		.symbol = "context-switches",
		.alias  = "cs",
	},
	[PERF_COUNT_SW_CPU_MIGRATIONS] = {
		.symbol = "cpu-migrations",
		.alias  = "migrations",
	},
	[PERF_COUNT_SW_PAGE_FAULTS_MIN] = {
		.symbol = "minor-faults",
		.alias  = "",
	},
	[PERF_COUNT_SW_PAGE_FAULTS_MAJ] = {
		.symbol = "major-faults",
		.alias  = "",
	},
	[PERF_COUNT_SW_ALIGNMENT_FAULTS] = {
		.symbol = "alignment-faults",
		.alias  = "",
	},
	[PERF_COUNT_SW_EMULATION_FAULTS] = {
		.symbol = "emulation-faults",
		.alias  = "",
	},
	[PERF_COUNT_SW_DUMMY] = {
		.symbol = "dummy",
		.alias  = "",
	},
	[PERF_COUNT_SW_BPF_OUTPUT] = {
		.symbol = "bpf-output",
		.alias  = "",
	},
	[PERF_COUNT_SW_CGROUP_SWITCHES] = {
		.symbol = "cgroup-switches",
		.alias  = "",
	},
};

156
157 #define __PERF_EVENT_FIELD(config, name) \
158 ((config & PERF_EVENT_##name##_MASK) >> PERF_EVENT_##name##_SHIFT)
159
160 #define PERF_EVENT_RAW(config) __PERF_EVENT_FIELD(config, RAW)
161 #define PERF_EVENT_CONFIG(config) __PERF_EVENT_FIELD(config, CONFIG)
162 #define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE)
163 #define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT)
164
165 #define for_each_subsystem(sys_dir, sys_dirent) \
166 while ((sys_dirent = readdir(sys_dir)) != NULL) \
167 if (sys_dirent->d_type == DT_DIR && \
168 (strcmp(sys_dirent->d_name, ".")) && \
169 (strcmp(sys_dirent->d_name, "..")))
170
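/*
 * Return 0 if the event directory contains an "id" file (i.e. it is a real
 * tracepoint directory), -EINVAL otherwise.
 */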
static int tp_event_has_id(const char *dir_path, struct dirent *evt_dir)
{
	char evt_path[MAXPATHLEN];
	int fd;

	snprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path, evt_dir->d_name);
	fd = open(evt_path, O_RDONLY);
	if (fd < 0)
		return -EINVAL;
	close(fd);

	return 0;
}

#define for_each_event(dir_path, evt_dir, evt_dirent)			\
	while ((evt_dirent = readdir(evt_dir)) != NULL)			\
		if (evt_dirent->d_type == DT_DIR &&			\
		    (strcmp(evt_dirent->d_name, ".")) &&		\
		    (strcmp(evt_dirent->d_name, "..")) &&		\
		    (!tp_event_has_id(dir_path, evt_dirent)))

#define MAX_EVENT_LENGTH 512

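/*
 * Walk all tracing/events/<sys>/<event>/id files and return the system/name
 * pair whose id matches 'config', or NULL if none does.
 */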
struct tracepoint_path *tracepoint_id_to_path(u64 config)
{
	struct tracepoint_path *path = NULL;
	DIR *sys_dir, *evt_dir;
	struct dirent *sys_dirent, *evt_dirent;
	char id_buf[24];
	int fd;
	u64 id;
	char evt_path[MAXPATHLEN];
	char *dir_path;

	sys_dir = tracing_events__opendir();
	if (!sys_dir)
		return NULL;

	for_each_subsystem(sys_dir, sys_dirent) {
		dir_path = get_events_file(sys_dirent->d_name);
		if (!dir_path)
			continue;
		evt_dir = opendir(dir_path);
		if (!evt_dir)
			goto next;

		for_each_event(dir_path, evt_dir, evt_dirent) {

			scnprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path,
				  evt_dirent->d_name);
			fd = open(evt_path, O_RDONLY);
			if (fd < 0)
				continue;
			if (read(fd, id_buf, sizeof(id_buf)) < 0) {
				close(fd);
				continue;
			}
			close(fd);
			id = atoll(id_buf);
			if (id == config) {
				put_events_file(dir_path);
				closedir(evt_dir);
				closedir(sys_dir);
				path = zalloc(sizeof(*path));
				if (!path)
					return NULL;
				if (asprintf(&path->system, "%.*s", MAX_EVENT_LENGTH, sys_dirent->d_name) < 0) {
					free(path);
					return NULL;
				}
				if (asprintf(&path->name, "%.*s", MAX_EVENT_LENGTH, evt_dirent->d_name) < 0) {
					zfree(&path->system);
					free(path);
					return NULL;
				}
				return path;
			}
		}
		closedir(evt_dir);
next:
		put_events_file(dir_path);
	}

	closedir(sys_dir);
	return NULL;
}

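/*
 * Split a "sys:name" string into a freshly allocated tracepoint_path.
 * Returns NULL if 'name' contains no ':' or on allocation failure.
 */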
struct tracepoint_path *tracepoint_name_to_path(const char *name)
{
	struct tracepoint_path *path = zalloc(sizeof(*path));
	char *str = strchr(name, ':');

	if (path == NULL || str == NULL) {
		free(path);
		return NULL;
	}

	path->system = strndup(name, str - name);
	path->name = strdup(str+1);

	if (path->system == NULL || path->name == NULL) {
		zfree(&path->system);
		zfree(&path->name);
		zfree(&path);
	}

	return path;
}

const char *event_type(int type)
{
	switch (type) {
	case PERF_TYPE_HARDWARE:
		return "hardware";

	case PERF_TYPE_SOFTWARE:
		return "software";

	case PERF_TYPE_TRACEPOINT:
		return "tracepoint";

	case PERF_TYPE_HW_CACHE:
		return "hardware-cache";

	default:
		break;
	}

	return "unknown";
}

static char *get_config_str(struct list_head *head_terms, int type_term)
{
	struct parse_events_term *term;

	if (!head_terms)
		return NULL;

	list_for_each_entry(term, head_terms, list)
		if (term->type_term == type_term)
			return term->val.str;

	return NULL;
}

static char *get_config_metric_id(struct list_head *head_terms)
{
	return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_METRIC_ID);
}

static char *get_config_name(struct list_head *head_terms)
{
	return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_NAME);
}

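/*
 * Allocate a new evsel for 'attr', bump *idx and link the evsel at the tail
 * of 'list' when one is given. The cpu map is taken from the PMU when one is
 * supplied, otherwise from 'cpu_list'. Returns NULL on allocation failure.
 */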
static struct evsel *
__add_event(struct list_head *list, int *idx,
	    struct perf_event_attr *attr,
	    bool init_attr,
	    const char *name, const char *metric_id, struct perf_pmu *pmu,
	    struct list_head *config_terms, bool auto_merge_stats,
	    const char *cpu_list)
{
	struct evsel *evsel;
	struct perf_cpu_map *cpus = pmu ? perf_cpu_map__get(pmu->cpus) :
			       cpu_list ? perf_cpu_map__new(cpu_list) : NULL;

	if (pmu && attr->type == PERF_TYPE_RAW)
		perf_pmu__warn_invalid_config(pmu, attr->config, name);

	if (init_attr)
		event_attr_init(attr);

	evsel = evsel__new_idx(attr, *idx);
	if (!evsel) {
		perf_cpu_map__put(cpus);
		return NULL;
	}

	(*idx)++;
	evsel->core.cpus = cpus;
	evsel->core.own_cpus = perf_cpu_map__get(cpus);
	evsel->core.system_wide = pmu ? pmu->is_uncore : false;
	evsel->auto_merge_stats = auto_merge_stats;

	if (name)
		evsel->name = strdup(name);

	if (metric_id)
		evsel->metric_id = strdup(metric_id);

	if (config_terms)
		list_splice_init(config_terms, &evsel->config_terms);

	if (list)
		list_add_tail(&evsel->core.node, list);

	return evsel;
}

struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
				      const char *name, const char *metric_id,
				      struct perf_pmu *pmu)
{
	return __add_event(/*list=*/NULL, &idx, attr, /*init_attr=*/false, name,
			   metric_id, pmu, /*config_terms=*/NULL,
			   /*auto_merge_stats=*/false, /*cpu_list=*/NULL);
}

static int add_event(struct list_head *list, int *idx,
		     struct perf_event_attr *attr, const char *name,
		     const char *metric_id, struct list_head *config_terms)
{
	return __add_event(list, idx, attr, /*init_attr*/true, name, metric_id,
			   /*pmu=*/NULL, config_terms,
			   /*auto_merge_stats=*/false, /*cpu_list=*/NULL) ? 0 : -ENOMEM;
}

static int add_event_tool(struct list_head *list, int *idx,
			  enum perf_tool_event tool_event)
{
	struct evsel *evsel;
	struct perf_event_attr attr = {
		.type   = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_DUMMY,
	};

	evsel = __add_event(list, idx, &attr, /*init_attr=*/true, /*name=*/NULL,
			    /*metric_id=*/NULL, /*pmu=*/NULL,
			    /*config_terms=*/NULL, /*auto_merge_stats=*/false,
			    /*cpu_list=*/"0");
	if (!evsel)
		return -ENOMEM;
	evsel->tool_event = tool_event;
	if (tool_event == PERF_TOOL_DURATION_TIME) {
		free((char *)evsel->unit);
		evsel->unit = strdup("ns");
	}
	return 0;
}

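/*
 * Return the index of the first entry in 'names' one of whose aliases is a
 * case-insensitive prefix of 'str', or -1 if nothing matches.
 */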
static int parse_aliases(char *str, const char *names[][EVSEL__MAX_ALIASES], int size)
{
	int i, j;
	int n, longest = -1;

	for (i = 0; i < size; i++) {
		for (j = 0; j < EVSEL__MAX_ALIASES && names[i][j]; j++) {
			n = strlen(names[i][j]);
			if (n > longest && !strncasecmp(str, names[i][j], n))
				longest = n;
		}
		if (longest > 0)
			return i;
	}

	return -1;
}

typedef int config_term_func_t(struct perf_event_attr *attr,
			       struct parse_events_term *term,
			       struct parse_events_error *err);
static int config_term_common(struct perf_event_attr *attr,
			      struct parse_events_term *term,
			      struct parse_events_error *err);
static int config_attr(struct perf_event_attr *attr,
		       struct list_head *head,
		       struct parse_events_error *err,
		       config_term_func_t config_term);

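/*
 * Parse a legacy cache event of the form <type>[-<op>][-<result>], e.g.
 * "L1-dcache-load-misses", defaulting the op to "read" and the result to
 * "access" when they are omitted, and add it to 'list'.
 */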
int parse_events_add_cache(struct list_head *list, int *idx,
			   char *type, char *op_result1, char *op_result2,
			   struct parse_events_error *err,
			   struct list_head *head_config,
			   struct parse_events_state *parse_state)
{
	struct perf_event_attr attr;
	LIST_HEAD(config_terms);
	char name[MAX_NAME_LEN];
	const char *config_name, *metric_id;
	int cache_type = -1, cache_op = -1, cache_result = -1;
	char *op_result[2] = { op_result1, op_result2 };
	int i, n, ret;
	bool hybrid;

	/*
	 * No fallback - if we cannot get a clear cache type
	 * then bail out:
	 */
	cache_type = parse_aliases(type, evsel__hw_cache, PERF_COUNT_HW_CACHE_MAX);
	if (cache_type == -1)
		return -EINVAL;

	config_name = get_config_name(head_config);
	n = snprintf(name, MAX_NAME_LEN, "%s", type);

	for (i = 0; (i < 2) && (op_result[i]); i++) {
		char *str = op_result[i];

		n += snprintf(name + n, MAX_NAME_LEN - n, "-%s", str);

		if (cache_op == -1) {
			cache_op = parse_aliases(str, evsel__hw_cache_op,
						 PERF_COUNT_HW_CACHE_OP_MAX);
			if (cache_op >= 0) {
				if (!evsel__is_cache_op_valid(cache_type, cache_op))
					return -EINVAL;
				continue;
			}
		}

		if (cache_result == -1) {
			cache_result = parse_aliases(str, evsel__hw_cache_result,
						     PERF_COUNT_HW_CACHE_RESULT_MAX);
			if (cache_result >= 0)
				continue;
		}
	}

	/*
	 * Fall back to reads:
	 */
	if (cache_op == -1)
		cache_op = PERF_COUNT_HW_CACHE_OP_READ;

	/*
	 * Fall back to accesses:
	 */
	if (cache_result == -1)
		cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;

	memset(&attr, 0, sizeof(attr));
	attr.config = cache_type | (cache_op << 8) | (cache_result << 16);
	attr.type = PERF_TYPE_HW_CACHE;

	if (head_config) {
		if (config_attr(&attr, head_config, err,
				config_term_common))
			return -EINVAL;

		if (get_config_terms(head_config, &config_terms))
			return -ENOMEM;
	}

	metric_id = get_config_metric_id(head_config);
	ret = parse_events__add_cache_hybrid(list, idx, &attr,
					     config_name ? : name,
					     metric_id,
					     &config_terms,
					     &hybrid, parse_state);
	if (hybrid)
		goto out_free_terms;

	ret = add_event(list, idx, &attr, config_name ? : name, metric_id,
			&config_terms);
out_free_terms:
	free_config_terms(&config_terms);
	return ret;
}

static void tracepoint_error(struct parse_events_error *e, int err,
			     const char *sys, const char *name)
{
	const char *str;
	char help[BUFSIZ];

	if (!e)
		return;

	/*
	 * The error comes either directly from a syscall errno (> 0)
	 * or from an encoded pointer's error (< 0).
	 */
	err = abs(err);

	switch (err) {
	case EACCES:
		str = "can't access trace events";
		break;
	case ENOENT:
		str = "unknown tracepoint";
		break;
	default:
		str = "failed to add tracepoint";
		break;
	}

	tracing_path__strerror_open_tp(err, help, sizeof(help), sys, name);
	parse_events_error__handle(e, 0, strdup(str), strdup(help));
}

static int add_tracepoint(struct list_head *list, int *idx,
			  const char *sys_name, const char *evt_name,
			  struct parse_events_error *err,
			  struct list_head *head_config)
{
	struct evsel *evsel = evsel__newtp_idx(sys_name, evt_name, (*idx)++);

	if (IS_ERR(evsel)) {
		tracepoint_error(err, PTR_ERR(evsel), sys_name, evt_name);
		return PTR_ERR(evsel);
	}

	if (head_config) {
		LIST_HEAD(config_terms);

		if (get_config_terms(head_config, &config_terms))
			return -ENOMEM;
		list_splice(&config_terms, &evsel->config_terms);
	}

	list_add_tail(&evsel->core.node, list);
	return 0;
}

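/*
 * Add every tracepoint under 'sys_name' whose name matches the glob pattern
 * 'evt_name'. Fails with ENOENT when no tracepoint matches.
 */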
static int add_tracepoint_multi_event(struct list_head *list, int *idx,
				      const char *sys_name, const char *evt_name,
				      struct parse_events_error *err,
				      struct list_head *head_config)
{
	char *evt_path;
	struct dirent *evt_ent;
	DIR *evt_dir;
	int ret = 0, found = 0;

	evt_path = get_events_file(sys_name);
	if (!evt_path) {
		tracepoint_error(err, errno, sys_name, evt_name);
		return -1;
	}
	evt_dir = opendir(evt_path);
	if (!evt_dir) {
		put_events_file(evt_path);
		tracepoint_error(err, errno, sys_name, evt_name);
		return -1;
	}

	while (!ret && (evt_ent = readdir(evt_dir))) {
		if (!strcmp(evt_ent->d_name, ".")
		    || !strcmp(evt_ent->d_name, "..")
		    || !strcmp(evt_ent->d_name, "enable")
		    || !strcmp(evt_ent->d_name, "filter"))
			continue;

		if (!strglobmatch(evt_ent->d_name, evt_name))
			continue;

		found++;

		ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name,
				     err, head_config);
	}

	if (!found) {
		tracepoint_error(err, ENOENT, sys_name, evt_name);
		ret = -1;
	}

	put_events_file(evt_path);
	closedir(evt_dir);
	return ret;
}

static int add_tracepoint_event(struct list_head *list, int *idx,
				const char *sys_name, const char *evt_name,
				struct parse_events_error *err,
				struct list_head *head_config)
{
	return strpbrk(evt_name, "*?") ?
	       add_tracepoint_multi_event(list, idx, sys_name, evt_name,
					  err, head_config) :
	       add_tracepoint(list, idx, sys_name, evt_name,
			      err, head_config);
}

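/*
 * Expand a glob pattern in the subsystem part: add the matching events from
 * every subsystem whose name matches 'sys_name'.
 */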
static int add_tracepoint_multi_sys(struct list_head *list, int *idx,
				    const char *sys_name, const char *evt_name,
				    struct parse_events_error *err,
				    struct list_head *head_config)
{
	struct dirent *events_ent;
	DIR *events_dir;
	int ret = 0;

	events_dir = tracing_events__opendir();
	if (!events_dir) {
		tracepoint_error(err, errno, sys_name, evt_name);
		return -1;
	}

	while (!ret && (events_ent = readdir(events_dir))) {
		if (!strcmp(events_ent->d_name, ".")
		    || !strcmp(events_ent->d_name, "..")
		    || !strcmp(events_ent->d_name, "enable")
		    || !strcmp(events_ent->d_name, "header_event")
		    || !strcmp(events_ent->d_name, "header_page"))
			continue;

		if (!strglobmatch(events_ent->d_name, sys_name))
			continue;

		ret = add_tracepoint_event(list, idx, events_ent->d_name,
					   evt_name, err, head_config);
	}

	closedir(events_dir);
	return ret;
}

#ifdef HAVE_LIBBPF_SUPPORT
struct __add_bpf_event_param {
	struct parse_events_state *parse_state;
	struct list_head *list;
	struct list_head *head_config;
};

static int add_bpf_event(const char *group, const char *event, int fd, struct bpf_object *obj,
			 void *_param)
{
	LIST_HEAD(new_evsels);
	struct __add_bpf_event_param *param = _param;
	struct parse_events_state *parse_state = param->parse_state;
	struct list_head *list = param->list;
	struct evsel *pos;
	int err;
	/*
	 * Check if we should add the event, i.e. if it is a TP but starts
	 * with a '!', then don't add the tracepoint; it will be used for
	 * something else, like adding to a BPF_MAP_TYPE_PROG_ARRAY.
	 *
	 * See tools/perf/examples/bpf/augmented_raw_syscalls.c
	 */
	if (group[0] == '!')
		return 0;

	pr_debug("add bpf event %s:%s and attach bpf program %d\n",
		 group, event, fd);

	err = parse_events_add_tracepoint(&new_evsels, &parse_state->idx, group,
					  event, parse_state->error,
					  param->head_config);
	if (err) {
		struct evsel *evsel, *tmp;

		pr_debug("Failed to add BPF event %s:%s\n",
			 group, event);
		list_for_each_entry_safe(evsel, tmp, &new_evsels, core.node) {
			list_del_init(&evsel->core.node);
			evsel__delete(evsel);
		}
		return err;
	}
	pr_debug("adding %s:%s\n", group, event);

	list_for_each_entry(pos, &new_evsels, core.node) {
		pr_debug("adding %s:%s to %p\n",
			 group, event, pos);
		pos->bpf_fd = fd;
		pos->bpf_obj = obj;
	}
	list_splice(&new_evsels, list);
	return 0;
}

int parse_events_load_bpf_obj(struct parse_events_state *parse_state,
			      struct list_head *list,
			      struct bpf_object *obj,
			      struct list_head *head_config)
{
	int err;
	char errbuf[BUFSIZ];
	struct __add_bpf_event_param param = {parse_state, list, head_config};
	static bool registered_unprobe_atexit = false;

	if (IS_ERR(obj) || !obj) {
		snprintf(errbuf, sizeof(errbuf),
			 "Internal error: load bpf obj with NULL");
		err = -EINVAL;
		goto errout;
	}

	/*
	 * Register the atexit handler before calling bpf__probe() so that
	 * bpf__probe() doesn't need to unprobe the probe points it has
	 * already created when it fails.
	 */
	if (!registered_unprobe_atexit) {
		atexit(bpf__clear);
		registered_unprobe_atexit = true;
	}

	err = bpf__probe(obj);
	if (err) {
		bpf__strerror_probe(obj, err, errbuf, sizeof(errbuf));
		goto errout;
	}

	err = bpf__load(obj);
	if (err) {
		bpf__strerror_load(obj, err, errbuf, sizeof(errbuf));
		goto errout;
	}

	err = bpf__foreach_event(obj, add_bpf_event, &param);
	if (err) {
		snprintf(errbuf, sizeof(errbuf),
			 "Attach events in BPF object failed");
		goto errout;
	}

	return 0;
errout:
	parse_events_error__handle(parse_state->error, 0,
				   strdup(errbuf), strdup("(add -v to see detail)"));
	return err;
}

static int
parse_events_config_bpf(struct parse_events_state *parse_state,
			struct bpf_object *obj,
			struct list_head *head_config)
{
	struct parse_events_term *term;
	int error_pos;

	if (!head_config || list_empty(head_config))
		return 0;

	list_for_each_entry(term, head_config, list) {
		int err;

		if (term->type_term != PARSE_EVENTS__TERM_TYPE_USER) {
			parse_events_error__handle(parse_state->error, term->err_term,
						strdup("Invalid config term for BPF object"),
						NULL);
			return -EINVAL;
		}

		err = bpf__config_obj(obj, term, parse_state->evlist, &error_pos);
		if (err) {
			char errbuf[BUFSIZ];
			int idx;

			bpf__strerror_config_obj(obj, term, parse_state->evlist,
						 &error_pos, err, errbuf,
						 sizeof(errbuf));

			if (err == -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE)
				idx = term->err_val;
			else
				idx = term->err_term + error_pos;

			parse_events_error__handle(parse_state->error, idx,
						strdup(errbuf),
						strdup(
"Hint:\tValid config terms:\n"
"     \tmap:[<arraymap>].value<indices>=[value]\n"
"     \tmap:[<eventmap>].event<indices>=[event]\n"
"\n"
"     \twhere <indices> is something like [0,3...5] or [all]\n"
"     \t(add -v to see detail)"));
			return err;
		}
	}
	return 0;
}

/*
 * Split config terms:
 * perf record -e bpf.c/call-graph=fp,map:array.value[0]=1/ ...
 *
 * 'call-graph=fp' is an 'evt config' and should be applied to each
 * event in bpf.c.
 * 'map:array.value[0]=1' is an 'obj config' and should be processed
 * with parse_events_config_bpf.
 *
 * Move object config terms from the first list to obj_head_config.
 */
static void
split_bpf_config_terms(struct list_head *evt_head_config,
		       struct list_head *obj_head_config)
{
	struct parse_events_term *term, *temp;

	/*
	 * Currently, all possible user config terms
	 * belong to the bpf object. parse_events__is_hardcoded_term()
	 * happens to be a good flag.
	 *
	 * See parse_events_config_bpf() and
	 * config_term_tracepoint().
	 */
	list_for_each_entry_safe(term, temp, evt_head_config, list)
		if (!parse_events__is_hardcoded_term(term))
			list_move_tail(&term->list, obj_head_config);
}

int parse_events_load_bpf(struct parse_events_state *parse_state,
			  struct list_head *list,
			  char *bpf_file_name,
			  bool source,
			  struct list_head *head_config)
{
	int err;
	struct bpf_object *obj;
	LIST_HEAD(obj_head_config);

	if (head_config)
		split_bpf_config_terms(head_config, &obj_head_config);

	obj = bpf__prepare_load(bpf_file_name, source);
	if (IS_ERR(obj)) {
		char errbuf[BUFSIZ];

		err = PTR_ERR(obj);

		if (err == -ENOTSUP)
			snprintf(errbuf, sizeof(errbuf),
				 "BPF support is not compiled");
		else
			bpf__strerror_prepare_load(bpf_file_name,
						   source,
						   -err, errbuf,
						   sizeof(errbuf));

		parse_events_error__handle(parse_state->error, 0,
					strdup(errbuf), strdup("(add -v to see detail)"));
		return err;
	}

	err = parse_events_load_bpf_obj(parse_state, list, obj, head_config);
	if (err)
		return err;
	err = parse_events_config_bpf(parse_state, obj, &obj_head_config);

	/*
	 * Caller doesn't know anything about obj_head_config,
	 * so combine them together again before returning.
	 */
	if (head_config)
		list_splice_tail(&obj_head_config, head_config);
	return err;
}
#else // HAVE_LIBBPF_SUPPORT
int parse_events_load_bpf_obj(struct parse_events_state *parse_state,
			      struct list_head *list __maybe_unused,
			      struct bpf_object *obj __maybe_unused,
			      struct list_head *head_config __maybe_unused)
{
	parse_events_error__handle(parse_state->error, 0,
				   strdup("BPF support is not compiled"),
				   strdup("Make sure libbpf-devel is available at build time."));
	return -ENOTSUP;
}

int parse_events_load_bpf(struct parse_events_state *parse_state,
			  struct list_head *list __maybe_unused,
			  char *bpf_file_name __maybe_unused,
			  bool source __maybe_unused,
			  struct list_head *head_config __maybe_unused)
{
	parse_events_error__handle(parse_state->error, 0,
				   strdup("BPF support is not compiled"),
				   strdup("Make sure libbpf-devel is available at build time."));
	return -ENOTSUP;
}
#endif // HAVE_LIBBPF_SUPPORT

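/*
 * Parse the breakpoint type string ('r', 'w' and 'x' in any combination,
 * each at most once) into attr->bp_type. An empty or missing type defaults
 * to read|write.
 */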
static int
parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
{
	int i;

	for (i = 0; i < 3; i++) {
		if (!type || !type[i])
			break;

#define CHECK_SET_TYPE(bit)		\
do {					\
	if (attr->bp_type & bit)	\
		return -EINVAL;		\
	else				\
		attr->bp_type |= bit;	\
} while (0)

		switch (type[i]) {
		case 'r':
			CHECK_SET_TYPE(HW_BREAKPOINT_R);
			break;
		case 'w':
			CHECK_SET_TYPE(HW_BREAKPOINT_W);
			break;
		case 'x':
			CHECK_SET_TYPE(HW_BREAKPOINT_X);
			break;
		default:
			return -EINVAL;
		}
	}

#undef CHECK_SET_TYPE

	if (!attr->bp_type) /* Default */
		attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;

	return 0;
}

int parse_events_add_breakpoint(struct list_head *list, int *idx,
				u64 addr, char *type, u64 len)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.bp_addr = addr;

	if (parse_breakpoint_type(type, &attr))
		return -EINVAL;

	/* Provide some defaults if len is not specified */
	if (!len) {
		if (attr.bp_type == HW_BREAKPOINT_X)
			len = sizeof(long);
		else
			len = HW_BREAKPOINT_LEN_4;
	}

	attr.bp_len = len;

	attr.type = PERF_TYPE_BREAKPOINT;
	attr.sample_period = 1;

	return add_event(list, idx, &attr, /*name=*/NULL, /*metric_id=*/NULL,
			 /*config_terms=*/NULL);
}

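/*
 * Return 0 when the term's value type matches 'type', otherwise report
 * "expected numeric/string value" through 'err' and return -EINVAL.
 */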
static int check_type_val(struct parse_events_term *term,
			  struct parse_events_error *err,
			  int type)
{
	if (type == term->type_val)
		return 0;

	if (err) {
		parse_events_error__handle(err, term->err_val,
					type == PARSE_EVENTS__TERM_TYPE_NUM
					? strdup("expected numeric value")
					: strdup("expected string value"),
					NULL);
	}
	return -EINVAL;
}

/*
 * Update according to parse-events.l
 */
static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = {
	[PARSE_EVENTS__TERM_TYPE_USER]			= "<sysfs term>",
	[PARSE_EVENTS__TERM_TYPE_CONFIG]		= "config",
	[PARSE_EVENTS__TERM_TYPE_CONFIG1]		= "config1",
	[PARSE_EVENTS__TERM_TYPE_CONFIG2]		= "config2",
	[PARSE_EVENTS__TERM_TYPE_NAME]			= "name",
	[PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD]		= "period",
	[PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ]		= "freq",
	[PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE]	= "branch_type",
	[PARSE_EVENTS__TERM_TYPE_TIME]			= "time",
	[PARSE_EVENTS__TERM_TYPE_CALLGRAPH]		= "call-graph",
	[PARSE_EVENTS__TERM_TYPE_STACKSIZE]		= "stack-size",
	[PARSE_EVENTS__TERM_TYPE_NOINHERIT]		= "no-inherit",
	[PARSE_EVENTS__TERM_TYPE_INHERIT]		= "inherit",
	[PARSE_EVENTS__TERM_TYPE_MAX_STACK]		= "max-stack",
	[PARSE_EVENTS__TERM_TYPE_MAX_EVENTS]		= "nr",
	[PARSE_EVENTS__TERM_TYPE_OVERWRITE]		= "overwrite",
	[PARSE_EVENTS__TERM_TYPE_NOOVERWRITE]		= "no-overwrite",
	[PARSE_EVENTS__TERM_TYPE_DRV_CFG]		= "driver-config",
	[PARSE_EVENTS__TERM_TYPE_PERCORE]		= "percore",
	[PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT]		= "aux-output",
	[PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE]	= "aux-sample-size",
	[PARSE_EVENTS__TERM_TYPE_METRIC_ID]		= "metric-id",
};

static bool config_term_shrinked;

static bool
config_term_avail(int term_type, struct parse_events_error *err)
{
	char *err_str;

	if (term_type < 0 || term_type >= __PARSE_EVENTS__TERM_TYPE_NR) {
		parse_events_error__handle(err, -1,
					strdup("Invalid term_type"), NULL);
		return false;
	}
	if (!config_term_shrinked)
		return true;

	switch (term_type) {
	case PARSE_EVENTS__TERM_TYPE_CONFIG:
	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
	case PARSE_EVENTS__TERM_TYPE_NAME:
	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
	case PARSE_EVENTS__TERM_TYPE_PERCORE:
		return true;
	default:
		if (!err)
			return false;

		/* term_type is validated so indexing is safe */
		if (asprintf(&err_str, "'%s' is not usable in 'perf stat'",
			     config_term_names[term_type]) >= 0)
			parse_events_error__handle(err, -1, err_str, NULL);
		return false;
	}
}

void parse_events__shrink_config_terms(void)
{
	config_term_shrinked = true;
}

static int config_term_common(struct perf_event_attr *attr,
			      struct parse_events_term *term,
			      struct parse_events_error *err)
{
#define CHECK_TYPE_VAL(type)						  \
do {									  \
	if (check_type_val(term, err, PARSE_EVENTS__TERM_TYPE_ ## type)) \
		return -EINVAL;						  \
} while (0)

	switch (term->type_term) {
	case PARSE_EVENTS__TERM_TYPE_CONFIG:
		CHECK_TYPE_VAL(NUM);
		attr->config = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
		CHECK_TYPE_VAL(NUM);
		attr->config1 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
		CHECK_TYPE_VAL(NUM);
		attr->config2 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
		CHECK_TYPE_VAL(STR);
		if (strcmp(term->val.str, "no") &&
		    parse_branch_str(term->val.str,
				     &attr->branch_sample_type)) {
			parse_events_error__handle(err, term->err_val,
					strdup("invalid branch sample type"),
					NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_TIME:
		CHECK_TYPE_VAL(NUM);
		if (term->val.num > 1) {
			parse_events_error__handle(err, term->err_val,
						strdup("expected 0 or 1"),
						NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_INHERIT:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_NAME:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_PERCORE:
		CHECK_TYPE_VAL(NUM);
		if ((unsigned int)term->val.num > 1) {
			parse_events_error__handle(err, term->err_val,
						strdup("expected 0 or 1"),
						NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
		CHECK_TYPE_VAL(NUM);
		if (term->val.num > UINT_MAX) {
			parse_events_error__handle(err, term->err_val,
						strdup("too big"),
						NULL);
			return -EINVAL;
		}
		break;
	default:
		parse_events_error__handle(err, term->err_term,
				strdup("unknown term"),
				parse_events_formats_error_string(NULL));
		return -EINVAL;
	}

	/*
	 * Check term availability after the basic checking so that
	 * PARSE_EVENTS__TERM_TYPE_USER can be found and filtered.
	 *
	 * If we checked availability at the entry of this function,
	 * the user would see "'<sysfs term>' is not usable in 'perf stat'"
	 * when an invalid config term is provided for a legacy event
	 * (for example, instructions/badterm/...), which is confusing.
	 */
	if (!config_term_avail(term->type_term, err))
		return -EINVAL;
	return 0;
#undef CHECK_TYPE_VAL
}

static int config_term_pmu(struct perf_event_attr *attr,
			   struct parse_events_term *term,
			   struct parse_events_error *err)
{
	if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER ||
	    term->type_term == PARSE_EVENTS__TERM_TYPE_DRV_CFG)
		/*
		 * Always succeed for sysfs terms, as we don't know
		 * at this point what type they need to have.
		 */
		return 0;
	else
		return config_term_common(attr, term, err);
}

static int config_term_tracepoint(struct perf_event_attr *attr,
				  struct parse_events_term *term,
				  struct parse_events_error *err)
{
	switch (term->type_term) {
	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
	case PARSE_EVENTS__TERM_TYPE_INHERIT:
	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
		return config_term_common(attr, term, err);
	default:
		if (err) {
			parse_events_error__handle(err, term->err_term,
				strdup("unknown term"),
				strdup("valid terms: call-graph,stack-size\n"));
		}
		return -EINVAL;
	}

	return 0;
}

static int config_attr(struct perf_event_attr *attr,
		       struct list_head *head,
		       struct parse_events_error *err,
		       config_term_func_t config_term)
{
	struct parse_events_term *term;

	list_for_each_entry(term, head, list)
		if (config_term(attr, term, err))
			return -EINVAL;

	return 0;
}

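/*
 * Translate the parsed terms in 'head_config' into evsel_config_term entries
 * appended to 'head_terms'; terms that don't map to an evsel config term are
 * skipped.
 */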
static int get_config_terms(struct list_head *head_config,
			    struct list_head *head_terms __maybe_unused)
{
#define ADD_CONFIG_TERM(__type, __weak)				\
	struct evsel_config_term *__t;				\
								\
	__t = zalloc(sizeof(*__t));				\
	if (!__t)						\
		return -ENOMEM;					\
								\
	INIT_LIST_HEAD(&__t->list);				\
	__t->type	= EVSEL__CONFIG_TERM_ ## __type;	\
	__t->weak	= __weak;				\
	list_add_tail(&__t->list, head_terms)

#define ADD_CONFIG_TERM_VAL(__type, __name, __val, __weak)	\
do {								\
	ADD_CONFIG_TERM(__type, __weak);			\
	__t->val.__name = __val;				\
} while (0)

#define ADD_CONFIG_TERM_STR(__type, __val, __weak)		\
do {								\
	ADD_CONFIG_TERM(__type, __weak);			\
	__t->val.str = strdup(__val);				\
	if (!__t->val.str) {					\
		zfree(&__t);					\
		return -ENOMEM;					\
	}							\
	__t->free_str = true;					\
} while (0)

	struct parse_events_term *term;

	list_for_each_entry(term, head_config, list) {
		switch (term->type_term) {
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
			ADD_CONFIG_TERM_VAL(PERIOD, period, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
			ADD_CONFIG_TERM_VAL(FREQ, freq, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_TIME:
			ADD_CONFIG_TERM_VAL(TIME, time, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
			ADD_CONFIG_TERM_STR(CALLGRAPH, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
			ADD_CONFIG_TERM_STR(BRANCH, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
			ADD_CONFIG_TERM_VAL(STACK_USER, stack_user,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_INHERIT:
			ADD_CONFIG_TERM_VAL(INHERIT, inherit,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
			ADD_CONFIG_TERM_VAL(INHERIT, inherit,
					    term->val.num ? 0 : 1, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
			ADD_CONFIG_TERM_VAL(MAX_STACK, max_stack,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
			ADD_CONFIG_TERM_VAL(MAX_EVENTS, max_events,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
			ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
			ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
					    term->val.num ? 0 : 1, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
			ADD_CONFIG_TERM_STR(DRV_CFG, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_PERCORE:
			ADD_CONFIG_TERM_VAL(PERCORE, percore,
					    term->val.num ? true : false, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
			ADD_CONFIG_TERM_VAL(AUX_OUTPUT, aux_output,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
			ADD_CONFIG_TERM_VAL(AUX_SAMPLE_SIZE, aux_sample_size,
					    term->val.num, term->weak);
			break;
		default:
			break;
		}
	}
	return 0;
}

/*
 * Add EVSEL__CONFIG_TERM_CFG_CHG where cfg_chg will have a bit set for
 * each bit of attr->config that the user has changed.
 */
static int get_config_chgs(struct perf_pmu *pmu, struct list_head *head_config,
			   struct list_head *head_terms)
{
	struct parse_events_term *term;
	u64 bits = 0;
	int type;

	list_for_each_entry(term, head_config, list) {
		switch (term->type_term) {
		case PARSE_EVENTS__TERM_TYPE_USER:
			type = perf_pmu__format_type(&pmu->format, term->config);
			if (type != PERF_PMU_FORMAT_VALUE_CONFIG)
				continue;
			bits |= perf_pmu__format_bits(&pmu->format, term->config);
			break;
		case PARSE_EVENTS__TERM_TYPE_CONFIG:
			bits = ~(u64)0;
			break;
		default:
			break;
		}
	}

	if (bits)
		ADD_CONFIG_TERM_VAL(CFG_CHG, cfg_chg, bits, false);

#undef ADD_CONFIG_TERM
	return 0;
}

int parse_events_add_tracepoint(struct list_head *list, int *idx,
				const char *sys, const char *event,
				struct parse_events_error *err,
				struct list_head *head_config)
{
	if (head_config) {
		struct perf_event_attr attr;

		if (config_attr(&attr, head_config, err,
				config_term_tracepoint))
			return -EINVAL;
	}

	if (strpbrk(sys, "*?"))
		return add_tracepoint_multi_sys(list, idx, sys, event,
						err, head_config);
	else
		return add_tracepoint_event(list, idx, sys, event,
					    err, head_config);
}

int parse_events_add_numeric(struct parse_events_state *parse_state,
			     struct list_head *list,
			     u32 type, u64 config,
			     struct list_head *head_config)
{
	struct perf_event_attr attr;
	LIST_HEAD(config_terms);
	const char *name, *metric_id;
	bool hybrid;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.type = type;
	attr.config = config;

	if (head_config) {
		if (config_attr(&attr, head_config, parse_state->error,
				config_term_common))
			return -EINVAL;

		if (get_config_terms(head_config, &config_terms))
			return -ENOMEM;
	}

	name = get_config_name(head_config);
	metric_id = get_config_metric_id(head_config);
	ret = parse_events__add_numeric_hybrid(parse_state, list, &attr,
					       name, metric_id,
					       &config_terms, &hybrid);
	if (hybrid)
		goto out_free_terms;

	ret = add_event(list, &parse_state->idx, &attr, name, metric_id,
			&config_terms);
out_free_terms:
	free_config_terms(&config_terms);
	return ret;
}

int parse_events_add_tool(struct parse_events_state *parse_state,
			  struct list_head *list,
			  int tool_event)
{
	return add_event_tool(list, &parse_state->idx, tool_event);
}

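/*
 * Return the value of the first "percore" term in 'config_terms', or false
 * when none is present.
 */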
static bool config_term_percore(struct list_head *config_terms)
{
	struct evsel_config_term *term;

	list_for_each_entry(term, config_terms, list) {
		if (term->type == EVSEL__CONFIG_TERM_PERCORE)
			return term->val.percore;
	}

	return false;
}

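/*
 * If 'name' is a hybrid PMU and 'head_config' holds exactly one term whose
 * config name is not "event", treat that term as an event string and reparse
 * it against the hybrid PMU. Returns 0 on success, -1 when this shortcut
 * does not apply.
 */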
static int parse_events__inside_hybrid_pmu(struct parse_events_state *parse_state,
					   struct list_head *list, char *name,
					   struct list_head *head_config)
{
	struct parse_events_term *term;
	int ret = -1;

	if (parse_state->fake_pmu || !head_config || list_empty(head_config) ||
	    !perf_pmu__is_hybrid(name)) {
		return -1;
	}

	/*
	 * Bail out if there is more than one term in the list.
	 */
	if (head_config->next && head_config->next->next != head_config)
		return -1;

	term = list_first_entry(head_config, struct parse_events_term, list);
	if (term && term->config && strcmp(term->config, "event")) {
		ret = parse_events__with_hybrid_pmu(parse_state, term->config,
						    name, list);
	}

	return ret;
}

int parse_events_add_pmu(struct parse_events_state *parse_state,
			 struct list_head *list, char *name,
			 struct list_head *head_config,
			 bool auto_merge_stats,
			 bool use_alias)
{
	struct perf_event_attr attr;
	struct perf_pmu_info info;
	struct perf_pmu *pmu;
	struct evsel *evsel;
	struct parse_events_error *err = parse_state->error;
	bool use_uncore_alias;
	LIST_HEAD(config_terms);

	if (verbose > 1) {
		fprintf(stderr, "Attempting to add event pmu '%s' with '",
			name);
		if (head_config) {
			struct parse_events_term *term;

			list_for_each_entry(term, head_config, list) {
				fprintf(stderr, "%s,", term->config);
			}
		}
		fprintf(stderr, "' that may result in non-fatal errors\n");
	}

	pmu = parse_state->fake_pmu ?: perf_pmu__find(name);
	if (!pmu) {
		char *err_str;

		if (asprintf(&err_str,
			     "Cannot find PMU `%s'. Missing kernel support?",
			     name) >= 0)
			parse_events_error__handle(err, 0, err_str, NULL);
		return -EINVAL;
	}

	if (pmu->default_config) {
		memcpy(&attr, pmu->default_config,
		       sizeof(struct perf_event_attr));
	} else {
		memset(&attr, 0, sizeof(attr));
	}

	use_uncore_alias = (pmu->is_uncore && use_alias);

	if (!head_config) {
		attr.type = pmu->type;
		evsel = __add_event(list, &parse_state->idx, &attr,
				    /*init_attr=*/true, /*name=*/NULL,
				    /*metric_id=*/NULL, pmu,
				    /*config_terms=*/NULL, auto_merge_stats,
				    /*cpu_list=*/NULL);
		if (evsel) {
			evsel->pmu_name = name ? strdup(name) : NULL;
			evsel->use_uncore_alias = use_uncore_alias;
			return 0;
		} else {
			return -ENOMEM;
		}
	}

	if (!parse_state->fake_pmu && perf_pmu__check_alias(pmu, head_config, &info))
		return -EINVAL;

	if (verbose > 1) {
		fprintf(stderr, "After aliases, add event pmu '%s' with '",
			name);
		if (head_config) {
			struct parse_events_term *term;

			list_for_each_entry(term, head_config, list) {
				fprintf(stderr, "%s,", term->config);
			}
		}
		fprintf(stderr, "' that may result in non-fatal errors\n");
	}

	/*
	 * Configure hardcoded terms first, no need to check
	 * return value when called with fail == 0 ;)
	 */
	if (config_attr(&attr, head_config, parse_state->error, config_term_pmu))
		return -EINVAL;

	if (get_config_terms(head_config, &config_terms))
		return -ENOMEM;

	/*
	 * When using default config, record which bits of attr->config were
	 * changed by the user.
	 */
	if (pmu->default_config && get_config_chgs(pmu, head_config, &config_terms))
		return -ENOMEM;

	if (!parse_events__inside_hybrid_pmu(parse_state, list, name,
					     head_config)) {
		return 0;
	}

	if (!parse_state->fake_pmu && perf_pmu__config(pmu, &attr, head_config, parse_state->error)) {
		free_config_terms(&config_terms);
		return -EINVAL;
	}

	evsel = __add_event(list, &parse_state->idx, &attr, /*init_attr=*/true,
			    get_config_name(head_config),
			    get_config_metric_id(head_config), pmu,
			    &config_terms, auto_merge_stats, /*cpu_list=*/NULL);
	if (!evsel)
		return -ENOMEM;

	if (evsel->name)
		evsel->use_config_name = true;

	evsel->pmu_name = name ? strdup(name) : NULL;
	evsel->use_uncore_alias = use_uncore_alias;
	evsel->percore = config_term_percore(&evsel->config_terms);

	if (parse_state->fake_pmu)
		return 0;

	free((char *)evsel->unit);
	evsel->unit = strdup(info.unit);
	evsel->scale = info.scale;
	evsel->per_pkg = info.per_pkg;
	evsel->snapshot = info.snapshot;
	evsel->metric_expr = info.metric_expr;
	evsel->metric_name = info.metric_name;
	return 0;
}

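/*
 * Add the alias 'str' as an event on every PMU that advertises it, collecting
 * the resulting evsels in a new list returned via 'listp'. Returns -1 when no
 * PMU supports the alias.
 */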
int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
			       char *str, struct list_head *head,
			       struct list_head **listp)
{
	struct parse_events_term *term;
	struct list_head *list = NULL;
	struct perf_pmu *pmu = NULL;
	int ok = 0;
	char *config;

	*listp = NULL;

	if (!head) {
		head = malloc(sizeof(struct list_head));
		if (!head)
			goto out_err;

		INIT_LIST_HEAD(head);
	}
	config = strdup(str);
	if (!config)
		goto out_err;

	if (parse_events_term__num(&term,
				   PARSE_EVENTS__TERM_TYPE_USER,
				   config, 1, false, &config,
				   NULL) < 0) {
		free(config);
		goto out_err;
	}
	list_add_tail(&term->list, head);

	/* Add it for all PMUs that support the alias */
	list = malloc(sizeof(struct list_head));
	if (!list)
		goto out_err;

	INIT_LIST_HEAD(list);

	while ((pmu = perf_pmu__scan(pmu)) != NULL) {
		struct perf_pmu_alias *alias;

		list_for_each_entry(alias, &pmu->aliases, list) {
			if (!strcasecmp(alias->name, str)) {
				if (!parse_events_add_pmu(parse_state, list,
							  pmu->name, head,
							  true, true)) {
					pr_debug("%s -> %s/%s/\n", str,
						 pmu->name, alias->str);
					ok++;
				}
			}
		}
	}
out_err:
	if (ok)
		*listp = list;
	else
		free(list);

	parse_events_terms__delete(head);
	return ok ? 0 : -1;
}

int parse_events__modifier_group(struct list_head *list,
				 char *event_mod)
{
	return parse_events__modifier_event(list, event_mod, true);
}

/*
 * Check if the two uncore PMUs are from the same uncore block
 * The format of the uncore PMU name is uncore_#blockname_#pmuidx
 */
static bool is_same_uncore_block(const char *pmu_name_a, const char *pmu_name_b)
{
	char *end_a, *end_b;

	end_a = strrchr(pmu_name_a, '_');
	end_b = strrchr(pmu_name_b, '_');

	if (!end_a || !end_b)
		return false;

	if ((end_a - pmu_name_a) != (end_b - pmu_name_b))
		return false;

	return (strncmp(pmu_name_a, pmu_name_b, end_a - pmu_name_a) == 0);
}

static int
parse_events__set_leader_for_uncore_aliase(char *name, struct list_head *list,
					   struct parse_events_state *parse_state)
{
	struct evsel *evsel, *leader;
	uintptr_t *leaders;
	bool is_leader = true;
	int i, nr_pmu = 0, total_members, ret = 0;

	leader = list_first_entry(list, struct evsel, core.node);
	evsel = list_last_entry(list, struct evsel, core.node);
	total_members = evsel->core.idx - leader->core.idx + 1;

	leaders = calloc(total_members, sizeof(uintptr_t));
	if (WARN_ON(!leaders))
		return 0;

	/*
	 * Go through the whole group and do a sanity check.
	 * All members must use an alias and be from the same uncore block.
	 * Also, store the leader events in an array.
	 */
	__evlist__for_each_entry(list, evsel) {

		/* Only split the uncore group whose members use aliases */
		if (!evsel->use_uncore_alias)
			goto out;

		/* The events must be from the same uncore block */
		if (!is_same_uncore_block(leader->pmu_name, evsel->pmu_name))
			goto out;

		if (!is_leader)
			continue;
		/*
		 * If the event's PMU name starts to repeat, it must be a new
		 * event. That can be used to distinguish the leader from
		 * other members, even if they have the same event name.
		 */
		if ((leader != evsel) &&
		    !strcmp(leader->pmu_name, evsel->pmu_name)) {
			is_leader = false;
			continue;
		}

		/* Store the leader event for each PMU */
		leaders[nr_pmu++] = (uintptr_t) evsel;
	}

	/* only one event alias */
	if (nr_pmu == total_members) {
		parse_state->nr_groups--;
		goto handled;
	}

	/*
	 * An uncore event alias is a joint name, meaning the same event
	 * runs on all PMUs of a block.
	 * Perf doesn't support mixed events from different PMUs in the same
	 * group. The big group has to be split into multiple small groups
	 * which only include the events from the same PMU.
	 *
	 * Here the uncore event aliases must be from the same uncore block.
	 * The number of PMUs must be the same for each alias. The number of
	 * new small groups equals the number of PMUs.
	 * Set the leader event for the corresponding members in each group.
	 */
	i = 0;
	__evlist__for_each_entry(list, evsel) {
		if (i >= nr_pmu)
			i = 0;
		evsel__set_leader(evsel, (struct evsel *) leaders[i++]);
	}

	/* The number of members and the group name are the same for each group */
	for (i = 0; i < nr_pmu; i++) {
		evsel = (struct evsel *) leaders[i];
		evsel->core.nr_members = total_members / nr_pmu;
		evsel->group_name = name ? strdup(name) : NULL;
	}

	/* Take the new small groups into account */
	parse_state->nr_groups += nr_pmu - 1;

handled:
	ret = 1;
out:
	free(leaders);
	return ret;
}

void parse_events__set_leader(char *name, struct list_head *list,
			      struct parse_events_state *parse_state)
{
	struct evsel *leader;

	if (list_empty(list)) {
		WARN_ONCE(true, "WARNING: failed to set leader: empty list");
		return;
	}

	if (parse_events__set_leader_for_uncore_aliase(name, list, parse_state))
		return;

	__perf_evlist__set_leader(list);
	leader = list_entry(list->next, struct evsel, core.node);
	leader->group_name = name ? strdup(name) : NULL;
}

1845 /* list_event is assumed to point to malloc'ed memory */
parse_events_update_lists(struct list_head * list_event,struct list_head * list_all)1846 void parse_events_update_lists(struct list_head *list_event,
1847 struct list_head *list_all)
1848 {
1849 /*
1850 * Called for single event definition. Update the
1851 * 'all event' list, and reinit the 'single event'
1852 * list, for next event definition.
1853 */
1854 list_splice_tail(list_event, list_all);
1855 free(list_event);
1856 }
1857
1858 struct event_modifier {
1859 int eu;
1860 int ek;
1861 int eh;
1862 int eH;
1863 int eG;
1864 int eI;
1865 int precise;
1866 int precise_max;
1867 int exclude_GH;
1868 int sample_read;
1869 int pinned;
1870 int weak;
1871 int exclusive;
1872 int bpf_counter;
1873 };
1874
get_event_modifier(struct event_modifier * mod,char * str,struct evsel * evsel)1875 static int get_event_modifier(struct event_modifier *mod, char *str,
1876 struct evsel *evsel)
1877 {
1878 int eu = evsel ? evsel->core.attr.exclude_user : 0;
1879 int ek = evsel ? evsel->core.attr.exclude_kernel : 0;
1880 int eh = evsel ? evsel->core.attr.exclude_hv : 0;
1881 int eH = evsel ? evsel->core.attr.exclude_host : 0;
1882 int eG = evsel ? evsel->core.attr.exclude_guest : 0;
1883 int eI = evsel ? evsel->core.attr.exclude_idle : 0;
1884 int precise = evsel ? evsel->core.attr.precise_ip : 0;
1885 int precise_max = 0;
1886 int sample_read = 0;
1887 int pinned = evsel ? evsel->core.attr.pinned : 0;
1888 int exclusive = evsel ? evsel->core.attr.exclusive : 0;
1889
1890 int exclude = eu | ek | eh;
1891 int exclude_GH = evsel ? evsel->exclude_GH : 0;
1892 int weak = 0;
1893 int bpf_counter = 0;
1894
1895 memset(mod, 0, sizeof(*mod));
1896
1897 while (*str) {
1898 if (*str == 'u') {
1899 if (!exclude)
1900 exclude = eu = ek = eh = 1;
1901 if (!exclude_GH && !perf_guest)
1902 eG = 1;
1903 eu = 0;
1904 } else if (*str == 'k') {
1905 if (!exclude)
1906 exclude = eu = ek = eh = 1;
1907 ek = 0;
1908 } else if (*str == 'h') {
1909 if (!exclude)
1910 exclude = eu = ek = eh = 1;
1911 eh = 0;
1912 } else if (*str == 'G') {
1913 if (!exclude_GH)
1914 exclude_GH = eG = eH = 1;
1915 eG = 0;
1916 } else if (*str == 'H') {
1917 if (!exclude_GH)
1918 exclude_GH = eG = eH = 1;
1919 eH = 0;
1920 } else if (*str == 'I') {
1921 eI = 1;
1922 } else if (*str == 'p') {
1923 precise++;
1924 /* use of precise requires exclude_guest */
1925 if (!exclude_GH)
1926 eG = 1;
1927 } else if (*str == 'P') {
1928 precise_max = 1;
1929 } else if (*str == 'S') {
1930 sample_read = 1;
1931 } else if (*str == 'D') {
1932 pinned = 1;
1933 } else if (*str == 'e') {
1934 exclusive = 1;
1935 } else if (*str == 'W') {
1936 weak = 1;
1937 } else if (*str == 'b') {
1938 bpf_counter = 1;
1939 } else
1940 break;
1941
1942 ++str;
1943 }
1944
1945 /*
1946 * precise ip:
1947 *
1948 * 0 - SAMPLE_IP can have arbitrary skid
1949 * 1 - SAMPLE_IP must have constant skid
1950 * 2 - SAMPLE_IP requested to have 0 skid
1951 * 3 - SAMPLE_IP must have 0 skid
1952 *
1953 * See also PERF_RECORD_MISC_EXACT_IP
1954 */
1955 if (precise > 3)
1956 return -EINVAL;
1957
1958 mod->eu = eu;
1959 mod->ek = ek;
1960 mod->eh = eh;
1961 mod->eH = eH;
1962 mod->eG = eG;
1963 mod->eI = eI;
1964 mod->precise = precise;
1965 mod->precise_max = precise_max;
1966 mod->exclude_GH = exclude_GH;
1967 mod->sample_read = sample_read;
1968 mod->pinned = pinned;
1969 mod->weak = weak;
1970 mod->bpf_counter = bpf_counter;
1971 mod->exclusive = exclusive;
1972
1973 return 0;
1974 }
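
/*
 * Rough sketch of the mapping: for an event spec like "cycles:kppp", the
 * loop above leaves exclude_user and exclude_hv set (only kernel mode is
 * counted), sets exclude_guest because 'p' requires it, and ends with
 * precise == 3, i.e. samples must have zero skid. The exact attr bits also
 * depend on the evsel defaults passed in, so this is illustrative rather
 * than exhaustive.
 */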

/*
 * Basic modifier sanity check: validate that the string contains at most
 * one instance of each modifier (apart from 'p', which may repeat).
 */
static int check_modifier(char *str)
{
	char *p = str;

	/* The sizeof includes the terminating 0 byte as well. */
	if (strlen(str) > (sizeof("ukhGHpppPSDIWeb") - 1))
		return -1;

	while (*p) {
		if (*p != 'p' && strchr(p + 1, *p))
			return -1;
		p++;
	}

	return 0;
}

int parse_events__modifier_event(struct list_head *list, char *str, bool add)
{
	struct evsel *evsel;
	struct event_modifier mod;

	if (str == NULL)
		return 0;

	if (check_modifier(str))
		return -EINVAL;

	if (!add && get_event_modifier(&mod, str, NULL))
		return -EINVAL;

	__evlist__for_each_entry(list, evsel) {
		if (add && get_event_modifier(&mod, str, evsel))
			return -EINVAL;

		evsel->core.attr.exclude_user = mod.eu;
		evsel->core.attr.exclude_kernel = mod.ek;
		evsel->core.attr.exclude_hv = mod.eh;
		evsel->core.attr.precise_ip = mod.precise;
		evsel->core.attr.exclude_host = mod.eH;
		evsel->core.attr.exclude_guest = mod.eG;
		evsel->core.attr.exclude_idle = mod.eI;
		evsel->exclude_GH = mod.exclude_GH;
		evsel->sample_read = mod.sample_read;
		evsel->precise_max = mod.precise_max;
		evsel->weak_group = mod.weak;
		evsel->bpf_counter = mod.bpf_counter;

		if (evsel__is_group_leader(evsel)) {
			evsel->core.attr.pinned = mod.pinned;
			evsel->core.attr.exclusive = mod.exclusive;
		}
	}

	return 0;
}

int parse_events_name(struct list_head *list, const char *name)
{
	struct evsel *evsel;

	__evlist__for_each_entry(list, evsel) {
		if (!evsel->name)
			evsel->name = strdup(name);
	}

	return 0;
}

static int
comp_pmu(const void *p1, const void *p2)
{
	struct perf_pmu_event_symbol *pmu1 = (struct perf_pmu_event_symbol *) p1;
	struct perf_pmu_event_symbol *pmu2 = (struct perf_pmu_event_symbol *) p2;

	return strcasecmp(pmu1->symbol, pmu2->symbol);
}

static void perf_pmu__parse_cleanup(void)
{
	if (perf_pmu_events_list_num > 0) {
		struct perf_pmu_event_symbol *p;
		int i;

		for (i = 0; i < perf_pmu_events_list_num; i++) {
			p = perf_pmu_events_list + i;
			zfree(&p->symbol);
		}
		zfree(&perf_pmu_events_list);
		perf_pmu_events_list_num = 0;
	}
}

#define SET_SYMBOL(str, stype)		\
do {					\
	p->symbol = str;		\
	if (!p->symbol)			\
		goto err;		\
	p->type = stype;		\
} while (0)

/*
 * Read the pmu events list from sysfs.
 * Save it into perf_pmu_events_list.
 */
static void perf_pmu__parse_init(void)
{
	struct perf_pmu *pmu = NULL;
	struct perf_pmu_alias *alias;
	int len = 0;

	while ((pmu = perf_pmu__scan(pmu)) != NULL) {
		list_for_each_entry(alias, &pmu->aliases, list) {
			if (strchr(alias->name, '-'))
				len++;
			len++;
		}
	}

	if (len == 0) {
		perf_pmu_events_list_num = -1;
		return;
	}
	perf_pmu_events_list = malloc(sizeof(struct perf_pmu_event_symbol) * len);
	if (!perf_pmu_events_list)
		return;
	perf_pmu_events_list_num = len;

	len = 0;
	pmu = NULL;
	while ((pmu = perf_pmu__scan(pmu)) != NULL) {
		list_for_each_entry(alias, &pmu->aliases, list) {
			struct perf_pmu_event_symbol *p = perf_pmu_events_list + len;
			char *tmp = strchr(alias->name, '-');

			if (tmp != NULL) {
				SET_SYMBOL(strndup(alias->name, tmp - alias->name),
						PMU_EVENT_SYMBOL_PREFIX);
				p++;
				SET_SYMBOL(strdup(++tmp), PMU_EVENT_SYMBOL_SUFFIX);
				len += 2;
			} else {
				SET_SYMBOL(strdup(alias->name), PMU_EVENT_SYMBOL);
				len++;
			}
		}
	}
	qsort(perf_pmu_events_list, len,
		sizeof(struct perf_pmu_event_symbol), comp_pmu);

	return;
err:
	perf_pmu__parse_cleanup();
}
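
/*
 * For example (alias names are illustrative): an alias containing a '-',
 * say "topdown-total-slots", is stored twice, as the prefix symbol
 * "topdown" (PMU_EVENT_SYMBOL_PREFIX) and the suffix symbol "total-slots"
 * (PMU_EVENT_SYMBOL_SUFFIX); an alias without a dash, e.g.
 * "dtlb_load_misses.miss_causes_a_walk", becomes a single PMU_EVENT_SYMBOL.
 * The lexer later uses these classifications when tokenizing event names.
 */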

/*
 * This function injects a special term in perf_pmu_events_list so the
 * test code can check this functionality.
 */
int perf_pmu__test_parse_init(void)
{
	struct perf_pmu_event_symbol *list;

	list = malloc(sizeof(*list));
	if (!list)
		return -ENOMEM;

	list->type = PMU_EVENT_SYMBOL;
	list->symbol = strdup("read");

	if (!list->symbol) {
		free(list);
		return -ENOMEM;
	}

	perf_pmu_events_list = list;
	perf_pmu_events_list_num = 1;
	return 0;
}

enum perf_pmu_event_symbol_type
perf_pmu__parse_check(const char *name)
{
	struct perf_pmu_event_symbol p, *r;

	/* scan kernel pmu events from sysfs if needed */
	if (perf_pmu_events_list_num == 0)
		perf_pmu__parse_init();
	/*
	 * The name "cpu" could be a prefix of cpu-cycles or a cpu// event.
	 * cpu-cycles has already been handled by the hardcoded symbols, so
	 * here it must be a cpu// event, not a kernel pmu event.
	 */
	if ((perf_pmu_events_list_num <= 0) || !strcmp(name, "cpu"))
		return PMU_EVENT_SYMBOL_ERR;

	p.symbol = strdup(name);
	if (!p.symbol) /* don't let bsearch() compare a NULL symbol */
		return PMU_EVENT_SYMBOL_ERR;
	r = bsearch(&p, perf_pmu_events_list,
			(size_t) perf_pmu_events_list_num,
			sizeof(struct perf_pmu_event_symbol), comp_pmu);
	zfree(&p.symbol);
	return r ? r->type : PMU_EVENT_SYMBOL_ERR;
}

static int parse_events__scanner(const char *str,
				 struct parse_events_state *parse_state)
{
	YY_BUFFER_STATE buffer;
	void *scanner;
	int ret;

	ret = parse_events_lex_init_extra(parse_state, &scanner);
	if (ret)
		return ret;

	buffer = parse_events__scan_string(str, scanner);

#ifdef PARSER_DEBUG
	parse_events_debug = 1;
	parse_events_set_debug(1, scanner);
#endif
	ret = parse_events_parse(parse_state, scanner);

	parse_events__flush_buffer(buffer, scanner);
	parse_events__delete_buffer(buffer, scanner);
	parse_events_lex_destroy(scanner);
	return ret;
}

/*
 * Parse an event config string and return a list of event terms.
 */
int parse_events_terms(struct list_head *terms, const char *str)
{
	struct parse_events_state parse_state = {
		.terms  = NULL,
		.stoken = PE_START_TERMS,
	};
	int ret;

	ret = parse_events__scanner(str, &parse_state);
	perf_pmu__parse_cleanup();

	if (!ret) {
		list_splice(parse_state.terms, terms);
		zfree(&parse_state.terms);
		return 0;
	}

	parse_events_terms__delete(parse_state.terms);
	return ret;
}
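
/*
 * Usage sketch: parse a raw term string into struct parse_events_term
 * entries ("config" and "period" are standard term names from the
 * config_term_names[] table):
 *
 *	LIST_HEAD(terms);
 *
 *	if (!parse_events_terms(&terms, "config=0x1,period=1000")) {
 *		... walk the two terms ...
 *		parse_events_terms__purge(&terms);
 *	}
 */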

static int parse_events__with_hybrid_pmu(struct parse_events_state *parse_state,
					 const char *str, char *pmu_name,
					 struct list_head *list)
{
	struct parse_events_state ps = {
		.list = LIST_HEAD_INIT(ps.list),
		.stoken = PE_START_EVENTS,
		.hybrid_pmu_name = pmu_name,
		.idx = parse_state->idx,
	};
	int ret;

	ret = parse_events__scanner(str, &ps);
	perf_pmu__parse_cleanup();

	if (!ret) {
		if (!list_empty(&ps.list)) {
			list_splice(&ps.list, list);
			parse_state->idx = ps.idx;
			return 0;
		} else
			return -1;
	}

	return ret;
}

int __parse_events(struct evlist *evlist, const char *str,
		   struct parse_events_error *err, struct perf_pmu *fake_pmu)
{
	struct parse_events_state parse_state = {
		.list = LIST_HEAD_INIT(parse_state.list),
		.idx = evlist->core.nr_entries,
		.error = err,
		.evlist = evlist,
		.stoken = PE_START_EVENTS,
		.fake_pmu = fake_pmu,
	};
	int ret;

	ret = parse_events__scanner(str, &parse_state);
	perf_pmu__parse_cleanup();

	if (!ret && list_empty(&parse_state.list)) {
		WARN_ONCE(true, "WARNING: event parser found nothing\n");
		return -1;
	}

	/*
	 * Add the list to the evlist even on error, to allow callers to clean up.
	 */
	evlist__splice_list_tail(evlist, &parse_state.list);

	if (!ret) {
		struct evsel *last;

		evlist->core.nr_groups += parse_state.nr_groups;
		last = evlist__last(evlist);
		last->cmdline_group_boundary = true;

		return 0;
	}

	/*
	 * There are two users - builtin-record and builtin-test objects.
	 * Both call evlist__delete in case of error, so we don't
	 * need to bother.
	 */
	return ret;
}
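
/*
 * Typical call (a sketch, via the parse_events() wrapper that passes a NULL
 * fake_pmu): populate an evlist from a command-line event spec.
 *
 *	struct evlist *evlist = evlist__new();
 *	struct parse_events_error err;
 *
 *	parse_events_error__init(&err);
 *	if (parse_events(evlist, "cycles:u,instructions", &err))
 *		parse_events_error__print(&err, "cycles:u,instructions");
 *	parse_events_error__exit(&err);
 */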

void parse_events_error__init(struct parse_events_error *err)
{
	bzero(err, sizeof(*err));
}

void parse_events_error__exit(struct parse_events_error *err)
{
	zfree(&err->str);
	zfree(&err->help);
	zfree(&err->first_str);
	zfree(&err->first_help);
}

void parse_events_error__handle(struct parse_events_error *err, int idx,
				char *str, char *help)
{
	if (WARN(!str, "WARNING: failed to provide error string\n")) {
		free(help);
		return;
	}
	switch (err->num_errors) {
	case 0:
		err->idx = idx;
		err->str = str;
		err->help = help;
		break;
	case 1:
		err->first_idx = err->idx;
		err->idx = idx;
		err->first_str = err->str;
		err->str = str;
		err->first_help = err->help;
		err->help = help;
		break;
	default:
		pr_debug("Multiple errors dropping message: %s (%s)\n",
			err->str, err->help);
		free(err->str);
		err->str = str;
		free(err->help);
		err->help = help;
		break;
	}
	err->num_errors++;
}

#define MAX_WIDTH 1000
static int get_term_width(void)
{
	struct winsize ws;

	get_term_dimensions(&ws);
	return ws.ws_col > MAX_WIDTH ? MAX_WIDTH : ws.ws_col;
}

static void __parse_events_error__print(int err_idx, const char *err_str,
					const char *err_help, const char *event)
{
	const char *str = "invalid or unsupported event: ";
	char _buf[MAX_WIDTH];
	char *buf = (char *) event;
	int idx = 0;

	if (err_str) {
		/* -2 for the extra '' in the final fprintf */
		int width = get_term_width() - 2;
		int len_event = strlen(event);
		int len_str, max_len, cut = 0;

		/*
		 * Maximum error index indent, we will cut
		 * the event string if it's bigger.
		 */
		int max_err_idx = 13;

		/*
		 * Let's be specific with the message when
		 * we have the precise error.
		 */
		str = "event syntax error: ";
		len_str = strlen(str);
		max_len = width - len_str;

		buf = _buf;

		/* We're cutting from the beginning. */
		if (err_idx > max_err_idx)
			cut = err_idx - max_err_idx;

		strncpy(buf, event + cut, max_len);
		/* strncpy() doesn't terminate if the source fills max_len */
		buf[max_len] = 0;

		/* Mark cut parts with '..' on both sides. */
		if (cut)
			buf[0] = buf[1] = '.';

		if ((len_event - cut) > max_len)
			buf[max_len - 1] = buf[max_len - 2] = '.';

		idx = len_str + err_idx - cut;
	}

	fprintf(stderr, "%s'%s'\n", str, buf);
	if (idx) {
		fprintf(stderr, "%*s\\___ %s\n", idx + 1, "", err_str);
		if (err_help)
			fprintf(stderr, "\n%s\n", err_help);
	}
}
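
/*
 * For a bad spec such as "cycles:xyz", the rendered output looks roughly
 * like this (the exact message and alignment depend on the parser error):
 *
 *	event syntax error: 'cycles:xyz'
 *	                           \___ parser error
 */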

void parse_events_error__print(struct parse_events_error *err,
			       const char *event)
{
	if (!err->num_errors)
		return;

	__parse_events_error__print(err->idx, err->str, err->help, event);

	if (err->num_errors > 1) {
		fputs("\nInitial error:\n", stderr);
		__parse_events_error__print(err->first_idx, err->first_str,
					err->first_help, event);
	}
}

#undef MAX_WIDTH

int parse_events_option(const struct option *opt, const char *str,
			int unset __maybe_unused)
{
	struct evlist *evlist = *(struct evlist **)opt->value;
	struct parse_events_error err;
	int ret;

	parse_events_error__init(&err);
	ret = parse_events(evlist, str, &err);

	if (ret) {
		parse_events_error__print(&err, str);
		fprintf(stderr, "Run 'perf list' for a list of valid events\n");
	}
	parse_events_error__exit(&err);

	return ret;
}

int parse_events_option_new_evlist(const struct option *opt, const char *str, int unset)
{
	struct evlist **evlistp = opt->value;
	int ret;

	if (*evlistp == NULL) {
		*evlistp = evlist__new();

		if (*evlistp == NULL) {
			fprintf(stderr, "Not enough memory to create evlist\n");
			return -1;
		}
	}

	ret = parse_events_option(opt, str, unset);
	if (ret) {
		evlist__delete(*evlistp);
		*evlistp = NULL;
	}

	return ret;
}

static int
foreach_evsel_in_last_glob(struct evlist *evlist,
			   int (*func)(struct evsel *evsel,
				       const void *arg),
			   const void *arg)
{
	struct evsel *last = NULL;
	int err;

	/*
	 * Don't return when the list is empty, give func a chance to report
	 * an error when it finds last == NULL.
	 *
	 * So no need to WARN here, let *func do this.
	 */
	if (evlist->core.nr_entries > 0)
		last = evlist__last(evlist);

	do {
		err = (*func)(last, arg);
		if (err)
			return -1;
		if (!last)
			return 0;

		if (last->core.node.prev == &evlist->core.entries)
			return 0;
		last = list_entry(last->core.node.prev, struct evsel, core.node);
	} while (!last->cmdline_group_boundary);

	return 0;
}

static int set_filter(struct evsel *evsel, const void *arg)
{
	const char *str = arg;
	bool found = false;
	int nr_addr_filters = 0;
	struct perf_pmu *pmu = NULL;

	if (evsel == NULL) {
		fprintf(stderr,
			"--filter option should follow a -e tracepoint or HW tracer option\n");
		return -1;
	}

	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
		if (evsel__append_tp_filter(evsel, str) < 0) {
			fprintf(stderr,
				"not enough memory to hold filter string\n");
			return -1;
		}

		return 0;
	}

	while ((pmu = perf_pmu__scan(pmu)) != NULL)
		if (pmu->type == evsel->core.attr.type) {
			found = true;
			break;
		}

	if (found)
		perf_pmu__scan_file(pmu, "nr_addr_filters",
				    "%d", &nr_addr_filters);

	if (!nr_addr_filters) {
		fprintf(stderr,
			"This CPU does not support address filtering\n");
		return -1;
	}

	if (evsel__append_addr_filter(evsel, str) < 0) {
		fprintf(stderr,
			"not enough memory to hold filter string\n");
		return -1;
	}

	return 0;
}
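
/*
 * Usage sketch: set_filter() is invoked through parse_filter() below for
 * each evsel of the last -e glob, covering both the tracepoint and the
 * address-filter paths (event names and filter bodies are illustrative):
 *
 *	perf record -e sched:sched_switch --filter 'prev_pid != 0' ...
 *	perf record -e intel_pt//u --filter 'filter main @ /bin/ls' ...
 */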

int parse_filter(const struct option *opt, const char *str,
		 int unset __maybe_unused)
{
	struct evlist *evlist = *(struct evlist **)opt->value;

	return foreach_evsel_in_last_glob(evlist, set_filter,
					  (const void *)str);
}

static int add_exclude_perf_filter(struct evsel *evsel,
				   const void *arg __maybe_unused)
{
	char new_filter[64];

	if (evsel == NULL || evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
		fprintf(stderr,
			"--exclude-perf option should follow a -e tracepoint option\n");
		return -1;
	}

	snprintf(new_filter, sizeof(new_filter), "common_pid != %d", getpid());

	if (evsel__append_tp_filter(evsel, new_filter) < 0) {
		fprintf(stderr,
			"not enough memory to hold filter string\n");
		return -1;
	}

	return 0;
}

int exclude_perf(const struct option *opt,
		 const char *arg __maybe_unused,
		 int unset __maybe_unused)
{
	struct evlist *evlist = *(struct evlist **)opt->value;

	return foreach_evsel_in_last_glob(evlist, add_exclude_perf_filter,
					  NULL);
}
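
/*
 * For instance, with perf running as pid 1234, --exclude-perf appends the
 * tracepoint filter "common_pid != 1234" to each event of the last -e glob,
 * so perf's own activity is not traced (the pid value is per invocation).
 */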

static const char * const event_type_descriptors[] = {
	"Hardware event",
	"Software event",
	"Tracepoint event",
	"Hardware cache event",
	"Raw hardware event descriptor",
	"Hardware breakpoint",
};

static int cmp_string(const void *a, const void *b)
{
	const char * const *as = a;
	const char * const *bs = b;

	return strcmp(*as, *bs);
}

/*
 * Print the events from <debugfs_mount_point>/tracing/events
 */

void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
			     bool name_only)
{
	DIR *sys_dir, *evt_dir;
	struct dirent *sys_dirent, *evt_dirent;
	char evt_path[MAXPATHLEN];
	char *dir_path;
	char **evt_list = NULL;
	unsigned int evt_i = 0, evt_num = 0;
	bool evt_num_known = false;

restart:
	sys_dir = tracing_events__opendir();
	if (!sys_dir)
		return;

	if (evt_num_known) {
		evt_list = zalloc(sizeof(char *) * evt_num);
		if (!evt_list)
			goto out_close_sys_dir;
	}

	for_each_subsystem(sys_dir, sys_dirent) {
		if (subsys_glob != NULL &&
		    !strglobmatch(sys_dirent->d_name, subsys_glob))
			continue;

		dir_path = get_events_file(sys_dirent->d_name);
		if (!dir_path)
			continue;
		evt_dir = opendir(dir_path);
		if (!evt_dir)
			goto next;

		for_each_event(dir_path, evt_dir, evt_dirent) {
			if (event_glob != NULL &&
			    !strglobmatch(evt_dirent->d_name, event_glob))
				continue;

			if (!evt_num_known) {
				evt_num++;
				continue;
			}

			snprintf(evt_path, MAXPATHLEN, "%s:%s",
				 sys_dirent->d_name, evt_dirent->d_name);

			evt_list[evt_i] = strdup(evt_path);
			if (evt_list[evt_i] == NULL) {
				put_events_file(dir_path);
				goto out_close_evt_dir;
			}
			evt_i++;
		}
		closedir(evt_dir);
next:
		put_events_file(dir_path);
	}
	closedir(sys_dir);

	if (!evt_num_known) {
		evt_num_known = true;
		goto restart;
	}
	qsort(evt_list, evt_num, sizeof(char *), cmp_string);
	evt_i = 0;
	while (evt_i < evt_num) {
		if (name_only) {
			printf("%s ", evt_list[evt_i++]);
			continue;
		}
		printf(" %-50s [%s]\n", evt_list[evt_i++],
				event_type_descriptors[PERF_TYPE_TRACEPOINT]);
	}
	if (evt_num && pager_in_use())
		printf("\n");

out_free:
	evt_num = evt_i;
	for (evt_i = 0; evt_i < evt_num; evt_i++)
		zfree(&evt_list[evt_i]);
	zfree(&evt_list);
	return;

out_close_evt_dir:
	closedir(evt_dir);
out_close_sys_dir:
	closedir(sys_dir);

	printf("FATAL: not enough memory to print %s\n",
			event_type_descriptors[PERF_TYPE_TRACEPOINT]);
	if (evt_list)
		goto out_free;
}

/*
 * Check whether an event is in <debugfs_mount_point>/tracing/events
 */

int is_valid_tracepoint(const char *event_string)
{
	DIR *sys_dir, *evt_dir;
	struct dirent *sys_dirent, *evt_dirent;
	char evt_path[MAXPATHLEN];
	char *dir_path;

	sys_dir = tracing_events__opendir();
	if (!sys_dir)
		return 0;

	for_each_subsystem(sys_dir, sys_dirent) {
		dir_path = get_events_file(sys_dirent->d_name);
		if (!dir_path)
			continue;
		evt_dir = opendir(dir_path);
		if (!evt_dir)
			goto next;

		for_each_event(dir_path, evt_dir, evt_dirent) {
			snprintf(evt_path, MAXPATHLEN, "%s:%s",
				 sys_dirent->d_name, evt_dirent->d_name);
			if (!strcmp(evt_path, event_string)) {
				closedir(evt_dir);
				closedir(sys_dir);
				return 1;
			}
		}
		closedir(evt_dir);
next:
		put_events_file(dir_path);
	}
	closedir(sys_dir);
	return 0;
}
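
/*
 * Example (which tracepoints exist varies with the kernel config):
 *
 *	is_valid_tracepoint("sched:sched_switch") returns 1 when
 *	<debugfs_mount_point>/tracing/events/sched/sched_switch exists,
 *	and 0 otherwise.
 */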

static bool is_event_supported(u8 type, u64 config)
{
	bool ret = true;
	int open_return;
	struct evsel *evsel;
	struct perf_event_attr attr = {
		.type = type,
		.config = config,
		.disabled = 1,
	};
	struct perf_thread_map *tmap = thread_map__new_by_tid(0);

	if (tmap == NULL)
		return false;

	evsel = evsel__new(&attr);
	if (evsel) {
		open_return = evsel__open(evsel, NULL, tmap);
		ret = open_return >= 0;

		if (open_return == -EACCES) {
			/*
			 * This happens if the paranoid value
			 * /proc/sys/kernel/perf_event_paranoid is set to 2.
			 * Re-run with exclude_kernel set; we don't do that
			 * by default as some ARM machines do not support it.
			 */
			evsel->core.attr.exclude_kernel = 1;
			ret = evsel__open(evsel, NULL, tmap) >= 0;
		}
		evsel__delete(evsel);
	}

	perf_thread_map__put(tmap);
	return ret;
}

void print_sdt_events(const char *subsys_glob, const char *event_glob,
		      bool name_only)
{
	struct probe_cache *pcache;
	struct probe_cache_entry *ent;
	struct strlist *bidlist, *sdtlist;
	struct strlist_config cfg = {.dont_dupstr = true};
	struct str_node *nd, *nd2;
	char *buf, *path, *ptr = NULL;
	bool show_detail = false;
	int ret;

	sdtlist = strlist__new(NULL, &cfg);
	if (!sdtlist) {
		pr_debug("Failed to allocate new strlist for SDT\n");
		return;
	}
	bidlist = build_id_cache__list_all(true);
	if (!bidlist) {
		pr_debug("Failed to get buildids: %d\n", errno);
		strlist__delete(sdtlist); /* don't leak sdtlist */
		return;
	}
	strlist__for_each_entry(nd, bidlist) {
		pcache = probe_cache__new(nd->s, NULL);
		if (!pcache)
			continue;
		list_for_each_entry(ent, &pcache->entries, node) {
			if (!ent->sdt)
				continue;
			if (subsys_glob &&
			    !strglobmatch(ent->pev.group, subsys_glob))
				continue;
			if (event_glob &&
			    !strglobmatch(ent->pev.event, event_glob))
				continue;
			ret = asprintf(&buf, "%s:%s@%s", ent->pev.group,
					ent->pev.event, nd->s);
			if (ret > 0)
				strlist__add(sdtlist, buf);
		}
		probe_cache__delete(pcache);
	}
	strlist__delete(bidlist);

	strlist__for_each_entry(nd, sdtlist) {
		buf = strchr(nd->s, '@');
		if (buf)
			*(buf++) = '\0';
		if (name_only) {
			printf("%s ", nd->s);
			continue;
		}
		nd2 = strlist__next(nd);
		if (nd2) {
			ptr = strchr(nd2->s, '@');
			if (ptr)
				*ptr = '\0';
			if (strcmp(nd->s, nd2->s) == 0)
				show_detail = true;
		}
		if (show_detail) {
			path = build_id_cache__origname(buf);
			ret = asprintf(&buf, "%s@%s(%.12s)", nd->s, path, buf);
			if (ret > 0) {
				printf(" %-50s [%s]\n", buf, "SDT event");
				free(buf);
			}
			free(path);
		} else
			printf(" %-50s [%s]\n", nd->s, "SDT event");
		if (nd2) {
			if (strcmp(nd->s, nd2->s) != 0)
				show_detail = false;
			if (ptr)
				*ptr = '@';
		}
	}
	strlist__delete(sdtlist);
}

int print_hwcache_events(const char *event_glob, bool name_only)
{
	unsigned int type, op, i, evt_i = 0, evt_num = 0, npmus = 0;
	char name[64], new_name[128];
	char **evt_list = NULL, **evt_pmus = NULL;
	bool evt_num_known = false;
	struct perf_pmu *pmu = NULL;

	if (perf_pmu__has_hybrid()) {
		npmus = perf_pmu__hybrid_pmu_num();
		evt_pmus = zalloc(sizeof(char *) * npmus);
		if (!evt_pmus)
			goto out_enomem;
	}

restart:
	if (evt_num_known) {
		evt_list = zalloc(sizeof(char *) * evt_num);
		if (!evt_list)
			goto out_enomem;
	}

	for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
		for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
			/* skip invalid cache type */
			if (!evsel__is_cache_op_valid(type, op))
				continue;

			for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
				unsigned int hybrid_supported = 0, j;
				bool supported;

				__evsel__hw_cache_type_op_res_name(type, op, i, name, sizeof(name));
				if (event_glob != NULL && !strglobmatch(name, event_glob))
					continue;

				if (!perf_pmu__has_hybrid()) {
					if (!is_event_supported(PERF_TYPE_HW_CACHE,
								type | (op << 8) | (i << 16))) {
						continue;
					}
				} else {
					perf_pmu__for_each_hybrid_pmu(pmu) {
						if (!evt_num_known) {
							evt_num++;
							continue;
						}

						supported = is_event_supported(
									PERF_TYPE_HW_CACHE,
									type | (op << 8) | (i << 16) |
									((__u64)pmu->type << PERF_PMU_TYPE_SHIFT));
						if (supported) {
							snprintf(new_name, sizeof(new_name), "%s/%s/",
								 pmu->name, name);
							evt_pmus[hybrid_supported] = strdup(new_name);
							hybrid_supported++;
						}
					}

					if (hybrid_supported == 0)
						continue;
				}

				if (!evt_num_known) {
					evt_num++;
					continue;
				}

				if ((hybrid_supported == 0) ||
				    (hybrid_supported == npmus)) {
					evt_list[evt_i] = strdup(name);
					if (npmus > 0) {
						for (j = 0; j < npmus; j++)
							zfree(&evt_pmus[j]);
					}
				} else {
					for (j = 0; j < hybrid_supported; j++) {
						evt_list[evt_i++] = evt_pmus[j];
						evt_pmus[j] = NULL;
					}
					continue;
				}

				if (evt_list[evt_i] == NULL)
					goto out_enomem;
				evt_i++;
			}
		}
	}

	if (!evt_num_known) {
		evt_num_known = true;
		goto restart;
	}

	for (evt_i = 0; evt_i < evt_num; evt_i++) {
		if (!evt_list[evt_i])
			break;
	}

	evt_num = evt_i;
	qsort(evt_list, evt_num, sizeof(char *), cmp_string);
	evt_i = 0;
	while (evt_i < evt_num) {
		if (name_only) {
			printf("%s ", evt_list[evt_i++]);
			continue;
		}
		printf(" %-50s [%s]\n", evt_list[evt_i++],
				event_type_descriptors[PERF_TYPE_HW_CACHE]);
	}
	if (evt_num && pager_in_use())
		printf("\n");

out_free:
	evt_num = evt_i;
	for (evt_i = 0; evt_i < evt_num; evt_i++)
		zfree(&evt_list[evt_i]);
	zfree(&evt_list);

	for (evt_i = 0; evt_i < npmus; evt_i++)
		zfree(&evt_pmus[evt_i]);
	zfree(&evt_pmus);
	return evt_num;

out_enomem:
	printf("FATAL: not enough memory to print %s\n", event_type_descriptors[PERF_TYPE_HW_CACHE]);
	if (evt_list)
		goto out_free;
	return evt_num;
}
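
/*
 * The attr.config encoding used above packs the cache tuple into one value:
 *
 *	config = type | (op << 8) | (result << 16)
 *
 * e.g. "L1-dcache-load-misses" is PERF_COUNT_HW_CACHE_L1D |
 * (PERF_COUNT_HW_CACHE_OP_READ << 8) | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16).
 * On hybrid systems the PMU type is additionally packed into the high bits
 * via PERF_PMU_TYPE_SHIFT.
 */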

static void print_tool_event(const char *name, const char *event_glob,
			     bool name_only)
{
	if (event_glob && !strglobmatch(name, event_glob))
		return;
	if (name_only)
		printf("%s ", name);
	else
		printf(" %-50s [%s]\n", name, "Tool event");
}

void print_tool_events(const char *event_glob, bool name_only)
{
	print_tool_event("duration_time", event_glob, name_only);
	if (pager_in_use())
		printf("\n");
}

void print_symbol_events(const char *event_glob, unsigned type,
			 struct event_symbol *syms, unsigned max,
			 bool name_only)
{
	unsigned int i, evt_i = 0, evt_num = 0;
	char name[MAX_NAME_LEN];
	char **evt_list = NULL;
	bool evt_num_known = false;

restart:
	if (evt_num_known) {
		evt_list = zalloc(sizeof(char *) * evt_num);
		if (!evt_list)
			goto out_enomem;
		syms -= max;
	}

	for (i = 0; i < max; i++, syms++) {
		/*
		 * New attr.config values may still be unsupported here; the
		 * latest example was PERF_COUNT_SW_CGROUP_SWITCHES.
		 */
		if (syms->symbol == NULL)
			continue;

		if (event_glob != NULL && !(strglobmatch(syms->symbol, event_glob) ||
		    (syms->alias && strglobmatch(syms->alias, event_glob))))
			continue;

		if (!is_event_supported(type, i))
			continue;

		if (!name_only && strlen(syms->alias))
			snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias);
		else
			strlcpy(name, syms->symbol, MAX_NAME_LEN);

		if (!evt_num_known) {
			evt_num++;
			continue;
		}

		evt_list[evt_i] = strdup(name);
		if (evt_list[evt_i] == NULL)
			goto out_enomem;
		evt_i++;
	}

	if (!evt_num_known) {
		evt_num_known = true;
		goto restart;
	}
	qsort(evt_list, evt_num, sizeof(char *), cmp_string);
	evt_i = 0;
	while (evt_i < evt_num) {
		if (name_only) {
			printf("%s ", evt_list[evt_i++]);
			continue;
		}
		printf(" %-50s [%s]\n", evt_list[evt_i++], event_type_descriptors[type]);
	}
	if (evt_num && pager_in_use())
		printf("\n");

out_free:
	evt_num = evt_i;
	for (evt_i = 0; evt_i < evt_num; evt_i++)
		zfree(&evt_list[evt_i]);
	zfree(&evt_list);
	return;

out_enomem:
	printf("FATAL: not enough memory to print %s\n", event_type_descriptors[type]);
	if (evt_list)
		goto out_free;
}

/*
 * Print the help text for the event symbols:
 */
void print_events(const char *event_glob, bool name_only, bool quiet_flag,
		  bool long_desc, bool details_flag, bool deprecated,
		  const char *pmu_name)
{
	print_symbol_events(event_glob, PERF_TYPE_HARDWARE,
			    event_symbols_hw, PERF_COUNT_HW_MAX, name_only);

	print_symbol_events(event_glob, PERF_TYPE_SOFTWARE,
			    event_symbols_sw, PERF_COUNT_SW_MAX, name_only);
	print_tool_events(event_glob, name_only);

	print_hwcache_events(event_glob, name_only);

	print_pmu_events(event_glob, name_only, quiet_flag, long_desc,
			 details_flag, deprecated, pmu_name);

	if (event_glob != NULL)
		return;

	if (!name_only) {
		printf(" %-50s [%s]\n",
		       "rNNN",
		       event_type_descriptors[PERF_TYPE_RAW]);
		printf(" %-50s [%s]\n",
		       "cpu/t1=v1[,t2=v2,t3 ...]/modifier",
		       event_type_descriptors[PERF_TYPE_RAW]);
		if (pager_in_use())
			printf(" (see 'man perf-list' on how to encode it)\n\n");

		printf(" %-50s [%s]\n",
		       "mem:<addr>[/len][:access]",
		       event_type_descriptors[PERF_TYPE_BREAKPOINT]);
		if (pager_in_use())
			printf("\n");
	}

	print_tracepoint_events(NULL, NULL, name_only);

	print_sdt_events(NULL, NULL, name_only);

	metricgroup__print(true, true, NULL, name_only, details_flag,
			   pmu_name);

	print_libpfm_events(name_only, long_desc);
}

int parse_events__is_hardcoded_term(struct parse_events_term *term)
{
	return term->type_term != PARSE_EVENTS__TERM_TYPE_USER;
}

static int new_term(struct parse_events_term **_term,
		    struct parse_events_term *temp,
		    char *str, u64 num)
{
	struct parse_events_term *term;

	term = malloc(sizeof(*term));
	if (!term)
		return -ENOMEM;

	*term = *temp;
	INIT_LIST_HEAD(&term->list);
	term->weak = false;

	switch (term->type_val) {
	case PARSE_EVENTS__TERM_TYPE_NUM:
		term->val.num = num;
		break;
	case PARSE_EVENTS__TERM_TYPE_STR:
		term->val.str = str;
		break;
	default:
		free(term);
		return -EINVAL;
	}

	*_term = term;
	return 0;
}

int parse_events_term__num(struct parse_events_term **term,
			   int type_term, char *config, u64 num,
			   bool no_value,
			   void *loc_term_, void *loc_val_)
{
	YYLTYPE *loc_term = loc_term_;
	YYLTYPE *loc_val = loc_val_;

	struct parse_events_term temp = {
		.type_val = PARSE_EVENTS__TERM_TYPE_NUM,
		.type_term = type_term,
		.config = config ? : strdup(config_term_names[type_term]),
		.no_value = no_value,
		.err_term = loc_term ? loc_term->first_column : 0,
		.err_val = loc_val ? loc_val->first_column : 0,
	};

	if (!temp.config) /* the strdup() of the default name may have failed */
		return -ENOMEM;

	return new_term(term, &temp, NULL, num);
}

int parse_events_term__str(struct parse_events_term **term,
			   int type_term, char *config, char *str,
			   void *loc_term_, void *loc_val_)
{
	YYLTYPE *loc_term = loc_term_;
	YYLTYPE *loc_val = loc_val_;

	struct parse_events_term temp = {
		.type_val = PARSE_EVENTS__TERM_TYPE_STR,
		.type_term = type_term,
		.config = config,
		.err_term = loc_term ? loc_term->first_column : 0,
		.err_val = loc_val ? loc_val->first_column : 0,
	};

	return new_term(term, &temp, str, 0);
}

int parse_events_term__sym_hw(struct parse_events_term **term,
			      char *config, unsigned idx)
{
	struct event_symbol *sym;
	char *str;
	struct parse_events_term temp = {
		.type_val = PARSE_EVENTS__TERM_TYPE_STR,
		.type_term = PARSE_EVENTS__TERM_TYPE_USER,
		.config = config,
	};

	if (!temp.config) {
		temp.config = strdup("event");
		if (!temp.config)
			return -ENOMEM;
	}
	BUG_ON(idx >= PERF_COUNT_HW_MAX);
	sym = &event_symbols_hw[idx];

	str = strdup(sym->symbol);
	if (!str)
		return -ENOMEM;
	return new_term(term, &temp, str, 0);
}

int parse_events_term__clone(struct parse_events_term **new,
			     struct parse_events_term *term)
{
	char *str;
	struct parse_events_term temp = {
		.type_val = term->type_val,
		.type_term = term->type_term,
		.config = NULL,
		.err_term = term->err_term,
		.err_val = term->err_val,
	};

	if (term->config) {
		temp.config = strdup(term->config);
		if (!temp.config)
			return -ENOMEM;
	}
	if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
		return new_term(new, &temp, NULL, term->val.num);

	str = strdup(term->val.str);
	if (!str)
		return -ENOMEM;
	return new_term(new, &temp, str, 0);
}

void parse_events_term__delete(struct parse_events_term *term)
{
	if (term->array.nr_ranges)
		zfree(&term->array.ranges);

	if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM)
		zfree(&term->val.str);

	zfree(&term->config);
	free(term);
}

int parse_events_copy_term_list(struct list_head *old,
				 struct list_head **new)
{
	struct parse_events_term *term, *n;
	int ret;

	if (!old) {
		*new = NULL;
		return 0;
	}

	*new = malloc(sizeof(struct list_head));
	if (!*new)
		return -ENOMEM;
	INIT_LIST_HEAD(*new);

	list_for_each_entry (term, old, list) {
		ret = parse_events_term__clone(&n, term);
		if (ret) {
			/* don't leak the terms copied so far */
			parse_events_terms__delete(*new);
			*new = NULL;
			return ret;
		}
		list_add_tail(&n->list, *new);
	}
	return 0;
}

void parse_events_terms__purge(struct list_head *terms)
{
	struct parse_events_term *term, *h;

	list_for_each_entry_safe(term, h, terms, list) {
		list_del_init(&term->list);
		parse_events_term__delete(term);
	}
}

void parse_events_terms__delete(struct list_head *terms)
{
	if (!terms)
		return;
	parse_events_terms__purge(terms);
	free(terms);
}

void parse_events__clear_array(struct parse_events_array *a)
{
	zfree(&a->ranges);
}

void parse_events_evlist_error(struct parse_events_state *parse_state,
			       int idx, const char *str)
{
	if (!parse_state->error)
		return;

	parse_events_error__handle(parse_state->error, idx, strdup(str), NULL);
}

static void config_terms_list(char *buf, size_t buf_sz)
{
	int i;
	bool first = true;

	buf[0] = '\0';
	for (i = 0; i < __PARSE_EVENTS__TERM_TYPE_NR; i++) {
		const char *name = config_term_names[i];

		if (!config_term_avail(i, NULL))
			continue;
		if (!name)
			continue;
		if (name[0] == '<')
			continue;

		if (strlen(buf) + strlen(name) + 2 >= buf_sz)
			return;

		if (!first)
			strcat(buf, ",");
		else
			first = false;
		strcat(buf, name);
	}
}

/*
 * Return a string listing the valid config terms of an event.
 * @additional_terms: for terms such as PMU sysfs terms.
 */
char *parse_events_formats_error_string(char *additional_terms)
{
	char *str;
	/* "no-overwrite" is the longest name */
	char static_terms[__PARSE_EVENTS__TERM_TYPE_NR *
			  (sizeof("no-overwrite") - 1)];

	config_terms_list(static_terms, sizeof(static_terms));
	/* valid terms */
	if (additional_terms) {
		if (asprintf(&str, "valid terms: %s,%s",
			     additional_terms, static_terms) < 0)
			goto fail;
	} else {
		if (asprintf(&str, "valid terms: %s", static_terms) < 0)
			goto fail;
	}
	return str;

fail:
	return NULL;
}
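
/*
 * The result feeds event syntax error messages, e.g. a string of the form
 * (the terms shown are illustrative; the real set comes from
 * config_term_names[]):
 *
 *	"valid terms: config,config1,config2,name,period,..."
 *
 * The caller owns the returned buffer and must free() it.
 */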

struct evsel *parse_events__add_event_hybrid(struct list_head *list, int *idx,
					     struct perf_event_attr *attr,
					     const char *name,
					     const char *metric_id,
					     struct perf_pmu *pmu,
					     struct list_head *config_terms)
{
	return __add_event(list, idx, attr, /*init_attr=*/true, name, metric_id,
			   pmu, config_terms, /*auto_merge_stats=*/false,
			   /*cpu_list=*/NULL);
}