1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2017-2018 Netronome Systems, Inc. */
3
4 #define _GNU_SOURCE
5 #include <errno.h>
6 #include <fcntl.h>
7 #include <signal.h>
8 #include <stdarg.h>
9 #include <stdio.h>
10 #include <stdlib.h>
11 #include <string.h>
12 #include <time.h>
13 #include <unistd.h>
14 #include <net/if.h>
15 #include <sys/ioctl.h>
16 #include <sys/types.h>
17 #include <sys/stat.h>
18 #include <sys/syscall.h>
19 #include <dirent.h>
20
21 #include <linux/err.h>
22 #include <linux/perf_event.h>
23 #include <linux/sizes.h>
24
25 #include <bpf/bpf.h>
26 #include <bpf/btf.h>
27 #include <bpf/hashmap.h>
28 #include <bpf/libbpf.h>
29 #include <bpf/skel_internal.h>
30
31 #include "cfg.h"
32 #include "main.h"
33 #include "xlated_dumper.h"
34
35 #define BPF_METADATA_PREFIX "bpf_metadata_"
36 #define BPF_METADATA_PREFIX_LEN (sizeof(BPF_METADATA_PREFIX) - 1)
37
38 const char * const prog_type_name[] = {
39 [BPF_PROG_TYPE_UNSPEC] = "unspec",
40 [BPF_PROG_TYPE_SOCKET_FILTER] = "socket_filter",
41 [BPF_PROG_TYPE_KPROBE] = "kprobe",
42 [BPF_PROG_TYPE_SCHED_CLS] = "sched_cls",
43 [BPF_PROG_TYPE_SCHED_ACT] = "sched_act",
44 [BPF_PROG_TYPE_TRACEPOINT] = "tracepoint",
45 [BPF_PROG_TYPE_XDP] = "xdp",
46 [BPF_PROG_TYPE_PERF_EVENT] = "perf_event",
47 [BPF_PROG_TYPE_CGROUP_SKB] = "cgroup_skb",
48 [BPF_PROG_TYPE_CGROUP_SOCK] = "cgroup_sock",
49 [BPF_PROG_TYPE_LWT_IN] = "lwt_in",
50 [BPF_PROG_TYPE_LWT_OUT] = "lwt_out",
51 [BPF_PROG_TYPE_LWT_XMIT] = "lwt_xmit",
52 [BPF_PROG_TYPE_SOCK_OPS] = "sock_ops",
53 [BPF_PROG_TYPE_SK_SKB] = "sk_skb",
54 [BPF_PROG_TYPE_CGROUP_DEVICE] = "cgroup_device",
55 [BPF_PROG_TYPE_SK_MSG] = "sk_msg",
56 [BPF_PROG_TYPE_RAW_TRACEPOINT] = "raw_tracepoint",
57 [BPF_PROG_TYPE_CGROUP_SOCK_ADDR] = "cgroup_sock_addr",
58 [BPF_PROG_TYPE_LWT_SEG6LOCAL] = "lwt_seg6local",
59 [BPF_PROG_TYPE_LIRC_MODE2] = "lirc_mode2",
60 [BPF_PROG_TYPE_SK_REUSEPORT] = "sk_reuseport",
61 [BPF_PROG_TYPE_FLOW_DISSECTOR] = "flow_dissector",
62 [BPF_PROG_TYPE_CGROUP_SYSCTL] = "cgroup_sysctl",
63 [BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE] = "raw_tracepoint_writable",
64 [BPF_PROG_TYPE_CGROUP_SOCKOPT] = "cgroup_sockopt",
65 [BPF_PROG_TYPE_TRACING] = "tracing",
66 [BPF_PROG_TYPE_STRUCT_OPS] = "struct_ops",
67 [BPF_PROG_TYPE_EXT] = "ext",
68 [BPF_PROG_TYPE_LSM] = "lsm",
69 [BPF_PROG_TYPE_SK_LOOKUP] = "sk_lookup",
70 };
71
72 const size_t prog_type_name_size = ARRAY_SIZE(prog_type_name);
73
74 enum dump_mode {
75 DUMP_JITED,
76 DUMP_XLATED,
77 };
78
79 static const char * const attach_type_strings[] = {
80 [BPF_SK_SKB_STREAM_PARSER] = "stream_parser",
81 [BPF_SK_SKB_STREAM_VERDICT] = "stream_verdict",
82 [BPF_SK_SKB_VERDICT] = "skb_verdict",
83 [BPF_SK_MSG_VERDICT] = "msg_verdict",
84 [BPF_FLOW_DISSECTOR] = "flow_dissector",
85 [__MAX_BPF_ATTACH_TYPE] = NULL,
86 };
87
88 static struct hashmap *prog_table;
89
90 static enum bpf_attach_type parse_attach_type(const char *str)
91 {
92 enum bpf_attach_type type;
93
94 for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
95 if (attach_type_strings[type] &&
96 is_prefix(str, attach_type_strings[type]))
97 return type;
98 }
99
100 return __MAX_BPF_ATTACH_TYPE;
101 }
102
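/* Convert a load time given in nanoseconds since boot into a wall-clock
 * timestamp (seconds since the Epoch for JSON output, ISO-8601-like for
 * plain output). Falls back to printing raw seconds if the clocks cannot
 * be read or the time cannot be converted.
 */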
103 static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
104 {
105 struct timespec real_time_ts, boot_time_ts;
106 time_t wallclock_secs;
107 struct tm load_tm;
108
109 buf[--size] = '\0';
110
111 if (clock_gettime(CLOCK_REALTIME, &real_time_ts) ||
112 clock_gettime(CLOCK_BOOTTIME, &boot_time_ts)) {
113 perror("Can't read clocks");
114 snprintf(buf, size, "%llu", nsecs / 1000000000);
115 return;
116 }
117
118 wallclock_secs = (real_time_ts.tv_sec - boot_time_ts.tv_sec) +
119 (real_time_ts.tv_nsec - boot_time_ts.tv_nsec + nsecs) /
120 1000000000;
121
122
123 if (!localtime_r(&wallclock_secs, &load_tm)) {
124 snprintf(buf, size, "%llu", nsecs / 1000000000);
125 return;
126 }
127
128 if (json_output)
129 strftime(buf, size, "%s", &load_tm);
130 else
131 strftime(buf, size, "%FT%T%z", &load_tm);
132 }
133
134 static void show_prog_maps(int fd, __u32 num_maps)
135 {
136 struct bpf_prog_info info = {};
137 __u32 len = sizeof(info);
138 __u32 map_ids[num_maps];
139 unsigned int i;
140 int err;
141
142 info.nr_map_ids = num_maps;
143 info.map_ids = ptr_to_u64(map_ids);
144
145 err = bpf_obj_get_info_by_fd(fd, &info, &len);
146 if (err || !info.nr_map_ids)
147 return;
148
149 if (json_output) {
150 jsonw_name(json_wtr, "map_ids");
151 jsonw_start_array(json_wtr);
152 for (i = 0; i < info.nr_map_ids; i++)
153 jsonw_uint(json_wtr, map_ids[i]);
154 jsonw_end_array(json_wtr);
155 } else {
156 printf(" map_ids ");
157 for (i = 0; i < info.nr_map_ids; i++)
158 printf("%u%s", map_ids[i],
159 i == info.nr_map_ids - 1 ? "" : ",");
160 }
161 }
162
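/* Scan the maps referenced by the program for its ".rodata" array map
 * (single entry, int key, BTF-described value) and return a malloc()ed
 * copy of that entry, filling *map_info on success. Returns NULL if no
 * such map is found or on error; the caller frees the returned buffer.
 */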
163 static void *find_metadata(int prog_fd, struct bpf_map_info *map_info)
164 {
165 struct bpf_prog_info prog_info;
166 __u32 prog_info_len;
167 __u32 map_info_len;
168 void *value = NULL;
169 __u32 *map_ids;
170 int nr_maps;
171 int key = 0;
172 int map_fd;
173 int ret;
174 __u32 i;
175
176 memset(&prog_info, 0, sizeof(prog_info));
177 prog_info_len = sizeof(prog_info);
178 ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
179 if (ret)
180 return NULL;
181
182 if (!prog_info.nr_map_ids)
183 return NULL;
184
185 map_ids = calloc(prog_info.nr_map_ids, sizeof(__u32));
186 if (!map_ids)
187 return NULL;
188
189 nr_maps = prog_info.nr_map_ids;
190 memset(&prog_info, 0, sizeof(prog_info));
191 prog_info.nr_map_ids = nr_maps;
192 prog_info.map_ids = ptr_to_u64(map_ids);
193 prog_info_len = sizeof(prog_info);
194
195 ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
196 if (ret)
197 goto free_map_ids;
198
199 for (i = 0; i < prog_info.nr_map_ids; i++) {
200 map_fd = bpf_map_get_fd_by_id(map_ids[i]);
201 if (map_fd < 0)
202 goto free_map_ids;
203
204 memset(map_info, 0, sizeof(*map_info));
205 map_info_len = sizeof(*map_info);
206 ret = bpf_obj_get_info_by_fd(map_fd, map_info, &map_info_len);
207 if (ret < 0) {
208 close(map_fd);
209 goto free_map_ids;
210 }
211
212 if (map_info->type != BPF_MAP_TYPE_ARRAY ||
213 map_info->key_size != sizeof(int) ||
214 map_info->max_entries != 1 ||
215 !map_info->btf_value_type_id ||
216 !strstr(map_info->name, ".rodata")) {
217 close(map_fd);
218 continue;
219 }
220
221 value = malloc(map_info->value_size);
222 if (!value) {
223 close(map_fd);
224 goto free_map_ids;
225 }
226
227 if (bpf_map_lookup_elem(map_fd, &key, value)) {
228 close(map_fd);
229 free(value);
230 value = NULL;
231 goto free_map_ids;
232 }
233
234 close(map_fd);
235 break;
236 }
237
238 free_map_ids:
239 free(map_ids);
240 return value;
241 }
242
243 static bool has_metadata_prefix(const char *s)
244 {
245 return strncmp(s, BPF_METADATA_PREFIX, BPF_METADATA_PREFIX_LEN) == 0;
246 }
247
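/* Print the variables from the program's .rodata section whose names start
 * with "bpf_metadata_", using BTF to render their values. Shared by the
 * JSON and plain-text "show" paths.
 */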
248 static void show_prog_metadata(int fd, __u32 num_maps)
249 {
250 const struct btf_type *t_datasec, *t_var;
251 struct bpf_map_info map_info;
252 struct btf_var_secinfo *vsi;
253 bool printed_header = false;
254 unsigned int i, vlen;
255 void *value = NULL;
256 const char *name;
257 struct btf *btf;
258 int err;
259
260 if (!num_maps)
261 return;
262
263 memset(&map_info, 0, sizeof(map_info));
264 value = find_metadata(fd, &map_info);
265 if (!value)
266 return;
267
268 btf = btf__load_from_kernel_by_id(map_info.btf_id);
269 if (libbpf_get_error(btf))
270 goto out_free;
271
272 t_datasec = btf__type_by_id(btf, map_info.btf_value_type_id);
273 if (!btf_is_datasec(t_datasec))
274 goto out_free;
275
276 vlen = btf_vlen(t_datasec);
277 vsi = btf_var_secinfos(t_datasec);
278
279 /* We don't proceed to check the kinds of the elements of the DATASEC.
280 * The verifier enforces them to be BTF_KIND_VAR.
281 */
282
283 if (json_output) {
284 struct btf_dumper d = {
285 .btf = btf,
286 .jw = json_wtr,
287 .is_plain_text = false,
288 };
289
290 for (i = 0; i < vlen; i++, vsi++) {
291 t_var = btf__type_by_id(btf, vsi->type);
292 name = btf__name_by_offset(btf, t_var->name_off);
293
294 if (!has_metadata_prefix(name))
295 continue;
296
297 if (!printed_header) {
298 jsonw_name(json_wtr, "metadata");
299 jsonw_start_object(json_wtr);
300 printed_header = true;
301 }
302
303 jsonw_name(json_wtr, name + BPF_METADATA_PREFIX_LEN);
304 err = btf_dumper_type(&d, t_var->type, value + vsi->offset);
305 if (err) {
306 p_err("btf dump failed: %d", err);
307 break;
308 }
309 }
310 if (printed_header)
311 jsonw_end_object(json_wtr);
312 } else {
313 json_writer_t *btf_wtr;
314 struct btf_dumper d = {
315 .btf = btf,
316 .is_plain_text = true,
317 };
318
319 for (i = 0; i < vlen; i++, vsi++) {
320 t_var = btf__type_by_id(btf, vsi->type);
321 name = btf__name_by_offset(btf, t_var->name_off);
322
323 if (!has_metadata_prefix(name))
324 continue;
325
326 if (!printed_header) {
327 printf("\tmetadata:");
328
329 btf_wtr = jsonw_new(stdout);
330 if (!btf_wtr) {
331 p_err("jsonw alloc failed");
332 goto out_free;
333 }
334 d.jw = btf_wtr;
335
336 printed_header = true;
337 }
338
339 printf("\n\t\t%s = ", name + BPF_METADATA_PREFIX_LEN);
340
341 jsonw_reset(btf_wtr);
342 err = btf_dumper_type(&d, t_var->type, value + vsi->offset);
343 if (err) {
344 p_err("btf dump failed: %d", err);
345 break;
346 }
347 }
348 if (printed_header)
349 jsonw_destroy(&btf_wtr);
350 }
351
352 out_free:
353 btf__free(btf);
354 free(value);
355 }
356
357 static void print_prog_header_json(struct bpf_prog_info *info)
358 {
359 jsonw_uint_field(json_wtr, "id", info->id);
360 if (info->type < ARRAY_SIZE(prog_type_name))
361 jsonw_string_field(json_wtr, "type",
362 prog_type_name[info->type]);
363 else
364 jsonw_uint_field(json_wtr, "type", info->type);
365
366 if (*info->name)
367 jsonw_string_field(json_wtr, "name", info->name);
368
369 jsonw_name(json_wtr, "tag");
370 jsonw_printf(json_wtr, "\"" BPF_TAG_FMT "\"",
371 info->tag[0], info->tag[1], info->tag[2], info->tag[3],
372 info->tag[4], info->tag[5], info->tag[6], info->tag[7]);
373
374 jsonw_bool_field(json_wtr, "gpl_compatible", info->gpl_compatible);
375 if (info->run_time_ns) {
376 jsonw_uint_field(json_wtr, "run_time_ns", info->run_time_ns);
377 jsonw_uint_field(json_wtr, "run_cnt", info->run_cnt);
378 }
379 if (info->recursion_misses)
380 jsonw_uint_field(json_wtr, "recursion_misses", info->recursion_misses);
381 }
382
383 static void print_prog_json(struct bpf_prog_info *info, int fd)
384 {
385 char *memlock;
386
387 jsonw_start_object(json_wtr);
388 print_prog_header_json(info);
389 print_dev_json(info->ifindex, info->netns_dev, info->netns_ino);
390
391 if (info->load_time) {
392 char buf[32];
393
394 print_boot_time(info->load_time, buf, sizeof(buf));
395
396 /* Piggy back on load_time, since 0 uid is a valid one */
397 jsonw_name(json_wtr, "loaded_at");
398 jsonw_printf(json_wtr, "%s", buf);
399 jsonw_uint_field(json_wtr, "uid", info->created_by_uid);
400 }
401
402 jsonw_uint_field(json_wtr, "bytes_xlated", info->xlated_prog_len);
403
404 if (info->jited_prog_len) {
405 jsonw_bool_field(json_wtr, "jited", true);
406 jsonw_uint_field(json_wtr, "bytes_jited", info->jited_prog_len);
407 } else {
408 jsonw_bool_field(json_wtr, "jited", false);
409 }
410
411 memlock = get_fdinfo(fd, "memlock");
412 if (memlock)
413 jsonw_int_field(json_wtr, "bytes_memlock", atoi(memlock));
414 free(memlock);
415
416 if (info->nr_map_ids)
417 show_prog_maps(fd, info->nr_map_ids);
418
419 if (info->btf_id)
420 jsonw_int_field(json_wtr, "btf_id", info->btf_id);
421
422 if (!hashmap__empty(prog_table)) {
423 struct hashmap_entry *entry;
424
425 jsonw_name(json_wtr, "pinned");
426 jsonw_start_array(json_wtr);
427 hashmap__for_each_key_entry(prog_table, entry,
428 u32_as_hash_field(info->id))
429 jsonw_string(json_wtr, entry->value);
430 jsonw_end_array(json_wtr);
431 }
432
433 emit_obj_refs_json(refs_table, info->id, json_wtr);
434
435 show_prog_metadata(fd, info->nr_map_ids);
436
437 jsonw_end_object(json_wtr);
438 }
439
440 static void print_prog_header_plain(struct bpf_prog_info *info)
441 {
442 printf("%u: ", info->id);
443 if (info->type < ARRAY_SIZE(prog_type_name))
444 printf("%s ", prog_type_name[info->type]);
445 else
446 printf("type %u ", info->type);
447
448 if (*info->name)
449 printf("name %s ", info->name);
450
451 printf("tag ");
452 fprint_hex(stdout, info->tag, BPF_TAG_SIZE, "");
453 print_dev_plain(info->ifindex, info->netns_dev, info->netns_ino);
454 printf("%s", info->gpl_compatible ? " gpl" : "");
455 if (info->run_time_ns)
456 printf(" run_time_ns %lld run_cnt %lld",
457 info->run_time_ns, info->run_cnt);
458 if (info->recursion_misses)
459 printf(" recursion_misses %lld", info->recursion_misses);
460 printf("\n");
461 }
462
463 static void print_prog_plain(struct bpf_prog_info *info, int fd)
464 {
465 char *memlock;
466
467 print_prog_header_plain(info);
468
469 if (info->load_time) {
470 char buf[32];
471
472 print_boot_time(info->load_time, buf, sizeof(buf));
473
474 /* Piggy back on load_time, since 0 uid is a valid one */
475 printf("\tloaded_at %s uid %u\n", buf, info->created_by_uid);
476 }
477
478 printf("\txlated %uB", info->xlated_prog_len);
479
480 if (info->jited_prog_len)
481 printf(" jited %uB", info->jited_prog_len);
482 else
483 printf(" not jited");
484
485 memlock = get_fdinfo(fd, "memlock");
486 if (memlock)
487 printf(" memlock %sB", memlock);
488 free(memlock);
489
490 if (info->nr_map_ids)
491 show_prog_maps(fd, info->nr_map_ids);
492
493 if (!hashmap__empty(prog_table)) {
494 struct hashmap_entry *entry;
495
496 hashmap__for_each_key_entry(prog_table, entry,
497 u32_as_hash_field(info->id))
498 printf("\n\tpinned %s", (char *)entry->value);
499 }
500
501 if (info->btf_id)
502 printf("\n\tbtf_id %d", info->btf_id);
503
504 emit_obj_refs_plain(refs_table, info->id, "\n\tpids ");
505
506 printf("\n");
507
508 show_prog_metadata(fd, info->nr_map_ids);
509 }
510
511 static int show_prog(int fd)
512 {
513 struct bpf_prog_info info = {};
514 __u32 len = sizeof(info);
515 int err;
516
517 err = bpf_obj_get_info_by_fd(fd, &info, &len);
518 if (err) {
519 p_err("can't get prog info: %s", strerror(errno));
520 return -1;
521 }
522
523 if (json_output)
524 print_prog_json(&info, fd);
525 else
526 print_prog_plain(&info, fd);
527
528 return 0;
529 }
530
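/* Show only the programs matching the PROG specification given on the
 * command line (by id, tag, name or pinned path), instead of iterating
 * over all loaded programs.
 */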
531 static int do_show_subset(int argc, char **argv)
532 {
533 int *fds = NULL;
534 int nb_fds, i;
535 int err = -1;
536
537 fds = malloc(sizeof(int));
538 if (!fds) {
539 p_err("mem alloc failed");
540 return -1;
541 }
542 nb_fds = prog_parse_fds(&argc, &argv, &fds);
543 if (nb_fds < 1)
544 goto exit_free;
545
546 if (json_output && nb_fds > 1)
547 jsonw_start_array(json_wtr); /* root array */
548 for (i = 0; i < nb_fds; i++) {
549 err = show_prog(fds[i]);
550 if (err) {
551 for (; i < nb_fds; i++)
552 close(fds[i]);
553 break;
554 }
555 close(fds[i]);
556 }
557 if (json_output && nb_fds > 1)
558 jsonw_end_array(json_wtr); /* root array */
559
560 exit_free:
561 free(fds);
562 return err;
563 }
564
565 static int do_show(int argc, char **argv)
566 {
567 __u32 id = 0;
568 int err;
569 int fd;
570
571 if (show_pinned) {
572 prog_table = hashmap__new(hash_fn_for_key_as_id,
573 equal_fn_for_key_as_id, NULL);
574 if (!prog_table) {
575 p_err("failed to create hashmap for pinned paths");
576 return -1;
577 }
578 build_pinned_obj_table(prog_table, BPF_OBJ_PROG);
579 }
580 build_obj_refs_table(&refs_table, BPF_OBJ_PROG);
581
582 if (argc == 2)
583 return do_show_subset(argc, argv);
584
585 if (argc)
586 return BAD_ARG();
587
588 if (json_output)
589 jsonw_start_array(json_wtr);
590 while (true) {
591 err = bpf_prog_get_next_id(id, &id);
592 if (err) {
593 if (errno == ENOENT) {
594 err = 0;
595 break;
596 }
597 p_err("can't get next program: %s%s", strerror(errno),
598 errno == EINVAL ? " -- kernel too old?" : "");
599 err = -1;
600 break;
601 }
602
603 fd = bpf_prog_get_fd_by_id(id);
604 if (fd < 0) {
605 if (errno == ENOENT)
606 continue;
607 p_err("can't get prog by id (%u): %s",
608 id, strerror(errno));
609 err = -1;
610 break;
611 }
612
613 err = show_prog(fd);
614 close(fd);
615 if (err)
616 break;
617 }
618
619 if (json_output)
620 jsonw_end_array(json_wtr);
621
622 delete_obj_refs_table(refs_table);
623
624 if (show_pinned)
625 delete_pinned_obj_table(prog_table);
626
627 return err;
628 }
629
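/* Dump a single program's instructions: write the raw bytes to a file,
 * disassemble the JIT image (per sub-program when function lengths are
 * available), render the control-flow graph, or print the xlated
 * instructions, depending on the requested mode and options.
 */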
630 static int
631 prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
632 char *filepath, bool opcodes, bool visual, bool linum)
633 {
634 struct bpf_prog_linfo *prog_linfo = NULL;
635 const char *disasm_opt = NULL;
636 struct dump_data dd = {};
637 void *func_info = NULL;
638 struct btf *btf = NULL;
639 char func_sig[1024];
640 unsigned char *buf;
641 __u32 member_len;
642 ssize_t n;
643 int fd;
644
645 if (mode == DUMP_JITED) {
646 if (info->jited_prog_len == 0 || !info->jited_prog_insns) {
647 p_info("no instructions returned");
648 return -1;
649 }
650 buf = u64_to_ptr(info->jited_prog_insns);
651 member_len = info->jited_prog_len;
652 } else { /* DUMP_XLATED */
653 if (info->xlated_prog_len == 0 || !info->xlated_prog_insns) {
654 p_err("error retrieving insn dump: kernel.kptr_restrict set?");
655 return -1;
656 }
657 buf = u64_to_ptr(info->xlated_prog_insns);
658 member_len = info->xlated_prog_len;
659 }
660
661 if (info->btf_id) {
662 btf = btf__load_from_kernel_by_id(info->btf_id);
663 if (libbpf_get_error(btf)) {
664 p_err("failed to get btf");
665 return -1;
666 }
667 }
668
669 func_info = u64_to_ptr(info->func_info);
670
671 if (info->nr_line_info) {
672 prog_linfo = bpf_prog_linfo__new(info);
673 if (!prog_linfo)
674 p_info("error in processing bpf_line_info. continue without it.");
675 }
676
677 if (filepath) {
678 fd = open(filepath, O_WRONLY | O_CREAT | O_TRUNC, 0600);
679 if (fd < 0) {
680 p_err("can't open file %s: %s", filepath,
681 strerror(errno));
682 return -1;
683 }
684
685 n = write(fd, buf, member_len);
686 close(fd);
687 if (n != (ssize_t)member_len) {
688 p_err("error writing output file: %s",
689 n < 0 ? strerror(errno) : "short write");
690 return -1;
691 }
692
693 if (json_output)
694 jsonw_null(json_wtr);
695 } else if (mode == DUMP_JITED) {
696 const char *name = NULL;
697
698 if (info->ifindex) {
699 name = ifindex_to_bfd_params(info->ifindex,
700 info->netns_dev,
701 info->netns_ino,
702 &disasm_opt);
703 if (!name)
704 return -1;
705 }
706
707 if (info->nr_jited_func_lens && info->jited_func_lens) {
708 struct kernel_sym *sym = NULL;
709 struct bpf_func_info *record;
710 char sym_name[SYM_MAX_NAME];
711 unsigned char *img = buf;
712 __u64 *ksyms = NULL;
713 __u32 *lens;
714 __u32 i;
715 if (info->nr_jited_ksyms) {
716 kernel_syms_load(&dd);
717 ksyms = u64_to_ptr(info->jited_ksyms);
718 }
719
720 if (json_output)
721 jsonw_start_array(json_wtr);
722
723 lens = u64_to_ptr(info->jited_func_lens);
724 for (i = 0; i < info->nr_jited_func_lens; i++) {
725 if (ksyms) {
726 sym = kernel_syms_search(&dd, ksyms[i]);
727 if (sym)
728 sprintf(sym_name, "%s", sym->name);
729 else
730 sprintf(sym_name, "0x%016llx", ksyms[i]);
731 } else {
732 strcpy(sym_name, "unknown");
733 }
734
735 if (func_info) {
736 record = func_info + i * info->func_info_rec_size;
737 btf_dumper_type_only(btf, record->type_id,
738 func_sig,
739 sizeof(func_sig));
740 }
741
742 if (json_output) {
743 jsonw_start_object(json_wtr);
744 if (func_info && func_sig[0] != '\0') {
745 jsonw_name(json_wtr, "proto");
746 jsonw_string(json_wtr, func_sig);
747 }
748 jsonw_name(json_wtr, "name");
749 jsonw_string(json_wtr, sym_name);
750 jsonw_name(json_wtr, "insns");
751 } else {
752 if (func_info && func_sig[0] != '\0')
753 printf("%s:\n", func_sig);
754 printf("%s:\n", sym_name);
755 }
756
757 disasm_print_insn(img, lens[i], opcodes,
758 name, disasm_opt, btf,
759 prog_linfo, ksyms ? ksyms[i] : 0, i,
760 linum);
761
762 img += lens[i];
763
764 if (json_output)
765 jsonw_end_object(json_wtr);
766 else
767 printf("\n");
768 }
769
770 if (json_output)
771 jsonw_end_array(json_wtr);
772 } else {
773 disasm_print_insn(buf, member_len, opcodes, name,
774 disasm_opt, btf, NULL, 0, 0, false);
775 }
776 } else if (visual) {
777 if (json_output)
778 jsonw_null(json_wtr);
779 else
780 dump_xlated_cfg(buf, member_len);
781 } else {
782 kernel_syms_load(&dd);
783 dd.nr_jited_ksyms = info->nr_jited_ksyms;
784 dd.jited_ksyms = u64_to_ptr(info->jited_ksyms);
785 dd.btf = btf;
786 dd.func_info = func_info;
787 dd.finfo_rec_size = info->func_info_rec_size;
788 dd.prog_linfo = prog_linfo;
789
790 if (json_output)
791 dump_xlated_json(&dd, buf, member_len, opcodes,
792 linum);
793 else
794 dump_xlated_plain(&dd, buf, member_len, opcodes,
795 linum);
796 kernel_syms_destroy(&dd);
797 }
798
799 btf__free(btf);
800
801 return 0;
802 }
803
804 static int do_dump(int argc, char **argv)
805 {
806 struct bpf_prog_info_linear *info_linear;
807 char *filepath = NULL;
808 bool opcodes = false;
809 bool visual = false;
810 enum dump_mode mode;
811 bool linum = false;
812 int *fds = NULL;
813 int nb_fds, i = 0;
814 int err = -1;
815 __u64 arrays;
816
817 if (is_prefix(*argv, "jited")) {
818 if (disasm_init())
819 return -1;
820 mode = DUMP_JITED;
821 } else if (is_prefix(*argv, "xlated")) {
822 mode = DUMP_XLATED;
823 } else {
824 p_err("expected 'xlated' or 'jited', got: %s", *argv);
825 return -1;
826 }
827 NEXT_ARG();
828
829 if (argc < 2)
830 usage();
831
832 fds = malloc(sizeof(int));
833 if (!fds) {
834 p_err("mem alloc failed");
835 return -1;
836 }
837 nb_fds = prog_parse_fds(&argc, &argv, &fds);
838 if (nb_fds < 1)
839 goto exit_free;
840
841 if (is_prefix(*argv, "file")) {
842 NEXT_ARG();
843 if (!argc) {
844 p_err("expected file path");
845 goto exit_close;
846 }
847 if (nb_fds > 1) {
848 p_err("several programs matched");
849 goto exit_close;
850 }
851
852 filepath = *argv;
853 NEXT_ARG();
854 } else if (is_prefix(*argv, "opcodes")) {
855 opcodes = true;
856 NEXT_ARG();
857 } else if (is_prefix(*argv, "visual")) {
858 if (nb_fds > 1) {
859 p_err("several programs matched");
860 goto exit_close;
861 }
862
863 visual = true;
864 NEXT_ARG();
865 } else if (is_prefix(*argv, "linum")) {
866 linum = true;
867 NEXT_ARG();
868 }
869
870 if (argc) {
871 usage();
872 goto exit_close;
873 }
874
875 if (mode == DUMP_JITED)
876 arrays = 1UL << BPF_PROG_INFO_JITED_INSNS;
877 else
878 arrays = 1UL << BPF_PROG_INFO_XLATED_INSNS;
879
880 arrays |= 1UL << BPF_PROG_INFO_JITED_KSYMS;
881 arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
882 arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
883 arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
884 arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;
885
886 if (json_output && nb_fds > 1)
887 jsonw_start_array(json_wtr); /* root array */
888 for (i = 0; i < nb_fds; i++) {
889 info_linear = bpf_program__get_prog_info_linear(fds[i], arrays);
890 if (IS_ERR_OR_NULL(info_linear)) {
891 p_err("can't get prog info: %s", strerror(errno));
892 break;
893 }
894
895 if (json_output && nb_fds > 1) {
896 jsonw_start_object(json_wtr); /* prog object */
897 print_prog_header_json(&info_linear->info);
898 jsonw_name(json_wtr, "insns");
899 } else if (nb_fds > 1) {
900 print_prog_header_plain(&info_linear->info);
901 }
902
903 err = prog_dump(&info_linear->info, mode, filepath, opcodes,
904 visual, linum);
905
906 if (json_output && nb_fds > 1)
907 jsonw_end_object(json_wtr); /* prog object */
908 else if (i != nb_fds - 1 && nb_fds > 1)
909 printf("\n");
910
911 free(info_linear);
912 if (err)
913 break;
914 close(fds[i]);
915 }
916 if (json_output && nb_fds > 1)
917 jsonw_end_array(json_wtr); /* root array */
918
919 exit_close:
920 for (; i < nb_fds; i++)
921 close(fds[i]);
922 exit_free:
923 free(fds);
924 return err;
925 }
926
927 static int do_pin(int argc, char **argv)
928 {
929 int err;
930
931 err = do_pin_any(argc, argv, prog_parse_fd);
932 if (!err && json_output)
933 jsonw_null(json_wtr);
934 return err;
935 }
936
937 struct map_replace {
938 int idx;
939 int fd;
940 char *name;
941 };
942
943 static int map_replace_compar(const void *p1, const void *p2)
944 {
945 const struct map_replace *a = p1, *b = p2;
946
947 return a->idx - b->idx;
948 }
949
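/* Parse "PROG ATTACH_TYPE [MAP]" for the attach/detach commands. The
 * flow_dissector attach type takes no map, so *mapfd is set to 0 in that
 * case.
 */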
950 static int parse_attach_detach_args(int argc, char **argv, int *progfd,
951 enum bpf_attach_type *attach_type,
952 int *mapfd)
953 {
954 if (!REQ_ARGS(3))
955 return -EINVAL;
956
957 *progfd = prog_parse_fd(&argc, &argv);
958 if (*progfd < 0)
959 return *progfd;
960
961 *attach_type = parse_attach_type(*argv);
962 if (*attach_type == __MAX_BPF_ATTACH_TYPE) {
963 p_err("invalid attach/detach type");
964 return -EINVAL;
965 }
966
967 if (*attach_type == BPF_FLOW_DISSECTOR) {
968 *mapfd = 0;
969 return 0;
970 }
971
972 NEXT_ARG();
973 if (!REQ_ARGS(2))
974 return -EINVAL;
975
976 *mapfd = map_parse_fd(&argc, &argv);
977 if (*mapfd < 0)
978 return *mapfd;
979
980 return 0;
981 }
982
983 static int do_attach(int argc, char **argv)
984 {
985 enum bpf_attach_type attach_type;
986 int err, progfd;
987 int mapfd;
988
989 err = parse_attach_detach_args(argc, argv,
990 &progfd, &attach_type, &mapfd);
991 if (err)
992 return err;
993
994 err = bpf_prog_attach(progfd, mapfd, attach_type, 0);
995 if (err) {
996 p_err("failed prog attach to map");
997 return -EINVAL;
998 }
999
1000 if (json_output)
1001 jsonw_null(json_wtr);
1002 return 0;
1003 }
1004
1005 static int do_detach(int argc, char **argv)
1006 {
1007 enum bpf_attach_type attach_type;
1008 int err, progfd;
1009 int mapfd;
1010
1011 err = parse_attach_detach_args(argc, argv,
1012 &progfd, &attach_type, &mapfd);
1013 if (err)
1014 return err;
1015
1016 err = bpf_prog_detach2(progfd, mapfd, attach_type);
1017 if (err) {
1018 p_err("failed prog detach from map");
1019 return -EINVAL;
1020 }
1021
1022 if (json_output)
1023 jsonw_null(json_wtr);
1024 return 0;
1025 }
1026
1027 static int check_single_stdin(char *file_data_in, char *file_ctx_in)
1028 {
1029 if (file_data_in && file_ctx_in &&
1030 !strcmp(file_data_in, "-") && !strcmp(file_ctx_in, "-")) {
1031 p_err("cannot use standard input for both data_in and ctx_in");
1032 return -1;
1033 }
1034
1035 return 0;
1036 }
1037
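/* Read data_in/ctx_in for "prog run" from a file, or from stdin when fname
 * is "-", growing the buffer as needed. On success *data_ptr owns the
 * buffer and *size holds the number of bytes read.
 */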
1038 static int get_run_data(const char *fname, void **data_ptr, unsigned int *size)
1039 {
1040 size_t block_size = 256;
1041 size_t buf_size = block_size;
1042 size_t nb_read = 0;
1043 void *tmp;
1044 FILE *f;
1045
1046 if (!fname) {
1047 *data_ptr = NULL;
1048 *size = 0;
1049 return 0;
1050 }
1051
1052 if (!strcmp(fname, "-"))
1053 f = stdin;
1054 else
1055 f = fopen(fname, "r");
1056 if (!f) {
1057 p_err("failed to open %s: %s", fname, strerror(errno));
1058 return -1;
1059 }
1060
1061 *data_ptr = malloc(block_size);
1062 if (!*data_ptr) {
1063 p_err("failed to allocate memory for data_in/ctx_in: %s",
1064 strerror(errno));
1065 goto err_fclose;
1066 }
1067
1068 while ((nb_read += fread(*data_ptr + nb_read, 1, block_size, f))) {
1069 if (feof(f))
1070 break;
1071 if (ferror(f)) {
1072 p_err("failed to read data_in/ctx_in from %s: %s",
1073 fname, strerror(errno));
1074 goto err_free;
1075 }
1076 if (nb_read > buf_size - block_size) {
1077 if (buf_size == UINT32_MAX) {
1078 p_err("data_in/ctx_in is too long (max: %d)",
1079 UINT32_MAX);
1080 goto err_free;
1081 }
1082 /* No space for fread()-ing next chunk; realloc() */
1083 buf_size *= 2;
1084 tmp = realloc(*data_ptr, buf_size);
1085 if (!tmp) {
1086 p_err("failed to reallocate data_in/ctx_in: %s",
1087 strerror(errno));
1088 goto err_free;
1089 }
1090 *data_ptr = tmp;
1091 }
1092 }
1093 if (f != stdin)
1094 fclose(f);
1095
1096 *size = nb_read;
1097 return 0;
1098
1099 err_free:
1100 free(*data_ptr);
1101 *data_ptr = NULL;
1102 err_fclose:
1103 if (f != stdin)
1104 fclose(f);
1105 return -1;
1106 }
1107
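/* Print a buffer as a hex dump: row offset, sixteen hex bytes grouped two
 * by two, then the ASCII characters with non-printable bytes shown as '.'.
 */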
1108 static void hex_print(void *data, unsigned int size, FILE *f)
1109 {
1110 size_t i, j;
1111 char c;
1112
1113 for (i = 0; i < size; i += 16) {
1114 /* Row offset */
1115 fprintf(f, "%07zx\t", i);
1116
1117 /* Hexadecimal values */
1118 for (j = i; j < i + 16 && j < size; j++)
1119 fprintf(f, "%02x%s", *(uint8_t *)(data + j),
1120 j % 2 ? " " : "");
1121 for (; j < i + 16; j++)
1122 fprintf(f, " %s", j % 2 ? " " : "");
1123
1124 /* ASCII values (if relevant), '.' otherwise */
1125 fprintf(f, "| ");
1126 for (j = i; j < i + 16 && j < size; j++) {
1127 c = *(char *)(data + j);
1128 if (c < ' ' || c > '~')
1129 c = '.';
1130 fprintf(f, "%c%s", c, j == i + 7 ? " " : "");
1131 }
1132
1133 fprintf(f, "\n");
1134 }
1135 }
1136
1137 static int
1138 print_run_output(void *data, unsigned int size, const char *fname,
1139 const char *json_key)
1140 {
1141 size_t nb_written;
1142 FILE *f;
1143
1144 if (!fname)
1145 return 0;
1146
1147 if (!strcmp(fname, "-")) {
1148 f = stdout;
1149 if (json_output) {
1150 jsonw_name(json_wtr, json_key);
1151 print_data_json(data, size);
1152 } else {
1153 hex_print(data, size, f);
1154 }
1155 return 0;
1156 }
1157
1158 f = fopen(fname, "w");
1159 if (!f) {
1160 p_err("failed to open %s: %s", fname, strerror(errno));
1161 return -1;
1162 }
1163
1164 nb_written = fwrite(data, 1, size, f);
1165 fclose(f);
1166 if (nb_written != size) {
1167 p_err("failed to write output data/ctx: %s", strerror(errno));
1168 return -1;
1169 }
1170
1171 return 0;
1172 }
1173
1174 static int alloc_run_data(void **data_ptr, unsigned int size_out)
1175 {
1176 *data_ptr = calloc(size_out, 1);
1177 if (!*data_ptr) {
1178 p_err("failed to allocate memory for output data/ctx: %s",
1179 strerror(errno));
1180 return -1;
1181 }
1182
1183 return 0;
1184 }
1185
1186 static int do_run(int argc, char **argv)
1187 {
1188 char *data_fname_in = NULL, *data_fname_out = NULL;
1189 char *ctx_fname_in = NULL, *ctx_fname_out = NULL;
1190 struct bpf_prog_test_run_attr test_attr = {0};
1191 const unsigned int default_size = SZ_32K;
1192 void *data_in = NULL, *data_out = NULL;
1193 void *ctx_in = NULL, *ctx_out = NULL;
1194 unsigned int repeat = 1;
1195 int fd, err;
1196
1197 if (!REQ_ARGS(4))
1198 return -1;
1199
1200 fd = prog_parse_fd(&argc, &argv);
1201 if (fd < 0)
1202 return -1;
1203
1204 while (argc) {
1205 if (detect_common_prefix(*argv, "data_in", "data_out",
1206 "data_size_out", NULL))
1207 return -1;
1208 if (detect_common_prefix(*argv, "ctx_in", "ctx_out",
1209 "ctx_size_out", NULL))
1210 return -1;
1211
1212 if (is_prefix(*argv, "data_in")) {
1213 NEXT_ARG();
1214 if (!REQ_ARGS(1))
1215 return -1;
1216
1217 data_fname_in = GET_ARG();
1218 if (check_single_stdin(data_fname_in, ctx_fname_in))
1219 return -1;
1220 } else if (is_prefix(*argv, "data_out")) {
1221 NEXT_ARG();
1222 if (!REQ_ARGS(1))
1223 return -1;
1224
1225 data_fname_out = GET_ARG();
1226 } else if (is_prefix(*argv, "data_size_out")) {
1227 char *endptr;
1228
1229 NEXT_ARG();
1230 if (!REQ_ARGS(1))
1231 return -1;
1232
1233 test_attr.data_size_out = strtoul(*argv, &endptr, 0);
1234 if (*endptr) {
1235 p_err("can't parse %s as output data size",
1236 *argv);
1237 return -1;
1238 }
1239 NEXT_ARG();
1240 } else if (is_prefix(*argv, "ctx_in")) {
1241 NEXT_ARG();
1242 if (!REQ_ARGS(1))
1243 return -1;
1244
1245 ctx_fname_in = GET_ARG();
1246 if (check_single_stdin(data_fname_in, ctx_fname_in))
1247 return -1;
1248 } else if (is_prefix(*argv, "ctx_out")) {
1249 NEXT_ARG();
1250 if (!REQ_ARGS(1))
1251 return -1;
1252
1253 ctx_fname_out = GET_ARG();
1254 } else if (is_prefix(*argv, "ctx_size_out")) {
1255 char *endptr;
1256
1257 NEXT_ARG();
1258 if (!REQ_ARGS(1))
1259 return -1;
1260
1261 test_attr.ctx_size_out = strtoul(*argv, &endptr, 0);
1262 if (*endptr) {
1263 p_err("can't parse %s as output context size",
1264 *argv);
1265 return -1;
1266 }
1267 NEXT_ARG();
1268 } else if (is_prefix(*argv, "repeat")) {
1269 char *endptr;
1270
1271 NEXT_ARG();
1272 if (!REQ_ARGS(1))
1273 return -1;
1274
1275 repeat = strtoul(*argv, &endptr, 0);
1276 if (*endptr) {
1277 p_err("can't parse %s as repeat number",
1278 *argv);
1279 return -1;
1280 }
1281 NEXT_ARG();
1282 } else {
1283 p_err("expected no more arguments, 'data_in', 'data_out', 'data_size_out', 'ctx_in', 'ctx_out', 'ctx_size_out' or 'repeat', got: '%s'?",
1284 *argv);
1285 return -1;
1286 }
1287 }
1288
1289 err = get_run_data(data_fname_in, &data_in, &test_attr.data_size_in);
1290 if (err)
1291 return -1;
1292
1293 if (data_in) {
1294 if (!test_attr.data_size_out)
1295 test_attr.data_size_out = default_size;
1296 err = alloc_run_data(&data_out, test_attr.data_size_out);
1297 if (err)
1298 goto free_data_in;
1299 }
1300
1301 err = get_run_data(ctx_fname_in, &ctx_in, &test_attr.ctx_size_in);
1302 if (err)
1303 goto free_data_out;
1304
1305 if (ctx_in) {
1306 if (!test_attr.ctx_size_out)
1307 test_attr.ctx_size_out = default_size;
1308 err = alloc_run_data(&ctx_out, test_attr.ctx_size_out);
1309 if (err)
1310 goto free_ctx_in;
1311 }
1312
1313 test_attr.prog_fd = fd;
1314 test_attr.repeat = repeat;
1315 test_attr.data_in = data_in;
1316 test_attr.data_out = data_out;
1317 test_attr.ctx_in = ctx_in;
1318 test_attr.ctx_out = ctx_out;
1319
1320 err = bpf_prog_test_run_xattr(&test_attr);
1321 if (err) {
1322 p_err("failed to run program: %s", strerror(errno));
1323 goto free_ctx_out;
1324 }
1325
1326 err = 0;
1327
1328 if (json_output)
1329 jsonw_start_object(json_wtr); /* root */
1330
1331 /* Do not exit on errors occurring when printing output data/context,
1332 * we still want to print return value and duration for program run.
1333 */
1334 if (test_attr.data_size_out)
1335 err += print_run_output(test_attr.data_out,
1336 test_attr.data_size_out,
1337 data_fname_out, "data_out");
1338 if (test_attr.ctx_size_out)
1339 err += print_run_output(test_attr.ctx_out,
1340 test_attr.ctx_size_out,
1341 ctx_fname_out, "ctx_out");
1342
1343 if (json_output) {
1344 jsonw_uint_field(json_wtr, "retval", test_attr.retval);
1345 jsonw_uint_field(json_wtr, "duration", test_attr.duration);
1346 jsonw_end_object(json_wtr); /* root */
1347 } else {
1348 fprintf(stdout, "Return value: %u, duration%s: %uns\n",
1349 test_attr.retval,
1350 repeat > 1 ? " (average)" : "", test_attr.duration);
1351 }
1352
1353 free_ctx_out:
1354 free(ctx_out);
1355 free_ctx_in:
1356 free(ctx_in);
1357 free_data_out:
1358 free(data_out);
1359 free_data_in:
1360 free(data_in);
1361
1362 return err;
1363 }
1364
1365 static int
1366 get_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
1367 enum bpf_attach_type *expected_attach_type)
1368 {
1369 libbpf_print_fn_t print_backup;
1370 int ret;
1371
1372 ret = libbpf_prog_type_by_name(name, prog_type, expected_attach_type);
1373 if (!ret)
1374 return ret;
1375
1376 /* libbpf_prog_type_by_name() failed, let's re-run with debug level */
1377 print_backup = libbpf_set_print(print_all_levels);
1378 ret = libbpf_prog_type_by_name(name, prog_type, expected_attach_type);
1379 libbpf_set_print(print_backup);
1380
1381 return ret;
1382 }
1383
1384 static int load_with_options(int argc, char **argv, bool first_prog_only)
1385 {
1386 enum bpf_prog_type common_prog_type = BPF_PROG_TYPE_UNSPEC;
1387 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts,
1388 .relaxed_maps = relaxed_maps,
1389 );
1390 struct bpf_object_load_attr load_attr = { 0 };
1391 enum bpf_attach_type expected_attach_type;
1392 struct map_replace *map_replace = NULL;
1393 struct bpf_program *prog = NULL, *pos;
1394 unsigned int old_map_fds = 0;
1395 const char *pinmaps = NULL;
1396 struct bpf_object *obj;
1397 struct bpf_map *map;
1398 const char *pinfile;
1399 unsigned int i, j;
1400 __u32 ifindex = 0;
1401 const char *file;
1402 int idx, err;
1403
1404
1405 if (!REQ_ARGS(2))
1406 return -1;
1407 file = GET_ARG();
1408 pinfile = GET_ARG();
1409
1410 while (argc) {
1411 if (is_prefix(*argv, "type")) {
1412 char *type;
1413
1414 NEXT_ARG();
1415
1416 if (common_prog_type != BPF_PROG_TYPE_UNSPEC) {
1417 p_err("program type already specified");
1418 goto err_free_reuse_maps;
1419 }
1420 if (!REQ_ARGS(1))
1421 goto err_free_reuse_maps;
1422
1423 /* Put a '/' at the end of type to appease libbpf */
1424 type = malloc(strlen(*argv) + 2);
1425 if (!type) {
1426 p_err("mem alloc failed");
1427 goto err_free_reuse_maps;
1428 }
1429 *type = 0;
1430 strcat(type, *argv);
1431 strcat(type, "/");
1432
1433 err = get_prog_type_by_name(type, &common_prog_type,
1434 &expected_attach_type);
1435 free(type);
1436 if (err < 0)
1437 goto err_free_reuse_maps;
1438
1439 NEXT_ARG();
1440 } else if (is_prefix(*argv, "map")) {
1441 void *new_map_replace;
1442 char *endptr, *name;
1443 int fd;
1444
1445 NEXT_ARG();
1446
1447 if (!REQ_ARGS(4))
1448 goto err_free_reuse_maps;
1449
1450 if (is_prefix(*argv, "idx")) {
1451 NEXT_ARG();
1452
1453 idx = strtoul(*argv, &endptr, 0);
1454 if (*endptr) {
1455 p_err("can't parse %s as IDX", *argv);
1456 goto err_free_reuse_maps;
1457 }
1458 name = NULL;
1459 } else if (is_prefix(*argv, "name")) {
1460 NEXT_ARG();
1461
1462 name = *argv;
1463 idx = -1;
1464 } else {
1465 p_err("expected 'idx' or 'name', got: '%s'?",
1466 *argv);
1467 goto err_free_reuse_maps;
1468 }
1469 NEXT_ARG();
1470
1471 fd = map_parse_fd(&argc, &argv);
1472 if (fd < 0)
1473 goto err_free_reuse_maps;
1474
1475 new_map_replace = reallocarray(map_replace,
1476 old_map_fds + 1,
1477 sizeof(*map_replace));
1478 if (!new_map_replace) {
1479 p_err("mem alloc failed");
1480 goto err_free_reuse_maps;
1481 }
1482 map_replace = new_map_replace;
1483
1484 map_replace[old_map_fds].idx = idx;
1485 map_replace[old_map_fds].name = name;
1486 map_replace[old_map_fds].fd = fd;
1487 old_map_fds++;
1488 } else if (is_prefix(*argv, "dev")) {
1489 NEXT_ARG();
1490
1491 if (ifindex) {
1492 p_err("offload device already specified");
1493 goto err_free_reuse_maps;
1494 }
1495 if (!REQ_ARGS(1))
1496 goto err_free_reuse_maps;
1497
1498 ifindex = if_nametoindex(*argv);
1499 if (!ifindex) {
1500 p_err("unrecognized netdevice '%s': %s",
1501 *argv, strerror(errno));
1502 goto err_free_reuse_maps;
1503 }
1504 NEXT_ARG();
1505 } else if (is_prefix(*argv, "pinmaps")) {
1506 NEXT_ARG();
1507
1508 if (!REQ_ARGS(1))
1509 goto err_free_reuse_maps;
1510
1511 pinmaps = GET_ARG();
1512 } else {
1513 p_err("expected no more arguments, 'type', 'map' or 'dev', got: '%s'?",
1514 *argv);
1515 goto err_free_reuse_maps;
1516 }
1517 }
1518
1519 set_max_rlimit();
1520
1521 obj = bpf_object__open_file(file, &open_opts);
1522 if (libbpf_get_error(obj)) {
1523 p_err("failed to open object file");
1524 goto err_free_reuse_maps;
1525 }
1526
1527 bpf_object__for_each_program(pos, obj) {
1528 enum bpf_prog_type prog_type = common_prog_type;
1529
1530 if (prog_type == BPF_PROG_TYPE_UNSPEC) {
1531 const char *sec_name = bpf_program__section_name(pos);
1532
1533 err = get_prog_type_by_name(sec_name, &prog_type,
1534 &expected_attach_type);
1535 if (err < 0)
1536 goto err_close_obj;
1537 }
1538
1539 bpf_program__set_ifindex(pos, ifindex);
1540 bpf_program__set_type(pos, prog_type);
1541 bpf_program__set_expected_attach_type(pos, expected_attach_type);
1542 }
1543
1544 qsort(map_replace, old_map_fds, sizeof(*map_replace),
1545 map_replace_compar);
1546
1547 /* After the sort maps by name will be first on the list, because they
1548 * have idx == -1. Resolve them.
1549 */
1550 j = 0;
1551 while (j < old_map_fds && map_replace[j].name) {
1552 i = 0;
1553 bpf_object__for_each_map(map, obj) {
1554 if (!strcmp(bpf_map__name(map), map_replace[j].name)) {
1555 map_replace[j].idx = i;
1556 break;
1557 }
1558 i++;
1559 }
1560 if (map_replace[j].idx == -1) {
1561 p_err("unable to find map '%s'", map_replace[j].name);
1562 goto err_close_obj;
1563 }
1564 j++;
1565 }
1566 /* Resort if any names were resolved */
1567 if (j)
1568 qsort(map_replace, old_map_fds, sizeof(*map_replace),
1569 map_replace_compar);
1570
1571 /* Set ifindex and name reuse */
1572 j = 0;
1573 idx = 0;
1574 bpf_object__for_each_map(map, obj) {
1575 if (!bpf_map__is_offload_neutral(map))
1576 bpf_map__set_ifindex(map, ifindex);
1577
1578 if (j < old_map_fds && idx == map_replace[j].idx) {
1579 err = bpf_map__reuse_fd(map, map_replace[j++].fd);
1580 if (err) {
1581 p_err("unable to set up map reuse: %d", err);
1582 goto err_close_obj;
1583 }
1584
1585 /* Next reuse wants to apply to the same map */
1586 if (j < old_map_fds && map_replace[j].idx == idx) {
1587 p_err("replacement for map idx %d specified more than once",
1588 idx);
1589 goto err_close_obj;
1590 }
1591 }
1592
1593 idx++;
1594 }
1595 if (j < old_map_fds) {
1596 p_err("map idx '%d' not used", map_replace[j].idx);
1597 goto err_close_obj;
1598 }
1599
1600 load_attr.obj = obj;
1601 if (verifier_logs)
1602 /* log_level1 + log_level2 + stats, but not stable UAPI */
1603 load_attr.log_level = 1 + 2 + 4;
1604
1605 err = bpf_object__load_xattr(&load_attr);
1606 if (err) {
1607 p_err("failed to load object file");
1608 goto err_close_obj;
1609 }
1610
1611 err = mount_bpffs_for_pin(pinfile);
1612 if (err)
1613 goto err_close_obj;
1614
1615 if (first_prog_only) {
1616 prog = bpf_object__next_program(obj, NULL);
1617 if (!prog) {
1618 p_err("object file doesn't contain any bpf program");
1619 goto err_close_obj;
1620 }
1621
1622 err = bpf_obj_pin(bpf_program__fd(prog), pinfile);
1623 if (err) {
1624 p_err("failed to pin program %s",
1625 bpf_program__section_name(prog));
1626 goto err_close_obj;
1627 }
1628 } else {
1629 err = bpf_object__pin_programs(obj, pinfile);
1630 if (err) {
1631 p_err("failed to pin all programs");
1632 goto err_close_obj;
1633 }
1634 }
1635
1636 if (pinmaps) {
1637 err = bpf_object__pin_maps(obj, pinmaps);
1638 if (err) {
1639 p_err("failed to pin all maps");
1640 goto err_unpin;
1641 }
1642 }
1643
1644 if (json_output)
1645 jsonw_null(json_wtr);
1646
1647 bpf_object__close(obj);
1648 for (i = 0; i < old_map_fds; i++)
1649 close(map_replace[i].fd);
1650 free(map_replace);
1651
1652 return 0;
1653
1654 err_unpin:
1655 if (first_prog_only)
1656 unlink(pinfile);
1657 else
1658 bpf_object__unpin_programs(obj, pinfile);
1659 err_close_obj:
1660 bpf_object__close(obj);
1661 err_free_reuse_maps:
1662 for (i = 0; i < old_map_fds; i++)
1663 close(map_replace[i].fd);
1664 free(map_replace);
1665 return -1;
1666 }
1667
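/* Count the file descriptors currently open in this process by listing
 * /proc/self/fd. Start at -3 to discount ".", ".." and the descriptor used
 * by opendir() itself.
 */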
1668 static int count_open_fds(void)
1669 {
1670 DIR *dp = opendir("/proc/self/fd");
1671 struct dirent *de;
1672 int cnt = -3;
1673
1674 if (!dp)
1675 return -1;
1676
1677 while ((de = readdir(dp)))
1678 cnt++;
1679
1680 closedir(dp);
1681 return cnt;
1682 }
1683
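/* Execute the generated loader program ("light skeleton") through
 * bpf_load_and_run(), reporting the error string and verifier log on
 * failure as well as any file descriptors leaked by the loader.
 */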
1684 static int try_loader(struct gen_loader_opts *gen)
1685 {
1686 struct bpf_load_and_run_opts opts = {};
1687 struct bpf_loader_ctx *ctx;
1688 int ctx_sz = sizeof(*ctx) + 64 * max(sizeof(struct bpf_map_desc),
1689 sizeof(struct bpf_prog_desc));
1690 int log_buf_sz = (1u << 24) - 1;
1691 int err, fds_before, fd_delta;
1692 char *log_buf;
1693
1694 ctx = alloca(ctx_sz);
1695 memset(ctx, 0, ctx_sz);
1696 ctx->sz = ctx_sz;
1697 ctx->log_level = 1;
1698 ctx->log_size = log_buf_sz;
1699 log_buf = malloc(log_buf_sz);
1700 if (!log_buf)
1701 return -ENOMEM;
1702 ctx->log_buf = (long) log_buf;
1703 opts.ctx = ctx;
1704 opts.data = gen->data;
1705 opts.data_sz = gen->data_sz;
1706 opts.insns = gen->insns;
1707 opts.insns_sz = gen->insns_sz;
1708 fds_before = count_open_fds();
1709 err = bpf_load_and_run(&opts);
1710 fd_delta = count_open_fds() - fds_before;
1711 if (err < 0) {
1712 fprintf(stderr, "err %d\n%s\n%s", err, opts.errstr, log_buf);
1713 if (fd_delta)
1714 fprintf(stderr, "loader prog leaked %d FDs\n",
1715 fd_delta);
1716 }
1717 free(log_buf);
1718 return err;
1719 }
1720
1721 static int do_loader(int argc, char **argv)
1722 {
1723 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts);
1724 DECLARE_LIBBPF_OPTS(gen_loader_opts, gen);
1725 struct bpf_object_load_attr load_attr = {};
1726 struct bpf_object *obj;
1727 const char *file;
1728 int err = 0;
1729
1730 if (!REQ_ARGS(1))
1731 return -1;
1732 file = GET_ARG();
1733
1734 obj = bpf_object__open_file(file, &open_opts);
1735 if (libbpf_get_error(obj)) {
1736 p_err("failed to open object file");
1737 goto err_close_obj;
1738 }
1739
1740 err = bpf_object__gen_loader(obj, &gen);
1741 if (err)
1742 goto err_close_obj;
1743
1744 load_attr.obj = obj;
1745 if (verifier_logs)
1746 /* log_level1 + log_level2 + stats, but not stable UAPI */
1747 load_attr.log_level = 1 + 2 + 4;
1748
1749 err = bpf_object__load_xattr(&load_attr);
1750 if (err) {
1751 p_err("failed to load object file");
1752 goto err_close_obj;
1753 }
1754
1755 if (verifier_logs) {
1756 struct dump_data dd = {};
1757
1758 kernel_syms_load(&dd);
1759 dump_xlated_plain(&dd, (void *)gen.insns, gen.insns_sz, false, false);
1760 kernel_syms_destroy(&dd);
1761 }
1762 err = try_loader(&gen);
1763 err_close_obj:
1764 bpf_object__close(obj);
1765 return err;
1766 }
1767
1768 static int do_load(int argc, char **argv)
1769 {
1770 if (use_loader)
1771 return do_loader(argc, argv);
1772 return load_with_options(argc, argv, true);
1773 }
1774
1775 static int do_loadall(int argc, char **argv)
1776 {
1777 return load_with_options(argc, argv, false);
1778 }
1779
1780 #ifdef BPFTOOL_WITHOUT_SKELETONS
1781
1782 static int do_profile(int argc, char **argv)
1783 {
1784 p_err("bpftool prog profile command is not supported. Please build bpftool with clang >= 10.0.0");
1785 return 0;
1786 }
1787
1788 #else /* BPFTOOL_WITHOUT_SKELETONS */
1789
1790 #include "profiler.skel.h"
1791
1792 struct profile_metric {
1793 const char *name;
1794 struct bpf_perf_event_value val;
1795 struct perf_event_attr attr;
1796 bool selected;
1797
1798 /* calculate ratios like instructions per cycle */
1799 const int ratio_metric; /* 0 for N/A, 1 for index 0 (cycles) */
1800 const char *ratio_desc;
1801 const float ratio_mul;
1802 } metrics[] = {
1803 {
1804 .name = "cycles",
1805 .attr = {
1806 .type = PERF_TYPE_HARDWARE,
1807 .config = PERF_COUNT_HW_CPU_CYCLES,
1808 .exclude_user = 1,
1809 },
1810 },
1811 {
1812 .name = "instructions",
1813 .attr = {
1814 .type = PERF_TYPE_HARDWARE,
1815 .config = PERF_COUNT_HW_INSTRUCTIONS,
1816 .exclude_user = 1,
1817 },
1818 .ratio_metric = 1,
1819 .ratio_desc = "insns per cycle",
1820 .ratio_mul = 1.0,
1821 },
1822 {
1823 .name = "l1d_loads",
1824 .attr = {
1825 .type = PERF_TYPE_HW_CACHE,
1826 .config =
1827 PERF_COUNT_HW_CACHE_L1D |
1828 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
1829 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
1830 .exclude_user = 1,
1831 },
1832 },
1833 {
1834 .name = "llc_misses",
1835 .attr = {
1836 .type = PERF_TYPE_HW_CACHE,
1837 .config =
1838 PERF_COUNT_HW_CACHE_LL |
1839 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
1840 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
1841 .exclude_user = 1
1842 },
1843 .ratio_metric = 2,
1844 .ratio_desc = "LLC misses per million insns",
1845 .ratio_mul = 1e6,
1846 },
1847 {
1848 .name = "itlb_misses",
1849 .attr = {
1850 .type = PERF_TYPE_HW_CACHE,
1851 .config =
1852 PERF_COUNT_HW_CACHE_ITLB |
1853 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
1854 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
1855 .exclude_user = 1
1856 },
1857 .ratio_metric = 2,
1858 .ratio_desc = "itlb misses per million insns",
1859 .ratio_mul = 1e6,
1860 },
1861 {
1862 .name = "dtlb_misses",
1863 .attr = {
1864 .type = PERF_TYPE_HW_CACHE,
1865 .config =
1866 PERF_COUNT_HW_CACHE_DTLB |
1867 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
1868 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
1869 .exclude_user = 1
1870 },
1871 .ratio_metric = 2,
1872 .ratio_desc = "dtlb misses per million insns",
1873 .ratio_mul = 1e6,
1874 },
1875 };
1876
1877 static __u64 profile_total_count;
1878
1879 #define MAX_NUM_PROFILE_METRICS 4
1880
1881 static int profile_parse_metrics(int argc, char **argv)
1882 {
1883 unsigned int metric_cnt;
1884 int selected_cnt = 0;
1885 unsigned int i;
1886
1887 metric_cnt = sizeof(metrics) / sizeof(struct profile_metric);
1888
1889 while (argc > 0) {
1890 for (i = 0; i < metric_cnt; i++) {
1891 if (is_prefix(argv[0], metrics[i].name)) {
1892 if (!metrics[i].selected)
1893 selected_cnt++;
1894 metrics[i].selected = true;
1895 break;
1896 }
1897 }
1898 if (i == metric_cnt) {
1899 p_err("unknown metric %s", argv[0]);
1900 return -1;
1901 }
1902 NEXT_ARG();
1903 }
1904 if (selected_cnt > MAX_NUM_PROFILE_METRICS) {
1905 p_err("too many (%d) metrics, please specify no more than %d metrics at at time",
1906 selected_cnt, MAX_NUM_PROFILE_METRICS);
1907 return -1;
1908 }
1909 return selected_cnt;
1910 }
1911
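/* Collect the per-CPU run counts and accumulated perf readings from the
 * profiler's maps and aggregate them into profile_total_count and
 * metrics[].val for the selected metrics.
 */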
1912 static void profile_read_values(struct profiler_bpf *obj)
1913 {
1914 __u32 m, cpu, num_cpu = obj->rodata->num_cpu;
1915 int reading_map_fd, count_map_fd;
1916 __u64 counts[num_cpu];
1917 __u32 key = 0;
1918 int err;
1919
1920 reading_map_fd = bpf_map__fd(obj->maps.accum_readings);
1921 count_map_fd = bpf_map__fd(obj->maps.counts);
1922 if (reading_map_fd < 0 || count_map_fd < 0) {
1923 p_err("failed to get fd for map");
1924 return;
1925 }
1926
1927 err = bpf_map_lookup_elem(count_map_fd, &key, counts);
1928 if (err) {
1929 p_err("failed to read count_map: %s", strerror(errno));
1930 return;
1931 }
1932
1933 profile_total_count = 0;
1934 for (cpu = 0; cpu < num_cpu; cpu++)
1935 profile_total_count += counts[cpu];
1936
1937 for (m = 0; m < ARRAY_SIZE(metrics); m++) {
1938 struct bpf_perf_event_value values[num_cpu];
1939
1940 if (!metrics[m].selected)
1941 continue;
1942
1943 err = bpf_map_lookup_elem(reading_map_fd, &key, values);
1944 if (err) {
1945 p_err("failed to read reading_map: %s",
1946 strerror(errno));
1947 return;
1948 }
1949 for (cpu = 0; cpu < num_cpu; cpu++) {
1950 metrics[m].val.counter += values[cpu].counter;
1951 metrics[m].val.enabled += values[cpu].enabled;
1952 metrics[m].val.running += values[cpu].running;
1953 }
1954 key++;
1955 }
1956 }
1957
1958 static void profile_print_readings_json(void)
1959 {
1960 __u32 m;
1961
1962 jsonw_start_array(json_wtr);
1963 for (m = 0; m < ARRAY_SIZE(metrics); m++) {
1964 if (!metrics[m].selected)
1965 continue;
1966 jsonw_start_object(json_wtr);
1967 jsonw_string_field(json_wtr, "metric", metrics[m].name);
1968 jsonw_lluint_field(json_wtr, "run_cnt", profile_total_count);
1969 jsonw_lluint_field(json_wtr, "value", metrics[m].val.counter);
1970 jsonw_lluint_field(json_wtr, "enabled", metrics[m].val.enabled);
1971 jsonw_lluint_field(json_wtr, "running", metrics[m].val.running);
1972
1973 jsonw_end_object(json_wtr);
1974 }
1975 jsonw_end_array(json_wtr);
1976 }
1977
1978 static void profile_print_readings_plain(void)
1979 {
1980 __u32 m;
1981
1982 printf("\n%18llu %-20s\n", profile_total_count, "run_cnt");
1983 for (m = 0; m < ARRAY_SIZE(metrics); m++) {
1984 struct bpf_perf_event_value *val = &metrics[m].val;
1985 int r;
1986
1987 if (!metrics[m].selected)
1988 continue;
1989 printf("%18llu %-20s", val->counter, metrics[m].name);
1990
1991 r = metrics[m].ratio_metric - 1;
1992 if (r >= 0 && metrics[r].selected &&
1993 metrics[r].val.counter > 0) {
1994 printf("# %8.2f %-30s",
1995 val->counter * metrics[m].ratio_mul /
1996 metrics[r].val.counter,
1997 metrics[m].ratio_desc);
1998 } else {
1999 printf("%-41s", "");
2000 }
2001
2002 if (val->enabled > val->running)
2003 printf("(%4.2f%%)",
2004 val->running * 100.0 / val->enabled);
2005 printf("\n");
2006 }
2007 }
2008
2009 static void profile_print_readings(void)
2010 {
2011 if (json_output)
2012 profile_print_readings_json();
2013 else
2014 profile_print_readings_plain();
2015 }
2016
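/* Resolve the name of the profiled program's first function from its BTF
 * func_info, so the profiler's fentry/fexit programs can be attached to
 * it. Returns a strdup()ed name, or NULL on failure.
 */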
2017 static char *profile_target_name(int tgt_fd)
2018 {
2019 struct bpf_prog_info_linear *info_linear;
2020 struct bpf_func_info *func_info;
2021 const struct btf_type *t;
2022 struct btf *btf = NULL;
2023 char *name = NULL;
2024
2025 info_linear = bpf_program__get_prog_info_linear(
2026 tgt_fd, 1UL << BPF_PROG_INFO_FUNC_INFO);
2027 if (IS_ERR_OR_NULL(info_linear)) {
2028 p_err("failed to get info_linear for prog FD %d", tgt_fd);
2029 return NULL;
2030 }
2031
2032 if (info_linear->info.btf_id == 0) {
2033 p_err("prog FD %d doesn't have valid btf", tgt_fd);
2034 goto out;
2035 }
2036
2037 btf = btf__load_from_kernel_by_id(info_linear->info.btf_id);
2038 if (libbpf_get_error(btf)) {
2039 p_err("failed to load btf for prog FD %d", tgt_fd);
2040 goto out;
2041 }
2042
2043 func_info = u64_to_ptr(info_linear->info.func_info);
2044 t = btf__type_by_id(btf, func_info[0].type_id);
2045 if (!t) {
2046 p_err("btf %d doesn't have type %d",
2047 info_linear->info.btf_id, func_info[0].type_id);
2048 goto out;
2049 }
2050 name = strdup(btf__name_by_offset(btf, t->name_off));
2051 out:
2052 btf__free(btf);
2053 free(info_linear);
2054 return name;
2055 }
2056
2057 static struct profiler_bpf *profile_obj;
2058 static int profile_tgt_fd = -1;
2059 static char *profile_tgt_name;
2060 static int *profile_perf_events;
2061 static int profile_perf_event_cnt;
2062
2063 static void profile_close_perf_events(struct profiler_bpf *obj)
2064 {
2065 int i;
2066
2067 for (i = profile_perf_event_cnt - 1; i >= 0; i--)
2068 close(profile_perf_events[i]);
2069
2070 free(profile_perf_events);
2071 profile_perf_event_cnt = 0;
2072 }
2073
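/* Open one perf event per selected metric and per CPU, store the file
 * descriptors in the profiler's "events" map (indexed metric-major) and
 * enable them. The fds are also kept in profile_perf_events for cleanup.
 */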
2074 static int profile_open_perf_events(struct profiler_bpf *obj)
2075 {
2076 unsigned int cpu, m;
2077 int map_fd, pmu_fd;
2078
2079 profile_perf_events = calloc(
2080 obj->rodata->num_cpu * obj->rodata->num_metric, sizeof(int));
2081 if (!profile_perf_events) {
2082 p_err("failed to allocate memory for perf_event array: %s",
2083 strerror(errno));
2084 return -1;
2085 }
2086 map_fd = bpf_map__fd(obj->maps.events);
2087 if (map_fd < 0) {
2088 p_err("failed to get fd for events map");
2089 return -1;
2090 }
2091
2092 for (m = 0; m < ARRAY_SIZE(metrics); m++) {
2093 if (!metrics[m].selected)
2094 continue;
2095 for (cpu = 0; cpu < obj->rodata->num_cpu; cpu++) {
2096 pmu_fd = syscall(__NR_perf_event_open, &metrics[m].attr,
2097 -1/*pid*/, cpu, -1/*group_fd*/, 0);
2098 if (pmu_fd < 0 ||
2099 bpf_map_update_elem(map_fd, &profile_perf_event_cnt,
2100 &pmu_fd, BPF_ANY) ||
2101 ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0)) {
2102 p_err("failed to create event %s on cpu %d",
2103 metrics[m].name, cpu);
2104 return -1;
2105 }
2106 profile_perf_events[profile_perf_event_cnt++] = pmu_fd;
2107 }
2108 }
2109 return 0;
2110 }
2111
2112 static void profile_print_and_cleanup(void)
2113 {
2114 profile_close_perf_events(profile_obj);
2115 profile_read_values(profile_obj);
2116 profile_print_readings();
2117 profiler_bpf__destroy(profile_obj);
2118
2119 close(profile_tgt_fd);
2120 free(profile_tgt_name);
2121 }
2122
2123 static void int_exit(int signo)
2124 {
2125 profile_print_and_cleanup();
2126 exit(0);
2127 }
2128
2129 static int do_profile(int argc, char **argv)
2130 {
2131 int num_metric, num_cpu, err = -1;
2132 struct bpf_program *prog;
2133 unsigned long duration;
2134 char *endptr;
2135
2136 /* we at least need two args for the prog and one metric */
2137 if (!REQ_ARGS(3))
2138 return -EINVAL;
2139
2140 /* parse target fd */
2141 profile_tgt_fd = prog_parse_fd(&argc, &argv);
2142 if (profile_tgt_fd < 0) {
2143 p_err("failed to parse fd");
2144 return -1;
2145 }
2146
2147 /* parse profiling optional duration */
2148 if (argc > 2 && is_prefix(argv[0], "duration")) {
2149 NEXT_ARG();
2150 duration = strtoul(*argv, &endptr, 0);
2151 if (*endptr)
2152 usage();
2153 NEXT_ARG();
2154 } else {
2155 duration = UINT_MAX;
2156 }
2157
2158 num_metric = profile_parse_metrics(argc, argv);
2159 if (num_metric <= 0)
2160 goto out;
2161
2162 num_cpu = libbpf_num_possible_cpus();
2163 if (num_cpu <= 0) {
2164 p_err("failed to identify number of CPUs");
2165 goto out;
2166 }
2167
2168 profile_obj = profiler_bpf__open();
2169 if (!profile_obj) {
2170 p_err("failed to open and/or load BPF object");
2171 goto out;
2172 }
2173
2174 profile_obj->rodata->num_cpu = num_cpu;
2175 profile_obj->rodata->num_metric = num_metric;
2176
2177 /* adjust map sizes */
2178 bpf_map__resize(profile_obj->maps.events, num_metric * num_cpu);
2179 bpf_map__resize(profile_obj->maps.fentry_readings, num_metric);
2180 bpf_map__resize(profile_obj->maps.accum_readings, num_metric);
2181 bpf_map__resize(profile_obj->maps.counts, 1);
2182
2183 /* change target name */
2184 profile_tgt_name = profile_target_name(profile_tgt_fd);
2185 if (!profile_tgt_name)
2186 goto out;
2187
2188 bpf_object__for_each_program(prog, profile_obj->obj) {
2189 err = bpf_program__set_attach_target(prog, profile_tgt_fd,
2190 profile_tgt_name);
2191 if (err) {
2192 p_err("failed to set attach target\n");
2193 goto out;
2194 }
2195 }
2196
2197 set_max_rlimit();
2198 err = profiler_bpf__load(profile_obj);
2199 if (err) {
2200 p_err("failed to load profile_obj");
2201 goto out;
2202 }
2203
2204 err = profile_open_perf_events(profile_obj);
2205 if (err)
2206 goto out;
2207
2208 err = profiler_bpf__attach(profile_obj);
2209 if (err) {
2210 p_err("failed to attach profile_obj");
2211 goto out;
2212 }
2213 signal(SIGINT, int_exit);
2214
2215 sleep(duration);
2216 profile_print_and_cleanup();
2217 return 0;
2218
2219 out:
2220 profile_close_perf_events(profile_obj);
2221 if (profile_obj)
2222 profiler_bpf__destroy(profile_obj);
2223 close(profile_tgt_fd);
2224 free(profile_tgt_name);
2225 return err;
2226 }
2227
2228 #endif /* BPFTOOL_WITHOUT_SKELETONS */
2229
2230 static int do_help(int argc, char **argv)
2231 {
2232 if (json_output) {
2233 jsonw_null(json_wtr);
2234 return 0;
2235 }
2236
2237 fprintf(stderr,
2238 "Usage: %1$s %2$s { show | list } [PROG]\n"
2239 " %1$s %2$s dump xlated PROG [{ file FILE | opcodes | visual | linum }]\n"
2240 " %1$s %2$s dump jited PROG [{ file FILE | opcodes | linum }]\n"
2241 " %1$s %2$s pin PROG FILE\n"
2242 " %1$s %2$s { load | loadall } OBJ PATH \\\n"
2243 " [type TYPE] [dev NAME] \\\n"
2244 " [map { idx IDX | name NAME } MAP]\\\n"
2245 " [pinmaps MAP_DIR]\n"
2246 " %1$s %2$s attach PROG ATTACH_TYPE [MAP]\n"
2247 " %1$s %2$s detach PROG ATTACH_TYPE [MAP]\n"
2248 " %1$s %2$s run PROG \\\n"
2249 " data_in FILE \\\n"
2250 " [data_out FILE [data_size_out L]] \\\n"
2251 " [ctx_in FILE [ctx_out FILE [ctx_size_out M]]] \\\n"
2252 " [repeat N]\n"
2253 " %1$s %2$s profile PROG [duration DURATION] METRICs\n"
2254 " %1$s %2$s tracelog\n"
2255 " %1$s %2$s help\n"
2256 "\n"
2257 " " HELP_SPEC_MAP "\n"
2258 " " HELP_SPEC_PROGRAM "\n"
2259 " TYPE := { socket | kprobe | kretprobe | classifier | action |\n"
2260 " tracepoint | raw_tracepoint | xdp | perf_event | cgroup/skb |\n"
2261 " cgroup/sock | cgroup/dev | lwt_in | lwt_out | lwt_xmit |\n"
2262 " lwt_seg6local | sockops | sk_skb | sk_msg | lirc_mode2 |\n"
2263 " sk_reuseport | flow_dissector | cgroup/sysctl |\n"
2264 " cgroup/bind4 | cgroup/bind6 | cgroup/post_bind4 |\n"
2265 " cgroup/post_bind6 | cgroup/connect4 | cgroup/connect6 |\n"
2266 " cgroup/getpeername4 | cgroup/getpeername6 |\n"
2267 " cgroup/getsockname4 | cgroup/getsockname6 | cgroup/sendmsg4 |\n"
2268 " cgroup/sendmsg6 | cgroup/recvmsg4 | cgroup/recvmsg6 |\n"
2269 " cgroup/getsockopt | cgroup/setsockopt | cgroup/sock_release |\n"
2270 " struct_ops | fentry | fexit | freplace | sk_lookup }\n"
2271 " ATTACH_TYPE := { msg_verdict | skb_verdict | stream_verdict |\n"
2272 " stream_parser | flow_dissector }\n"
2273 " METRIC := { cycles | instructions | l1d_loads | llc_misses | itlb_misses | dtlb_misses }\n"
2274 " " HELP_SPEC_OPTIONS " |\n"
2275 " {-f|--bpffs} | {-m|--mapcompat} | {-n|--nomount} |\n"
2276 " {-L|--use-loader} }\n"
2277 "",
2278 bin_name, argv[-2]);
2279
2280 return 0;
2281 }
2282
2283 static const struct cmd cmds[] = {
2284 { "show", do_show },
2285 { "list", do_show },
2286 { "help", do_help },
2287 { "dump", do_dump },
2288 { "pin", do_pin },
2289 { "load", do_load },
2290 { "loadall", do_loadall },
2291 { "attach", do_attach },
2292 { "detach", do_detach },
2293 { "tracelog", do_tracelog },
2294 { "run", do_run },
2295 { "profile", do_profile },
2296 { 0 }
2297 };
2298
2299 int do_prog(int argc, char **argv)
2300 {
2301 return cmd_select(cmds, argc, argv, do_help);
2302 }
2303