// SPDX-License-Identifier: GPL-2.0
/*
 * builtin-inject.c
 *
 * Builtin inject command: Examine the live mode (stdin) event stream
 * and repipe it to stdout while optionally injecting additional
 * events into it.
 */
#include "builtin.h"

#include "util/color.h"
#include "util/dso.h"
#include "util/vdso.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/map.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/debug.h"
#include "util/build-id.h"
#include "util/data.h"
#include "util/auxtrace.h"
#include "util/jit.h"
#include "util/string2.h"
#include "util/symbol.h"
#include "util/synthetic-events.h"
#include "util/thread.h"
#include "util/namespaces.h"
#include "util/util.h"
#include "util/tsc.h"

#include <internal/lib.h>

#include <linux/err.h>
#include <subcmd/parse-options.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */

#include <linux/list.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <linux/hash.h>
#include <ctype.h>
#include <errno.h>
#include <signal.h>
#include <inttypes.h>

struct guest_event {
	struct perf_sample sample;
	union perf_event *event;
	char event_buf[PERF_SAMPLE_MAX_SIZE];
};

struct guest_id {
	/* hlist_node must be first, see free_hlist() */
	struct hlist_node node;
	u64 id;
	u64 host_id;
	u32 vcpu;
};

struct guest_tid {
	/* hlist_node must be first, see free_hlist() */
	struct hlist_node node;
	/* Thread ID of QEMU thread */
	u32 tid;
	u32 vcpu;
};

struct guest_vcpu {
	/* Current host CPU */
	u32 cpu;
	/* Thread ID of QEMU thread */
	u32 tid;
};

struct guest_session {
	char *perf_data_file;
	u32 machine_pid;
	u64 time_offset;
	double time_scale;
	struct perf_tool tool;
	struct perf_data data;
	struct perf_session *session;
	char *tmp_file_name;
	int tmp_fd;
	struct perf_tsc_conversion host_tc;
	struct perf_tsc_conversion guest_tc;
	bool copy_kcore_dir;
	bool have_tc;
	bool fetched;
	bool ready;
	u16 dflt_id_hdr_size;
	u64 dflt_id;
	u64 highest_id;
	/* Array of guest_vcpu */
	struct guest_vcpu *vcpu;
	size_t vcpu_cnt;
	/* Hash table for guest_id */
	struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
	/* Hash table for guest_tid */
	struct hlist_head tids[PERF_EVLIST__HLIST_SIZE];
	/* Place to stash next guest event */
	struct guest_event ev;
};

struct perf_inject {
	struct perf_tool tool;
	struct perf_session *session;
	bool build_ids;
	bool build_id_all;
	bool sched_stat;
	bool have_auxtrace;
	bool strip;
	bool jit_mode;
	bool in_place_update;
	bool in_place_update_dry_run;
	bool is_pipe;
	bool copy_kcore_dir;
	const char *input_name;
	struct perf_data output;
	u64 bytes_written;
	u64 aux_id;
	struct list_head samples;
	struct itrace_synth_opts itrace_synth_opts;
	char event_copy[PERF_SAMPLE_MAX_SIZE];
	struct perf_file_section secs[HEADER_FEAT_BITS];
	struct guest_session guest_session;
	struct strlist *known_build_ids;
};

struct event_entry {
	struct list_head node;
	u32 tid;
	union perf_event event[];
};

static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
				struct machine *machine, u8 cpumode, u32 flags);

static int output_bytes(struct perf_inject *inject, void *buf, size_t sz)
{
	ssize_t size;

	size = perf_data__write(&inject->output, buf, sz);
	if (size < 0)
		return -errno;

	inject->bytes_written += size;
	return 0;
}

static int perf_event__repipe_synth(struct perf_tool *tool,
				    union perf_event *event)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);

	return output_bytes(inject, event, event->header.size);
}

static int perf_event__repipe_oe_synth(struct perf_tool *tool,
				       union perf_event *event,
				       struct ordered_events *oe __maybe_unused)
{
	return perf_event__repipe_synth(tool, event);
}

#ifdef HAVE_JITDUMP
static int perf_event__drop_oe(struct perf_tool *tool __maybe_unused,
			       union perf_event *event __maybe_unused,
			       struct ordered_events *oe __maybe_unused)
{
	return 0;
}
#endif

static int perf_event__repipe_op2_synth(struct perf_session *session,
					union perf_event *event)
{
	return perf_event__repipe_synth(session->tool, event);
}

static int perf_event__repipe_op4_synth(struct perf_session *session,
					union perf_event *event,
					u64 data __maybe_unused,
					const char *str __maybe_unused)
{
	return perf_event__repipe_synth(session->tool, event);
}

static int perf_event__repipe_attr(struct perf_tool *tool,
				   union perf_event *event,
				   struct evlist **pevlist)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);
	int ret;

	ret = perf_event__process_attr(tool, event, pevlist);
	if (ret)
		return ret;

	if (!inject->is_pipe)
		return 0;

	return perf_event__repipe_synth(tool, event);
}

static int perf_event__repipe_event_update(struct perf_tool *tool,
					   union perf_event *event,
					   struct evlist **pevlist __maybe_unused)
{
	return perf_event__repipe_synth(tool, event);
}

#ifdef HAVE_AUXTRACE_SUPPORT

static int copy_bytes(struct perf_inject *inject, struct perf_data *data, off_t size)
{
	char buf[4096];
	ssize_t ssz;
	int ret;

	while (size > 0) {
		ssz = perf_data__read(data, buf, min(size, (off_t)sizeof(buf)));
		if (ssz < 0)
			return -errno;
		ret = output_bytes(inject, buf, ssz);
		if (ret)
			return ret;
		size -= ssz;
	}

	return 0;
}

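/*
 * Pass an AUXTRACE event through to the output. When writing to a file, note
 * the event's new file offset in the auxtrace index. When the input is a pipe
 * (or is not mapped in a single mmap), the trace data following the event
 * must be copied through explicitly. Returns the number of trace data bytes
 * consumed.
 */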
static s64 perf_event__repipe_auxtrace(struct perf_session *session,
				       union perf_event *event)
{
	struct perf_tool *tool = session->tool;
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);
	int ret;

	inject->have_auxtrace = true;

	if (!inject->output.is_pipe) {
		off_t offset;

		offset = lseek(inject->output.file.fd, 0, SEEK_CUR);
		if (offset == -1)
			return -errno;
		ret = auxtrace_index__auxtrace_event(&session->auxtrace_index,
						     event, offset);
		if (ret < 0)
			return ret;
	}

	if (perf_data__is_pipe(session->data) || !session->one_mmap) {
		ret = output_bytes(inject, event, event->header.size);
		if (ret < 0)
			return ret;
		ret = copy_bytes(inject, session->data,
				 event->auxtrace.size);
	} else {
		ret = output_bytes(inject, event,
				   event->header.size + event->auxtrace.size);
	}
	if (ret < 0)
		return ret;

	return event->auxtrace.size;
}

#else

static s64
perf_event__repipe_auxtrace(struct perf_session *session __maybe_unused,
			    union perf_event *event __maybe_unused)
{
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

#endif

static int perf_event__repipe(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	return perf_event__repipe_synth(tool, event);
}

static int perf_event__drop(struct perf_tool *tool __maybe_unused,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample __maybe_unused,
			    struct machine *machine __maybe_unused)
{
	return 0;
}

static int perf_event__drop_aux(struct perf_tool *tool,
				union perf_event *event __maybe_unused,
				struct perf_sample *sample,
				struct machine *machine __maybe_unused)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);

	if (!inject->aux_id)
		inject->aux_id = sample->id;

	return 0;
}

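/*
 * Cut the AUX area data out of a sample event that is being repiped: sz1
 * covers the bytes up to and including the aux_sample size field (which is
 * zeroed in the copy), sz2 covers whatever follows the aux data. On any size
 * inconsistency the event is returned unmodified. This is used when itrace
 * options are set, i.e. when the AUX data is being decoded rather than
 * passed through.
 */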
static union perf_event *
perf_inject__cut_auxtrace_sample(struct perf_inject *inject,
				 union perf_event *event,
				 struct perf_sample *sample)
{
	size_t sz1 = sample->aux_sample.data - (void *)event;
	size_t sz2 = event->header.size - sample->aux_sample.size - sz1;
	union perf_event *ev = (union perf_event *)inject->event_copy;

	if (sz1 > event->header.size || sz2 > event->header.size ||
	    sz1 + sz2 > event->header.size ||
	    sz1 < sizeof(struct perf_event_header) + sizeof(u64))
		return event;

	memcpy(ev, event, sz1);
	memcpy((void *)ev + sz1, (void *)event + event->header.size - sz2, sz2);
	ev->header.size = sz1 + sz2;
	((u64 *)((void *)ev + sz1))[-1] = 0;

	return ev;
}

typedef int (*inject_handler)(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample,
			      struct evsel *evsel,
			      struct machine *machine);

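/*
 * Samples are repiped as-is unless the evsel has a specific handler attached
 * (e.g. for sched-stat or strip mode), in which case the handler decides
 * what, if anything, gets written out.
 */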
static int perf_event__repipe_sample(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct evsel *evsel,
				     struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);

	if (evsel && evsel->handler) {
		inject_handler f = evsel->handler;
		return f(tool, event, sample, evsel, machine);
	}

	build_id__mark_dso_hit(tool, event, sample, evsel, machine);

	if (inject->itrace_synth_opts.set && sample->aux_sample.size)
		event = perf_inject__cut_auxtrace_sample(inject, event, sample);

	return perf_event__repipe_synth(tool, event);
}

static int perf_event__repipe_mmap(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err;

	err = perf_event__process_mmap(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

#ifdef HAVE_JITDUMP
static int perf_event__jit_repipe_mmap(struct perf_tool *tool,
				       union perf_event *event,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	u64 n = 0;
	int ret;

	/*
	 * if jit marker, then inject jit mmaps and generate ELF images
	 */
	ret = jit_process(inject->session, &inject->output, machine,
			  event->mmap.filename, event->mmap.pid, event->mmap.tid, &n);
	if (ret < 0)
		return ret;
	if (ret) {
		inject->bytes_written += n;
		return 0;
	}
	return perf_event__repipe_mmap(tool, event, sample, machine);
}
#endif

static struct dso *findnew_dso(int pid, int tid, const char *filename,
			       struct dso_id *id, struct machine *machine)
{
	struct thread *thread;
	struct nsinfo *nsi = NULL;
	struct nsinfo *nnsi;
	struct dso *dso;
	bool vdso;

	thread = machine__findnew_thread(machine, pid, tid);
	if (thread == NULL) {
		pr_err("cannot find or create a task %d/%d.\n", tid, pid);
		return NULL;
	}

	vdso = is_vdso_map(filename);
	nsi = nsinfo__get(thread->nsinfo);

	if (vdso) {
		/* The vdso maps are always on the host and not the
		 * container.  Ensure that we don't use setns to look
		 * them up.
		 */
		nnsi = nsinfo__copy(nsi);
		if (nnsi) {
			nsinfo__put(nsi);
			nsinfo__clear_need_setns(nnsi);
			nsi = nnsi;
		}
		dso = machine__findnew_vdso(machine, thread);
	} else {
		dso = machine__findnew_dso_id(machine, filename, id);
	}

	if (dso) {
		mutex_lock(&dso->lock);
		nsinfo__put(dso->nsinfo);
		dso->nsinfo = nsi;
		mutex_unlock(&dso->lock);
	} else
		nsinfo__put(nsi);

	thread__put(thread);
	return dso;
}

static int perf_event__repipe_buildid_mmap(struct perf_tool *tool,
					   union perf_event *event,
					   struct perf_sample *sample,
					   struct machine *machine)
{
	struct dso *dso;

	dso = findnew_dso(event->mmap.pid, event->mmap.tid,
			  event->mmap.filename, NULL, machine);

	if (dso && !dso->hit) {
		dso->hit = 1;
		dso__inject_build_id(dso, tool, machine, sample->cpumode, 0);
	}
	dso__put(dso);

	return perf_event__repipe(tool, event, sample, machine);
}

static int perf_event__repipe_mmap2(struct perf_tool *tool,
				    union perf_event *event,
				    struct perf_sample *sample,
				    struct machine *machine)
{
	int err;

	err = perf_event__process_mmap2(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
		struct dso *dso;

		dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
				  event->mmap2.filename, NULL, machine);
		if (dso) {
			/* mark it not to inject build-id */
			dso->hit = 1;
		}
		dso__put(dso);
	}

	return err;
}

#ifdef HAVE_JITDUMP
static int perf_event__jit_repipe_mmap2(struct perf_tool *tool,
					union perf_event *event,
					struct perf_sample *sample,
					struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	u64 n = 0;
	int ret;

	/*
	 * if jit marker, then inject jit mmaps and generate ELF images
	 */
	ret = jit_process(inject->session, &inject->output, machine,
			  event->mmap2.filename, event->mmap2.pid, event->mmap2.tid, &n);
	if (ret < 0)
		return ret;
	if (ret) {
		inject->bytes_written += n;
		return 0;
	}
	return perf_event__repipe_mmap2(tool, event, sample, machine);
}
#endif

static int perf_event__repipe_buildid_mmap2(struct perf_tool *tool,
					    union perf_event *event,
					    struct perf_sample *sample,
					    struct machine *machine)
{
	struct dso_id dso_id = {
		.maj = event->mmap2.maj,
		.min = event->mmap2.min,
		.ino = event->mmap2.ino,
		.ino_generation = event->mmap2.ino_generation,
	};
	struct dso *dso;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
		/* cannot use dso_id since it'd have invalid info */
		dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
				  event->mmap2.filename, NULL, machine);
		if (dso) {
			/* mark it not to inject build-id */
			dso->hit = 1;
		}
		dso__put(dso);
		perf_event__repipe(tool, event, sample, machine);
		return 0;
	}

	dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
			  event->mmap2.filename, &dso_id, machine);

	if (dso && !dso->hit) {
		dso->hit = 1;
		dso__inject_build_id(dso, tool, machine, sample->cpumode,
				     event->mmap2.flags);
	}
	dso__put(dso);

	perf_event__repipe(tool, event, sample, machine);

	return 0;
}

static int perf_event__repipe_fork(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err;

	err = perf_event__process_fork(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_comm(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err;

	err = perf_event__process_comm(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_namespaces(struct perf_tool *tool,
					 union perf_event *event,
					 struct perf_sample *sample,
					 struct machine *machine)
{
	int err = perf_event__process_namespaces(tool, event, sample, machine);

	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_exit(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err;

	err = perf_event__process_exit(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

#ifdef HAVE_LIBTRACEEVENT
static int perf_event__repipe_tracing_data(struct perf_session *session,
					   union perf_event *event)
{
	perf_event__repipe_synth(session->tool, event);

	return perf_event__process_tracing_data(session, event);
}
#endif

static int dso__read_build_id(struct dso *dso)
{
	struct nscookie nsc;

	if (dso->has_build_id)
		return 0;

	mutex_lock(&dso->lock);
	nsinfo__mountns_enter(dso->nsinfo, &nsc);
	if (filename__read_build_id(dso->long_name, &dso->bid) > 0)
		dso->has_build_id = true;
	else if (dso->nsinfo) {
		char *new_name;

		new_name = filename_with_chroot(dso->nsinfo->pid,
						dso->long_name);
		if (new_name && filename__read_build_id(new_name, &dso->bid) > 0)
			dso->has_build_id = true;
		free(new_name);
	}
	nsinfo__mountns_exit(&nsc);
	mutex_unlock(&dso->lock);

	return dso->has_build_id ? 0 : -1;
}

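/*
 * Parse the --known-build-ids argument: a list of "<build-id> <dso name>"
 * pairs. Entries with no dso name, an odd-length or over-long build ID, or
 * non-hex digits in the build ID are silently dropped from the list.
 */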
static struct strlist *perf_inject__parse_known_build_ids(
	const char *known_build_ids_string)
{
	struct str_node *pos, *tmp;
	struct strlist *known_build_ids;
	int bid_len;

	known_build_ids = strlist__new(known_build_ids_string, NULL);
	if (known_build_ids == NULL)
		return NULL;
	strlist__for_each_entry_safe(pos, tmp, known_build_ids) {
		const char *build_id, *dso_name;

		build_id = skip_spaces(pos->s);
		dso_name = strchr(build_id, ' ');
		if (dso_name == NULL) {
			strlist__remove(known_build_ids, pos);
			continue;
		}
		bid_len = dso_name - pos->s;
		dso_name = skip_spaces(dso_name);
		if (bid_len % 2 != 0 || bid_len >= SBUILD_ID_SIZE) {
			strlist__remove(known_build_ids, pos);
			continue;
		}
		for (int ix = 0; 2 * ix + 1 < bid_len; ++ix) {
			if (!isxdigit(build_id[2 * ix]) ||
			    !isxdigit(build_id[2 * ix + 1])) {
				strlist__remove(known_build_ids, pos);
				break;
			}
		}
	}
	return known_build_ids;
}

static bool perf_inject__lookup_known_build_id(struct perf_inject *inject,
					       struct dso *dso)
{
	struct str_node *pos;
	int bid_len;

	strlist__for_each_entry(pos, inject->known_build_ids) {
		const char *build_id, *dso_name;

		build_id = skip_spaces(pos->s);
		dso_name = strchr(build_id, ' ');
		bid_len = dso_name - pos->s;
		dso_name = skip_spaces(dso_name);
		if (strcmp(dso->long_name, dso_name))
			continue;
		for (int ix = 0; 2 * ix + 1 < bid_len; ++ix) {
			dso->bid.data[ix] = (hex(build_id[2 * ix]) << 4 |
					     hex(build_id[2 * ix + 1]));
		}
		dso->bid.size = bid_len / 2;
		dso->has_build_id = 1;
		return true;
	}
	return false;
}

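/*
 * Synthesize and repipe a build ID event for @dso, unless it maps anonymous,
 * hugetlb or dummy memory. Returns 1 if the build ID was filled in from the
 * known-build-ids list instead, 0 if an event was synthesized, -1 on error.
 */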
static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
				struct machine *machine, u8 cpumode, u32 flags)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);
	int err;

	if (is_anon_memory(dso->long_name) || flags & MAP_HUGETLB)
		return 0;
	if (is_no_dso_memory(dso->long_name))
		return 0;

	if (inject->known_build_ids != NULL &&
	    perf_inject__lookup_known_build_id(inject, dso))
		return 1;

	if (dso__read_build_id(dso) < 0) {
		pr_debug("no build_id found for %s\n", dso->long_name);
		return -1;
	}

	err = perf_event__synthesize_build_id(tool, dso, cpumode,
					      perf_event__repipe, machine);
	if (err) {
		pr_err("Can't synthesize build_id event for %s\n", dso->long_name);
		return -1;
	}

	return 0;
}

int perf_event__inject_buildid(struct perf_tool *tool, union perf_event *event,
			       struct perf_sample *sample,
			       struct evsel *evsel __maybe_unused,
			       struct machine *machine)
{
	struct addr_location al;
	struct thread *thread;

	thread = machine__findnew_thread(machine, sample->pid, sample->tid);
	if (thread == NULL) {
		pr_err("problem processing %d event, skipping it.\n",
		       event->header.type);
		goto repipe;
	}

	if (thread__find_map(thread, sample->cpumode, sample->ip, &al)) {
		if (!al.map->dso->hit) {
			al.map->dso->hit = 1;
			dso__inject_build_id(al.map->dso, tool, machine,
					     sample->cpumode, al.map->flags);
		}
	}

	thread__put(thread);
repipe:
	perf_event__repipe(tool, event, sample, machine);
	return 0;
}

static int perf_inject__sched_process_exit(struct perf_tool *tool,
					   union perf_event *event __maybe_unused,
					   struct perf_sample *sample,
					   struct evsel *evsel __maybe_unused,
					   struct machine *machine __maybe_unused)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	struct event_entry *ent;

	list_for_each_entry(ent, &inject->samples, node) {
		if (sample->tid == ent->tid) {
			list_del_init(&ent->node);
			free(ent);
			break;
		}
	}

	return 0;
}

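/*
 * For sched-stat injection: stash a copy of each sched_switch event, keyed by
 * thread ID, on inject->samples. When a sched_stat event for the same thread
 * arrives later (see perf_inject__sched_stat()), the stashed switch event is
 * re-emitted with the stat event's time and period, the idea being that the
 * delay is attributed to the context captured at switch time.
 */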
static int perf_inject__sched_switch(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct evsel *evsel,
				     struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	struct event_entry *ent;

	perf_inject__sched_process_exit(tool, event, sample, evsel, machine);

	ent = malloc(event->header.size + sizeof(struct event_entry));
	if (ent == NULL) {
		color_fprintf(stderr, PERF_COLOR_RED,
			      "Not enough memory to process sched switch event!");
		return -1;
	}

	ent->tid = sample->tid;
	memcpy(&ent->event, event, event->header.size);
	list_add(&ent->node, &inject->samples);
	return 0;
}

#ifdef HAVE_LIBTRACEEVENT
static int perf_inject__sched_stat(struct perf_tool *tool,
				   union perf_event *event __maybe_unused,
				   struct perf_sample *sample,
				   struct evsel *evsel,
				   struct machine *machine)
{
	struct event_entry *ent;
	union perf_event *event_sw;
	struct perf_sample sample_sw;
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	u32 pid = evsel__intval(evsel, sample, "pid");

	list_for_each_entry(ent, &inject->samples, node) {
		if (pid == ent->tid)
			goto found;
	}

	return 0;
found:
	event_sw = &ent->event[0];
	evsel__parse_sample(evsel, event_sw, &sample_sw);

	sample_sw.period = sample->period;
	sample_sw.time = sample->time;
	perf_event__synthesize_sample(event_sw, evsel->core.attr.sample_type,
				      evsel->core.attr.read_format, &sample_sw);
	build_id__mark_dso_hit(tool, event_sw, &sample_sw, evsel, machine);
	return perf_event__repipe(tool, event_sw, &sample_sw, machine);
}
#endif

static struct guest_vcpu *guest_session__vcpu(struct guest_session *gs, u32 vcpu)
{
	if (realloc_array_as_needed(gs->vcpu, gs->vcpu_cnt, vcpu, NULL))
		return NULL;
	return &gs->vcpu[vcpu];
}

static int guest_session__output_bytes(struct guest_session *gs, void *buf, size_t sz)
{
	ssize_t ret = writen(gs->tmp_fd, buf, sz);

	return ret < 0 ? ret : 0;
}

static int guest_session__repipe(struct perf_tool *tool,
				 union perf_event *event,
				 struct perf_sample *sample __maybe_unused,
				 struct machine *machine __maybe_unused)
{
	struct guest_session *gs = container_of(tool, struct guest_session, tool);

	return guest_session__output_bytes(gs, event, event->header.size);
}

static int guest_session__map_tid(struct guest_session *gs, u32 tid, u32 vcpu)
{
	struct guest_tid *guest_tid = zalloc(sizeof(*guest_tid));
	int hash;

	if (!guest_tid)
		return -ENOMEM;

	guest_tid->tid = tid;
	guest_tid->vcpu = vcpu;
	hash = hash_32(guest_tid->tid, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&guest_tid->node, &gs->tids[hash]);

	return 0;
}

static int host_peek_vm_comms_cb(struct perf_session *session __maybe_unused,
				 union perf_event *event,
				 u64 offset __maybe_unused, void *data)
{
	struct guest_session *gs = data;
	unsigned int vcpu;
	struct guest_vcpu *guest_vcpu;
	int ret;

	if (event->header.type != PERF_RECORD_COMM ||
	    event->comm.pid != gs->machine_pid)
		return 0;

	/*
	 * The QEMU option "-name debug-threads=on" causes thread names to be
	 * formatted as below, although it is not an ABI. libvirt also seems to
	 * use this by default. Here we rely on it to tell us which thread is
	 * which VCPU.
	 */
	ret = sscanf(event->comm.comm, "CPU %u/KVM", &vcpu);
	if (ret <= 0)
		return ret;
	pr_debug("Found VCPU: tid %u comm %s vcpu %u\n",
		 event->comm.tid, event->comm.comm, vcpu);
	if (vcpu > INT_MAX) {
		pr_err("Invalid VCPU %u\n", vcpu);
		return -EINVAL;
	}
	guest_vcpu = guest_session__vcpu(gs, vcpu);
	if (!guest_vcpu)
		return -ENOMEM;
	if (guest_vcpu->tid && guest_vcpu->tid != event->comm.tid) {
		pr_err("Fatal error: Two threads found with the same VCPU\n");
		return -EINVAL;
	}
	guest_vcpu->tid = event->comm.tid;

	return guest_session__map_tid(gs, event->comm.tid, vcpu);
}

static int host_peek_vm_comms(struct perf_session *session, struct guest_session *gs)
{
	return perf_session__peek_events(session, session->header.data_offset,
					 session->header.data_size,
					 host_peek_vm_comms_cb, gs);
}

static bool evlist__is_id_used(struct evlist *evlist, u64 id)
{
	return evlist__id2sid(evlist, id);
}

static u64 guest_session__allocate_new_id(struct guest_session *gs, struct evlist *host_evlist)
{
	do {
		gs->highest_id += 1;
	} while (!gs->highest_id || evlist__is_id_used(host_evlist, gs->highest_id));

	return gs->highest_id;
}

static int guest_session__map_id(struct guest_session *gs, u64 id, u64 host_id, u32 vcpu)
{
	struct guest_id *guest_id = zalloc(sizeof(*guest_id));
	int hash;

	if (!guest_id)
		return -ENOMEM;

	guest_id->id = id;
	guest_id->host_id = host_id;
	guest_id->vcpu = vcpu;
	hash = hash_64(guest_id->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&guest_id->node, &gs->heads[hash]);

	return 0;
}

static u64 evlist__find_highest_id(struct evlist *evlist)
{
	struct evsel *evsel;
	u64 highest_id = 1;

	evlist__for_each_entry(evlist, evsel) {
		u32 j;

		for (j = 0; j < evsel->core.ids; j++) {
			u64 id = evsel->core.id[j];

			if (id > highest_id)
				highest_id = id;
		}
	}

	return highest_id;
}

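/*
 * Map every guest sample ID that is bound to a CPU onto a freshly allocated,
 * unused host sample ID; IDs with no CPU associated are skipped.
 */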
static int guest_session__map_ids(struct guest_session *gs, struct evlist *host_evlist)
{
	struct evlist *evlist = gs->session->evlist;
	struct evsel *evsel;
	int ret;

	evlist__for_each_entry(evlist, evsel) {
		u32 j;

		for (j = 0; j < evsel->core.ids; j++) {
			struct perf_sample_id *sid;
			u64 host_id;
			u64 id;

			id = evsel->core.id[j];
			sid = evlist__id2sid(evlist, id);
			if (!sid || sid->cpu.cpu == -1)
				continue;
			host_id = guest_session__allocate_new_id(gs, host_evlist);
			ret = guest_session__map_id(gs, id, host_id, sid->cpu.cpu);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static struct guest_id *guest_session__lookup_id(struct guest_session *gs, u64 id)
{
	struct hlist_head *head;
	struct guest_id *guest_id;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &gs->heads[hash];

	hlist_for_each_entry(guest_id, head, node)
		if (guest_id->id == id)
			return guest_id;

	return NULL;
}

static int process_attr(struct perf_tool *tool, union perf_event *event,
			struct perf_sample *sample __maybe_unused,
			struct machine *machine __maybe_unused)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);

	return perf_event__process_attr(tool, event, &inject->session->evlist);
}

static int guest_session__add_attr(struct guest_session *gs, struct evsel *evsel)
{
	struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
	struct perf_event_attr attr = evsel->core.attr;
	u64 *id_array;
	u32 *vcpu_array;
	int ret = -ENOMEM;
	u32 i;

	id_array = calloc(evsel->core.ids, sizeof(*id_array));
	if (!id_array)
		return -ENOMEM;

	vcpu_array = calloc(evsel->core.ids, sizeof(*vcpu_array));
	if (!vcpu_array)
		goto out;

	for (i = 0; i < evsel->core.ids; i++) {
		u64 id = evsel->core.id[i];
		struct guest_id *guest_id = guest_session__lookup_id(gs, id);

		if (!guest_id) {
			pr_err("Failed to find guest id %"PRIu64"\n", id);
			ret = -EINVAL;
			goto out;
		}
		id_array[i] = guest_id->host_id;
		vcpu_array[i] = guest_id->vcpu;
	}

	attr.sample_type |= PERF_SAMPLE_IDENTIFIER;
	attr.exclude_host = 1;
	attr.exclude_guest = 0;

	ret = perf_event__synthesize_attr(&inject->tool, &attr, evsel->core.ids,
					  id_array, process_attr);
	if (ret)
		pr_err("Failed to add guest attr.\n");

	for (i = 0; i < evsel->core.ids; i++) {
		struct perf_sample_id *sid;
		u32 vcpu = vcpu_array[i];

		sid = evlist__id2sid(inject->session->evlist, id_array[i]);
		/* Guest event is per-thread from the host point of view */
		sid->cpu.cpu = -1;
		sid->tid = gs->vcpu[vcpu].tid;
		sid->machine_pid = gs->machine_pid;
		sid->vcpu.cpu = vcpu;
	}
out:
	free(vcpu_array);
	free(id_array);
	return ret;
}

static int guest_session__add_attrs(struct guest_session *gs)
{
	struct evlist *evlist = gs->session->evlist;
	struct evsel *evsel;
	int ret;

	evlist__for_each_entry(evlist, evsel) {
		ret = guest_session__add_attr(gs, evsel);
		if (ret)
			return ret;
	}

	return 0;
}

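/*
 * Emit an updated id index covering only the @new_cnt most recently added
 * evlist entries, i.e. the guest IDs, so that readers of the output can
 * resolve them to a machine PID and VCPU.
 */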
static int synthesize_id_index(struct perf_inject *inject, size_t new_cnt)
{
	struct perf_session *session = inject->session;
	struct evlist *evlist = session->evlist;
	struct machine *machine = &session->machines.host;
	size_t from = evlist->core.nr_entries - new_cnt;

	return __perf_event__synthesize_id_index(&inject->tool, perf_event__repipe,
						 evlist, machine, from);
}

static struct guest_tid *guest_session__lookup_tid(struct guest_session *gs, u32 tid)
{
	struct hlist_head *head;
	struct guest_tid *guest_tid;
	int hash;

	hash = hash_32(tid, PERF_EVLIST__HLIST_BITS);
	head = &gs->tids[hash];

	hlist_for_each_entry(guest_tid, head, node)
		if (guest_tid->tid == tid)
			return guest_tid;

	return NULL;
}

static bool dso__is_in_kernel_space(struct dso *dso)
{
	if (dso__is_vdso(dso))
		return false;

	return dso__is_kcore(dso) ||
	       dso->kernel ||
	       is_kernel_module(dso->long_name, PERF_RECORD_MISC_CPUMODE_UNKNOWN);
}

static u64 evlist__first_id(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.ids)
			return evsel->core.id[0];
	}
	return 0;
}

static int process_build_id(struct perf_tool *tool,
			    union perf_event *event,
			    struct perf_sample *sample __maybe_unused,
			    struct machine *machine __maybe_unused)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);

	return perf_event__process_build_id(inject->session, event);
}

static int synthesize_build_id(struct perf_inject *inject, struct dso *dso, pid_t machine_pid)
{
	struct machine *machine = perf_session__findnew_machine(inject->session, machine_pid);
	u8 cpumode = dso__is_in_kernel_space(dso) ?
			PERF_RECORD_MISC_GUEST_KERNEL :
			PERF_RECORD_MISC_GUEST_USER;

	if (!machine)
		return -ENOMEM;

	dso->hit = 1;

	return perf_event__synthesize_build_id(&inject->tool, dso, cpumode,
					       process_build_id, machine);
}

static int guest_session__add_build_ids(struct guest_session *gs)
{
	struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
	struct machine *machine = &gs->session->machines.host;
	struct dso *dso;
	int ret;

	/* Build IDs will be put in the Build ID feature section */
	perf_header__set_feat(&inject->session->header, HEADER_BUILD_ID);

	dsos__for_each_with_build_id(dso, &machine->dsos.head) {
		ret = synthesize_build_id(inject, dso, gs->machine_pid);
		if (ret)
			return ret;
	}

	return 0;
}

static int guest_session__ksymbol_event(struct perf_tool *tool,
					union perf_event *event,
					struct perf_sample *sample __maybe_unused,
					struct machine *machine __maybe_unused)
{
	struct guest_session *gs = container_of(tool, struct guest_session, tool);

	/* Only support out-of-line i.e. no BPF support */
	if (event->ksymbol.ksym_type != PERF_RECORD_KSYMBOL_TYPE_OOL)
		return 0;

	return guest_session__output_bytes(gs, event, event->header.size);
}

static int guest_session__start(struct guest_session *gs, const char *name, bool force)
{
	char tmp_file_name[] = "/tmp/perf-inject-guest_session-XXXXXX";
	struct perf_session *session;
	int ret;

	/* Only these events will be injected */
	gs->tool.mmap = guest_session__repipe;
	gs->tool.mmap2 = guest_session__repipe;
	gs->tool.comm = guest_session__repipe;
	gs->tool.fork = guest_session__repipe;
	gs->tool.exit = guest_session__repipe;
	gs->tool.lost = guest_session__repipe;
	gs->tool.context_switch = guest_session__repipe;
	gs->tool.ksymbol = guest_session__ksymbol_event;
	gs->tool.text_poke = guest_session__repipe;
	/*
	 * Processing a build ID creates a struct dso with that build ID. Later,
	 * all guest dsos are iterated and the build IDs processed into the host
	 * session where they will be output to the Build ID feature section
	 * when the perf.data file header is written.
	 */
	gs->tool.build_id = perf_event__process_build_id;
	/* Process the id index to know what VCPU an ID belongs to */
	gs->tool.id_index = perf_event__process_id_index;

	gs->tool.ordered_events = true;
	gs->tool.ordering_requires_timestamps = true;

	gs->data.path = name;
	gs->data.force = force;
	gs->data.mode = PERF_DATA_MODE_READ;

	session = perf_session__new(&gs->data, &gs->tool);
	if (IS_ERR(session))
		return PTR_ERR(session);
	gs->session = session;

	/*
	 * Initial events have zero'd ID samples. Get default ID sample size
	 * used for removing them.
	 */
	gs->dflt_id_hdr_size = session->machines.host.id_hdr_size;
	/* And default ID for adding back a host-compatible ID sample */
	gs->dflt_id = evlist__first_id(session->evlist);
	if (!gs->dflt_id) {
		pr_err("Guest data has no sample IDs");
		return -EINVAL;
	}

	/* Temporary file for guest events */
	gs->tmp_file_name = strdup(tmp_file_name);
	if (!gs->tmp_file_name)
		return -ENOMEM;
	gs->tmp_fd = mkstemp(gs->tmp_file_name);
	if (gs->tmp_fd < 0)
		return -errno;

	if (zstd_init(&gs->session->zstd_data, 0) < 0)
		pr_warning("Guest session decompression initialization failed.\n");

	/*
	 * perf does not support processing 2 sessions simultaneously, so output
	 * guest events to a temporary file.
	 */
	ret = perf_session__process_events(gs->session);
	if (ret)
		return ret;

	if (lseek(gs->tmp_fd, 0, SEEK_SET))
		return -errno;

	return 0;
}

/* Free hlist nodes assuming hlist_node is the first member of hlist entries */
static void free_hlist(struct hlist_head *heads, size_t hlist_sz)
{
	struct hlist_node *pos, *n;
	size_t i;

	for (i = 0; i < hlist_sz; ++i) {
		hlist_for_each_safe(pos, n, &heads[i]) {
			hlist_del(pos);
			free(pos);
		}
	}
}

static void guest_session__exit(struct guest_session *gs)
{
	if (gs->session) {
		perf_session__delete(gs->session);
		free_hlist(gs->heads, PERF_EVLIST__HLIST_SIZE);
		free_hlist(gs->tids, PERF_EVLIST__HLIST_SIZE);
	}
	if (gs->tmp_file_name) {
		if (gs->tmp_fd >= 0)
			close(gs->tmp_fd);
		unlink(gs->tmp_file_name);
		free(gs->tmp_file_name);
	}
	free(gs->vcpu);
	free(gs->perf_data_file);
}

static void get_tsc_conv(struct perf_tsc_conversion *tc, struct perf_record_time_conv *time_conv)
{
	tc->time_shift = time_conv->time_shift;
	tc->time_mult = time_conv->time_mult;
	tc->time_zero = time_conv->time_zero;
	tc->time_cycles = time_conv->time_cycles;
	tc->time_mask = time_conv->time_mask;
	tc->cap_user_time_zero = time_conv->cap_user_time_zero;
	tc->cap_user_time_short = time_conv->cap_user_time_short;
}

static void guest_session__get_tc(struct guest_session *gs)
{
	struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);

	get_tsc_conv(&gs->host_tc, &inject->session->time_conv);
	get_tsc_conv(&gs->guest_tc, &gs->session->time_conv);
}

static void guest_session__convert_time(struct guest_session *gs, u64 guest_time, u64 *host_time)
{
	u64 tsc;

	if (!guest_time) {
		*host_time = 0;
		return;
	}

	if (gs->guest_tc.cap_user_time_zero)
		tsc = perf_time_to_tsc(guest_time, &gs->guest_tc);
	else
		tsc = guest_time;

	/*
	 * This is the correct order of operations for x86 if the TSC Offset and
	 * Multiplier values are used.
	 */
	tsc -= gs->time_offset;
	tsc /= gs->time_scale;

	if (gs->host_tc.cap_user_time_zero)
		*host_time = tsc_to_perf_time(tsc, &gs->host_tc);
	else
		*host_time = tsc;
}

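/*
 * Read the next event from the temporary guest file into gs->ev: header
 * first, then the payload, then parse the sample and convert its timestamp
 * to host time. A zero-sized header signals EOF to the caller.
 */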
static int guest_session__fetch(struct guest_session *gs)
{
	void *buf = gs->ev.event_buf;
	struct perf_event_header *hdr = buf;
	size_t hdr_sz = sizeof(*hdr);
	ssize_t ret;

	ret = readn(gs->tmp_fd, buf, hdr_sz);
	if (ret < 0)
		return ret;

	if (!ret) {
		/* Zero size means EOF */
		hdr->size = 0;
		return 0;
	}

	buf += hdr_sz;

	ret = readn(gs->tmp_fd, buf, hdr->size - hdr_sz);
	if (ret < 0)
		return ret;

	gs->ev.event = (union perf_event *)gs->ev.event_buf;
	gs->ev.sample.time = 0;

	if (hdr->type >= PERF_RECORD_USER_TYPE_START) {
		pr_err("Unexpected type fetching guest event");
		return 0;
	}

	ret = evlist__parse_sample(gs->session->evlist, gs->ev.event, &gs->ev.sample);
	if (ret) {
		pr_err("Parse failed fetching guest event");
		return ret;
	}

	if (!gs->have_tc) {
		guest_session__get_tc(gs);
		gs->have_tc = true;
	}

	guest_session__convert_time(gs, gs->ev.sample.time, &gs->ev.sample.time);

	return 0;
}

static int evlist__append_id_sample(struct evlist *evlist, union perf_event *ev,
				    const struct perf_sample *sample)
{
	struct evsel *evsel;
	void *array;
	int ret;

	evsel = evlist__id2evsel(evlist, sample->id);
	array = ev;

	if (!evsel) {
		pr_err("No evsel for id %"PRIu64"\n", sample->id);
		return -EINVAL;
	}

	array += ev->header.size;
	ret = perf_event__synthesize_id_sample(array, evsel->core.attr.sample_type, sample);
	if (ret < 0)
		return ret;

	if (ret & 7) {
		pr_err("Bad id sample size %d\n", ret);
		return -EINVAL;
	}

	ev->header.size += ret;

	return 0;
}

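/*
 * Inject guest events with timestamps up to and including @timestamp into
 * the output. Each event is rewritten on the way out: cpumode is switched to
 * its guest equivalent, the guest ID sample is stripped and replaced by a
 * host-compatible one carrying the mapped host ID, and the VCPU number is
 * replaced by the host CPU it was last seen running on.
 */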
static int guest_session__inject_events(struct guest_session *gs, u64 timestamp)
{
	struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
	int ret;

	if (!gs->ready)
		return 0;

	while (1) {
		struct perf_sample *sample;
		struct guest_id *guest_id;
		union perf_event *ev;
		u16 id_hdr_size;
		u8 cpumode;
		u64 id;

		if (!gs->fetched) {
			ret = guest_session__fetch(gs);
			if (ret)
				return ret;
			gs->fetched = true;
		}

		ev = gs->ev.event;
		sample = &gs->ev.sample;

		if (!ev->header.size)
			return 0; /* EOF */

		if (sample->time > timestamp)
			return 0;

		/* Change cpumode to guest */
		cpumode = ev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
		if (cpumode & PERF_RECORD_MISC_USER)
			cpumode = PERF_RECORD_MISC_GUEST_USER;
		else
			cpumode = PERF_RECORD_MISC_GUEST_KERNEL;
		ev->header.misc &= ~PERF_RECORD_MISC_CPUMODE_MASK;
		ev->header.misc |= cpumode;

		id = sample->id;
		if (!id) {
			id = gs->dflt_id;
			id_hdr_size = gs->dflt_id_hdr_size;
		} else {
			struct evsel *evsel = evlist__id2evsel(gs->session->evlist, id);

			id_hdr_size = evsel__id_hdr_size(evsel);
		}

		if (id_hdr_size & 7) {
			pr_err("Bad id_hdr_size %u\n", id_hdr_size);
			return -EINVAL;
		}

		if (ev->header.size & 7) {
			pr_err("Bad event size %u\n", ev->header.size);
			return -EINVAL;
		}

		/* Remove guest id sample */
		ev->header.size -= id_hdr_size;

		if (ev->header.size & 7) {
			pr_err("Bad raw event size %u\n", ev->header.size);
			return -EINVAL;
		}

		guest_id = guest_session__lookup_id(gs, id);
		if (!guest_id) {
			pr_err("Guest event with unknown id %llu\n",
			       (unsigned long long)id);
			return -EINVAL;
		}

		/* Change to host ID to avoid conflicting ID values */
		sample->id = guest_id->host_id;
		sample->stream_id = guest_id->host_id;

		if (sample->cpu != (u32)-1) {
			if (sample->cpu >= gs->vcpu_cnt) {
				pr_err("Guest event with unknown VCPU %u\n",
				       sample->cpu);
				return -EINVAL;
			}
			/* Change to host CPU instead of guest VCPU */
			sample->cpu = gs->vcpu[sample->cpu].cpu;
		}

		/* New id sample with new ID and CPU */
		ret = evlist__append_id_sample(inject->session->evlist, ev, sample);
		if (ret)
			return ret;

		if (ev->header.size & 7) {
			pr_err("Bad new event size %u\n", ev->header.size);
			return -EINVAL;
		}

		gs->fetched = false;

		ret = output_bytes(inject, ev, ev->header.size);
		if (ret)
			return ret;
	}
}

static int guest_session__flush_events(struct guest_session *gs)
{
	return guest_session__inject_events(gs, -1);
}

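/*
 * Host events are repiped in timestamp order, so before writing each one,
 * first inject any pending guest events that are not later than it. This is
 * what interleaves the two streams.
 */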
static int host__repipe(struct perf_tool *tool,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	int ret;

	ret = guest_session__inject_events(&inject->guest_session, sample->time);
	if (ret)
		return ret;

	return perf_event__repipe(tool, event, sample, machine);
}

static int host__finished_init(struct perf_session *session, union perf_event *event)
{
	struct perf_inject *inject = container_of(session->tool, struct perf_inject, tool);
	struct guest_session *gs = &inject->guest_session;
	int ret;

	/*
	 * Peek through host COMM events to find QEMU threads and the VCPU they
	 * are running.
	 */
	ret = host_peek_vm_comms(session, gs);
	if (ret)
		return ret;

	if (!gs->vcpu_cnt) {
		pr_err("No VCPU threads found for pid %u\n", gs->machine_pid);
		return -EINVAL;
	}

	/*
	 * Allocate new (unused) host sample IDs and map them to the guest IDs.
	 */
	gs->highest_id = evlist__find_highest_id(session->evlist);
	ret = guest_session__map_ids(gs, session->evlist);
	if (ret)
		return ret;

	ret = guest_session__add_attrs(gs);
	if (ret)
		return ret;

	ret = synthesize_id_index(inject, gs->session->evlist->core.nr_entries);
	if (ret) {
		pr_err("Failed to synthesize id_index\n");
		return ret;
	}

	ret = guest_session__add_build_ids(gs);
	if (ret) {
		pr_err("Failed to add guest build IDs\n");
		return ret;
	}

	gs->ready = true;

	ret = guest_session__inject_events(gs, 0);
	if (ret)
		return ret;

	return perf_event__repipe_op2_synth(session, event);
}

/*
 * Obey finished-round ordering. The FINISHED_ROUND event is first processed
 * which flushes host events to file up until the last flush time. Then inject
 * guest events up to the same time. Finally write out the FINISHED_ROUND event
 * itself.
 */
static int host__finished_round(struct perf_tool *tool,
				union perf_event *event,
				struct ordered_events *oe)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	int ret = perf_event__process_finished_round(tool, event, oe);
	u64 timestamp = ordered_events__last_flush_time(oe);

	if (ret)
		return ret;

	ret = guest_session__inject_events(&inject->guest_session, timestamp);
	if (ret)
		return ret;

	return perf_event__repipe_oe_synth(tool, event, oe);
}

static int host__context_switch(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
	struct guest_session *gs = &inject->guest_session;
	u32 pid = event->context_switch.next_prev_pid;
	u32 tid = event->context_switch.next_prev_tid;
	struct guest_tid *guest_tid;
	u32 vcpu;

	if (out || pid != gs->machine_pid)
		goto out;

	guest_tid = guest_session__lookup_tid(gs, tid);
	if (!guest_tid)
		goto out;

	if (sample->cpu == (u32)-1) {
		pr_err("Switch event does not have CPU\n");
		return -EINVAL;
	}

	vcpu = guest_tid->vcpu;
	if (vcpu >= gs->vcpu_cnt)
		return -EINVAL;

	/* Guest is switching in, record which CPU the VCPU is now running on */
	gs->vcpu[vcpu].cpu = sample->cpu;
out:
	return host__repipe(tool, event, sample, machine);
}

static void sig_handler(int sig __maybe_unused)
{
	session_done = 1;
}

static int evsel__check_stype(struct evsel *evsel, u64 sample_type, const char *sample_msg)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	const char *name = evsel__name(evsel);

	if (!(attr->sample_type & sample_type)) {
		pr_err("Samples for %s event do not have %s attribute set.",
		       name, sample_msg);
		return -EINVAL;
	}

	return 0;
}

static int drop_sample(struct perf_tool *tool __maybe_unused,
		       union perf_event *event __maybe_unused,
		       struct perf_sample *sample __maybe_unused,
		       struct evsel *evsel __maybe_unused,
		       struct machine *machine __maybe_unused)
{
	return 0;
}

static void strip_init(struct perf_inject *inject)
{
	struct evlist *evlist = inject->session->evlist;
	struct evsel *evsel;

	inject->tool.context_switch = perf_event__drop;

	evlist__for_each_entry(evlist, evsel)
		evsel->handler = drop_sample;
}

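/*
 * Parse the --vm-time-correlation argument. The option implies in-place
 * update of the input file; an optional leading "dry-run" keyword requests
 * validation only, and anything after it is passed through to the itrace
 * code as correlation arguments.
 */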
parse_vm_time_correlation(const struct option * opt,const char * str,int unset)1719 static int parse_vm_time_correlation(const struct option *opt, const char *str, int unset)
1720 {
1721 struct perf_inject *inject = opt->value;
1722 const char *args;
1723 char *dry_run;
1724
1725 if (unset)
1726 return 0;
1727
1728 inject->itrace_synth_opts.set = true;
1729 inject->itrace_synth_opts.vm_time_correlation = true;
1730 inject->in_place_update = true;
1731
1732 if (!str)
1733 return 0;
1734
1735 dry_run = skip_spaces(str);
1736 if (!strncmp(dry_run, "dry-run", strlen("dry-run"))) {
1737 inject->itrace_synth_opts.vm_tm_corr_dry_run = true;
1738 inject->in_place_update_dry_run = true;
1739 args = dry_run + strlen("dry-run");
1740 } else {
1741 args = str;
1742 }
1743
1744 inject->itrace_synth_opts.vm_tm_corr_args = strdup(args);
1745
1746 return inject->itrace_synth_opts.vm_tm_corr_args ? 0 : -ENOMEM;
1747 }
1748
static int parse_guest_data(const struct option *opt, const char *str, int unset)
{
	struct perf_inject *inject = opt->value;
	struct guest_session *gs = &inject->guest_session;
	char *tok;
	char *s;

	if (unset)
		return 0;

	if (!str)
		goto bad_args;

	s = strdup(str);
	if (!s)
		return -ENOMEM;

	gs->perf_data_file = strsep(&s, ",");
	if (!gs->perf_data_file)
		goto bad_args;

	gs->copy_kcore_dir = has_kcore_dir(gs->perf_data_file);
	if (gs->copy_kcore_dir)
		inject->output.is_dir = true;

	tok = strsep(&s, ",");
	if (!tok)
		goto bad_args;
	gs->machine_pid = strtoul(tok, NULL, 0);
	if (!gs->machine_pid)
		goto bad_args;

	gs->time_scale = 1;

	tok = strsep(&s, ",");
	if (!tok)
		goto out;
	gs->time_offset = strtoull(tok, NULL, 0);

	tok = strsep(&s, ",");
	if (!tok)
		goto out;
	gs->time_scale = strtod(tok, NULL);
	if (!gs->time_scale)
		goto bad_args;
out:
	return 0;

bad_args:
	pr_err("--guest-data option requires guest perf.data file name, "
	       "guest machine PID, and optionally guest timestamp offset, "
	       "and guest timestamp scale factor, separated by commas.\n");
	return -1;
}

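/*
 * Record the file offset and size of every header feature section so that
 * feat_copy() can later re-read the ones keep_feat() decides to keep.
 */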
static int save_section_info_cb(struct perf_file_section *section,
				struct perf_header *ph __maybe_unused,
				int feat, int fd __maybe_unused, void *data)
{
	struct perf_inject *inject = data;

	inject->secs[feat] = *section;
	return 0;
}

static int save_section_info(struct perf_inject *inject)
{
	struct perf_header *header = &inject->session->header;
	int fd = perf_data__fd(inject->session->data);

	return perf_header__process_sections(header, fd, inject, save_section_info_cb);
}

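/*
 * Header features that describe the recorded machine or software are copied
 * to the output unchanged; features that injection may invalidate are
 * regenerated instead.
 */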
static bool keep_feat(int feat)
{
	switch (feat) {
	/* Keep original information that describes the machine or software */
	case HEADER_TRACING_DATA:
	case HEADER_HOSTNAME:
	case HEADER_OSRELEASE:
	case HEADER_VERSION:
	case HEADER_ARCH:
	case HEADER_NRCPUS:
	case HEADER_CPUDESC:
	case HEADER_CPUID:
	case HEADER_TOTAL_MEM:
	case HEADER_CPU_TOPOLOGY:
	case HEADER_NUMA_TOPOLOGY:
	case HEADER_PMU_MAPPINGS:
	case HEADER_CACHE:
	case HEADER_MEM_TOPOLOGY:
	case HEADER_CLOCKID:
	case HEADER_BPF_PROG_INFO:
	case HEADER_BPF_BTF:
	case HEADER_CPU_PMU_CAPS:
	case HEADER_CLOCK_DATA:
	case HEADER_HYBRID_TOPOLOGY:
	case HEADER_PMU_CAPS:
		return true;
	/* Information that can be updated */
	case HEADER_BUILD_ID:
	case HEADER_CMDLINE:
	case HEADER_EVENT_DESC:
	case HEADER_BRANCH_STACK:
	case HEADER_GROUP_DESC:
	case HEADER_AUXTRACE:
	case HEADER_STAT:
	case HEADER_SAMPLE_TIME:
	case HEADER_DIR_FORMAT:
	case HEADER_COMPRESSED:
	default:
		return false;
	}
}

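/* preadn() wrapper that treats a short read as an error */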
static int read_file(int fd, u64 offs, void *buf, size_t sz)
{
	ssize_t ret = preadn(fd, buf, sz, offs);

	if (ret < 0)
		return -errno;
	if ((size_t)ret != sz)
		return -EINVAL;
	return 0;
}

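/*
 * Copy a feature section verbatim: read it back from the input file at the
 * offset saved by save_section_info() and emit it via the feat_writer.
 */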
static int feat_copy(struct perf_inject *inject, int feat, struct feat_writer *fw)
{
	int fd = perf_data__fd(inject->session->data);
	u64 offs = inject->secs[feat].offset;
	size_t sz = inject->secs[feat].size;
	void *buf = malloc(sz);
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = read_file(fd, offs, buf, sz);
	if (ret)
		goto out_free;

	ret = fw->write(fw, buf, sz);
out_free:
	free(buf);
	return ret;
}

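/*
 * Adapter tying the feat_copier interface used by perf_session__inject_header()
 * back to this perf_inject instance.  Returning 1 below means the section
 * was copied; 0 lets the header code regenerate it.
 */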
struct inject_fc {
	struct feat_copier fc;
	struct perf_inject *inject;
};

static int feat_copy_cb(struct feat_copier *fc, int feat, struct feat_writer *fw)
{
	struct inject_fc *inj_fc = container_of(fc, struct inject_fc, fc);
	struct perf_inject *inject = inj_fc->inject;
	int ret;

	if (!inject->secs[feat].offset ||
	    !keep_feat(feat))
		return 0;

	ret = feat_copy(inject, feat, fw);
	if (ret < 0)
		return ret;

	return 1; /* Feature section copied */
}

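/*
 * Copy the kcore_dir* subdirectories from the input directory into the
 * output directory by shelling out to cp(1); -n avoids clobbering anything
 * already there.
 */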
static int copy_kcore_dir(struct perf_inject *inject)
{
	char *cmd;
	int ret;

	ret = asprintf(&cmd, "cp -r -n %s/kcore_dir* %s >/dev/null 2>&1",
		       inject->input_name, inject->output.path);
	if (ret < 0)
		return ret;
	pr_debug("%s\n", cmd);
	ret = system(cmd);
	free(cmd);
	return ret;
}

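/*
 * Like copy_kcore_dir() but for the guest data file: the guest's kcore_dir
 * is copied as kcore_dir__<machine_pid> to keep it separate from the host's.
 */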
static int guest_session__copy_kcore_dir(struct guest_session *gs)
{
	struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
	char *cmd;
	int ret;

	ret = asprintf(&cmd, "cp -r -n %s/kcore_dir %s/kcore_dir__%u >/dev/null 2>&1",
		       gs->perf_data_file, inject->output.path, gs->machine_pid);
	if (ret < 0)
		return ret;
	pr_debug("%s\n", cmd);
	ret = system(cmd);
	free(cmd);
	return ret;
}

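/* There is no separate output file when updating in place */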
static int output_fd(struct perf_inject *inject)
{
	return inject->in_place_update ? -1 : perf_data__fd(&inject->output);
}

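/*
 * Core of 'perf inject': select the tool callbacks implementing the
 * requested transformation (build-ids, sched-stat merging, itrace
 * synthesis, VM time correlation or guest-data injection), process all
 * events through them, then fix up the header and copy any kcore
 * directories.
 */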
static int __cmd_inject(struct perf_inject *inject)
{
	int ret = -EINVAL;
	struct guest_session *gs = &inject->guest_session;
	struct perf_session *session = inject->session;
	int fd = output_fd(inject);
	u64 output_data_offset;

	signal(SIGINT, sig_handler);

	if (inject->build_ids || inject->sched_stat ||
	    inject->itrace_synth_opts.set || inject->build_id_all) {
		inject->tool.mmap = perf_event__repipe_mmap;
		inject->tool.mmap2 = perf_event__repipe_mmap2;
		inject->tool.fork = perf_event__repipe_fork;
#ifdef HAVE_LIBTRACEEVENT
		inject->tool.tracing_data = perf_event__repipe_tracing_data;
#endif
	}

	output_data_offset = perf_session__data_offset(session->evlist);

	if (inject->build_id_all) {
		inject->tool.mmap = perf_event__repipe_buildid_mmap;
		inject->tool.mmap2 = perf_event__repipe_buildid_mmap2;
	} else if (inject->build_ids) {
		inject->tool.sample = perf_event__inject_buildid;
	} else if (inject->sched_stat) {
		struct evsel *evsel;

		evlist__for_each_entry(session->evlist, evsel) {
			const char *name = evsel__name(evsel);

			if (!strcmp(name, "sched:sched_switch")) {
				if (evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID"))
					return -EINVAL;

				evsel->handler = perf_inject__sched_switch;
			} else if (!strcmp(name, "sched:sched_process_exit"))
				evsel->handler = perf_inject__sched_process_exit;
#ifdef HAVE_LIBTRACEEVENT
			else if (!strncmp(name, "sched:sched_stat_", 17))
				evsel->handler = perf_inject__sched_stat;
#endif
		}
	} else if (inject->itrace_synth_opts.vm_time_correlation) {
		session->itrace_synth_opts = &inject->itrace_synth_opts;
		memset(&inject->tool, 0, sizeof(inject->tool));
		inject->tool.id_index = perf_event__process_id_index;
		inject->tool.auxtrace_info = perf_event__process_auxtrace_info;
		inject->tool.auxtrace = perf_event__process_auxtrace;
		inject->tool.auxtrace_error = perf_event__process_auxtrace_error;
		inject->tool.ordered_events = true;
		inject->tool.ordering_requires_timestamps = true;
	} else if (inject->itrace_synth_opts.set) {
		session->itrace_synth_opts = &inject->itrace_synth_opts;
		inject->itrace_synth_opts.inject = true;
		inject->tool.comm = perf_event__repipe_comm;
		inject->tool.namespaces = perf_event__repipe_namespaces;
		inject->tool.exit = perf_event__repipe_exit;
		inject->tool.id_index = perf_event__process_id_index;
		inject->tool.auxtrace_info = perf_event__process_auxtrace_info;
		inject->tool.auxtrace = perf_event__process_auxtrace;
		inject->tool.aux = perf_event__drop_aux;
		inject->tool.itrace_start = perf_event__drop_aux;
		inject->tool.aux_output_hw_id = perf_event__drop_aux;
		inject->tool.ordered_events = true;
		inject->tool.ordering_requires_timestamps = true;
		/* Allow space in the header for new attributes */
		output_data_offset = roundup(8192 + session->header.data_offset, 4096);
		if (inject->strip)
			strip_init(inject);
	} else if (gs->perf_data_file) {
		char *name = gs->perf_data_file;

		/*
		 * Not strictly necessary, but keep these events in order wrt
		 * guest events.
		 */
		inject->tool.mmap = host__repipe;
		inject->tool.mmap2 = host__repipe;
		inject->tool.comm = host__repipe;
		inject->tool.fork = host__repipe;
		inject->tool.exit = host__repipe;
		inject->tool.lost = host__repipe;
		inject->tool.ksymbol = host__repipe;
		inject->tool.text_poke = host__repipe;
		/*
		 * Once the host session has initialized, set up sample ID
		 * mapping and feed in guest attrs, build IDs and initial
		 * events.
		 */
		inject->tool.finished_init = host__finished_init;
		/* Obey finished round ordering */
		inject->tool.finished_round = host__finished_round;
		/* Keep track of which CPU a VCPU is running on */
		inject->tool.context_switch = host__context_switch;
		/*
		 * Must order events to be able to obey finished round
		 * ordering.
		 */
		inject->tool.ordered_events = true;
		inject->tool.ordering_requires_timestamps = true;
		/* Set up a separate session to process guest perf.data file */
		ret = guest_session__start(gs, name, session->data->force);
		if (ret) {
			pr_err("Failed to process %s, error %d\n", name, ret);
			return ret;
		}
		/* Allow space in the header for guest attributes */
		output_data_offset += gs->session->header.data_offset;
		output_data_offset = roundup(output_data_offset, 4096);
	}

	if (!inject->itrace_synth_opts.set)
		auxtrace_index__free(&session->auxtrace_index);

	if (!inject->is_pipe && !inject->in_place_update)
		lseek(fd, output_data_offset, SEEK_SET);

	ret = perf_session__process_events(session);
	if (ret)
		return ret;

	if (gs->session) {
		/*
		 * Remaining guest events have later timestamps. Flush them
		 * out to file.
		 */
		ret = guest_session__flush_events(gs);
		if (ret) {
			pr_err("Failed to flush guest events\n");
			return ret;
		}
	}

	if (!inject->is_pipe && !inject->in_place_update) {
		struct inject_fc inj_fc = {
			.fc.copy = feat_copy_cb,
			.inject = inject,
		};

		if (inject->build_ids)
			perf_header__set_feat(&session->header,
					      HEADER_BUILD_ID);
		/*
		 * Keep all build-ids when there is unprocessed AUX data because
		 * it is not known which ones the AUX trace hits.
		 */
		if (perf_header__has_feat(&session->header, HEADER_BUILD_ID) &&
		    inject->have_auxtrace && !inject->itrace_synth_opts.set)
			dsos__hit_all(session);
		/*
		 * The AUX areas have been removed and replaced with
		 * synthesized hardware events, so clear the feature flag.
		 */
		if (inject->itrace_synth_opts.set) {
			perf_header__clear_feat(&session->header,
						HEADER_AUXTRACE);
			if (inject->itrace_synth_opts.last_branch ||
			    inject->itrace_synth_opts.add_last_branch)
				perf_header__set_feat(&session->header,
						      HEADER_BRANCH_STACK);
		}
		session->header.data_offset = output_data_offset;
		session->header.data_size = inject->bytes_written;
		perf_session__inject_header(session, session->evlist, fd, &inj_fc.fc);

		if (inject->copy_kcore_dir) {
			ret = copy_kcore_dir(inject);
			if (ret) {
				pr_err("Failed to copy kcore\n");
				return ret;
			}
		}
		if (gs->copy_kcore_dir) {
			ret = guest_session__copy_kcore_dir(gs);
			if (ret) {
				pr_err("Failed to copy guest kcore\n");
				return ret;
			}
		}
	}

	return ret;
}

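/*
 * Entry point for 'perf inject'.  Every event type defaults to being
 * repiped unchanged; the command-line options below then override
 * individual callbacks before __cmd_inject() runs the session.
 */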
int cmd_inject(int argc, const char **argv)
{
	struct perf_inject inject = {
		.tool = {
			.sample = perf_event__repipe_sample,
			.read = perf_event__repipe_sample,
			.mmap = perf_event__repipe,
			.mmap2 = perf_event__repipe,
			.comm = perf_event__repipe,
			.namespaces = perf_event__repipe,
			.cgroup = perf_event__repipe,
			.fork = perf_event__repipe,
			.exit = perf_event__repipe,
			.lost = perf_event__repipe,
			.lost_samples = perf_event__repipe,
			.aux = perf_event__repipe,
			.itrace_start = perf_event__repipe,
			.aux_output_hw_id = perf_event__repipe,
			.context_switch = perf_event__repipe,
			.throttle = perf_event__repipe,
			.unthrottle = perf_event__repipe,
			.ksymbol = perf_event__repipe,
			.bpf = perf_event__repipe,
			.text_poke = perf_event__repipe,
			.attr = perf_event__repipe_attr,
			.event_update = perf_event__repipe_event_update,
			.tracing_data = perf_event__repipe_op2_synth,
			.finished_round = perf_event__repipe_oe_synth,
			.build_id = perf_event__repipe_op2_synth,
			.id_index = perf_event__repipe_op2_synth,
			.auxtrace_info = perf_event__repipe_op2_synth,
			.auxtrace_error = perf_event__repipe_op2_synth,
			.time_conv = perf_event__repipe_op2_synth,
			.thread_map = perf_event__repipe_op2_synth,
			.cpu_map = perf_event__repipe_op2_synth,
			.stat_config = perf_event__repipe_op2_synth,
			.stat = perf_event__repipe_op2_synth,
			.stat_round = perf_event__repipe_op2_synth,
			.feature = perf_event__repipe_op2_synth,
			.finished_init = perf_event__repipe_op2_synth,
			.compressed = perf_event__repipe_op4_synth,
			.auxtrace = perf_event__repipe_auxtrace,
		},
		.input_name = "-",
		.samples = LIST_HEAD_INIT(inject.samples),
		.output = {
			.path = "-",
			.mode = PERF_DATA_MODE_WRITE,
			.use_stdio = true,
		},
	};
	struct perf_data data = {
		.mode = PERF_DATA_MODE_READ,
		.use_stdio = true,
	};
	int ret;
	bool repipe = true;
	const char *known_build_ids = NULL;

	struct option options[] = {
		OPT_BOOLEAN('b', "build-ids", &inject.build_ids,
			    "Inject build-ids into the output stream"),
		OPT_BOOLEAN(0, "buildid-all", &inject.build_id_all,
			    "Inject build-ids of all DSOs into the output stream"),
		OPT_STRING(0, "known-build-ids", &known_build_ids,
			   "buildid path [,buildid path...]",
			   "build-ids to use for given paths"),
		OPT_STRING('i', "input", &inject.input_name, "file",
			   "input file name"),
		OPT_STRING('o', "output", &inject.output.path, "file",
			   "output file name"),
		OPT_BOOLEAN('s', "sched-stat", &inject.sched_stat,
			    "Merge sched-stat and sched-switch for getting events "
			    "where and how long tasks slept"),
#ifdef HAVE_JITDUMP
		OPT_BOOLEAN('j', "jit", &inject.jit_mode, "merge jitdump files into perf.data file"),
#endif
		OPT_INCR('v', "verbose", &verbose,
			 "be more verbose (show build ids, etc)"),
		OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
			   "file", "vmlinux pathname"),
		OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
			    "don't load vmlinux even if found"),
		OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, "file",
			   "kallsyms pathname"),
		OPT_BOOLEAN('f', "force", &data.force, "don't complain, do it"),
		OPT_CALLBACK_OPTARG(0, "itrace", &inject.itrace_synth_opts,
				    NULL, "opts", "Instruction Tracing options\n"
				    ITRACE_HELP,
				    itrace_parse_synth_opts),
		OPT_BOOLEAN(0, "strip", &inject.strip,
			    "strip non-synthesized events (use with --itrace)"),
		OPT_CALLBACK_OPTARG(0, "vm-time-correlation", &inject, NULL, "opts",
				    "correlate time between VM guests and the host",
				    parse_vm_time_correlation),
		OPT_CALLBACK_OPTARG(0, "guest-data", &inject, NULL, "opts",
				    "inject events from a guest perf.data file",
				    parse_guest_data),
		OPT_STRING(0, "guestmount", &symbol_conf.guestmount, "directory",
			   "guest mount directory under which every guest os"
			   " instance has a subdir"),
		OPT_END()
	};
	const char * const inject_usage[] = {
		"perf inject [<options>]",
		NULL
	};
#ifndef HAVE_JITDUMP
	set_option_nobuild(options, 'j', "jit", "NO_LIBELF=1", true);
#endif
	argc = parse_options(argc, argv, options, inject_usage, 0);

	/*
	 * Any (unrecognized) arguments left?
	 */
	if (argc)
		usage_with_options(inject_usage, options);

	if (inject.strip && !inject.itrace_synth_opts.set) {
		pr_err("--strip option requires --itrace option\n");
		return -1;
	}

	if (symbol__validate_sym_arguments())
		return -1;

	if (inject.in_place_update) {
		if (!strcmp(inject.input_name, "-")) {
			pr_err("Input file name required for in-place updating\n");
			return -1;
		}
		if (strcmp(inject.output.path, "-")) {
			pr_err("Output file name must not be specified for in-place updating\n");
			return -1;
		}
		if (!data.force && !inject.in_place_update_dry_run) {
			pr_err("The input file would be updated in place, "
			       "the --force option is required.\n");
			return -1;
		}
		if (!inject.in_place_update_dry_run)
			data.in_place_update = true;
	} else {
		if (strcmp(inject.output.path, "-") && !inject.strip &&
		    has_kcore_dir(inject.input_name)) {
			inject.output.is_dir = true;
			inject.copy_kcore_dir = true;
		}
		if (perf_data__open(&inject.output)) {
			perror("failed to create output file");
			return -1;
		}
	}

	data.path = inject.input_name;
	if (!strcmp(inject.input_name, "-") || inject.output.is_pipe) {
		inject.is_pipe = true;
		/*
		 * Do not repipe header when input is a regular file
		 * since either it can rewrite the header at the end
		 * or write a new pipe header.
		 */
		if (strcmp(inject.input_name, "-"))
			repipe = false;
	}

	inject.session = __perf_session__new(&data, repipe,
					     output_fd(&inject),
					     &inject.tool);
	if (IS_ERR(inject.session)) {
		ret = PTR_ERR(inject.session);
		goto out_close_output;
	}

	if (zstd_init(&(inject.session->zstd_data), 0) < 0)
		pr_warning("Decompression initialization failed.\n");

	/* Save original section info before feature bits change */
	ret = save_section_info(&inject);
	if (ret)
		goto out_delete;

	if (!data.is_pipe && inject.output.is_pipe) {
		ret = perf_header__write_pipe(perf_data__fd(&inject.output));
		if (ret < 0) {
			pr_err("Couldn't write a new pipe header.\n");
			goto out_delete;
		}

		ret = perf_event__synthesize_for_pipe(&inject.tool,
						      inject.session,
						      &inject.output,
						      perf_event__repipe);
		if (ret < 0)
			goto out_delete;
	}

	if (inject.build_ids && !inject.build_id_all) {
		/*
		 * Order the mmap records so that build-ids are attributed
		 * correctly, especially for jitted code mmaps. We cannot
		 * generate the buildid hit list and inject the jit mmaps at
		 * the same time for now.
		 */
		inject.tool.ordered_events = true;
		inject.tool.ordering_requires_timestamps = true;
		if (known_build_ids != NULL) {
			inject.known_build_ids =
				perf_inject__parse_known_build_ids(known_build_ids);

			if (inject.known_build_ids == NULL) {
				pr_err("Couldn't parse known build ids.\n");
				ret = -EINVAL;
				goto out_delete;
			}
		}
	}

	if (inject.sched_stat)
		inject.tool.ordered_events = true;

#ifdef HAVE_JITDUMP
	if (inject.jit_mode) {
		inject.tool.mmap2 = perf_event__jit_repipe_mmap2;
		inject.tool.mmap = perf_event__jit_repipe_mmap;
		inject.tool.ordered_events = true;
		inject.tool.ordering_requires_timestamps = true;
		/*
		 * JIT MMAP injection injects all MMAP events in one go, so it
		 * does not obey finished_round semantics.
		 */
		inject.tool.finished_round = perf_event__drop_oe;
	}
#endif
	ret = symbol__init(&inject.session->header.env);
	if (ret < 0)
		goto out_delete;

	ret = __cmd_inject(&inject);

	guest_session__exit(&inject.guest_session);

out_delete:
	strlist__delete(inject.known_build_ids);
	zstd_fini(&(inject.session->zstd_data));
	perf_session__delete(inject.session);
out_close_output:
	if (!inject.in_place_update)
		perf_data__close(&inject.output);
	free(inject.itrace_synth_opts.vm_tm_corr_args);
	return ret;
}