// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <stdlib.h>

#include "../../../util/event.h"
#include "../../../util/synthetic-events.h"
#include "../../../util/machine.h"
#include "../../../util/tool.h"
#include "../../../util/map.h"
#include "../../../util/debug.h"
#include "util/sample.h"

#if defined(__x86_64__)

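/*
 * Synthesize PERF_RECORD_MMAP events for the "extra" kernel maps (on
 * x86_64, maps such as the entry trampolines) so that addresses falling
 * outside the main kernel text mapping can still be resolved.
 */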
int perf_event__synthesize_extra_kmaps(struct perf_tool *tool,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	int rc = 0;
	struct map *pos;
	struct maps *kmaps = machine__kernel_maps(machine);
	union perf_event *event = zalloc(sizeof(event->mmap) +
					 machine->id_hdr_size);

	if (!event) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for extra kernel maps\n");
		return -1;
	}

	maps__for_each_entry(kmaps, pos) {
		struct kmap *kmap;
		size_t size;

		if (!__map__is_extra_kernel_map(pos))
			continue;

		kmap = map__kmap(pos);

		size = sizeof(event->mmap) - sizeof(event->mmap.filename) +
		       PERF_ALIGN(strlen(kmap->name) + 1, sizeof(u64)) +
		       machine->id_hdr_size;

		memset(event, 0, size);

		event->mmap.header.type = PERF_RECORD_MMAP;

		/*
		 * kernel uses 0 for user space maps, see kernel/perf_event.c
		 * __perf_event_mmap
		 */
		if (machine__is_host(machine))
			event->header.misc = PERF_RECORD_MISC_KERNEL;
		else
			event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

		event->mmap.header.size = size;

		event->mmap.start = pos->start;
		event->mmap.len = pos->end - pos->start;
		event->mmap.pgoff = pos->pgoff;
		event->mmap.pid = machine->pid;

		strlcpy(event->mmap.filename, kmap->name, PATH_MAX);

		if (perf_tool__process_synth_event(tool, event, machine,
						   process) != 0) {
			rc = -1;
			break;
		}
	}

	free(event);
	return rc;
}

#endif

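/*
 * Decode the u64 weight value of a sample.  With plain PERF_SAMPLE_WEIGHT
 * the whole 64 bits are the weight; otherwise (PERF_SAMPLE_WEIGHT_STRUCT)
 * the value is split into a 32-bit weight (var1_dw), a 16-bit field that
 * x86 uses for instruction latency (var2_w) and a 16-bit field used for
 * retire latency (var3_w).
 */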
void arch_perf_parse_sample_weight(struct perf_sample *data,
				   const __u64 *array, u64 type)
{
	union perf_sample_weight weight;

	weight.full = *array;
	if (type & PERF_SAMPLE_WEIGHT)
		data->weight = weight.full;
	else {
		data->weight = weight.var1_dw;
		data->ins_lat = weight.var2_w;
		data->retire_lat = weight.var3_w;
	}
}

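/*
 * Inverse of the above: pack weight, instruction latency and retire
 * latency back into a single u64 (bits 0-31, 32-47 and 48-63
 * respectively) when synthesizing a PERF_SAMPLE_WEIGHT_STRUCT sample.
 */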
void arch_perf_synthesize_sample_weight(const struct perf_sample *data,
					__u64 *array, u64 type)
{
	*array = data->weight;

	if (type & PERF_SAMPLE_WEIGHT_STRUCT) {
		*array &= 0xffffffff;
		*array |= ((u64)data->ins_lat << 32);
		*array |= ((u64)data->retire_lat << 48);
	}
}

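/*
 * Rename the generic "Pipeline Stage Cycle" sort column headers to the
 * x86-specific "Retire Latency" wording; any other header is returned
 * unchanged.
 */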
const char *arch_perf_header_entry(const char *se_header)
{
	if (!strcmp(se_header, "Local Pipeline Stage Cycle"))
		return "Local Retire Latency";
	else if (!strcmp(se_header, "Pipeline Stage Cycle"))
		return "Retire Latency";

	return se_header;
}

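/*
 * Report that the "p_stage_cyc" and "local_p_stage_cyc" sort keys are
 * supported on x86.
 */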
int arch_support_sort_key(const char *sort_key)
{
	if (!strcmp(sort_key, "p_stage_cyc"))
		return 1;
	if (!strcmp(sort_key, "local_p_stage_cyc"))
		return 1;
	return 0;
}