// SPDX-License-Identifier: GPL-2.0
/*
 * Performance event support - Processor Activity Instrumentation Facility
 *
 * Copyright IBM Corp. 2022
 * Author(s): Thomas Richter <tmricht@linux.ibm.com>
 */
#define KMSG_COMPONENT	"pai_crypto"
#define pr_fmt(fmt)	KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/perf_event.h>

#include <asm/ctl_reg.h>
#include <asm/pai.h>
#include <asm/debug.h>

static debug_info_t *cfm_dbg;
static unsigned int paicrypt_cnt;	/* Size of the mapped counter sets */
					/* extracted with QPACI instruction */

DEFINE_STATIC_KEY_FALSE(pai_key);

struct pai_userdata {
	u16 num;
	u64 value;
} __packed;

struct paicrypt_map {
	unsigned long *page;		/* Page for CPU to store counters */
	struct pai_userdata *save;	/* Page to store non-zero counters */
	unsigned int active_events;	/* # of PAI crypto users */
	unsigned int refcnt;		/* Reference count mapped buffers */
	enum paievt_mode mode;		/* Type of event */
	struct perf_event *event;	/* Perf event for sampling */
};

static DEFINE_PER_CPU(struct paicrypt_map, paicrypt_map);

/* Release the PMU if event is the last perf event */
static DEFINE_MUTEX(pai_reserve_mutex);

/* Adjust usage counters and remove allocated memory when all users are
 * gone.
 */
static void paicrypt_event_destroy(struct perf_event *event)
{
	struct paicrypt_map *cpump = per_cpu_ptr(&paicrypt_map, event->cpu);

	cpump->event = NULL;
	static_branch_dec(&pai_key);
	mutex_lock(&pai_reserve_mutex);
	debug_sprintf_event(cfm_dbg, 5, "%s event %#llx cpu %d users %d"
			    " mode %d refcnt %d\n", __func__,
			    event->attr.config, event->cpu,
			    cpump->active_events, cpump->mode, cpump->refcnt);
	if (!--cpump->refcnt) {
		debug_sprintf_event(cfm_dbg, 4, "%s page %#lx save %p\n",
				    __func__, (unsigned long)cpump->page,
				    cpump->save);
		free_page((unsigned long)cpump->page);
		cpump->page = NULL;
		kvfree(cpump->save);
		cpump->save = NULL;
		cpump->mode = PAI_MODE_NONE;
	}
	mutex_unlock(&pai_reserve_mutex);
}

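/* Return the value of counter number @nr. User space counters are stored
 * at the beginning of the counter page, kernel space counters follow at
 * offset PAI_CRYPTO_MAXCTR.
 */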
static u64 paicrypt_getctr(struct paicrypt_map *cpump, int nr, bool kernel)
{
	if (kernel)
		nr += PAI_CRYPTO_MAXCTR;
	return cpump->page[nr];
}

/* Read the counter values. Return value from location in CMP. For event
 * CRYPTO_ALL sum up all events.
 */
static u64 paicrypt_getdata(struct perf_event *event, bool kernel)
{
	struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map);
	u64 sum = 0;
	int i;

	if (event->attr.config != PAI_CRYPTO_BASE) {
		return paicrypt_getctr(cpump,
				       event->attr.config - PAI_CRYPTO_BASE,
				       kernel);
	}

	for (i = 1; i <= paicrypt_cnt; i++) {
		u64 val = paicrypt_getctr(cpump, i, kernel);

		if (!val)
			continue;
		sum += val;
	}
	return sum;
}

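/* Sum up the counter values for user and/or kernel space, depending on the
 * exclude_user and exclude_kernel attributes of the event.
 */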
static u64 paicrypt_getall(struct perf_event *event)
{
	u64 sum = 0;

	if (!event->attr.exclude_kernel)
		sum += paicrypt_getdata(event, true);
	if (!event->attr.exclude_user)
		sum += paicrypt_getdata(event, false);

	return sum;
}

/* Used to avoid races in checking concurrent access of counting and
 * sampling for crypto events.
 *
 * Only one instance of event pai_crypto/CRYPTO_ALL/ for sampling is
 * allowed and when this event is running, no counting event is allowed.
 * Several counting events are allowed in parallel, but no sampling event
 * is allowed while one (or more) counting events are running.
 *
 * This function is called in process context and it is safe to block.
 * When the event initialization function fails, no other callback will
 * be invoked.
 *
 * Allocate the memory for the event.
 */
static int paicrypt_busy(struct perf_event_attr *a, struct paicrypt_map *cpump)
{
	int rc = 0;

	mutex_lock(&pai_reserve_mutex);
	if (a->sample_period) {		/* Sampling requested */
		if (cpump->mode != PAI_MODE_NONE)
			rc = -EBUSY;	/* ... sampling/counting active */
	} else {			/* Counting requested */
		if (cpump->mode == PAI_MODE_SAMPLING)
			rc = -EBUSY;	/* ... and sampling active */
	}
	if (rc)
		goto unlock;

	/* Allocate memory for counter page and counter extraction.
	 * Only the first counting event has to allocate a page.
	 */
	if (cpump->page)
		goto unlock;

	rc = -ENOMEM;
	cpump->page = (unsigned long *)get_zeroed_page(GFP_KERNEL);
	if (!cpump->page)
		goto unlock;
	cpump->save = kvmalloc_array(paicrypt_cnt + 1,
				     sizeof(struct pai_userdata), GFP_KERNEL);
	if (!cpump->save) {
		free_page((unsigned long)cpump->page);
		cpump->page = NULL;
		goto unlock;
	}
	rc = 0;

unlock:
	/* If rc is non-zero, do not set mode and reference count */
	if (!rc) {
		cpump->refcnt++;
		cpump->mode = a->sample_period ? PAI_MODE_SAMPLING
					       : PAI_MODE_COUNTING;
	}
	debug_sprintf_event(cfm_dbg, 5, "%s sample_period %#llx users %d"
			    " mode %d refcnt %d page %#lx save %p rc %d\n",
			    __func__, a->sample_period, cpump->active_events,
			    cpump->mode, cpump->refcnt,
			    (unsigned long)cpump->page, cpump->save, rc);
	mutex_unlock(&pai_reserve_mutex);
	return rc;
}

/* Might be called on different CPU than the one the event is intended for. */
static int paicrypt_event_init(struct perf_event *event)
{
	struct perf_event_attr *a = &event->attr;
	struct paicrypt_map *cpump;
	int rc;

	/* PAI crypto PMU registered as PERF_TYPE_RAW, check event type */
	if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type)
		return -ENOENT;
	/* PAI crypto event must be in valid range */
	if (a->config < PAI_CRYPTO_BASE ||
	    a->config > PAI_CRYPTO_BASE + paicrypt_cnt)
		return -EINVAL;
	/* Allow only CPU wide operation, no process context for now. */
	if (event->hw.target || event->cpu == -1)
		return -ENOENT;
	/* Allow only CRYPTO_ALL for sampling. */
	if (a->sample_period && a->config != PAI_CRYPTO_BASE)
		return -EINVAL;

	cpump = per_cpu_ptr(&paicrypt_map, event->cpu);
	rc = paicrypt_busy(a, cpump);
	if (rc)
		return rc;

	/* Event initialization sets last_tag to 0. When later on the events
	 * are deleted and re-added, do not reset the event count value to zero.
	 * Events are added, deleted and re-added when 2 or more events
	 * are active at the same time.
	 */
	event->hw.last_tag = 0;
	cpump->event = event;
	event->destroy = paicrypt_event_destroy;

	if (a->sample_period) {
		a->sample_period = 1;
		a->freq = 0;
		/* Register for paicrypt_sched_task() to be called */
		event->attach_state |= PERF_ATTACH_SCHED_CB;
		/* Add raw data which contain the memory mapped counters */
		a->sample_type |= PERF_SAMPLE_RAW;
		/* Turn off inheritance */
		a->inherit = 0;
	}

	static_branch_inc(&pai_key);
	return 0;
}

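/* Read the event: calculate the delta to the previously read counter value
 * (taking a possible wrap-around into account) and add it to the event
 * count.
 */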
static void paicrypt_read(struct perf_event *event)
{
	u64 prev, new, delta;

	prev = local64_read(&event->hw.prev_count);
	new = paicrypt_getall(event);
	local64_set(&event->hw.prev_count, new);
	delta = (prev <= new) ? new - prev
			      : (-1ULL - prev) + new + 1;	/* overflow */
	local64_add(delta, &event->count);
}

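/* Start the event. On the very first start take a snapshot of the current
 * counter value as base line for the delta calculation in paicrypt_read().
 */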
static void paicrypt_start(struct perf_event *event, int flags)
{
	u64 sum;

	if (!event->hw.last_tag) {
		event->hw.last_tag = 1;
		sum = paicrypt_getall(event);		/* Get current value */
		local64_set(&event->count, 0);
		local64_set(&event->hw.prev_count, sum);
	}
}

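/* Add the event to this CPU. The first event activates the PAI crypto
 * counters: store the counter page address in the lowcore CCD field and
 * set bit 50 in control register 0.
 */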
static int paicrypt_add(struct perf_event *event, int flags)
{
	struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map);
	unsigned long ccd;

	if (++cpump->active_events == 1) {
		ccd = virt_to_phys(cpump->page) | PAI_CRYPTO_KERNEL_OFFSET;
		WRITE_ONCE(S390_lowcore.ccd, ccd);
		__ctl_set_bit(0, 50);
	}
	cpump->event = event;
	if (flags & PERF_EF_START && !event->attr.sample_period) {
		/* Only counting needs initial counter value */
		paicrypt_start(event, PERF_EF_RELOAD);
	}
	event->hw.state = 0;
	if (event->attr.sample_period)
		perf_sched_cb_inc(event->pmu);
	return 0;
}

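/* Stop the event: read the current counter value and mark the event as
 * stopped.
 */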
static void paicrypt_stop(struct perf_event *event, int flags)
{
	paicrypt_read(event);
	event->hw.state = PERF_HES_STOPPED;
}

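/* Remove the event from this CPU. The last event deactivates the PAI
 * crypto counters: clear bit 50 in control register 0 and clear the
 * lowcore CCD field.
 */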
static void paicrypt_del(struct perf_event *event, int flags)
{
	struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map);

	if (event->attr.sample_period)
		perf_sched_cb_dec(event->pmu);
	if (!event->attr.sample_period)
		/* Only counting needs to read counter */
		paicrypt_stop(event, PERF_EF_UPDATE);
	if (--cpump->active_events == 0) {
		__ctl_clear_bit(0, 50);
		WRITE_ONCE(S390_lowcore.ccd, 0);
	}
}

/* Create raw data and save it in buffer. Returns number of bytes copied.
 * Saves only positive counter entries of the form
 * 2 bytes: Number of counter
 * 8 bytes: Value of counter
 */
static size_t paicrypt_copy(struct pai_userdata *userdata,
			    struct paicrypt_map *cpump,
			    bool exclude_user, bool exclude_kernel)
{
	int i, outidx = 0;

	for (i = 1; i <= paicrypt_cnt; i++) {
		u64 val = 0;

		if (!exclude_kernel)
			val += paicrypt_getctr(cpump, i, true);
		if (!exclude_user)
			val += paicrypt_getctr(cpump, i, false);
		if (val) {
			userdata[outidx].num = i;
			userdata[outidx].value = val;
			outidx++;
		}
	}
	return outidx * sizeof(struct pai_userdata);
}

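/* Write a sample for the event of this CPU. The raw data portion of the
 * sample holds all non-zero counters in pai_userdata format. The counter
 * page is cleared afterwards to restart counting for the next sample.
 */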
static int paicrypt_push_sample(void)
{
	struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map);
	struct perf_event *event = cpump->event;
	struct perf_sample_data data;
	struct perf_raw_record raw;
	struct pt_regs regs;
	size_t rawsize;
	int overflow;

	if (!cpump->event)		/* No event active */
		return 0;
	rawsize = paicrypt_copy(cpump->save, cpump,
				cpump->event->attr.exclude_user,
				cpump->event->attr.exclude_kernel);
	if (!rawsize)			/* No incremented counters */
		return 0;

	/* Setup perf sample */
	memset(&regs, 0, sizeof(regs));
	memset(&raw, 0, sizeof(raw));
	memset(&data, 0, sizeof(data));
	perf_sample_data_init(&data, 0, event->hw.last_period);
	if (event->attr.sample_type & PERF_SAMPLE_TID) {
		data.tid_entry.pid = task_tgid_nr(current);
		data.tid_entry.tid = task_pid_nr(current);
	}
	if (event->attr.sample_type & PERF_SAMPLE_TIME)
		data.time = event->clock();
	if (event->attr.sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
		data.id = event->id;
	if (event->attr.sample_type & PERF_SAMPLE_CPU) {
		data.cpu_entry.cpu = smp_processor_id();
		data.cpu_entry.reserved = 0;
	}
	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		raw.frag.size = rawsize;
		raw.frag.data = cpump->save;
		perf_sample_save_raw_data(&data, &raw);
	}

	overflow = perf_event_overflow(event, &data, &regs);
	perf_event_update_userpage(event);
	/* Clear lowcore page after read */
	memset(cpump->page, 0, PAGE_SIZE);
	return overflow;
}

/* Called on schedule-in and schedule-out. No access to event structure,
 * but for sampling only event CRYPTO_ALL is allowed.
 */
static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
{
	/* We started with a clean page on event installation. So read out
	 * results on schedule_out and if page was dirty, clear values.
	 */
	if (!sched_in)
		paicrypt_push_sample();
}

/* Attribute definitions for paicrypt interface. As with other CPU
 * Measurement Facilities, there is one attribute per mapped counter.
 * The number of mapped counters may vary per machine generation. Use
 * the QUERY PROCESSOR ACTIVITY COUNTER INFORMATION (QPACI) instruction
 * to determine the number of mapped counters. The instruction returns
 * a positive number, which is the highest number of supported counters.
 * All counters less than this number are also supported; there are no
 * holes. A returned number of zero means no support for mapped counters.
 *
 * The identification of the counter is a unique number. The chosen range
 * is 0x1000 + offset in mapped kernel page.
 * All CPU Measurement Facility counter identifiers must be unique and
 * the numbers from 0 to 496 are already used for the CPU Measurement
 * Counter facility. Numbers 0xb0000, 0xbc000 and 0xbd000 are already
 * used for the CPU Measurement Sampling facility.
 */
PMU_FORMAT_ATTR(event, "config:0-63");

static struct attribute *paicrypt_format_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group paicrypt_events_group = {
	.name = "events",
	.attrs = NULL			/* Filled in attr_event_init() */
};

static struct attribute_group paicrypt_format_group = {
	.name = "format",
	.attrs = paicrypt_format_attr,
};

static const struct attribute_group *paicrypt_attr_groups[] = {
	&paicrypt_events_group,
	&paicrypt_format_group,
	NULL,
};

/* Performance monitoring unit for mapped counters */
static struct pmu paicrypt = {
	.task_ctx_nr  = perf_invalid_context,
	.event_init   = paicrypt_event_init,
	.add	      = paicrypt_add,
	.del	      = paicrypt_del,
	.start	      = paicrypt_start,
	.stop	      = paicrypt_stop,
	.read	      = paicrypt_read,
	.sched_task   = paicrypt_sched_task,
	.attr_groups  = paicrypt_attr_groups
};

/* List of symbolic PAI counter names. */
static const char * const paicrypt_ctrnames[] = {
	[0] = "CRYPTO_ALL",
	[1] = "KM_DEA",
	[2] = "KM_TDEA_128",
	[3] = "KM_TDEA_192",
	[4] = "KM_ENCRYPTED_DEA",
	[5] = "KM_ENCRYPTED_TDEA_128",
	[6] = "KM_ENCRYPTED_TDEA_192",
	[7] = "KM_AES_128",
	[8] = "KM_AES_192",
	[9] = "KM_AES_256",
	[10] = "KM_ENCRYPTED_AES_128",
	[11] = "KM_ENCRYPTED_AES_192",
	[12] = "KM_ENCRYPTED_AES_256",
	[13] = "KM_XTS_AES_128",
	[14] = "KM_XTS_AES_256",
	[15] = "KM_XTS_ENCRYPTED_AES_128",
	[16] = "KM_XTS_ENCRYPTED_AES_256",
	[17] = "KMC_DEA",
	[18] = "KMC_TDEA_128",
	[19] = "KMC_TDEA_192",
	[20] = "KMC_ENCRYPTED_DEA",
	[21] = "KMC_ENCRYPTED_TDEA_128",
	[22] = "KMC_ENCRYPTED_TDEA_192",
	[23] = "KMC_AES_128",
	[24] = "KMC_AES_192",
	[25] = "KMC_AES_256",
	[26] = "KMC_ENCRYPTED_AES_128",
	[27] = "KMC_ENCRYPTED_AES_192",
	[28] = "KMC_ENCRYPTED_AES_256",
	[29] = "KMC_PRNG",
	[30] = "KMA_GCM_AES_128",
	[31] = "KMA_GCM_AES_192",
	[32] = "KMA_GCM_AES_256",
	[33] = "KMA_GCM_ENCRYPTED_AES_128",
	[34] = "KMA_GCM_ENCRYPTED_AES_192",
	[35] = "KMA_GCM_ENCRYPTED_AES_256",
	[36] = "KMF_DEA",
	[37] = "KMF_TDEA_128",
	[38] = "KMF_TDEA_192",
	[39] = "KMF_ENCRYPTED_DEA",
	[40] = "KMF_ENCRYPTED_TDEA_128",
	[41] = "KMF_ENCRYPTED_TDEA_192",
	[42] = "KMF_AES_128",
	[43] = "KMF_AES_192",
	[44] = "KMF_AES_256",
	[45] = "KMF_ENCRYPTED_AES_128",
	[46] = "KMF_ENCRYPTED_AES_192",
	[47] = "KMF_ENCRYPTED_AES_256",
	[48] = "KMCTR_DEA",
	[49] = "KMCTR_TDEA_128",
	[50] = "KMCTR_TDEA_192",
	[51] = "KMCTR_ENCRYPTED_DEA",
	[52] = "KMCTR_ENCRYPTED_TDEA_128",
	[53] = "KMCTR_ENCRYPTED_TDEA_192",
	[54] = "KMCTR_AES_128",
	[55] = "KMCTR_AES_192",
	[56] = "KMCTR_AES_256",
	[57] = "KMCTR_ENCRYPTED_AES_128",
	[58] = "KMCTR_ENCRYPTED_AES_192",
	[59] = "KMCTR_ENCRYPTED_AES_256",
	[60] = "KMO_DEA",
	[61] = "KMO_TDEA_128",
	[62] = "KMO_TDEA_192",
	[63] = "KMO_ENCRYPTED_DEA",
	[64] = "KMO_ENCRYPTED_TDEA_128",
	[65] = "KMO_ENCRYPTED_TDEA_192",
	[66] = "KMO_AES_128",
	[67] = "KMO_AES_192",
	[68] = "KMO_AES_256",
	[69] = "KMO_ENCRYPTED_AES_128",
	[70] = "KMO_ENCRYPTED_AES_192",
	[71] = "KMO_ENCRYPTED_AES_256",
	[72] = "KIMD_SHA_1",
	[73] = "KIMD_SHA_256",
	[74] = "KIMD_SHA_512",
	[75] = "KIMD_SHA3_224",
	[76] = "KIMD_SHA3_256",
	[77] = "KIMD_SHA3_384",
	[78] = "KIMD_SHA3_512",
	[79] = "KIMD_SHAKE_128",
	[80] = "KIMD_SHAKE_256",
	[81] = "KIMD_GHASH",
	[82] = "KLMD_SHA_1",
	[83] = "KLMD_SHA_256",
	[84] = "KLMD_SHA_512",
	[85] = "KLMD_SHA3_224",
	[86] = "KLMD_SHA3_256",
	[87] = "KLMD_SHA3_384",
	[88] = "KLMD_SHA3_512",
	[89] = "KLMD_SHAKE_128",
	[90] = "KLMD_SHAKE_256",
	[91] = "KMAC_DEA",
	[92] = "KMAC_TDEA_128",
	[93] = "KMAC_TDEA_192",
	[94] = "KMAC_ENCRYPTED_DEA",
	[95] = "KMAC_ENCRYPTED_TDEA_128",
	[96] = "KMAC_ENCRYPTED_TDEA_192",
	[97] = "KMAC_AES_128",
	[98] = "KMAC_AES_192",
	[99] = "KMAC_AES_256",
	[100] = "KMAC_ENCRYPTED_AES_128",
	[101] = "KMAC_ENCRYPTED_AES_192",
	[102] = "KMAC_ENCRYPTED_AES_256",
	[103] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_DEA",
	[104] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_128",
	[105] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_192",
	[106] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_DEA",
	[107] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_128",
	[108] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_192",
	[109] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_128",
	[110] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_192",
	[111] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_256",
	[112] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_128",
	[113] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_192",
	[114] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_256A",
	[115] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_128",
	[116] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_256",
	[117] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_128",
	[118] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_256",
	[119] = "PCC_SCALAR_MULTIPLY_P256",
	[120] = "PCC_SCALAR_MULTIPLY_P384",
	[121] = "PCC_SCALAR_MULTIPLY_P521",
	[122] = "PCC_SCALAR_MULTIPLY_ED25519",
	[123] = "PCC_SCALAR_MULTIPLY_ED448",
	[124] = "PCC_SCALAR_MULTIPLY_X25519",
	[125] = "PCC_SCALAR_MULTIPLY_X448",
	[126] = "PRNO_SHA_512_DRNG",
	[127] = "PRNO_TRNG_QUERY_RAW_TO_CONDITIONED_RATIO",
	[128] = "PRNO_TRNG",
	[129] = "KDSA_ECDSA_VERIFY_P256",
	[130] = "KDSA_ECDSA_VERIFY_P384",
	[131] = "KDSA_ECDSA_VERIFY_P521",
	[132] = "KDSA_ECDSA_SIGN_P256",
	[133] = "KDSA_ECDSA_SIGN_P384",
	[134] = "KDSA_ECDSA_SIGN_P521",
	[135] = "KDSA_ENCRYPTED_ECDSA_SIGN_P256",
	[136] = "KDSA_ENCRYPTED_ECDSA_SIGN_P384",
	[137] = "KDSA_ENCRYPTED_ECDSA_SIGN_P521",
	[138] = "KDSA_EDDSA_VERIFY_ED25519",
	[139] = "KDSA_EDDSA_VERIFY_ED448",
	[140] = "KDSA_EDDSA_SIGN_ED25519",
	[141] = "KDSA_EDDSA_SIGN_ED448",
	[142] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED25519",
	[143] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED448",
	[144] = "PCKMO_ENCRYPT_DEA_KEY",
	[145] = "PCKMO_ENCRYPT_TDEA_128_KEY",
	[146] = "PCKMO_ENCRYPT_TDEA_192_KEY",
	[147] = "PCKMO_ENCRYPT_AES_128_KEY",
	[148] = "PCKMO_ENCRYPT_AES_192_KEY",
	[149] = "PCKMO_ENCRYPT_AES_256_KEY",
	[150] = "PCKMO_ENCRYPT_ECC_P256_KEY",
	[151] = "PCKMO_ENCRYPT_ECC_P384_KEY",
	[152] = "PCKMO_ENCRYPT_ECC_P521_KEY",
	[153] = "PCKMO_ENCRYPT_ECC_ED25519_KEY",
	[154] = "PCKMO_ENCRYPT_ECC_ED448_KEY",
	[155] = "IBM_RESERVED_155",
	[156] = "IBM_RESERVED_156",
};

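/* Free the sysfs attribute structures allocated by attr_event_init_one()
 * and the attribute pointer array itself.
 */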
static void __init attr_event_free(struct attribute **attrs, int num)
{
	struct perf_pmu_events_attr *pa;
	int i;

	for (i = 0; i < num; i++) {
		struct device_attribute *dap;

		dap = container_of(attrs[i], struct device_attribute, attr);
		pa = container_of(dap, struct perf_pmu_events_attr, attr);
		kfree(pa);
	}
	kfree(attrs);
}

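/* Allocate and initialize the sysfs attribute for the counter with index
 * num and store it in the attribute pointer array.
 */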
static int __init attr_event_init_one(struct attribute **attrs, int num)
{
	struct perf_pmu_events_attr *pa;

	pa = kzalloc(sizeof(*pa), GFP_KERNEL);
	if (!pa)
		return -ENOMEM;

	sysfs_attr_init(&pa->attr.attr);
	pa->id = PAI_CRYPTO_BASE + num;
	pa->attr.attr.name = paicrypt_ctrnames[num];
	pa->attr.attr.mode = 0444;
	pa->attr.show = cpumf_events_sysfs_show;
	pa->attr.store = NULL;
	attrs[num] = &pa->attr.attr;
	return 0;
}

/* Create PMU sysfs event attributes on the fly. */
static int __init attr_event_init(void)
{
	struct attribute **attrs;
	int ret, i;

	attrs = kmalloc_array(ARRAY_SIZE(paicrypt_ctrnames) + 1, sizeof(*attrs),
			      GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;
	for (i = 0; i < ARRAY_SIZE(paicrypt_ctrnames); i++) {
		ret = attr_event_init_one(attrs, i);
		if (ret) {
			attr_event_free(attrs, i);
			return ret;
		}
	}
	attrs[i] = NULL;
	paicrypt_events_group.attrs = attrs;
	return 0;
}

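/* Device initialization: check for facility bit 196, query the number of
 * available counters via QPACI, create the sysfs event attributes, set up
 * the s390dbf debug facility and register the PMU.
 */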
static int __init paicrypt_init(void)
{
	struct qpaci_info_block ib;
	int rc;

	if (!test_facility(196))
		return 0;

	qpaci(&ib);
	paicrypt_cnt = ib.num_cc;
	if (paicrypt_cnt == 0)
		return 0;
	if (paicrypt_cnt >= PAI_CRYPTO_MAXCTR)
		paicrypt_cnt = PAI_CRYPTO_MAXCTR - 1;

	rc = attr_event_init();		/* Export known PAI crypto events */
	if (rc) {
		pr_err("Creation of PMU pai_crypto /sysfs failed\n");
		return rc;
	}

	/* Setup s390dbf facility */
	cfm_dbg = debug_register(KMSG_COMPONENT, 2, 256, 128);
	if (!cfm_dbg) {
		pr_err("Registration of s390dbf pai_crypto failed\n");
		return -ENOMEM;
	}
	debug_register_view(cfm_dbg, &debug_sprintf_view);

	rc = perf_pmu_register(&paicrypt, "pai_crypto", -1);
	if (rc) {
		pr_err("Registering the pai_crypto PMU failed with rc=%i\n",
		       rc);
		debug_unregister_view(cfm_dbg, &debug_sprintf_view);
		debug_unregister(cfm_dbg);
		return rc;
	}
	return 0;
}

device_initcall(paicrypt_init);