1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Author: Hanlu Li <lihanlu@loongson.cn>
4 * Huacai Chen <chenhuacai@loongson.cn>
5 *
6 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
7 *
8 * Derived from MIPS:
9 * Copyright (C) 1992 Ross Biro
10 * Copyright (C) Linus Torvalds
11 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
12 * Copyright (C) 1996 David S. Miller
13 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
14 * Copyright (C) 1999 MIPS Technologies, Inc.
15 * Copyright (C) 2000 Ulf Carlsson
16 */
17 #include <linux/kernel.h>
18 #include <linux/audit.h>
19 #include <linux/compiler.h>
20 #include <linux/context_tracking.h>
21 #include <linux/elf.h>
22 #include <linux/errno.h>
23 #include <linux/hw_breakpoint.h>
24 #include <linux/mm.h>
25 #include <linux/nospec.h>
26 #include <linux/ptrace.h>
27 #include <linux/regset.h>
28 #include <linux/sched.h>
29 #include <linux/sched/task_stack.h>
30 #include <linux/security.h>
31 #include <linux/smp.h>
32 #include <linux/stddef.h>
33 #include <linux/seccomp.h>
34 #include <linux/thread_info.h>
35 #include <linux/uaccess.h>
36
37 #include <asm/byteorder.h>
38 #include <asm/cpu.h>
39 #include <asm/cpu-info.h>
40 #include <asm/fpu.h>
41 #include <asm/loongarch.h>
42 #include <asm/page.h>
43 #include <asm/pgtable.h>
44 #include <asm/processor.h>
45 #include <asm/ptrace.h>
46 #include <asm/reg.h>
47 #include <asm/syscall.h>
48
init_fp_ctx(struct task_struct * target)49 static void init_fp_ctx(struct task_struct *target)
50 {
51 /* The target already has context */
52 if (tsk_used_math(target))
53 return;
54
55 /* Begin with data registers set to all 1s... */
56 memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));
57 set_stopped_child_used_math(target);
58 }
59
60 /*
61 * Called by kernel/ptrace.c when detaching..
62 *
63 * Make sure single step bits etc are not set.
64 */
ptrace_disable(struct task_struct * child)65 void ptrace_disable(struct task_struct *child)
66 {
67 /* Don't load the watchpoint registers for the ex-child. */
68 clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
69 clear_tsk_thread_flag(child, TIF_SINGLESTEP);
70 }
71
72 /* regset get/set implementations */
73
gpr_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)74 static int gpr_get(struct task_struct *target,
75 const struct user_regset *regset,
76 struct membuf to)
77 {
78 int r;
79 struct pt_regs *regs = task_pt_regs(target);
80
81 r = membuf_write(&to, ®s->regs, sizeof(u64) * GPR_NUM);
82 r = membuf_write(&to, ®s->orig_a0, sizeof(u64));
83 r = membuf_write(&to, ®s->csr_era, sizeof(u64));
84 r = membuf_write(&to, ®s->csr_badvaddr, sizeof(u64));
85
86 return r;
87 }
88
/*
 * Write the general-purpose register state from a ptrace buffer laid
 * out as in gpr_get(): r0-r31, orig_a0, csr_era, csr_badvaddr.
 *
 * Note: `&regs->…` was mojibake-garbled to `®s->…` in the source;
 * restored here so the file compiles.
 */
static int gpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int err;
	int a0_start = sizeof(u64) * GPR_NUM;
	int era_start = a0_start + sizeof(u64);
	int badvaddr_start = era_start + sizeof(u64);
	struct pt_regs *regs = task_pt_regs(target);

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &regs->regs,
				 0, a0_start);
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &regs->orig_a0,
				  a0_start, a0_start + sizeof(u64));
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &regs->csr_era,
				  era_start, era_start + sizeof(u64));
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &regs->csr_badvaddr,
				  badvaddr_start, badvaddr_start + sizeof(u64));

	return err;
}
115
116
117 /*
118 * Get the general floating-point registers.
119 */
gfpr_get(struct task_struct * target,struct membuf * to)120 static int gfpr_get(struct task_struct *target, struct membuf *to)
121 {
122 return membuf_write(to, &target->thread.fpu.fpr,
123 sizeof(elf_fpreg_t) * NUM_FPU_REGS);
124 }
125
gfpr_get_simd(struct task_struct * target,struct membuf * to)126 static int gfpr_get_simd(struct task_struct *target, struct membuf *to)
127 {
128 int i, r;
129 u64 fpr_val;
130
131 BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
132 for (i = 0; i < NUM_FPU_REGS; i++) {
133 fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
134 r = membuf_write(to, &fpr_val, sizeof(elf_fpreg_t));
135 }
136
137 return r;
138 }
139
140 /*
141 * Choose the appropriate helper for general registers, and then copy
142 * the FCC and FCSR registers separately.
143 */
fpr_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)144 static int fpr_get(struct task_struct *target,
145 const struct user_regset *regset,
146 struct membuf to)
147 {
148 int r;
149
150 if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
151 r = gfpr_get(target, &to);
152 else
153 r = gfpr_get_simd(target, &to);
154
155 r = membuf_write(&to, &target->thread.fpu.fcc, sizeof(target->thread.fpu.fcc));
156 r = membuf_write(&to, &target->thread.fpu.fcsr, sizeof(target->thread.fpu.fcsr));
157
158 return r;
159 }
160
/*
 * Write the general floating-point registers as one contiguous block
 * (scalar layout: FP registers are elf_fpreg_t-sized).
 */
static int gfpr_set(struct task_struct *target,
		    unsigned int *pos, unsigned int *count,
		    const void **kbuf, const void __user **ubuf)
{
	const unsigned int fpr_bytes = NUM_FPU_REGS * sizeof(elf_fpreg_t);

	return user_regset_copyin(pos, count, kbuf, ubuf,
				  &target->thread.fpu.fpr, 0, fpr_bytes);
}
169
/*
 * Write the general floating-point registers one at a time when the
 * in-kernel FP registers are SIMD-wide: each 64-bit user value lands
 * in the low half of the corresponding register.
 */
static int gfpr_set_simd(struct task_struct *target,
			 unsigned int *pos, unsigned int *count,
			 const void **kbuf, const void __user **ubuf)
{
	int idx, ret;
	u64 val;

	BUILD_BUG_ON(sizeof(val) != sizeof(elf_fpreg_t));
	for (idx = 0; idx < NUM_FPU_REGS && *count > 0; idx++) {
		ret = user_regset_copyin(pos, count, kbuf, ubuf,
					 &val, idx * sizeof(elf_fpreg_t),
					 (idx + 1) * sizeof(elf_fpreg_t));
		if (ret)
			return ret;
		set_fpr64(&target->thread.fpu.fpr[idx], 0, val);
	}

	return 0;
}
189
190 /*
191 * Choose the appropriate helper for general registers, and then copy
192 * the FCC register separately.
193 */
/*
 * NT_PRFPREG write: pick the scalar or SIMD helper for the general FP
 * registers, then copy in the FCC and FCSR control registers.
 */
static int fpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int err;
	const int fcc_start = NUM_FPU_REGS * sizeof(elf_fpreg_t);
	const int fcsr_start = fcc_start + sizeof(u64);

	BUG_ON(count % sizeof(elf_fpreg_t));
	if (pos + count > sizeof(elf_fpregset_t))
		return -EIO;

	/* Make sure the target owns an FP context before modifying it */
	init_fp_ctx(target);

	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
		err = gfpr_set(target, &pos, &count, &kbuf, &ubuf);
	else
		err = gfpr_set_simd(target, &pos, &count, &kbuf, &ubuf);
	if (err)
		return err;

	/* err is zero here, so OR-accumulating the copyin results is safe */
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpu.fcc, fcc_start,
				  fcc_start + sizeof(u64));
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpu.fcsr, fcsr_start,
				  fcsr_start + sizeof(u32));

	return err;
}
225
cfg_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)226 static int cfg_get(struct task_struct *target,
227 const struct user_regset *regset,
228 struct membuf to)
229 {
230 int i, r;
231 u32 cfg_val;
232
233 i = 0;
234 while (to.left > 0) {
235 cfg_val = read_cpucfg(i++);
236 r = membuf_write(&to, &cfg_val, sizeof(u32));
237 }
238
239 return r;
240 }
241
242 /*
243 * CFG registers are read-only.
244 */
/*
 * CPUCFG registers are read-only; writes are accepted and discarded so
 * that blanket regset writes (e.g. core restore) do not fail.
 */
static int cfg_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	return 0;
}
252
253 #ifdef CONFIG_HAVE_HW_BREAKPOINT
254
255 /*
256 * Handle hitting a HW-breakpoint.
257 */
ptrace_hbptriggered(struct perf_event * bp,struct perf_sample_data * data,struct pt_regs * regs)258 static void ptrace_hbptriggered(struct perf_event *bp,
259 struct perf_sample_data *data,
260 struct pt_regs *regs)
261 {
262 int i;
263 struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
264
265 for (i = 0; i < LOONGARCH_MAX_BRP; ++i)
266 if (current->thread.hbp_break[i] == bp)
267 break;
268
269 for (i = 0; i < LOONGARCH_MAX_WRP; ++i)
270 if (current->thread.hbp_watch[i] == bp)
271 break;
272
273 force_sig_ptrace_errno_trap(i, (void __user *)bkpt->address);
274 }
275
ptrace_hbp_get_event(unsigned int note_type,struct task_struct * tsk,unsigned long idx)276 static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
277 struct task_struct *tsk,
278 unsigned long idx)
279 {
280 struct perf_event *bp;
281
282 switch (note_type) {
283 case NT_LOONGARCH_HW_BREAK:
284 if (idx >= LOONGARCH_MAX_BRP)
285 return ERR_PTR(-EINVAL);
286 idx = array_index_nospec(idx, LOONGARCH_MAX_BRP);
287 bp = tsk->thread.hbp_break[idx];
288 break;
289 case NT_LOONGARCH_HW_WATCH:
290 if (idx >= LOONGARCH_MAX_WRP)
291 return ERR_PTR(-EINVAL);
292 idx = array_index_nospec(idx, LOONGARCH_MAX_WRP);
293 bp = tsk->thread.hbp_watch[idx];
294 break;
295 }
296
297 return bp;
298 }
299
ptrace_hbp_set_event(unsigned int note_type,struct task_struct * tsk,unsigned long idx,struct perf_event * bp)300 static int ptrace_hbp_set_event(unsigned int note_type,
301 struct task_struct *tsk,
302 unsigned long idx,
303 struct perf_event *bp)
304 {
305 switch (note_type) {
306 case NT_LOONGARCH_HW_BREAK:
307 if (idx >= LOONGARCH_MAX_BRP)
308 return -EINVAL;
309 idx = array_index_nospec(idx, LOONGARCH_MAX_BRP);
310 tsk->thread.hbp_break[idx] = bp;
311 break;
312 case NT_LOONGARCH_HW_WATCH:
313 if (idx >= LOONGARCH_MAX_WRP)
314 return -EINVAL;
315 idx = array_index_nospec(idx, LOONGARCH_MAX_WRP);
316 tsk->thread.hbp_watch[idx] = bp;
317 break;
318 }
319
320 return 0;
321 }
322
ptrace_hbp_create(unsigned int note_type,struct task_struct * tsk,unsigned long idx)323 static struct perf_event *ptrace_hbp_create(unsigned int note_type,
324 struct task_struct *tsk,
325 unsigned long idx)
326 {
327 int err, type;
328 struct perf_event *bp;
329 struct perf_event_attr attr;
330
331 switch (note_type) {
332 case NT_LOONGARCH_HW_BREAK:
333 type = HW_BREAKPOINT_X;
334 break;
335 case NT_LOONGARCH_HW_WATCH:
336 type = HW_BREAKPOINT_RW;
337 break;
338 default:
339 return ERR_PTR(-EINVAL);
340 }
341
342 ptrace_breakpoint_init(&attr);
343
344 /*
345 * Initialise fields to sane defaults
346 * (i.e. values that will pass validation).
347 */
348 attr.bp_addr = 0;
349 attr.bp_len = HW_BREAKPOINT_LEN_4;
350 attr.bp_type = type;
351 attr.disabled = 1;
352
353 bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
354 if (IS_ERR(bp))
355 return bp;
356
357 err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
358 if (err)
359 return ERR_PTR(err);
360
361 return bp;
362 }
363
ptrace_hbp_fill_attr_ctrl(unsigned int note_type,struct arch_hw_breakpoint_ctrl ctrl,struct perf_event_attr * attr)364 static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
365 struct arch_hw_breakpoint_ctrl ctrl,
366 struct perf_event_attr *attr)
367 {
368 int err, len, type, offset;
369
370 err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
371 if (err)
372 return err;
373
374 switch (note_type) {
375 case NT_LOONGARCH_HW_BREAK:
376 if ((type & HW_BREAKPOINT_X) != type)
377 return -EINVAL;
378 break;
379 case NT_LOONGARCH_HW_WATCH:
380 if ((type & HW_BREAKPOINT_RW) != type)
381 return -EINVAL;
382 break;
383 default:
384 return -EINVAL;
385 }
386
387 attr->bp_len = len;
388 attr->bp_type = type;
389 attr->bp_addr += offset;
390
391 return 0;
392 }
393
/*
 * Report the number of available breakpoint or watchpoint slots in the
 * low byte of the resource-info word the tracer reads.
 */
static int ptrace_hbp_get_resource_info(unsigned int note_type, u16 *info)
{
	u8 num;

	switch (note_type) {
	case NT_LOONGARCH_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_LOONGARCH_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

	*info = num;

	return 0;
}
414
/*
 * Return the perf event for slot @idx, creating a disabled one on
 * first use. May return ERR_PTR on failure.
 */
static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp;

	bp = ptrace_hbp_get_event(note_type, tsk, idx);
	if (bp)
		return bp;

	return ptrace_hbp_create(note_type, tsk, idx);
}
426
/*
 * Read back the encoded control word for slot @idx; 0 if the slot has
 * no event registered.
 */
static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	if (bp)
		*ctrl = encode_ctrl_reg(counter_arch_bp(bp)->ctrl);
	else
		*ctrl = 0;

	return 0;
}
440
/*
 * Read back the address mask for slot @idx; 0 if the slot has no event
 * registered.
 */
static int ptrace_hbp_get_mask(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u64 *mask)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	if (bp)
		*mask = counter_arch_bp(bp)->mask;
	else
		*mask = 0;

	return 0;
}
454
/*
 * Read back the trigger address for slot @idx; 0 if the slot has no
 * event registered.
 */
static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	if (bp)
		*addr = counter_arch_bp(bp)->address;
	else
		*addr = 0;

	return 0;
}
468
/*
 * Apply a user-supplied control word to slot @idx, creating the backing
 * event on first use, then push the new attributes to perf.
 */
static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp))
		return PTR_ERR(bp);

	decode_ctrl_reg(uctrl, &ctrl);
	attr = bp->attr;
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}
490
/*
 * Set the address mask for slot @idx, creating the backing event on
 * first use, then re-validate the (unchanged) attributes with perf.
 */
static int ptrace_hbp_set_mask(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u64 mask)
{
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp))
		return PTR_ERR(bp);

	attr = bp->attr;
	/* The mask lives in the arch state, not in perf_event_attr */
	counter_arch_bp(bp)->mask = mask;

	return modify_user_hw_breakpoint(bp, &attr);
}
509
/*
 * Set the trigger address for slot @idx, creating the backing event on
 * first use, then push the new attributes to perf.
 */
static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u64 addr)
{
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp))
		return PTR_ERR(bp);

	attr = bp->attr;
	attr.bp_addr = addr;

	return modify_user_hw_breakpoint(bp, &attr);
}
526
/* On-wire sizes of the ctrl/addr/mask fields of one debug-register triplet */
#define PTRACE_HBP_CTRL_SZ sizeof(u32)
#define PTRACE_HBP_ADDR_SZ sizeof(u64)
#define PTRACE_HBP_MASK_SZ sizeof(u64)
530
hw_break_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)531 static int hw_break_get(struct task_struct *target,
532 const struct user_regset *regset,
533 struct membuf to)
534 {
535 u16 info;
536 u32 ctrl;
537 u64 addr, mask;
538 int ret, idx = 0;
539 unsigned int note_type = regset->core_note_type;
540
541 /* Resource info */
542 ret = ptrace_hbp_get_resource_info(note_type, &info);
543 if (ret)
544 return ret;
545
546 membuf_write(&to, &info, sizeof(info));
547
548 /* (address, ctrl) registers */
549 while (to.left) {
550 ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
551 if (ret)
552 return ret;
553
554 ret = ptrace_hbp_get_mask(note_type, target, idx, &mask);
555 if (ret)
556 return ret;
557
558 ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
559 if (ret)
560 return ret;
561
562 membuf_store(&to, addr);
563 membuf_store(&to, mask);
564 membuf_store(&to, ctrl);
565 idx++;
566 }
567
568 return 0;
569 }
570
/*
 * Regset write for NT_LOONGARCH_HW_BREAK/HW_WATCH: skip the read-only
 * resource-info word, then consume one (addr, mask, ctrl) triplet per
 * slot and program it via the ptrace_hbp_set_* helpers.
 */
static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	u32 ctrl;
	u64 addr, mask;
	int ret, idx = 0, offset, limit;
	unsigned int note_type = regset->core_note_type;

	/* Resource info is read-only: ignore whatever the tracer sent */
	offset = offsetof(struct user_watch_state, dbg_regs);
	user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);

	/* (address, mask, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		if (count < PTRACE_HBP_ADDR_SZ)
			return -EINVAL;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;

		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		if (!count)
			break;

		/* BUGFIX: was bounded by PTRACE_HBP_ADDR_SZ (same size, but wrong macro) */
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &mask,
					 offset, offset + PTRACE_HBP_MASK_SZ);
		if (ret)
			return ret;

		ret = ptrace_hbp_set_mask(note_type, target, idx, mask);
		if (ret)
			return ret;
		offset += PTRACE_HBP_MASK_SZ;

		/* BUGFIX: a missing break here could pass stale/uninitialized ctrl below */
		if (!count)
			break;

		/*
		 * BUGFIX: the control word was read into &mask with
		 * PTRACE_HBP_MASK_SZ, leaving 'ctrl' uninitialized when
		 * handed to ptrace_hbp_set_ctrl().
		 */
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;

		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;
		idx++;
	}

	return 0;
}
628
629 #endif
630
/* Maps a human-readable register name to its byte offset in struct pt_regs */
struct pt_regs_offset {
	const char *name;	/* register name, e.g. "r4" or "csr_era" */
	int offset;		/* offsetof(struct pt_regs, <field>) */
};

/* Table-entry helper and end-of-table sentinel for regoffset_table[] */
#define REG_OFFSET_NAME(n, r) {.name = #n, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
638
/*
 * Name -> pt_regs offset table used by regs_query_register_offset():
 * the 32 GPRs followed by the saved syscall arg and the CSRs captured
 * at exception entry. NULL-name entry terminates the table.
 */
static const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_NAME(r0, regs[0]),
	REG_OFFSET_NAME(r1, regs[1]),
	REG_OFFSET_NAME(r2, regs[2]),
	REG_OFFSET_NAME(r3, regs[3]),
	REG_OFFSET_NAME(r4, regs[4]),
	REG_OFFSET_NAME(r5, regs[5]),
	REG_OFFSET_NAME(r6, regs[6]),
	REG_OFFSET_NAME(r7, regs[7]),
	REG_OFFSET_NAME(r8, regs[8]),
	REG_OFFSET_NAME(r9, regs[9]),
	REG_OFFSET_NAME(r10, regs[10]),
	REG_OFFSET_NAME(r11, regs[11]),
	REG_OFFSET_NAME(r12, regs[12]),
	REG_OFFSET_NAME(r13, regs[13]),
	REG_OFFSET_NAME(r14, regs[14]),
	REG_OFFSET_NAME(r15, regs[15]),
	REG_OFFSET_NAME(r16, regs[16]),
	REG_OFFSET_NAME(r17, regs[17]),
	REG_OFFSET_NAME(r18, regs[18]),
	REG_OFFSET_NAME(r19, regs[19]),
	REG_OFFSET_NAME(r20, regs[20]),
	REG_OFFSET_NAME(r21, regs[21]),
	REG_OFFSET_NAME(r22, regs[22]),
	REG_OFFSET_NAME(r23, regs[23]),
	REG_OFFSET_NAME(r24, regs[24]),
	REG_OFFSET_NAME(r25, regs[25]),
	REG_OFFSET_NAME(r26, regs[26]),
	REG_OFFSET_NAME(r27, regs[27]),
	REG_OFFSET_NAME(r28, regs[28]),
	REG_OFFSET_NAME(r29, regs[29]),
	REG_OFFSET_NAME(r30, regs[30]),
	REG_OFFSET_NAME(r31, regs[31]),
	REG_OFFSET_NAME(orig_a0, orig_a0),
	REG_OFFSET_NAME(csr_era, csr_era),
	REG_OFFSET_NAME(csr_badvaddr, csr_badvaddr),
	REG_OFFSET_NAME(csr_crmd, csr_crmd),
	REG_OFFSET_NAME(csr_prmd, csr_prmd),
	REG_OFFSET_NAME(csr_euen, csr_euen),
	REG_OFFSET_NAME(csr_ecfg, csr_ecfg),
	REG_OFFSET_NAME(csr_estat, csr_estat),
	REG_OFFSET_END,
};
682
683 /**
684 * regs_query_register_offset() - query register offset from its name
685 * @name: the name of a register
686 *
687 * regs_query_register_offset() returns the offset of a register in struct
688 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
689 */
regs_query_register_offset(const char * name)690 int regs_query_register_offset(const char *name)
691 {
692 const struct pt_regs_offset *roff;
693
694 for (roff = regoffset_table; roff->name != NULL; roff++)
695 if (!strcmp(roff->name, name))
696 return roff->offset;
697 return -EINVAL;
698 }
699
/* Indices into loongarch64_regsets[] */
enum loongarch_regset {
	REGSET_GPR,	/* general-purpose registers (NT_PRSTATUS) */
	REGSET_FPR,	/* floating-point registers (NT_PRFPREG) */
	REGSET_CPUCFG,	/* read-only CPUCFG words */
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,	/* hardware breakpoints */
	REGSET_HW_WATCH,	/* hardware watchpoints */
#endif
};
709
/* The register sets exposed to ptrace/core dumps for loongarch64 tasks */
static const struct user_regset loongarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(elf_greg_t),
		.align = sizeof(elf_greg_t),
		.regset_get = gpr_get,
		.set = gpr_set,
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = ELF_NFPREG,
		.size = sizeof(elf_fpreg_t),
		.align = sizeof(elf_fpreg_t),
		.regset_get = fpr_get,
		.set = fpr_set,
	},
	[REGSET_CPUCFG] = {
		.core_note_type = NT_LOONGARCH_CPUCFG,
		.n = 64,	/* 64 CPUCFG words exported */
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = cfg_get,
		.set = cfg_set,	/* read-only: writes are discarded */
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	/* Break and watch regsets share the same layout and handlers */
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_LOONGARCH_HW_BREAK,
		.n = sizeof(struct user_watch_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_LOONGARCH_HW_WATCH,
		.n = sizeof(struct user_watch_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
#endif
};
754
/* The single regset view offered by this architecture */
static const struct user_regset_view user_loongarch64_view = {
	.name = "loongarch64",
	.e_machine = ELF_ARCH,
	.regsets = loongarch64_regsets,
	.n = ARRAY_SIZE(loongarch64_regsets),
};
761
762
task_user_regset_view(struct task_struct * task)763 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
764 {
765 return &user_loongarch64_view;
766 }
767
/*
 * PTRACE_PEEKUSR: read one pseudo-register (GPR index 0-31, ARG0, PC
 * or BADVADDR) from the tracee and copy it to the tracer's buffer.
 */
static inline int read_user(struct task_struct *target, unsigned long addr,
			    unsigned long __user *data)
{
	unsigned long tmp = 0;
	struct pt_regs *regs = task_pt_regs(target);

	switch (addr) {
	case 0 ... 31:
		tmp = regs->regs[addr];
		break;
	case ARG0:
		tmp = regs->orig_a0;
		break;
	case PC:
		tmp = regs->csr_era;
		break;
	case BADVADDR:
		tmp = regs->csr_badvaddr;
		break;
	default:
		return -EIO;
	}

	return put_user(tmp, data);
}
792
write_user(struct task_struct * target,unsigned long addr,unsigned long data)793 static inline int write_user(struct task_struct *target, unsigned long addr,
794 unsigned long data)
795 {
796 switch (addr) {
797 case 0 ... 31:
798 task_pt_regs(target)->regs[addr] = data;
799 break;
800 case ARG0:
801 task_pt_regs(target)->orig_a0 = data;
802 break;
803 case PC:
804 task_pt_regs(target)->csr_era = data;
805 break;
806 case BADVADDR:
807 task_pt_regs(target)->csr_badvaddr = data;
808 break;
809 default:
810 return -EIO;
811 }
812
813 return 0;
814 }
815
arch_ptrace(struct task_struct * child,long request,unsigned long addr,unsigned long data)816 long arch_ptrace(struct task_struct *child, long request,
817 unsigned long addr, unsigned long data)
818 {
819 int ret;
820 unsigned long __user *datap = (void __user *) data;
821
822 switch (request) {
823 case PTRACE_PEEKUSR:
824 ret = read_user(child, addr, datap);
825 break;
826
827 case PTRACE_POKEUSR:
828 ret = write_user(child, addr, data);
829 break;
830
831 default:
832 ret = ptrace_request(child, request, addr, data);
833 break;
834 }
835
836 return ret;
837 }
838
839 #ifdef CONFIG_HAVE_HW_BREAKPOINT
ptrace_triggered(struct perf_event * bp,struct perf_sample_data * data,struct pt_regs * regs)840 static void ptrace_triggered(struct perf_event *bp,
841 struct perf_sample_data *data, struct pt_regs *regs)
842 {
843 struct perf_event_attr attr;
844
845 attr = bp->attr;
846 attr.disabled = true;
847 modify_user_hw_breakpoint(bp, &attr);
848 }
849
set_single_step(struct task_struct * tsk,unsigned long addr)850 static int set_single_step(struct task_struct *tsk, unsigned long addr)
851 {
852 struct perf_event *bp;
853 struct perf_event_attr attr;
854 struct arch_hw_breakpoint *info;
855 struct thread_struct *thread = &tsk->thread;
856
857 bp = thread->hbp_break[0];
858 if (!bp) {
859 ptrace_breakpoint_init(&attr);
860
861 attr.bp_addr = addr;
862 attr.bp_len = HW_BREAKPOINT_LEN_8;
863 attr.bp_type = HW_BREAKPOINT_X;
864
865 bp = register_user_hw_breakpoint(&attr, ptrace_triggered,
866 NULL, tsk);
867 if (IS_ERR(bp))
868 return PTR_ERR(bp);
869
870 thread->hbp_break[0] = bp;
871 } else {
872 int err;
873
874 attr = bp->attr;
875 attr.bp_addr = addr;
876
877 /* Reenable breakpoint */
878 attr.disabled = false;
879 err = modify_user_hw_breakpoint(bp, &attr);
880 if (unlikely(err))
881 return err;
882
883 csr_write64(attr.bp_addr, LOONGARCH_CSR_IB0ADDR);
884 }
885 info = counter_arch_bp(bp);
886 info->mask = TASK_SIZE - 1;
887
888 return 0;
889 }
890
891 /* ptrace API */
user_enable_single_step(struct task_struct * task)892 void user_enable_single_step(struct task_struct *task)
893 {
894 struct thread_info *ti = task_thread_info(task);
895
896 set_single_step(task, task_pt_regs(task)->csr_era);
897 task->thread.single_step = task_pt_regs(task)->csr_era;
898 set_ti_thread_flag(ti, TIF_SINGLESTEP);
899 }
900
user_disable_single_step(struct task_struct * task)901 void user_disable_single_step(struct task_struct *task)
902 {
903 clear_tsk_thread_flag(task, TIF_SINGLESTEP);
904 }
905 #endif
906