1 /*
2 * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
3 *
4 * SPDX-License-Identifier: GPL-2.0-only
5 */
6
7 #include <config.h>
8 #ifdef CONFIG_HARDWARE_DEBUG_API
9
10 #include <string.h>
11 #include <util.h>
12 #include <arch/model/statedata.h>
13 #include <arch/machine/debug.h>
14 #include <arch/machine/debug_conf.h>
15 #include <arch/kernel/vspace.h>
16 #include <arch/machine/registerset.h>
17 #include <armv/debug.h>
18 #include <mode/machine/debug.h>
19 #include <sel4/constants.h> /* seL4_NumExclusiveBreakpoints/Watchpoints */
20
21 #define DBGDSCR_MDBGEN (BIT(15))
22 #define DBGDSCR_HDBGEN (BIT(14))
23 #define DBGDSCR_USER_ACCESS_DISABLE (BIT(12))
24
25 /* This bit is always RAO */
26 #define DBGLSR_LOCK_IMPLEMENTED (BIT(0))
27 #define DBGLSR_LOCK_ENABLED (BIT(1))
28 #define DBGLAR_UNLOCK_VALUE (0xC5ACCE55u)
29
30 #define DBGOSLAR_LOCK_VALUE (0xC5ACCE55u)
31
32 #define DBGOSLSR_GET_OSLOCK_MODEL(v) ((((v) >> 2u) & 0x2u) | ((v) & 0x1u))
33 #define DBGOSLSR_LOCK_MODEL_NO_OSLOCK (0u)
34 #define DBGOSLSR_LOCK_MODEL_OSLOCK_AND_OSSR (1u)
35 #define DBGOSLSR_LOCK_MODEL_OSLOCK_ONLY (2u)
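/* Note (added for clarity): in the ARMv7 ARM the DBGOSLSR.OSLM field is split
 * across two non-adjacent bits -- OSLM[1] is DBGOSLSR[3] and OSLM[0] is
 * DBGOSLSR[0] -- which is why DBGOSLSR_GET_OSLOCK_MODEL() above reassembles
 * the value. For example, a raw DBGOSLSR of 0x8 (bit 3 set, bit 0 clear)
 * yields ((0x8 >> 2) & 0x2) | (0x8 & 0x1) == 0x2, i.e.
 * DBGOSLSR_LOCK_MODEL_OSLOCK_ONLY.
 */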
36
37 #define DBGPRSR_OSLOCK (BIT(5))
38 #define DBGPRSR_OS_DLOCK (BIT(6))
39
40 #define DBGOSDLR_LOCK_ENABLE (BIT(0))
41
42 #define DBGAUTHSTATUS_NSI_IMPLEMENTED (BIT(1))
43 #define DBGAUTHSTATUS_NSI_ENABLED (BIT(0))
44 #define DBGAUTHSTATUS_SI_IMPLEMENTED (BIT(5))
45 #define DBGAUTHSTATUS_SI_ENABLED (BIT(4))
46
47 #define DBGDRAR_VALID (MASK(2))
48 #define DBGDSAR_VALID (MASK(2))
49
50 #define DBGSDER_ENABLE_SECURE_USER_INVASIVE_DEBUG (BIT(0))
51
52 /* ARMv7 Manuals, c3.3.1:
53 * "Breakpoint debug events are synchronous. That is, the debug event acts
54 * like an exception that cancels the breakpointed instruction."
55 *
56 * ARMv7 Manuals, c3.4.1:
57 * "Watchpoint debug events are precise and can be synchronous or asynchronous:
58 * a synchronous Watchpoint debug event acts like a synchronous abort
59 * exception on the memory access instruction itself. An asynchronous
60 * Watchpoint debug event acts like a precise asynchronous abort exception that
61 * cancels a later instruction."
62 */
63
64 enum breakpoint_privilege /* BCR[2:1] */ {
65 DBGBCR_PRIV_RESERVED = 0u,
66 DBGBCR_PRIV_PRIVILEGED = 1u,
67 DBGBCR_PRIV_USER = 2u,
68 /* Use either when doing context linking, because the linked WVR or BVR that
69 * specifies the vaddr, overrides the context-programmed BCR privilege.
70 */
71 DBGBCR_BCR_PRIV_EITHER = 3u
72 };
73
74 enum watchpoint_privilege /* WCR[2:1] */ {
75 DBGWCR_PRIV_RESERVED = 0u,
76 DBGWCR_PRIV_PRIVILEGED = 1u,
77 DBGWCR_PRIV_USER = 2u,
78 DBGWCR_PRIV_EITHER = 3u
79 };
80
81 enum watchpoint_access /* WCR[4:3] */ {
82 DBGWCR_ACCESS_RESERVED = 0u,
83 DBGWCR_ACCESS_LOAD = 1u,
84 DBGWCR_ACCESS_STORE = 2u,
85 DBGWCR_ACCESS_EITHER = 3u
86 };
87
88 /** Describes the availability and level of support for the debug features on
89 * a particular CPU. Currently a static local singleton instance, but for
90 * multiprocessor adaptation, just make it per-CPU.
91 *
92 * The majority of the writing to the debug coprocessor is done when a thread
93 * is being context-switched to, so the code in this file always executes on
94 * the target CPU. MP adaptation should come with few surprises.
95 */
96 typedef struct debug_state {
97 bool_t is_available, coprocessor_is_baseline_only, watchpoint_8b_supported,
98 non_secure_invasive_unavailable, secure_invasive_unavailable,
99 cpu_is_in_secure_mode, single_step_supported, breakpoints_supported,
100 watchpoints_supported;
101 uint8_t debug_armv;
102 uint8_t didr_version, oem_variant, oem_revision;
103 } debug_state_t;
104 static debug_state_t dbg;
105
106 bool_t byte8WatchpointsSupported(void)
107 {
108 return dbg.watchpoint_8b_supported;
109 }
110
111 #define SCR "p15, 0, %0, c1, c1, 0"
112 #define DBGDIDR "p14,0,%0,c0,c0,0"
113 /* Not guaranteed in v7, only v7.1+ */
114 #define DBGDRCR ""
115 #define DBGVCR "p15, 0, %0, c0, c7, 0"
116
117 #define DBGDRAR_32 "p14,0,%0,c1,c0,0"
118 #define DBGDRAR_64 "p14,0,%Q0,%R0,c1"
119 #define DBGDSAR_32 "p14,0,%0,c2,c0,0"
120 #define DBGDSAR_64 "p14,0,%Q0,%R0,c2"
121
122 /* ARMv7 manual C11.11.41:
123 * "This register is required in all implementations."
124 * "In v7.1 DBGPRSR is not visible in the CP14 interface."
125 */
126 #define DBGPRSR "p14, 0, %0, c1, c5, 4"
127
128 #define DBGOSLAR "p14,0,%0,c1,c0,4"
129 /* ARMv7 manual: C11.11.32:
130 * "In any implementation, software can read this register to detect whether
131 * the OS Save and Restore mechanism is implemented. If it is not implemented
132 * the read of DBGOSLSR.OSLM returns zero."
133 */
134 #define DBGOSLSR "p14,0,%0,c1,c1,4"
135
136 /* ARMv7 manual: C11.11.30:
137 * "This register is only visible in the CP14 interface."
138 * "In v7 Debug, this register is not implemented."
139 * "In v7.1 Debug, this register is required in all implementations."
140 */
141 #define DBGOSDLR "p14, 0, %0, c1, c3, 4"
142
143 #define DBGDEVID2 "p14,0,%0,c7,c0,7"
144 #define DBGDEVID1 "p14,0,%0,c7,c1,7"
145 #define DBGDEVID "p14,0,%0,c7,c2,7"
146 #define DBGDEVTYPE ""
147
148 /* ARMv7 manual: C11.11.1: DBGAUTHSTATUS:
149 * "This register is required in all implementations."
150 * However, in v7 it is only visible in the memory-mapped interface, and in
151 * the v6 manual this register is not mentioned at all, i.e. it does not
152 * exist.
153 */
154 #define DBGAUTHSTATUS "p14,0,%0,c7,c14,6"
155
156 #endif /* CONFIG_HARDWARE_DEBUG_API */
157
158 #ifdef ARM_BASE_CP14_SAVE_AND_RESTORE
159
160 #define DBGBCR_ENABLE (BIT(0))
161
162 #define DBGWCR_ENABLE (BIT(0))
163
164 #define MAKE_P14(crn, crm, opc2) "p14, 0, %0, c" #crn ", c" #crm ", " #opc2
165 #define MAKE_DBGBVR(num) MAKE_P14(0, num, 4)
166 #define MAKE_DBGBCR(num) MAKE_P14(0, num, 5)
167 #define MAKE_DBGWVR(num) MAKE_P14(0, num, 6)
168 #define MAKE_DBGWCR(num) MAKE_P14(0, num, 7)
169 #define MAKE_DBGXVR(num) MAKE_P14(1, num, 1)
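/* For example, MAKE_DBGBVR(3) expands to the operand string
 * "p14, 0, %0, c0, c3, 4", which MRC()/MCR() turn into an access to DBGBVR3.
 * The switch-based generators below are needed because the register number
 * must appear as a compile-time literal in the coprocessor encoding.
 */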
170
171 /** Generates read functions for the CP14 control and value registers.
172 */
173 #define DEBUG_GENERATE_READ_FN(_name, _reg) \
174 static word_t \
175 _name(uint16_t bp_num) \
176 { \
177 word_t ret; \
178 \
179 switch (bp_num) { \
180 case 1: \
181 MRC(MAKE_ ## _reg(1), ret); \
182 return ret; \
183 case 2: \
184 MRC(MAKE_ ## _reg(2), ret); \
185 return ret; \
186 case 3: \
187 MRC(MAKE_ ## _reg(3), ret); \
188 return ret; \
189 case 4: \
190 MRC(MAKE_ ## _reg(4), ret); \
191 return ret; \
192 case 5: \
193 MRC(MAKE_ ## _reg(5), ret); \
194 return ret; \
195 case 6: \
196 MRC(MAKE_ ## _reg(6), ret); \
197 return ret; \
198 case 7: \
199 MRC(MAKE_ ## _reg(7), ret); \
200 return ret; \
201 case 8: \
202 MRC(MAKE_ ## _reg(8), ret); \
203 return ret; \
204 case 9: \
205 MRC(MAKE_ ## _reg(9), ret); \
206 return ret; \
207 case 10: \
208 MRC(MAKE_ ## _reg(10), ret); \
209 return ret; \
210 case 11: \
211 MRC(MAKE_ ## _reg(11), ret); \
212 return ret; \
213 case 12: \
214 MRC(MAKE_ ## _reg(12), ret); \
215 return ret; \
216 case 13: \
217 MRC(MAKE_ ## _reg(13), ret); \
218 return ret; \
219 case 14: \
220 MRC(MAKE_ ## _reg(14), ret); \
221 return ret; \
222 case 15: \
223 MRC(MAKE_ ## _reg(15), ret); \
224 return ret; \
225 default: \
226 assert(bp_num == 0); \
227 MRC(MAKE_ ## _reg(0), ret); \
228 return ret; \
229 } \
230 }
231
232 /** Generates write functions for the CP14 control and value registers.
233 */
234 #define DEBUG_GENERATE_WRITE_FN(_name, _reg) \
235 static void \
236 _name(uint16_t bp_num, word_t val) \
237 { \
238 switch (bp_num) { \
239 case 1: \
240 MCR(MAKE_ ## _reg(1), val); \
241 return; \
242 case 2: \
243 MCR(MAKE_ ## _reg(2), val); \
244 return; \
245 case 3: \
246 MCR(MAKE_ ## _reg(3), val); \
247 return; \
248 case 4: \
249 MCR(MAKE_ ## _reg(4), val); \
250 return; \
251 case 5: \
252 MCR(MAKE_ ## _reg(5), val); \
253 return; \
254 case 6: \
255 MCR(MAKE_ ## _reg(6), val); \
256 return; \
257 case 7: \
258 MCR(MAKE_ ## _reg(7), val); \
259 return; \
260 case 8: \
261 MCR(MAKE_ ## _reg(8), val); \
262 return; \
263 case 9: \
264 MCR(MAKE_ ## _reg(9), val); \
265 return; \
266 case 10: \
267 MCR(MAKE_ ## _reg(10), val); \
268 return; \
269 case 11: \
270 MCR(MAKE_ ## _reg(11), val); \
271 return; \
272 case 12: \
273 MCR(MAKE_ ## _reg(12), val); \
274 return; \
275 case 13: \
276 MCR(MAKE_ ## _reg(13), val); \
277 return; \
278 case 14: \
279 MCR(MAKE_ ## _reg(14), val); \
280 return; \
281 case 15: \
282 MCR(MAKE_ ## _reg(15), val); \
283 return; \
284 default: \
285 assert(bp_num == 0); \
286 MCR(MAKE_ ## _reg(0), val); \
287 return; \
288 } \
289 }
290
291 DEBUG_GENERATE_READ_FN(readBcrCp, DBGBCR)
292 DEBUG_GENERATE_READ_FN(readBvrCp, DBGBVR)
293 DEBUG_GENERATE_READ_FN(readWcrCp, DBGWCR)
294 DEBUG_GENERATE_READ_FN(readWvrCp, DBGWVR)
295 DEBUG_GENERATE_WRITE_FN(writeBcrCp, DBGBCR)
296 DEBUG_GENERATE_WRITE_FN(writeBvrCp, DBGBVR)
297 DEBUG_GENERATE_WRITE_FN(writeWcrCp, DBGWCR)
298 DEBUG_GENERATE_WRITE_FN(writeWvrCp, DBGWVR)
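/* Usage sketch (illustrative only): the generated accessors above are meant
 * for read-modify-write sequences on a single numbered register, e.g.
 *
 *     word_t bcr = readBcrCp(0);
 *     writeBcrCp(0, bcr & ~DBGBCR_ENABLE);
 *
 * which clears DBGBCR0.E while preserving the remaining (possibly reserved)
 * bits -- the same pattern disableAllBpsAndWps() applies to every register
 * further down.
 */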
299
300 /* These next few functions (read*Context()/write*Context()) read from TCB
301 * context and not from the hardware registers.
302 */
303 static word_t
304 readBcrContext(tcb_t *t, uint16_t index)
305 {
306 assert(index < seL4_NumExclusiveBreakpoints);
307 return t->tcbArch.tcbContext.breakpointState.breakpoint[index].cr;
308 }
309
310 static word_t readBvrContext(tcb_t *t, uint16_t index)
311 {
312 assert(index < seL4_NumExclusiveBreakpoints);
313 return t->tcbArch.tcbContext.breakpointState.breakpoint[index].vr;
314 }
315
316 static word_t readWcrContext(tcb_t *t, uint16_t index)
317 {
318 assert(index < seL4_NumExclusiveWatchpoints);
319 return t->tcbArch.tcbContext.breakpointState.watchpoint[index].cr;
320 }
321
322 static word_t readWvrContext(tcb_t *t, uint16_t index)
323 {
324 assert(index < seL4_NumExclusiveWatchpoints);
325 return t->tcbArch.tcbContext.breakpointState.watchpoint[index].vr;
326 }
327
328 static void writeBcrContext(tcb_t *t, uint16_t index, word_t val)
329 {
330 assert(index < seL4_NumExclusiveBreakpoints);
331 t->tcbArch.tcbContext.breakpointState.breakpoint[index].cr = val;
332 }
333
334 static void writeBvrContext(tcb_t *t, uint16_t index, word_t val)
335 {
336 assert(index < seL4_NumExclusiveBreakpoints);
337 t->tcbArch.tcbContext.breakpointState.breakpoint[index].vr = val;
338 }
339
340 static void writeWcrContext(tcb_t *t, uint16_t index, word_t val)
341 {
342 assert(index < seL4_NumExclusiveWatchpoints);
343 t->tcbArch.tcbContext.breakpointState.watchpoint[index].cr = val;
344 }
345
346 static void writeWvrContext(tcb_t *t, uint16_t index, word_t val)
347 {
348 assert(index < seL4_NumExclusiveWatchpoints);
349 t->tcbArch.tcbContext.breakpointState.watchpoint[index].vr = val;
350 }
351
352 #endif /* ARM_BASE_CP14_SAVE_AND_RESTORE */
353
354 #ifdef CONFIG_HARDWARE_DEBUG_API
355
356 /** For debugging: prints out the debug register pair values as returned by the
357 * coprocessor.
358 *
359 * @param nBp Number of breakpoint reg pairs to print, starting at BP #0.
360 * @param nWp Number of watchpoint reg pairs to print, starting at WP #0.
361 */
362 UNUSED static void dumpBpsAndWpsCp(int nBp, int nWp)
363 {
364 int i;
365
366 for (i = 0; i < nBp; i++) {
367 userError("CP BP %d: Bcr %lx, Bvr %lx", i, readBcrCp(i), readBvrCp(i));
368 }
369
370 for (i = 0; i < nWp; i++) {
371 userError("CP WP %d: Wcr %lx, Wvr %lx", i, readWcrCp(i), readWvrCp(i));
372 }
373 }
374
375 /** Print a thread's saved debug context. For debugging. This differs from
376 * dumpBpsAndWpsCp in that it reads from a thread's saved register context, and
377 * not from the hardware coprocessor registers.
378 *
379 * @param t TCB whose saved register context will be printed.
380 * @param nBp Number of BP regs to print, beginning at BP #0.
381 * @param nWp Number of WP regs to print, beginning at WP #0.
382 */
383 UNUSED static void dumpBpsAndWpsContext(tcb_t *t, int nBp, int nWp)
384 {
385 int i;
386
387 for (i = 0; i < nBp; i++) {
388 userError("Ctxt BP %d: Bcr %lx, Bvr %lx", i, readBcrContext(t, i), readBvrContext(t, i));
389 }
390
391 for (i = 0; i < nWp; i++) {
392 userError("Ctxt WP %d: Wcr %lx, Wvr %lx", i, readWcrContext(t, i), readWvrContext(t, i));
393 }
394 }
395
396 /* ARM watchpoints can trigger on load-type accesses (load, load-exclusive,
397 * "swap"), store-type accesses (store, store-exclusive, "swap"), or all accesses.
398 *
399 * The mask defines which bits are EXCLUDED from the comparison.
400 * Always program the DBGDWVR with a WORD aligned address, and use the BAS to
401 * state which bits form part of the match.
402 *
403 * It seems the BAS works as a bitmask of bytes to select in the range.
404 *
405 * To detect support for the 8-bit BAS field:
406 * * If the 8-bit BAS is unsupported, then BAS[7:4] is RAZ/WI.
407 *
408 * When using an 8-byte watchpoint that is not dword aligned, the result is
409 * undefined. You should program it as the aligned base of the range, and select
410 * only the relevant bytes then.
411 *
412 * You cannot do sparse byte selection: you either select a single byte in the
413 * BAS or you select a contiguous range. ARM has deprecated sparse byte
414 * selection.
415 */
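/* Worked example of the BAS scheme above (illustrative): to watch the two
 * bytes at 0x1002-0x1003 you program DBGWVR with the word-aligned base 0x1000
 * and set BAS to 0b1100, i.e. select bytes 2 and 3 of the aligned word.
 * The conversion helpers below (see convertSizeToArch()) only ever generate
 * the contiguous low-order patterns 0x1, 0x3, 0xF and 0xFF.
 */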
416
417 /** Convert a watchpoint size (0, 1, 2, 4 or 8 bytes) into the arch specific
418 * register encoding.
419 */
420 static word_t convertSizeToArch(word_t size)
421 {
422 switch (size) {
423 case 1:
424 return 0x1;
425 case 2:
426 return 0x3;
427 case 8:
428 return 0xFF;
429 default:
430 assert(size == 4);
431 return 0xF;
432 }
433 }
434
435 /** Convert an arch specific encoded watchpoint size back into a simple integer
436 * representation.
437 */
438 static word_t convertArchToSize(word_t archsize)
439 {
440 switch (archsize) {
441 case 0x1:
442 return 1;
443 case 0x3:
444 return 2;
445 case 0xFF:
446 return 8;
447 default:
448 assert(archsize == 0xF);
449 return 4;
450 }
451 }
452
453 /** Convert an access perms API value (seL4_BreakOnRead, etc) into the register
454 * encoding that matches it.
455 */
456 static word_t convertAccessToArch(word_t access)
457 {
458 switch (access) {
459 case seL4_BreakOnRead:
460 return DBGWCR_ACCESS_LOAD;
461 case seL4_BreakOnWrite:
462 return DBGWCR_ACCESS_STORE;
463 default:
464 assert(access == seL4_BreakOnReadWrite);
465 return DBGWCR_ACCESS_EITHER;
466 }
467 }
468
469 /** Convert an arch-specific register encoding back into an API access perms
470 * value.
471 */
472 static word_t convertArchToAccess(word_t archaccess)
473 {
474 switch (archaccess) {
475 case DBGWCR_ACCESS_LOAD:
476 return seL4_BreakOnRead;
477 case DBGWCR_ACCESS_STORE:
478 return seL4_BreakOnWrite;
479 default:
480 assert(archaccess == DBGWCR_ACCESS_EITHER);
481 return seL4_BreakOnReadWrite;
482 }
483 }
484
485 static uint16_t getBpNumFromType(uint16_t bp_num, word_t type)
486 {
487 assert(type == seL4_InstructionBreakpoint || type == seL4_DataBreakpoint
488 || type == seL4_SingleStep);
489
490 switch (type) {
491 case seL4_InstructionBreakpoint:
492 case seL4_SingleStep:
493 return bp_num;
494 default: /* seL4_DataBreakpoint: */
495 assert(type == seL4_DataBreakpoint);
496 return bp_num + seL4_NumExclusiveBreakpoints;
497 }
498 }
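/* In other words, the API-visible numbering places instruction breakpoints
 * (and the single-step breakpoint) at IDs [0 .. seL4_NumExclusiveBreakpoints)
 * and watchpoints immediately after them, at [seL4_NumExclusiveBreakpoints ..
 * seL4_NumExclusiveBreakpoints + seL4_NumExclusiveWatchpoints). E.g. hardware
 * watchpoint #2 is reported to userspace as API ID
 * (seL4_NumExclusiveBreakpoints + 2).
 */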
499
500 /** Extracts the "Method of Entry" bits from DBGDSCR.
501 *
502 * Used to determine what type of debug exception has occurred.
503 */
504 static inline word_t getMethodOfEntry(void)
505 {
506 dbg_dscr_t dscr;
507
508 dscr.words[0] = readDscrCp();
509 return dbg_dscr_get_methodOfEntry(dscr);
510 }
511
512 /** Sets up the requested hardware breakpoint register.
513 *
514 * Acts as the backend for seL4_TCB_SetBreakpoint. Doesn't actually operate
515 * on the hardware coprocessor, but just modifies the thread's debug register
516 * context. The thread will pop off the updated register context when it is
517 * popping its context the next time it runs.
518 *
519 * On ARM the hardware breakpoints are consumed by all operations, including
520 * single-stepping, unlike x86, where single-stepping doesn't require the use
521 * of an actual hardware breakpoint register (just uses the EFLAGS.TF bit).
522 *
523 * @param t TCB that holds the register context of the thread we
524 * want to modify.
525 * @param bp_num The hardware register we want to set up.
526 * @param vaddr, type, size, rw: seL4 API values for seL4_TCB_SetBreakpoint.
527 * All documented in the seL4 API Manuals.
528 */
529 void setBreakpoint(tcb_t *t,
530 uint16_t bp_num,
531 word_t vaddr, word_t type, word_t size, word_t rw)
532 {
533 bp_num = convertBpNumToArch(bp_num);
534
535 /* C3.3.4: "A debugger can use either byte address selection or address range
536 * masking, if it is implemented. However, it must not attempt to use both at
537 * the same time"
538 *
539 * "v7 Debug and v7.1 Debug deprecate any use of the DBGBCR.MASK field."
540 * ^ So prefer to use DBGBCR.BAS instead. When using masking, you must set
541 * BAS to all 1s, and when using BAS you must set the MASK field to all 0s.
542 *
543 * To detect support for BPAddrMask:
544 * * When it's unsupported: DBGBCR.MASK is always RAZ/WI, and EITHER:
545 * * DBGIDR.DEVID_tmp is RAZ
546 * * OR DBGIDR.DEVID_tmp is RAO and DBGDEVID.{CIDMask, BPAddrMask} are RAZ.
547 * * OR:
548 * * DBGDEVID.BPAddrMask indicates whether addr masking is supported.
549 * * DBGBCR.MASK is UNK/SBZP.
550 *
551 * Setting BAS to 0b0000 makes the cpu break on every instruction.
552 * Be aware that the processor checks the MASK before the BAS.
553 * You must set BAS to 0b1111 for all context match comparisons.
554 */
555 if (type == seL4_InstructionBreakpoint) {
556 dbg_bcr_t bcr;
557
558 writeBvrContext(t, bp_num, vaddr);
559
560 /* Preserve reserved bits. */
561 bcr.words[0] = readBcrContext(t, bp_num);
562 bcr = dbg_bcr_set_enabled(bcr, 1);
563 bcr = dbg_bcr_set_linkedBrp(bcr, 0);
564 bcr = dbg_bcr_set_supervisorAccess(bcr, DBGBCR_PRIV_USER);
565 bcr = dbg_bcr_set_byteAddressSelect(bcr, convertSizeToArch(4));
566 bcr = Arch_setupBcr(bcr, true);
567 writeBcrContext(t, bp_num, bcr.words[0]);
568 } else {
569 dbg_wcr_t wcr;
570
571 writeWvrContext(t, bp_num, vaddr);
572
573 /* Preserve reserved bits */
574 wcr.words[0] = readWcrContext(t, bp_num);
575 wcr = dbg_wcr_set_enabled(wcr, 1);
576 wcr = dbg_wcr_set_supervisorAccess(wcr, DBGWCR_PRIV_USER);
577 wcr = dbg_wcr_set_byteAddressSelect(wcr, convertSizeToArch(size));
578 wcr = dbg_wcr_set_loadStore(wcr, convertAccessToArch(rw));
579 wcr = dbg_wcr_set_enableLinking(wcr, 0);
580 wcr = dbg_wcr_set_linkedBrp(wcr, 0);
581 wcr = Arch_setupWcr(wcr);
582 writeWcrContext(t, bp_num, wcr.words[0]);
583 }
584 }
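/* Example of the net effect (illustrative): a call with
 * type == seL4_DataBreakpoint, size == 4 and rw == seL4_BreakOnReadWrite
 * leaves watchpoint[bp_num].vr == vaddr in the thread's saved context, and a
 * WCR with enabled=1, byteAddressSelect=0xF, loadStore=DBGWCR_ACCESS_EITHER
 * and supervisorAccess=DBGWCR_PRIV_USER. Nothing is written to the hardware
 * here; the values take effect when the thread's context is next loaded.
 */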
585
586 /** Retrieves the current configuration of a hardware breakpoint for a given
587 * thread.
588 *
589 * Doesn't modify the configuration of that thread's breakpoints.
590 *
591 * @param t TCB that holds the register context for the thread you wish
592 * to query.
593 * @param bp_num Hardware breakpoint ID.
594 * @return A struct describing the current configuration of the requested
595 * breakpoint.
596 */
597 getBreakpoint_t getBreakpoint(tcb_t *t, uint16_t bp_num)
598 {
599 getBreakpoint_t ret;
600
601 ret.type = getTypeFromBpNum(bp_num);
602 bp_num = convertBpNumToArch(bp_num);
603
604 if (ret.type == seL4_InstructionBreakpoint) {
605 dbg_bcr_t bcr;
606
607 bcr.words[0] = readBcrContext(t, bp_num);
608 if (Arch_breakpointIsMismatch(bcr) == true) {
609 ret.type = seL4_SingleStep;
610 }
611 ret.size = 0;
612 ret.rw = seL4_BreakOnRead;
613 ret.vaddr = readBvrContext(t, bp_num);
614 ret.is_enabled = dbg_bcr_get_enabled(bcr);
615 } else {
616 dbg_wcr_t wcr;
617
618 wcr.words[0] = readWcrContext(t, bp_num);
619 ret.size = convertArchToSize(dbg_wcr_get_byteAddressSelect(wcr));
620 ret.rw = convertArchToAccess(dbg_wcr_get_loadStore(wcr));
621 ret.vaddr = readWvrContext(t, bp_num);
622 ret.is_enabled = dbg_wcr_get_enabled(wcr);
623 }
624 return ret;
625 }
626
627 /** Disables and clears the configuration of a hardware breakpoint.
628 *
629 * @param t TCB holding the reg context for the target thread.
630 * @param bp_num The hardware breakpoint you want to disable+clear.
631 */
632 void unsetBreakpoint(tcb_t *t, uint16_t bp_num)
633 {
634 word_t type;
635
636 type = getTypeFromBpNum(bp_num);
637 bp_num = convertBpNumToArch(bp_num);
638
639 if (type == seL4_InstructionBreakpoint) {
640 dbg_bcr_t bcr;
641
642 bcr.words[0] = readBcrContext(t, bp_num);
643 bcr = dbg_bcr_set_enabled(bcr, 0);
644 writeBcrContext(t, bp_num, bcr.words[0]);
645 writeBvrContext(t, bp_num, 0);
646 } else {
647 dbg_wcr_t wcr;
648
649 wcr.words[0] = readWcrContext(t, bp_num);
650 wcr = dbg_wcr_set_enabled(wcr, 0);
651 writeWcrContext(t, bp_num, wcr.words[0]);
652 writeWvrContext(t, bp_num, 0);
653 }
654 }
655
656 /** Initiates or halts single-stepping on the target thread.
657 *
658 * @param t TCB of the target thread to be configured.
659 * @param bp_num The hardware ID of the breakpoint register to be used.
660 * @param n_instr The number of instructions to step over.
661 */
662 bool_t configureSingleStepping(tcb_t *t,
663 uint16_t bp_num,
664 word_t n_instr,
665 bool_t is_reply)
666 {
667
668 if (is_reply) {
669 bp_num = t->tcbArch.tcbContext.breakpointState.single_step_hw_bp_num;
670 } else {
671 bp_num = convertBpNumToArch(bp_num);
672 }
673
674 /* On ARM single-stepping is emulated using breakpoint mismatches. So you
675 * would basically set the breakpoint to mismatch everything, and this will
676 * cause an exception to be triggered on every instruction.
677 *
678 * We use NULL as the mismatch address since no code should be trying to
679 * execute NULL, so it's a perfect address to use as the mismatch
680 * criterion. An alternative might be to use an address in the kernel's
681 * high vaddrspace, since that's an address that it's impossible for
682 * userspace to be executing at.
683 */
684 dbg_bcr_t bcr;
685
686 bcr.words[0] = readBcrContext(t, bp_num);
687
688 /* If the user calls us with n_instr == 0, allow them to configure, but
689 * leave it disabled.
690 */
691 if (n_instr > 0) {
692 bcr = dbg_bcr_set_enabled(bcr, 1);
693 t->tcbArch.tcbContext.breakpointState.single_step_enabled = true;
694 } else {
695 bcr = dbg_bcr_set_enabled(bcr, 0);
696 t->tcbArch.tcbContext.breakpointState.single_step_enabled = false;
697 }
698
699 bcr = dbg_bcr_set_linkedBrp(bcr, 0);
700 bcr = dbg_bcr_set_supervisorAccess(bcr, DBGBCR_PRIV_USER);
701 bcr = dbg_bcr_set_byteAddressSelect(bcr, convertSizeToArch(1));
702 bcr = Arch_setupBcr(bcr, false);
703
704 writeBvrContext(t, bp_num, 0);
705 writeBcrContext(t, bp_num, bcr.words[0]);
706
707 t->tcbArch.tcbContext.breakpointState.n_instructions = n_instr;
708 t->tcbArch.tcbContext.breakpointState.single_step_hw_bp_num = bp_num;
709 return true;
710 }
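/* Net effect (sketch): on success the saved context holds BVR == 0 and a BCR
 * that Arch_setupBcr(bcr, false) has put into mismatch mode, so once loaded
 * onto the CPU every user instruction fetch from a non-NULL address raises a
 * breakpoint debug event. The n_instructions counter saved above is what the
 * debug fault path later consults to decide how many of those events to
 * consume before reporting a seL4_SingleStep fault.
 */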
711
712 /** Using the DBGDIDR register, detects the debug architecture version, and
713 * does a preliminary check for the level of support for our debug API.
714 *
715 * Reads DBGDIDR, which is guaranteed to be safe to read, and then
716 * determines whether or not we can or should proceed.
717 *
718 * The majority of the debug setup is concerned with trying to tell which
719 * registers are safe to access on this CPU. The debug architecture is wildly
720 * different across different CPUs and platforms, so genericity is fairly
721 * challenging.
722 */
723 BOOT_CODE static void initVersionInfo(void)
724 {
725 dbg_didr_t didr;
726
727 didr.words[0] = getDIDR();
728 dbg.oem_revision = dbg_didr_get_revision(didr);
729 dbg.oem_variant = dbg_didr_get_variant(didr);
730 dbg.didr_version = dbg_didr_get_version(didr);
731 dbg.coprocessor_is_baseline_only = true;
732 dbg.breakpoints_supported = dbg.watchpoints_supported =
733 dbg.single_step_supported = true;
734
735 switch (dbg.didr_version) {
736 case 0x1:
737 dbg.debug_armv = 0x60;
738 dbg.single_step_supported = false;
739 break;
740 case 0x2:
741 dbg.debug_armv = 0x61;
742 break;
743 case 0x3:
744 dbg.debug_armv = 0x70;
745 dbg.coprocessor_is_baseline_only = false;
746 break;
747 case 0x4:
748 dbg.debug_armv = 0x70;
749 break;
750 case 0x5:
751 dbg.debug_armv = 0x71;
752 dbg.coprocessor_is_baseline_only = false;
753 break;
754 case 0x6:
755 dbg.debug_armv = 0x80;
756 dbg.coprocessor_is_baseline_only = false;
757 break;
758 default:
759 dbg.is_available = false;
760 dbg.debug_armv = 0;
761 return;
762 }
763
764 dbg.is_available = true;
765 }
766
767 /** Load an initial, all-disabled setup state for the registers.
768 */
769 BOOT_CODE static void disableAllBpsAndWps(void)
770 {
771 int i;
772
773 for (i = 0; i < seL4_NumExclusiveBreakpoints; i++) {
774 writeBvrCp(i, 0);
775 writeBcrCp(i, readBcrCp(i) & ~DBGBCR_ENABLE);
776 }
777 for (i = 0; i < seL4_NumExclusiveWatchpoints; i++) {
778 writeWvrCp(i, 0);
779 writeWcrCp(i, readWcrCp(i) & ~DBGWCR_ENABLE);
780 }
781
782 isb();
783 }
784
785 /** Guides the debug hardware initialization sequence.
786 *
787 * In short, there is a small set of registers, the "baseline" registers, which
788 * are guaranteed to be available on all ARM debug architecture implementations.
789 * Aside from those, the rest are a *COMPLETE* toss-up, and detection is
790 * difficult, because if you access any particular register which is
791 * unavailable on an implementation, you trigger an #UNDEFINED exception. And
792 * there is little uniformity or consistency.
793 *
794 * In addition, there are as many as 3 lock registers, all of which have
795 * effects on which registers you can access...and only one of them is
796 * consistently implemented. The others may or may not be implemented, and well,
797 * you have to grope in the dark to determine whether or not they are...but
798 * if they are implemented, their effect on software is still upheld, of course.
799 *
800 * Much of this sequence is catering for the different versions and determining
801 * which registers and locks are implemented, and creating a common register
802 * environment for the rest of the API code.
803 *
804 * There are several conditions which will cause the code to exit and give up.
805 * For the most part, most implementations give you the baseline registers and
806 * some others. When an implementation only supports the baseline registers and
807 * nothing more, you're told so, and that basically means you can't do anything
808 * with it because you have no reliable access to the debug registers.
809 */
810 BOOT_CODE bool_t Arch_initHardwareBreakpoints(void)
811 {
812 word_t dbgosdlr, dbgoslsr;
813
814 /* The functioning of breakpoints on ARM requires that certain external
815 * pin signals be enabled. If these are not enabled, there is nothing
816 * that can be done from software. If these are enabled, we can then
817 * select the debug-mode we want by programming the CP14 interface.
818 *
819 * Of the four modes available, we want monitor mode, because only monitor
820 * mode delivers breakpoint and watchpoint events to the kernel as
821 * exceptions. The other modes cause a break into "debug mode" or ignore
822 * debug events.
823 */
824 memset(&dbg, 0, sizeof(dbg));
825
826 initVersionInfo();
827 if (dbg.is_available == false) {
828 printf("Debug architecture not implemented.\n");
829 return false;
830 }
831
832 printf("DIDRv: %x, armv %x, coproc baseline only? %s.\n",
833 dbg.didr_version, dbg.debug_armv,
834 ((dbg.coprocessor_is_baseline_only) ? "yes" : "no"));
835
836 if (dbg.debug_armv > 0x61) {
837 if (dbg.coprocessor_is_baseline_only) {
838 printf("ARMDBG: No reliable access to DBG regs.\n");
839 return dbg.is_available = false;
840 }
841
842 /* Interestingly, since the debug features have so many bits that
843 * behave differently depending on the state of secure-mode, ARM had to
844 * expose a bit in the debug coprocessor that reveals whether or not the
845 * CPU is in secure mode, or else it would be semi-impossible to program
846 * this feature.
847 */
848 dbg.cpu_is_in_secure_mode = !(readDscrCp() & DBGDSCR_SECURE_MODE_DISABLED);
849 if (dbg.cpu_is_in_secure_mode) {
850 word_t sder;
851
852 printf("CPU is in secure mode. Enabling debugging in secure user mode.\n");
853 MRC(DBGSDER, sder);
854 MCR(DBGSDER, sder
855 | DBGSDER_ENABLE_SECURE_USER_INVASIVE_DEBUG);
856 }
857
858 /* Deal with OS Double-lock: */
859 if (dbg.debug_armv == 0x71) {
860 /* ARMv7 manuals, C11.11.30:
861 * "In v7.1 Debug, this register is required in all implementations."
862 */
863 MRC(DBGOSDLR, dbgosdlr);
864 MCR(DBGOSDLR, dbgosdlr & ~DBGOSDLR_LOCK_ENABLE);
865 } else if (dbg.debug_armv == 0x70) {
866 /* ARMv7 manuals, C11.11.30:
867 * "In v7 Debug, this register is not implemented."
868 *
869 * So no need to do anything for debug v7.0.
870 */
871 }
872
873 /* Now deal with OS lock: ARMv7 manual, C11.11.32:
874 * "In any implementation, software can read this register to detect
875 * whether the OS Save and Restore mechanism is implemented. If it is
876 * not implemented the read of DBGOSLSR.OSLM returns zero."
877 */
878 MRC(DBGOSLSR, dbgoslsr);
879 if (DBGOSLSR_GET_OSLOCK_MODEL(dbgoslsr) != DBGOSLSR_LOCK_MODEL_NO_OSLOCK) {
880 MCR(DBGOSLAR, ~DBGOSLAR_LOCK_VALUE);
881 }
882
883 disableAllBpsAndWps();
884 if (!enableMonitorMode()) {
885 return dbg.is_available = false;
886 }
887 } else {
888 /* On v6 you have to enable monitor mode first. */
889 if (!enableMonitorMode()) {
890 return dbg.is_available = false;
891 }
892 disableAllBpsAndWps();
893 }
894
895 /* Finally, also pre-load some initial register state that can be used
896 * for all new threads so that their initial saved debug register state
897 * is valid when it's first loaded onto the CPU.
898 */
899 for (int i = 0; i < seL4_NumExclusiveBreakpoints; i++) {
900 armKSNullBreakpointState.breakpoint[i].cr = readBcrCp(i) & ~DBGBCR_ENABLE;
901 }
902 for (int i = 0; i < seL4_NumExclusiveWatchpoints; i++) {
903 armKSNullBreakpointState.watchpoint[i].cr = readWcrCp(i) & ~DBGWCR_ENABLE;
904 }
905
906 dbg.watchpoint_8b_supported = watchpoint8bSupported();
907 return true;
908 }
909
910 /** Determines which breakpoint or watchpoint register caused the debug
911 * exception to be triggered.
912 *
913 * Checks to see which hardware breakpoint was triggered, and saves
914 * the ID of that breakpoint.
915 * There is no short way to do this on ARM. On x86 there is a status
916 * register that tells you which watchpoint has been triggered. On ARM
917 * there is no such register, so you have to manually check each to see which
918 * one was triggered.
919 *
920 * The arguments also work a bit differently from x86. On x86 the
921 * 2 arguments are dummy values, while on ARM, they contain useful information.
922 *
923 * @param vaddr The virtual address stored in the IFSR/DFSR register, which
924 * is either the watchpoint address or breakpoint address.
925 * @param reason The presumed reason for the exception, which is based on
926 * whether it was a prefetch or data abort.
927 * @return The ID of the breakpoint/watchpoint register that triggered the
928 * exception (a non-negative integer) if we successfully detected which
929 * one fired; a negative value otherwise.
930 */
931 static int getAndResetActiveBreakpoint(word_t vaddr, word_t reason)
932 {
933 word_t align_mask;
934 int i, ret = -1;
935
936 if (reason == seL4_InstructionBreakpoint) {
937 for (i = 0; i < seL4_NumExclusiveBreakpoints; i++) {
938 dbg_bcr_t bcr;
939 word_t bvr = readBvrCp(i);
940
941 bcr.words[0] = readBcrCp(i);
942 /* The actual trigger address may be an unaligned sub-byte of the
943 * range, which means it's not guaranteed to match the aligned value
944 * that was programmed into the address register.
945 */
946 align_mask = convertArchToSize(dbg_bcr_get_byteAddressSelect(bcr));
947 align_mask = ~(align_mask - 1);
948
949 if (bvr != (vaddr & align_mask) || !dbg_bcr_get_enabled(bcr)) {
950 continue;
951 }
952
953 ret = i;
954 return ret;
955 }
956 }
957
958 if (reason == seL4_DataBreakpoint) {
959 for (i = 0; i < seL4_NumExclusiveWatchpoints; i++) {
960 dbg_wcr_t wcr;
961 word_t wvr = readWvrCp(i);
962
963 wcr.words[0] = readWcrCp(i);
964 align_mask = convertArchToSize(dbg_wcr_get_byteAddressSelect(wcr));
965 align_mask = ~(align_mask - 1);
966
967 if (wvr != (vaddr & align_mask) || !dbg_wcr_get_enabled(wcr)) {
968 continue;
969 }
970
971 ret = i;
972 return ret;
973 }
974 }
975
976 return ret;
977 }
978
979 /** Abstract wrapper around the IFSR/DFSR fault status values.
980 *
981 * Format of the FSR bits is different for long and short descriptors, so
982 * extract the FSR bits and accompany them with a boolean.
983 */
984 typedef struct fault_status {
985 uint8_t status;
986 bool_t is_long_desc_format;
987 } fault_status_t;
988
989 static fault_status_t getFaultStatus(word_t hsr_or_fsr)
990 {
991 fault_status_t ret;
992
993 /* Hyp mode uses the HSR, the Hyp Syndrome Register. */
994 #ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
995 /* the HSR only uses the long descriptor format. */
996 ret.is_long_desc_format = true;
997 /* FSR[5:0]. */
998 ret.status = hsr_or_fsr & 0x3F;
999 #else
1000 /* Non-hyp uses IFSR/DFSR */
1001 if (hsr_or_fsr & BIT(FSR_LPAE_SHIFT)) {
1002 ret.is_long_desc_format = true;
1003 /* FSR[5:0] */
1004 ret.status = hsr_or_fsr & 0x3F;
1005 } else {
1006 ret.is_long_desc_format = false;
1007 /* FSR[10] | FSR[3:0]. */
1008 ret.status = (hsr_or_fsr & BIT(FSR_STATUS_BIT4_SHIFT)) >> FSR_STATUS_BIT4_SHIFT;
1009 ret.status <<= 4;
1010 ret.status |= hsr_or_fsr & 0xF;
1011 }
1012 #endif
1013
1014 return ret;
1015 }
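/* Worked example for the short-descriptor path above: a DFSR reporting a
 * debug event has FS == 0b00010 in the ARMv7 VMSA encoding, so FSR[10] == 0
 * and FSR[3:0] == 0x2, and getFaultStatus() returns
 * { .status = 0x2, .is_long_desc_format = false }, which isDebugFault()
 * below compares against FSR_SHORTDESC_STATUS_DEBUG_EVENT.
 */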
1016
1017 /** Called to determine if an abort was a debug exception.
1018 *
1019 * The ARM debug exceptions look like Prefetch Aborts or Data Aborts, and you
1020 * have to examine some extra register state to determine whether or not the
1021 * abort you currently have on your hands is actually a debug exception.
1022 *
1023 * This routine takes care of the checks.
1024 * @param fs An abstraction of the DFSR/IFSR values, meant to make it irrelevant
1025 * whether we're using the long/short descriptors. Bit positions and
1026 * values change. This also makes the debug code forward compatible
1027 * with aarch64.
1028 */
1029 bool_t isDebugFault(word_t hsr_or_fsr)
1030 {
1031 fault_status_t fs;
1032
1033 fs = getFaultStatus(hsr_or_fsr);
1034 if (fs.is_long_desc_format) {
1035 if (fs.status == FSR_LONGDESC_STATUS_DEBUG_EVENT) {
1036 return true;
1037 }
1038 } else {
1039 if (fs.status == FSR_SHORTDESC_STATUS_DEBUG_EVENT) {
1040 return true;
1041 }
1042 }
1043
1044 if (getMethodOfEntry() == DEBUG_ENTRY_ASYNC_WATCHPOINT) {
1045 userError("Debug: Watchpoint delivered as async abort.");
1046 return true;
1047 }
1048 return false;
1049 }
1050
1051 /** Called to process a debug exception.
1052 *
1053 * On x86, you're told which breakpoint register triggered the exception. On
1054 * ARM, you're told the virtual address that triggered the exception and what
1055 * type of access (data access vs instruction execution) triggered the exception
1056 * and you have to figure out which register triggered it.
1057 *
1058 * For watchpoints, it's not very complicated: just check to see which
1059 * register matches the virtual address.
1060 *
1061 * For breakpoints, it's a bit more complex: since both breakpoints and single-
1062 * stepping are configured using the same registers, we need to first detect
1063 * whether single-stepping is enabled. If not, then we check for a breakpoint.
1064 * @param fault_vaddr The instruction vaddr which triggered the exception, as
1065 * extracted by the kernel.
1066 */
1067 seL4_Fault_t handleUserLevelDebugException(word_t fault_vaddr)
1068 {
1069 #ifdef TRACK_KERNEL_ENTRIES
1070 ksKernelEntry.path = Entry_DebugFault;
1071 ksKernelEntry.word = fault_vaddr;
1072 #endif
1073
1074 word_t method_of_entry = getMethodOfEntry();
1075 int i, active_bp;
1076 seL4_Fault_t ret;
1077 word_t bp_reason, bp_vaddr;
1078
1079 switch (method_of_entry) {
1080 case DEBUG_ENTRY_BREAKPOINT:
1081 bp_reason = seL4_InstructionBreakpoint;
1082 bp_vaddr = fault_vaddr;
1083
1084 /* Could have been triggered by:
1085 * 1. An actual breakpoint.
1086 * 2. A breakpoint configured in mismatch mode to emulate
1087 * single-stepping.
1088 *
1089 * If the register is configured for mismatch, then it's a single-step
1090 * exception. If the register is configured for match, then it's a
1091 * normal breakpoint exception.
1092 */
1093 for (i = 0; i < seL4_NumExclusiveBreakpoints; i++) {
1094 dbg_bcr_t bcr;
1095
1096 bcr.words[0] = readBcrCp(i);
1097 if (!dbg_bcr_get_enabled(bcr) || Arch_breakpointIsMismatch(bcr) != true) {
1098 continue;
1099 }
1100 /* Return the first BP enabled and configured for mismatch. */
1101 bp_reason = seL4_SingleStep;
1102 active_bp = i;
1103 break;
1104 }
1105 break;
1106
1107 case DEBUG_ENTRY_SYNC_WATCHPOINT:
1108 bp_reason = seL4_DataBreakpoint;
1109 #ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
1110 /* Sync watchpoint sets the BP vaddr in HDFAR. */
1111 bp_vaddr = getHDFAR();
1112 #else
1113 bp_vaddr = getFAR();
1114 #endif
1115 break;
1116
1117 case DEBUG_ENTRY_ASYNC_WATCHPOINT:
1118 bp_reason = seL4_DataBreakpoint;
1119 /* Async WP sets the WP vaddr in DBGWFAR for both hyp and non-hyp. */
1120 bp_vaddr = getWFAR();
1121 break;
1122
1123 default: /* EXPLICIT_BKPT: BKPT instruction */
1124 assert(method_of_entry == DEBUG_ENTRY_EXPLICIT_BKPT);
1125 bp_reason = seL4_SoftwareBreakRequest;
1126 bp_vaddr = fault_vaddr;
1127 active_bp = 0;
1128 }
1129
1130 if (method_of_entry != DEBUG_ENTRY_EXPLICIT_BKPT
1131 && bp_reason != seL4_SingleStep) {
1132 active_bp = getAndResetActiveBreakpoint(bp_vaddr,
1133 bp_reason);
1134 assert(active_bp >= 0);
1135 }
1136
1137 /* There is no hardware register associated with BKPT instruction
1138 * triggers.
1139 */
1140 if (bp_reason != seL4_SoftwareBreakRequest) {
1141 /* Convert the hardware BP num back into an API-ID */
1142 active_bp = getBpNumFromType(active_bp, bp_reason);
1143 }
1144 ret = seL4_Fault_DebugException_new(bp_vaddr, active_bp, bp_reason);
1145 return ret;
1146 }
1147
1148 #endif /* CONFIG_HARDWARE_DEBUG_API */
1149
1150 #ifdef ARM_BASE_CP14_SAVE_AND_RESTORE
1151
1152 /** Mirrors Arch_initFpuContext.
1153 *
1154 * Zeroes out the BVR thread context and preloads reserved bit values from the
1155 * control regs into the thread context so we can operate solely on the values
1156 * cached in RAM in API calls, rather than retrieving the values from the
1157 * coprocessor.
1158 */
1159 void Arch_initBreakpointContext(user_context_t *uc)
1160 {
1161 uc->breakpointState = armKSNullBreakpointState;
1162 }
1163
1164 void loadAllDisabledBreakpointState(void)
1165 {
1166 int i;
1167
1168 /* We basically just want to read-modify-write each reg to ensure its
1169 * "ENABLE" bit is clear. We did preload the register context with the
1170 * reserved values from the control registers, so we can read our
1171 * initial values from either the coprocessor or the thread's register
1172 * context.
1173 *
1174 * Both are perfectly fine, and the only discriminant factor is performance.
1175 * I suspect that reading from RAM is faster than reading from the
1176 * coprocessor, but I can't be sure.
1177 */
1178 for (i = 0; i < seL4_NumExclusiveBreakpoints; i++) {
1179 writeBcrCp(i, readBcrCp(i) & ~DBGBCR_ENABLE);
1180 }
1181 for (i = 0; i < seL4_NumExclusiveWatchpoints; i++) {
1182 writeWcrCp(i, readWcrCp(i) & ~DBGWCR_ENABLE);
1183 }
1184 }
1185
1186 /* We only need to save the breakpoint state in the hypervisor
1187 * build, and only for threads that have an associated VCPU.
1188 *
1189 * When the normal kernel is running with the debug API, all
1190 * changes to the debug regs are done through the debug API.
1191 * In the hypervisor build, the guest VM has full access to the
1192 * debug regs in PL1, so we need to save its values on vmexit.
1193 *
1194 * When saving the debug regs we will always save all of them.
1195 * When restoring, we will restore only those that have been used
1196 * for native threads; and we will restore all of them
1197 * unconditionally for VCPUs (because we don't know which of
1198 * them have been changed by the guest).
1199 *
1200 * To ensure that all the debug regs are restored unconditionally,
1201 * we just set the "used_breakpoints_bf" bitfield to all 1s in
1202 * associateVcpu.
1203 */
1204 void saveAllBreakpointState(tcb_t *t)
1205 {
1206 int i;
1207
1208 assert(t != NULL);
1209
1210 for (i = 0; i < seL4_NumExclusiveBreakpoints; i++) {
1211 writeBvrContext(t, i, readBvrCp(i));
1212 writeBcrContext(t, i, readBcrCp(i));
1213 }
1214
1215 for (i = 0; i < seL4_NumExclusiveWatchpoints; i++) {
1216 writeWvrContext(t, i, readWvrCp(i));
1217 writeWcrContext(t, i, readWcrCp(i));
1218 }
1219 }
1220
1221 #ifdef ARM_HYP_CP14_SAVE_AND_RESTORE_VCPU_THREADS
1222 void Arch_debugAssociateVCPUTCB(tcb_t *t)
1223 {
1224 /* Don't attempt to shift beyond end of word. */
1225 assert(seL4_NumHWBreakpoints < sizeof(word_t) * 8);
1226
1227 /* Set all the bits to 1, so loadBreakpointState() will
1228 * restore all the debug regs unconditionally.
1229 */
1230 t->tcbArch.tcbContext.breakpointState.used_breakpoints_bf = MASK(seL4_NumHWBreakpoints);
1231 }
1232
1233 void Arch_debugDissociateVCPUTCB(tcb_t *t)
1234 {
1235 t->tcbArch.tcbContext.breakpointState.used_breakpoints_bf = 0;
1236 }
1237 #endif
1238
1239 static void loadBreakpointState(tcb_t *t)
1240 {
1241 int i;
1242
1243 assert(t != NULL);
1244
1245 for (i = 0; i < seL4_NumExclusiveBreakpoints; i++) {
1246 if (t->tcbArch.tcbContext.breakpointState.used_breakpoints_bf & BIT(i)) {
1247 writeBvrCp(i, readBvrContext(t, i));
1248 writeBcrCp(i, readBcrContext(t, i));
1249 } else {
1250 /* If the thread isn't using the BP, then just load
1251 * a default "disabled" state.
1252 */
1253 writeBcrCp(i, readBcrCp(i) & ~DBGBCR_ENABLE);
1254 }
1255 }
1256
1257 for (i = 0; i < seL4_NumExclusiveWatchpoints; i++) {
1258 if (t->tcbArch.tcbContext.breakpointState.used_breakpoints_bf &
1259 BIT(i + seL4_NumExclusiveBreakpoints)) {
1260 writeWvrCp(i, readWvrContext(t, i));
1261 writeWcrCp(i, readWcrContext(t, i));
1262 } else {
1263 writeWcrCp(i, readWcrCp(i) & ~DBGWCR_ENABLE);
1264 }
1265 }
1266 }
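/* Layout note for used_breakpoints_bf, as consumed above: bit i (for
 * i < seL4_NumExclusiveBreakpoints) marks breakpoint i as in use, and bit
 * (seL4_NumExclusiveBreakpoints + j) marks watchpoint j -- the same ID scheme
 * the debug API exposes. Arch_debugAssociateVCPUTCB() sets every bit of the
 * mask so that a VCPU-bound thread always gets a full, unconditional restore.
 */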
1267
1268 /** Pops debug register context for a thread into the CPU.
1269 *
1270 * Mirrors the idea of restore_user_context.
1271 */
1272 void restore_user_debug_context(tcb_t *target_thread)
1273 {
1274 assert(target_thread != NULL);
1275
1276 if (target_thread->tcbArch.tcbContext.breakpointState.used_breakpoints_bf == 0) {
1277 loadAllDisabledBreakpointState();
1278 } else {
1279 loadBreakpointState(target_thread);
1280 }
1281
1282 /* ARMv7 manual, sec C3.7:
1283 * "Usually, an exception return sequence is a context change operation as
1284 * well as a context synchronization operation, in which case the context
1285 * change operation is guaranteed to take effect on the debug logic by the
1286 * end of that exception return sequence."
1287 *
1288 * So we don't need to execute ISB here because we're about to RFE.
1289 */
1290 }
1291
1292 #endif /* ARM_BASE_CP14_SAVE_AND_RESTORE */
1293