/*
 * Copyright 2014, General Dynamics C4 Systems
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include <config.h>
#include <types.h>
#include <benchmark/benchmark.h>
#include <api/failures.h>
#include <api/syscall.h>
#include <kernel/boot.h>
#include <kernel/cspace.h>
#include <kernel/thread.h>
#include <kernel/stack.h>
#include <machine/io.h>
#include <machine/debug.h>
#include <model/statedata.h>
#include <object/cnode.h>
#include <object/untyped.h>
#include <arch/api/invocation.h>
#include <arch/kernel/vspace.h>
#include <linker.h>
#include <plat/machine/hardware.h>
#include <armv/context_switch.h>
#include <arch/object/iospace.h>
#include <arch/object/vcpu.h>
#include <arch/machine/tlb.h>

#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
#include <benchmark/benchmark_track.h>
#endif

/* ARM uses multiple identical mappings in a page table / page directory to construct
 * large mappings. In both cases it happens to be 16 entries, which can be calculated by
 * looking at the size difference of the mappings, and is done this way to remove magic
 * numbers littering this code and make it clear what is going on */
#define SECTIONS_PER_SUPER_SECTION BIT(ARMSuperSectionBits - ARMSectionBits)
#define PAGES_PER_LARGE_PAGE BIT(ARMLargePageBits - ARMSmallPageBits)
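
/* For illustration: with the usual ARMv7 sizes (16MiB supersections, 1MiB
 * sections, 64KiB large pages, 4KiB small pages) both constants evaluate to
 * BIT(24 - 20) = BIT(16 - 12) = 16 identical entries per mapping. */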

/* helper stuff to avoid fencepost errors when
 * getting the last byte of a PTE or PDE */
#define LAST_BYTE_PTE(PTE,LENGTH) ((word_t)&(PTE)[(LENGTH)-1] + (BIT(PTE_SIZE_BITS)-1))
#define LAST_BYTE_PDE(PDE,LENGTH) ((word_t)&(PDE)[(LENGTH)-1] + (BIT(PDE_SIZE_BITS)-1))
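
/* Worked example (a sketch, assuming 4-byte descriptors, i.e. PTE_SIZE_BITS = 2):
 * LAST_BYTE_PTE(pt, 16) is the address of entry pt[15] plus 3, i.e. the final
 * byte of a 16-entry run -- an inclusive upper bound for the cache-clean
 * ranges below, with no fencepost error. */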

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
/* Stage 2 */
#define MEMATTR_CACHEABLE 0xf /* Inner and Outer write-back */
#define MEMATTR_NONCACHEABLE 0x0 /* Strongly ordered or device memory */

/* Stage 1 hyp */
#define ATTRINDX_CACHEABLE 0xff /* Inner and Outer RW write-back non-transient */
#define ATTRINDX_NONCACHEABLE 0x0 /* Strongly ordered or device memory */
#endif /* CONFIG_ARM_HYPERVISOR_SUPPORT */

struct resolve_ret {
    paddr_t frameBase;
    vm_page_size_t frameSize;
    bool_t valid;
};
typedef struct resolve_ret resolve_ret_t;

#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
static bool_t PURE pteCheckIfMapped(pte_t *pte);
static bool_t PURE pdeCheckIfMapped(pde_t *pde);

static word_t CONST APFromVMRights(vm_rights_t vm_rights)
{
    switch (vm_rights) {
    case VMNoAccess:
        return 0;

    case VMKernelOnly:
        return 1;

    case VMReadOnly:
        return 2;

    case VMReadWrite:
        return 3;

    default:
        fail("Invalid VM rights");
    }
}

#else
/* The AP encoding is slightly different here. AP is only used for kernel
 * mappings, which are fixed after boot time */
BOOT_CODE
static word_t CONST APFromVMRights(vm_rights_t vm_rights)
{
    switch (vm_rights) {
    case VMKernelOnly:
        return 0;
    case VMReadWrite:
        return 1;
    case VMNoAccess:
        /* RO at PL1 only */
        return 2;
    case VMReadOnly:
        return 3;
    default:
        fail("Invalid VM rights");
    }
}

static word_t CONST HAPFromVMRights(vm_rights_t vm_rights)
{
    switch (vm_rights) {
    case VMKernelOnly:
    case VMNoAccess:
        return 0;
    case VMReadOnly:
        return 1;
    /*
    case VMWriteOnly:
        return 2;
    */
    case VMReadWrite:
        return 3;
    default:
        fail("Invalid VM rights");
    }
}

#endif

vm_rights_t CONST maskVMRights(vm_rights_t vm_rights, seL4_CapRights_t cap_rights_mask)
{
    if (vm_rights == VMNoAccess) {
        return VMNoAccess;
    }
    if (vm_rights == VMReadOnly &&
        seL4_CapRights_get_capAllowRead(cap_rights_mask)) {
        return VMReadOnly;
    }
    if (vm_rights == VMReadWrite &&
        seL4_CapRights_get_capAllowRead(cap_rights_mask)) {
        if (!seL4_CapRights_get_capAllowWrite(cap_rights_mask)) {
            return VMReadOnly;
        } else {
            return VMReadWrite;
        }
    }
    if (vm_rights == VMReadWrite &&
        !seL4_CapRights_get_capAllowRead(cap_rights_mask) &&
        seL4_CapRights_get_capAllowWrite(cap_rights_mask)) {
        userError("Attempted to make unsupported write only mapping");
    }
    return VMKernelOnly;
}
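
/* For example, masking a VMReadWrite mapping with a cap that allows read but
 * not write yields VMReadOnly, while a write-only mask falls through to
 * VMKernelOnly, since ARM has no supported write-only encoding. */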

/* ==================== BOOT CODE STARTS HERE ==================== */

BOOT_CODE void map_kernel_frame(paddr_t paddr, pptr_t vaddr, vm_rights_t vm_rights, vm_attributes_t attributes)
{
    word_t idx = (vaddr & MASK(pageBitsForSize(ARMSection))) >> pageBitsForSize(ARMSmallPage);

    assert(vaddr >= PPTR_TOP); /* vaddr lies in the region the global PT covers */
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
    word_t tex;
    if (vm_attributes_get_armPageCacheable(attributes)) {
        tex = 5; /* TEX = 0b1(Cached)01(Outer Write Allocate) */
    } else {
        tex = 0;
    }
    armKSGlobalPT[idx] =
        pte_pte_small_new(
            paddr,
            0, /* global */
            SMP_TERNARY(1, 0), /* shareable if SMP enabled, otherwise unshared */
            0, /* APX = 0, privileged full access */
            tex,
            APFromVMRights(vm_rights),
            0, /* C (Inner write allocate) */
            1, /* B (Inner write allocate) */
            0 /* executable */
        );
#else /* CONFIG_ARM_HYPERVISOR_SUPPORT */
    armHSGlobalPT[idx] =
        pteS1_pteS1_small_new(
            0, /* Executable */
            0, /* Executable at PL1 */
            0, /* Not contiguous */
            paddr,
            0, /* global */
            1, /* AF -- always set */
            0, /* Not shared */
            APFromVMRights(vm_rights),
            0, /* non secure */
            vm_attributes_get_armPageCacheable(attributes)
        );
#endif /* !CONFIG_ARM_HYPERVISOR_SUPPORT */
}

#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
BOOT_CODE void map_kernel_window(void)
{
    paddr_t phys;
    word_t idx;
    pde_t pde;

    /* mapping of KERNEL_ELF_BASE (virtual address) to kernel's
     * KERNEL_ELF_PHYS_BASE */
    /* up to end of virtual address space minus 16M using 16M frames */
    phys = physBase;
    idx = PPTR_BASE >> pageBitsForSize(ARMSection);

    while (idx < BIT(PD_INDEX_BITS) - SECTIONS_PER_SUPER_SECTION) {
        word_t idx2;

        pde = pde_pde_section_new(
                  phys,
                  1, /* SuperSection */
                  0, /* global */
                  SMP_TERNARY(1, 0), /* shareable if SMP enabled, otherwise unshared */
                  0, /* APX = 0, privileged full access */
                  5, /* TEX = 0b1(Cached)01(Outer Write Allocate) */
                  1, /* VMKernelOnly */
                  1, /* Parity enabled */
                  0, /* Domain 0 */
                  0, /* XN not set */
                  0, /* C (Inner write allocate) */
                  1 /* B (Inner write allocate) */
              );
        for (idx2 = idx; idx2 < idx + SECTIONS_PER_SUPER_SECTION; idx2++) {
            armKSGlobalPD[idx2] = pde;
        }
        phys += BIT(pageBitsForSize(ARMSuperSection));
        idx += SECTIONS_PER_SUPER_SECTION;
    }

    while (idx < (PPTR_TOP >> pageBitsForSize(ARMSection))) {
        pde = pde_pde_section_new(
                  phys,
                  0, /* Section */
                  0, /* global */
                  SMP_TERNARY(1, 0), /* shareable if SMP enabled, otherwise unshared */
                  0, /* APX = 0, privileged full access */
                  5, /* TEX = 0b1(Cached)01(Outer Write Allocate) */
                  1, /* VMKernelOnly */
                  1, /* Parity enabled */
                  0, /* Domain 0 */
                  0, /* XN not set */
                  0, /* C (Inner write allocate) */
                  1 /* B (Inner write allocate) */
              );
        armKSGlobalPD[idx] = pde;
        phys += BIT(pageBitsForSize(ARMSection));
        idx++;
    }

    /* crosscheck whether we have mapped correctly so far */
    assert(phys == PADDR_TOP);

#ifdef CONFIG_KERNEL_LOG_BUFFER
    /* map log buffer page table. PTEs to be filled by user later by calling seL4_BenchmarkSetLogBuffer() */
    armKSGlobalPD[idx] =
        pde_pde_coarse_new(
            addrFromKPPtr(armKSGlobalLogPT), /* address */
            true, /* P */
            0 /* Domain */
        );

    memzero(armKSGlobalLogPT, BIT(seL4_PageTableBits));

    phys += BIT(pageBitsForSize(ARMSection));
    idx++;
#endif /* CONFIG_KERNEL_LOG_BUFFER */

    /* map page table covering last 1M of virtual address space to page directory */
    armKSGlobalPD[idx] =
        pde_pde_coarse_new(
            addrFromKPPtr(armKSGlobalPT), /* address */
            true, /* P */
            0 /* Domain */
        );

    /* now start initialising the page table */
    memzero(armKSGlobalPT, 1 << seL4_PageTableBits);

    /* map vector table */
    map_kernel_frame(
        addrFromKPPtr(arm_vector_table),
        PPTR_VECTOR_TABLE,
        VMKernelOnly,
        vm_attributes_new(
            false, /* armExecuteNever */
            true, /* armParityEnabled */
            true /* armPageCacheable */
        )
    );

    map_kernel_devices();
}

#else /* CONFIG_ARM_HYPERVISOR_SUPPORT */

BOOT_CODE void map_kernel_window(void)
{
    paddr_t phys;
    uint32_t idx;
    pdeS1_t pde;
    pte_t UNUSED pteS2;

    /* Initialise PGD */
    for (idx = 0; idx < 3; idx++) {
        pde = pdeS1_pdeS1_invalid_new();
        armHSGlobalPGD[idx] = pde;
    }
    pde = pdeS1_pdeS1_coarse_new(0, 0, 0, 0, addrFromKPPtr(armHSGlobalPD));
    armHSGlobalPGD[3] = pde;

    /* Initialise PMD */
    /* Invalidate up until USER_TOP */
    for (idx = 0; idx < (USER_TOP - 0xC0000000) >> (PT_INDEX_BITS + PAGE_BITS); idx++) {
        pde = pdeS1_pdeS1_invalid_new();
        armHSGlobalPD[idx] = pde;
    }
    /* mapping of PPTR_BASE (virtual address) to kernel's physBase */
    /* up to end of virtual address space minus 2M using 2M frames */
    phys = physBase;
    for (; idx < BIT(PT_INDEX_BITS) - 1; idx++) {
        pde = pdeS1_pdeS1_section_new(
                  0, /* Executable */
                  0, /* Executable in PL1 */
                  0, /* Not contiguous */
                  phys, /* Address */
                  0, /* global */
                  1, /* AF -- always set to 1 */
                  0, /* Not Shareable */
                  0, /* AP: WR at PL1 only */
                  0, /* Not secure */
                  1 /* outer write-back Cacheable */
              );
        armHSGlobalPD[idx] = pde;
        phys += BIT(PT_INDEX_BITS + PAGE_BITS);
    }
    /* map page table covering last 2M of virtual address space */
    pde = pdeS1_pdeS1_coarse_new(0, 0, 0, 0, addrFromKPPtr(armHSGlobalPT));
    armHSGlobalPD[idx] = pde;

    /* now start initialising the page table */
    memzero(armHSGlobalPT, 1 << seL4_PageTableBits);
    for (idx = 0; idx < 256; idx++) {
        pteS1_t pte;
        pte = pteS1_pteS1_small_new(
                  0, /* Executable */
                  0, /* Executable in PL1 */
                  0, /* Not contiguous */
                  phys, /* Address */
                  0, /* global */
                  1, /* AF -- always set to 1 */
                  0, /* Not Shareable */
                  0, /* AP: WR at PL1 only */
                  0, /* Not secure */
                  1 /* outer write-back Cacheable */
              );
        armHSGlobalPT[idx] = pte;
        phys += BIT(PAGE_BITS);
    }
    /* map vector table */
    map_kernel_frame(
        addrFromKPPtr(arm_vector_table),
        PPTR_VECTOR_TABLE,
        VMKernelOnly,
        vm_attributes_new(
            false, /* armExecuteNever */
            true, /* armParityEnabled */
            true /* armPageCacheable */
        )
    );

    map_kernel_devices();
}

#endif /* !CONFIG_ARM_HYPERVISOR_SUPPORT */

static BOOT_CODE void map_it_frame_cap(cap_t pd_cap, cap_t frame_cap, bool_t executable)
{
    pte_t *pt;
    pte_t *targetSlot;
    pde_t *pd = PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(pd_cap));
    void *frame = (void *)generic_frame_cap_get_capFBasePtr(frame_cap);
    vptr_t vptr = generic_frame_cap_get_capFMappedAddress(frame_cap);

    assert(generic_frame_cap_get_capFMappedASID(frame_cap) != 0);

    pd += (vptr >> pageBitsForSize(ARMSection));
    pt = ptrFromPAddr(pde_pde_coarse_ptr_get_address(pd));
    targetSlot = pt + ((vptr & MASK(pageBitsForSize(ARMSection)))
                       >> pageBitsForSize(ARMSmallPage));
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
    *targetSlot = pte_pte_small_new(
                      addrFromPPtr(frame),
                      1, /* not global */
                      SMP_TERNARY(1, 0), /* shareable if SMP enabled, otherwise unshared */
                      0, /* APX = 0, privileged full access */
                      5, /* TEX = 0b1(Cached)01(Outer Write Allocate) */
                      APFromVMRights(VMReadWrite),
                      0, /* C (Inner write allocate) */
                      1, /* B (Inner write allocate) */
                      !executable
                  );
#else
    *targetSlot = pte_pte_small_new(
                      0, /* Executable */
                      0, /* Not contiguous */
                      addrFromPPtr(frame),
                      1, /* AF -- always set */
                      0, /* Not shared */
                      HAPFromVMRights(VMReadWrite),
                      MEMATTR_CACHEABLE /* Cacheable */
                  );
#endif
}

/* Create a frame cap for the initial thread. */

static BOOT_CODE cap_t create_it_frame_cap(pptr_t pptr, vptr_t vptr, asid_t asid, bool_t use_large)
{
    if (use_large)
        return
            cap_frame_cap_new(
                ARMSection, /* capFSize */
                ASID_LOW(asid), /* capFMappedASIDLow */
                wordFromVMRights(VMReadWrite), /* capFVMRights */
                vptr, /* capFMappedAddress */
                false, /* capFIsDevice */
                ASID_HIGH(asid), /* capFMappedASIDHigh */
                pptr /* capFBasePtr */
            );
    else
        return
            cap_small_frame_cap_new(
                ASID_LOW(asid), /* capFMappedASIDLow */
                wordFromVMRights(VMReadWrite), /* capFVMRights */
                vptr, /* capFMappedAddress */
                false, /* capFIsDevice */
#ifdef CONFIG_TK1_SMMU
                0, /* IOSpace */
#endif
                ASID_HIGH(asid), /* capFMappedASIDHigh */
                pptr /* capFBasePtr */
            );
}

static BOOT_CODE void map_it_pt_cap(cap_t pd_cap, cap_t pt_cap)
{
    pde_t *pd = PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(pd_cap));
    pte_t *pt = PTE_PTR(cap_page_table_cap_get_capPTBasePtr(pt_cap));
    vptr_t vptr = cap_page_table_cap_get_capPTMappedAddress(pt_cap);
    pde_t *targetSlot = pd + (vptr >> pageBitsForSize(ARMSection));

    assert(cap_page_table_cap_get_capPTIsMapped(pt_cap));

#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
    *targetSlot = pde_pde_coarse_new(
                      addrFromPPtr(pt), /* address */
                      true, /* P */
                      0 /* Domain */
                  );
#else
    *targetSlot = pde_pde_coarse_new(addrFromPPtr(pt));
#endif
}

/* Create a page table for the initial thread */

static BOOT_CODE cap_t create_it_page_table_cap(cap_t pd, pptr_t pptr, vptr_t vptr, asid_t asid)
{
    cap_t cap;
    cap = cap_page_table_cap_new(
              1, /* capPTIsMapped */
              asid, /* capPTMappedASID */
              vptr, /* capPTMappedAddress */
              pptr /* capPTBasePtr */
          );
    if (asid != asidInvalid) {
        map_it_pt_cap(pd, cap);
    }
    return cap;
}

BOOT_CODE word_t arch_get_n_paging(v_region_t it_v_reg)
{
    return get_n_paging(it_v_reg, PT_INDEX_BITS + PAGE_BITS);
}
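
/* Illustration: each page table spans BIT(PT_INDEX_BITS + PAGE_BITS) bytes of
 * virtual address space (1MiB with the non-hyp values PT_INDEX_BITS = 8 and
 * PAGE_BITS = 12), so a hypothetical 5MiB initial-thread image straddling a
 * 1MiB boundary would need six page tables. */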

/* Create an address space for the initial thread.
 * This includes page directory and page tables */
BOOT_CODE cap_t create_it_address_space(cap_t root_cnode_cap, v_region_t it_v_reg)
{
    vptr_t pt_vptr;
    seL4_SlotPos slot_pos_before;
    seL4_SlotPos slot_pos_after;

    /* create PD cap */
    copyGlobalMappings(PDE_PTR(rootserver.vspace));
    cleanCacheRange_PoU(rootserver.vspace, rootserver.vspace + (1 << seL4_PageDirBits) - 1,
                        addrFromPPtr((void *)rootserver.vspace));
    cap_t pd_cap =
        cap_page_directory_cap_new(
            true, /* capPDIsMapped */
            IT_ASID, /* capPDMappedASID */
            rootserver.vspace /* capPDBasePtr */
        );
    slot_pos_before = ndks_boot.slot_pos_cur;
    write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), seL4_CapInitThreadVSpace), pd_cap);

    /* create all PT caps necessary to cover userland image */
    for (pt_vptr = ROUND_DOWN(it_v_reg.start, PT_INDEX_BITS + PAGE_BITS);
         pt_vptr < it_v_reg.end;
         pt_vptr += BIT(PT_INDEX_BITS + PAGE_BITS)) {
        if (!provide_cap(root_cnode_cap,
                         create_it_page_table_cap(pd_cap, it_alloc_paging(), pt_vptr, IT_ASID))
           ) {
            return cap_null_cap_new();
        }
    }

    slot_pos_after = ndks_boot.slot_pos_cur;
    ndks_boot.bi_frame->userImagePaging = (seL4_SlotRegion) {
        slot_pos_before, slot_pos_after
    };

    return pd_cap;
}

BOOT_CODE cap_t create_unmapped_it_frame_cap(pptr_t pptr, bool_t use_large)
{
    return create_it_frame_cap(pptr, 0, asidInvalid, use_large);
}

BOOT_CODE cap_t create_mapped_it_frame_cap(cap_t pd_cap, pptr_t pptr, vptr_t vptr, asid_t asid, bool_t use_large,
                                           bool_t executable)
{
    cap_t cap = create_it_frame_cap(pptr, vptr, asid, use_large);
    map_it_frame_cap(pd_cap, cap, executable);
    return cap;
}

#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT

BOOT_CODE void activate_global_pd(void)
{
    /* Ensure that there's nothing stale in newly-mapped regions, and
       that everything we've written (particularly the kernel page tables)
       is committed. */
    cleanInvalidateL1Caches();
    setCurrentPD(addrFromKPPtr(armKSGlobalPD));
    invalidateLocalTLB();
    lockTLBEntry(PPTR_BASE);
    lockTLBEntry(PPTR_VECTOR_TABLE);
}

#else

BOOT_CODE void activate_global_pd(void)
{
    uint32_t r;
    /* Ensure that there's nothing stale in newly-mapped regions, and
       that everything we've written (particularly the kernel page tables)
       is committed. */
    cleanInvalidateL1Caches();
    /* Set up the memory attributes: we use 2 indices (cacheable/non-cacheable) */
    setHMAIR((ATTRINDX_NONCACHEABLE << 0) | (ATTRINDX_CACHEABLE << 8), 0);
    setCurrentHypPD(addrFromKPPtr(armHSGlobalPGD));
    invalidateHypTLB();
#if 0 /* Can't lock entries on A15 */
    lockTLBEntry(PPTR_BASE);
    lockTLBEntry(PPTR_VECTOR_TABLE);
#endif
    /* TODO find a better place to init the VMMU */
    r = 0;
    /* Translation range */
    r |= (0x0 << 0);     /* 2^(32 -(0)) input range. */
    r |= (r & 0x8) << 1; /* Sign bit */
    /* starting level */
    r |= (0x0 << 6);     /* Start at second level */
    /* Sharability of tables */
    r |= BIT(8);         /* Inner write-back, write-allocate */
    r |= BIT(10);        /* Outer write-back, write-allocate */
    /* Long descriptor format (not that we have a choice) */
    r |= BIT(31);
    setVTCR(r);
}

#endif /* CONFIG_ARM_HYPERVISOR_SUPPORT */

BOOT_CODE void write_it_asid_pool(cap_t it_ap_cap, cap_t it_pd_cap)
{
    asid_pool_t *ap = ASID_POOL_PTR(pptr_of_cap(it_ap_cap));
    ap->array[IT_ASID] = PDE_PTR(pptr_of_cap(it_pd_cap));
    armKSASIDTable[IT_ASID >> asidLowBits] = ap;
}

/* ==================== BOOT CODE FINISHES HERE ==================== */

findPDForASID_ret_t findPDForASID(asid_t asid)
{
    findPDForASID_ret_t ret;
    asid_pool_t *poolPtr;
    pde_t *pd;

    poolPtr = armKSASIDTable[asid >> asidLowBits];
    if (unlikely(!poolPtr)) {
        current_lookup_fault = lookup_fault_invalid_root_new();

        ret.pd = NULL;
        ret.status = EXCEPTION_LOOKUP_FAULT;
        return ret;
    }

    pd = poolPtr->array[asid & MASK(asidLowBits)];
    if (unlikely(!pd)) {
        current_lookup_fault = lookup_fault_invalid_root_new();

        ret.pd = NULL;
        ret.status = EXCEPTION_LOOKUP_FAULT;
        return ret;
    }

    ret.pd = pd;
    ret.status = EXCEPTION_NONE;
    return ret;
}
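
/* Typical usage sketch: callers must check ret.status before touching ret.pd,
 * e.g.
 *     find_ret = findPDForASID(asid);
 *     if (unlikely(find_ret.status != EXCEPTION_NONE)) { return; }
 * as unmapPage() below does; on failure current_lookup_fault has already been
 * set for the fault-reporting path. */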

word_t *PURE lookupIPCBuffer(bool_t isReceiver, tcb_t *thread)
{
    word_t w_bufferPtr;
    cap_t bufferCap;
    vm_rights_t vm_rights;

    w_bufferPtr = thread->tcbIPCBuffer;
    bufferCap = TCB_PTR_CTE_PTR(thread, tcbBuffer)->cap;

    if (unlikely(cap_get_capType(bufferCap) != cap_small_frame_cap &&
                 cap_get_capType(bufferCap) != cap_frame_cap)) {
        return NULL;
    }
    if (unlikely(generic_frame_cap_get_capFIsDevice(bufferCap))) {
        return NULL;
    }

    vm_rights = generic_frame_cap_get_capFVMRights(bufferCap);
    if (likely(vm_rights == VMReadWrite ||
               (!isReceiver && vm_rights == VMReadOnly))) {
        word_t basePtr;
        unsigned int pageBits;

        basePtr = generic_frame_cap_get_capFBasePtr(bufferCap);
        pageBits = pageBitsForSize(generic_frame_cap_get_capFSize(bufferCap));
        return (word_t *)(basePtr + (w_bufferPtr & MASK(pageBits)));
    } else {
        return NULL;
    }
}

exception_t checkValidIPCBuffer(vptr_t vptr, cap_t cap)
{
    if (unlikely(cap_get_capType(cap) != cap_small_frame_cap &&
                 cap_get_capType(cap) != cap_frame_cap)) {
        userError("Requested IPC Buffer is not a frame cap.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }
    if (unlikely(generic_frame_cap_get_capFIsDevice(cap))) {
        userError("Specifying a device frame as an IPC buffer is not permitted.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (unlikely(vptr & MASK(seL4_IPCBufferSizeBits))) {
        userError("Requested IPC Buffer location 0x%x is not aligned.",
                  (int)vptr);
        current_syscall_error.type = seL4_AlignmentError;
        return EXCEPTION_SYSCALL_ERROR;
    }

    return EXCEPTION_NONE;
}

pde_t *CONST lookupPDSlot(pde_t *pd, vptr_t vptr)
{
    unsigned int pdIndex;

    pdIndex = vptr >> (PAGE_BITS + PT_INDEX_BITS);
    return pd + pdIndex;
}
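
/* For illustration, with the non-hyp values PAGE_BITS = 12 and
 * PT_INDEX_BITS = 8, a vptr of 0x00345678 gives pdIndex = 0x00345678 >> 20
 * = 0x3, i.e. the page-directory slot covering the fourth 1MiB of the
 * address space. */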

lookupPTSlot_ret_t lookupPTSlot(pde_t *pd, vptr_t vptr)
{
    lookupPTSlot_ret_t ret;
    pde_t *pdSlot;

    pdSlot = lookupPDSlot(pd, vptr);

    if (unlikely(pde_ptr_get_pdeType(pdSlot) != pde_pde_coarse)) {
        current_lookup_fault = lookup_fault_missing_capability_new(PT_INDEX_BITS + PAGE_BITS);

        ret.ptSlot = NULL;
        ret.status = EXCEPTION_LOOKUP_FAULT;
        return ret;
    } else {
        pte_t *pt, *ptSlot;
        unsigned int ptIndex;

        pt = ptrFromPAddr(pde_pde_coarse_ptr_get_address(pdSlot));
        ptIndex = (vptr >> PAGE_BITS) & MASK(PT_INDEX_BITS);
        ptSlot = pt + ptIndex;

        ret.ptSlot = ptSlot;
        ret.status = EXCEPTION_NONE;
        return ret;
    }
}

static pte_t *lookupPTSlot_nofail(pte_t *pt, vptr_t vptr)
{
    unsigned int ptIndex;

    ptIndex = (vptr >> PAGE_BITS) & MASK(PT_INDEX_BITS);
    return pt + ptIndex;
}

static const resolve_ret_t default_resolve_ret_t;

static resolve_ret_t resolveVAddr(pde_t *pd, vptr_t vaddr)
{
    pde_t *pde = lookupPDSlot(pd, vaddr);
    resolve_ret_t ret = default_resolve_ret_t;

    ret.valid = true;

    switch (pde_ptr_get_pdeType(pde)) {
    case pde_pde_section:
        ret.frameBase = pde_pde_section_ptr_get_address(pde);
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
        if (pde_pde_section_ptr_get_size(pde)) {
            ret.frameSize = ARMSuperSection;
        } else {
            ret.frameSize = ARMSection;
        }
#else
        if (pde_pde_section_ptr_get_contiguous_hint(pde)) {
            /* Entries are represented as 16 contiguous sections. We need to mask
               to get the super section frame base */
            ret.frameBase &= ~MASK(pageBitsForSize(ARMSuperSection));
            ret.frameSize = ARMSuperSection;
        } else {
            ret.frameSize = ARMSection;
        }
#endif
        return ret;

    case pde_pde_coarse: {
        pte_t *pt = ptrFromPAddr(pde_pde_coarse_ptr_get_address(pde));
        pte_t *pte = lookupPTSlot_nofail(pt, vaddr);
        switch (pte_ptr_get_pteType(pte)) {
        case pte_pte_small:
            ret.frameBase = pte_pte_small_ptr_get_address(pte);
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
            if (pte_pte_small_ptr_get_contiguous_hint(pte)) {
                /* Entries are represented as 16 contiguous small frames. We need to mask
                   to get the large frame base */
                ret.frameBase &= ~MASK(pageBitsForSize(ARMLargePage));
                ret.frameSize = ARMLargePage;
            } else {
                ret.frameSize = ARMSmallPage;
            }
#else
            ret.frameSize = ARMSmallPage;
#endif
            return ret;

#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
        case pte_pte_large:
            ret.frameBase = pte_pte_large_ptr_get_address(pte);
            ret.frameSize = ARMLargePage;
            return ret;
#endif
        default:
            break;
        }
        break;
    }
    }

    ret.valid = false;
    return ret;
}

static pte_t CONST makeUserPTE(vm_page_size_t page_size, paddr_t paddr,
                               bool_t cacheable, bool_t nonexecutable, vm_rights_t vm_rights)
{
    pte_t pte;
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
    word_t ap;

    ap = APFromVMRights(vm_rights);

    switch (page_size) {
    case ARMSmallPage: {
        if (cacheable) {
            pte = pte_pte_small_new(paddr,
                                    1, /* not global */
                                    SMP_TERNARY(1, 0), /* shareable if SMP enabled, otherwise unshared */
                                    0, /* APX = 0, privileged full access */
                                    5, /* TEX = 0b101, outer write-back, write-allocate */
                                    ap,
                                    0, 1, /* Inner write-back, write-allocate */
                                    nonexecutable);
        } else {
            pte = pte_pte_small_new(paddr,
                                    1, /* not global */
                                    1, /* shared */
                                    0, /* APX = 0, privileged full access */
                                    0, /* TEX = 0b000, strongly-ordered. */
                                    ap,
                                    0, 0,
                                    nonexecutable);
        }
        break;
    }

    case ARMLargePage: {
        if (cacheable) {
            pte = pte_pte_large_new(paddr,
                                    nonexecutable,
                                    5, /* TEX = 0b101, outer write-back, write-allocate */
                                    1, /* not global */
                                    SMP_TERNARY(1, 0), /* shareable if SMP enabled, otherwise unshared */
                                    0, /* APX = 0, privileged full access */
                                    ap,
                                    0, 1, /* Inner write-back, write-allocate */
                                    1 /* reserved */);
        } else {
            pte = pte_pte_large_new(paddr,
                                    nonexecutable,
                                    0, /* TEX = 0b000, strongly-ordered */
                                    1, /* not global */
                                    1, /* shared */
                                    0, /* APX = 0, privileged full access */
                                    ap,
                                    0, 0,
                                    1 /* reserved */);
        }
        break;
    }

    default:
        fail("Invalid PTE frame type");
    }

#else

    word_t hap;

    hap = HAPFromVMRights(vm_rights);

    switch (page_size) {
    case ARMSmallPage: {
        if (cacheable) {
            pte = pte_pte_small_new(
                      nonexecutable, /* Executable */
                      0, /* Not contiguous */
                      paddr,
                      1, /* AF - Always set */
                      0, /* not shared */
                      hap, /* HAP - access */
                      MEMATTR_CACHEABLE /* Cacheable */);
        } else {
            pte = pte_pte_small_new(
                      nonexecutable, /* Executable */
                      0, /* Not contiguous */
                      paddr,
                      1, /* AF - Always set */
                      0, /* not shared */
                      hap, /* HAP - access */
                      MEMATTR_NONCACHEABLE /* Not cacheable */);
        }
        break;
    }

    case ARMLargePage: {
        if (cacheable) {
            pte = pte_pte_small_new(
                      nonexecutable, /* Executable */
                      1, /* 16 contiguous */
                      paddr,
                      1, /* AF - Always set */
                      0, /* not shared */
                      hap, /* HAP - access */
                      MEMATTR_CACHEABLE /* Cacheable */);
        } else {
            pte = pte_pte_small_new(
                      nonexecutable, /* Executable */
                      1, /* 16 contiguous */
                      paddr,
                      1, /* AF - Always set */
                      0, /* not shared */
                      hap, /* HAP - access */
                      MEMATTR_NONCACHEABLE /* Not cacheable */);
        }
        break;
    }
    default:
        fail("Invalid PTE frame type");
    }
#endif /* CONFIG_ARM_HYPERVISOR_SUPPORT */

    return pte;
}

static pde_t CONST makeUserPDE(vm_page_size_t page_size, paddr_t paddr, bool_t parity,
                               bool_t cacheable, bool_t nonexecutable, word_t domain,
                               vm_rights_t vm_rights)
{
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
    word_t ap, size2;

    ap = APFromVMRights(vm_rights);
#else
    word_t hap, size2;

    (void)domain;
    hap = HAPFromVMRights(vm_rights);
#endif

    switch (page_size) {
    case ARMSection:
        size2 = 0;
        break;

    case ARMSuperSection:
        size2 = 1;
        break;

    default:
        fail("Invalid PDE frame type");
    }

#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
    if (cacheable) {
        return pde_pde_section_new(paddr, size2,
                                   1, /* not global */
                                   SMP_TERNARY(1, 0), /* shareable if SMP enabled, otherwise unshared */
                                   0, /* APX = 0, privileged full access */
                                   5, /* TEX = 0b101, outer write-back, write-allocate */
                                   ap, parity, domain, nonexecutable,
                                   0, 1 /* Inner write-back, write-allocate */);
    } else {
        return pde_pde_section_new(paddr, size2,
                                   1, /* not global */
                                   1, /* shared */
                                   0, /* APX = 0, privileged full access */
                                   0, /* TEX = 0b000, strongly-ordered */
                                   ap, parity, domain, nonexecutable,
                                   0, 0);
    }
#else
    if (cacheable) {
        return pde_pde_section_new(
                   nonexecutable, /* Executable */
                   size2, /* contiguous */
                   paddr,
                   1, /* AF - Always set */
                   0, /* not shared */
                   hap,
                   MEMATTR_CACHEABLE /* Cacheable */);
    } else {
        return pde_pde_section_new(
                   nonexecutable, /* Executable */
                   size2, /* contiguous */
                   paddr,
                   1, /* AF - Always set */
                   0, /* not shared */
                   hap,
                   MEMATTR_NONCACHEABLE /* Not cacheable */);
    }
#endif /* CONFIG_ARM_HYPERVISOR_SUPPORT */
}

bool_t CONST isValidVTableRoot(cap_t cap)
{
    return cap_get_capType(cap) == cap_page_directory_cap &&
           cap_page_directory_cap_get_capPDIsMapped(cap);
}

bool_t CONST isIOSpaceFrameCap(cap_t cap)
{
#ifdef CONFIG_TK1_SMMU
    return cap_get_capType(cap) == cap_small_frame_cap && cap_small_frame_cap_get_capFIsIOSpace(cap);
#else
    return false;
#endif
}

void setVMRoot(tcb_t *tcb)
{
    cap_t threadRoot;
    asid_t asid;
    pde_t *pd;
    findPDForASID_ret_t find_ret;

    threadRoot = TCB_PTR_CTE_PTR(tcb, tcbVTable)->cap;

    if (cap_get_capType(threadRoot) != cap_page_directory_cap ||
        !cap_page_directory_cap_get_capPDIsMapped(threadRoot)) {
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        setCurrentPD(addrFromKPPtr(armUSGlobalPD));
#else
        setCurrentPD(addrFromKPPtr(armKSGlobalPD));
#endif
        return;
    }

    pd = PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(threadRoot));
    asid = cap_page_directory_cap_get_capPDMappedASID(threadRoot);
    find_ret = findPDForASID(asid);
    if (unlikely(find_ret.status != EXCEPTION_NONE || find_ret.pd != pd)) {
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        setCurrentPD(addrFromKPPtr(armUSGlobalPD));
#else
        setCurrentPD(addrFromKPPtr(armKSGlobalPD));
#endif
        return;
    }

    armv_contextSwitch(pd, asid);
}

static bool_t setVMRootForFlush(pde_t *pd, asid_t asid)
{
    cap_t threadRoot;

    threadRoot = TCB_PTR_CTE_PTR(NODE_STATE(ksCurThread), tcbVTable)->cap;

    if (cap_get_capType(threadRoot) == cap_page_directory_cap &&
        cap_page_directory_cap_get_capPDIsMapped(threadRoot) &&
        PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(threadRoot)) == pd) {
        return false;
    }

    armv_contextSwitch(pd, asid);

    return true;
}

pde_t *pageTableMapped(asid_t asid, vptr_t vaddr, pte_t *pt)
{
    findPDForASID_ret_t find_ret;
    pde_t pde;
    unsigned int pdIndex;

    find_ret = findPDForASID(asid);
    if (unlikely(find_ret.status != EXCEPTION_NONE)) {
        return NULL;
    }

    pdIndex = vaddr >> (PAGE_BITS + PT_INDEX_BITS);
    pde = find_ret.pd[pdIndex];

    if (likely(pde_get_pdeType(pde) == pde_pde_coarse
               && ptrFromPAddr(pde_pde_coarse_get_address(pde)) == pt)) {
        return find_ret.pd;
    } else {
        return NULL;
    }
}

static void invalidateASID(asid_t asid)
{
    asid_pool_t *asidPool;
    pde_t *pd;

    asidPool = armKSASIDTable[asid >> asidLowBits];
    assert(asidPool);

    pd = asidPool->array[asid & MASK(asidLowBits)];
    assert(pd);

    pd[PD_ASID_SLOT] = pde_pde_invalid_new(0, false);
}

static pde_t PURE loadHWASID(asid_t asid)
{
    asid_pool_t *asidPool;
    pde_t *pd;

    asidPool = armKSASIDTable[asid >> asidLowBits];
    assert(asidPool);

    pd = asidPool->array[asid & MASK(asidLowBits)];
    assert(pd);

    return pd[PD_ASID_SLOT];
}

static void storeHWASID(asid_t asid, hw_asid_t hw_asid)
{
    asid_pool_t *asidPool;
    pde_t *pd;

    asidPool = armKSASIDTable[asid >> asidLowBits];
    assert(asidPool);

    pd = asidPool->array[asid & MASK(asidLowBits)];
    assert(pd);

    /* Store HW ASID in the last entry
       Masquerade as an invalid PDE */
    pd[PD_ASID_SLOT] = pde_pde_invalid_new(hw_asid, true);

    armKSHWASIDTable[hw_asid] = asid;
}

hw_asid_t findFreeHWASID(void)
{
    word_t hw_asid_offset;
    hw_asid_t hw_asid;

    /* Find a free hardware ASID */
    for (hw_asid_offset = 0;
         hw_asid_offset <= (word_t)((hw_asid_t) - 1);
         hw_asid_offset++) {
        hw_asid = armKSNextASID + ((hw_asid_t)hw_asid_offset);
        if (armKSHWASIDTable[hw_asid] == asidInvalid) {
            return hw_asid;
        }
    }

    hw_asid = armKSNextASID;

    /* If we've scanned the table without finding a free ASID */
    invalidateASID(armKSHWASIDTable[hw_asid]);

    /* Flush TLB */
    invalidateTranslationASID(hw_asid);
    armKSHWASIDTable[hw_asid] = asidInvalid;

    /* Increment the NextASID index */
    armKSNextASID++;

    return hw_asid;
}
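
/* Note on the wraparound case above: if every hardware ASID is in use (256 of
 * them, given the loop bound (hw_asid_t)-1 = 255 for the 8-bit hw_asid_t),
 * the entry at armKSNextASID is evicted: its owning address space is
 * invalidated and its TLB entries flushed, so the returned hw_asid is always
 * safe to reassign. */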

hw_asid_t getHWASID(asid_t asid)
{
    pde_t stored_hw_asid;

    stored_hw_asid = loadHWASID(asid);
    if (pde_pde_invalid_get_stored_asid_valid(stored_hw_asid)) {
        return pde_pde_invalid_get_stored_hw_asid(stored_hw_asid);
    } else {
        hw_asid_t new_hw_asid;

        new_hw_asid = findFreeHWASID();
        storeHWASID(asid, new_hw_asid);
        return new_hw_asid;
    }
}

static void invalidateASIDEntry(asid_t asid)
{
    pde_t stored_hw_asid;

    stored_hw_asid = loadHWASID(asid);
    if (pde_pde_invalid_get_stored_asid_valid(stored_hw_asid)) {
        armKSHWASIDTable[pde_pde_invalid_get_stored_hw_asid(stored_hw_asid)] =
            asidInvalid;
    }
    invalidateASID(asid);
}

void unmapPageTable(asid_t asid, vptr_t vaddr, pte_t *pt)
{
    pde_t *pd, *pdSlot;
    unsigned int pdIndex;

    pd = pageTableMapped(asid, vaddr, pt);

    if (likely(pd != NULL)) {
        pdIndex = vaddr >> (PT_INDEX_BITS + PAGE_BITS);
        pdSlot = pd + pdIndex;

        *pdSlot = pde_pde_invalid_new(0, 0);
        cleanByVA_PoU((word_t)pdSlot, addrFromPPtr(pdSlot));
        flushTable(pd, asid, vaddr, pt);
    }
}

void copyGlobalMappings(pde_t *newPD)
{
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
    word_t i;
    pde_t *global_pd = armKSGlobalPD;

    for (i = PPTR_BASE >> ARMSectionBits; i < BIT(PD_INDEX_BITS); i++) {
        if (i != PD_ASID_SLOT) {
            newPD[i] = global_pd[i];
        }
    }
#endif
}
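
/* Sketch of the effect: after copyGlobalMappings(), a fresh user PD shares
 * every kernel mapping from PPTR_BASE upwards with armKSGlobalPD, except
 * PD_ASID_SLOT, which each PD reserves for its stored hardware ASID (see
 * storeHWASID() above). */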

exception_t handleVMFault(tcb_t *thread, vm_fault_type_t vm_faultType)
{
    switch (vm_faultType) {
    case ARMDataAbort: {
        word_t addr, fault;

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        addr = getHDFAR();
        addr = (addressTranslateS1CPR(addr) & ~MASK(PAGE_BITS)) | (addr & MASK(PAGE_BITS));
        /* MSBs tell us that this was a DataAbort */
        fault = getHSR() & 0x3ffffff;
#else
        addr = getFAR();
        fault = getDFSR();
#endif

#ifdef CONFIG_HARDWARE_DEBUG_API
        /* Debug exceptions come in on the Prefetch and Data abort vectors.
         * We have to test the fault-status bits in the IFSR/DFSR to determine
         * if it's a debug exception when one occurs.
         *
         * If it is a debug exception, return early and don't fallthrough to the
         * normal VM Fault handling path.
         */
        if (isDebugFault(fault)) {
            current_fault = handleUserLevelDebugException(0);
            return EXCEPTION_FAULT;
        }
#endif
        current_fault = seL4_Fault_VMFault_new(addr, fault, false);
        return EXCEPTION_FAULT;
    }

    case ARMPrefetchAbort: {
        word_t pc, fault;

        pc = getRestartPC(thread);

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        pc = (addressTranslateS1CPR(pc) & ~MASK(PAGE_BITS)) | (pc & MASK(PAGE_BITS));
        /* MSBs tell us that this was a PrefetchAbort */
        fault = getHSR() & 0x3ffffff;
#else
        fault = getIFSR();
#endif

#ifdef CONFIG_HARDWARE_DEBUG_API
        if (isDebugFault(fault)) {
            current_fault = handleUserLevelDebugException(pc);

            if (seL4_Fault_DebugException_get_exceptionReason(current_fault) == seL4_SingleStep
                && !singleStepFaultCounterReady(thread)) {
                /* Don't send a fault message to the thread yet if we were asked
                 * to step through N instructions and the counter isn't depleted
                 * yet.
                 */
                return EXCEPTION_NONE;
            }
            return EXCEPTION_FAULT;
        }
#endif
        current_fault = seL4_Fault_VMFault_new(pc, fault, true);
        return EXCEPTION_FAULT;
    }

    default:
        fail("Invalid VM fault type");
    }
}

void deleteASIDPool(asid_t asid_base, asid_pool_t *pool)
{
    unsigned int offset;

    /* Haskell error: "ASID pool's base must be aligned" */
    assert((asid_base & MASK(asidLowBits)) == 0);

    if (armKSASIDTable[asid_base >> asidLowBits] == pool) {
        for (offset = 0; offset < BIT(asidLowBits); offset++) {
            if (pool->array[offset]) {
                flushSpace(asid_base + offset);
                invalidateASIDEntry(asid_base + offset);
            }
        }
        armKSASIDTable[asid_base >> asidLowBits] = NULL;
        setVMRoot(NODE_STATE(ksCurThread));
    }
}

void deleteASID(asid_t asid, pde_t *pd)
{
    asid_pool_t *poolPtr;

    poolPtr = armKSASIDTable[asid >> asidLowBits];

    if (poolPtr != NULL && poolPtr->array[asid & MASK(asidLowBits)] == pd) {
        flushSpace(asid);
        invalidateASIDEntry(asid);
        poolPtr->array[asid & MASK(asidLowBits)] = NULL;
        setVMRoot(NODE_STATE(ksCurThread));
    }
}

#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
static pte_t pte_pte_invalid_new(void)
{
    /* Invalid as every PTE must have bit 0 set (large PTE) or bit 1 set (small
     * PTE). 0 == 'translation fault' in ARM parlance.
     */
    return (pte_t) {
        {
            0
        }
    };
}
#endif

void unmapPage(vm_page_size_t page_size, asid_t asid, vptr_t vptr, void *pptr)
{
    findPDForASID_ret_t find_ret;
    paddr_t addr = addrFromPPtr(pptr);

    find_ret = findPDForASID(asid);
    if (unlikely(find_ret.status != EXCEPTION_NONE)) {
        return;
    }

    switch (page_size) {
    case ARMSmallPage: {
        lookupPTSlot_ret_t lu_ret;

        lu_ret = lookupPTSlot(find_ret.pd, vptr);
        if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
            return;
        }

        if (unlikely(pte_ptr_get_pteType(lu_ret.ptSlot) != pte_pte_small)) {
            return;
        }
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        if (unlikely(pte_pte_small_ptr_get_contiguous_hint(lu_ret.ptSlot) != 0)) {
            return;
        }
#endif
        if (unlikely(pte_pte_small_ptr_get_address(lu_ret.ptSlot) != addr)) {
            return;
        }

        *(lu_ret.ptSlot) = pte_pte_invalid_new();
        cleanByVA_PoU((word_t)lu_ret.ptSlot, addrFromPPtr(lu_ret.ptSlot));

        break;
    }

    case ARMLargePage: {
        lookupPTSlot_ret_t lu_ret;
        word_t i;

        lu_ret = lookupPTSlot(find_ret.pd, vptr);
        if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
            return;
        }
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
        if (unlikely(pte_ptr_get_pteType(lu_ret.ptSlot) != pte_pte_large)) {
            return;
        }
        if (unlikely(pte_pte_large_ptr_get_address(lu_ret.ptSlot) != addr)) {
            return;
        }
#else
        if (unlikely(pte_ptr_get_pteType(lu_ret.ptSlot) != pte_pte_small)) {
            return;
        }
        if (unlikely(pte_pte_small_ptr_get_contiguous_hint(lu_ret.ptSlot) != 1)) {
            return;
        }
        if (unlikely(pte_pte_small_ptr_get_address(lu_ret.ptSlot) != addr)) {
            return;
        }
#endif

        for (i = 0; i < PAGES_PER_LARGE_PAGE; i++) {
            lu_ret.ptSlot[i] = pte_pte_invalid_new();
        }
        cleanCacheRange_PoU((word_t)&lu_ret.ptSlot[0],
                            LAST_BYTE_PTE(lu_ret.ptSlot, PAGES_PER_LARGE_PAGE),
                            addrFromPPtr(&lu_ret.ptSlot[0]));

        break;
    }

    case ARMSection: {
        pde_t *pd;

        pd = lookupPDSlot(find_ret.pd, vptr);

        if (unlikely(pde_ptr_get_pdeType(pd) != pde_pde_section)) {
            return;
        }
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
        if (unlikely(pde_pde_section_ptr_get_size(pd) != 0)) {
#else
        if (unlikely(pde_pde_section_ptr_get_contiguous_hint(pd) != 0)) {
#endif
            return;
        }
        if (unlikely(pde_pde_section_ptr_get_address(pd) != addr)) {
            return;
        }

        *pd = pde_pde_invalid_new(0, 0);
        cleanByVA_PoU((word_t)pd, addrFromPPtr(pd));

        break;
    }

    case ARMSuperSection: {
        pde_t *pd;
        word_t i;

        pd = lookupPDSlot(find_ret.pd, vptr);

        if (unlikely(pde_ptr_get_pdeType(pd) != pde_pde_section)) {
            return;
        }
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
        if (unlikely(pde_pde_section_ptr_get_size(pd) != 1)) {
#else
        if (unlikely(pde_pde_section_ptr_get_contiguous_hint(pd) != 1)) {
#endif
            return;
        }
        if (unlikely(pde_pde_section_ptr_get_address(pd) != addr)) {
            return;
        }

        for (i = 0; i < SECTIONS_PER_SUPER_SECTION; i++) {
            pd[i] = pde_pde_invalid_new(0, 0);
        }
        cleanCacheRange_PoU((word_t)&pd[0], LAST_BYTE_PDE(pd, SECTIONS_PER_SUPER_SECTION),
                            addrFromPPtr(&pd[0]));

        break;
    }

    default:
        fail("Invalid ARM page type");
        break;
    }

    /* Flush the page now that the mapping has been updated */
    flushPage(page_size, find_ret.pd, asid, vptr);
}

void flushPage(vm_page_size_t page_size, pde_t *pd, asid_t asid, word_t vptr)
{
    pde_t stored_hw_asid;
    word_t base_addr;
    bool_t root_switched;

    assert((vptr & MASK(pageBitsForSize(page_size))) == 0);

    /* Switch to the address space to allow a cache clean by VA */
    root_switched = setVMRootForFlush(pd, asid);
    stored_hw_asid = loadHWASID(asid);

    if (pde_pde_invalid_get_stored_asid_valid(stored_hw_asid)) {
        base_addr = vptr & ~MASK(12);

        /* Do the TLB flush */
        invalidateTranslationSingle(base_addr | pde_pde_invalid_get_stored_hw_asid(stored_hw_asid));

        if (root_switched) {
            setVMRoot(NODE_STATE(ksCurThread));
        }
    }
}

void flushTable(pde_t *pd, asid_t asid, word_t vptr, pte_t *pt)
{
    pde_t stored_hw_asid;
    bool_t root_switched;

    assert((vptr & MASK(PT_INDEX_BITS + ARMSmallPageBits)) == 0);

    /* Switch to the address space to allow a cache clean by VA */
    root_switched = setVMRootForFlush(pd, asid);
    stored_hw_asid = loadHWASID(asid);

    if (pde_pde_invalid_get_stored_asid_valid(stored_hw_asid)) {
        invalidateTranslationASID(pde_pde_invalid_get_stored_hw_asid(stored_hw_asid));
        if (root_switched) {
            setVMRoot(NODE_STATE(ksCurThread));
        }
    }
}

void flushSpace(asid_t asid)
{
    pde_t stored_hw_asid;

    stored_hw_asid = loadHWASID(asid);

    /* Clean the entire data cache, to guarantee that any VAs mapped
     * in the deleted space are clean (because we can't clean by VA after
     * deleting the space) */
    cleanCaches_PoU();

    /* If the given ASID doesn't have a hardware ASID
     * assigned, then it can't have any mappings in the TLB */
    if (!pde_pde_invalid_get_stored_asid_valid(stored_hw_asid)) {
        return;
    }

    /* Do the TLB flush */
    invalidateTranslationASID(pde_pde_invalid_get_stored_hw_asid(stored_hw_asid));
}

void invalidateTLBByASID(asid_t asid)
{
    pde_t stored_hw_asid;

    stored_hw_asid = loadHWASID(asid);

    /* If the given ASID doesn't have a hardware ASID
     * assigned, then it can't have any mappings in the TLB */
    if (!pde_pde_invalid_get_stored_asid_valid(stored_hw_asid)) {
        return;
    }

    /* Do the TLB flush */
    invalidateTranslationASID(pde_pde_invalid_get_stored_hw_asid(stored_hw_asid));
}

static inline bool_t CONST checkVPAlignment(vm_page_size_t sz, word_t w)
{
    return (w & MASK(pageBitsForSize(sz))) == 0;
}
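
/* E.g. with non-hyp 1MiB sections, checkVPAlignment(ARMSection, 0x00100000)
 * holds while checkVPAlignment(ARMSection, 0x00180000) does not; a small page
 * only requires 4KiB alignment. */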

struct create_mappings_pte_return {
    exception_t status;
    pte_t pte;
    pte_range_t pte_entries;
};
typedef struct create_mappings_pte_return create_mappings_pte_return_t;

struct create_mappings_pde_return {
    exception_t status;
    pde_t pde;
    pde_range_t pde_entries;
};
typedef struct create_mappings_pde_return create_mappings_pde_return_t;

static create_mappings_pte_return_t createSafeMappingEntries_PTE(paddr_t base, word_t vaddr, vm_page_size_t frameSize,
                                                                 vm_rights_t vmRights, vm_attributes_t attr, pde_t *pd)
{
    create_mappings_pte_return_t ret;
    lookupPTSlot_ret_t lu_ret;
    word_t i;

    switch (frameSize) {

    case ARMSmallPage:

        ret.pte_entries.base = NULL; /* to avoid uninitialised warning */
        ret.pte_entries.length = 1;

        ret.pte = makeUserPTE(ARMSmallPage, base,
                              vm_attributes_get_armPageCacheable(attr),
                              vm_attributes_get_armExecuteNever(attr),
                              vmRights);

        lu_ret = lookupPTSlot(pd, vaddr);
        if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
            current_syscall_error.type = seL4_FailedLookup;
            current_syscall_error.failedLookupWasSource = false;
            ret.status = EXCEPTION_SYSCALL_ERROR;
            /* current_lookup_fault will have been set by lookupPTSlot */
            return ret;
        }

        ret.pte_entries.base = lu_ret.ptSlot;
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
        if (unlikely(pte_ptr_get_pteType(ret.pte_entries.base) ==
                     pte_pte_large)) {
#else
        if (unlikely(pte_ptr_get_pteType(ret.pte_entries.base) == pte_pte_small
                     && pte_pte_small_ptr_get_contiguous_hint(ret.pte_entries.base))) {
#endif
            current_syscall_error.type = seL4_DeleteFirst;

            ret.status = EXCEPTION_SYSCALL_ERROR;
            return ret;
        }

        ret.status = EXCEPTION_NONE;
        return ret;

    case ARMLargePage:

        ret.pte_entries.base = NULL; /* to avoid uninitialised warning */
        ret.pte_entries.length = PAGES_PER_LARGE_PAGE;

        ret.pte = makeUserPTE(ARMLargePage, base,
                              vm_attributes_get_armPageCacheable(attr),
                              vm_attributes_get_armExecuteNever(attr),
                              vmRights);

        lu_ret = lookupPTSlot(pd, vaddr);
        if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
            current_syscall_error.type = seL4_FailedLookup;
            current_syscall_error.failedLookupWasSource = false;
            ret.status = EXCEPTION_SYSCALL_ERROR;
            /* current_lookup_fault will have been set by lookupPTSlot */
            return ret;
        }

        ret.pte_entries.base = lu_ret.ptSlot;

        for (i = 0; i < PAGES_PER_LARGE_PAGE; i++) {
            if (unlikely(pte_get_pteType(ret.pte_entries.base[i]) ==
                         pte_pte_small)
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                && !pte_pte_small_get_contiguous_hint(ret.pte_entries.base[i])
#endif
               ) {
                current_syscall_error.type = seL4_DeleteFirst;

                ret.status = EXCEPTION_SYSCALL_ERROR;
                return ret;
            }
        }

        ret.status = EXCEPTION_NONE;
        return ret;

    default:
        fail("Invalid or unexpected ARM page type.");

    }
}

static create_mappings_pde_return_t createSafeMappingEntries_PDE(paddr_t base, word_t vaddr, vm_page_size_t frameSize,
                                                                 vm_rights_t vmRights, vm_attributes_t attr, pde_t *pd)
{
    create_mappings_pde_return_t ret;
    pde_tag_t currentPDEType;
    word_t i;

    switch (frameSize) {

    /* PDE mappings */
    case ARMSection:
        ret.pde_entries.base = lookupPDSlot(pd, vaddr);
        ret.pde_entries.length = 1;

        ret.pde = makeUserPDE(ARMSection, base,
                              vm_attributes_get_armParityEnabled(attr),
                              vm_attributes_get_armPageCacheable(attr),
                              vm_attributes_get_armExecuteNever(attr),
                              0,
                              vmRights);

        currentPDEType = pde_ptr_get_pdeType(ret.pde_entries.base);
        if (unlikely(currentPDEType != pde_pde_invalid &&
                     (currentPDEType != pde_pde_section ||
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
                      pde_pde_section_ptr_get_size(ret.pde_entries.base) != 0))) {
#else
                      pde_pde_section_ptr_get_contiguous_hint(ret.pde_entries.base) != 0))) {
#endif
            current_syscall_error.type = seL4_DeleteFirst;
            ret.status = EXCEPTION_SYSCALL_ERROR;

            return ret;
        }

        ret.status = EXCEPTION_NONE;
        return ret;

    case ARMSuperSection:
        ret.pde_entries.base = lookupPDSlot(pd, vaddr);
        ret.pde_entries.length = SECTIONS_PER_SUPER_SECTION;

        ret.pde = makeUserPDE(ARMSuperSection, base,
                              vm_attributes_get_armParityEnabled(attr),
                              vm_attributes_get_armPageCacheable(attr),
                              vm_attributes_get_armExecuteNever(attr),
                              0,
                              vmRights);

        for (i = 0; i < SECTIONS_PER_SUPER_SECTION; i++) {
            currentPDEType = pde_get_pdeType(ret.pde_entries.base[i]);
            if (unlikely(currentPDEType != pde_pde_invalid &&
                         (currentPDEType != pde_pde_section ||
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
                          pde_pde_section_get_size(ret.pde_entries.base[i]) != 1))) {
#else
                          pde_pde_section_get_contiguous_hint(ret.pde_entries.base[i]) != 1))) {
#endif
                current_syscall_error.type = seL4_DeleteFirst;
                ret.status = EXCEPTION_SYSCALL_ERROR;

                return ret;
            }
        }

        ret.status = EXCEPTION_NONE;
        return ret;

    default:
        fail("Invalid or unexpected ARM page type.");

    }
}

static inline vptr_t pageBase(vptr_t vaddr, vm_page_size_t size)
{
    return vaddr & ~MASK(pageBitsForSize(size));
}
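
/* E.g. pageBase(0x12345678, ARMSmallPage) = 0x12345000, and, with non-hyp
 * 1MiB sections, pageBase(0x12345678, ARMSection) = 0x12300000. */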
1746
1747 static bool_t PURE pteCheckIfMapped(pte_t *pte)
1748 {
1749 return pte_ptr_get_pteType(pte) != pte_pte_invalid;
1750 }
1751
1752 static bool_t PURE pdeCheckIfMapped(pde_t *pde)
1753 {
1754 return pde_ptr_get_pdeType(pde) != pde_pde_invalid;
1755 }
1756
1757 static void doFlush(int invLabel, vptr_t start, vptr_t end, paddr_t pstart)
1758 {
1759 /** GHOSTUPD: "((gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state = 0
1760 \<or> \<acute>end - \<acute>start <= gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state)
1761 \<and> \<acute>start <= \<acute>end, id)" */
1762 if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) {
1763 /* The hypervisor does not share an AS with userspace so we must flush
1764 * by kernel MVA instead. ARMv7 caches are PIPT so it makes no difference */
1765 end = (vptr_t)paddr_to_pptr(pstart) + (end - start);
1766 start = (vptr_t)paddr_to_pptr(pstart);
1767 }
1768 switch (invLabel) {
1769 case ARMPDClean_Data:
1770 case ARMPageClean_Data:
1771 cleanCacheRange_RAM(start, end, pstart);
1772 break;
1773 case ARMPDInvalidate_Data:
1774 case ARMPageInvalidate_Data:
1775 invalidateCacheRange_RAM(start, end, pstart);
1776 break;
1777 case ARMPDCleanInvalidate_Data:
1778 case ARMPageCleanInvalidate_Data:
1779 cleanInvalidateCacheRange_RAM(start, end, pstart);
1780 break;
1781 case ARMPDUnify_Instruction:
1782 case ARMPageUnify_Instruction:
1783 /* First clean data lines to point of unification
1784 (L2 cache)... */
1785 cleanCacheRange_PoU(start, end, pstart);
1786 /* Ensure it's been written. */
1787 dsb();
1788 /* ...then invalidate the corresponding instruction lines
1789 to point of unification... */
1790 invalidateCacheRange_I(start, end, pstart);
1791 /* ...then invalidate branch predictors. */
1792 branchFlushRange(start, end, pstart);
1793 /* Ensure new instructions come from fresh cache lines. */
1794 isb();
1795 break;
1796 default:
1797 fail("Invalid operation, shouldn't get here.\n");
1798 }
1799 }
1800
1801 /* ================= INVOCATION HANDLING STARTS HERE ================== */
1802
1803 static exception_t performPDFlush(int invLabel, pde_t *pd, asid_t asid, vptr_t start,
1804 vptr_t end, paddr_t pstart)
1805 {
1806 bool_t root_switched;
1807
1808 /* Flush if given a non zero range */
1809 if (start < end) {
1810 root_switched = setVMRootForFlush(pd, asid);
1811
1812 doFlush(invLabel, start, end, pstart);
1813
1814 if (root_switched) {
1815 setVMRoot(NODE_STATE(ksCurThread));
1816 }
1817 }
1818
1819 return EXCEPTION_NONE;
1820 }
1821
1822 static exception_t performPageTableInvocationMap(cap_t cap, cte_t *ctSlot,
1823 pde_t pde, pde_t *pdSlot)
1824 {
1825 ctSlot->cap = cap;
1826 *pdSlot = pde;
1827 cleanByVA_PoU((word_t)pdSlot, addrFromPPtr(pdSlot));
1828
1829 return EXCEPTION_NONE;
1830 }
1831
1832 static exception_t performPageTableInvocationUnmap(cap_t cap, cte_t *ctSlot)
1833 {
1834 if (cap_page_table_cap_get_capPTIsMapped(cap)) {
1835 pte_t *pt = PTE_PTR(cap_page_table_cap_get_capPTBasePtr(cap));
1836 unmapPageTable(
1837 cap_page_table_cap_get_capPTMappedASID(cap),
1838 cap_page_table_cap_get_capPTMappedAddress(cap),
1839 pt);
1840 clearMemory_PT((void *)pt, cap_get_capSizeBits(cap));
1841 }
1842 cap_page_table_cap_ptr_set_capPTIsMapped(&(ctSlot->cap), 0);
1843
1844 return EXCEPTION_NONE;
1845 }
1846
1847 static exception_t performPageInvocationMapPTE(asid_t asid, cap_t cap, cte_t *ctSlot, pte_t pte,
1848 pte_range_t pte_entries)
1849 {
1850 word_t i, j UNUSED;
1851 bool_t tlbflush_required;
1852
1853 ctSlot->cap = cap;
1854
1855 /* we only need to check the first entries because of how createSafeMappingEntries
1856 * works to preserve the consistency of tables */
1857 tlbflush_required = pteCheckIfMapped(pte_entries.base);
1858
1859 j = pte_entries.length;
1860 /** GHOSTUPD: "(\<acute>j <= 16, id)" */
1861
1862 #ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
1863 word_t base_address = pte_pte_small_get_address(pte);
1864 #endif
1865 for (i = 0; i < pte_entries.length; i++) {
1866 #ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
1867 pte = pte_pte_small_set_address(pte, base_address + i * BIT(pageBitsForSize(ARMSmallPage)));
1868 #endif
1869 pte_entries.base[i] = pte;
1870 }
1871 cleanCacheRange_PoU((word_t)pte_entries.base,
1872 LAST_BYTE_PTE(pte_entries.base, pte_entries.length),
1873 addrFromPPtr(pte_entries.base));
1874 if (unlikely(tlbflush_required)) {
1875 invalidateTLBByASID(asid);
1876 }
1877
1878 return EXCEPTION_NONE;
1879 }
1880
static exception_t performPageInvocationMapPDE(asid_t asid, cap_t cap, cte_t *ctSlot, pde_t pde,
                                               pde_range_t pde_entries)
{
    word_t i, j UNUSED;
    bool_t tlbflush_required;

    ctSlot->cap = cap;

    /* We only need to check the first entry: createSafeMappingEntries only
     * produces ranges whose entries are in a consistent state. */
    tlbflush_required = pdeCheckIfMapped(pde_entries.base);

    j = pde_entries.length;
    /** GHOSTUPD: "(\<acute>j <= 16, id)" */

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
    word_t base_address = pde_pde_section_get_address(pde);
#endif
    for (i = 0; i < pde_entries.length; i++) {
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        pde = pde_pde_section_set_address(pde, base_address + i * BIT(pageBitsForSize(ARMSection)));
#endif
        pde_entries.base[i] = pde;
    }
    cleanCacheRange_PoU((word_t)pde_entries.base,
                        LAST_BYTE_PDE(pde_entries.base, pde_entries.length),
                        addrFromPPtr(pde_entries.base));
    if (unlikely(tlbflush_required)) {
        invalidateTLBByASID(asid);
    }

    return EXCEPTION_NONE;
}

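/* Remove a frame's mapping (if any) and reset the mapping fields of the
 * frame cap so that the frame can later be mapped elsewhere. */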
static exception_t performPageInvocationUnmap(cap_t cap, cte_t *ctSlot)
{
    if (generic_frame_cap_get_capFIsMapped(cap)) {
        unmapPage(generic_frame_cap_get_capFSize(cap),
                  generic_frame_cap_get_capFMappedASID(cap),
                  generic_frame_cap_get_capFMappedAddress(cap),
                  (void *)generic_frame_cap_get_capFBasePtr(cap));
    }

    generic_frame_cap_ptr_set_capFMappedAddress(&ctSlot->cap, asidInvalid, 0);

    return EXCEPTION_NONE;
}

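/* As performPDFlush, but invoked on a frame cap: the range has already been
 * validated to lie within the single mapped frame. */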
static exception_t performPageFlush(int invLabel, pde_t *pd, asid_t asid, vptr_t start,
                                    vptr_t end, paddr_t pstart)
{
    bool_t root_switched;

    /* Now we can flush, but only if we were given a non-zero range. */
    if (start < end) {
        root_switched = setVMRootForFlush(pd, asid);

        doFlush(invLabel, start, end, pstart);

        if (root_switched) {
            setVMRoot(NODE_STATE(ksCurThread));
        }
    }

    return EXCEPTION_NONE;
}

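/* Return the physical address of a frame in the first message register,
 * with a one-word message info. */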
static exception_t performPageGetAddress(void *vbase_ptr)
{
    paddr_t capFBasePtr;

    /* Get the physical address of this frame. */
    capFBasePtr = addrFromPPtr(vbase_ptr);

    /* Return it in the first message register. */
    setRegister(NODE_STATE(ksCurThread), msgRegisters[0], capFBasePtr);
    setRegister(NODE_STATE(ksCurThread), msgInfoRegister,
                wordFromMessageInfo(seL4_MessageInfo_new(0, 0, 0, 1)));

    return EXCEPTION_NONE;
}

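/* Bind a page directory to a slot in an ASID pool and record the assigned
 * ASID in the PD cap. */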
static exception_t performASIDPoolInvocation(asid_t asid, asid_pool_t *poolPtr,
                                             cte_t *pdCapSlot)
{
    cap_page_directory_cap_ptr_set_capPDMappedASID(&pdCapSlot->cap, asid);
    cap_page_directory_cap_ptr_set_capPDIsMapped(&pdCapSlot->cap, 1);
    poolPtr->array[asid & MASK(asidLowBits)] =
        PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(pdCapSlot->cap));

    return EXCEPTION_NONE;
}

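/* Turn an untyped frame into an ASID pool: exhaust the parent untyped, zero
 * the frame, insert the new pool cap, and hook the pool into the global ASID
 * table. */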
static exception_t performASIDControlInvocation(void *frame, cte_t *slot,
                                                cte_t *parent, asid_t asid_base)
{
    /** AUXUPD: "(True, typ_region_bytes (ptr_val \<acute>frame) 12)" */
    /** GHOSTUPD: "(True, gs_clear_region (ptr_val \<acute>frame) 12)" */
    cap_untyped_cap_ptr_set_capFreeIndex(&(parent->cap),
                                         MAX_FREE_INDEX(cap_untyped_cap_get_capBlockSize(parent->cap)));

    memzero(frame, BIT(ARMSmallPageBits));
    /** AUXUPD: "(True, ptr_retyps 1 (Ptr (ptr_val \<acute>frame) :: asid_pool_C ptr))" */

    cteInsert(cap_asid_pool_cap_new(asid_base, WORD_REF(frame)),
              parent, slot);
    /* Haskell error: "ASID pool's base must be aligned" */
    assert((asid_base & MASK(asidLowBits)) == 0);
    armKSASIDTable[asid_base >> asidLowBits] = (asid_pool_t *)frame;

    return EXCEPTION_NONE;
}

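/* Decode a cache-maintenance invocation on a page-directory cap, e.g. (from
 * user level, via libsel4) seL4_ARM_PageDirectory_Clean_Data(pd, start, end).
 * start/end are virtual addresses in the PD's address space and must fall
 * within a single mapped frame. */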
static exception_t decodeARMPageDirectoryInvocation(word_t invLabel, word_t length,
                                                    cptr_t cptr, cte_t *cte, cap_t cap,
                                                    word_t *buffer)
{
    switch (invLabel) {
    case ARMPDClean_Data:
    case ARMPDInvalidate_Data:
    case ARMPDCleanInvalidate_Data:
    case ARMPDUnify_Instruction: {
        vptr_t start, end;
        paddr_t pstart;
        findPDForASID_ret_t find_ret;
        asid_t asid;
        pde_t *pd;
        resolve_ret_t resolve_ret;

        if (length < 2) {
            userError("PD Flush: Truncated message.");
            current_syscall_error.type = seL4_TruncatedMessage;
            return EXCEPTION_SYSCALL_ERROR;
        }

        start = getSyscallArg(0, buffer);
        end = getSyscallArg(1, buffer);

        /* Check sanity of arguments */
        if (end <= start) {
            userError("PD Flush: Invalid range");
            current_syscall_error.type = seL4_InvalidArgument;
            current_syscall_error.invalidArgumentNumber = 1;
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* Don't let applications flush kernel regions. */
        if (start >= USER_TOP || end > USER_TOP) {
            userError("PD Flush: Overlaps kernel region.");
            current_syscall_error.type = seL4_IllegalOperation;
            return EXCEPTION_SYSCALL_ERROR;
        }

        if (unlikely(cap_get_capType(cap) != cap_page_directory_cap ||
                     !cap_page_directory_cap_get_capPDIsMapped(cap))) {
            userError("PD Flush: Invalid cap.");
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 0;
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* Make sure that the supplied pd is ok */
        pd = PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(cap));
        asid = cap_page_directory_cap_get_capPDMappedASID(cap);

        find_ret = findPDForASID(asid);
        if (unlikely(find_ret.status != EXCEPTION_NONE)) {
            userError("PD Flush: No PD for ASID");
            current_syscall_error.type = seL4_FailedLookup;
            current_syscall_error.failedLookupWasSource = false;
            return EXCEPTION_SYSCALL_ERROR;
        }

        if (unlikely(find_ret.pd != pd)) {
            userError("PD Flush: Invalid PD Cap");
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 0;
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* Look up the frame containing 'start'. */
        resolve_ret = resolveVAddr(pd, start);

        /* Check that there's actually something there. */
        if (!resolve_ret.valid) {
            /* Fail silently, as there can't be any stale cached data (for the
             * given address space), and getting a syscall error because the
             * relevant page is non-resident would be 'astonishing'. */
            setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
            return EXCEPTION_NONE;
        }

        /* Refuse to cross a page boundary. */
        if (pageBase(start, resolve_ret.frameSize) !=
            pageBase(end - 1, resolve_ret.frameSize)) {
            userError("PD Flush: Range is across page boundary.");
            current_syscall_error.type = seL4_RangeError;
            current_syscall_error.rangeErrorMin = start;
            current_syscall_error.rangeErrorMax =
                pageBase(start, resolve_ret.frameSize) +
                MASK(pageBitsForSize(resolve_ret.frameSize));
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* Calculate the physical start address. */
        pstart = resolve_ret.frameBase
                 + (start & MASK(pageBitsForSize(resolve_ret.frameSize)));

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performPDFlush(invLabel, pd, asid, start, end - 1, pstart);
    }

    default:
        userError("PD: Invalid invocation number");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }
}

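/* Decode ARMPageTableMap/ARMPageTableUnmap. A map request carries the target
 * virtual address (arg 0), VM attributes (arg 1, unused under hyp) and a
 * mapped PD cap as the first extra cap. */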
static exception_t decodeARMPageTableInvocation(word_t invLabel, word_t length,
                                                cte_t *cte, cap_t cap, word_t *buffer)
{
    word_t vaddr, pdIndex;

#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
    vm_attributes_t attr;
#endif
    cap_t pdCap;
    pde_t *pd, *pdSlot;
    pde_t pde;
    asid_t asid;
    paddr_t paddr;

    if (invLabel == ARMPageTableUnmap) {
        if (unlikely(!isFinalCapability(cte))) {
            userError("ARMPageTableUnmap: Cannot unmap if more than one cap exists.");
            current_syscall_error.type = seL4_RevokeFirst;
            return EXCEPTION_SYSCALL_ERROR;
        }
        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performPageTableInvocationUnmap(cap, cte);
    }

    if (unlikely(invLabel != ARMPageTableMap)) {
        userError("ARMPageTable: Illegal operation.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (unlikely(length < 2 || current_extra_caps.excaprefs[0] == NULL)) {
        userError("ARMPageTableMap: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (unlikely(cap_page_table_cap_get_capPTIsMapped(cap))) {
        userError("ARMPageTableMap: Page table is already mapped into a page directory.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 0;

        return EXCEPTION_SYSCALL_ERROR;
    }

    vaddr = getSyscallArg(0, buffer);
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
    attr = vmAttributesFromWord(getSyscallArg(1, buffer));
#endif
    pdCap = current_extra_caps.excaprefs[0]->cap;

    if (unlikely(cap_get_capType(pdCap) != cap_page_directory_cap ||
                 !cap_page_directory_cap_get_capPDIsMapped(pdCap))) {
        userError("ARMPageTableMap: Invalid PD cap.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;

        return EXCEPTION_SYSCALL_ERROR;
    }

    pd = PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(pdCap));
    asid = cap_page_directory_cap_get_capPDMappedASID(pdCap);

    if (unlikely(vaddr >= USER_TOP)) {
        userError("ARMPageTableMap: Virtual address cannot be in kernel window. vaddr: 0x%08lx, USER_TOP: 0x%08x", vaddr,
                  USER_TOP);
        current_syscall_error.type = seL4_InvalidArgument;
        current_syscall_error.invalidArgumentNumber = 0;

        return EXCEPTION_SYSCALL_ERROR;
    }

    {
        findPDForASID_ret_t find_ret;

        find_ret = findPDForASID(asid);
        if (unlikely(find_ret.status != EXCEPTION_NONE)) {
            userError("ARMPageTableMap: ASID lookup failed.");
            current_syscall_error.type = seL4_FailedLookup;
            current_syscall_error.failedLookupWasSource = false;

            return EXCEPTION_SYSCALL_ERROR;
        }

        if (unlikely(find_ret.pd != pd)) {
            userError("ARMPageTableMap: ASID lookup yielded a different PD; invalid PD cap.");
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 1;

            return EXCEPTION_SYSCALL_ERROR;
        }
    }

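    /* Each PDE maps BIT(PAGE_BITS + PT_INDEX_BITS) bytes of virtual address
     * space; in the non-hyp configuration (PAGE_BITS == 12, PT_INDEX_BITS ==
     * 8) that is 1MiB, so, for example, vaddr 0x00345678 falls in pdIndex
     * 0x3. */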
    pdIndex = vaddr >> (PAGE_BITS + PT_INDEX_BITS);
    pdSlot = &pd[pdIndex];
    if (unlikely(pde_ptr_get_pdeType(pdSlot) != pde_pde_invalid)) {
        userError("ARMPageTableMap: Page directory already has an entry for the supplied address.");
        current_syscall_error.type = seL4_DeleteFirst;

        return EXCEPTION_SYSCALL_ERROR;
    }

    paddr = addrFromPPtr(
                PTE_PTR(cap_page_table_cap_get_capPTBasePtr(cap)));
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
    pde = pde_pde_coarse_new(
              paddr,
              vm_attributes_get_armParityEnabled(attr),
              0 /* Domain */
          );
#else
    pde = pde_pde_coarse_new(paddr);
#endif

    cap = cap_page_table_cap_set_capPTIsMapped(cap, 1);
    cap = cap_page_table_cap_set_capPTMappedASID(cap, asid);
    cap = cap_page_table_cap_set_capPTMappedAddress(cap, vaddr);

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return performPageTableInvocationMap(cap, cte, pde, pdSlot);
}

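/* Decode frame-cap invocations: map/unmap, cache maintenance on a mapped
 * frame, and ARMPageGetAddress. ARMPageMap expects the virtual address
 * (arg 0), a rights mask (arg 1), VM attributes (arg 2), and the target PD
 * as the first extra cap. */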
static exception_t decodeARMFrameInvocation(word_t invLabel, word_t length,
                                            cte_t *cte, cap_t cap, word_t *buffer)
{
    switch (invLabel) {
    case ARMPageMap: {
        word_t vaddr, vtop, w_rightsMask;
        paddr_t capFBasePtr;
        cap_t pdCap;
        pde_t *pd;
        asid_t asid;
        vm_rights_t capVMRights, vmRights;
        vm_page_size_t frameSize;
        vm_attributes_t attr;

        if (unlikely(length < 3 || current_extra_caps.excaprefs[0] == NULL)) {
            userError("ARMPageMap: Truncated message.");
            current_syscall_error.type = seL4_TruncatedMessage;

            return EXCEPTION_SYSCALL_ERROR;
        }

        vaddr = getSyscallArg(0, buffer);
        w_rightsMask = getSyscallArg(1, buffer);
        attr = vmAttributesFromWord(getSyscallArg(2, buffer));
        pdCap = current_extra_caps.excaprefs[0]->cap;

        frameSize = generic_frame_cap_get_capFSize(cap);
        capVMRights = generic_frame_cap_get_capFVMRights(cap);

        if (unlikely(cap_get_capType(pdCap) != cap_page_directory_cap ||
                     !cap_page_directory_cap_get_capPDIsMapped(pdCap))) {
            userError("ARMPageMap: Bad PageDirectory cap.");
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 1;

            return EXCEPTION_SYSCALL_ERROR;
        }
        pd = PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(pdCap));
        asid = cap_page_directory_cap_get_capPDMappedASID(pdCap);

        if (generic_frame_cap_get_capFIsMapped(cap)) {
            if (generic_frame_cap_get_capFMappedASID(cap) != asid) {
                current_syscall_error.type = seL4_InvalidCapability;
                current_syscall_error.invalidCapNumber = 1;

                return EXCEPTION_SYSCALL_ERROR;
            }

            if (generic_frame_cap_get_capFMappedAddress(cap) != vaddr) {
                userError("ARMPageMap: Attempting to map frame at multiple addresses");
                current_syscall_error.type = seL4_InvalidArgument;
                current_syscall_error.invalidArgumentNumber = 0;

                return EXCEPTION_SYSCALL_ERROR;
            }
        } else {
            vtop = vaddr + BIT(pageBitsForSize(frameSize)) - 1;

            if (unlikely(vtop >= USER_TOP)) {
                userError("ARMPageMap: Cannot map frame over kernel window. vaddr: 0x%08lx, USER_TOP: 0x%08x", vaddr, USER_TOP);
                current_syscall_error.type = seL4_InvalidArgument;
                current_syscall_error.invalidArgumentNumber = 0;

                return EXCEPTION_SYSCALL_ERROR;
            }
        }

        {
            findPDForASID_ret_t find_ret;

            find_ret = findPDForASID(asid);
            if (unlikely(find_ret.status != EXCEPTION_NONE)) {
                userError("ARMPageMap: No PD for ASID");
                current_syscall_error.type = seL4_FailedLookup;
                current_syscall_error.failedLookupWasSource = false;

                return EXCEPTION_SYSCALL_ERROR;
            }

            if (unlikely(find_ret.pd != pd)) {
                userError("ARMPageMap: ASID lookup yielded a different PD; invalid PD cap.");
                current_syscall_error.type = seL4_InvalidCapability;
                current_syscall_error.invalidCapNumber = 1;

                return EXCEPTION_SYSCALL_ERROR;
            }
        }

        vmRights = maskVMRights(capVMRights, rightsFromWord(w_rightsMask));

        if (unlikely(!checkVPAlignment(frameSize, vaddr))) {
            userError("ARMPageMap: Virtual address has incorrect alignment.");
            current_syscall_error.type = seL4_AlignmentError;

            return EXCEPTION_SYSCALL_ERROR;
        }

        capFBasePtr = addrFromPPtr((void *)
                                   generic_frame_cap_get_capFBasePtr(cap));

        cap = generic_frame_cap_set_capFMappedAddress(cap, asid, vaddr);
        if (frameSize == ARMSmallPage || frameSize == ARMLargePage) {
            create_mappings_pte_return_t map_ret;
            map_ret = createSafeMappingEntries_PTE(capFBasePtr, vaddr,
                                                   frameSize, vmRights,
                                                   attr, pd);
            if (unlikely(map_ret.status != EXCEPTION_NONE)) {
#ifdef CONFIG_PRINTING
                if (current_syscall_error.type == seL4_DeleteFirst) {
                    userError("ARMPageMap: Page table entry was not free.");
                }
#endif
                return map_ret.status;
            }

            setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
            return performPageInvocationMapPTE(asid, cap, cte,
                                               map_ret.pte,
                                               map_ret.pte_entries);
        } else {
            create_mappings_pde_return_t map_ret;
            map_ret = createSafeMappingEntries_PDE(capFBasePtr, vaddr,
                                                   frameSize, vmRights,
                                                   attr, pd);
            if (unlikely(map_ret.status != EXCEPTION_NONE)) {
#ifdef CONFIG_PRINTING
                if (current_syscall_error.type == seL4_DeleteFirst) {
                    userError("ARMPageMap: Page directory entry was not free.");
                }
#endif
                return map_ret.status;
            }

            setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
            return performPageInvocationMapPDE(asid, cap, cte,
                                               map_ret.pde,
                                               map_ret.pde_entries);
        }
    }

    case ARMPageUnmap: {
#ifdef CONFIG_TK1_SMMU
        if (isIOSpaceFrameCap(cap)) {
            setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
            return performPageInvocationUnmapIO(cap, cte);
        } else
#endif
        {
            setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
            return performPageInvocationUnmap(cap, cte);
        }
    }

#ifdef CONFIG_TK1_SMMU
    case ARMPageMapIO: {
        return decodeARMIOMapInvocation(invLabel, length, cte, cap, buffer);
    }
#endif

    case ARMPageClean_Data:
    case ARMPageInvalidate_Data:
    case ARMPageCleanInvalidate_Data:
    case ARMPageUnify_Instruction: {
        asid_t asid;
        vptr_t vaddr;
        findPDForASID_ret_t pd;
        vptr_t start, end;
        paddr_t pstart;
        word_t page_size;
        word_t page_base;

        if (length < 2) {
            userError("Page Flush: Truncated message.");
            current_syscall_error.type = seL4_TruncatedMessage;
            return EXCEPTION_SYSCALL_ERROR;
        }

        asid = generic_frame_cap_get_capFMappedASID(cap);
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        /* Must use kernel vaddr in hyp mode. */
        vaddr = generic_frame_cap_get_capFBasePtr(cap);
#else
        vaddr = generic_frame_cap_get_capFMappedAddress(cap);
#endif

        if (unlikely(!generic_frame_cap_get_capFIsMapped(cap))) {
            userError("Page Flush: Frame is not mapped.");
            current_syscall_error.type = seL4_IllegalOperation;
            return EXCEPTION_SYSCALL_ERROR;
        }

        pd = findPDForASID(asid);
        if (unlikely(pd.status != EXCEPTION_NONE)) {
            userError("Page Flush: No PD for ASID");
            current_syscall_error.type = seL4_FailedLookup;
            current_syscall_error.failedLookupWasSource = false;
            return EXCEPTION_SYSCALL_ERROR;
        }

        start = getSyscallArg(0, buffer);
        end = getSyscallArg(1, buffer);

        /* Check that the range is sane. */
        if (end <= start) {
            userError("Page Flush: Invalid range");
            current_syscall_error.type = seL4_InvalidArgument;
            current_syscall_error.invalidArgumentNumber = 1;
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* start and end are currently offsets relative to this page */
        page_size = BIT(pageBitsForSize(generic_frame_cap_get_capFSize(cap)));
        page_base = addrFromPPtr((void *)generic_frame_cap_get_capFBasePtr(cap));

        if (start >= page_size || end > page_size) {
            userError("Page Flush: Requested range not inside page");
            current_syscall_error.type = seL4_InvalidArgument;
            current_syscall_error.invalidArgumentNumber = 0;
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* Turn start and end into absolute addresses. */
        pstart = page_base + start;
        start += vaddr;
        end += vaddr;

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        /* Don't let applications flush outside of the kernel window. */
        if (pstart < physBase || ((end - start) + pstart) > PADDR_TOP) {
            userError("Page Flush: Overlaps kernel region.");
            current_syscall_error.type = seL4_IllegalOperation;
            return EXCEPTION_SYSCALL_ERROR;
        }
#endif

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performPageFlush(invLabel, pd.pd, asid, start, end - 1, pstart);
    }

    case ARMPageGetAddress: {
        /* Check that there are enough message registers */
        assert(n_msgRegisters >= 1);

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performPageGetAddress((void *)generic_frame_cap_get_capFBasePtr(cap));
    }

    default:
        userError("ARMPage: Illegal operation.");
        current_syscall_error.type = seL4_IllegalOperation;

        return EXCEPTION_SYSCALL_ERROR;
    }
}

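/* Top-level MMU decode: dispatch on the architecture cap type. For example,
 * mapping a 4KiB frame from user level is (roughly) the libsel4 call
 *
 *     seL4_ARM_Page_Map(frame, pd, vaddr, seL4_AllRights,
 *                       seL4_ARM_Default_VMAttributes);
 *
 * which arrives here as an ARMPageMap invocation on a small frame cap. */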
exception_t decodeARMMMUInvocation(word_t invLabel, word_t length, cptr_t cptr,
                                   cte_t *cte, cap_t cap, word_t *buffer)
{
    switch (cap_get_capType(cap)) {
    case cap_page_directory_cap:
        return decodeARMPageDirectoryInvocation(invLabel, length, cptr, cte,
                                                cap, buffer);

    case cap_page_table_cap:
        return decodeARMPageTableInvocation(invLabel, length, cte, cap, buffer);

    case cap_small_frame_cap:
    case cap_frame_cap:
        return decodeARMFrameInvocation(invLabel, length, cte, cap, buffer);

    case cap_asid_control_cap: {
        word_t i;
        asid_t asid_base;
        word_t index, depth;
        cap_t untyped, root;
        cte_t *parentSlot, *destSlot;
        lookupSlot_ret_t lu_ret;
        void *frame;
        exception_t status;

        if (unlikely(invLabel != ARMASIDControlMakePool)) {
            userError("ASIDControl: Illegal operation.");
            current_syscall_error.type = seL4_IllegalOperation;

            return EXCEPTION_SYSCALL_ERROR;
        }

        if (unlikely(length < 2 || current_extra_caps.excaprefs[0] == NULL
                     || current_extra_caps.excaprefs[1] == NULL)) {
            userError("ASIDControlMakePool: Truncated message.");
            current_syscall_error.type = seL4_TruncatedMessage;

            return EXCEPTION_SYSCALL_ERROR;
        }

        index = getSyscallArg(0, buffer);
        depth = getSyscallArg(1, buffer);
        parentSlot = current_extra_caps.excaprefs[0];
        untyped = parentSlot->cap;
        root = current_extra_caps.excaprefs[1]->cap;

        /* Find the first free pool. */
        for (i = 0; i < nASIDPools && armKSASIDTable[i]; i++);

        if (unlikely(i == nASIDPools)) { /* no unallocated pool was found */
            userError("ASIDControlMakePool: No free pools found.");
            current_syscall_error.type = seL4_DeleteFirst;

            return EXCEPTION_SYSCALL_ERROR;
        }

        asid_base = i << asidLowBits;

        if (unlikely(cap_get_capType(untyped) != cap_untyped_cap ||
                     cap_untyped_cap_get_capBlockSize(untyped) !=
                     seL4_ASIDPoolBits || cap_untyped_cap_get_capIsDevice(untyped))) {
            userError("ASIDControlMakePool: Invalid untyped cap.");
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 1;

            return EXCEPTION_SYSCALL_ERROR;
        }

        status = ensureNoChildren(parentSlot);
        if (unlikely(status != EXCEPTION_NONE)) {
            userError("ASIDControlMakePool: Untyped has children. Revoke first.");
            return status;
        }

        frame = WORD_PTR(cap_untyped_cap_get_capPtr(untyped));

        lu_ret = lookupTargetSlot(root, index, depth);
        if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
            userError("ASIDControlMakePool: Failed to lookup destination slot.");
            return lu_ret.status;
        }
        destSlot = lu_ret.slot;

        status = ensureEmptySlot(destSlot);
        if (unlikely(status != EXCEPTION_NONE)) {
            userError("ASIDControlMakePool: Destination slot not empty.");
            return status;
        }

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performASIDControlInvocation(frame, destSlot,
                                            parentSlot, asid_base);
    }

    case cap_asid_pool_cap: {
        cap_t pdCap;
        cte_t *pdCapSlot;
        asid_pool_t *pool;
        word_t i;
        asid_t asid;

        if (unlikely(invLabel != ARMASIDPoolAssign)) {
            userError("ASIDPool: Illegal operation.");
            current_syscall_error.type = seL4_IllegalOperation;

            return EXCEPTION_SYSCALL_ERROR;
        }

        if (unlikely(current_extra_caps.excaprefs[0] == NULL)) {
            userError("ASIDPoolAssign: Truncated message.");
            current_syscall_error.type = seL4_TruncatedMessage;

            return EXCEPTION_SYSCALL_ERROR;
        }

        pdCapSlot = current_extra_caps.excaprefs[0];
        pdCap = pdCapSlot->cap;

        if (unlikely(
                cap_get_capType(pdCap) != cap_page_directory_cap ||
                cap_page_directory_cap_get_capPDIsMapped(pdCap))) {
            userError("ASIDPoolAssign: Invalid page directory cap.");
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 1;

            return EXCEPTION_SYSCALL_ERROR;
        }

        pool = armKSASIDTable[cap_asid_pool_cap_get_capASIDBase(cap) >>
                              asidLowBits];
        if (unlikely(!pool)) {
            userError("ASIDPoolAssign: Failed to lookup pool.");
            current_syscall_error.type = seL4_FailedLookup;
            current_syscall_error.failedLookupWasSource = false;
            current_lookup_fault = lookup_fault_invalid_root_new();

            return EXCEPTION_SYSCALL_ERROR;
        }

        if (unlikely(pool != ASID_POOL_PTR(cap_asid_pool_cap_get_capASIDPool(cap)))) {
            userError("ASIDPoolAssign: Pool cap does not match the registered pool.");
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 0;

            return EXCEPTION_SYSCALL_ERROR;
        }

        /* Find the first free ASID, skipping ASID 0, which is reserved as
         * asidInvalid. */
        asid = cap_asid_pool_cap_get_capASIDBase(cap);
        for (i = 0; i < BIT(asidLowBits) && (asid + i == 0 || pool->array[i]); i++);

        if (unlikely(i == BIT(asidLowBits))) {
            userError("ASIDPoolAssign: No free ASID.");
            current_syscall_error.type = seL4_DeleteFirst;

            return EXCEPTION_SYSCALL_ERROR;
        }

        asid += i;

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performASIDPoolInvocation(asid, pool, pdCapSlot);
    }

    default:
        fail("Invalid ARM arch cap type");
    }
}

#ifdef CONFIG_KERNEL_LOG_BUFFER
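/* Map a user-supplied 1MiB frame as the kernel log buffer by pointing every
 * entry of the dedicated global log page table at it, then flushing the old
 * translations. */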
exception_t benchmark_arch_map_logBuffer(word_t frame_cptr)
{
    lookupCapAndSlot_ret_t lu_ret;
    vm_page_size_t frameSize;
    pptr_t frame_pptr;

    /* Look up the user-supplied frame cap; a failed lookup raises a cap fault. */
    lu_ret = lookupCapAndSlot(NODE_STATE(ksCurThread), frame_cptr);

    if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
        userError("Invalid cap #%lu.", frame_cptr);
        current_fault = seL4_Fault_CapFault_new(frame_cptr, false);

        return EXCEPTION_SYSCALL_ERROR;
    }

    if (cap_get_capType(lu_ret.cap) != cap_frame_cap) {
        userError("Invalid cap. The log buffer must be backed by a frame cap.");
        current_fault = seL4_Fault_CapFault_new(frame_cptr, false);

        return EXCEPTION_SYSCALL_ERROR;
    }

    frameSize = generic_frame_cap_get_capFSize(lu_ret.cap);

    if (frameSize != ARMSection) {
        userError("Invalid frame size. The kernel expects a 1MiB log buffer.");
        current_fault = seL4_Fault_CapFault_new(frame_cptr, false);

        return EXCEPTION_SYSCALL_ERROR;
    }

    frame_pptr = generic_frame_cap_get_capFBasePtr(lu_ret.cap);

    ksUserLogBuffer = pptr_to_paddr((void *) frame_pptr);

    for (int idx = 0; idx < BIT(PT_INDEX_BITS); idx++) {
        paddr_t physical_address = ksUserLogBuffer + (idx << seL4_PageBits);

        armKSGlobalLogPT[idx] =
            pte_pte_small_new(
                physical_address,
                0, /* global */
                0, /* Not shared */
                0, /* APX = 0, privileged full access */
                0, /* TEX = 0 */
                1, /* VMKernelOnly */
                1, /* Cacheable */
                0, /* Write-through to minimise perf hit */
                0  /* executable */
            );

        cleanByVA_PoU((vptr_t)&armKSGlobalLogPT[idx], addrFromKPPtr(&armKSGlobalLogPT[idx]));
        invalidateTranslationSingle(KS_LOG_PPTR + (idx * BIT(seL4_PageBits)));
    }

    return EXCEPTION_NONE;
}
#endif /* CONFIG_KERNEL_LOG_BUFFER */

#ifdef CONFIG_DEBUG_BUILD
void kernelPrefetchAbort(word_t pc) VISIBLE;
void kernelDataAbort(word_t pc) VISIBLE;

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT

void kernelUndefinedInstruction(word_t pc) VISIBLE;

void kernelPrefetchAbort(word_t pc)
{
    printf("\n\nKERNEL PREFETCH ABORT!\n");
    printf("Faulting instruction: 0x%"SEL4_PRIx_word"\n", pc);
    printf("HSR: 0x%"SEL4_PRIx_word"\n", getHSR());
    halt();
}

void kernelDataAbort(word_t pc)
{
    printf("\n\nKERNEL DATA ABORT!\n");
    printf("Faulting instruction: 0x%"SEL4_PRIx_word"\n", pc);
    printf("HDFAR: 0x%"SEL4_PRIx_word" HSR: 0x%"SEL4_PRIx_word"\n",
           getHDFAR(), getHSR());
    halt();
}

void kernelUndefinedInstruction(word_t pc)
{
    printf("\n\nKERNEL UNDEFINED INSTRUCTION!\n");
    printf("Faulting instruction: 0x%"SEL4_PRIx_word"\n", pc);
    printf("HSR: 0x%"SEL4_PRIx_word"\n", getHSR());
    halt();
}

#else /* !CONFIG_ARM_HYPERVISOR_SUPPORT */

void kernelPrefetchAbort(word_t pc)
{
    printf("\n\nKERNEL PREFETCH ABORT!\n");
    printf("Faulting instruction: 0x%"SEL4_PRIx_word"\n", pc);
    printf("IFSR: 0x%"SEL4_PRIx_word"\n", getIFSR());
    halt();
}

void kernelDataAbort(word_t pc)
{
    printf("\n\nKERNEL DATA ABORT!\n");
    printf("Faulting instruction: 0x%"SEL4_PRIx_word"\n", pc);
    printf("FAR: 0x%"SEL4_PRIx_word" DFSR: 0x%"SEL4_PRIx_word"\n",
           getFAR(), getDFSR());
    halt();
}
#endif /* CONFIG_ARM_HYPERVISOR_SUPPORT */

#endif /* CONFIG_DEBUG_BUILD */

#ifdef CONFIG_PRINTING
typedef struct readWordFromVSpace_ret {
    exception_t status;
    word_t value;
} readWordFromVSpace_ret_t;

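/* Best-effort read of a single word from another vspace: walk the PD/PT by
 * hand, translate the frame's physical address through the kernel's physical
 * memory window, and dereference. Used only for debug output, so a failed
 * lookup is reported to the caller rather than being fatal. */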
static readWordFromVSpace_ret_t readWordFromVSpace(pde_t *pd, word_t vaddr)
{
    readWordFromVSpace_ret_t ret;
    lookupPTSlot_ret_t ptSlot;
    pde_t *pdSlot;
    paddr_t paddr;
    word_t offset;
    pptr_t kernel_vaddr;
    word_t *value;

    pdSlot = lookupPDSlot(pd, vaddr);
    if (pde_ptr_get_pdeType(pdSlot) == pde_pde_section) {
        paddr = pde_pde_section_ptr_get_address(pdSlot);
        offset = vaddr & MASK(ARMSectionBits);
    } else {
        ptSlot = lookupPTSlot(pd, vaddr);
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        if (ptSlot.status == EXCEPTION_NONE && pte_ptr_get_pteType(ptSlot.ptSlot) == pte_pte_small) {
            paddr = pte_pte_small_ptr_get_address(ptSlot.ptSlot);
            if (pte_pte_small_ptr_get_contiguous_hint(ptSlot.ptSlot)) {
                offset = vaddr & MASK(ARMLargePageBits);
            } else {
                offset = vaddr & MASK(ARMSmallPageBits);
            }
#else
        if (ptSlot.status == EXCEPTION_NONE && pte_ptr_get_pteType(ptSlot.ptSlot) == pte_pte_small) {
            paddr = pte_pte_small_ptr_get_address(ptSlot.ptSlot);
            offset = vaddr & MASK(ARMSmallPageBits);
        } else if (ptSlot.status == EXCEPTION_NONE && pte_ptr_get_pteType(ptSlot.ptSlot) == pte_pte_large) {
            paddr = pte_pte_large_ptr_get_address(ptSlot.ptSlot);
            offset = vaddr & MASK(ARMLargePageBits);
#endif
        } else {
            ret.status = EXCEPTION_LOOKUP_FAULT;
            return ret;
        }
    }

    kernel_vaddr = (word_t)paddr_to_pptr(paddr);
    value = (word_t *)(kernel_vaddr + offset);
    ret.status = EXCEPTION_NONE;
    ret.value = *value;
    return ret;
}

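/* Print the top CONFIG_USER_STACK_TRACE_LENGTH words of a thread's user-mode
 * stack, e.g. as a debugging aid after an unhandled fault. */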
void Arch_userStackTrace(tcb_t *tptr)
{
    cap_t threadRoot;
    pde_t *pd;
    word_t sp;
    int i;

    threadRoot = TCB_PTR_CTE_PTR(tptr, tcbVTable)->cap;

    /* lookup the PD */
    if (cap_get_capType(threadRoot) != cap_page_directory_cap) {
        printf("Invalid vspace\n");
        return;
    }

    pd = (pde_t *)pptr_of_cap(threadRoot);

    sp = getRegister(tptr, SP);
    /* Check the alignment of SP so we don't have to worry about accessing
     * words that might straddle two different pages. */
    if (!IS_ALIGNED(sp, seL4_WordSizeBits)) {
        printf("SP not aligned\n");
        return;
    }

    for (i = 0; i < CONFIG_USER_STACK_TRACE_LENGTH; i++) {
        word_t address = sp + (i * sizeof(word_t));
        readWordFromVSpace_ret_t result;
        result = readWordFromVSpace(pd, address);
        if (result.status == EXCEPTION_NONE) {
            printf("0x%lx: 0x%lx\n", (long)address, (long)result.value);
        } else {
            printf("0x%lx: INVALID\n", (long)address);
        }
    }
}
#endif /* CONFIG_PRINTING */