/*
 * Copyright 2020, DornerWorks
 * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
 * Copyright 2015, 2016 Hesham Almatary <heshamelmatary@gmail.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include <types.h>
#include <benchmark/benchmark.h>
#include <api/failures.h>
#include <api/syscall.h>
#include <kernel/boot.h>
#include <kernel/cspace.h>
#include <kernel/thread.h>
#include <object/tcb.h>
#include <machine/io.h>
#include <model/preemption.h>
#include <model/statedata.h>
#include <object/cnode.h>
#include <object/untyped.h>
#include <arch/api/invocation.h>
#include <arch/kernel/vspace.h>
#include <linker.h>
#include <arch/machine.h>
#include <plat/machine/hardware.h>
#include <kernel/stack.h>
#include <util.h>

struct resolve_ret {
    paddr_t frameBase;
    vm_page_size_t frameSize;
    bool_t valid;
};
typedef struct resolve_ret resolve_ret_t;

static exception_t performPageGetAddress(void *vbase_ptr);

static word_t CONST RISCVGetWriteFromVMRights(vm_rights_t vm_rights)
{
    /* Write-only frame cap rights not currently supported. */
    return vm_rights == VMReadWrite;
}

static inline word_t CONST RISCVGetReadFromVMRights(vm_rights_t vm_rights)
{
    /* Write-only frame cap rights not currently supported.
     * Kernel-only conveys no user rights. */
    return vm_rights != VMKernelOnly;
}
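
/* Rights-to-PTE-bit summary for the two helpers above (a reference note
 * derived from the code, not new behaviour):
 *
 *   vm_rights     read bit  write bit
 *   VMKernelOnly      0         0
 *   VMReadOnly        1         0
 *   VMReadWrite       1         1
 */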

static inline bool_t isPTEPageTable(pte_t *pte)
{
    return pte_ptr_get_valid(pte) &&
           !(pte_ptr_get_read(pte) || pte_ptr_get_write(pte) || pte_ptr_get_execute(pte));
}
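
/* Background from the RISC-V privileged spec: a PTE with V=1 and R=W=X=0 is a
 * pointer to a next-level page table, while any of R/W/X set makes it a leaf.
 * A minimal sketch of the two cases this predicate distinguishes (pte_new
 * field order: ppn, sw, dirty, accessed, global, user, execute, write, read,
 * valid; values here are illustrative only):
 *
 *   pte_t table = pte_new(ppn, 0, 0, 0, 0, 0, 0, 0, 0, 1); // V=1, RWX=000
 *   pte_t leaf  = pte_new(ppn, 0, 1, 1, 0, 1, 0, 1, 1, 1); // V=1, R=W=1
 *   isPTEPageTable(&table); // true:  next-level pointer
 *   isPTEPageTable(&leaf);  // false: mapped page
 */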

/** Helper function meant only to be used for mapping the kernel
 * window.
 *
 * Maps all pages with full RWX and supervisor perms by default.
 */
static pte_t pte_next(word_t phys_addr, bool_t is_leaf)
{
    word_t ppn = (word_t)(phys_addr >> 12);

    uint8_t read = is_leaf ? 1 : 0;
    uint8_t write = read;
    uint8_t exec = read;

    return pte_new(ppn,
                   0,     /* sw */
                   1,     /* dirty */
                   1,     /* accessed */
                   1,     /* global */
                   0,     /* user */
                   exec,  /* execute */
                   write, /* write */
                   read,  /* read */
                   1      /* valid */
                  );
}
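
/* Worked example (illustrative only): pte_next(0x80000000, true) yields
 * ppn = 0x80000000 >> 12 = 0x80000 and a leaf PTE with RWX=111, G=1, U=0,
 * i.e. a supervisor-only mapping; with is_leaf = false the same call yields
 * RWX=000, which the hardware treats as a pointer to the next-level page
 * table (see isPTEPageTable above).
 */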

/* ==================== BOOT CODE STARTS HERE ==================== */

BOOT_CODE void map_kernel_frame(paddr_t paddr, pptr_t vaddr, vm_rights_t vm_rights)
{
#if __riscv_xlen == 32
    paddr = ROUND_DOWN(paddr, RISCV_GET_LVL_PGSIZE_BITS(0));
    assert((paddr % RISCV_GET_LVL_PGSIZE(0)) == 0);
    kernel_root_pageTable[RISCV_GET_PT_INDEX(vaddr, 0)] = pte_next(paddr, true);
#else
    if (vaddr >= KDEV_BASE) {
        /* Map devices in 2nd-level page table */
        paddr = ROUND_DOWN(paddr, RISCV_GET_LVL_PGSIZE_BITS(1));
        assert((paddr % RISCV_GET_LVL_PGSIZE(1)) == 0);
        kernel_image_level2_dev_pt[RISCV_GET_PT_INDEX(vaddr, 1)] = pte_next(paddr, true);
    } else {
        paddr = ROUND_DOWN(paddr, RISCV_GET_LVL_PGSIZE_BITS(0));
        assert((paddr % RISCV_GET_LVL_PGSIZE(0)) == 0);
        kernel_root_pageTable[RISCV_GET_PT_INDEX(vaddr, 0)] = pte_next(paddr, true);
    }
#endif
}
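
/* Granularity note (assuming the standard RISC-V paging configurations): on
 * RV32 (Sv32) a level-0 entry covers a 4MiB megapage, so devices are mapped
 * directly in the root table; on RV64 (Sv39) devices above KDEV_BASE are
 * mapped with 2MiB level-1 entries in kernel_image_level2_dev_pt rather than
 * spending a whole 1GiB root entry per device.
 */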

BOOT_CODE VISIBLE void map_kernel_window(void)
{
    /* mapping of KERNEL_ELF_BASE (virtual address) to the kernel's
     * KERNEL_ELF_PHYS_BASE */
    assert(CONFIG_PT_LEVELS > 1 && CONFIG_PT_LEVELS <= 4);

    /* kernel window starts at PPTR_BASE */
    word_t pptr = PPTR_BASE;

    /* first we map in memory from PADDR_BASE */
    word_t paddr = PADDR_BASE;
    while (pptr < PPTR_TOP) {
        assert(IS_ALIGNED(pptr, RISCV_GET_LVL_PGSIZE_BITS(0)));
        assert(IS_ALIGNED(paddr, RISCV_GET_LVL_PGSIZE_BITS(0)));

        kernel_root_pageTable[RISCV_GET_PT_INDEX(pptr, 0)] = pte_next(paddr, true);

        pptr += RISCV_GET_LVL_PGSIZE(0);
        paddr += RISCV_GET_LVL_PGSIZE(0);
    }
    /* now we should be mapping the 1GiB kernel base */
    assert(pptr == PPTR_TOP);
    pptr = ROUND_DOWN(KERNEL_ELF_BASE, RISCV_GET_LVL_PGSIZE_BITS(0));
    paddr = ROUND_DOWN(KERNEL_ELF_PADDR_BASE, RISCV_GET_LVL_PGSIZE_BITS(0));

#if __riscv_xlen == 32
    kernel_root_pageTable[RISCV_GET_PT_INDEX(pptr, 0)] = pte_next(paddr, true);
    pptr += RISCV_GET_LVL_PGSIZE(0);
    paddr += RISCV_GET_LVL_PGSIZE(0);
#ifdef CONFIG_KERNEL_LOG_BUFFER
    kernel_root_pageTable[RISCV_GET_PT_INDEX(KS_LOG_PPTR, 0)] =
        pte_next(kpptr_to_paddr(kernel_image_level2_log_buffer_pt), false);
#endif
#else
    word_t index = 0;
    /* The kernel image is mapped twice: two entries in the root page table
     * point to the same second-level page table.
     */
    kernel_root_pageTable[RISCV_GET_PT_INDEX(KERNEL_ELF_PADDR_BASE + PPTR_BASE_OFFSET, 0)] =
        pte_next(kpptr_to_paddr(kernel_image_level2_pt), false);
    kernel_root_pageTable[RISCV_GET_PT_INDEX(pptr, 0)] =
        pte_next(kpptr_to_paddr(kernel_image_level2_pt), false);
    while (pptr < PPTR_TOP + RISCV_GET_LVL_PGSIZE(0)) {
        kernel_image_level2_pt[index] = pte_next(paddr, true);
        index++;
        pptr += RISCV_GET_LVL_PGSIZE(1);
        paddr += RISCV_GET_LVL_PGSIZE(1);
    }

    /* Map kernel device page table */
    kernel_root_pageTable[RISCV_GET_PT_INDEX(KDEV_BASE, 0)] =
        pte_next(kpptr_to_paddr(kernel_image_level2_dev_pt), false);
#endif

    /* There should be 1GiB free where we put the device mapping */
    assert(pptr == UINTPTR_MAX - RISCV_GET_LVL_PGSIZE(0) + 1);
    map_kernel_devices();
}
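
/* Resulting layout sketch, assuming Sv39 (CONFIG_PT_LEVELS == 3) and the
 * default seL4 constants (an illustration, not normative):
 *
 *   PPTR_BASE .. PPTR_TOP    physical memory window, 1GiB root entries
 *   KERNEL_ELF_BASE region   kernel image, 2MiB entries via
 *                            kernel_image_level2_pt (shared by two root slots)
 *   KDEV_BASE upwards        devices, 2MiB entries via
 *                            kernel_image_level2_dev_pt
 */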

BOOT_CODE void map_it_pt_cap(cap_t vspace_cap, cap_t pt_cap)
{
    lookupPTSlot_ret_t pt_ret;
    pte_t *targetSlot;
    vptr_t vptr = cap_page_table_cap_get_capPTMappedAddress(pt_cap);
    pte_t *lvl1pt = PTE_PTR(pptr_of_cap(vspace_cap));

    /* pt to be mapped */
    pte_t *pt = PTE_PTR(pptr_of_cap(pt_cap));

    /* Get the PT slot to install the address in */
    pt_ret = lookupPTSlot(lvl1pt, vptr);

    targetSlot = pt_ret.ptSlot;

    *targetSlot = pte_new(
                      (addrFromPPtr(pt) >> seL4_PageBits),
                      0, /* sw */
                      1, /* dirty */
                      1, /* accessed */
                      0, /* global */
                      0, /* user */
                      0, /* execute */
                      0, /* write */
                      0, /* read */
                      1  /* valid */
                  );
    sfence();
}

BOOT_CODE void map_it_frame_cap(cap_t vspace_cap, cap_t frame_cap)
{
    pte_t *lvl1pt = PTE_PTR(pptr_of_cap(vspace_cap));
    pte_t *frame_pptr = PTE_PTR(pptr_of_cap(frame_cap));
    vptr_t frame_vptr = cap_frame_cap_get_capFMappedAddress(frame_cap);

    /* The initial thread's frames are always mapped as 4KiB pages */
    lookupPTSlot_ret_t lu_ret = lookupPTSlot(lvl1pt, frame_vptr);
    assert(lu_ret.ptBitsLeft == seL4_PageBits);

    pte_t *targetSlot = lu_ret.ptSlot;

    *targetSlot = pte_new(
                      (pptr_to_paddr(frame_pptr) >> seL4_PageBits),
                      0, /* sw */
                      1, /* dirty */
                      1, /* accessed */
                      0, /* global */
                      1, /* user */
                      1, /* execute */
                      1, /* write */
                      1, /* read */
                      1  /* valid */
                  );
    sfence();
}

BOOT_CODE cap_t create_unmapped_it_frame_cap(pptr_t pptr, bool_t use_large)
{
    cap_t cap = cap_frame_cap_new(
                    asidInvalid, /* capFMappedASID */
                    pptr,        /* capFBasePtr */
                    0,           /* capFSize */
                    0,           /* capFVMRights */
                    0,           /* capFIsDevice */
                    0            /* capFMappedAddress */
                );

    return cap;
}

/* Create a page table for the initial thread */
static BOOT_CODE cap_t create_it_pt_cap(cap_t vspace_cap, pptr_t pptr, vptr_t vptr, asid_t asid)
{
    cap_t cap;
    cap = cap_page_table_cap_new(
              asid, /* capPTMappedASID */
              pptr, /* capPTBasePtr */
              1,    /* capPTIsMapped */
              vptr  /* capPTMappedAddress */
          );

    map_it_pt_cap(vspace_cap, cap);
    return cap;
}

BOOT_CODE word_t arch_get_n_paging(v_region_t it_v_reg)
{
    word_t n = 0;
    for (int i = 0; i < CONFIG_PT_LEVELS - 1; i++) {
        n += get_n_paging(it_v_reg, RISCV_GET_LVL_PGSIZE_BITS(i));
    }
    return n;
}
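
/* Worked example (illustrative, assuming Sv39 where a level-0 entry covers
 * 1GiB and a level-1 entry covers 2MiB): for an initial-thread region spanning
 * 16MiB that does not cross a 1GiB boundary, get_n_paging returns 1 at level 0
 * (one second-level table) and 8 at level 1 (eight third-level tables, one per
 * 2MiB slice), so arch_get_n_paging reports 9 paging objects in total.
 */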

/* Create an address space for the initial thread.
 * This includes the root page table and the intermediate page tables. */
BOOT_CODE cap_t create_it_address_space(cap_t root_cnode_cap, v_region_t it_v_reg)
{
    cap_t lvl1pt_cap;
    vptr_t pt_vptr;

    copyGlobalMappings(PTE_PTR(rootserver.vspace));

    lvl1pt_cap =
        cap_page_table_cap_new(
            IT_ASID,                    /* capPTMappedASID */
            (word_t) rootserver.vspace, /* capPTBasePtr */
            1,                          /* capPTIsMapped */
            (word_t) rootserver.vspace  /* capPTMappedAddress */
        );

    seL4_SlotPos slot_pos_before = ndks_boot.slot_pos_cur;
    write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), seL4_CapInitThreadVSpace), lvl1pt_cap);

    /* create the page table caps at every level needed to cover the userland
     * image with 4KiB pages */
    for (int i = 0; i < CONFIG_PT_LEVELS - 1; i++) {

        for (pt_vptr = ROUND_DOWN(it_v_reg.start, RISCV_GET_LVL_PGSIZE_BITS(i));
             pt_vptr < it_v_reg.end;
             pt_vptr += RISCV_GET_LVL_PGSIZE(i)) {
            if (!provide_cap(root_cnode_cap,
                             create_it_pt_cap(lvl1pt_cap, it_alloc_paging(), pt_vptr, IT_ASID))
               ) {
                return cap_null_cap_new();
            }
        }

    }

    seL4_SlotPos slot_pos_after = ndks_boot.slot_pos_cur;
    ndks_boot.bi_frame->userImagePaging = (seL4_SlotRegion) {
        slot_pos_before, slot_pos_after
    };

    return lvl1pt_cap;
}

BOOT_CODE void activate_kernel_vspace(void)
{
    setVSpaceRoot(kpptr_to_paddr(&kernel_root_pageTable), 0);
}

BOOT_CODE void write_it_asid_pool(cap_t it_ap_cap, cap_t it_lvl1pt_cap)
{
    asid_pool_t *ap = ASID_POOL_PTR(pptr_of_cap(it_ap_cap));
    ap->array[IT_ASID] = PTE_PTR(pptr_of_cap(it_lvl1pt_cap));
    riscvKSASIDTable[IT_ASID >> asidLowBits] = ap;
}

/* ==================== BOOT CODE FINISHES HERE ==================== */

static findVSpaceForASID_ret_t findVSpaceForASID(asid_t asid)
{
    findVSpaceForASID_ret_t ret;
    asid_pool_t *poolPtr;
    pte_t *vspace_root;

    poolPtr = riscvKSASIDTable[asid >> asidLowBits];
    if (!poolPtr) {
        current_lookup_fault = lookup_fault_invalid_root_new();

        ret.vspace_root = NULL;
        ret.status = EXCEPTION_LOOKUP_FAULT;
        return ret;
    }

    vspace_root = poolPtr->array[asid & MASK(asidLowBits)];
    if (!vspace_root) {
        current_lookup_fault = lookup_fault_invalid_root_new();

        ret.vspace_root = NULL;
        ret.status = EXCEPTION_LOOKUP_FAULT;
        return ret;
    }

    ret.vspace_root = vspace_root;
    ret.status = EXCEPTION_NONE;
    return ret;
}

void copyGlobalMappings(pte_t *newLvl1pt)
{
    unsigned long i;
    pte_t *global_kernel_vspace = kernel_root_pageTable;

    for (i = RISCV_GET_PT_INDEX(PPTR_BASE, 0); i < BIT(PT_INDEX_BITS); i++) {
        newLvl1pt[i] = global_kernel_vspace[i];
    }
}
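
/* Worked example (illustrative, assuming Sv39 with PPTR_BASE =
 * 0xFFFFFFC000000000 and PT_INDEX_BITS == 9): RISCV_GET_PT_INDEX(PPTR_BASE, 0)
 * is 256, so the loop copies root entries 256..511, i.e. the entire kernel
 * half of the address space, into every new top-level page table. User
 * mappings only ever touch entries below that index, so the copies never
 * diverge from the kernel's root table.
 */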

word_t *PURE lookupIPCBuffer(bool_t isReceiver, tcb_t *thread)
{
    word_t w_bufferPtr;
    cap_t bufferCap;
    vm_rights_t vm_rights;

    w_bufferPtr = thread->tcbIPCBuffer;
    bufferCap = TCB_PTR_CTE_PTR(thread, tcbBuffer)->cap;

    if (unlikely(cap_get_capType(bufferCap) != cap_frame_cap)) {
        return NULL;
    }
    if (unlikely(cap_frame_cap_get_capFIsDevice(bufferCap))) {
        return NULL;
    }

    vm_rights = cap_frame_cap_get_capFVMRights(bufferCap);
    if (likely(vm_rights == VMReadWrite ||
               (!isReceiver && vm_rights == VMReadOnly))) {
        word_t basePtr, pageBits;

        basePtr = cap_frame_cap_get_capFBasePtr(bufferCap);
        pageBits = pageBitsForSize(cap_frame_cap_get_capFSize(bufferCap));
        return (word_t *)(basePtr + (w_bufferPtr & MASK(pageBits)));
    } else {
        return NULL;
    }
}

static inline pte_t *getPPtrFromHWPTE(pte_t *pte)
{
    return PTE_PTR(ptrFromPAddr(pte_ptr_get_ppn(pte) << seL4_PageTableBits));
}

lookupPTSlot_ret_t lookupPTSlot(pte_t *lvl1pt, vptr_t vptr)
{
    lookupPTSlot_ret_t ret;

    word_t level = CONFIG_PT_LEVELS - 1;
    pte_t *pt = lvl1pt;

    /* this is how many bits we potentially have left to decode. Initially we
     * have the full address space to decode, and every time we walk this will
     * be reduced. The final value of this after the walk is the size of the
     * frame that can be inserted, or already exists, in ret.ptSlot. The
     * following formulation is an invariant of the loop: */
    ret.ptBitsLeft = PT_INDEX_BITS * level + seL4_PageBits;
    ret.ptSlot = pt + ((vptr >> ret.ptBitsLeft) & MASK(PT_INDEX_BITS));

    while (isPTEPageTable(ret.ptSlot) && likely(0 < level)) {
        level--;
        ret.ptBitsLeft -= PT_INDEX_BITS;
        pt = getPPtrFromHWPTE(ret.ptSlot);
        ret.ptSlot = pt + ((vptr >> ret.ptBitsLeft) & MASK(PT_INDEX_BITS));
    }

    return ret;
}
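
/* Worked walk (illustrative, assuming Sv39: CONFIG_PT_LEVELS == 3,
 * PT_INDEX_BITS == 9, seL4_PageBits == 12): the walk starts at level 2 with
 * ptBitsLeft = 9 * 2 + 12 = 30. Each descent through a page-table PTE
 * subtracts 9, so a complete walk ends with ptBitsLeft == 12 and ret.ptSlot
 * pointing at a 4KiB leaf slot. If the walk stops early, e.g. because a
 * level-1 slot is empty, ptBitsLeft == 21 tells the caller that a 2MiB frame
 * (or another page table) is what fits there.
 */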

exception_t handleVMFault(tcb_t *thread, vm_fault_type_t vm_faultType)
{
    uint64_t addr;

    addr = read_stval();

    switch (vm_faultType) {
    case RISCVLoadPageFault:
    case RISCVLoadAccessFault:
        current_fault = seL4_Fault_VMFault_new(addr, RISCVLoadAccessFault, false);
        return EXCEPTION_FAULT;
    case RISCVStorePageFault:
    case RISCVStoreAccessFault:
        current_fault = seL4_Fault_VMFault_new(addr, RISCVStoreAccessFault, false);
        return EXCEPTION_FAULT;
    case RISCVInstructionPageFault:
    case RISCVInstructionAccessFault:
        current_fault = seL4_Fault_VMFault_new(addr, RISCVInstructionAccessFault, true);
        return EXCEPTION_FAULT;

    default:
        fail("Invalid VM fault type");
    }
}
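
/* Note on the fault encoding above: page faults and access faults of the same
 * class are collapsed into the access-fault code before being reported, and
 * the final argument of seL4_Fault_VMFault_new marks whether the fault was an
 * instruction fetch (true for the instruction cases, false for loads and
 * stores).
 */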

void deleteASIDPool(asid_t asid_base, asid_pool_t *pool)
{
    /* Haskell error: "ASID pool's base must be aligned" */
    assert(IS_ALIGNED(asid_base, asidLowBits));

    if (riscvKSASIDTable[asid_base >> asidLowBits] == pool) {
        riscvKSASIDTable[asid_base >> asidLowBits] = NULL;
        setVMRoot(NODE_STATE(ksCurThread));
    }
}

static exception_t performASIDControlInvocation(void *frame, cte_t *slot, cte_t *parent, asid_t asid_base)
{
    /** AUXUPD: "(True, typ_region_bytes (ptr_val \<acute>frame) 12)" */
    /** GHOSTUPD: "(True, gs_clear_region (ptr_val \<acute>frame) 12)" */
    cap_untyped_cap_ptr_set_capFreeIndex(&(parent->cap),
                                         MAX_FREE_INDEX(cap_untyped_cap_get_capBlockSize(parent->cap)));

    memzero(frame, BIT(pageBitsForSize(RISCV_4K_Page)));
    /** AUXUPD: "(True, ptr_retyps 1 (Ptr (ptr_val \<acute>frame) :: asid_pool_C ptr))" */

    cteInsert(
        cap_asid_pool_cap_new(
            asid_base,      /* capASIDBase */
            WORD_REF(frame) /* capASIDPool */
        ),
        parent,
        slot
    );
    /* Haskell error: "ASID pool's base must be aligned" */
    assert((asid_base & MASK(asidLowBits)) == 0);
    riscvKSASIDTable[asid_base >> asidLowBits] = (asid_pool_t *)frame;

    return EXCEPTION_NONE;
}

static exception_t performASIDPoolInvocation(asid_t asid, asid_pool_t *poolPtr, cte_t *vspaceCapSlot)
{
    cap_t cap = vspaceCapSlot->cap;
    pte_t *regionBase = PTE_PTR(cap_page_table_cap_get_capPTBasePtr(cap));
    cap = cap_page_table_cap_set_capPTMappedASID(cap, asid);
    cap = cap_page_table_cap_set_capPTMappedAddress(cap, 0);
    cap = cap_page_table_cap_set_capPTIsMapped(cap, 1);
    vspaceCapSlot->cap = cap;

    copyGlobalMappings(regionBase);

    poolPtr->array[asid & MASK(asidLowBits)] = regionBase;

    return EXCEPTION_NONE;
}

void deleteASID(asid_t asid, pte_t *vspace)
{
    asid_pool_t *poolPtr;

    poolPtr = riscvKSASIDTable[asid >> asidLowBits];
    if (poolPtr != NULL && poolPtr->array[asid & MASK(asidLowBits)] == vspace) {
        hwASIDFlush(asid);
        poolPtr->array[asid & MASK(asidLowBits)] = NULL;
        setVMRoot(NODE_STATE(ksCurThread));
    }
}

void unmapPageTable(asid_t asid, vptr_t vptr, pte_t *target_pt)
{
    findVSpaceForASID_ret_t find_ret = findVSpaceForASID(asid);
    if (unlikely(find_ret.status != EXCEPTION_NONE)) {
        /* nothing to do */
        return;
    }
    /* We won't ever unmap a top level page table */
    assert(find_ret.vspace_root != target_pt);
    pte_t *ptSlot = NULL;
    pte_t *pt = find_ret.vspace_root;

    for (word_t i = 0; i < CONFIG_PT_LEVELS - 1 && pt != target_pt; i++) {
        ptSlot = pt + RISCV_GET_PT_INDEX(vptr, i);
        if (unlikely(!isPTEPageTable(ptSlot))) {
            /* couldn't find it */
            return;
        }
        pt = getPPtrFromHWPTE(ptSlot);
    }

    if (pt != target_pt) {
        /* didn't find it */
        return;
    }
    /* If we found a pt then ptSlot won't be null */
    assert(ptSlot != NULL);
    *ptSlot = pte_new(
                  0, /* phy_address */
                  0, /* sw */
                  0, /* dirty */
                  0, /* accessed */
                  0, /* global */
                  0, /* user */
                  0, /* execute */
                  0, /* write */
                  0, /* read */
                  0  /* valid */
              );
    sfence();
}

static pte_t pte_pte_invalid_new(void)
{
    return (pte_t) {
        0
    };
}

void unmapPage(vm_page_size_t page_size, asid_t asid, vptr_t vptr, pptr_t pptr)
{
    findVSpaceForASID_ret_t find_ret;
    lookupPTSlot_ret_t lu_ret;

    find_ret = findVSpaceForASID(asid);
    if (find_ret.status != EXCEPTION_NONE) {
        return;
    }

    lu_ret = lookupPTSlot(find_ret.vspace_root, vptr);
    if (unlikely(lu_ret.ptBitsLeft != pageBitsForSize(page_size))) {
        return;
    }
    if (!pte_ptr_get_valid(lu_ret.ptSlot) || isPTEPageTable(lu_ret.ptSlot)
        || (pte_ptr_get_ppn(lu_ret.ptSlot) << seL4_PageBits) != pptr_to_paddr((void *)pptr)) {
        return;
    }

    lu_ret.ptSlot[0] = pte_pte_invalid_new();
    sfence();
}

void setVMRoot(tcb_t *tcb)
{
    cap_t threadRoot;
    asid_t asid;
    pte_t *lvl1pt;
    findVSpaceForASID_ret_t find_ret;

    threadRoot = TCB_PTR_CTE_PTR(tcb, tcbVTable)->cap;

    if (cap_get_capType(threadRoot) != cap_page_table_cap) {
        setVSpaceRoot(kpptr_to_paddr(&kernel_root_pageTable), 0);
        return;
    }

    lvl1pt = PTE_PTR(cap_page_table_cap_get_capPTBasePtr(threadRoot));

    asid = cap_page_table_cap_get_capPTMappedASID(threadRoot);
    find_ret = findVSpaceForASID(asid);
    if (unlikely(find_ret.status != EXCEPTION_NONE || find_ret.vspace_root != lvl1pt)) {
        setVSpaceRoot(kpptr_to_paddr(&kernel_root_pageTable), 0);
        return;
    }

    setVSpaceRoot(addrFromPPtr(lvl1pt), asid);
}

bool_t CONST isValidVTableRoot(cap_t cap)
{
    return (cap_get_capType(cap) == cap_page_table_cap &&
            cap_page_table_cap_get_capPTIsMapped(cap));
}

exception_t checkValidIPCBuffer(vptr_t vptr, cap_t cap)
{
    if (unlikely(cap_get_capType(cap) != cap_frame_cap)) {
        userError("Requested IPC Buffer is not a frame cap.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (unlikely(cap_frame_cap_get_capFIsDevice(cap))) {
        userError("Specifying a device frame as an IPC buffer is not permitted.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (unlikely(!IS_ALIGNED(vptr, seL4_IPCBufferSizeBits))) {
        userError("Requested IPC Buffer location 0x%x is not aligned.",
                  (int)vptr);
        current_syscall_error.type = seL4_AlignmentError;
        return EXCEPTION_SYSCALL_ERROR;
    }

    return EXCEPTION_NONE;
}

vm_rights_t CONST maskVMRights(vm_rights_t vm_rights, seL4_CapRights_t cap_rights_mask)
{
    if (vm_rights == VMReadOnly && seL4_CapRights_get_capAllowRead(cap_rights_mask)) {
        return VMReadOnly;
    }
    if (vm_rights == VMReadWrite && seL4_CapRights_get_capAllowRead(cap_rights_mask)) {
        if (!seL4_CapRights_get_capAllowWrite(cap_rights_mask)) {
            return VMReadOnly;
        } else {
            return VMReadWrite;
        }
    }
    return VMKernelOnly;
}
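
/* Behaviour summary for maskVMRights (derived from the code above; masking
 * can only ever remove rights, never add them):
 *
 *   vm_rights    mask allows      result
 *   VMReadOnly   read             VMReadOnly
 *   VMReadWrite  read only        VMReadOnly
 *   VMReadWrite  read and write   VMReadWrite
 *   anything     no read          VMKernelOnly
 */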

/* The rest of the file implements the RISCV object invocations */

static pte_t CONST makeUserPTE(paddr_t paddr, bool_t executable, vm_rights_t vm_rights)
{
    word_t write = RISCVGetWriteFromVMRights(vm_rights);
    word_t read = RISCVGetReadFromVMRights(vm_rights);
    if (unlikely(!read && !write && !executable)) {
        return pte_pte_invalid_new();
    } else {
        return pte_new(
                   paddr >> seL4_PageBits,
                   0,          /* sw */
                   1,          /* dirty */
                   1,          /* accessed */
                   0,          /* global */
                   1,          /* user */
                   executable, /* execute */
                   write,      /* write */
                   read,       /* read */
                   1           /* valid */
               );
    }
}

static inline bool_t CONST checkVPAlignment(vm_page_size_t sz, word_t w)
{
    return (w & MASK(pageBitsForSize(sz))) == 0;
}
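
/* Quick examples (illustrative, assuming RV64 where megapages are 2MiB, so
 * pageBitsForSize(RISCV_Mega_Page) == 21):
 *
 *   checkVPAlignment(RISCV_4K_Page,   0x401000); // true:  4KiB-aligned
 *   checkVPAlignment(RISCV_Mega_Page, 0x400000); // true:  2MiB-aligned
 *   checkVPAlignment(RISCV_Mega_Page, 0x401000); // false: only 4KiB-aligned
 */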

static exception_t decodeRISCVPageTableInvocation(word_t label, word_t length,
                                                  cte_t *cte, cap_t cap, word_t *buffer)
{
    if (label == RISCVPageTableUnmap) {
        if (unlikely(!isFinalCapability(cte))) {
            userError("RISCVPageTableUnmap: cannot unmap if more than one cap exists");
            current_syscall_error.type = seL4_RevokeFirst;
            return EXCEPTION_SYSCALL_ERROR;
        }
        /* Ensure that if the page table is mapped, it is not a top level table */
        if (likely(cap_page_table_cap_get_capPTIsMapped(cap))) {
            asid_t asid = cap_page_table_cap_get_capPTMappedASID(cap);
            findVSpaceForASID_ret_t find_ret = findVSpaceForASID(asid);
            pte_t *pte = PTE_PTR(cap_page_table_cap_get_capPTBasePtr(cap));
            if (unlikely(find_ret.status == EXCEPTION_NONE &&
                         find_ret.vspace_root == pte)) {
                userError("RISCVPageTableUnmap: cannot call unmap on top level PageTable");
                current_syscall_error.type = seL4_RevokeFirst;
                return EXCEPTION_SYSCALL_ERROR;
            }
        }

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performPageTableInvocationUnmap(cap, cte);
    }

    if (unlikely((label != RISCVPageTableMap))) {
        userError("RISCVPageTable: Illegal Operation");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (unlikely(length < 2 || current_extra_caps.excaprefs[0] == NULL)) {
        userError("RISCVPageTable: truncated message");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }
    if (unlikely(cap_page_table_cap_get_capPTIsMapped(cap))) {
        userError("RISCVPageTable: PageTable is already mapped.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 0;
        return EXCEPTION_SYSCALL_ERROR;
    }

    word_t vaddr = getSyscallArg(0, buffer);
    cap_t lvl1ptCap = current_extra_caps.excaprefs[0]->cap;

    if (unlikely(cap_get_capType(lvl1ptCap) != cap_page_table_cap ||
                 cap_page_table_cap_get_capPTIsMapped(lvl1ptCap) == asidInvalid)) {
        userError("RISCVPageTableMap: Invalid top-level PageTable.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;

        return EXCEPTION_SYSCALL_ERROR;
    }

    pte_t *lvl1pt = PTE_PTR(cap_page_table_cap_get_capPTBasePtr(lvl1ptCap));
    asid_t asid = cap_page_table_cap_get_capPTMappedASID(lvl1ptCap);

    if (unlikely(vaddr >= USER_TOP)) {
        userError("RISCVPageTableMap: Virtual address cannot be in kernel window.");
        current_syscall_error.type = seL4_InvalidArgument;
        current_syscall_error.invalidArgumentNumber = 0;

        return EXCEPTION_SYSCALL_ERROR;
    }

    findVSpaceForASID_ret_t find_ret = findVSpaceForASID(asid);
    if (unlikely(find_ret.status != EXCEPTION_NONE)) {
        userError("RISCVPageTableMap: ASID lookup failed");
        current_syscall_error.type = seL4_FailedLookup;
        current_syscall_error.failedLookupWasSource = false;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (unlikely(find_ret.vspace_root != lvl1pt)) {
        userError("RISCVPageTableMap: ASID lookup failed");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }

    lookupPTSlot_ret_t lu_ret = lookupPTSlot(lvl1pt, vaddr);

    /* if there is already something mapped (valid is set) or we have traversed
     * far enough that a page table is not valid to map then tell the user that
     * they have to delete something before they can put a PT here */
    if (lu_ret.ptBitsLeft == seL4_PageBits || pte_ptr_get_valid(lu_ret.ptSlot)) {
        userError("RISCVPageTableMap: All objects mapped at this address");
        current_syscall_error.type = seL4_DeleteFirst;
        return EXCEPTION_SYSCALL_ERROR;
    }

    /* Get the slot to install the PT in */
    pte_t *ptSlot = lu_ret.ptSlot;

    paddr_t paddr = addrFromPPtr(
                        PTE_PTR(cap_page_table_cap_get_capPTBasePtr(cap)));
    pte_t pte = pte_new((paddr >> seL4_PageBits),
                        0, /* sw */
                        1, /* dirty */
                        1, /* accessed */
                        0, /* global */
                        0, /* user */
                        0, /* execute */
                        0, /* write */
                        0, /* read */
                        1  /* valid */
                       );

    cap = cap_page_table_cap_set_capPTIsMapped(cap, 1);
    cap = cap_page_table_cap_set_capPTMappedASID(cap, asid);
    cap = cap_page_table_cap_set_capPTMappedAddress(cap, (vaddr & ~MASK(lu_ret.ptBitsLeft)));

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return performPageTableInvocationMap(cap, cte, pte, ptSlot);
}

static exception_t decodeRISCVFrameInvocation(word_t label, word_t length,
                                              cte_t *cte, cap_t cap, word_t *buffer)
{
    switch (label) {
    case RISCVPageMap: {
        if (unlikely(length < 3 || current_extra_caps.excaprefs[0] == NULL)) {
            userError("RISCVPageMap: Truncated message.");
            current_syscall_error.type = seL4_TruncatedMessage;
            return EXCEPTION_SYSCALL_ERROR;
        }

        word_t vaddr = getSyscallArg(0, buffer);
        word_t w_rightsMask = getSyscallArg(1, buffer);
        vm_attributes_t attr = vmAttributesFromWord(getSyscallArg(2, buffer));
        cap_t lvl1ptCap = current_extra_caps.excaprefs[0]->cap;

        vm_page_size_t frameSize = cap_frame_cap_get_capFSize(cap);
        vm_rights_t capVMRights = cap_frame_cap_get_capFVMRights(cap);

        if (unlikely(cap_get_capType(lvl1ptCap) != cap_page_table_cap ||
                     !cap_page_table_cap_get_capPTIsMapped(lvl1ptCap))) {
            userError("RISCVPageMap: Bad PageTable cap.");
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 1;
            return EXCEPTION_SYSCALL_ERROR;
        }

        pte_t *lvl1pt = PTE_PTR(cap_page_table_cap_get_capPTBasePtr(lvl1ptCap));
        asid_t asid = cap_page_table_cap_get_capPTMappedASID(lvl1ptCap);

        findVSpaceForASID_ret_t find_ret = findVSpaceForASID(asid);
        if (unlikely(find_ret.status != EXCEPTION_NONE)) {
            userError("RISCVPageMap: No PageTable for ASID");
            current_syscall_error.type = seL4_FailedLookup;
            current_syscall_error.failedLookupWasSource = false;
            return EXCEPTION_SYSCALL_ERROR;
        }

        if (unlikely(find_ret.vspace_root != lvl1pt)) {
            userError("RISCVPageMap: ASID lookup failed");
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 1;
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* check the vaddr is valid */
        word_t vtop = vaddr + BIT(pageBitsForSize(frameSize)) - 1;
        if (unlikely(vtop >= USER_TOP)) {
            current_syscall_error.type = seL4_InvalidArgument;
            current_syscall_error.invalidArgumentNumber = 0;
            return EXCEPTION_SYSCALL_ERROR;
        }
        if (unlikely(!checkVPAlignment(frameSize, vaddr))) {
            current_syscall_error.type = seL4_AlignmentError;
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* Check if this page is already mapped */
        lookupPTSlot_ret_t lu_ret = lookupPTSlot(lvl1pt, vaddr);
        if (unlikely(lu_ret.ptBitsLeft != pageBitsForSize(frameSize))) {
            current_lookup_fault = lookup_fault_missing_capability_new(lu_ret.ptBitsLeft);
            current_syscall_error.type = seL4_FailedLookup;
            current_syscall_error.failedLookupWasSource = false;
            return EXCEPTION_SYSCALL_ERROR;
        }

        asid_t frame_asid = cap_frame_cap_get_capFMappedASID(cap);
        if (unlikely(frame_asid != asidInvalid)) {
            /* this frame is already mapped */
            if (frame_asid != asid) {
                userError("RISCVPageMap: Attempting to remap a frame that does not belong to the passed address space");
                current_syscall_error.type = seL4_InvalidCapability;
                current_syscall_error.invalidCapNumber = 1;
                return EXCEPTION_SYSCALL_ERROR;
            }
            word_t mapped_vaddr = cap_frame_cap_get_capFMappedAddress(cap);
            if (unlikely(mapped_vaddr != vaddr)) {
                userError("RISCVPageMap: attempting to map frame into multiple addresses");
                current_syscall_error.type = seL4_InvalidArgument;
                current_syscall_error.invalidArgumentNumber = 0;
                return EXCEPTION_SYSCALL_ERROR;
            }
            /* this check is redundant, as lookupPTSlot does not stop on a page
             * table PTE */
            if (unlikely(isPTEPageTable(lu_ret.ptSlot))) {
                userError("RISCVPageMap: no mapping to remap.");
                current_syscall_error.type = seL4_DeleteFirst;
                return EXCEPTION_SYSCALL_ERROR;
            }
        } else {
            /* check this vaddr isn't already mapped */
            if (unlikely(pte_ptr_get_valid(lu_ret.ptSlot))) {
                userError("Virtual address already mapped");
                current_syscall_error.type = seL4_DeleteFirst;
                return EXCEPTION_SYSCALL_ERROR;
            }
        }

        vm_rights_t vmRights = maskVMRights(capVMRights, rightsFromWord(w_rightsMask));
        paddr_t frame_paddr = addrFromPPtr((void *) cap_frame_cap_get_capFBasePtr(cap));
        cap = cap_frame_cap_set_capFMappedASID(cap, asid);
        cap = cap_frame_cap_set_capFMappedAddress(cap, vaddr);

        bool_t executable = !vm_attributes_get_riscvExecuteNever(attr);
        pte_t pte = makeUserPTE(frame_paddr, executable, vmRights);
        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performPageInvocationMapPTE(cap, cte, pte, lu_ret.ptSlot);
    }

    case RISCVPageUnmap: {
        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performPageInvocationUnmap(cap, cte);
    }

    case RISCVPageGetAddress: {

        /* Check that there are enough message registers */
        assert(n_msgRegisters >= 1);

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performPageGetAddress((void *)cap_frame_cap_get_capFBasePtr(cap));
    }

    default:
        userError("RISCVPage: Illegal operation.");
        current_syscall_error.type = seL4_IllegalOperation;

        return EXCEPTION_SYSCALL_ERROR;
    }

}

exception_t decodeRISCVMMUInvocation(word_t label, word_t length, cptr_t cptr,
                                     cte_t *cte, cap_t cap, word_t *buffer)
{
    switch (cap_get_capType(cap)) {

    case cap_page_table_cap:
        return decodeRISCVPageTableInvocation(label, length, cte, cap, buffer);

    case cap_frame_cap:
        return decodeRISCVFrameInvocation(label, length, cte, cap, buffer);

    case cap_asid_control_cap: {
        word_t i;
        asid_t asid_base;
        word_t index;
        word_t depth;
        cap_t untyped;
        cap_t root;
        cte_t *parentSlot;
        cte_t *destSlot;
        lookupSlot_ret_t lu_ret;
        void *frame;
        exception_t status;

        if (label != RISCVASIDControlMakePool) {
            current_syscall_error.type = seL4_IllegalOperation;

            return EXCEPTION_SYSCALL_ERROR;
        }

        if (length < 2 || current_extra_caps.excaprefs[0] == NULL
            || current_extra_caps.excaprefs[1] == NULL) {
            current_syscall_error.type = seL4_TruncatedMessage;
            return EXCEPTION_SYSCALL_ERROR;
        }

        index = getSyscallArg(0, buffer);
        depth = getSyscallArg(1, buffer);
        parentSlot = current_extra_caps.excaprefs[0];
        untyped = parentSlot->cap;
        root = current_extra_caps.excaprefs[1]->cap;

        /* Find first free pool */
        for (i = 0; i < nASIDPools && riscvKSASIDTable[i]; i++);

        if (i == nASIDPools) {
            /* no unallocated pool is found */
            current_syscall_error.type = seL4_DeleteFirst;

            return EXCEPTION_SYSCALL_ERROR;
        }

        asid_base = i << asidLowBits;

        if (cap_get_capType(untyped) != cap_untyped_cap ||
            cap_untyped_cap_get_capBlockSize(untyped) != seL4_ASIDPoolBits ||
            cap_untyped_cap_get_capIsDevice(untyped)) {
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 1;

            return EXCEPTION_SYSCALL_ERROR;
        }

        status = ensureNoChildren(parentSlot);
        if (status != EXCEPTION_NONE) {
            return status;
        }

        frame = WORD_PTR(cap_untyped_cap_get_capPtr(untyped));

        lu_ret = lookupTargetSlot(root, index, depth);
        if (lu_ret.status != EXCEPTION_NONE) {
            return lu_ret.status;
        }
        destSlot = lu_ret.slot;

        status = ensureEmptySlot(destSlot);
        if (status != EXCEPTION_NONE) {
            return status;
        }

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performASIDControlInvocation(frame, destSlot, parentSlot, asid_base);
    }

    case cap_asid_pool_cap: {
        cap_t vspaceCap;
        cte_t *vspaceCapSlot;
        asid_pool_t *pool;
        word_t i;
        asid_t asid;

        if (label != RISCVASIDPoolAssign) {
            current_syscall_error.type = seL4_IllegalOperation;

            return EXCEPTION_SYSCALL_ERROR;
        }
        if (current_extra_caps.excaprefs[0] == NULL) {
            current_syscall_error.type = seL4_TruncatedMessage;

            return EXCEPTION_SYSCALL_ERROR;
        }

        vspaceCapSlot = current_extra_caps.excaprefs[0];
        vspaceCap = vspaceCapSlot->cap;

        if (unlikely(
                cap_get_capType(vspaceCap) != cap_page_table_cap ||
                cap_page_table_cap_get_capPTIsMapped(vspaceCap))) {
            userError("RISCVASIDPool: Invalid vspace root.");
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 1;

            return EXCEPTION_SYSCALL_ERROR;
        }

        pool = riscvKSASIDTable[cap_asid_pool_cap_get_capASIDBase(cap) >> asidLowBits];
        if (!pool) {
            current_syscall_error.type = seL4_FailedLookup;
            current_syscall_error.failedLookupWasSource = false;
            current_lookup_fault = lookup_fault_invalid_root_new();
            return EXCEPTION_SYSCALL_ERROR;
        }

        if (pool != ASID_POOL_PTR(cap_asid_pool_cap_get_capASIDPool(cap))) {
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 0;
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* Find first free ASID */
        asid = cap_asid_pool_cap_get_capASIDBase(cap);
        for (i = 0; i < BIT(asidLowBits) && (asid + i == 0 || pool->array[i]); i++);

        if (i == BIT(asidLowBits)) {
            current_syscall_error.type = seL4_DeleteFirst;

            return EXCEPTION_SYSCALL_ERROR;
        }

        asid += i;

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performASIDPoolInvocation(asid, pool, vspaceCapSlot);
    }
    default:
        fail("Invalid arch cap type");
    }
}

exception_t performPageTableInvocationMap(cap_t cap, cte_t *ctSlot,
                                          pte_t pte, pte_t *ptSlot)
{
    ctSlot->cap = cap;
    *ptSlot = pte;
    sfence();

    return EXCEPTION_NONE;
}

exception_t performPageTableInvocationUnmap(cap_t cap, cte_t *ctSlot)
{
    if (cap_page_table_cap_get_capPTIsMapped(cap)) {
        pte_t *pt = PTE_PTR(cap_page_table_cap_get_capPTBasePtr(cap));
        unmapPageTable(
            cap_page_table_cap_get_capPTMappedASID(cap),
            cap_page_table_cap_get_capPTMappedAddress(cap),
            pt
        );
        clearMemory((void *)pt, seL4_PageTableBits);
    }
    cap_page_table_cap_ptr_set_capPTIsMapped(&(ctSlot->cap), 0);

    return EXCEPTION_NONE;
}

static exception_t performPageGetAddress(void *vbase_ptr)
{
    paddr_t capFBasePtr;

    /* Get the physical address of this frame. */
    capFBasePtr = addrFromPPtr(vbase_ptr);

    /* return it in the first message register */
    setRegister(NODE_STATE(ksCurThread), msgRegisters[0], capFBasePtr);
    setRegister(NODE_STATE(ksCurThread), msgInfoRegister,
                wordFromMessageInfo(seL4_MessageInfo_new(0, 0, 0, 1)));

    return EXCEPTION_NONE;
}

static exception_t updatePTE(pte_t pte, pte_t *base)
{
    *base = pte;
    sfence();
    return EXCEPTION_NONE;
}

exception_t performPageInvocationMapPTE(cap_t cap, cte_t *ctSlot,
                                        pte_t pte, pte_t *base)
{
    ctSlot->cap = cap;
    return updatePTE(pte, base);
}

exception_t performPageInvocationUnmap(cap_t cap, cte_t *ctSlot)
{
    if (cap_frame_cap_get_capFMappedASID(cap) != asidInvalid) {
        unmapPage(cap_frame_cap_get_capFSize(cap),
                  cap_frame_cap_get_capFMappedASID(cap),
                  cap_frame_cap_get_capFMappedAddress(cap),
                  cap_frame_cap_get_capFBasePtr(cap)
                 );
    }

    cap_t slotCap = ctSlot->cap;
    slotCap = cap_frame_cap_set_capFMappedAddress(slotCap, 0);
    slotCap = cap_frame_cap_set_capFMappedASID(slotCap, asidInvalid);
    ctSlot->cap = slotCap;

    return EXCEPTION_NONE;
}

#ifdef CONFIG_PRINTING
void Arch_userStackTrace(tcb_t *tptr)
{
    cap_t threadRoot = TCB_PTR_CTE_PTR(tptr, tcbVTable)->cap;
    if (!isValidVTableRoot(threadRoot)) {
        printf("Invalid vspace\n");
        return;
    }

    word_t sp = getRegister(tptr, SP);
    if (!IS_ALIGNED(sp, seL4_WordSizeBits)) {
        printf("SP %p not aligned", (void *) sp);
        return;
    }

    pte_t *vspace_root = PTE_PTR(pptr_of_cap(threadRoot));
    for (int i = 0; i < CONFIG_USER_STACK_TRACE_LENGTH; i++) {
        word_t address = sp + (i * sizeof(word_t));
        lookupPTSlot_ret_t ret = lookupPTSlot(vspace_root, address);
        if (pte_ptr_get_valid(ret.ptSlot) && !isPTEPageTable(ret.ptSlot)) {
            pptr_t pptr = (pptr_t)(getPPtrFromHWPTE(ret.ptSlot));
            word_t *value = (word_t *)((word_t)pptr + (address & MASK(ret.ptBitsLeft)));
            printf("0x%lx: 0x%lx\n", (long) address, (long) *value);
        } else {
            printf("0x%lx: INVALID\n", (long) address);
        }
    }
}
#endif

#ifdef CONFIG_KERNEL_LOG_BUFFER
exception_t benchmark_arch_map_logBuffer(word_t frame_cptr)
{
    lookupCapAndSlot_ret_t lu_ret;
    vm_page_size_t frameSize;
    pptr_t frame_pptr;

    /* faulting section */
    lu_ret = lookupCapAndSlot(NODE_STATE(ksCurThread), frame_cptr);

    if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
        userError("Invalid cap #%lu.", frame_cptr);
        current_fault = seL4_Fault_CapFault_new(frame_cptr, false);

        return EXCEPTION_SYSCALL_ERROR;
    }

    if (cap_get_capType(lu_ret.cap) != cap_frame_cap) {
        userError("Invalid cap. Log buffer should be a frame cap");
        current_fault = seL4_Fault_CapFault_new(frame_cptr, false);

        return EXCEPTION_SYSCALL_ERROR;
    }

    frameSize = cap_frame_cap_get_capFSize(lu_ret.cap);

    if (frameSize != RISCV_Mega_Page) {
        userError("Invalid frame size. The kernel expects a large page log buffer");
        current_fault = seL4_Fault_CapFault_new(frame_cptr, false);

        return EXCEPTION_SYSCALL_ERROR;
    }

    frame_pptr = cap_frame_cap_get_capFBasePtr(lu_ret.cap);

    ksUserLogBuffer = pptr_to_paddr((void *) frame_pptr);

#if __riscv_xlen == 32
    paddr_t physical_address = ksUserLogBuffer;
    for (word_t i = 0; i < BIT(PT_INDEX_BITS); i += 1) {
        kernel_image_level2_log_buffer_pt[i] = pte_next(physical_address, true);
        physical_address += BIT(PAGE_BITS);
    }
    assert(physical_address - ksUserLogBuffer == BIT(seL4_LargePageBits));
#else
    kernel_image_level2_dev_pt[RISCV_GET_PT_INDEX(KS_LOG_PPTR, 1)] = pte_next(ksUserLogBuffer, true);
#endif

    sfence();

    return EXCEPTION_NONE;
}
#endif /* CONFIG_KERNEL_LOG_BUFFER */