/*
 * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include <config.h>
#include <types.h>
#include <benchmark/benchmark.h>
#include <api/failures.h>
#include <api/syscall.h>
#include <kernel/boot.h>
#include <kernel/cspace.h>
#include <kernel/thread.h>
#include <kernel/stack.h>
#include <machine/io.h>
#include <machine/debug.h>
#include <model/statedata.h>
#include <object/cnode.h>
#include <object/untyped.h>
#include <arch/api/invocation.h>
#include <arch/kernel/vspace.h>
#include <linker.h>
#include <plat/machine/hardware.h>
#include <armv/context_switch.h>
#include <arch/object/iospace.h>
#include <arch/object/vcpu.h>
#include <arch/machine/tlb.h>

#define RESERVED 3

/*
 * Memory types are defined in the Memory Attribute Indirection Register (MAIR):
 *  - nGnRnE:    Device, non-Gathering, non-Reordering, no Early write acknowledgement
 *  - nGnRE:     Device, non-Gathering, non-Reordering, Early write acknowledgement (unused)
 *  - GRE:       Device, Gathering, Reordering, Early write acknowledgement (unused)
 *  - NORMAL_NC: Normal memory, Inner/Outer non-cacheable
 *  - NORMAL:    Normal memory, Inner/Outer Write-back non-transient, Write-allocate, Read-allocate
 * Note: these indices must match the contents of the MAIR_EL1 register!
 */
enum mair_types {
    DEVICE_nGnRnE = 0,
    DEVICE_nGnRE = 1,
    DEVICE_GRE = 2,
    NORMAL_NC = 3,
    NORMAL = 4
};
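
/* For illustration only: one MAIR_EL1 value consistent with the indices above,
 * using the standard ARMv8-A attribute encodings, would be
 *   Attr0 = 0x00 (Device-nGnRnE), Attr1 = 0x04 (Device-nGnRE),
 *   Attr2 = 0x0c (Device-GRE),    Attr3 = 0x44 (Normal, non-cacheable),
 *   Attr4 = 0xff (Normal, Inner/Outer Write-back, RW-allocate),
 * i.e. MAIR_EL1 = 0x000000ff440c0400. The authoritative value is whatever the
 * boot code actually writes to MAIR_EL1; this sketch is not taken from this file.
 */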

/* Stage-2 translation memory attributes */
enum mair_s2_types {
    S2_DEVICE_nGnRnE = 0,
    S2_DEVICE_nGnRE = 1,
    S2_DEVICE_nGRE  = 2,
    S2_DEVICE_GRE = 3,

    S2_NORMAL_INNER_NC_OUTER_NC = 5,
    S2_NORMAL_INNER_WTC_OUTER_NC = 6,
    S2_NORMAL_INNER_WBC_OUTER_NC = 7,

    S2_NORMAL_INNER_NC_OUTER_WTC = 9,
    S2_NORMAL_INNER_WTC_OUTER_WTC = 10,
    S2_NORMAL_INNER_WBC_OUTER_WTC = 11,

    S2_NORMAL_INNER_NC_OUTER_WBC = 13,
    S2_NORMAL_INNER_WTC_OUTER_WBC = 14,
    S2_NORMAL_INNER_WBC_OUTER_WBC = 15,

    S2_NORMAL = S2_NORMAL_INNER_WBC_OUTER_WBC
};
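
/* The apparent gaps in the values above follow from the stage-2 MemAttr[3:0]
 * encoding: bits [3:2] select the outer attribute and bits [1:0] the inner
 * attribute (0b01 = NC, 0b10 = WTC, 0b11 = WBC). For example,
 * S2_NORMAL_INNER_WBC_OUTER_WBC = 0b1111 = 15. Values 0-3 (outer bits 0b00)
 * encode device memory, and 4, 8 and 12 (inner bits 0b00) are reserved.
 */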

/* Leif from Linaro said the big.LITTLE clusters should be treated as
 * inner shareable, and we believe this is correct, although Example B2-1 in
 * ARM ARM DDI 0487B.b (ID092517) suggests otherwise.
 */

#define SMP_SHARE   3

struct lookupPGDSlot_ret {
    exception_t status;
    pgde_t *pgdSlot;
};
typedef struct lookupPGDSlot_ret lookupPGDSlot_ret_t;

struct lookupPUDSlot_ret {
    exception_t status;
    pude_t *pudSlot;
};
typedef struct lookupPUDSlot_ret lookupPUDSlot_ret_t;

struct lookupPDSlot_ret {
    exception_t status;
    pde_t *pdSlot;
};
typedef struct lookupPDSlot_ret lookupPDSlot_ret_t;

struct lookupPTSlot_ret {
    exception_t status;
    pte_t *ptSlot;
};
typedef struct lookupPTSlot_ret lookupPTSlot_ret_t;

struct lookupFrame_ret {
    paddr_t frameBase;
    vm_page_size_t frameSize;
    bool_t valid;
};
typedef struct lookupFrame_ret lookupFrame_ret_t;

struct findVSpaceForASID_ret {
    exception_t status;
    vspace_root_t *vspace_root;
};
typedef struct findVSpaceForASID_ret findVSpaceForASID_ret_t;

/* Stage-1 access permissions:
 * AP[2:1]  higher EL       EL0
 *   00       rw            None
 *   01       rw            rw
 *   10       r             None
 *   11       r             r
 *
 * Stage-2 access permissions:
 * S2AP    Access from Non-secure EL1 or Non-secure EL0
 *  00                      None
 *  01                      r
 *  10                      w
 *  11                      rw
 *
 * For VMs or native seL4 applications, if hypervisor support
 * is enabled, we use the S2AP. The kernel itself running in
 * EL2 still uses the stage-1 AP format.
 */

static word_t CONST APFromVMRights(vm_rights_t vm_rights)
{
    switch (vm_rights) {
    case VMKernelOnly:
        /* Both the stage-1 AP and the S2AP encode "no EL0 access" as 0 */
        return 0;

    case VMReadWrite:
        if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) {
            return 3;
        } else {
            return 1;
        }

    case VMKernelReadOnly:
        if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) {
            /* S2AP has no equivalent of kernel-read-only, return None */
            return 0;
        } else {
            return 2;
        }

    case VMReadOnly:
        if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) {
            return 1;
        } else {
            return 3;
        }

    default:
        fail("Invalid VM rights");
    }
}

vm_rights_t CONST maskVMRights(vm_rights_t vm_rights, seL4_CapRights_t cap_rights_mask)
{
    if (vm_rights == VMReadOnly &&
        seL4_CapRights_get_capAllowRead(cap_rights_mask)) {
        return VMReadOnly;
    }
    if (vm_rights == VMReadWrite &&
        seL4_CapRights_get_capAllowRead(cap_rights_mask)) {
        if (!seL4_CapRights_get_capAllowWrite(cap_rights_mask)) {
            return VMReadOnly;
        } else {
            return VMReadWrite;
        }
    }
    if (vm_rights == VMReadWrite &&
        !seL4_CapRights_get_capAllowRead(cap_rights_mask) &&
        seL4_CapRights_get_capAllowWrite(cap_rights_mask)) {
        userError("Attempted to make unsupported write only mapping");
    }
    return VMKernelOnly;
}
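
/* Illustrative summary: masking VMReadWrite with rights that allow read but
 * not write yields VMReadOnly; a write-only combination cannot be expressed
 * by the stage-1 AP format, so it is demoted to VMKernelOnly after a warning,
 * as is any combination that allows neither read nor write.
 */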

/* ==================== BOOT CODE STARTS HERE ==================== */

/* Bit 54 of a descriptor is defined as UXN (unprivileged execute-never) for
 * stage 1 of any translation regime for which stage 1 translation can support
 * two VA ranges. This field applies only to execution at EL0. A value
 * of 0 indicates that this control permits execution.
 *
 * Bit 54 is defined as XN (execute-never) for stage 1 of any translation
 * regime for which the stage 1 translation can support only a single VA range,
 * or for stage 2 translation when ARMv8.2-TTS2UXN is not implemented.
 * This field applies to execution at any exception level to which the stage of
 * translation applies. A value of 0 indicates that this control permits execution.
 *
 * When the kernel is running in EL2, the stage-1 translation supports only one
 * VA range, so bit 54 is XN. Setting the bit to 0 allows execution.
 */
BOOT_CODE void map_kernel_frame(paddr_t paddr, pptr_t vaddr, vm_rights_t vm_rights, vm_attributes_t attributes)
{
    assert(vaddr >= PPTR_TOP);

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
    word_t uxn = vm_attributes_get_armExecuteNever(attributes);
#else
    word_t uxn = 1; /* unprivileged execute never */
#endif /* CONFIG_ARM_HYPERVISOR_SUPPORT */
    word_t attr_index;
    word_t shareable;
    if (vm_attributes_get_armPageCacheable(attributes)) {
        attr_index = NORMAL;
        shareable = SMP_TERNARY(SMP_SHARE, 0);
    } else {
        attr_index = DEVICE_nGnRnE;
        shareable = 0;
    }
    armKSGlobalKernelPT[GET_PT_INDEX(vaddr)] = pte_new(uxn, paddr,
                                                       0, /* global */
                                                       1, /* access flag */
                                                       shareable,
                                                       APFromVMRights(vm_rights),
                                                       attr_index,
                                                       RESERVED);
}
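
/* For example (hypothetical addresses, attribute constructor assumed rather
 * than taken from this file), a call such as
 *   map_kernel_frame(0x09000000, PPTR_TOP + 0x1000, VMKernelOnly, attrs);
 * with a non-cacheable 'attrs' installs a 4 KiB, kernel-only, Device-nGnRnE
 * mapping in armKSGlobalKernelPT.
 */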

BOOT_CODE void map_kernel_window(void)
{
    paddr_t paddr;
    pptr_t vaddr;
    word_t idx;

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
    /* verify that the kernel window is at the second entry of the PGD */
    assert(GET_PGD_INDEX(PPTR_BASE) == 1);
#else
    /* verify that the kernel window is at the last entry of the PGD */
    assert(GET_PGD_INDEX(PPTR_BASE) == BIT(PGD_INDEX_BITS) - 1);
#endif
    assert(IS_ALIGNED(PPTR_BASE, seL4_LargePageBits));
    /* verify that the kernel device window is 1 GiB aligned and 1 GiB in size */
    assert(GET_PUD_INDEX(PPTR_TOP) == BIT(PUD_INDEX_BITS) - 1);
    assert(IS_ALIGNED(PPTR_TOP, seL4_HugePageBits));

    /* place the PUD into the PGD */
    armKSGlobalKernelPGD[GET_PGD_INDEX(PPTR_BASE)] = pgde_pgde_pud_new(
                                                         addrFromKPPtr(armKSGlobalKernelPUD));

    /* place all PDs except the last one into the PUD */
    for (idx = GET_PUD_INDEX(PPTR_BASE); idx < GET_PUD_INDEX(PPTR_TOP); idx++) {
        armKSGlobalKernelPUD[idx] = pude_pude_pd_new(
                                        addrFromKPPtr(&armKSGlobalKernelPDs[idx][0])
                                    );
    }

    /* map the kernel window using large pages */
    vaddr = PPTR_BASE;
    for (paddr = PADDR_BASE; paddr < PADDR_TOP; paddr += BIT(seL4_LargePageBits)) {
        armKSGlobalKernelPDs[GET_PUD_INDEX(vaddr)][GET_PD_INDEX(vaddr)] = pde_pde_large_new(
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                                                                              0, // XN
#else
                                                                              1, // UXN
#endif
                                                                              paddr,
                                                                              0,                         /* global */
                                                                              1,                         /* access flag */
                                                                              SMP_TERNARY(SMP_SHARE, 0), /* inner-shareable if SMP enabled, otherwise unshared */
                                                                              0,                         /* VMKernelOnly */
                                                                              NORMAL
                                                                          );
        vaddr += BIT(seL4_LargePageBits);
    }

    /* put the PD into the PUD for the device window */
    armKSGlobalKernelPUD[GET_PUD_INDEX(PPTR_TOP)] = pude_pude_pd_new(
                                                        addrFromKPPtr(&armKSGlobalKernelPDs[BIT(PUD_INDEX_BITS) - 1][0])
                                                    );

    /* put the PT into the PD for the device window */
    armKSGlobalKernelPDs[BIT(PUD_INDEX_BITS) - 1][BIT(PD_INDEX_BITS) - 1] = pde_pde_small_new(
                                                                                addrFromKPPtr(armKSGlobalKernelPT)
                                                                            );

    map_kernel_devices();
}
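
/* The resulting kernel address space, as set up above: one PGD entry points
 * at the single kernel PUD; all PUD entries except the last point at PDs that
 * map [PPTR_BASE, PPTR_TOP) with 2 MiB large pages; the last PUD entry covers
 * the 1 GiB device window, whose last 2 MiB slot points to the 4 KiB-page
 * table armKSGlobalKernelPT, which map_kernel_frame() fills in.
 */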

/* When hypervisor support is enabled, the stage-2 translation table format
 * is used for applications.
 * The global bit is always 0.
 * The memory attributes use the S2 translation values.
 */
static BOOT_CODE void map_it_frame_cap(cap_t vspace_cap, cap_t frame_cap, bool_t executable)
{
    vspace_root_t *vspaceRoot = VSPACE_PTR(pptr_of_cap(vspace_cap));
    pude_t *pud;
    pde_t *pd;
    pte_t *pt;

    vptr_t vptr = cap_frame_cap_get_capFMappedAddress(frame_cap);
    void *pptr = (void *)cap_frame_cap_get_capFBasePtr(frame_cap);

    assert(cap_frame_cap_get_capFMappedASID(frame_cap) != 0);

#ifdef AARCH64_VSPACE_S2_START_L1
    pud = vspaceRoot;
#else
    vspaceRoot += GET_PGD_INDEX(vptr);
    assert(pgde_pgde_pud_ptr_get_present(vspaceRoot));
    pud = paddr_to_pptr(pgde_pgde_pud_ptr_get_pud_base_address(vspaceRoot));
#endif
    pud += GET_UPUD_INDEX(vptr);
    assert(pude_pude_pd_ptr_get_present(pud));
    pd = paddr_to_pptr(pude_pude_pd_ptr_get_pd_base_address(pud));
    pd += GET_PD_INDEX(vptr);
    assert(pde_pde_small_ptr_get_present(pd));
    pt = paddr_to_pptr(pde_pde_small_ptr_get_pt_base_address(pd));
    *(pt + GET_PT_INDEX(vptr)) = pte_new(
                                     !executable,                    /* unprivileged execute never */
                                     pptr_to_paddr(pptr),            /* page_base_address */
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                                     0,
#else
                                     1,                              /* not global */
#endif
                                     1,                              /* access flag */
                                     SMP_TERNARY(SMP_SHARE, 0),      /* inner-shareable if SMP enabled, otherwise unshared */
                                     APFromVMRights(VMReadWrite),
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                                     S2_NORMAL,
#else
                                     NORMAL,
#endif
                                     RESERVED
                                 );
}

static BOOT_CODE cap_t create_it_frame_cap(pptr_t pptr, vptr_t vptr, asid_t asid, bool_t use_large)
{
    vm_page_size_t frame_size;
    if (use_large) {
        frame_size = ARMLargePage;
    } else {
        frame_size = ARMSmallPage;
    }
    return
        cap_frame_cap_new(
            asid,                          /* capFMappedASID */
            pptr,                          /* capFBasePtr */
            frame_size,                    /* capFSize */
            vptr,                          /* capFMappedAddress */
            wordFromVMRights(VMReadWrite), /* capFVMRights */
            false                          /* capFIsDevice */
        );
}

static BOOT_CODE void map_it_pt_cap(cap_t vspace_cap, cap_t pt_cap)
{
    vspace_root_t *vspaceRoot = VSPACE_PTR(pptr_of_cap(vspace_cap));
    pude_t *pud;
    pde_t *pd;
    pte_t *pt = PT_PTR(cap_page_table_cap_get_capPTBasePtr(pt_cap));
    vptr_t vptr = cap_page_table_cap_get_capPTMappedAddress(pt_cap);

    assert(cap_page_table_cap_get_capPTIsMapped(pt_cap));

#ifdef AARCH64_VSPACE_S2_START_L1
    pud = vspaceRoot;
#else
    vspaceRoot += GET_PGD_INDEX(vptr);
    assert(pgde_pgde_pud_ptr_get_present(vspaceRoot));
    pud = paddr_to_pptr(pgde_pgde_pud_ptr_get_pud_base_address(vspaceRoot));
#endif
    pud += GET_UPUD_INDEX(vptr);
    assert(pude_pude_pd_ptr_get_present(pud));
    pd = paddr_to_pptr(pude_pude_pd_ptr_get_pd_base_address(pud));
    *(pd + GET_PD_INDEX(vptr)) = pde_pde_small_new(
                                     pptr_to_paddr(pt)
                                 );
}

static BOOT_CODE cap_t create_it_pt_cap(cap_t vspace_cap, pptr_t pptr, vptr_t vptr, asid_t asid)
{
    cap_t cap;
    cap = cap_page_table_cap_new(
              asid,                   /* capPTMappedASID */
              pptr,                   /* capPTBasePtr */
              1,                      /* capPTIsMapped */
              vptr                    /* capPTMappedAddress */
          );
    map_it_pt_cap(vspace_cap, cap);
    return cap;
}

static BOOT_CODE void map_it_pd_cap(cap_t vspace_cap, cap_t pd_cap)
{
    vspace_root_t *vspaceRoot = VSPACE_PTR(pptr_of_cap(vspace_cap));
    pude_t *pud;
    pde_t *pd = PD_PTR(cap_page_directory_cap_get_capPDBasePtr(pd_cap));
    vptr_t vptr = cap_page_directory_cap_get_capPDMappedAddress(pd_cap);

    assert(cap_page_directory_cap_get_capPDIsMapped(pd_cap));

#ifdef AARCH64_VSPACE_S2_START_L1
    pud = vspaceRoot;
#else
    vspaceRoot += GET_PGD_INDEX(vptr);
    assert(pgde_pgde_pud_ptr_get_present(vspaceRoot));
    pud = paddr_to_pptr(pgde_pgde_pud_ptr_get_pud_base_address(vspaceRoot));
#endif
    *(pud + GET_UPUD_INDEX(vptr)) = pude_pude_pd_new(
                                        pptr_to_paddr(pd)
                                    );
}

static BOOT_CODE cap_t create_it_pd_cap(cap_t vspace_cap, pptr_t pptr, vptr_t vptr, asid_t asid)
{
    cap_t cap;
    cap = cap_page_directory_cap_new(
              asid,                   /* capPDMappedASID */
              pptr,                   /* capPDBasePtr */
              1,                      /* capPDIsMapped */
              vptr                    /* capPDMappedAddress */
          );
    map_it_pd_cap(vspace_cap, cap);
    return cap;
}

#ifndef AARCH64_VSPACE_S2_START_L1
static BOOT_CODE void map_it_pud_cap(cap_t vspace_cap, cap_t pud_cap)
{
    pgde_t *pgd = PGD_PTR(pptr_of_cap(vspace_cap));
    pude_t *pud = PUD_PTR(cap_page_upper_directory_cap_get_capPUDBasePtr(pud_cap));
    vptr_t vptr = cap_page_upper_directory_cap_get_capPUDMappedAddress(pud_cap);

    assert(cap_page_upper_directory_cap_get_capPUDIsMapped(pud_cap));

    *(pgd + GET_PGD_INDEX(vptr)) = pgde_pgde_pud_new(
                                       pptr_to_paddr(pud));
}

static BOOT_CODE cap_t create_it_pud_cap(cap_t vspace_cap, pptr_t pptr, vptr_t vptr, asid_t asid)
{
    cap_t cap;
    cap = cap_page_upper_directory_cap_new(
              asid,               /* capPUDMappedASID */
              pptr,               /* capPUDBasePtr */
              1,                  /* capPUDIsMapped */
              vptr                /* capPUDMappedAddress */
          );
    map_it_pud_cap(vspace_cap, cap);
    return cap;
}
#endif /* AARCH64_VSPACE_S2_START_L1 */

BOOT_CODE word_t arch_get_n_paging(v_region_t it_v_reg)
{
    return
#ifndef AARCH64_VSPACE_S2_START_L1
        get_n_paging(it_v_reg, PGD_INDEX_OFFSET) +
#endif
        get_n_paging(it_v_reg, PUD_INDEX_OFFSET) +
        get_n_paging(it_v_reg, PD_INDEX_OFFSET);
}
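
/* Worked example (hypothetical region): an initial-task image covering
 * [0x400000, 0x900000) touches one PGD slot, one PUD slot and three 2 MiB PD
 * slots, so without AARCH64_VSPACE_S2_START_L1 this returns
 * 1 PUD + 1 PD + 3 PTs = 5 paging structures.
 */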

BOOT_CODE cap_t create_it_address_space(cap_t root_cnode_cap, v_region_t it_v_reg)
{
    cap_t      vspace_cap;
    vptr_t     vptr;
    seL4_SlotPos slot_pos_before;
    seL4_SlotPos slot_pos_after;

    /* create the PGD */
    vspace_cap = cap_vtable_cap_new(
                     IT_ASID,           /* capPGDMappedASID */
                     rootserver.vspace, /* capPGDBasePtr */
                     1                  /* capPGDIsMapped */
                 );
    slot_pos_before = ndks_boot.slot_pos_cur;
    write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), seL4_CapInitThreadVSpace), vspace_cap);

#ifndef AARCH64_VSPACE_S2_START_L1
    /* Create any PUDs needed for the userland image */
    for (vptr = ROUND_DOWN(it_v_reg.start, PGD_INDEX_OFFSET);
         vptr < it_v_reg.end;
         vptr += BIT(PGD_INDEX_OFFSET)) {
        if (!provide_cap(root_cnode_cap, create_it_pud_cap(vspace_cap, it_alloc_paging(), vptr, IT_ASID))) {
            return cap_null_cap_new();
        }
    }
#endif
    /* Create any PDs needed for the userland image */
    for (vptr = ROUND_DOWN(it_v_reg.start, PUD_INDEX_OFFSET);
         vptr < it_v_reg.end;
         vptr += BIT(PUD_INDEX_OFFSET)) {
        if (!provide_cap(root_cnode_cap, create_it_pd_cap(vspace_cap, it_alloc_paging(), vptr, IT_ASID))) {
            return cap_null_cap_new();
        }
    }

    /* Create any PTs needed for the userland image */
    for (vptr = ROUND_DOWN(it_v_reg.start, PD_INDEX_OFFSET);
         vptr < it_v_reg.end;
         vptr += BIT(PD_INDEX_OFFSET)) {
        if (!provide_cap(root_cnode_cap, create_it_pt_cap(vspace_cap, it_alloc_paging(), vptr, IT_ASID))) {
            return cap_null_cap_new();
        }
    }

    slot_pos_after = ndks_boot.slot_pos_cur;
    ndks_boot.bi_frame->userImagePaging = (seL4_SlotRegion) {
        slot_pos_before, slot_pos_after
    };
    return vspace_cap;
}

BOOT_CODE cap_t create_unmapped_it_frame_cap(pptr_t pptr, bool_t use_large)
{
    return create_it_frame_cap(pptr, 0, asidInvalid, use_large);
}

BOOT_CODE cap_t create_mapped_it_frame_cap(cap_t pd_cap, pptr_t pptr, vptr_t vptr, asid_t asid, bool_t use_large,
                                           bool_t executable)
{
    cap_t cap = create_it_frame_cap(pptr, vptr, asid, use_large);
    map_it_frame_cap(pd_cap, cap, executable);
    return cap;
}

BOOT_CODE void activate_kernel_vspace(void)
{
    cleanInvalidateL1Caches();
    setCurrentKernelVSpaceRoot(ttbr_new(0, addrFromKPPtr(armKSGlobalKernelPGD)));

    /* Prevent the elf-loader's address translations from filling the TLB */
    setCurrentUserVSpaceRoot(ttbr_new(0, addrFromKPPtr(armKSGlobalUserVSpace)));

    invalidateLocalTLB();
    lockTLBEntry(KERNEL_ELF_BASE);
}

BOOT_CODE void write_it_asid_pool(cap_t it_ap_cap, cap_t it_vspace_cap)
{
    asid_pool_t *ap = ASID_POOL_PTR(pptr_of_cap(it_ap_cap));
    ap->array[IT_ASID] = (void *)(pptr_of_cap(it_vspace_cap));
    armKSASIDTable[IT_ASID >> asidLowBits] = ap;
#ifdef CONFIG_ARM_SMMU
    vspace_root_t *vtable = ap->array[IT_ASID];
    vtable[VTABLE_SMMU_SLOT] = vtable_invalid_smmu_new(0);
#endif
}

/* ==================== BOOT CODE FINISHES HERE ==================== */

static findVSpaceForASID_ret_t findVSpaceForASID(asid_t asid)
{
    findVSpaceForASID_ret_t ret;
    asid_pool_t *poolPtr;
    vspace_root_t *vspace_root;

    poolPtr = armKSASIDTable[asid >> asidLowBits];
    if (!poolPtr) {
        current_lookup_fault = lookup_fault_invalid_root_new();

        ret.vspace_root = NULL;
        ret.status = EXCEPTION_LOOKUP_FAULT;
        return ret;
    }

    vspace_root = poolPtr->array[asid & MASK(asidLowBits)];
    if (!vspace_root) {
        current_lookup_fault = lookup_fault_invalid_root_new();

        ret.vspace_root = NULL;
        ret.status = EXCEPTION_LOOKUP_FAULT;
        return ret;
    }

    ret.vspace_root = vspace_root;
    ret.status = EXCEPTION_NONE;
    return ret;
}

word_t *PURE lookupIPCBuffer(bool_t isReceiver, tcb_t *thread)
{
    word_t w_bufferPtr;
    cap_t bufferCap;
    vm_rights_t vm_rights;

    w_bufferPtr = thread->tcbIPCBuffer;
    bufferCap = TCB_PTR_CTE_PTR(thread, tcbBuffer)->cap;

    if (unlikely(cap_get_capType(bufferCap) != cap_frame_cap)) {
        return NULL;
    }
    if (unlikely(cap_frame_cap_get_capFIsDevice(bufferCap))) {
        return NULL;
    }

    vm_rights = cap_frame_cap_get_capFVMRights(bufferCap);
    if (likely(vm_rights == VMReadWrite ||
               (!isReceiver && vm_rights == VMReadOnly))) {
        word_t basePtr;
        unsigned int pageBits;

        basePtr = cap_frame_cap_get_capFBasePtr(bufferCap);
        pageBits = pageBitsForSize(cap_frame_cap_get_capFSize(bufferCap));
        return (word_t *)(basePtr + (w_bufferPtr & MASK(pageBits)));
    } else {
        return NULL;
    }
}
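
/* Illustrative arithmetic (addresses made up): for a small (4 KiB) frame with
 * kernel base pointer 0xffffff8040000000 and a user IPC-buffer vaddr of
 * 0x7ffe00000800, the returned pointer is
 * basePtr + (0x800 & MASK(12)) = 0xffffff8040000800; only the in-page offset
 * of the user address is used.
 */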

exception_t checkValidIPCBuffer(vptr_t vptr, cap_t cap)
{
    if (cap_get_capType(cap) != cap_frame_cap) {
        userError("IPC Buffer is an invalid cap.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (unlikely(cap_frame_cap_get_capFIsDevice(cap))) {
        userError("Specifying a device frame as an IPC buffer is not permitted.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (!IS_ALIGNED(vptr, seL4_IPCBufferSizeBits)) {
        /* print the full 64-bit vaddr rather than truncating it to int */
        userError("IPC Buffer vaddr 0x%lx is not aligned.", (unsigned long)vptr);
        current_syscall_error.type = seL4_AlignmentError;
        return EXCEPTION_SYSCALL_ERROR;
    }

    return EXCEPTION_NONE;
}

static lookupPGDSlot_ret_t lookupPGDSlot(vspace_root_t *vspace, vptr_t vptr)
{
    lookupPGDSlot_ret_t ret;

    pgde_t *pgd = PGDE_PTR(vspace);
    word_t pgdIndex = GET_PGD_INDEX(vptr);
    ret.status = EXCEPTION_NONE;
    ret.pgdSlot = pgd + pgdIndex;
    return ret;
}

static lookupPUDSlot_ret_t lookupPUDSlot(vspace_root_t *vspace, vptr_t vptr)
{
    lookupPUDSlot_ret_t ret;

#ifdef AARCH64_VSPACE_S2_START_L1
    pude_t *pud = PUDE_PTR(vspace);
    word_t pudIndex = GET_UPUD_INDEX(vptr);
    ret.status = EXCEPTION_NONE;
    ret.pudSlot = pud + pudIndex;
    return ret;
#else
    lookupPGDSlot_ret_t pgdSlot = lookupPGDSlot(vspace, vptr);

    if (!pgde_pgde_pud_ptr_get_present(pgdSlot.pgdSlot)) {
        current_lookup_fault = lookup_fault_missing_capability_new(PGD_INDEX_OFFSET);

        ret.pudSlot = NULL;
        ret.status = EXCEPTION_LOOKUP_FAULT;
        return ret;
    } else {
        pude_t *pud;
        pude_t *pudSlot;
        word_t pudIndex = GET_UPUD_INDEX(vptr);
        pud = paddr_to_pptr(pgde_pgde_pud_ptr_get_pud_base_address(pgdSlot.pgdSlot));
        pudSlot = pud + pudIndex;

        ret.status = EXCEPTION_NONE;
        ret.pudSlot = pudSlot;
        return ret;
    }
#endif
}

static lookupPDSlot_ret_t lookupPDSlot(vspace_root_t *vspace, vptr_t vptr)
{
    lookupPUDSlot_ret_t pudSlot;
    lookupPDSlot_ret_t ret;

    pudSlot = lookupPUDSlot(vspace, vptr);
    if (pudSlot.status != EXCEPTION_NONE) {
        ret.pdSlot = NULL;
        ret.status = pudSlot.status;
        return ret;
    }
    if (!pude_pude_pd_ptr_get_present(pudSlot.pudSlot)) {
        current_lookup_fault = lookup_fault_missing_capability_new(PUD_INDEX_OFFSET);

        ret.pdSlot = NULL;
        ret.status = EXCEPTION_LOOKUP_FAULT;
        return ret;
    } else {
        pde_t *pd;
        pde_t *pdSlot;
        word_t pdIndex = GET_PD_INDEX(vptr);
        pd = paddr_to_pptr(pude_pude_pd_ptr_get_pd_base_address(pudSlot.pudSlot));
        pdSlot = pd + pdIndex;

        ret.status = EXCEPTION_NONE;
        ret.pdSlot = pdSlot;
        return ret;
    }
}

static lookupPTSlot_ret_t lookupPTSlot(vspace_root_t *vspace, vptr_t vptr)
{
    lookupPTSlot_ret_t ret;
    lookupPDSlot_ret_t pdSlot;

    pdSlot = lookupPDSlot(vspace, vptr);
    if (pdSlot.status != EXCEPTION_NONE) {
        ret.ptSlot = NULL;
        ret.status = pdSlot.status;
        return ret;
    }
    if (!pde_pde_small_ptr_get_present(pdSlot.pdSlot)) {
        current_lookup_fault = lookup_fault_missing_capability_new(PD_INDEX_OFFSET);

        ret.ptSlot = NULL;
        ret.status = EXCEPTION_LOOKUP_FAULT;
        return ret;
    } else {
        pte_t *pt;
        pte_t *ptSlot;
        word_t ptIndex = GET_PT_INDEX(vptr);
        pt = paddr_to_pptr(pde_pde_small_ptr_get_pt_base_address(pdSlot.pdSlot));
        ptSlot = pt + ptIndex;

        ret.ptSlot = ptSlot;
        ret.status = EXCEPTION_NONE;
        return ret;
    }
}

static lookupFrame_ret_t lookupFrame(vspace_root_t *vspace, vptr_t vptr)
{
    lookupPUDSlot_ret_t pudSlot;
    lookupFrame_ret_t ret;

    pudSlot = lookupPUDSlot(vspace, vptr);
    if (pudSlot.status != EXCEPTION_NONE) {
        ret.valid = false;
        return ret;
    }

    switch (pude_ptr_get_pude_type(pudSlot.pudSlot)) {
    case pude_pude_1g:
        ret.frameBase = pude_pude_1g_ptr_get_page_base_address(pudSlot.pudSlot);
        ret.frameSize = ARMHugePage;
        ret.valid = true;
        return ret;

    case pude_pude_pd: {
        pde_t *pd = paddr_to_pptr(pude_pude_pd_ptr_get_pd_base_address(pudSlot.pudSlot));
        pde_t *pdSlot = pd + GET_PD_INDEX(vptr);

        if (pde_ptr_get_pde_type(pdSlot) == pde_pde_large) {
            ret.frameBase = pde_pde_large_ptr_get_page_base_address(pdSlot);
            ret.frameSize = ARMLargePage;
            ret.valid = true;
            return ret;
        }

        if (pde_ptr_get_pde_type(pdSlot) == pde_pde_small) {
            pte_t *pt = paddr_to_pptr(pde_pde_small_ptr_get_pt_base_address(pdSlot));
            pte_t *ptSlot = pt + GET_PT_INDEX(vptr);

            if (pte_ptr_get_present(ptSlot)) {
                ret.frameBase = pte_ptr_get_page_base_address(ptSlot);
                ret.frameSize = ARMSmallPage;
                ret.valid = true;
                return ret;
            }
        }
    }
    }

    ret.valid = false;
    return ret;
}

/* Note that if hypervisor support is enabled, the user page tables use the
 * stage-2 translation format. Otherwise, they follow the stage-1 translation format.
 */
static pte_t makeUser3rdLevel(paddr_t paddr, vm_rights_t vm_rights, vm_attributes_t attributes)
{
    bool_t nonexecutable = vm_attributes_get_armExecuteNever(attributes);

    if (vm_attributes_get_armPageCacheable(attributes)) {
        return pte_new(
                   nonexecutable,              /* unprivileged execute never */
                   paddr,
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                   0,
#else
                   1,                          /* not global */
#endif
                   1,                          /* access flag */
                   SMP_TERNARY(SMP_SHARE, 0),  /* inner-shareable if SMP enabled, otherwise unshared */
                   APFromVMRights(vm_rights),
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                   S2_NORMAL,
#else
                   NORMAL,
#endif
                   RESERVED
               );
    } else {
        return pte_new(
                   nonexecutable,              /* unprivileged execute never */
                   paddr,
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                   0,
#else
                   1,                          /* not global */
#endif
                   1,                          /* access flag */
                   0,                          /* ignored - outer shareable */
                   APFromVMRights(vm_rights),
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                   S2_DEVICE_nGnRnE,
#else
                   DEVICE_nGnRnE,
#endif
                   RESERVED
               );
    }
}

static pde_t makeUser2ndLevel(paddr_t paddr, vm_rights_t vm_rights, vm_attributes_t attributes)
{
    bool_t nonexecutable = vm_attributes_get_armExecuteNever(attributes);

    if (vm_attributes_get_armPageCacheable(attributes)) {
        return pde_pde_large_new(
                   nonexecutable,              /* unprivileged execute never */
                   paddr,
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                   0,
#else
                   1,                          /* not global */
#endif
                   1,                          /* access flag */
                   SMP_TERNARY(SMP_SHARE, 0),  /* inner-shareable if SMP enabled, otherwise unshared */
                   APFromVMRights(vm_rights),
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                   S2_NORMAL
#else
                   NORMAL
#endif
               );
    } else {
        return pde_pde_large_new(
                   nonexecutable,              /* unprivileged execute never */
                   paddr,
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                   0,
#else
                   1,                          /* not global */
#endif
                   1,                          /* access flag */
                   0,                          /* ignored - outer shareable */
                   APFromVMRights(vm_rights),
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                   S2_DEVICE_nGnRnE
#else
                   DEVICE_nGnRnE
#endif
               );
    }
}

static pude_t makeUser1stLevel(paddr_t paddr, vm_rights_t vm_rights, vm_attributes_t attributes)
{
    bool_t nonexecutable = vm_attributes_get_armExecuteNever(attributes);

    if (vm_attributes_get_armPageCacheable(attributes)) {
        return pude_pude_1g_new(
                   nonexecutable,              /* unprivileged execute never */
                   paddr,
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                   0,
#else
                   1,                          /* not global */
#endif
                   1,                          /* access flag */
                   SMP_TERNARY(SMP_SHARE, 0),  /* inner-shareable if SMP enabled, otherwise unshared */
                   APFromVMRights(vm_rights),
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                   S2_NORMAL
#else
                   NORMAL
#endif
               );
    } else {
        return pude_pude_1g_new(
                   nonexecutable,              /* unprivileged execute never */
                   paddr,
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                   0,
#else
                   1,                          /* not global */
#endif
                   1,                          /* access flag */
                   0,                          /* ignored - outer shareable */
                   APFromVMRights(vm_rights),
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                   S2_DEVICE_nGnRnE
#else
                   DEVICE_nGnRnE
#endif
               );
    }
}

#define PAR_EL1_MASK 0x0000fffffffff000ul
#define GET_PAR_ADDR(x) ((x) & PAR_EL1_MASK)
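
/* PAR_EL1_MASK selects PA[47:12] from PAR_EL1 after an address-translation
 * instruction, so GET_PAR_ADDR yields the page-aligned output address of the
 * stage-1 translation (the IPA when a VCPU is active). The fault handler
 * below recombines it with the page offset of the faulting virtual address.
 */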
exception_t handleVMFault(tcb_t *thread, vm_fault_type_t vm_faultType)
{
    switch (vm_faultType) {
    case ARMDataAbort: {
        word_t addr, fault;

        addr = getFAR();
        fault = getDFSR();

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        /* use the IPA */
        if (ARCH_NODE_STATE(armHSVCPUActive)) {
            addr = GET_PAR_ADDR(ats1e1r(addr)) | (addr & MASK(PAGE_BITS));
        }
#endif
        current_fault = seL4_Fault_VMFault_new(addr, fault, false);
        return EXCEPTION_FAULT;
    }

    case ARMPrefetchAbort: {
        word_t pc, fault;

        pc = getRestartPC(thread);
        fault = getIFSR();

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        if (ARCH_NODE_STATE(armHSVCPUActive)) {
            pc = GET_PAR_ADDR(ats1e1r(pc)) | (pc & MASK(PAGE_BITS));
        }
#endif
        current_fault = seL4_Fault_VMFault_new(pc, fault, true);
        return EXCEPTION_FAULT;
    }

    default:
        fail("Invalid VM fault type");
    }
}

bool_t CONST isVTableRoot(cap_t cap)
{
    return cap_get_capType(cap) == cap_vtable_root_cap;
}

bool_t CONST isValidNativeRoot(cap_t cap)
{
    return isVTableRoot(cap) &&
           cap_vtable_root_isMapped(cap);
}

bool_t CONST isValidVTableRoot(cap_t cap)
{
    return isValidNativeRoot(cap);
}

void setVMRoot(tcb_t *tcb)
{
    cap_t threadRoot;
    asid_t asid;
    vspace_root_t *vspaceRoot;
    findVSpaceForASID_ret_t find_ret;

    threadRoot = TCB_PTR_CTE_PTR(tcb, tcbVTable)->cap;

    if (!isValidNativeRoot(threadRoot)) {
        setCurrentUserVSpaceRoot(ttbr_new(0, addrFromKPPtr(armKSGlobalUserVSpace)));
        return;
    }

    vspaceRoot = VSPACE_PTR(cap_vtable_root_get_basePtr(threadRoot));
    asid = cap_vtable_root_get_mappedASID(threadRoot);
    find_ret = findVSpaceForASID(asid);
    if (unlikely(find_ret.status != EXCEPTION_NONE || find_ret.vspace_root != vspaceRoot)) {
        setCurrentUserVSpaceRoot(ttbr_new(0, addrFromKPPtr(armKSGlobalUserVSpace)));
        return;
    }

    armv_contextSwitch(vspaceRoot, asid);
}

static bool_t setVMRootForFlush(vspace_root_t *vspace, asid_t asid)
{
    cap_t threadRoot;

    threadRoot = TCB_PTR_CTE_PTR(NODE_STATE(ksCurThread), tcbVTable)->cap;

    if (cap_get_capType(threadRoot) == cap_vtable_root_cap &&
        cap_vtable_root_isMapped(threadRoot) &&
        cap_vtable_root_get_basePtr(threadRoot) == vspace) {
        return false;
    }

    armv_contextSwitch(vspace, asid);
    return true;
}

pgde_t *pageUpperDirectoryMapped(asid_t asid, vptr_t vaddr, pude_t *pud)
{
    findVSpaceForASID_ret_t find_ret;
    lookupPGDSlot_ret_t lu_ret;

    find_ret = findVSpaceForASID(asid);
    if (find_ret.status != EXCEPTION_NONE) {
        return NULL;
    }

    lu_ret = lookupPGDSlot(find_ret.vspace_root, vaddr);
    if (pgde_pgde_pud_ptr_get_present(lu_ret.pgdSlot) &&
        (pgde_pgde_pud_ptr_get_pud_base_address(lu_ret.pgdSlot) == pptr_to_paddr(pud))) {
        return lu_ret.pgdSlot;
    }

    return NULL;
}

pude_t *pageDirectoryMapped(asid_t asid, vptr_t vaddr, pde_t *pd)
{
    findVSpaceForASID_ret_t find_ret;
    lookupPUDSlot_ret_t lu_ret;

    find_ret = findVSpaceForASID(asid);
    if (find_ret.status != EXCEPTION_NONE) {
        return NULL;
    }

    lu_ret = lookupPUDSlot(find_ret.vspace_root, vaddr);
    if (lu_ret.status != EXCEPTION_NONE) {
        return NULL;
    }

    if (pude_pude_pd_ptr_get_present(lu_ret.pudSlot) &&
        (pude_pude_pd_ptr_get_pd_base_address(lu_ret.pudSlot) == pptr_to_paddr(pd))) {
        return lu_ret.pudSlot;
    }

    return NULL;
}

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT

static void invalidateASID(asid_t asid)
{
    asid_pool_t *asidPool;

    asidPool = armKSASIDTable[asid >> asidLowBits];
    assert(asidPool);

    vspace_root_t *vtable = asidPool->array[asid & MASK(asidLowBits)];
    assert(vtable);

    vtable[VTABLE_VMID_SLOT] = vtable_invalid_new(0, false);
}

static vspace_root_t PURE loadHWASID(asid_t asid)
{
    asid_pool_t *asidPool;

    asidPool = armKSASIDTable[asid >> asidLowBits];
    assert(asidPool);

    vspace_root_t *vtable = asidPool->array[asid & MASK(asidLowBits)];
    assert(vtable);

    return vtable[VTABLE_VMID_SLOT];
}

static void storeHWASID(asid_t asid, hw_asid_t hw_asid)
{
    asid_pool_t *asidPool;

    asidPool = armKSASIDTable[asid >> asidLowBits];
    assert(asidPool);

    vspace_root_t *vtable = asidPool->array[asid & MASK(asidLowBits)];
    assert(vtable);

    /* Store the HW VMID in the last entry,
       masquerading as an invalid PGDE */
    vtable[VTABLE_VMID_SLOT] = vtable_invalid_new(hw_asid, true);

    armKSHWASIDTable[hw_asid] = asid;
}

static hw_asid_t findFreeHWASID(void)
{
    word_t hw_asid_offset;
    hw_asid_t hw_asid;

    /* Find a free hardware ASID */
    for (hw_asid_offset = 0;
         hw_asid_offset <= (word_t)((hw_asid_t) - 1);
         hw_asid_offset++) {
        hw_asid = armKSNextASID + ((hw_asid_t)hw_asid_offset);
        if (armKSHWASIDTable[hw_asid] == asidInvalid) {
            return hw_asid;
        }
    }

    hw_asid = armKSNextASID;

    /* If we've scanned the whole table without finding a free ASID,
       evict the entry at armKSNextASID */
    invalidateASID(armKSHWASIDTable[hw_asid]);

    /* Flush TLB */
    invalidateTranslationASID(hw_asid);
    armKSHWASIDTable[hw_asid] = asidInvalid;

    /* Increment the NextASID index */
    armKSNextASID++;

    return hw_asid;
}
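
/* The loop above relies on hw_asid_t being an 8-bit type: armKSNextASID +
 * hw_asid_offset wraps modulo 256 on assignment, so all 256 hardware
 * ASIDs/VMIDs are probed starting from armKSNextASID. If none is free, the
 * one at armKSNextASID is revoked from its owning (software) ASID and
 * recycled, which is why the eviction path after the loop needs no further
 * search.
 */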

hw_asid_t getHWASID(asid_t asid)
{
    vspace_root_t stored_hw_asid;

    stored_hw_asid = loadHWASID(asid);
    if (vtable_invalid_get_stored_asid_valid(stored_hw_asid)) {
        return vtable_invalid_get_stored_hw_asid(stored_hw_asid);
    } else {
        hw_asid_t new_hw_asid;

        new_hw_asid = findFreeHWASID();
        storeHWASID(asid, new_hw_asid);
        return new_hw_asid;
    }
}

static void invalidateASIDEntry(asid_t asid)
{
    vspace_root_t stored_hw_asid;

    stored_hw_asid = loadHWASID(asid);
    if (vtable_invalid_get_stored_asid_valid(stored_hw_asid)) {
        armKSHWASIDTable[vtable_invalid_get_stored_hw_asid(stored_hw_asid)] =
            asidInvalid;
    }
    invalidateASID(asid);
}

#endif

#ifdef CONFIG_ARM_SMMU
static vspace_root_t getASIDBindCB(asid_t asid)
{
    asid_pool_t *asidPool;

    asidPool = armKSASIDTable[asid >> asidLowBits];
    assert(asidPool);

    vspace_root_t *vtable = asidPool->array[asid & MASK(asidLowBits)];
    assert(vtable);

    return vtable[VTABLE_SMMU_SLOT];
}

void increaseASIDBindCB(asid_t asid)
{
    asid_pool_t *asidPool;
    vspace_root_t stored_info;

    asidPool = armKSASIDTable[asid >> asidLowBits];
    assert(asidPool);

    vspace_root_t *vtable = asidPool->array[asid & MASK(asidLowBits)];
    assert(vtable);

    stored_info = vtable[VTABLE_SMMU_SLOT];
    vtable[VTABLE_SMMU_SLOT] = vtable_invalid_smmu_new(vtable_invalid_get_bind_cb(stored_info) + 1);
}

void decreaseASIDBindCB(asid_t asid)
{
    asid_pool_t *asidPool;
    vspace_root_t stored_info;

    asidPool = armKSASIDTable[asid >> asidLowBits];
    assert(asidPool);

    vspace_root_t *vtable = asidPool->array[asid & MASK(asidLowBits)];
    assert(vtable);

    stored_info = vtable[VTABLE_SMMU_SLOT];
    vtable[VTABLE_SMMU_SLOT] = vtable_invalid_smmu_new(vtable_invalid_get_bind_cb(stored_info) - 1);
}
#endif

static inline void invalidateTLBByASID(asid_t asid)
{
#ifdef CONFIG_ARM_SMMU
    vspace_root_t bind_cb = getASIDBindCB(asid);
    if (unlikely(vtable_invalid_get_bind_cb(bind_cb))) {
        invalidateSMMUTLBByASID(asid, vtable_invalid_get_bind_cb(bind_cb));
    }
#endif
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
    vspace_root_t stored_hw_asid;

    stored_hw_asid = loadHWASID(asid);
    if (!vtable_invalid_get_stored_asid_valid(stored_hw_asid)) {
        return;
    }
    invalidateTranslationASID(vtable_invalid_get_stored_hw_asid(stored_hw_asid));
#else
    invalidateTranslationASID(asid);
#endif
}

static inline void invalidateTLBByASIDVA(asid_t asid, vptr_t vaddr)
{
#ifdef CONFIG_ARM_SMMU
    vspace_root_t bind_cb = getASIDBindCB(asid);
    if (unlikely(vtable_invalid_get_bind_cb(bind_cb))) {
        invalidateSMMUTLBByASIDVA(asid, vaddr, vtable_invalid_get_bind_cb(bind_cb));
    }
#endif
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
    vspace_root_t stored_hw_asid;

    stored_hw_asid = loadHWASID(asid);
    if (!vtable_invalid_get_stored_asid_valid(stored_hw_asid)) {
        return;
    }
    uint64_t hw_asid = vtable_invalid_get_stored_hw_asid(stored_hw_asid);
    invalidateTranslationSingle((hw_asid << 48) | (vaddr >> seL4_PageBits));
#else
    invalidateTranslationSingle((asid << 48) | (vaddr >> seL4_PageBits));
#endif
}
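
/* The operand built above follows the layout of the AArch64 TLBI VA-based
 * maintenance instructions: the (hardware) ASID goes in bits [63:48] and the
 * page-aligned VA, shifted right by seL4_PageBits, sits in the low bits.
 */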

pde_t *pageTableMapped(asid_t asid, vptr_t vaddr, pte_t *pt)
{
    findVSpaceForASID_ret_t find_ret;
    lookupPDSlot_ret_t lu_ret;

    find_ret = findVSpaceForASID(asid);
    if (find_ret.status != EXCEPTION_NONE) {
        return NULL;
    }

    lu_ret = lookupPDSlot(find_ret.vspace_root, vaddr);
    if (lu_ret.status != EXCEPTION_NONE) {
        return NULL;
    }

    if (pde_pde_small_ptr_get_present(lu_ret.pdSlot) &&
        (pde_pde_small_ptr_get_pt_base_address(lu_ret.pdSlot) == pptr_to_paddr(pt))) {
        return lu_ret.pdSlot;
    }

    return NULL;
}

void unmapPageUpperDirectory(asid_t asid, vptr_t vaddr, pude_t *pud)
{
    pgde_t *pgdSlot;

    pgdSlot = pageUpperDirectoryMapped(asid, vaddr, pud);
    if (likely(pgdSlot != NULL)) {
#ifdef CONFIG_ARM_SMMU
        *pgdSlot = pgde_pgde_invalid_new(0, false, 0);
#else
        *pgdSlot = pgde_pgde_invalid_new(0, false);
#endif
        cleanByVA_PoU((vptr_t)pgdSlot, pptr_to_paddr(pgdSlot));
        invalidateTLBByASID(asid);
    }
}

void unmapPageDirectory(asid_t asid, vptr_t vaddr, pde_t *pd)
{
    pude_t *pudSlot;

    pudSlot = pageDirectoryMapped(asid, vaddr, pd);
    if (likely(pudSlot != NULL)) {
        *pudSlot = pude_invalid_new();

        cleanByVA_PoU((vptr_t)pudSlot, pptr_to_paddr(pudSlot));
        invalidateTLBByASID(asid);
    }
}

void unmapPageTable(asid_t asid, vptr_t vaddr, pte_t *pt)
{
    pde_t *pdSlot;

    pdSlot = pageTableMapped(asid, vaddr, pt);
    if (likely(pdSlot != NULL)) {
        *pdSlot = pde_invalid_new();

        cleanByVA_PoU((vptr_t)pdSlot, pptr_to_paddr(pdSlot));
        invalidateTLBByASID(asid);
    }
}

void unmapPage(vm_page_size_t page_size, asid_t asid, vptr_t vptr, pptr_t pptr)
{
    paddr_t addr;
    findVSpaceForASID_ret_t find_ret;

    addr = pptr_to_paddr((void *)pptr);
    find_ret = findVSpaceForASID(asid);
    if (unlikely(find_ret.status != EXCEPTION_NONE)) {
        return;
    }

    switch (page_size) {
    case ARMSmallPage: {
        lookupPTSlot_ret_t lu_ret;

        lu_ret = lookupPTSlot(find_ret.vspace_root, vptr);
        if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
            return;
        }

        if (pte_ptr_get_present(lu_ret.ptSlot) &&
            pte_ptr_get_page_base_address(lu_ret.ptSlot) == addr) {
            *(lu_ret.ptSlot) = pte_invalid_new();

            cleanByVA_PoU((vptr_t)lu_ret.ptSlot, pptr_to_paddr(lu_ret.ptSlot));
        }
        break;
    }

    case ARMLargePage: {
        lookupPDSlot_ret_t lu_ret;

        lu_ret = lookupPDSlot(find_ret.vspace_root, vptr);
        if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
            return;
        }

        if (pde_pde_large_ptr_get_present(lu_ret.pdSlot) &&
            pde_pde_large_ptr_get_page_base_address(lu_ret.pdSlot) == addr) {
            *(lu_ret.pdSlot) = pde_invalid_new();

            cleanByVA_PoU((vptr_t)lu_ret.pdSlot, pptr_to_paddr(lu_ret.pdSlot));
        }
        break;
    }

    case ARMHugePage: {
        lookupPUDSlot_ret_t lu_ret;

        lu_ret = lookupPUDSlot(find_ret.vspace_root, vptr);
        if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
            return;
        }

        if (pude_pude_1g_ptr_get_present(lu_ret.pudSlot) &&
            pude_pude_1g_ptr_get_page_base_address(lu_ret.pudSlot) == addr) {
            *(lu_ret.pudSlot) = pude_invalid_new();

            cleanByVA_PoU((vptr_t)lu_ret.pudSlot, pptr_to_paddr(lu_ret.pudSlot));
        }
        break;
    }

    default:
        fail("Invalid ARM page type");
    }

    assert(asid < BIT(16));
    invalidateTLBByASIDVA(asid, vptr);
}

void deleteASID(asid_t asid, vspace_root_t *vspace)
{
    asid_pool_t *poolPtr;

    poolPtr = armKSASIDTable[asid >> asidLowBits];

    if (poolPtr != NULL && poolPtr->array[asid & MASK(asidLowBits)] == vspace) {
        invalidateTLBByASID(asid);
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        invalidateASIDEntry(asid);
#endif
        poolPtr->array[asid & MASK(asidLowBits)] = NULL;
        setVMRoot(NODE_STATE(ksCurThread));
    }
}

void deleteASIDPool(asid_t asid_base, asid_pool_t *pool)
{
    word_t offset;

    assert((asid_base & MASK(asidLowBits)) == 0);

    if (armKSASIDTable[asid_base >> asidLowBits] == pool) {
        for (offset = 0; offset < BIT(asidLowBits); offset++) {
            if (pool->array[offset]) {
                invalidateTLBByASID(asid_base + offset);
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                invalidateASIDEntry(asid_base + offset);
#endif
            }
        }
        armKSASIDTable[asid_base >> asidLowBits] = NULL;
        setVMRoot(NODE_STATE(ksCurThread));
    }
}

static void doFlush(int invLabel, vptr_t start, vptr_t end, paddr_t pstart)
{
    switch (invLabel) {
    case ARMVSpaceClean_Data:
    case ARMPageClean_Data:
        cleanCacheRange_RAM(start, end, pstart);
        break;

    case ARMVSpaceInvalidate_Data:
    case ARMPageInvalidate_Data:
        invalidateCacheRange_RAM(start, end, pstart);
        break;

    case ARMVSpaceCleanInvalidate_Data:
    case ARMPageCleanInvalidate_Data:
        cleanInvalidateCacheRange_RAM(start, end, pstart);
        break;

    case ARMVSpaceUnify_Instruction:
    case ARMPageUnify_Instruction:
        /* First clean data lines to the point of unification... */
        cleanCacheRange_PoU(start, end, pstart);
        /* Ensure it's been written. */
        dsb();
        /* ...then invalidate the corresponding instruction lines
           to the point of unification... */
        invalidateCacheRange_I(start, end, pstart);
        /* ...and ensure new instructions come from fresh cache lines. */
        isb();
        break;
    default:
        fail("Invalid operation, shouldn't get here.\n");
    }
}

/* ================= INVOCATION HANDLING STARTS HERE ================== */

static exception_t performVSpaceFlush(int invLabel, vspace_root_t *vspaceRoot, asid_t asid,
                                      vptr_t start, vptr_t end, paddr_t pstart)
{

    if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) {
        word_t size = end - start;
        start = (vptr_t)paddr_to_pptr(pstart);
        end = start + size;
        if (start < end) {
            doFlush(invLabel, start, end, pstart);
        }
    } else {
        bool_t root_switched;

        /* Flush only if given a non-zero range */
        if (start < end) {
            root_switched = setVMRootForFlush(vspaceRoot, asid);
            doFlush(invLabel, start, end, pstart);
            if (root_switched) {
                setVMRoot(NODE_STATE(ksCurThread));
            }
        }
    }
    return EXCEPTION_NONE;
}
1499 
1500 #ifndef AARCH64_VSPACE_S2_START_L1
static exception_t performUpperPageDirectoryInvocationMap(cap_t cap, cte_t *ctSlot, pgde_t pgde, pgde_t *pgdSlot)
{
    ctSlot->cap = cap;
    *pgdSlot = pgde;
    cleanByVA_PoU((vptr_t)pgdSlot, pptr_to_paddr(pgdSlot));

    return EXCEPTION_NONE;
}

static exception_t performUpperPageDirectoryInvocationUnmap(cap_t cap, cte_t *ctSlot)
{
    if (cap_page_upper_directory_cap_get_capPUDIsMapped(cap)) {
        pude_t *pud = PUD_PTR(cap_page_upper_directory_cap_get_capPUDBasePtr(cap));
        unmapPageUpperDirectory(cap_page_upper_directory_cap_get_capPUDMappedASID(cap),
                                cap_page_upper_directory_cap_get_capPUDMappedAddress(cap), pud);
        clearMemory_PT((void *)pud, cap_get_capSizeBits(cap));
    }

    cap_page_upper_directory_cap_ptr_set_capPUDIsMapped(&(ctSlot->cap), 0);
    return EXCEPTION_NONE;
}
#endif

static exception_t performPageDirectoryInvocationMap(cap_t cap, cte_t *ctSlot, pude_t pude, pude_t *pudSlot)
{
    ctSlot->cap = cap;
    *pudSlot = pude;
    cleanByVA_PoU((vptr_t)pudSlot, pptr_to_paddr(pudSlot));

    return EXCEPTION_NONE;
}

static exception_t performPageDirectoryInvocationUnmap(cap_t cap, cte_t *ctSlot)
{
    if (cap_page_directory_cap_get_capPDIsMapped(cap)) {
        pde_t *pd = PD_PTR(cap_page_directory_cap_get_capPDBasePtr(cap));
        unmapPageDirectory(cap_page_directory_cap_get_capPDMappedASID(cap),
                           cap_page_directory_cap_get_capPDMappedAddress(cap), pd);
        clearMemory_PT((void *)pd, cap_get_capSizeBits(cap));
    }

    cap_page_directory_cap_ptr_set_capPDIsMapped(&(ctSlot->cap), 0);
    return EXCEPTION_NONE;
}

static exception_t performPageTableInvocationMap(cap_t cap, cte_t *ctSlot, pde_t pde, pde_t *pdSlot)
{
    ctSlot->cap = cap;
    *pdSlot = pde;
    cleanByVA_PoU((vptr_t)pdSlot, pptr_to_paddr(pdSlot));

    return EXCEPTION_NONE;
}

static exception_t performPageTableInvocationUnmap(cap_t cap, cte_t *ctSlot)
{
    if (cap_page_table_cap_get_capPTIsMapped(cap)) {
        pte_t *pt = PT_PTR(cap_page_table_cap_get_capPTBasePtr(cap));
        unmapPageTable(cap_page_table_cap_get_capPTMappedASID(cap),
                       cap_page_table_cap_get_capPTMappedAddress(cap), pt);
        clearMemory_PT((void *)pt, cap_get_capSizeBits(cap));
    }

    cap_page_table_cap_ptr_set_capPTIsMapped(&(ctSlot->cap), 0);
    return EXCEPTION_NONE;
}

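/* Mapping a frame may replace a live entry (a remap); if the old entry was
 * present, the stale translation for that ASID and virtual address must be
 * invalidated from the TLB. */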
static exception_t performHugePageInvocationMap(asid_t asid, cap_t cap, cte_t *ctSlot,
                                                pude_t pude, pude_t *pudSlot)
{
    bool_t tlbflush_required = pude_pude_1g_ptr_get_present(pudSlot);

    ctSlot->cap = cap;
    *pudSlot = pude;

    cleanByVA_PoU((vptr_t)pudSlot, pptr_to_paddr(pudSlot));
    if (unlikely(tlbflush_required)) {
        assert(asid < BIT(16));
        invalidateTLBByASIDVA(asid, cap_frame_cap_get_capFMappedAddress(cap));
    }

    return EXCEPTION_NONE;
}

static exception_t performLargePageInvocationMap(asid_t asid, cap_t cap, cte_t *ctSlot,
                                                 pde_t pde, pde_t *pdSlot)
{
    bool_t tlbflush_required = pde_pde_large_ptr_get_present(pdSlot);

    ctSlot->cap = cap;
    *pdSlot = pde;

    cleanByVA_PoU((vptr_t)pdSlot, pptr_to_paddr(pdSlot));
    if (unlikely(tlbflush_required)) {
        assert(asid < BIT(16));
        invalidateTLBByASIDVA(asid, cap_frame_cap_get_capFMappedAddress(cap));
    }

    return EXCEPTION_NONE;
}

static exception_t performSmallPageInvocationMap(asid_t asid, cap_t cap, cte_t *ctSlot,
                                                 pte_t pte, pte_t *ptSlot)
{
    bool_t tlbflush_required = pte_ptr_get_present(ptSlot);

    ctSlot->cap = cap;
    *ptSlot = pte;

    cleanByVA_PoU((vptr_t)ptSlot, pptr_to_paddr(ptSlot));
    if (unlikely(tlbflush_required)) {
        assert(asid < BIT(16));
        invalidateTLBByASIDVA(asid, cap_frame_cap_get_capFMappedAddress(cap));
    }

    return EXCEPTION_NONE;
}

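/* Unmap the frame if it is currently mapped, then mark the frame cap in its
 * CSlot as unmapped. */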
static exception_t performPageInvocationUnmap(cap_t cap, cte_t *ctSlot)
{
    if (cap_frame_cap_get_capFMappedASID(cap) != 0) {
        unmapPage(cap_frame_cap_get_capFSize(cap),
                  cap_frame_cap_get_capFMappedASID(cap),
                  cap_frame_cap_get_capFMappedAddress(cap),
                  cap_frame_cap_get_capFBasePtr(cap));
    }

    cap_frame_cap_ptr_set_capFMappedASID(&ctSlot->cap, asidInvalid);
    cap_frame_cap_ptr_set_capFMappedAddress(&ctSlot->cap, 0);
    return EXCEPTION_NONE;
}

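/* Flush a range within a single frame mapping; the addressing logic mirrors
 * performVSpaceFlush above. */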
static exception_t performPageFlush(int invLabel, vspace_root_t *vspaceRoot, asid_t asid,
                                    vptr_t start, vptr_t end, paddr_t pstart)
{
    if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) {
        /* We flush the cache with kernel virtual addresses since
         * the user virtual addresses are not valid in EL2.
         * Switching the VM root is not required.
         */
        word_t size = end - start;
        start = (vptr_t)paddr_to_pptr(pstart);
        end = start + size;

        if (start < end) {
            doFlush(invLabel, start, end, pstart);
        }
    } else {
        bool_t root_switched;

        if (start < end) {
            root_switched = setVMRootForFlush(vspaceRoot, asid);
            doFlush(invLabel, start, end, pstart);
            if (root_switched) {
                setVMRoot(NODE_STATE(ksCurThread));
            }
        }
    }
    return EXCEPTION_NONE;
}

static exception_t performPageGetAddress(pptr_t base_ptr)
{
    paddr_t base = pptr_to_paddr((void *)base_ptr);

    setRegister(NODE_STATE(ksCurThread), msgRegisters[0], base);
    setRegister(NODE_STATE(ksCurThread), msgInfoRegister,
                wordFromMessageInfo(seL4_MessageInfo_new(0, 0, 0, 1)));

    return EXCEPTION_NONE;
}

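/* Convert an untyped frame into an ASID pool: mark the parent untyped as
 * fully consumed, zero the frame, insert the new ASID pool cap and register
 * the pool in the top-level ASID table. */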
static exception_t performASIDControlInvocation(void *frame, cte_t *slot,
                                                cte_t *parent, asid_t asid_base)
{
    cap_untyped_cap_ptr_set_capFreeIndex(&(parent->cap),
                                         MAX_FREE_INDEX(cap_untyped_cap_get_capBlockSize(parent->cap)));

    memzero(frame, BIT(seL4_ASIDPoolBits));

    cteInsert(
        cap_asid_pool_cap_new(
            asid_base,         /* capASIDBase  */
            WORD_REF(frame)    /* capASIDPool  */
        ), parent, slot);

    assert((asid_base & MASK(asidLowBits)) == 0);
    armKSASIDTable[asid_base >> asidLowBits] = (asid_pool_t *)frame;

    return EXCEPTION_NONE;
}

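/* Decode a cache maintenance invocation on a VSpace root: validate the
 * requested range and the capability, look up the frame backing 'start',
 * and flush at most one page. */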
static exception_t decodeARMVSpaceRootInvocation(word_t invLabel, unsigned int length,
                                                 cte_t *cte, cap_t cap, word_t *buffer)
{
    vptr_t start, end;
    paddr_t pstart;
    asid_t asid;
    vspace_root_t *vspaceRoot;
    lookupFrame_ret_t resolve_ret;
    findVSpaceForASID_ret_t find_ret;

    switch (invLabel) {
    case ARMVSpaceClean_Data:
    case ARMVSpaceInvalidate_Data:
    case ARMVSpaceCleanInvalidate_Data:
    case ARMVSpaceUnify_Instruction:

        if (length < 2) {
            userError("VSpaceRoot Flush: Truncated message.");
            current_syscall_error.type = seL4_TruncatedMessage;
            return EXCEPTION_SYSCALL_ERROR;
        }

        start = getSyscallArg(0, buffer);
        end   = getSyscallArg(1, buffer);

        /* Check sanity of arguments. */
        if (end <= start) {
            userError("VSpaceRoot Flush: Invalid range.");
            current_syscall_error.type = seL4_InvalidArgument;
            current_syscall_error.invalidArgumentNumber = 1;
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* Don't let applications flush kernel regions. */
        if (end > USER_TOP) {
            userError("VSpaceRoot Flush: Exceeds the user addressable region.");
            current_syscall_error.type = seL4_IllegalOperation;
            return EXCEPTION_SYSCALL_ERROR;
        }

        if (unlikely(!isValidNativeRoot(cap))) {
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 0;
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* Make sure that the supplied PGD is valid. */
        vspaceRoot = cap_vtable_root_get_basePtr(cap);
        asid = cap_vtable_root_get_mappedASID(cap);

        find_ret = findVSpaceForASID(asid);
        if (unlikely(find_ret.status != EXCEPTION_NONE)) {
            userError("VSpaceRoot Flush: No VSpace for ASID");
            current_syscall_error.type = seL4_FailedLookup;
            current_syscall_error.failedLookupWasSource = false;
            return EXCEPTION_SYSCALL_ERROR;
        }

        if (unlikely(find_ret.vspace_root != vspaceRoot)) {
            userError("VSpaceRoot Flush: Invalid VSpace Cap");
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 0;
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* Look up the frame containing 'start'. */
        resolve_ret = lookupFrame(vspaceRoot, start);

        if (!resolve_ret.valid) {
            /* Fail silently, as there can't be any stale cached data (for the
             * given address space), and getting a syscall error because the
             * relevant page is non-resident would be 'astonishing'. */
            setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
            return EXCEPTION_NONE;
        }

        /* Refuse to cross a page boundary. */
        if (PAGE_BASE(start, resolve_ret.frameSize) != PAGE_BASE(end - 1, resolve_ret.frameSize)) {
            current_syscall_error.type = seL4_RangeError;
            current_syscall_error.rangeErrorMin = start;
            current_syscall_error.rangeErrorMax = PAGE_BASE(start, resolve_ret.frameSize) +
                                                  MASK(pageBitsForSize(resolve_ret.frameSize));
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* Calculate the physical start address. */
        pstart = resolve_ret.frameBase + PAGE_OFFSET(start, resolve_ret.frameSize);

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performVSpaceFlush(invLabel, vspaceRoot, asid, start, end - 1, pstart);

    default:
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }
}

#ifndef AARCH64_VSPACE_S2_START_L1
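/* Decode map/unmap invocations on a page upper directory (PUD) cap. A map
 * takes the target virtual address and the VSpace root as an extra cap and
 * fails if the PUD is already mapped or the targeted PGD slot is occupied. */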
static exception_t decodeARMPageUpperDirectoryInvocation(word_t invLabel, unsigned int length,
                                                         cte_t *cte, cap_t cap, word_t *buffer)
{
    cap_t pgdCap;
    vspace_root_t *pgd;
    pgde_t pgde;
    asid_t asid;
    vptr_t vaddr;
    lookupPGDSlot_ret_t pgdSlot;
    findVSpaceForASID_ret_t find_ret;

    if (invLabel == ARMPageUpperDirectoryUnmap) {
        if (unlikely(!isFinalCapability(cte))) {
            current_syscall_error.type = seL4_RevokeFirst;
            return EXCEPTION_SYSCALL_ERROR;
        }

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performUpperPageDirectoryInvocationUnmap(cap, cte);
    }

    if (unlikely(invLabel != ARMPageUpperDirectoryMap)) {
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (unlikely(length < 2 || current_extra_caps.excaprefs[0] == NULL)) {
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (unlikely(cap_page_upper_directory_cap_get_capPUDIsMapped(cap))) {
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 0;
        return EXCEPTION_SYSCALL_ERROR;
    }

    vaddr = getSyscallArg(0, buffer) & (~MASK(PGD_INDEX_OFFSET));
    pgdCap = current_extra_caps.excaprefs[0]->cap;

    if (unlikely(!isValidNativeRoot(pgdCap))) {
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }

    pgd = cap_vtable_root_get_basePtr(pgdCap);
    asid = cap_vtable_root_get_mappedASID(pgdCap);

    if (unlikely(vaddr > USER_TOP)) {
        current_syscall_error.type = seL4_InvalidArgument;
        current_syscall_error.invalidArgumentNumber = 0;
        return EXCEPTION_SYSCALL_ERROR;
    }

    find_ret = findVSpaceForASID(asid);
    if (unlikely(find_ret.status != EXCEPTION_NONE)) {
        current_syscall_error.type = seL4_FailedLookup;
        current_syscall_error.failedLookupWasSource = false;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (unlikely(find_ret.vspace_root != pgd)) {
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }

    pgdSlot = lookupPGDSlot(pgd, vaddr);

    if (unlikely(pgde_pgde_pud_ptr_get_present(pgdSlot.pgdSlot))) {
        current_syscall_error.type = seL4_DeleteFirst;
        return EXCEPTION_SYSCALL_ERROR;
    }

    pgde = pgde_pgde_pud_new(
               pptr_to_paddr(PUDE_PTR(cap_page_upper_directory_cap_get_capPUDBasePtr(cap))));

    cap_page_upper_directory_cap_ptr_set_capPUDIsMapped(&cap, 1);
    cap_page_upper_directory_cap_ptr_set_capPUDMappedASID(&cap, asid);
    cap_page_upper_directory_cap_ptr_set_capPUDMappedAddress(&cap, vaddr);

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return performUpperPageDirectoryInvocationMap(cap, cte, pgde, pgdSlot.pgdSlot);
}
#endif

static exception_t decodeARMPageDirectoryInvocation(word_t invLabel, unsigned int length,
                                                    cte_t *cte, cap_t cap, word_t *buffer)
{
    cap_t vspaceRootCap;
    vspace_root_t *vspaceRoot;
    pude_t pude;
    asid_t asid;
    vptr_t vaddr;
    lookupPUDSlot_ret_t pudSlot;
    findVSpaceForASID_ret_t find_ret;

    if (invLabel == ARMPageDirectoryUnmap) {
        if (unlikely(!isFinalCapability(cte))) {
            current_syscall_error.type = seL4_RevokeFirst;
            return EXCEPTION_SYSCALL_ERROR;
        }

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performPageDirectoryInvocationUnmap(cap, cte);
    }

    if (unlikely(invLabel != ARMPageDirectoryMap)) {
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (unlikely(length < 2 || current_extra_caps.excaprefs[0] == NULL)) {
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (unlikely(cap_page_directory_cap_get_capPDIsMapped(cap))) {
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 0;
        return EXCEPTION_SYSCALL_ERROR;
    }

    vaddr = getSyscallArg(0, buffer) & (~MASK(PUD_INDEX_OFFSET));
    vspaceRootCap = current_extra_caps.excaprefs[0]->cap;

    if (unlikely(!isValidNativeRoot(vspaceRootCap))) {
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }

    vspaceRoot = cap_vtable_root_get_basePtr(vspaceRootCap);
    asid = cap_vtable_root_get_mappedASID(vspaceRootCap);

    if (unlikely(vaddr > USER_TOP)) {
        current_syscall_error.type = seL4_InvalidArgument;
        current_syscall_error.invalidArgumentNumber = 0;
        return EXCEPTION_SYSCALL_ERROR;
    }

    find_ret = findVSpaceForASID(asid);
    if (unlikely(find_ret.status != EXCEPTION_NONE)) {
        current_syscall_error.type = seL4_FailedLookup;
        current_syscall_error.failedLookupWasSource = false;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (unlikely(find_ret.vspace_root != vspaceRoot)) {
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }

    pudSlot = lookupPUDSlot(vspaceRoot, vaddr);

    if (pudSlot.status != EXCEPTION_NONE) {
        current_syscall_error.type = seL4_FailedLookup;
        current_syscall_error.failedLookupWasSource = false;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (unlikely(pude_pude_pd_ptr_get_present(pudSlot.pudSlot) ||
                 pude_pude_1g_ptr_get_present(pudSlot.pudSlot))) {
        current_syscall_error.type = seL4_DeleteFirst;
        return EXCEPTION_SYSCALL_ERROR;
    }

    pude = pude_pude_pd_new(pptr_to_paddr(PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(cap))));

    cap_page_directory_cap_ptr_set_capPDIsMapped(&cap, 1);
    cap_page_directory_cap_ptr_set_capPDMappedASID(&cap, asid);
    cap_page_directory_cap_ptr_set_capPDMappedAddress(&cap, vaddr);

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return performPageDirectoryInvocationMap(cap, cte, pude, pudSlot.pudSlot);
}

static exception_t decodeARMPageTableInvocation(word_t invLabel, unsigned int length,
                                                cte_t *cte, cap_t cap, word_t *buffer)
{
    cap_t vspaceRootCap;
    vspace_root_t *vspaceRoot;
    pde_t pde;
    asid_t asid;
    vptr_t vaddr;
    lookupPDSlot_ret_t pdSlot;
    findVSpaceForASID_ret_t find_ret;

    if (invLabel == ARMPageTableUnmap) {
        if (unlikely(!isFinalCapability(cte))) {
            current_syscall_error.type = seL4_RevokeFirst;
            return EXCEPTION_SYSCALL_ERROR;
        }

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performPageTableInvocationUnmap(cap, cte);
    }

    if (unlikely(invLabel != ARMPageTableMap)) {
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (unlikely(length < 2 || current_extra_caps.excaprefs[0] == NULL)) {
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (unlikely(cap_page_table_cap_get_capPTIsMapped(cap))) {
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 0;
        return EXCEPTION_SYSCALL_ERROR;
    }

    vaddr = getSyscallArg(0, buffer) & (~MASK(PD_INDEX_OFFSET));
    vspaceRootCap = current_extra_caps.excaprefs[0]->cap;

    if (unlikely(!isValidNativeRoot(vspaceRootCap))) {
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }

    vspaceRoot = cap_vtable_root_get_basePtr(vspaceRootCap);
    asid = cap_vtable_root_get_mappedASID(vspaceRootCap);

    if (unlikely(vaddr > USER_TOP)) {
        current_syscall_error.type = seL4_InvalidArgument;
        current_syscall_error.invalidArgumentNumber = 0;
        return EXCEPTION_SYSCALL_ERROR;
    }

    find_ret = findVSpaceForASID(asid);
    if (unlikely(find_ret.status != EXCEPTION_NONE)) {
        current_syscall_error.type = seL4_FailedLookup;
        current_syscall_error.failedLookupWasSource = false;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (unlikely(find_ret.vspace_root != vspaceRoot)) {
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }

    pdSlot = lookupPDSlot(vspaceRoot, vaddr);

    if (pdSlot.status != EXCEPTION_NONE) {
        current_syscall_error.type = seL4_FailedLookup;
        current_syscall_error.failedLookupWasSource = false;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (unlikely(pde_pde_small_ptr_get_present(pdSlot.pdSlot) ||
                 pde_pde_large_ptr_get_present(pdSlot.pdSlot))) {
        current_syscall_error.type = seL4_DeleteFirst;
        return EXCEPTION_SYSCALL_ERROR;
    }

    pde = pde_pde_small_new(pptr_to_paddr(PTE_PTR(cap_page_table_cap_get_capPTBasePtr(cap))));

    cap_page_table_cap_ptr_set_capPTIsMapped(&cap, 1);
    cap_page_table_cap_ptr_set_capPTMappedASID(&cap, asid);
    cap_page_table_cap_ptr_set_capPTMappedAddress(&cap, vaddr);

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return performPageTableInvocationMap(cap, cte, pde, pdSlot.pdSlot);
}

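/* Decode invocations on a frame cap: mapping (including remapping at the
 * same address), unmapping, per-page cache maintenance and querying the
 * frame's physical address. */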
static exception_t decodeARMFrameInvocation(word_t invLabel, unsigned int length,
                                            cte_t *cte, cap_t cap, word_t *buffer)
{
    switch (invLabel) {
    case ARMPageMap: {
        vptr_t vaddr;
        paddr_t base;
        cap_t vspaceRootCap;
        vspace_root_t *vspaceRoot;
        asid_t asid, frame_asid;
        vm_rights_t vmRights;
        vm_page_size_t frameSize;
        vm_attributes_t attributes;
        findVSpaceForASID_ret_t find_ret;

        if (unlikely(length < 3 || current_extra_caps.excaprefs[0] == NULL)) {
            current_syscall_error.type = seL4_TruncatedMessage;
            return EXCEPTION_SYSCALL_ERROR;
        }

        vaddr = getSyscallArg(0, buffer);
        attributes = vmAttributesFromWord(getSyscallArg(2, buffer));
        vspaceRootCap = current_extra_caps.excaprefs[0]->cap;

        frameSize = cap_frame_cap_get_capFSize(cap);
        vmRights = maskVMRights(cap_frame_cap_get_capFVMRights(cap),
                                rightsFromWord(getSyscallArg(1, buffer)));

        if (unlikely(!isValidNativeRoot(vspaceRootCap))) {
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 1;
            return EXCEPTION_SYSCALL_ERROR;
        }

        vspaceRoot = cap_vtable_root_get_basePtr(vspaceRootCap);
        asid = cap_vtable_root_get_mappedASID(vspaceRootCap);

        find_ret = findVSpaceForASID(asid);
        if (unlikely(find_ret.status != EXCEPTION_NONE)) {
            current_syscall_error.type = seL4_FailedLookup;
            current_syscall_error.failedLookupWasSource = false;
            return EXCEPTION_SYSCALL_ERROR;
        }

        if (unlikely(find_ret.vspace_root != vspaceRoot)) {
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 1;
            return EXCEPTION_SYSCALL_ERROR;
        }

        if (unlikely(!IS_PAGE_ALIGNED(vaddr, frameSize))) {
            current_syscall_error.type = seL4_AlignmentError;
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* In the remap case, the cap must already carry a valid ASID. */
        frame_asid = cap_frame_cap_ptr_get_capFMappedASID(&cap);

        if (frame_asid != asidInvalid) {
            if (frame_asid != asid) {
                userError("ARMPageMap: Attempting to remap a frame that does not belong to the passed address space");
                current_syscall_error.type = seL4_InvalidCapability;
                current_syscall_error.invalidArgumentNumber = 0;
                return EXCEPTION_SYSCALL_ERROR;
            } else if (cap_frame_cap_get_capFMappedAddress(cap) != vaddr) {
                userError("ARMPageMap: Attempting to map frame into multiple addresses");
                current_syscall_error.type = seL4_InvalidArgument;
                current_syscall_error.invalidArgumentNumber = 2;
                return EXCEPTION_SYSCALL_ERROR;
            }
        } else {
            if (unlikely(vaddr + BIT(pageBitsForSize(frameSize)) - 1 > USER_TOP)) {
                current_syscall_error.type = seL4_InvalidArgument;
                current_syscall_error.invalidArgumentNumber = 0;
                return EXCEPTION_SYSCALL_ERROR;
            }
        }

        cap = cap_frame_cap_set_capFMappedASID(cap, asid);
        cap = cap_frame_cap_set_capFMappedAddress(cap, vaddr);

        base = pptr_to_paddr((void *)cap_frame_cap_get_capFBasePtr(cap));

        if (frameSize == ARMSmallPage) {
            lookupPTSlot_ret_t lu_ret = lookupPTSlot(vspaceRoot, vaddr);

            if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
                current_syscall_error.type = seL4_FailedLookup;
                current_syscall_error.failedLookupWasSource = false;
                return EXCEPTION_SYSCALL_ERROR;
            }

            setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
            return performSmallPageInvocationMap(asid, cap, cte,
                                                 makeUser3rdLevel(base, vmRights, attributes), lu_ret.ptSlot);

        } else if (frameSize == ARMLargePage) {
            lookupPDSlot_ret_t lu_ret = lookupPDSlot(vspaceRoot, vaddr);

            if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
                current_syscall_error.type = seL4_FailedLookup;
                current_syscall_error.failedLookupWasSource = false;
                return EXCEPTION_SYSCALL_ERROR;
            }

            setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
            return performLargePageInvocationMap(asid, cap, cte,
                                                 makeUser2ndLevel(base, vmRights, attributes), lu_ret.pdSlot);

        } else {
            lookupPUDSlot_ret_t lu_ret = lookupPUDSlot(vspaceRoot, vaddr);

            if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
                current_syscall_error.type = seL4_FailedLookup;
                current_syscall_error.failedLookupWasSource = false;
                return EXCEPTION_SYSCALL_ERROR;
            }

            setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
            return performHugePageInvocationMap(asid, cap, cte,
                                                makeUser1stLevel(base, vmRights, attributes), lu_ret.pudSlot);
        }
    }

    case ARMPageUnmap:
        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performPageInvocationUnmap(cap, cte);

    case ARMPageClean_Data:
    case ARMPageInvalidate_Data:
    case ARMPageCleanInvalidate_Data:
    case ARMPageUnify_Instruction: {
        vptr_t start, end;
        vptr_t vaddr;
        asid_t asid;
        word_t page_size;
        findVSpaceForASID_ret_t find_ret;

        if (length < 2) {
            userError("Page Flush: Truncated message.");
            current_syscall_error.type = seL4_TruncatedMessage;
            return EXCEPTION_SYSCALL_ERROR;
        }

        if (unlikely(cap_frame_cap_get_capFMappedASID(cap) == 0)) {
            userError("Page Flush: Frame is not mapped.");
            current_syscall_error.type = seL4_IllegalOperation;
            return EXCEPTION_SYSCALL_ERROR;
        }

        vaddr = cap_frame_cap_get_capFMappedAddress(cap);
        asid = cap_frame_cap_get_capFMappedASID(cap);

        find_ret = findVSpaceForASID(asid);
        if (unlikely(find_ret.status != EXCEPTION_NONE)) {
            userError("Page Flush: No PGD for ASID");
            current_syscall_error.type = seL4_FailedLookup;
            current_syscall_error.failedLookupWasSource = false;
            return EXCEPTION_SYSCALL_ERROR;
        }

        start = getSyscallArg(0, buffer);
        end   = getSyscallArg(1, buffer);

        /* Check that the range is sane. */
        if (end <= start) {
            userError("Page Flush: Invalid range");
            current_syscall_error.type = seL4_InvalidArgument;
            current_syscall_error.invalidArgumentNumber = 1;
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* start and end are offsets within this page. */
        page_size = BIT(pageBitsForSize(cap_frame_cap_get_capFSize(cap)));
        if (start >= page_size || end > page_size) {
            userError("Page Flush: Requested range not inside page");
            current_syscall_error.type = seL4_InvalidArgument;
            current_syscall_error.invalidArgumentNumber = 0;
            return EXCEPTION_SYSCALL_ERROR;
        }

        word_t pstart = pptr_to_paddr((void *)cap_frame_cap_get_capFBasePtr(cap)) + start;
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        /* Don't let applications flush outside of the kernel window. */
        if (pstart < PADDR_BASE || ((end - start) + pstart) > PADDR_TOP) {
            userError("Page Flush: Overlaps kernel region.");
            current_syscall_error.type = seL4_IllegalOperation;
            return EXCEPTION_SYSCALL_ERROR;
        }
#endif

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performPageFlush(invLabel, find_ret.vspace_root, asid, vaddr + start, vaddr + end - 1,
                                pstart);
    }

    case ARMPageGetAddress:
        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performPageGetAddress(cap_frame_cap_get_capFBasePtr(cap));

    default:
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }
}

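/* Top-level decoder for ARM MMU invocations: dispatch on the capability
 * type, handling ASID control and ASID pool invocations inline. */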
exception_t decodeARMMMUInvocation(word_t invLabel, word_t length, cptr_t cptr,
                                   cte_t *cte, cap_t cap, word_t *buffer)
{
    switch (cap_get_capType(cap)) {
    case cap_vtable_root_cap:
        return decodeARMVSpaceRootInvocation(invLabel, length, cte, cap, buffer);
#ifndef AARCH64_VSPACE_S2_START_L1
    case cap_page_upper_directory_cap:
        return decodeARMPageUpperDirectoryInvocation(invLabel, length, cte, cap, buffer);
#endif
    case cap_page_directory_cap:
        return decodeARMPageDirectoryInvocation(invLabel, length, cte, cap, buffer);

    case cap_page_table_cap:
        return decodeARMPageTableInvocation(invLabel, length, cte, cap, buffer);

    case cap_frame_cap:
        return decodeARMFrameInvocation(invLabel, length, cte, cap, buffer);

    case cap_asid_control_cap: {
        unsigned int i;
        asid_t asid_base;
        word_t index, depth;
        cap_t untyped, root;
        cte_t *parentSlot, *destSlot;
        lookupSlot_ret_t lu_ret;
        void *frame;
        exception_t status;

        if (unlikely(invLabel != ARMASIDControlMakePool)) {
            current_syscall_error.type = seL4_IllegalOperation;
            return EXCEPTION_SYSCALL_ERROR;
        }

        if (unlikely(length < 2 ||
                     current_extra_caps.excaprefs[0] == NULL ||
                     current_extra_caps.excaprefs[1] == NULL)) {
            current_syscall_error.type = seL4_TruncatedMessage;
            return EXCEPTION_SYSCALL_ERROR;
        }

        index = getSyscallArg(0, buffer);
        depth = getSyscallArg(1, buffer);
        parentSlot = current_extra_caps.excaprefs[0];
        untyped = parentSlot->cap;
        root = current_extra_caps.excaprefs[1]->cap;

        /* Find first free pool */
        for (i = 0; i < nASIDPools && armKSASIDTable[i]; i++);

        if (unlikely(i == nASIDPools)) { /* If no unallocated pool is found */
            current_syscall_error.type = seL4_DeleteFirst;
            return EXCEPTION_SYSCALL_ERROR;
        }

        asid_base = i << asidLowBits;

        if (unlikely(cap_get_capType(untyped) != cap_untyped_cap ||
                     cap_untyped_cap_get_capBlockSize(untyped) != seL4_ASIDPoolBits ||
                     cap_untyped_cap_get_capIsDevice(untyped))) {
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 1;
            return EXCEPTION_SYSCALL_ERROR;
        }

        status = ensureNoChildren(parentSlot);
        if (unlikely(status != EXCEPTION_NONE)) {
            return status;
        }

        frame = WORD_PTR(cap_untyped_cap_get_capPtr(untyped));

        lu_ret = lookupTargetSlot(root, index, depth);
        if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
            return lu_ret.status;
        }
        destSlot = lu_ret.slot;

        status = ensureEmptySlot(destSlot);
        if (unlikely(status != EXCEPTION_NONE)) {
            return status;
        }

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performASIDControlInvocation(frame, destSlot, parentSlot, asid_base);
    }

    case cap_asid_pool_cap: {
        cap_t vspaceCap;
        cte_t *vspaceCapSlot;
        asid_pool_t *pool;
        unsigned int i;
        asid_t asid;

        if (unlikely(invLabel != ARMASIDPoolAssign)) {
            current_syscall_error.type = seL4_IllegalOperation;
            return EXCEPTION_SYSCALL_ERROR;
        }

        if (unlikely(current_extra_caps.excaprefs[0] == NULL)) {
            current_syscall_error.type = seL4_TruncatedMessage;
            return EXCEPTION_SYSCALL_ERROR;
        }

        vspaceCapSlot = current_extra_caps.excaprefs[0];
        vspaceCap = vspaceCapSlot->cap;

        if (unlikely(!isVTableRoot(vspaceCap) || cap_vtable_root_isMapped(vspaceCap))) {
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 1;
            return EXCEPTION_SYSCALL_ERROR;
        }

        pool = armKSASIDTable[cap_asid_pool_cap_get_capASIDBase(cap) >> asidLowBits];

        if (unlikely(!pool)) {
            current_syscall_error.type = seL4_FailedLookup;
            current_syscall_error.failedLookupWasSource = false;
            current_lookup_fault = lookup_fault_invalid_root_new();
            return EXCEPTION_SYSCALL_ERROR;
        }

        if (unlikely(pool != ASID_POOL_PTR(cap_asid_pool_cap_get_capASIDPool(cap)))) {
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 0;
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* Find first free ASID */
        asid = cap_asid_pool_cap_get_capASIDBase(cap);
        for (i = 0; i < (1 << asidLowBits) && (asid + i == 0 || pool->array[i]); i++);

        if (unlikely(i == 1 << asidLowBits)) {
            current_syscall_error.type = seL4_DeleteFirst;
            return EXCEPTION_SYSCALL_ERROR;
        }

        asid += i;

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performASIDPoolInvocation(asid, pool, vspaceCapSlot);
    }

    default:
        fail("Invalid ARM arch cap type");
    }
}

#ifdef CONFIG_DEBUG_BUILD
void kernelPrefetchAbort(word_t pc) VISIBLE;
void kernelDataAbort(word_t pc) VISIBLE;

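/* Handlers for faults taken while executing the kernel itself: dump the
 * fault state and halt. */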
void kernelPrefetchAbort(word_t pc)
{
    printf("\n\nKERNEL PREFETCH ABORT!\n");
    printf("Faulting instruction: 0x%"SEL4_PRIx_word"\n", pc);
    printf("ESR (IFSR): 0x%"SEL4_PRIx_word"\n", getIFSR());
    halt();
}

void kernelDataAbort(word_t pc)
{
    printf("\n\nKERNEL DATA ABORT!\n");
    printf("Faulting instruction: 0x%"SEL4_PRIx_word"\n", pc);
    printf("FAR: 0x%"SEL4_PRIx_word" ESR (DFSR): 0x%"SEL4_PRIx_word"\n",
           getFAR(), getDFSR());
    halt();
}
#endif /* CONFIG_DEBUG_BUILD */

#ifdef CONFIG_PRINTING
typedef struct readWordFromVSpace_ret {
    exception_t status;
    word_t value;
} readWordFromVSpace_ret_t;

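/* Read a word from a user VSpace by walking its page tables and accessing
 * the backing frame through the kernel's physical memory window. */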
static readWordFromVSpace_ret_t readWordFromVSpace(vspace_root_t *pd, word_t vaddr)
{
    lookupFrame_ret_t lookup_frame_ret;
    readWordFromVSpace_ret_t ret;
    word_t offset;
    pptr_t kernel_vaddr;
    word_t *value;

    lookup_frame_ret = lookupFrame(pd, vaddr);

    if (!lookup_frame_ret.valid) {
        ret.status = EXCEPTION_LOOKUP_FAULT;
        return ret;
    }

    offset = vaddr & MASK(pageBitsForSize(lookup_frame_ret.frameSize));
    kernel_vaddr = (word_t)paddr_to_pptr(lookup_frame_ret.frameBase);
    value = (word_t *)(kernel_vaddr + offset);

    ret.status = EXCEPTION_NONE;
    ret.value = *value;
    return ret;
}

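/* Dump the top of the given thread's user-mode stack, one word per line. */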
void Arch_userStackTrace(tcb_t *tptr)
{
    cap_t threadRoot;
    vspace_root_t *vspaceRoot;
    word_t sp;

    threadRoot = TCB_PTR_CTE_PTR(tptr, tcbVTable)->cap;

    /* Look up the vspace root. */
    if (cap_get_capType(threadRoot) != cap_vtable_root_cap) {
        printf("Invalid vspace\n");
        return;
    }

    vspaceRoot = cap_vtable_root_get_basePtr(threadRoot);
    sp = getRegister(tptr, SP_EL0);

    /* Check for alignment so we don't have to worry about accessing
     * words that might be on two different pages. */
    if (!IS_ALIGNED(sp, seL4_WordSizeBits)) {
        printf("SP not aligned\n");
        return;
    }

    for (unsigned int i = 0; i < CONFIG_USER_STACK_TRACE_LENGTH; i++) {
        word_t address = sp + (i * sizeof(word_t));
        readWordFromVSpace_ret_t result = readWordFromVSpace(vspaceRoot,
                                                             address);
        if (result.status == EXCEPTION_NONE) {
            printf("0x%"SEL4_PRIx_word": 0x%"SEL4_PRIx_word"\n",
                   address, result.value);
        } else {
            printf("0x%"SEL4_PRIx_word": INVALID\n", address);
        }
    }
}
#endif /* CONFIG_PRINTING */

#if defined(CONFIG_KERNEL_LOG_BUFFER)
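/* Map a user-supplied 2M frame as the kernel log buffer by installing a
 * large-page entry in the kernel's dedicated log buffer PDE. */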
exception_t benchmark_arch_map_logBuffer(word_t frame_cptr)
{
    lookupCapAndSlot_ret_t lu_ret;
    vm_page_size_t frameSize;
    pptr_t frame_pptr;

    /* Failures in this section are reported to the user as cap faults. */
    lu_ret = lookupCapAndSlot(NODE_STATE(ksCurThread), frame_cptr);

    if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
        userError("Invalid cap #%lu.", frame_cptr);
        current_fault = seL4_Fault_CapFault_new(frame_cptr, false);
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (cap_get_capType(lu_ret.cap) != cap_frame_cap) {
        userError("Invalid cap. The log buffer must be backed by a frame cap.");
        current_fault = seL4_Fault_CapFault_new(frame_cptr, false);
        return EXCEPTION_SYSCALL_ERROR;
    }

    frameSize = cap_frame_cap_get_capFSize(lu_ret.cap);

    if (frameSize != ARMLargePage) {
        userError("Invalid frame size. The kernel expects a 2M log buffer.");
        current_fault = seL4_Fault_CapFault_new(frame_cptr, false);
        return EXCEPTION_SYSCALL_ERROR;
    }

    frame_pptr = cap_frame_cap_get_capFBasePtr(lu_ret.cap);

    ksUserLogBuffer = pptr_to_paddr((void *) frame_pptr);

    *armKSGlobalLogPDE = pde_pde_large_new(
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                             0, // XN
#else
                             1, // UXN
#endif
                             ksUserLogBuffer,
                             0,                         /* global */
                             1,                         /* access flag */
                             SMP_TERNARY(SMP_SHARE, 0), /* Inner-shareable if SMP enabled, otherwise unshared */
                             0,                         /* VMKernelOnly */
                             NORMAL);

    cleanByVA_PoU((vptr_t)armKSGlobalLogPDE, addrFromKPPtr(armKSGlobalLogPDE));
    invalidateTranslationSingle(KS_LOG_PPTR);
    return EXCEPTION_NONE;
}
#endif /* CONFIG_KERNEL_LOG_BUFFER */