/*
 * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include <config.h>

#ifdef CONFIG_VTX

#include <model/statedata.h>
#include <arch/kernel/ept.h>
#include <arch/api/invocation.h>

struct lookupEPTPDPTSlot_ret {
    exception_t status;
    ept_pdpte_t *pdptSlot;
};
typedef struct lookupEPTPDPTSlot_ret lookupEPTPDPTSlot_ret_t;

struct lookupEPTPDSlot_ret {
    exception_t status;
    ept_pde_t *pdSlot;
};
typedef struct lookupEPTPDSlot_ret lookupEPTPDSlot_ret_t;

struct lookupEPTPTSlot_ret {
    exception_t status;
    ept_pte_t *ptSlot;
};
typedef struct lookupEPTPTSlot_ret lookupEPTPTSlot_ret_t;

enum ept_cache_options {
    EPTUncacheable = 0,
    EPTWriteCombining = 1,
    EPTWriteThrough = 4,
    EPTWriteProtected = 5,
    EPTWriteBack = 6
};
typedef enum ept_cache_options ept_cache_options_t;

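/* Clear the ASID pool entry for 'asid' if it currently records 'ept' as its
 * EPT root, so that a deleted EPT PML4 can no longer be found via the ASID. */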
void deleteEPTASID(asid_t asid, ept_pml4e_t *ept)
{
    asid_pool_t *poolPtr;

    poolPtr = x86KSASIDTable[asid >> asidLowBits];
    if (poolPtr != NULL) {
        asid_map_t asid_map = poolPtr->array[asid & MASK(asidLowBits)];
        if (asid_map_get_type(asid_map) == asid_map_asid_map_ept &&
            (ept_pml4e_t *)asid_map_asid_map_ept_get_ept_root(asid_map) == ept) {
            poolPtr->array[asid & MASK(asidLowBits)] = asid_map_asid_map_none_new();
        }
    }
}

exception_t performX86EPTPageInvocationUnmap(cap_t cap, cte_t *ctSlot)
{
    unmapEPTPage(
        cap_frame_cap_get_capFSize(cap),
        cap_frame_cap_get_capFMappedASID(cap),
        cap_frame_cap_get_capFMappedAddress(cap),
        (void *)cap_frame_cap_get_capFBasePtr(cap)
    );

    cap_frame_cap_ptr_set_capFMappedAddress(&ctSlot->cap, 0);
    cap_frame_cap_ptr_set_capFMappedASID(&ctSlot->cap, asidInvalid);
    cap_frame_cap_ptr_set_capFMapType(&ctSlot->cap, X86_MappingNone);

    return EXCEPTION_NONE;
}

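/* Resolve 'asid' to the EPT PML4 it is bound to. If the ASID does not map to
 * an EPT root, an invalid-root lookup fault is reported. */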
findEPTForASID_ret_t findEPTForASID(asid_t asid)
{
    findEPTForASID_ret_t ret;
    asid_map_t asid_map;

    asid_map = findMapForASID(asid);
    if (asid_map_get_type(asid_map) != asid_map_asid_map_ept) {
        current_lookup_fault = lookup_fault_invalid_root_new();

        ret.ept = NULL;
        ret.status = EXCEPTION_LOOKUP_FAULT;
        return ret;
    }

    ret.ept = (ept_pml4e_t *)asid_map_asid_map_ept_get_ept_root(asid_map);
    ret.status = EXCEPTION_NONE;
    return ret;
}

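/* The lookup helpers below walk the EPT structure one level at a time:
 * PML4 slot -> PDPT slot -> PD slot -> PT slot. Each level checks the read
 * bit of the entry it traverses and reports a missing-capability lookup
 * fault at that level's index offset if the next structure is not present. */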
static ept_pml4e_t *CONST lookupEPTPML4Slot(ept_pml4e_t *pml4, vptr_t vptr)
{
    return pml4 + GET_EPT_PML4_INDEX(vptr);
}

static lookupEPTPDPTSlot_ret_t CONST lookupEPTPDPTSlot(ept_pml4e_t *pml4, vptr_t vptr)
{
    lookupEPTPDPTSlot_ret_t ret;
    ept_pml4e_t *pml4Slot;

    pml4Slot = lookupEPTPML4Slot(pml4, vptr);

    if (!ept_pml4e_ptr_get_read(pml4Slot)) {
        current_lookup_fault = lookup_fault_missing_capability_new(EPT_PML4_INDEX_OFFSET);

        ret.pdptSlot = NULL;
        ret.status = EXCEPTION_LOOKUP_FAULT;
        return ret;
    }

    ept_pdpte_t *pdpt = paddr_to_pptr(ept_pml4e_ptr_get_pdpt_base_address(pml4Slot));
    uint32_t index = GET_EPT_PDPT_INDEX(vptr);
    ret.pdptSlot = pdpt + index;
    ret.status = EXCEPTION_NONE;
    return ret;
}

static lookupEPTPDSlot_ret_t lookupEPTPDSlot(ept_pml4e_t *pml4, vptr_t vptr)
{
    lookupEPTPDSlot_ret_t ret;
    lookupEPTPDPTSlot_ret_t lu_ret;

    lu_ret = lookupEPTPDPTSlot(pml4, vptr);
    if (lu_ret.status != EXCEPTION_NONE) {
        current_syscall_error.type = seL4_FailedLookup;
        current_syscall_error.failedLookupWasSource = false;
        /* current_lookup_fault will have been set by lookupEPTPDPTSlot */
        ret.pdSlot = NULL;
        ret.status = EXCEPTION_LOOKUP_FAULT;
        return ret;
    }

    if (!ept_pdpte_ptr_get_read(lu_ret.pdptSlot)) {
        current_lookup_fault = lookup_fault_missing_capability_new(EPT_PDPT_INDEX_OFFSET);

        ret.pdSlot = NULL;
        ret.status = EXCEPTION_LOOKUP_FAULT;
        return ret;
    }

    ept_pde_t *pd = paddr_to_pptr(ept_pdpte_ptr_get_pd_base_address(lu_ret.pdptSlot));
    uint32_t index = GET_EPT_PD_INDEX(vptr);
    ret.pdSlot = pd + index;
    ret.status = EXCEPTION_NONE;
    return ret;
}

static lookupEPTPTSlot_ret_t lookupEPTPTSlot(ept_pml4e_t *pml4, vptr_t vptr)
{
    lookupEPTPTSlot_ret_t ret;
    lookupEPTPDSlot_ret_t lu_ret;

    lu_ret = lookupEPTPDSlot(pml4, vptr);
    if (lu_ret.status != EXCEPTION_NONE) {
        current_syscall_error.type = seL4_FailedLookup;
        current_syscall_error.failedLookupWasSource = false;
        /* current_lookup_fault will have been set by lookupEPTPDSlot */
        ret.ptSlot = NULL;
        ret.status = EXCEPTION_LOOKUP_FAULT;
        return ret;
    }

    if ((ept_pde_ptr_get_page_size(lu_ret.pdSlot) != ept_pde_ept_pde_pt) ||
        !ept_pde_ept_pde_pt_ptr_get_read(lu_ret.pdSlot)) {
        current_lookup_fault = lookup_fault_missing_capability_new(EPT_PD_INDEX_OFFSET);

        ret.ptSlot = NULL;
        ret.status = EXCEPTION_LOOKUP_FAULT;
        return ret;
    }

    ept_pte_t *pt = paddr_to_pptr(ept_pde_ept_pde_pt_ptr_get_pt_base_address(lu_ret.pdSlot));
    uint32_t index = GET_EPT_PT_INDEX(vptr);

    ret.ptSlot = pt + index;
    ret.status = EXCEPTION_NONE;
    return ret;
}

static ept_cache_options_t eptCacheFromVmAttr(vm_attributes_t vmAttr)
{
    /* Need to sanitise user input, vmAttr will not have been verified at this point. */
    ept_cache_options_t option = vmAttr.words[0];
    if (option != EPTUncacheable &&
        option != EPTWriteCombining &&
        option != EPTWriteThrough &&
        option != EPTWriteBack) {
        option = EPTWriteBack;
    }
    return option;
}

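/* Check whether 'pdpt' is (still) mapped at 'vptr' in the EPT identified by
 * 'asid'. On success the owning PML4 and the PML4 slot pointing to the PDPT
 * are returned so the caller can clear and invalidate the mapping. */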
EPTPDPTMapped_ret_t EPTPDPTMapped(asid_t asid, vptr_t vptr, ept_pdpte_t *pdpt)
{
    EPTPDPTMapped_ret_t ret;
    findEPTForASID_ret_t asid_ret;
    ept_pml4e_t *pml4Slot;

    asid_ret = findEPTForASID(asid);
    if (asid_ret.status != EXCEPTION_NONE) {
        ret.pml4 = NULL;
        ret.pml4Slot = NULL;
        ret.status = asid_ret.status;
        return ret;
    }

    pml4Slot = lookupEPTPML4Slot(asid_ret.ept, vptr);

    if (ept_pml4e_ptr_get_read(pml4Slot)
        && ptrFromPAddr(ept_pml4e_ptr_get_pdpt_base_address(pml4Slot)) == pdpt) {
        ret.pml4 = asid_ret.ept;
        ret.pml4Slot = pml4Slot;
        ret.status = EXCEPTION_NONE;
        return ret;
    } else {
        ret.pml4 = NULL;
        ret.pml4Slot = NULL;
        ret.status = EXCEPTION_LOOKUP_FAULT;
        return ret;
    }
}

void unmapEPTPDPT(asid_t asid, vptr_t vaddr, ept_pdpte_t *pdpt)
{
    EPTPDPTMapped_ret_t lu_ret;

    lu_ret = EPTPDPTMapped(asid, vaddr, pdpt);

    if (lu_ret.status == EXCEPTION_NONE) {
        *lu_ret.pml4Slot = ept_pml4e_new(0, 0, 0, 0);
        invept(lu_ret.pml4);
    }
}

static exception_t performEPTPDPTInvocationUnmap(cap_t cap, cte_t *cte)
{
    if (cap_ept_pdpt_cap_get_capPDPTIsMapped(cap)) {
        ept_pdpte_t *pdpt = (ept_pdpte_t *)cap_ept_pdpt_cap_get_capPDPTBasePtr(cap);
        unmapEPTPDPT(
            cap_ept_pdpt_cap_get_capPDPTMappedASID(cap),
            cap_ept_pdpt_cap_get_capPDPTMappedAddress(cap),
            pdpt);
        clearMemory((void *)pdpt, cap_get_capSizeBits(cap));
    }
    cap_ept_pdpt_cap_ptr_set_capPDPTIsMapped(&(cte->cap), 0);

    return EXCEPTION_NONE;
}

static exception_t performEPTPDPTInvocationMap(cap_t cap, cte_t *cte, ept_pml4e_t pml4e, ept_pml4e_t *pml4Slot,
                                               ept_pml4e_t *pml4)
{
    cte->cap = cap;
    *pml4Slot = pml4e;
    invept(pml4);

    return EXCEPTION_NONE;
}

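/* Decode map and unmap invocations on an EPT PDPT capability. Mapping takes
 * an EPT PML4 capability as the first extra cap and a guest-physical address
 * that is truncated to PML4-entry granularity before the slot is looked up. */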
static exception_t decodeX86EPTPDPTInvocation(
    word_t invLabel,
    word_t length,
    cte_t *cte,
    cap_t cap,
    word_t *buffer
)
{
    word_t vaddr;
    cap_t pml4Cap;
    ept_pml4e_t *pml4;
    ept_pml4e_t pml4e;
    paddr_t paddr;
    asid_t asid;
    findEPTForASID_ret_t find_ret;
    ept_pml4e_t *pml4Slot;

    if (invLabel == X86EPTPDPTUnmap) {
        if (!isFinalCapability(cte)) {
            current_syscall_error.type = seL4_RevokeFirst;
            return EXCEPTION_SYSCALL_ERROR;
        }
        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performEPTPDPTInvocationUnmap(cap, cte);
    }

    if (invLabel != X86EPTPDPTMap) {
        userError("X86EPTPDPT Illegal operation.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (length < 2 || current_extra_caps.excaprefs[0] == NULL) {
        userError("X86EPTPDPTMap: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (cap_ept_pdpt_cap_get_capPDPTIsMapped(cap)) {
        userError("X86EPTPDPTMap: EPT PDPT is already mapped to a PML4.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 0;

        return EXCEPTION_SYSCALL_ERROR;
    }

    vaddr = getSyscallArg(0, buffer);
    /* cannot use ~MASK(EPT_PML4_INDEX_OFFSET) because on 32-bit compilations
     * this results in an error shifting by greater than 31 bits, so we manually
     * force a 64-bit variable to do the shifting with */
    vaddr = vaddr & ~(((uint64_t)1 << EPT_PML4_INDEX_OFFSET) - 1);
    pml4Cap = current_extra_caps.excaprefs[0]->cap;

    if (cap_get_capType(pml4Cap) != cap_ept_pml4_cap) {
        userError("X86EPTPDPTMap: Not a valid EPT PML4.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;

        return EXCEPTION_SYSCALL_ERROR;
    }

    pml4 = (ept_pml4e_t *)cap_ept_pml4_cap_get_capPML4BasePtr(pml4Cap);
    asid = cap_ept_pml4_cap_get_capPML4MappedASID(pml4Cap);

    find_ret = findEPTForASID(asid);
    if (find_ret.status != EXCEPTION_NONE) {
        current_syscall_error.type = seL4_FailedLookup;
        current_syscall_error.failedLookupWasSource = false;

        return EXCEPTION_SYSCALL_ERROR;
    }

    if (find_ret.ept != pml4) {
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;

        return EXCEPTION_SYSCALL_ERROR;
    }

    pml4Slot = lookupEPTPML4Slot(pml4, vaddr);

    if (ept_pml4e_ptr_get_read(pml4Slot)) {
        userError("X86EPTPDPTMap: PDPT already mapped here.");
        current_syscall_error.type = seL4_DeleteFirst;
        return EXCEPTION_SYSCALL_ERROR;
    }

    paddr = pptr_to_paddr((void *)cap_ept_pdpt_cap_get_capPDPTBasePtr(cap));
    pml4e = ept_pml4e_new(
                paddr,
                1,
                1,
                1
            );

    cap = cap_ept_pdpt_cap_set_capPDPTIsMapped(cap, 1);
    cap = cap_ept_pdpt_cap_set_capPDPTMappedASID(cap, asid);
    cap = cap_ept_pdpt_cap_set_capPDPTMappedAddress(cap, vaddr);

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return performEPTPDPTInvocationMap(cap, cte, pml4e, pml4Slot, pml4);
}

exception_t decodeX86EPTInvocation(
    word_t invLabel,
    word_t length,
    cptr_t cptr,
    cte_t *cte,
    cap_t cap,
    word_t *buffer
)
{
    switch (cap_get_capType(cap)) {
    case cap_ept_pdpt_cap:
        return decodeX86EPTPDPTInvocation(invLabel, length, cte, cap, buffer);
    case cap_ept_pd_cap:
        return decodeX86EPTPDInvocation(invLabel, length, cte, cap, buffer);
    case cap_ept_pt_cap:
        return decodeX86EPTPTInvocation(invLabel, length, cte, cap, buffer);
    default:
        fail("Invalid cap type");
    }
}

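/* Check whether 'pd' is (still) mapped at 'vaddr' in the EPT identified by
 * 'asid'. On success the owning PML4 and the PDPT slot pointing to the page
 * directory are returned. */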
EPTPageDirectoryMapped_ret_t EPTPageDirectoryMapped(asid_t asid, vptr_t vaddr, ept_pde_t *pd)
{
    EPTPageDirectoryMapped_ret_t ret;
    lookupEPTPDPTSlot_ret_t find_ret;
    findEPTForASID_ret_t asid_ret;

    asid_ret = findEPTForASID(asid);
    if (asid_ret.status != EXCEPTION_NONE) {
        ret.pml4 = NULL;
        ret.pdptSlot = NULL;
        ret.status = asid_ret.status;
        return ret;
    }

    find_ret = lookupEPTPDPTSlot(asid_ret.ept, vaddr);
    if (find_ret.status != EXCEPTION_NONE) {
        ret.pml4 = NULL;
        ret.pdptSlot = NULL;
        ret.status = find_ret.status;
        return ret;
    }

    if (ept_pdpte_ptr_get_read(find_ret.pdptSlot)
        && ptrFromPAddr(ept_pdpte_ptr_get_pd_base_address(find_ret.pdptSlot)) == pd) {
        ret.pml4 = asid_ret.ept;
        ret.pdptSlot = find_ret.pdptSlot;
        ret.status = EXCEPTION_NONE;
        return ret;
    } else {
        ret.pml4 = NULL;
        ret.pdptSlot = NULL;
        ret.status = EXCEPTION_LOOKUP_FAULT;
        return ret;
    }
}

void unmapEPTPageDirectory(asid_t asid, vptr_t vaddr, ept_pde_t *pd)
{
    EPTPageDirectoryMapped_ret_t lu_ret;

    lu_ret = EPTPageDirectoryMapped(asid, vaddr, pd);

    if (lu_ret.status == EXCEPTION_NONE) {
        *lu_ret.pdptSlot = ept_pdpte_new(
                               0,  /* pd_base_address */
                               0,  /* avl_cte_depth */
                               0,  /* execute */
                               0,  /* write */
                               0   /* read */
                           );
        invept(lu_ret.pml4);
    }
}

static exception_t performEPTPDInvocationUnmap(cap_t cap, cte_t *cte)
{
    if (cap_ept_pd_cap_get_capPDIsMapped(cap)) {
        ept_pde_t *pd = (ept_pde_t *)cap_ept_pd_cap_get_capPDBasePtr(cap);
        unmapEPTPageDirectory(
            cap_ept_pd_cap_get_capPDMappedASID(cap),
            cap_ept_pd_cap_get_capPDMappedAddress(cap),
            pd);
        clearMemory((void *)pd, cap_get_capSizeBits(cap));
    }
    cap_ept_pd_cap_ptr_set_capPDIsMapped(&(cte->cap), 0);

    return EXCEPTION_NONE;
}

static exception_t performEPTPDInvocationMap(cap_t cap, cte_t *cte, ept_pdpte_t pdpte, ept_pdpte_t *pdptSlot,
                                             ept_pml4e_t *pml4)
{
    cte->cap = cap;
    *pdptSlot = pdpte;
    invept(pml4);

    return EXCEPTION_NONE;
}

exception_t decodeX86EPTPDInvocation(
    word_t invLabel,
    word_t length,
    cte_t *cte,
    cap_t cap,
    word_t *buffer
)
{
    word_t vaddr;
    cap_t pml4Cap;
    ept_pml4e_t *pml4;
    ept_pdpte_t pdpte;
    paddr_t paddr;
    asid_t asid;
    findEPTForASID_ret_t find_ret;
    lookupEPTPDPTSlot_ret_t lu_ret;

    if (invLabel == X86EPTPDUnmap) {
        if (!isFinalCapability(cte)) {
            current_syscall_error.type = seL4_RevokeFirst;
            return EXCEPTION_SYSCALL_ERROR;
        }
        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performEPTPDInvocationUnmap(cap, cte);
    }

    if (invLabel != X86EPTPDMap) {
        userError("X86EPTPD Illegal operation.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (length < 2 || current_extra_caps.excaprefs[0] == NULL) {
        userError("X86EPTPDMap: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (cap_ept_pd_cap_get_capPDIsMapped(cap)) {
        userError("X86EPTPDMap: EPT Page directory is already mapped to a PDPT.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 0;

        return EXCEPTION_SYSCALL_ERROR;
    }

    vaddr = getSyscallArg(0, buffer);
    vaddr = vaddr & ~MASK(EPT_PDPT_INDEX_OFFSET);
    pml4Cap = current_extra_caps.excaprefs[0]->cap;

    if (cap_get_capType(pml4Cap) != cap_ept_pml4_cap) {
        userError("X86EPTPDMap: Not a valid EPT pml4.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;

        return EXCEPTION_SYSCALL_ERROR;
    }

    pml4 = (ept_pml4e_t *)cap_ept_pml4_cap_get_capPML4BasePtr(pml4Cap);
    asid = cap_ept_pml4_cap_get_capPML4MappedASID(pml4Cap);

    find_ret = findEPTForASID(asid);
    if (find_ret.status != EXCEPTION_NONE) {
        userError("X86EPTPDMap: EPT PML4 is not mapped.");
        current_syscall_error.type = seL4_FailedLookup;
        current_syscall_error.failedLookupWasSource = false;

        return EXCEPTION_SYSCALL_ERROR;
    }

    if (find_ret.ept != pml4) {
        userError("X86EPTPDMap: EPT PML4 asid is invalid.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;

        return EXCEPTION_SYSCALL_ERROR;
    }

    lu_ret = lookupEPTPDPTSlot(pml4, vaddr);
    if (lu_ret.status != EXCEPTION_NONE) {
        current_syscall_error.type = seL4_FailedLookup;
        current_syscall_error.failedLookupWasSource = false;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (ept_pdpte_ptr_get_read(lu_ret.pdptSlot)) {
        userError("X86EPTPDMap: Page directory already mapped here.");
        current_syscall_error.type = seL4_DeleteFirst;
        return EXCEPTION_SYSCALL_ERROR;
    }

    paddr = pptr_to_paddr((void *)(cap_ept_pd_cap_get_capPDBasePtr(cap)));
    pdpte = ept_pdpte_new(
                paddr,  /* pd_base_address */
                0,      /* avl_cte_depth */
                1,      /* execute */
                1,      /* write */
                1       /* read */
            );

    cap = cap_ept_pd_cap_set_capPDIsMapped(cap, 1);
    cap = cap_ept_pd_cap_set_capPDMappedASID(cap, asid);
    cap = cap_ept_pd_cap_set_capPDMappedAddress(cap, vaddr);

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return performEPTPDInvocationMap(cap, cte, pdpte, lu_ret.pdptSlot, pml4);
}

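/* Check whether 'pt' is (still) mapped at 'vaddr' in the EPT identified by
 * 'asid'. On success the owning PML4 and the PD slot pointing to the page
 * table are returned. */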
EPTPageTableMapped_ret_t EPTPageTableMapped(asid_t asid, vptr_t vaddr, ept_pte_t *pt)
{
    EPTPageTableMapped_ret_t ret;
    lookupEPTPDSlot_ret_t find_ret;
    findEPTForASID_ret_t asid_ret;

    asid_ret = findEPTForASID(asid);
    if (asid_ret.status != EXCEPTION_NONE) {
        ret.pml4 = NULL;
        ret.pdSlot = NULL;
        ret.status = asid_ret.status;
        return ret;
    }

    find_ret = lookupEPTPDSlot(asid_ret.ept, vaddr);
    if (find_ret.status != EXCEPTION_NONE) {
        ret.pml4 = NULL;
        ret.pdSlot = NULL;
        ret.status = find_ret.status;
        return ret;
    }

    if (ept_pde_ptr_get_page_size(find_ret.pdSlot) == ept_pde_ept_pde_pt
        && ptrFromPAddr(ept_pde_ept_pde_pt_ptr_get_pt_base_address(find_ret.pdSlot)) == pt) {
        ret.pml4 = asid_ret.ept;
        ret.pdSlot = find_ret.pdSlot;
        ret.status = EXCEPTION_NONE;
        return ret;
    } else {
        ret.pml4 = NULL;
        ret.pdSlot = NULL;
        ret.status = EXCEPTION_LOOKUP_FAULT;
        return ret;
    }
}

void unmapEPTPageTable(asid_t asid, vptr_t vaddr, ept_pte_t *pt)
{
    EPTPageTableMapped_ret_t lu_ret;

    lu_ret = EPTPageTableMapped(asid, vaddr, pt);

    if (lu_ret.status == EXCEPTION_NONE) {
        *lu_ret.pdSlot = ept_pde_ept_pde_pt_new(
                             0,  /* pt_base_address */
                             0,  /* avl_cte_depth */
                             0,  /* execute */
                             0,  /* write */
                             0   /* read */
                         );
        invept(lu_ret.pml4);
    }
}

static exception_t performEPTPTInvocationUnmap(cap_t cap, cte_t *cte)
{
    if (cap_ept_pt_cap_get_capPTIsMapped(cap)) {
        ept_pte_t *pt = (ept_pte_t *)cap_ept_pt_cap_get_capPTBasePtr(cap);
        unmapEPTPageTable(
            cap_ept_pt_cap_get_capPTMappedASID(cap),
            cap_ept_pt_cap_get_capPTMappedAddress(cap),
            pt);
        clearMemory((void *)pt, cap_get_capSizeBits(cap));
    }
    cap_ept_pt_cap_ptr_set_capPTIsMapped(&(cte->cap), 0);

    return EXCEPTION_NONE;
}

static exception_t performEPTPTInvocationMap(cap_t cap, cte_t *cte, ept_pde_t pde, ept_pde_t *pdSlot, ept_pml4e_t *pml4)
{
    cte->cap = cap;
    *pdSlot = pde;
    invept(pml4);

    return EXCEPTION_NONE;
}

exception_t decodeX86EPTPTInvocation(
    word_t invLabel,
    word_t length,
    cte_t *cte,
    cap_t cap,
    word_t *buffer
)
{
    word_t vaddr;
    cap_t pml4Cap;
    ept_pml4e_t *pml4;
    ept_pde_t pde;
    paddr_t paddr;
    asid_t asid;
    findEPTForASID_ret_t find_ret;
    lookupEPTPDSlot_ret_t lu_ret;

    if (invLabel == X86EPTPTUnmap) {
        if (!isFinalCapability(cte)) {
            current_syscall_error.type = seL4_RevokeFirst;
            return EXCEPTION_SYSCALL_ERROR;
        }
        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performEPTPTInvocationUnmap(cap, cte);
    }

    if (invLabel != X86EPTPTMap) {
        userError("X86EPTPT Illegal operation.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (length < 2 || current_extra_caps.excaprefs[0] == NULL) {
        userError("X86EPTPT: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (cap_ept_pt_cap_get_capPTIsMapped(cap)) {
        userError("X86EPTPT EPT Page table is already mapped to an EPT page directory.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 0;

        return EXCEPTION_SYSCALL_ERROR;
    }

    vaddr = getSyscallArg(0, buffer);
    vaddr = vaddr & ~MASK(EPT_PD_INDEX_OFFSET);
    pml4Cap = current_extra_caps.excaprefs[0]->cap;

    if (cap_get_capType(pml4Cap) != cap_ept_pml4_cap ||
        !cap_ept_pml4_cap_get_capPML4IsMapped(pml4Cap)) {
        userError("X86EPTPTMap: Not a valid EPT pml4.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;

        return EXCEPTION_SYSCALL_ERROR;
    }

    pml4 = (ept_pml4e_t *)(cap_ept_pml4_cap_get_capPML4BasePtr(pml4Cap));
    asid = cap_ept_pml4_cap_get_capPML4MappedASID(pml4Cap);

    find_ret = findEPTForASID(asid);
    if (find_ret.status != EXCEPTION_NONE) {
        current_syscall_error.type = seL4_FailedLookup;
        current_syscall_error.failedLookupWasSource = false;

        return EXCEPTION_SYSCALL_ERROR;
    }

    if (find_ret.ept != pml4) {
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;

        return EXCEPTION_SYSCALL_ERROR;
    }

    lu_ret = lookupEPTPDSlot(pml4, vaddr);
    if (lu_ret.status != EXCEPTION_NONE) {
        current_syscall_error.type = seL4_FailedLookup;
        current_syscall_error.failedLookupWasSource = false;
        /* current_lookup_fault will have been set by lookupEPTPDSlot */
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (((ept_pde_ptr_get_page_size(lu_ret.pdSlot) == ept_pde_ept_pde_pt) &&
         ept_pde_ept_pde_pt_ptr_get_read(lu_ret.pdSlot)) ||
        ((ept_pde_ptr_get_page_size(lu_ret.pdSlot) == ept_pde_ept_pde_2m) &&
         ept_pde_ept_pde_2m_ptr_get_read(lu_ret.pdSlot))) {
        userError("X86EPTPTMap: Page table already mapped here");
        current_syscall_error.type = seL4_DeleteFirst;
        return EXCEPTION_SYSCALL_ERROR;
    }

    paddr = pptr_to_paddr((void *)(cap_ept_pt_cap_get_capPTBasePtr(cap)));
    pde = ept_pde_ept_pde_pt_new(
              paddr,  /* pt_base_address */
              0,      /* avl_cte_depth */
              1,      /* execute */
              1,      /* write */
              1       /* read */
          );

    cap = cap_ept_pt_cap_set_capPTIsMapped(cap, 1);
    cap = cap_ept_pt_cap_set_capPTMappedASID(cap, asid);
    cap = cap_ept_pt_cap_set_capPTMappedAddress(cap, vaddr);

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return performEPTPTInvocationMap(cap, cte, pde, lu_ret.pdSlot, pml4);
}

static exception_t performEPTPageMapPTE(cap_t cap, cte_t *cte, ept_pte_t *ptSlot, ept_pte_t pte, ept_pml4e_t *pml4)
{
    *ptSlot = pte;
    cte->cap = cap;
    invept(pml4);

    return EXCEPTION_NONE;
}

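/* Install a large-page frame mapping. EPT page directory entries cover 2MiB,
 * so when the native large page is 4MiB (LARGE_PAGE_BITS == 22) the mapping
 * is written as two consecutive 2MiB entries. */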
static exception_t performEPTPageMapPDE(cap_t cap, cte_t *cte, ept_pde_t *pdSlot, ept_pde_t pde1, ept_pde_t pde2,
                                        ept_pml4e_t *pml4)
{
    pdSlot[0] = pde1;
    if (LARGE_PAGE_BITS == 22) {
        pdSlot[1] = pde2;
    }
    cte->cap = cap;
    invept(pml4);

    return EXCEPTION_NONE;
}

exception_t decodeX86EPTPageMap(
    word_t invLabel,
    word_t length,
    cte_t *cte,
    cap_t cap,
    word_t *buffer)
{
    word_t vaddr;
    word_t w_rightsMask;
    paddr_t paddr;
    cap_t pml4Cap;
    ept_pml4e_t *pml4;
    vm_rights_t capVMRights;
    vm_rights_t vmRights;
    vm_attributes_t vmAttr;
    vm_page_size_t frameSize;
    asid_t asid;

    frameSize = cap_frame_cap_get_capFSize(cap);
    vaddr = getSyscallArg(0, buffer);
    vaddr = vaddr & ~MASK(EPT_PT_INDEX_OFFSET);
    w_rightsMask = getSyscallArg(1, buffer);
    vmAttr = vmAttributesFromWord(getSyscallArg(2, buffer));
    pml4Cap = current_extra_caps.excaprefs[0]->cap;

    capVMRights = cap_frame_cap_get_capFVMRights(cap);

    if (cap_frame_cap_get_capFMappedASID(cap) != asidInvalid) {
        userError("X86EPTPageMap: Frame already mapped.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 0;

        return EXCEPTION_SYSCALL_ERROR;
    }

    assert(cap_frame_cap_get_capFMapType(cap) == X86_MappingNone);

    if (cap_get_capType(pml4Cap) != cap_ept_pml4_cap ||
        !cap_ept_pml4_cap_get_capPML4IsMapped(pml4Cap)) {
        userError("X86EPTPageMap: Attempting to map frame into invalid ept pml4.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;

        return EXCEPTION_SYSCALL_ERROR;
    }

    pml4 = (ept_pml4e_t *)(cap_ept_pml4_cap_get_capPML4BasePtr(pml4Cap));
    asid = cap_ept_pml4_cap_get_capPML4MappedASID(pml4Cap);

    findEPTForASID_ret_t find_ret = findEPTForASID(asid);
    if (find_ret.status != EXCEPTION_NONE) {
        current_syscall_error.type = seL4_FailedLookup;
        current_syscall_error.failedLookupWasSource = false;

        return EXCEPTION_SYSCALL_ERROR;
    }

    if (find_ret.ept != pml4) {
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;

        return EXCEPTION_SYSCALL_ERROR;
    }

    vmRights = maskVMRights(capVMRights, rightsFromWord(w_rightsMask));

    if (!checkVPAlignment(frameSize, vaddr)) {
        current_syscall_error.type = seL4_AlignmentError;

        return EXCEPTION_SYSCALL_ERROR;
    }

    paddr = pptr_to_paddr((void *)cap_frame_cap_get_capFBasePtr(cap));

    cap = cap_frame_cap_set_capFMappedASID(cap, asid);
    cap = cap_frame_cap_set_capFMappedAddress(cap, vaddr);
    cap = cap_frame_cap_set_capFMapType(cap, X86_MappingEPT);

    switch (frameSize) {
    /* PTE mappings */
    case X86_SmallPage: {
        lookupEPTPTSlot_ret_t lu_ret;
        ept_pte_t pte;

        lu_ret = lookupEPTPTSlot(pml4, vaddr);
        if (lu_ret.status != EXCEPTION_NONE) {
            current_syscall_error.type = seL4_FailedLookup;
            current_syscall_error.failedLookupWasSource = false;
            /* current_lookup_fault will have been set by lookupEPTPTSlot */
            return EXCEPTION_SYSCALL_ERROR;
        }

        if (ept_pte_ptr_get_read(lu_ret.ptSlot)) {
            userError("X86EPTPageMap: Mapping already present.");
            current_syscall_error.type = seL4_DeleteFirst;
            return EXCEPTION_SYSCALL_ERROR;
        }

        pte = ept_pte_new(
                  paddr,
                  0,
                  0,
                  eptCacheFromVmAttr(vmAttr),
                  1,
                  WritableFromVMRights(vmRights),
                  1);

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performEPTPageMapPTE(cap, cte, lu_ret.ptSlot, pte, pml4);
    }

    /* PDE mappings */
    case X86_LargePage: {
        lookupEPTPDSlot_ret_t lu_ret;

        lu_ret = lookupEPTPDSlot(pml4, vaddr);
        if (lu_ret.status != EXCEPTION_NONE) {
            userError("X86EPTPageMap: Need a page directory first.");
            current_syscall_error.type = seL4_FailedLookup;
            current_syscall_error.failedLookupWasSource = false;
            /* current_lookup_fault will have been set by lookupEPTPDSlot */
            return EXCEPTION_SYSCALL_ERROR;
        }

        if ((ept_pde_ptr_get_page_size(lu_ret.pdSlot) == ept_pde_ept_pde_pt) &&
            ept_pde_ept_pde_pt_ptr_get_read(lu_ret.pdSlot)) {
            userError("X86EPTPageMap: Page table already present.");
            current_syscall_error.type = seL4_DeleteFirst;
            return EXCEPTION_SYSCALL_ERROR;
        }
        if (LARGE_PAGE_BITS != EPT_PD_INDEX_OFFSET &&
            (ept_pde_ptr_get_page_size(lu_ret.pdSlot + 1) == ept_pde_ept_pde_pt) &&
            ept_pde_ept_pde_pt_ptr_get_read(lu_ret.pdSlot + 1)) {
            userError("X86EPTPageMap: Page table already present.");
            current_syscall_error.type = seL4_DeleteFirst;
            return EXCEPTION_SYSCALL_ERROR;
        }
        if ((ept_pde_ptr_get_page_size(lu_ret.pdSlot) == ept_pde_ept_pde_2m) &&
            ept_pde_ept_pde_2m_ptr_get_read(lu_ret.pdSlot)) {
            userError("X86EPTPageMap: Mapping already present.");
            current_syscall_error.type = seL4_DeleteFirst;
            return EXCEPTION_SYSCALL_ERROR;
        }

        ept_pde_t pde1 = ept_pde_ept_pde_2m_new(
                             paddr,
                             0,
                             0,
                             eptCacheFromVmAttr(vmAttr),
                             1,
                             WritableFromVMRights(vmRights),
                             1);

        ept_pde_t pde2 = ept_pde_ept_pde_2m_new(
                             paddr + BIT(EPT_PD_INDEX_OFFSET),
                             0,
                             0,
                             eptCacheFromVmAttr(vmAttr),
                             1,
                             WritableFromVMRights(vmRights),
                             1);

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performEPTPageMapPDE(cap, cte, lu_ret.pdSlot, pde1, pde2, pml4);
    }

    default:
        /* When initializing EPT we only checked for support for 4K and 2M
         * pages, so we must disallow attempting to use any other */
        userError("X86EPTPageMap: Attempted to map unsupported page size.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 0;
        return EXCEPTION_SYSCALL_ERROR;
    }
}

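/* Remove the EPT mapping for a frame, but only if a mapping of the expected
 * size is still present and still refers to this frame's physical address.
 * For large pages both 2MiB entries are cleared when LARGE_PAGE_BITS differs
 * from EPT_PD_INDEX_OFFSET. */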
void unmapEPTPage(vm_page_size_t page_size, asid_t asid, vptr_t vptr, void *pptr)
{
    findEPTForASID_ret_t find_ret;
    paddr_t addr = addrFromPPtr(pptr);

    find_ret = findEPTForASID(asid);
    if (find_ret.status != EXCEPTION_NONE) {
        return;
    }

    switch (page_size) {
    case X86_SmallPage: {
        lookupEPTPTSlot_ret_t lu_ret;

        lu_ret = lookupEPTPTSlot(find_ret.ept, vptr);
        if (lu_ret.status != EXCEPTION_NONE) {
            return;
        }
        if (!ept_pte_ptr_get_read(lu_ret.ptSlot)) {
            return;
        }
        if (ept_pte_ptr_get_page_base_address(lu_ret.ptSlot) != addr) {
            return;
        }

        *lu_ret.ptSlot = ept_pte_new(0, 0, 0, 0, 0, 0, 0);
        break;
    }
    case X86_LargePage: {
        lookupEPTPDSlot_ret_t lu_ret;

        lu_ret = lookupEPTPDSlot(find_ret.ept, vptr);
        if (lu_ret.status != EXCEPTION_NONE) {
            return;
        }
        if (ept_pde_ptr_get_page_size(lu_ret.pdSlot) != ept_pde_ept_pde_2m) {
            return;
        }
        if (!ept_pde_ept_pde_2m_ptr_get_read(lu_ret.pdSlot)) {
            return;
        }
        if (ept_pde_ept_pde_2m_ptr_get_page_base_address(lu_ret.pdSlot) != addr) {
            return;
        }

        lu_ret.pdSlot[0] = ept_pde_ept_pde_2m_new(0, 0, 0, 0, 0, 0, 0);

        if (LARGE_PAGE_BITS != EPT_PD_INDEX_OFFSET) {
            assert(ept_pde_ptr_get_page_size(lu_ret.pdSlot + 1) == ept_pde_ept_pde_2m);
            assert(ept_pde_ept_pde_2m_ptr_get_read(lu_ret.pdSlot + 1));
            assert(ept_pde_ept_pde_2m_ptr_get_page_base_address(lu_ret.pdSlot + 1) == addr + BIT(21));

            lu_ret.pdSlot[1] = ept_pde_ept_pde_2m_new(0, 0, 0, 0, 0, 0, 0);
        }
        break;
    }
    default:
        /* we did not allow mapping additional page sizes into EPT objects,
         * so this should not happen. As we have no way to return an error
         * all we can do is assert */
        assert(!"Invalid page size for unmap");
    }
}

#endif /* CONFIG_VTX */