/*
 * Copyright 2014, General Dynamics C4 Systems
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include <config.h>

#ifdef CONFIG_IOMMU

#include <api/syscall.h>
#include <machine/io.h>
#include <kernel/thread.h>
#include <arch/api/invocation.h>
#include <arch/object/iospace.h>
#include <arch/model/statedata.h>
#include <linker.h>
#include <plat/machine/intel-vtd.h>


typedef struct lookupVTDContextSlot_ret {
    vtd_cte_t *cte;
    word_t index;
} lookupVTDContextSlot_ret_t;

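/* Return the master IOSpace capability (domain 0, PCI request ID 0), or a
 * null capability if no VT-d DMA remapping hardware units (DRHUs) were
 * discovered at boot. */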
BOOT_CODE cap_t master_iospace_cap(void)
{
    if (x86KSnumDrhu == 0) {
        return cap_null_cap_new();
    }

    return
        cap_io_space_cap_new(
            0,  /* capDomainID  */
            0   /* capPCIDevice */
        );
}

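/* Return a pointer to the VT-d context-table entry for the PCI request ID
 * carried by an IOSpace, IO page table or frame capability. The bus number
 * of the request ID indexes the root table, and (device << 3 | function)
 * indexes the context table that the root entry points to. */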
static vtd_cte_t *lookup_vtd_context_slot(cap_t cap)
{
    uint32_t vtd_root_index;
    uint32_t vtd_context_index;
    uint32_t pci_request_id;
    vtd_rte_t *vtd_root_slot;
    vtd_cte_t *vtd_context;
    vtd_cte_t *vtd_context_slot;

    switch (cap_get_capType(cap)) {
    case cap_io_space_cap:
        pci_request_id = cap_io_space_cap_get_capPCIDevice(cap);
        break;

    case cap_io_page_table_cap:
        pci_request_id = cap_io_page_table_cap_get_capIOPTIOASID(cap);
        break;

    case cap_frame_cap:
        pci_request_id = cap_frame_cap_get_capFMappedASID(cap);
        break;

    default:
        fail("Invalid cap type");
    }

    vtd_root_index = get_pci_bus(pci_request_id);
    vtd_root_slot = x86KSvtdRootTable + vtd_root_index;

    vtd_context = (vtd_cte_t *)paddr_to_pptr(vtd_rte_ptr_get_ctp(vtd_root_slot));
    vtd_context_index = (get_pci_dev(pci_request_id) << 3) | get_pci_fun(pci_request_id);
    vtd_context_slot = &vtd_context[vtd_context_index];

    return vtd_context_slot;
}

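/* Walk the IO page table recursively, consuming VTD_PT_INDEX_BITS of the
 * translation at each level. The walk stops early at a slot that does not
 * reference a next-level table (its write bit is clear) or when no levels
 * remain, and returns that slot together with the number of levels still
 * unresolved. */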
static lookupIOPTSlot_ret_t lookupIOPTSlot_resolve_levels(vtd_pte_t *iopt, word_t translation,
                                                          word_t levels_to_resolve, word_t levels_remaining)
{
    lookupIOPTSlot_ret_t ret;

    word_t iopt_index = 0;
    vtd_pte_t *iopt_slot = 0;
    vtd_pte_t *next_iopt_slot = 0;

    if (iopt == 0) {
        ret.ioptSlot = 0;
        ret.level = levels_remaining;
        ret.status = EXCEPTION_LOOKUP_FAULT;
        return ret;
    }

    iopt_index = (translation >> (VTD_PT_INDEX_BITS * (x86KSnumIOPTLevels - 1 - (levels_to_resolve - levels_remaining)))) &
                 MASK(VTD_PT_INDEX_BITS);
    iopt_slot = iopt + iopt_index;

    if (!vtd_pte_ptr_get_write(iopt_slot) || levels_remaining == 0) {
        ret.ioptSlot = iopt_slot;
        ret.level = levels_remaining;
        ret.status = EXCEPTION_NONE;
        return ret;
    }
    next_iopt_slot = (vtd_pte_t *)paddr_to_pptr(vtd_pte_ptr_get_addr(iopt_slot));
    return lookupIOPTSlot_resolve_levels(next_iopt_slot, translation, levels_to_resolve, levels_remaining - 1);
}


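/* Look up the IO page table slot for an IO virtual address, starting from
 * the top-level table referenced by the context entry. Returns a lookup
 * fault if no table is installed. */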
static inline lookupIOPTSlot_ret_t lookupIOPTSlot(vtd_pte_t *iopt, word_t io_address)
{
    lookupIOPTSlot_ret_t ret;

    if (iopt == 0) {
        ret.ioptSlot = 0;
        ret.level = 0;
        ret.status = EXCEPTION_LOOKUP_FAULT;
        return ret;
    } else {
        return lookupIOPTSlot_resolve_levels(iopt, io_address >> PAGE_BITS,
                                             x86KSnumIOPTLevels - 1, x86KSnumIOPTLevels - 1);
    }
}

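/* Zero the VT-d context entry associated with the given capability, flush
 * it from the CPU cache and invalidate the IOTLB so the hardware no longer
 * uses the stale translation. */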
void unmapVTDContextEntry(cap_t cap)
{
    vtd_cte_t *cte = lookup_vtd_context_slot(cap);
    assert(cte != 0);
    *cte = vtd_cte_new(
               0,
               false,
               0,
               0,
               0,
               false
           );

    flushCacheRange(cte, VTD_CTE_SIZE_BITS);
    invalidate_iotlb();
    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return;
}

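/* Remove the IO page table from the VT-d structures and mark its capability
 * as no longer mapped. */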
static exception_t performX86IOPTInvocationUnmap(cap_t cap, cte_t *ctSlot)
{
    deleteIOPageTable(cap);
    cap = cap_io_page_table_cap_set_capIOPTIsMapped(cap, 0);
    ctSlot->cap = cap;

    return EXCEPTION_NONE;
}

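/* Install an IO page table as the address space root of a VT-d context
 * entry and record the mapping in the capability slot. */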
static exception_t performX86IOPTInvocationMapContextRoot(cap_t cap, cte_t *ctSlot, vtd_cte_t vtd_cte,
                                                          vtd_cte_t *vtd_context_slot)
{
    *vtd_context_slot = vtd_cte;
    flushCacheRange(vtd_context_slot, VTD_CTE_SIZE_BITS);
    ctSlot->cap = cap;

    return EXCEPTION_NONE;
}

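/* Write an IO page table entry referencing a lower-level IO page table and
 * record the mapping in the capability slot. */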
static exception_t performX86IOPTInvocationMapPT(cap_t cap, cte_t *ctSlot, vtd_pte_t iopte, vtd_pte_t *ioptSlot)
{
    *ioptSlot = iopte;
    flushCacheRange(ioptSlot, VTD_PTE_SIZE_BITS);
    ctSlot->cap = cap;

    return EXCEPTION_NONE;
}

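/* Decode an invocation on an IO page table capability: either unmap it, or
 * map it as the top-level table of an IOSpace (installing a VT-d context
 * entry) or into an empty slot of an existing IO page table hierarchy. */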
exception_t decodeX86IOPTInvocation(
    word_t invLabel,
    word_t length,
    cte_t *slot,
    cap_t cap,
    word_t *buffer
)
{
    cap_t io_space;
    paddr_t paddr;
    uint32_t pci_request_id;
    word_t io_address;
    uint16_t domain_id;
    vtd_cte_t *vtd_context_slot;
    vtd_pte_t *vtd_pte;

    if (invLabel == X86IOPageTableUnmap) {

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performX86IOPTInvocationUnmap(cap, slot);
    }

    if (invLabel != X86IOPageTableMap) {
        userError("X86IOPageTable: Illegal operation.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (current_extra_caps.excaprefs[0] == NULL || length < 1) {
        userError("X86IOPageTableMap: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    io_space = current_extra_caps.excaprefs[0]->cap;
    io_address = getSyscallArg(0, buffer) & ~MASK(VTD_PT_INDEX_BITS + seL4_PageBits);

    if (cap_io_page_table_cap_get_capIOPTIsMapped(cap)) {
        userError("X86IOPageTableMap: IO page table is already mapped.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 0;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (cap_get_capType(io_space) != cap_io_space_cap) {
        userError("X86IOPageTableMap: Invalid IO space capability.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 0;
        return EXCEPTION_SYSCALL_ERROR;
    }

    pci_request_id = cap_io_space_cap_get_capPCIDevice(io_space);
    domain_id = cap_io_space_cap_get_capDomainID(io_space);
    if (pci_request_id == asidInvalid) {
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 0;

        return EXCEPTION_SYSCALL_ERROR;
    }

    paddr = pptr_to_paddr(VTD_PTE_PTR(cap_io_page_table_cap_get_capIOPTBasePtr(cap)));
    vtd_context_slot = lookup_vtd_context_slot(io_space);

    if (!vtd_cte_ptr_get_present(vtd_context_slot)) {

        /* 1st Level Page Table */
        vtd_cte_t vtd_cte = vtd_cte_new(
                                domain_id,              /* domain ID                   */
                                false,                  /* RMRR                        */
                                x86KSnumIOPTLevels - 2, /* addr width (x = levels - 2) */
                                paddr,                  /* address space root          */
                                0,                      /* translation type            */
                                true                    /* present                     */
                            );

        cap = cap_io_page_table_cap_set_capIOPTIsMapped(cap, 1);
        cap = cap_io_page_table_cap_set_capIOPTLevel(cap, 0);
        cap = cap_io_page_table_cap_set_capIOPTIOASID(cap, pci_request_id);

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performX86IOPTInvocationMapContextRoot(cap, slot, vtd_cte, vtd_context_slot);
    } else {
        lookupIOPTSlot_ret_t lu_ret;
        vtd_pte_t iopte;

        vtd_pte = (vtd_pte_t *)paddr_to_pptr(vtd_cte_ptr_get_asr(vtd_context_slot));
        lu_ret = lookupIOPTSlot(vtd_pte, io_address);

        if (lu_ret.status != EXCEPTION_NONE) {
            current_syscall_error.type = seL4_FailedLookup;
            current_syscall_error.failedLookupWasSource = false;
            return EXCEPTION_SYSCALL_ERROR;
        }

        lu_ret.level = x86KSnumIOPTLevels - lu_ret.level;
        if (vtd_pte_ptr_get_addr(lu_ret.ioptSlot) != 0) {
            current_syscall_error.type = seL4_DeleteFirst;

            return EXCEPTION_SYSCALL_ERROR;
        }

        iopte = vtd_pte_new(
                    paddr,  /* physical addr         */
                    1,      /* write permission flag */
                    1       /* read permission flag  */
                );

        cap = cap_io_page_table_cap_set_capIOPTIsMapped(cap, 1);
        cap = cap_io_page_table_cap_set_capIOPTLevel(cap, lu_ret.level);
        cap = cap_io_page_table_cap_set_capIOPTIOASID(cap, pci_request_id);
        cap = cap_io_page_table_cap_set_capIOPTMappedAddress(cap, io_address);

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performX86IOPTInvocationMapPT(cap, slot, iopte, lu_ret.ioptSlot);
    }
}

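/* Write the IO page table entry that maps a frame into an IO address space
 * and update the frame capability in its slot to record the mapping. */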
static exception_t performX86IOInvocationMap(cap_t cap, cte_t *ctSlot, vtd_pte_t iopte, vtd_pte_t *ioptSlot)
{
    ctSlot->cap = cap;
    *ioptSlot = iopte;
    flushCacheRange(ioptSlot, VTD_PTE_SIZE_BITS);

    return EXCEPTION_NONE;
}


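/* Decode a request to map a small frame into an IO address space
 * (X86PageMapIO): validate the IOSpace capability, locate the leaf IO page
 * table slot for the IO address, and build a PTE whose read/write rights
 * are the requested rights masked by the frame's VM rights. */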
exception_t decodeX86IOMapInvocation(
    word_t length,
    cte_t *slot,
    cap_t cap,
    word_t *buffer
)
{
    cap_t io_space;
    word_t io_address;
    uint32_t pci_request_id;
    vtd_cte_t *vtd_context_slot;
    vtd_pte_t *vtd_pte;
    vtd_pte_t iopte;
    paddr_t paddr;
    lookupIOPTSlot_ret_t lu_ret;
    vm_rights_t frame_cap_rights;
    seL4_CapRights_t dma_cap_rights_mask;

    if (current_extra_caps.excaprefs[0] == NULL || length < 2) {
        userError("X86PageMapIO: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (cap_frame_cap_get_capFSize(cap) != X86_SmallPage) {
        userError("X86PageMapIO: Invalid page size.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 0;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (cap_frame_cap_get_capFMappedASID(cap) != asidInvalid) {
        userError("X86PageMapIO: Page already mapped.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 0;
        return EXCEPTION_SYSCALL_ERROR;
    }

    io_space = current_extra_caps.excaprefs[0]->cap;
    io_address = getSyscallArg(1, buffer) & ~MASK(PAGE_BITS);
    paddr = pptr_to_paddr((void *)cap_frame_cap_get_capFBasePtr(cap));

    if (cap_get_capType(io_space) != cap_io_space_cap) {
        userError("X86PageMapIO: Invalid IO space capability.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 0;
        return EXCEPTION_SYSCALL_ERROR;
    }

    pci_request_id = cap_io_space_cap_get_capPCIDevice(io_space);

    if (pci_request_id == asidInvalid) {
        userError("X86PageMapIO: Invalid PCI device.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 0;
        return EXCEPTION_SYSCALL_ERROR;
    }

    vtd_context_slot = lookup_vtd_context_slot(io_space);

    if (!vtd_cte_ptr_get_present(vtd_context_slot)) {
        /* 1st Level Page Table is not installed */
        current_syscall_error.type = seL4_FailedLookup;
        current_syscall_error.failedLookupWasSource = false;
        return EXCEPTION_SYSCALL_ERROR;
    }

    vtd_pte = (vtd_pte_t *)paddr_to_pptr(vtd_cte_ptr_get_asr(vtd_context_slot));
    lu_ret = lookupIOPTSlot(vtd_pte, io_address);
    if (lu_ret.status != EXCEPTION_NONE || lu_ret.level != 0) {
        current_syscall_error.type = seL4_FailedLookup;
        current_syscall_error.failedLookupWasSource = false;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (vtd_pte_ptr_get_addr(lu_ret.ioptSlot) != 0) {
        current_syscall_error.type = seL4_DeleteFirst;
        return EXCEPTION_SYSCALL_ERROR;
    }

    dma_cap_rights_mask = rightsFromWord(getSyscallArg(0, buffer));
    frame_cap_rights = cap_frame_cap_get_capFVMRights(cap);

    bool_t write = seL4_CapRights_get_capAllowWrite(dma_cap_rights_mask) && (frame_cap_rights == VMReadWrite);
    bool_t read = seL4_CapRights_get_capAllowRead(dma_cap_rights_mask) && (frame_cap_rights != VMKernelOnly);
    if (write || read) {
        iopte = vtd_pte_new(paddr, !!write, !!read);
    } else {
        current_syscall_error.type = seL4_InvalidArgument;
        current_syscall_error.invalidArgumentNumber = 0;
        return EXCEPTION_SYSCALL_ERROR;
    }

    cap = cap_frame_cap_set_capFMapType(cap, X86_MappingIOSpace);
    cap = cap_frame_cap_set_capFMappedASID(cap, pci_request_id);
    cap = cap_frame_cap_set_capFMappedAddress(cap, io_address);

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return performX86IOInvocationMap(cap, slot, iopte, lu_ret.ioptSlot);
}

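/* Remove an IO page table from the VT-d structures when its capability is
 * deleted: clear either the context entry (level 0 tables) or the slot in
 * the parent table that references it, provided that slot still points at
 * this table, then flush the cache and invalidate the IOTLB. */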
void deleteIOPageTable(cap_t io_pt_cap)
{
    lookupIOPTSlot_ret_t lu_ret;
    uint32_t level;
    word_t io_address;
    vtd_cte_t *vtd_context_slot;
    vtd_pte_t *vtd_pte;

    if (cap_io_page_table_cap_get_capIOPTIsMapped(io_pt_cap)) {
        io_pt_cap = cap_io_page_table_cap_set_capIOPTIsMapped(io_pt_cap, 0);
        level = cap_io_page_table_cap_get_capIOPTLevel(io_pt_cap);
        vtd_context_slot = lookup_vtd_context_slot(io_pt_cap);

        if (!vtd_cte_ptr_get_present(vtd_context_slot)) {
            return;
        }

        vtd_pte = (vtd_pte_t *)paddr_to_pptr(vtd_cte_ptr_get_asr(vtd_context_slot));

        if (level == 0) {
            /* if we have been overmapped or something */
            if (pptr_to_paddr(vtd_pte) != pptr_to_paddr((void *)cap_io_page_table_cap_get_capIOPTBasePtr(io_pt_cap))) {
                return;
            }
            *vtd_context_slot = vtd_cte_new(
                                    0,     /* Domain ID          */
                                    false, /* RMRR               */
                                    0,     /* Address Width      */
                                    0,     /* Address Space Root */
                                    0,     /* Translation Type   */
                                    0      /* Present            */
                                );
            flushCacheRange(vtd_context_slot, VTD_CTE_SIZE_BITS);
        } else {
            io_address = cap_io_page_table_cap_get_capIOPTMappedAddress(io_pt_cap);
            lu_ret = lookupIOPTSlot_resolve_levels(vtd_pte, io_address >> PAGE_BITS, level - 1, level - 1);

            /* if we have been overmapped or something */
            if (lu_ret.status != EXCEPTION_NONE || lu_ret.level != 0) {
                return;
            }
            if (vtd_pte_ptr_get_addr(lu_ret.ioptSlot) != pptr_to_paddr((void *)cap_io_page_table_cap_get_capIOPTBasePtr(
                                                                           io_pt_cap))) {
                return;
            }
            *lu_ret.ioptSlot = vtd_pte_new(
                                   0, /* Physical Address */
                                   0, /* Write Permission */
                                   0  /* Read Permission  */
                               );
            flushCacheRange(lu_ret.ioptSlot, VTD_PTE_SIZE_BITS);
        }
        invalidate_iotlb();
    }
}

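/* Remove a frame from an IO address space: locate the leaf IO page table
 * slot for the mapped IO address and clear it if it still refers to this
 * frame's physical address, then flush the cache and invalidate the
 * IOTLB. */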
void unmapIOPage(cap_t cap)
{
    lookupIOPTSlot_ret_t lu_ret;
    word_t io_address;
    vtd_cte_t *vtd_context_slot;
    vtd_pte_t *vtd_pte;

    io_address = cap_frame_cap_get_capFMappedAddress(cap);
    vtd_context_slot = lookup_vtd_context_slot(cap);

    if (!vtd_cte_ptr_get_present(vtd_context_slot)) {
        return;
    }

    vtd_pte = (vtd_pte_t *)paddr_to_pptr(vtd_cte_ptr_get_asr(vtd_context_slot));

    lu_ret = lookupIOPTSlot(vtd_pte, io_address);
    if (lu_ret.status != EXCEPTION_NONE || lu_ret.level != 0) {
        return;
    }

    if (vtd_pte_ptr_get_addr(lu_ret.ioptSlot) != pptr_to_paddr((void *)cap_frame_cap_get_capFBasePtr(cap))) {
        return;
    }

    *lu_ret.ioptSlot = vtd_pte_new(
                           0, /* Physical Address */
                           0, /* Write Permission */
                           0  /* Read Permission  */
                       );

    flushCacheRange(lu_ret.ioptSlot, VTD_PTE_SIZE_BITS);
    invalidate_iotlb();
}

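/* Unmap a frame from its IO address space and reset the mapping fields of
 * the frame capability stored in the given slot. */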
exception_t performX86IOUnMapInvocation(cap_t cap, cte_t *ctSlot)
{
    unmapIOPage(ctSlot->cap);

    ctSlot->cap = cap_frame_cap_set_capFMappedAddress(ctSlot->cap, 0);
    ctSlot->cap = cap_frame_cap_set_capFMapType(ctSlot->cap, X86_MappingNone);
    ctSlot->cap = cap_frame_cap_set_capFMappedASID(ctSlot->cap, asidInvalid);

    return EXCEPTION_NONE;
}

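/* IOSpace capabilities have no invocations of their own; they are only
 * passed as extra capabilities when mapping IO page tables and frames. */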
exception_t decodeX86IOSpaceInvocation(word_t invLabel, cap_t cap)
{
    userError("IOSpace capability has no invocations");
    current_syscall_error.type = seL4_IllegalOperation;
    return EXCEPTION_SYSCALL_ERROR;
}

#endif /* CONFIG_IOMMU */