/*
 * Copyright 2014, General Dynamics C4 Systems
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include <config.h>
#include <types.h>
#include <api/failures.h>
#include <api/invocation.h>
#include <api/syscall.h>
#include <sel4/shared_types.h>
#include <machine/io.h>
#include <object/structures.h>
#include <object/objecttype.h>
#include <object/cnode.h>
#ifdef CONFIG_KERNEL_MCS
#include <object/schedcontext.h>
#endif
#include <object/tcb.h>
#include <kernel/cspace.h>
#include <kernel/thread.h>
#include <kernel/vspace.h>
#include <model/statedata.h>
#include <util.h>
#include <string.h>
#include <stdint.h>
#include <arch/smp/ipi_inline.h>

#define NULL_PRIO 0

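/* Check that a requested priority does not exceed the maximum controlled
 * priority (MCP) of the authority TCB. */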
static exception_t checkPrio(prio_t prio, tcb_t *auth)
{
    prio_t mcp;

    mcp = auth->tcbMCP;

    /* system invariant: existing MCPs are bounded */
    assert(mcp <= seL4_MaxPrio);

    /* can't assign a priority greater than our own mcp */
    if (prio > mcp) {
        current_syscall_error.type = seL4_RangeError;
        current_syscall_error.rangeErrorMin = seL4_MinPrio;
        current_syscall_error.rangeErrorMax = mcp;
        return EXCEPTION_SYSCALL_ERROR;
    }

    return EXCEPTION_NONE;
}

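/* Mark a priority as present in the two-level ready-queue bitmap: one bit in
 * the level-1 word selects a level-2 word, and one bit in that level-2 word
 * selects the priority itself.
 *
 * A worked example, assuming a 64-bit word size (wordRadix == 6):
 * prio == 200 gives l1index == 200 >> 6 == 3, and bit 200 & MASK(6) == 8 is
 * set in the corresponding (inverted-index) level-2 word. */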
static inline void addToBitmap(word_t cpu, word_t dom, word_t prio)
{
    word_t l1index;
    word_t l1index_inverted;

    l1index = prio_to_l1index(prio);
    l1index_inverted = invert_l1index(l1index);

    NODE_STATE_ON_CORE(ksReadyQueuesL1Bitmap[dom], cpu) |= BIT(l1index);
    /* we invert the l1 index when accessing the 2nd level of the bitmap in
       order to increase the likelihood that a high prio thread's l2 index word
       will be on the same cache line as the l1 index word - this makes sure
       the fastpath is fastest for high prio threads */
    NODE_STATE_ON_CORE(ksReadyQueuesL2Bitmap[dom][l1index_inverted], cpu) |= BIT(prio & MASK(wordRadix));
}

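/* Clear a priority's bit in the level-2 bitmap; once its level-2 word becomes
 * empty, clear the corresponding level-1 bit as well. */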
static inline void removeFromBitmap(word_t cpu, word_t dom, word_t prio)
{
    word_t l1index;
    word_t l1index_inverted;

    l1index = prio_to_l1index(prio);
    l1index_inverted = invert_l1index(l1index);
    NODE_STATE_ON_CORE(ksReadyQueuesL2Bitmap[dom][l1index_inverted], cpu) &= ~BIT(prio & MASK(wordRadix));
    if (unlikely(!NODE_STATE_ON_CORE(ksReadyQueuesL2Bitmap[dom][l1index_inverted], cpu))) {
        NODE_STATE_ON_CORE(ksReadyQueuesL1Bitmap[dom], cpu) &= ~BIT(l1index);
    }
}

/* Add TCB to the head of a scheduler queue */
void tcbSchedEnqueue(tcb_t *tcb)
{
#ifdef CONFIG_KERNEL_MCS
    assert(isSchedulable(tcb));
    assert(refill_sufficient(tcb->tcbSchedContext, 0));
#endif

    if (!thread_state_get_tcbQueued(tcb->tcbState)) {
        tcb_queue_t queue;
        dom_t dom;
        prio_t prio;
        word_t idx;

        dom = tcb->tcbDomain;
        prio = tcb->tcbPriority;
        idx = ready_queues_index(dom, prio);
        queue = NODE_STATE_ON_CORE(ksReadyQueues[idx], tcb->tcbAffinity);

        if (!queue.end) { /* Empty list */
            queue.end = tcb;
            addToBitmap(SMP_TERNARY(tcb->tcbAffinity, 0), dom, prio);
        } else {
            queue.head->tcbSchedPrev = tcb;
        }
        tcb->tcbSchedPrev = NULL;
        tcb->tcbSchedNext = queue.head;
        queue.head = tcb;

        NODE_STATE_ON_CORE(ksReadyQueues[idx], tcb->tcbAffinity) = queue;

        thread_state_ptr_set_tcbQueued(&tcb->tcbState, true);
    }
}

/* Add TCB to the end of a scheduler queue */
void tcbSchedAppend(tcb_t *tcb)
{
#ifdef CONFIG_KERNEL_MCS
    assert(isSchedulable(tcb));
    assert(refill_sufficient(tcb->tcbSchedContext, 0));
    assert(refill_ready(tcb->tcbSchedContext));
#endif
    if (!thread_state_get_tcbQueued(tcb->tcbState)) {
        tcb_queue_t queue;
        dom_t dom;
        prio_t prio;
        word_t idx;

        dom = tcb->tcbDomain;
        prio = tcb->tcbPriority;
        idx = ready_queues_index(dom, prio);
        queue = NODE_STATE_ON_CORE(ksReadyQueues[idx], tcb->tcbAffinity);

        if (!queue.head) { /* Empty list */
            queue.head = tcb;
            addToBitmap(SMP_TERNARY(tcb->tcbAffinity, 0), dom, prio);
        } else {
            queue.end->tcbSchedNext = tcb;
        }
        tcb->tcbSchedPrev = queue.end;
        tcb->tcbSchedNext = NULL;
        queue.end = tcb;

        NODE_STATE_ON_CORE(ksReadyQueues[idx], tcb->tcbAffinity) = queue;

        thread_state_ptr_set_tcbQueued(&tcb->tcbState, true);
    }
}

/* Remove TCB from a scheduler queue */
void tcbSchedDequeue(tcb_t *tcb)
{
    if (thread_state_get_tcbQueued(tcb->tcbState)) {
        tcb_queue_t queue;
        dom_t dom;
        prio_t prio;
        word_t idx;

        dom = tcb->tcbDomain;
        prio = tcb->tcbPriority;
        idx = ready_queues_index(dom, prio);
        queue = NODE_STATE_ON_CORE(ksReadyQueues[idx], tcb->tcbAffinity);

        if (tcb->tcbSchedPrev) {
            tcb->tcbSchedPrev->tcbSchedNext = tcb->tcbSchedNext;
        } else {
            queue.head = tcb->tcbSchedNext;
            if (likely(!tcb->tcbSchedNext)) {
                removeFromBitmap(SMP_TERNARY(tcb->tcbAffinity, 0), dom, prio);
            }
        }

        if (tcb->tcbSchedNext) {
            tcb->tcbSchedNext->tcbSchedPrev = tcb->tcbSchedPrev;
        } else {
            queue.end = tcb->tcbSchedPrev;
        }

        NODE_STATE_ON_CORE(ksReadyQueues[idx], tcb->tcbAffinity) = queue;

        thread_state_ptr_set_tcbQueued(&tcb->tcbState, false);
    }
}

#ifdef CONFIG_DEBUG_BUILD
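/* Prepend a TCB to the per-core debug list of all TCBs on this node */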
void tcbDebugAppend(tcb_t *tcb)
{
    debug_tcb_t *debug_tcb = TCB_PTR_DEBUG_PTR(tcb);
    /* prepend to the list */
    debug_tcb->tcbDebugPrev = NULL;

    debug_tcb->tcbDebugNext = NODE_STATE_ON_CORE(ksDebugTCBs, tcb->tcbAffinity);

    if (NODE_STATE_ON_CORE(ksDebugTCBs, tcb->tcbAffinity)) {
        TCB_PTR_DEBUG_PTR(NODE_STATE_ON_CORE(ksDebugTCBs, tcb->tcbAffinity))->tcbDebugPrev = tcb;
    }

    NODE_STATE_ON_CORE(ksDebugTCBs, tcb->tcbAffinity) = tcb;
}

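/* Unlink a TCB from the per-core debug list, fixing up the list head or the
 * neighbouring links as appropriate */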
void tcbDebugRemove(tcb_t *tcb)
{
    debug_tcb_t *debug_tcb = TCB_PTR_DEBUG_PTR(tcb);

    assert(NODE_STATE_ON_CORE(ksDebugTCBs, tcb->tcbAffinity) != NULL);
    if (tcb == NODE_STATE_ON_CORE(ksDebugTCBs, tcb->tcbAffinity)) {
        NODE_STATE_ON_CORE(ksDebugTCBs, tcb->tcbAffinity) = TCB_PTR_DEBUG_PTR(NODE_STATE_ON_CORE(ksDebugTCBs,
                                                                                                 tcb->tcbAffinity))->tcbDebugNext;
    } else {
        assert(TCB_PTR_DEBUG_PTR(tcb)->tcbDebugPrev);
        TCB_PTR_DEBUG_PTR(debug_tcb->tcbDebugPrev)->tcbDebugNext = debug_tcb->tcbDebugNext;
    }

    if (debug_tcb->tcbDebugNext) {
        TCB_PTR_DEBUG_PTR(debug_tcb->tcbDebugNext)->tcbDebugPrev = debug_tcb->tcbDebugPrev;
    }

    debug_tcb->tcbDebugPrev = NULL;
    debug_tcb->tcbDebugNext = NULL;
}
#endif /* CONFIG_DEBUG_BUILD */

#ifndef CONFIG_KERNEL_MCS
/* Add TCB to the end of an endpoint queue */
tcb_queue_t tcbEPAppend(tcb_t *tcb, tcb_queue_t queue)
{
    if (!queue.head) { /* Empty list */
        queue.head = tcb;
    } else {
        queue.end->tcbEPNext = tcb;
    }
    tcb->tcbEPPrev = queue.end;
    tcb->tcbEPNext = NULL;
    queue.end = tcb;

    return queue;
}
#endif

/* Remove TCB from an endpoint queue */
tcb_queue_t tcbEPDequeue(tcb_t *tcb, tcb_queue_t queue)
{
    if (tcb->tcbEPPrev) {
        tcb->tcbEPPrev->tcbEPNext = tcb->tcbEPNext;
    } else {
        queue.head = tcb->tcbEPNext;
    }

    if (tcb->tcbEPNext) {
        tcb->tcbEPNext->tcbEPPrev = tcb->tcbEPPrev;
    } else {
        queue.end = tcb->tcbEPPrev;
    }

    return queue;
}

#ifdef CONFIG_KERNEL_MCS
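/* Remove a TCB from the release queue, flagging a timer reprogram if the head
 * (and hence the next wake-up time) changed */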
void tcbReleaseRemove(tcb_t *tcb)
{
    if (likely(thread_state_get_tcbInReleaseQueue(tcb->tcbState))) {
        if (tcb->tcbSchedPrev) {
            tcb->tcbSchedPrev->tcbSchedNext = tcb->tcbSchedNext;
        } else {
            NODE_STATE_ON_CORE(ksReleaseHead, tcb->tcbAffinity) = tcb->tcbSchedNext;
            /* the head has changed, we might need to set a new timeout */
            NODE_STATE_ON_CORE(ksReprogram, tcb->tcbAffinity) = true;
        }

        if (tcb->tcbSchedNext) {
            tcb->tcbSchedNext->tcbSchedPrev = tcb->tcbSchedPrev;
        }

        tcb->tcbSchedNext = NULL;
        tcb->tcbSchedPrev = NULL;
        thread_state_ptr_set_tcbInReleaseQueue(&tcb->tcbState, false);
    }
}

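/* Insert a TCB into the release queue, which is kept sorted by ascending
 * release time (the rTime of the head refill of each thread's scheduling
 * context) */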
void tcbReleaseEnqueue(tcb_t *tcb)
{
    assert(thread_state_get_tcbInReleaseQueue(tcb->tcbState) == false);
    assert(thread_state_get_tcbQueued(tcb->tcbState) == false);

    tcb_t *before = NULL;
    tcb_t *after = NODE_STATE_ON_CORE(ksReleaseHead, tcb->tcbAffinity);

    /* find our place in the ordered queue */
    while (after != NULL &&
           refill_head(tcb->tcbSchedContext)->rTime >= refill_head(after->tcbSchedContext)->rTime) {
        before = after;
        after = after->tcbSchedNext;
    }

    if (before == NULL) {
        /* insert at head */
        NODE_STATE_ON_CORE(ksReleaseHead, tcb->tcbAffinity) = tcb;
        NODE_STATE_ON_CORE(ksReprogram, tcb->tcbAffinity) = true;
    } else {
        before->tcbSchedNext = tcb;
    }

    if (after != NULL) {
        after->tcbSchedPrev = tcb;
    }

    tcb->tcbSchedNext = after;
    tcb->tcbSchedPrev = before;

    thread_state_ptr_set_tcbInReleaseQueue(&tcb->tcbState, true);
}

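/* Detach and return the head of the current core's release queue, flagging a
 * timer reprogram since the next wake-up time has changed */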
tcb_t *tcbReleaseDequeue(void)
{
    assert(NODE_STATE(ksReleaseHead) != NULL);
    assert(NODE_STATE(ksReleaseHead)->tcbSchedPrev == NULL);
    SMP_COND_STATEMENT(assert(NODE_STATE(ksReleaseHead)->tcbAffinity == getCurrentCPUIndex()));

    tcb_t *detached_head = NODE_STATE(ksReleaseHead);
    NODE_STATE(ksReleaseHead) = NODE_STATE(ksReleaseHead)->tcbSchedNext;

    if (NODE_STATE(ksReleaseHead)) {
        NODE_STATE(ksReleaseHead)->tcbSchedPrev = NULL;
    }

    if (detached_head->tcbSchedNext) {
        detached_head->tcbSchedNext->tcbSchedPrev = NULL;
        detached_head->tcbSchedNext = NULL;
    }

    thread_state_ptr_set_tcbInReleaseQueue(&detached_head->tcbState, false);
    NODE_STATE(ksReprogram) = true;

    return detached_head;
}
#endif

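/* Extra-cap CPtrs and unwrapped badges live in the caps_or_badges array of
 * the IPC buffer, which sits after the tag word, the seL4_MsgMaxLength
 * message registers, and the userData word - hence the word offset of
 * seL4_MsgMaxLength + 2. */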
cptr_t PURE getExtraCPtr(word_t *bufferPtr, word_t i)
{
    return (cptr_t)bufferPtr[seL4_MsgMaxLength + 2 + i];
}

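/* Write an unwrapped badge into slot i of the same caps_or_badges array */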
void setExtraBadge(word_t *bufferPtr, word_t badge,
                   word_t i)
{
    bufferPtr[seL4_MsgMaxLength + 2 + i] = badge;
}

#ifndef CONFIG_KERNEL_MCS
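/* Give the receiver of a Call a one-shot reply capability to the sender, by
 * inserting a reply cap derived from the sender's master reply cap into the
 * receiver's tcbCaller slot */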
void setupCallerCap(tcb_t *sender, tcb_t *receiver, bool_t canGrant)
{
    cte_t *replySlot, *callerSlot;
    cap_t masterCap UNUSED, callerCap UNUSED;

    setThreadState(sender, ThreadState_BlockedOnReply);
    replySlot = TCB_PTR_CTE_PTR(sender, tcbReply);
    masterCap = replySlot->cap;
    /* Haskell error: "Sender must have a valid master reply cap" */
    assert(cap_get_capType(masterCap) == cap_reply_cap);
    assert(cap_reply_cap_get_capReplyMaster(masterCap));
    assert(cap_reply_cap_get_capReplyCanGrant(masterCap));
    assert(TCB_PTR(cap_reply_cap_get_capTCBPtr(masterCap)) == sender);
    callerSlot = TCB_PTR_CTE_PTR(receiver, tcbCaller);
    callerCap = callerSlot->cap;
    /* Haskell error: "Caller cap must not already exist" */
    assert(cap_get_capType(callerCap) == cap_null_cap);
    cteInsert(cap_reply_cap_new(canGrant, false, TCB_REF(sender)),
              replySlot, callerSlot);
}

void deleteCallerCap(tcb_t *receiver)
{
    cte_t *callerSlot;

    callerSlot = TCB_PTR_CTE_PTR(receiver, tcbCaller);
    /** GHOSTUPD: "(True, gs_set_assn cteDeleteOne_'proc (ucast cap_reply_cap))" */
    cteDeleteOne(callerSlot);
}
#endif

extra_caps_t current_extra_caps;

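/* Look up the slots for the extra caps named in the message, filling
 * current_extra_caps; the array is NULL-terminated unless all
 * seL4_MsgMaxExtraCaps entries are used */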
exception_t lookupExtraCaps(tcb_t *thread, word_t *bufferPtr, seL4_MessageInfo_t info)
{
    lookupSlot_raw_ret_t lu_ret;
    cptr_t cptr;
    word_t i, length;

    if (!bufferPtr) {
        current_extra_caps.excaprefs[0] = NULL;
        return EXCEPTION_NONE;
    }

    length = seL4_MessageInfo_get_extraCaps(info);

    for (i = 0; i < length; i++) {
        cptr = getExtraCPtr(bufferPtr, i);

        lu_ret = lookupSlot(thread, cptr);
        if (lu_ret.status != EXCEPTION_NONE) {
            current_fault = seL4_Fault_CapFault_new(cptr, false);
            return lu_ret.status;
        }

        current_extra_caps.excaprefs[i] = lu_ret.slot;
    }
    if (i < seL4_MsgMaxExtraCaps) {
        current_extra_caps.excaprefs[i] = NULL;
    }

    return EXCEPTION_NONE;
}

/* Copy IPC MRs from one thread to another */
word_t copyMRs(tcb_t *sender, word_t *sendBuf, tcb_t *receiver,
               word_t *recvBuf, word_t n)
{
    word_t i;

    /* Copy inline words */
    for (i = 0; i < n && i < n_msgRegisters; i++) {
        setRegister(receiver, msgRegisters[i],
                    getRegister(sender, msgRegisters[i]));
    }

    if (!recvBuf || !sendBuf) {
        return i;
    }

    /* Copy out-of-line words */
    for (; i < n; i++) {
        recvBuf[i + 1] = sendBuf[i + 1];
    }

    return i;
}

#ifdef ENABLE_SMP_SUPPORT
/* This checks whether the current update to the scheduler queue changes the
 * previous scheduling decision made by the scheduler. If so, an
 * `irq_reschedule_ipi` is sent */
void remoteQueueUpdate(tcb_t *tcb)
{
    /* only ipi if the target is for the current domain */
    if (tcb->tcbAffinity != getCurrentCPUIndex() && tcb->tcbDomain == ksCurDomain) {
        tcb_t *targetCurThread = NODE_STATE_ON_CORE(ksCurThread, tcb->tcbAffinity);

        /* reschedule if the target core is idle or we are waking a higher priority thread (or
         * if a new irq would need to be set on MCS) */
        if (targetCurThread == NODE_STATE_ON_CORE(ksIdleThread, tcb->tcbAffinity) ||
            tcb->tcbPriority > targetCurThread->tcbPriority
#ifdef CONFIG_KERNEL_MCS
            || NODE_STATE_ON_CORE(ksReprogram, tcb->tcbAffinity)
#endif
           ) {
            ARCH_NODE_STATE(ipiReschedulePending) |= BIT(tcb->tcbAffinity);
        }
    }
}

/* This makes sure that the TCB is not currently running on another core.
 * It requests an 'IpiRemoteCall_Stall' to switch that core away from this TCB,
 * and also requests an 'irq_reschedule_ipi' to restore the state of the
 * target core afterwards */
void remoteTCBStall(tcb_t *tcb)
{
    if (
#ifdef CONFIG_KERNEL_MCS
        tcb->tcbSchedContext &&
#endif
        tcb->tcbAffinity != getCurrentCPUIndex() &&
        NODE_STATE_ON_CORE(ksCurThread, tcb->tcbAffinity) == tcb) {
        doRemoteStall(tcb->tcbAffinity);
        ARCH_NODE_STATE(ipiReschedulePending) |= BIT(tcb->tcbAffinity);
    }
}

#ifndef CONFIG_KERNEL_MCS
static exception_t invokeTCB_SetAffinity(tcb_t *thread, word_t affinity)
{
    /* remove the tcb from the scheduler queue in case it is already in one,
     * and add it to the new queue if required */
    tcbSchedDequeue(thread);
    migrateTCB(thread, affinity);
    if (isRunnable(thread)) {
        SCHED_APPEND(thread);
    }
    /* reschedule the current cpu if the tcb moves itself */
    if (thread == NODE_STATE(ksCurThread)) {
        rescheduleRequired();
    }
    return EXCEPTION_NONE;
}

static exception_t decodeSetAffinity(cap_t cap, word_t length, word_t *buffer)
{
    tcb_t *tcb;
    word_t affinity;

    if (length < 1) {
        userError("TCB SetAffinity: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    tcb = TCB_PTR(cap_thread_cap_get_capTCBPtr(cap));

    affinity = getSyscallArg(0, buffer);
    if (affinity >= ksNumCPUs) {
        userError("TCB SetAffinity: Requested CPU does not exist.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return invokeTCB_SetAffinity(tcb, affinity);
}
#endif
#endif /* ENABLE_SMP_SUPPORT */

#ifdef CONFIG_HARDWARE_DEBUG_API
static exception_t invokeConfigureSingleStepping(word_t *buffer, tcb_t *t,
                                                 uint16_t bp_num, word_t n_instrs)
{
    bool_t bp_was_consumed;

    bp_was_consumed = configureSingleStepping(t, bp_num, n_instrs, false);
    if (n_instrs == 0) {
        unsetBreakpointUsedFlag(t, bp_num);
        setMR(NODE_STATE(ksCurThread), buffer, 0, false);
    } else {
        setBreakpointUsedFlag(t, bp_num);
        setMR(NODE_STATE(ksCurThread), buffer, 0, bp_was_consumed);
    }
    return EXCEPTION_NONE;
}

static exception_t decodeConfigureSingleStepping(cap_t cap, word_t *buffer)
{
    uint16_t bp_num;
    word_t n_instrs;
    tcb_t *tcb;
    syscall_error_t syserr;

    tcb = TCB_PTR(cap_thread_cap_get_capTCBPtr(cap));

    bp_num = getSyscallArg(0, buffer);
    n_instrs = getSyscallArg(1, buffer);

    syserr = Arch_decodeConfigureSingleStepping(tcb, bp_num, n_instrs, false);
    if (syserr.type != seL4_NoError) {
        current_syscall_error = syserr;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return invokeConfigureSingleStepping(buffer, tcb, bp_num, n_instrs);
}

static exception_t invokeSetBreakpoint(tcb_t *tcb, uint16_t bp_num,
                                       word_t vaddr, word_t type, word_t size, word_t rw)
{
    setBreakpoint(tcb, bp_num, vaddr, type, size, rw);
    /* Signal restore_user_context() to pop the breakpoint context on return. */
    setBreakpointUsedFlag(tcb, bp_num);
    return EXCEPTION_NONE;
}

static exception_t decodeSetBreakpoint(cap_t cap, word_t *buffer)
{
    uint16_t bp_num;
    word_t vaddr, type, size, rw;
    tcb_t *tcb;
    syscall_error_t error;

    tcb = TCB_PTR(cap_thread_cap_get_capTCBPtr(cap));
    bp_num = getSyscallArg(0, buffer);
    vaddr = getSyscallArg(1, buffer);
    type = getSyscallArg(2, buffer);
    size = getSyscallArg(3, buffer);
    rw = getSyscallArg(4, buffer);

    /* We disallow the user from setting breakpoint addresses that are in the
     * kernel vaddr range.
     */
    if (vaddr >= (word_t)USER_TOP) {
        userError("Debug: Invalid address %lx: bp addresses must be userspace "
                  "addresses.",
                  vaddr);
        current_syscall_error.type = seL4_InvalidArgument;
        current_syscall_error.invalidArgumentNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (type != seL4_InstructionBreakpoint && type != seL4_DataBreakpoint) {
        userError("Debug: Unknown breakpoint type %lx.", type);
        current_syscall_error.type = seL4_InvalidArgument;
        current_syscall_error.invalidArgumentNumber = 2;
        return EXCEPTION_SYSCALL_ERROR;
    } else if (type == seL4_InstructionBreakpoint) {
        if (size != 0) {
            userError("Debug: Instruction bps must have size of 0.");
            current_syscall_error.type = seL4_InvalidArgument;
            current_syscall_error.invalidArgumentNumber = 3;
            return EXCEPTION_SYSCALL_ERROR;
        }
        if (rw != seL4_BreakOnRead) {
            userError("Debug: Instruction bps must be break-on-read.");
            current_syscall_error.type = seL4_InvalidArgument;
            current_syscall_error.invalidArgumentNumber = 4;
            return EXCEPTION_SYSCALL_ERROR;
        }
        if ((seL4_FirstWatchpoint == -1 || bp_num >= seL4_FirstWatchpoint)
            && seL4_FirstBreakpoint != seL4_FirstWatchpoint) {
            userError("Debug: Can't specify a watchpoint ID with type seL4_InstructionBreakpoint.");
            current_syscall_error.type = seL4_InvalidArgument;
            current_syscall_error.invalidArgumentNumber = 2;
            return EXCEPTION_SYSCALL_ERROR;
        }
    } else if (type == seL4_DataBreakpoint) {
        if (size == 0) {
            userError("Debug: Data bps cannot have size of 0.");
            current_syscall_error.type = seL4_InvalidArgument;
            current_syscall_error.invalidArgumentNumber = 3;
            return EXCEPTION_SYSCALL_ERROR;
        }
        if (seL4_FirstWatchpoint != -1 && bp_num < seL4_FirstWatchpoint) {
            userError("Debug: Data watchpoints cannot specify a non-data watchpoint ID.");
            current_syscall_error.type = seL4_InvalidArgument;
            current_syscall_error.invalidArgumentNumber = 2;
            return EXCEPTION_SYSCALL_ERROR;
        }
    } else if (type == seL4_SoftwareBreakRequest) {
        userError("Debug: Use a software breakpoint instruction to trigger a "
                  "software breakpoint.");
        current_syscall_error.type = seL4_InvalidArgument;
        current_syscall_error.invalidArgumentNumber = 2;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (rw != seL4_BreakOnRead && rw != seL4_BreakOnWrite
        && rw != seL4_BreakOnReadWrite) {
        userError("Debug: Unknown access-type %lu.", rw);
        current_syscall_error.type = seL4_InvalidArgument;
        current_syscall_error.invalidArgumentNumber = 3;
        return EXCEPTION_SYSCALL_ERROR;
    }
    if (size != 0 && size != 1 && size != 2 && size != 4 && size != 8) {
        userError("Debug: Invalid size %lu.", size);
        current_syscall_error.type = seL4_InvalidArgument;
        current_syscall_error.invalidArgumentNumber = 3;
        return EXCEPTION_SYSCALL_ERROR;
    }
    if (size > 0 && vaddr & (size - 1)) {
        /* Just don't allow unaligned watchpoints. They are undefined on
         * both ARM and x86.
         *
         * X86: Intel manuals, vol3, 17.2.5:
         *  "Two-byte ranges must be aligned on word boundaries; 4-byte
         *   ranges must be aligned on doubleword boundaries"
         *  "Unaligned data or I/O breakpoint addresses do not yield valid
         *   results"
         *
         * ARM: ARMv7 manual, C11.11.44:
         *  "A DBGWVR is programmed with a word-aligned address."
         */
        userError("Debug: Unaligned data watchpoint address %lx (size %lx) "
                  "rejected.\n",
                  vaddr, size);

        current_syscall_error.type = seL4_AlignmentError;
        return EXCEPTION_SYSCALL_ERROR;
    }

    error = Arch_decodeSetBreakpoint(tcb, bp_num, vaddr, type, size, rw);
    if (error.type != seL4_NoError) {
        current_syscall_error = error;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return invokeSetBreakpoint(tcb, bp_num,
                               vaddr, type, size, rw);
}

static exception_t invokeGetBreakpoint(word_t *buffer, tcb_t *tcb, uint16_t bp_num)
{
    getBreakpoint_t res;

    res = getBreakpoint(tcb, bp_num);
    setMR(NODE_STATE(ksCurThread), buffer, 0, res.vaddr);
    setMR(NODE_STATE(ksCurThread), buffer, 1, res.type);
    setMR(NODE_STATE(ksCurThread), buffer, 2, res.size);
    setMR(NODE_STATE(ksCurThread), buffer, 3, res.rw);
    setMR(NODE_STATE(ksCurThread), buffer, 4, res.is_enabled);
    return EXCEPTION_NONE;
}

static exception_t decodeGetBreakpoint(cap_t cap, word_t *buffer)
{
    tcb_t *tcb;
    uint16_t bp_num;
    syscall_error_t error;

    tcb = TCB_PTR(cap_thread_cap_get_capTCBPtr(cap));
    bp_num = getSyscallArg(0, buffer);

    error = Arch_decodeGetBreakpoint(tcb, bp_num);
    if (error.type != seL4_NoError) {
        current_syscall_error = error;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return invokeGetBreakpoint(buffer, tcb, bp_num);
}

static exception_t invokeUnsetBreakpoint(tcb_t *tcb, uint16_t bp_num)
{
    /* Maintain the bitfield of in-use breakpoints. */
    unsetBreakpoint(tcb, bp_num);
    unsetBreakpointUsedFlag(tcb, bp_num);
    return EXCEPTION_NONE;
}

static exception_t decodeUnsetBreakpoint(cap_t cap, word_t *buffer)
{
    tcb_t *tcb;
    uint16_t bp_num;
    syscall_error_t error;

    tcb = TCB_PTR(cap_thread_cap_get_capTCBPtr(cap));
    bp_num = getSyscallArg(0, buffer);

    error = Arch_decodeUnsetBreakpoint(tcb, bp_num);
    if (error.type != seL4_NoError) {
        current_syscall_error = error;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return invokeUnsetBreakpoint(tcb, bp_num);
}
#endif /* CONFIG_HARDWARE_DEBUG_API */

static exception_t invokeSetTLSBase(tcb_t *thread, word_t tls_base)
{
    setRegister(thread, TLS_BASE, tls_base);
    if (thread == NODE_STATE(ksCurThread)) {
        /* If this is the current thread, force a reschedule to ensure that any
         * changes to the TLS_BASE are realized */
        rescheduleRequired();
    }

    return EXCEPTION_NONE;
}

static exception_t decodeSetTLSBase(cap_t cap, word_t length, word_t *buffer)
{
    word_t tls_base;

    if (length < 1) {
        userError("TCB SetTLSBase: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    tls_base = getSyscallArg(0, buffer);

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return invokeSetTLSBase(TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), tls_base);
}

/* The following functions sit in the syscall error monad, but include the
 * exception cases for the preemptible bottom end, as they call the invoke
 * functions directly. This is a significant deviation from the Haskell
 * spec. */
exception_t decodeTCBInvocation(word_t invLabel, word_t length, cap_t cap,
                                cte_t *slot, bool_t call, word_t *buffer)
{
    /* Stall the core if we are operating on a remote TCB that is currently running */
    SMP_COND_STATEMENT(remoteTCBStall(TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)));)

    switch (invLabel) {
    case TCBReadRegisters:
        /* Second level of decoding */
        return decodeReadRegisters(cap, length, call, buffer);

    case TCBWriteRegisters:
        return decodeWriteRegisters(cap, length, buffer);

    case TCBCopyRegisters:
        return decodeCopyRegisters(cap, length, buffer);

    case TCBSuspend:
        /* Jump straight to the invoke */
        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return invokeTCB_Suspend(
                   TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)));

    case TCBResume:
        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return invokeTCB_Resume(
                   TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)));

    case TCBConfigure:
        return decodeTCBConfigure(cap, length, slot, buffer);

    case TCBSetPriority:
        return decodeSetPriority(cap, length, buffer);

    case TCBSetMCPriority:
        return decodeSetMCPriority(cap, length, buffer);

    case TCBSetSchedParams:
#ifdef CONFIG_KERNEL_MCS
        return decodeSetSchedParams(cap, length, slot, buffer);
#else
        return decodeSetSchedParams(cap, length, buffer);
#endif

    case TCBSetIPCBuffer:
        return decodeSetIPCBuffer(cap, length, slot, buffer);

    case TCBSetSpace:
        return decodeSetSpace(cap, length, slot, buffer);

    case TCBBindNotification:
        return decodeBindNotification(cap);

    case TCBUnbindNotification:
        return decodeUnbindNotification(cap);

#ifdef CONFIG_KERNEL_MCS
    case TCBSetTimeoutEndpoint:
        return decodeSetTimeoutEndpoint(cap, slot);
#else
#ifdef ENABLE_SMP_SUPPORT
    case TCBSetAffinity:
        return decodeSetAffinity(cap, length, buffer);
#endif /* ENABLE_SMP_SUPPORT */
#endif

        /* There is no notion of arch specific TCB invocations so this needs to go here */
#ifdef CONFIG_VTX
    case TCBSetEPTRoot:
        return decodeSetEPTRoot(cap);
#endif

#ifdef CONFIG_HARDWARE_DEBUG_API
    case TCBConfigureSingleStepping:
        return decodeConfigureSingleStepping(cap, buffer);

    case TCBSetBreakpoint:
        return decodeSetBreakpoint(cap, buffer);

    case TCBGetBreakpoint:
        return decodeGetBreakpoint(cap, buffer);

    case TCBUnsetBreakpoint:
        return decodeUnsetBreakpoint(cap, buffer);
#endif

    case TCBSetTLSBase:
        return decodeSetTLSBase(cap, length, buffer);

    default:
        /* Haskell: "throw IllegalOperation" */
        userError("TCB: Illegal operation.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }
}

enum CopyRegistersFlags {
    CopyRegisters_suspendSource = 0,
    CopyRegisters_resumeTarget = 1,
    CopyRegisters_transferFrame = 2,
    CopyRegisters_transferInteger = 3
};

exception_t decodeCopyRegisters(cap_t cap, word_t length, word_t *buffer)
{
    word_t transferArch;
    tcb_t *srcTCB;
    cap_t source_cap;
    word_t flags;

    if (length < 1 || current_extra_caps.excaprefs[0] == NULL) {
        userError("TCB CopyRegisters: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    flags = getSyscallArg(0, buffer);

    transferArch = Arch_decodeTransfer(flags >> 8);

    source_cap = current_extra_caps.excaprefs[0]->cap;

    if (cap_get_capType(source_cap) == cap_thread_cap) {
        srcTCB = TCB_PTR(cap_thread_cap_get_capTCBPtr(source_cap));
    } else {
        userError("TCB CopyRegisters: Invalid source TCB.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return invokeTCB_CopyRegisters(
               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), srcTCB,
               flags & BIT(CopyRegisters_suspendSource),
               flags & BIT(CopyRegisters_resumeTarget),
               flags & BIT(CopyRegisters_transferFrame),
               flags & BIT(CopyRegisters_transferInteger),
               transferArch);
}

enum ReadRegistersFlags {
    ReadRegisters_suspend = 0
};

exception_t decodeReadRegisters(cap_t cap, word_t length, bool_t call,
                                word_t *buffer)
{
    word_t transferArch, flags, n;
    tcb_t *thread;

    if (length < 2) {
        userError("TCB ReadRegisters: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    flags = getSyscallArg(0, buffer);
    n = getSyscallArg(1, buffer);

    if (n < 1 || n > n_frameRegisters + n_gpRegisters) {
        userError("TCB ReadRegisters: Attempted to read an invalid number of registers (%d).",
                  (int)n);
        current_syscall_error.type = seL4_RangeError;
        current_syscall_error.rangeErrorMin = 1;
        current_syscall_error.rangeErrorMax = n_frameRegisters +
                                              n_gpRegisters;
        return EXCEPTION_SYSCALL_ERROR;
    }

    transferArch = Arch_decodeTransfer(flags >> 8);

    thread = TCB_PTR(cap_thread_cap_get_capTCBPtr(cap));
    if (thread == NODE_STATE(ksCurThread)) {
        userError("TCB ReadRegisters: Attempted to read our own registers.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return invokeTCB_ReadRegisters(
               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)),
               flags & BIT(ReadRegisters_suspend),
               n, transferArch, call);
}

enum WriteRegistersFlags {
    WriteRegisters_resume = 0
};

exception_t decodeWriteRegisters(cap_t cap, word_t length, word_t *buffer)
{
    word_t flags, w;
    word_t transferArch;
    tcb_t *thread;

    if (length < 2) {
        userError("TCB WriteRegisters: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    flags = getSyscallArg(0, buffer);
    w = getSyscallArg(1, buffer);

    if (length - 2 < w) {
        userError("TCB WriteRegisters: Message too short for requested write size (%d/%d).",
                  (int)(length - 2), (int)w);
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    transferArch = Arch_decodeTransfer(flags >> 8);

    thread = TCB_PTR(cap_thread_cap_get_capTCBPtr(cap));
    if (thread == NODE_STATE(ksCurThread)) {
        userError("TCB WriteRegisters: Attempted to write our own registers.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return invokeTCB_WriteRegisters(thread,
                                    flags & BIT(WriteRegisters_resume),
                                    w, transferArch, buffer);
}

#ifdef CONFIG_KERNEL_MCS
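/* A fault or timeout handler cap must either be a send-capable endpoint cap
 * that can grant or grant-reply, or a null cap (meaning no handler) */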
static bool_t validFaultHandler(cap_t cap)
{
    switch (cap_get_capType(cap)) {
    case cap_endpoint_cap:
        if (!cap_endpoint_cap_get_capCanSend(cap) ||
            (!cap_endpoint_cap_get_capCanGrant(cap) &&
             !cap_endpoint_cap_get_capCanGrantReply(cap))) {
            current_syscall_error.type = seL4_InvalidCapability;
            return false;
        }
        break;
    case cap_null_cap:
        /* just has no fault endpoint */
        break;
    default:
        current_syscall_error.type = seL4_InvalidCapability;
        return false;
    }
    return true;
}
#endif

/* TCBConfigure batches SetIPCBuffer and parts of SetSpace. */
exception_t decodeTCBConfigure(cap_t cap, word_t length, cte_t *slot, word_t *buffer)
{
    cte_t *bufferSlot, *cRootSlot, *vRootSlot;
    cap_t bufferCap, cRootCap, vRootCap;
    deriveCap_ret_t dc_ret;
    word_t cRootData, vRootData, bufferAddr;
#ifdef CONFIG_KERNEL_MCS
#define TCBCONFIGURE_ARGS 3
#else
#define TCBCONFIGURE_ARGS 4
#endif
    if (length < TCBCONFIGURE_ARGS || current_extra_caps.excaprefs[0] == NULL
        || current_extra_caps.excaprefs[1] == NULL
        || current_extra_caps.excaprefs[2] == NULL) {
        userError("TCB Configure: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

#ifdef CONFIG_KERNEL_MCS
    cRootData = getSyscallArg(0, buffer);
    vRootData = getSyscallArg(1, buffer);
    bufferAddr = getSyscallArg(2, buffer);
#else
    cptr_t faultEP = getSyscallArg(0, buffer);
    cRootData = getSyscallArg(1, buffer);
    vRootData = getSyscallArg(2, buffer);
    bufferAddr = getSyscallArg(3, buffer);
#endif

    cRootSlot = current_extra_caps.excaprefs[0];
    cRootCap = current_extra_caps.excaprefs[0]->cap;
    vRootSlot = current_extra_caps.excaprefs[1];
    vRootCap = current_extra_caps.excaprefs[1]->cap;
    bufferSlot = current_extra_caps.excaprefs[2];
    bufferCap = current_extra_caps.excaprefs[2]->cap;

    if (bufferAddr == 0) {
        bufferSlot = NULL;
    } else {
        dc_ret = deriveCap(bufferSlot, bufferCap);
        if (dc_ret.status != EXCEPTION_NONE) {
            return dc_ret.status;
        }
        bufferCap = dc_ret.cap;

        exception_t e = checkValidIPCBuffer(bufferAddr, bufferCap);
        if (e != EXCEPTION_NONE) {
            return e;
        }
    }

    if (slotCapLongRunningDelete(
            TCB_PTR_CTE_PTR(cap_thread_cap_get_capTCBPtr(cap), tcbCTable)) ||
        slotCapLongRunningDelete(
            TCB_PTR_CTE_PTR(cap_thread_cap_get_capTCBPtr(cap), tcbVTable))) {
        userError("TCB Configure: CSpace or VSpace currently being deleted.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (cRootData != 0) {
        cRootCap = updateCapData(false, cRootData, cRootCap);
    }

    dc_ret = deriveCap(cRootSlot, cRootCap);
    if (dc_ret.status != EXCEPTION_NONE) {
        return dc_ret.status;
    }
    cRootCap = dc_ret.cap;

    if (cap_get_capType(cRootCap) != cap_cnode_cap) {
        userError("TCB Configure: CSpace cap is invalid.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (vRootData != 0) {
        vRootCap = updateCapData(false, vRootData, vRootCap);
    }

    dc_ret = deriveCap(vRootSlot, vRootCap);
    if (dc_ret.status != EXCEPTION_NONE) {
        return dc_ret.status;
    }
    vRootCap = dc_ret.cap;

    if (!isValidVTableRoot(vRootCap)) {
        userError("TCB Configure: VSpace cap is invalid.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
#ifdef CONFIG_KERNEL_MCS
    return invokeTCB_ThreadControlCaps(
               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), slot,
               cap_null_cap_new(), NULL,
               cap_null_cap_new(), NULL,
               cRootCap, cRootSlot,
               vRootCap, vRootSlot,
               bufferAddr, bufferCap,
               bufferSlot, thread_control_caps_update_space |
               thread_control_caps_update_ipc_buffer);
#else
    return invokeTCB_ThreadControl(
               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), slot,
               faultEP, NULL_PRIO, NULL_PRIO,
               cRootCap, cRootSlot,
               vRootCap, vRootSlot,
               bufferAddr, bufferCap,
               bufferSlot, thread_control_update_space |
               thread_control_update_ipc_buffer);
#endif
}

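/* Decode a TCBSetPriority invocation. At user level this is reached through
 * the libsel4 wrapper, roughly (a sketch; the exact wrapper signature is
 * generated from the interface definitions):
 *
 *     seL4_TCB_SetPriority(tcb_cap, auth_tcb_cap, prio);
 *
 * where auth_tcb_cap supplies the MCP that bounds the requested priority. */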
exception_t decodeSetPriority(cap_t cap, word_t length, word_t *buffer)
{
    if (length < 1 || current_extra_caps.excaprefs[0] == NULL) {
        userError("TCB SetPriority: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    prio_t newPrio = getSyscallArg(0, buffer);
    cap_t authCap = current_extra_caps.excaprefs[0]->cap;

    if (cap_get_capType(authCap) != cap_thread_cap) {
        userError("TCB SetPriority: authority cap not a TCB.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }

    tcb_t *authTCB = TCB_PTR(cap_thread_cap_get_capTCBPtr(authCap));
    exception_t status = checkPrio(newPrio, authTCB);
    if (status != EXCEPTION_NONE) {
        userError("TCB SetPriority: Requested priority %lu too high (max %lu).",
                  (unsigned long) newPrio, (unsigned long) authTCB->tcbMCP);
        return status;
    }

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
#ifdef CONFIG_KERNEL_MCS
    return invokeTCB_ThreadControlSched(
               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), NULL,
               cap_null_cap_new(), NULL,
               NULL_PRIO, newPrio,
               NULL, thread_control_sched_update_priority);
#else
    return invokeTCB_ThreadControl(
               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), NULL,
               0, NULL_PRIO, newPrio,
               cap_null_cap_new(), NULL,
               cap_null_cap_new(), NULL,
               0, cap_null_cap_new(),
               NULL, thread_control_update_priority);
#endif
}

exception_t decodeSetMCPriority(cap_t cap, word_t length, word_t *buffer)
{
    if (length < 1 || current_extra_caps.excaprefs[0] == NULL) {
        userError("TCB SetMCPriority: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    prio_t newMcp = getSyscallArg(0, buffer);
    cap_t authCap = current_extra_caps.excaprefs[0]->cap;

    if (cap_get_capType(authCap) != cap_thread_cap) {
        userError("TCB SetMCPriority: authority cap not a TCB.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }

    tcb_t *authTCB = TCB_PTR(cap_thread_cap_get_capTCBPtr(authCap));
    exception_t status = checkPrio(newMcp, authTCB);
    if (status != EXCEPTION_NONE) {
        userError("TCB SetMCPriority: Requested maximum controlled priority %lu too high (max %lu).",
                  (unsigned long) newMcp, (unsigned long) authTCB->tcbMCP);
        return status;
    }

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
#ifdef CONFIG_KERNEL_MCS
    return invokeTCB_ThreadControlSched(
               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), NULL,
               cap_null_cap_new(), NULL,
               newMcp, NULL_PRIO,
               NULL, thread_control_sched_update_mcp);
#else
    return invokeTCB_ThreadControl(
               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), NULL,
               0, newMcp, NULL_PRIO,
               cap_null_cap_new(), NULL,
               cap_null_cap_new(), NULL,
               0, cap_null_cap_new(),
               NULL, thread_control_update_mcp);
#endif
}

#ifdef CONFIG_KERNEL_MCS
exception_t decodeSetTimeoutEndpoint(cap_t cap, cte_t *slot)
{
    if (current_extra_caps.excaprefs[0] == NULL) {
        userError("TCB SetTimeoutEndpoint: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    cte_t *thSlot = current_extra_caps.excaprefs[0];
    cap_t thCap = current_extra_caps.excaprefs[0]->cap;

    /* timeout handler */
    if (!validFaultHandler(thCap)) {
        userError("TCB SetTimeoutEndpoint: timeout endpoint cap invalid.");
        current_syscall_error.invalidCapNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return invokeTCB_ThreadControlCaps(
               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), slot,
               cap_null_cap_new(), NULL,
               thCap, thSlot,
               cap_null_cap_new(), NULL,
               cap_null_cap_new(), NULL,
               0, cap_null_cap_new(), NULL,
               thread_control_caps_update_timeout);
}
#endif

#ifdef CONFIG_KERNEL_MCS
exception_t decodeSetSchedParams(cap_t cap, word_t length, cte_t *slot, word_t *buffer)
#else
exception_t decodeSetSchedParams(cap_t cap, word_t length, word_t *buffer)
#endif
{
    if (length < 2 || current_extra_caps.excaprefs[0] == NULL
#ifdef CONFIG_KERNEL_MCS
        || current_extra_caps.excaprefs[1] == NULL || current_extra_caps.excaprefs[2] == NULL
#endif
       ) {
        userError("TCB SetSchedParams: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    prio_t newMcp = getSyscallArg(0, buffer);
    prio_t newPrio = getSyscallArg(1, buffer);
    cap_t authCap = current_extra_caps.excaprefs[0]->cap;
#ifdef CONFIG_KERNEL_MCS
    cap_t scCap = current_extra_caps.excaprefs[1]->cap;
    cte_t *fhSlot = current_extra_caps.excaprefs[2];
    cap_t fhCap = current_extra_caps.excaprefs[2]->cap;
#endif

    if (cap_get_capType(authCap) != cap_thread_cap) {
        userError("TCB SetSchedParams: authority cap not a TCB.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }

    tcb_t *authTCB = TCB_PTR(cap_thread_cap_get_capTCBPtr(authCap));
    exception_t status = checkPrio(newMcp, authTCB);
    if (status != EXCEPTION_NONE) {
        userError("TCB SetSchedParams: Requested maximum controlled priority %lu too high (max %lu).",
                  (unsigned long) newMcp, (unsigned long) authTCB->tcbMCP);
        return status;
    }

    status = checkPrio(newPrio, authTCB);
    if (status != EXCEPTION_NONE) {
        userError("TCB SetSchedParams: Requested priority %lu too high (max %lu).",
                  (unsigned long) newPrio, (unsigned long) authTCB->tcbMCP);
        return status;
    }

#ifdef CONFIG_KERNEL_MCS
    tcb_t *tcb = TCB_PTR(cap_thread_cap_get_capTCBPtr(cap));
    sched_context_t *sc = NULL;
    switch (cap_get_capType(scCap)) {
    case cap_sched_context_cap:
        sc = SC_PTR(cap_sched_context_cap_get_capSCPtr(scCap));
        if (tcb->tcbSchedContext) {
            userError("TCB SetSchedParams: tcb already has a scheduling context.");
            current_syscall_error.type = seL4_IllegalOperation;
            return EXCEPTION_SYSCALL_ERROR;
        }
        if (sc->scTcb) {
            userError("TCB SetSchedParams: sched context already bound.");
            current_syscall_error.type = seL4_IllegalOperation;
            return EXCEPTION_SYSCALL_ERROR;
        }
        if (isBlocked(tcb) && !sc_released(sc)) {
            userError("TCB SetSchedParams: tcb blocked and scheduling context not schedulable.");
            current_syscall_error.type = seL4_IllegalOperation;
            return EXCEPTION_SYSCALL_ERROR;
        }
        break;
    case cap_null_cap:
        if (tcb == NODE_STATE(ksCurThread)) {
            userError("TCB SetSchedParams: Cannot change sched_context of current thread.");
            current_syscall_error.type = seL4_IllegalOperation;
            return EXCEPTION_SYSCALL_ERROR;
        }
        break;
    default:
        userError("TCB SetSchedParams: sched context cap invalid.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 2;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (!validFaultHandler(fhCap)) {
        userError("TCB SetSchedParams: fault endpoint cap invalid.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 3;
        return EXCEPTION_SYSCALL_ERROR;
    }
#endif
    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
#ifdef CONFIG_KERNEL_MCS
    return invokeTCB_ThreadControlSched(
               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), slot,
               fhCap, fhSlot,
               newMcp, newPrio,
               sc,
               thread_control_sched_update_mcp |
               thread_control_sched_update_priority |
               thread_control_sched_update_sc |
               thread_control_sched_update_fault);
#else
    return invokeTCB_ThreadControl(
               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), NULL,
               0, newMcp, newPrio,
               cap_null_cap_new(), NULL,
               cap_null_cap_new(), NULL,
               0, cap_null_cap_new(),
               NULL, thread_control_update_mcp |
               thread_control_update_priority);
#endif
}

exception_t decodeSetIPCBuffer(cap_t cap, word_t length, cte_t *slot, word_t *buffer)
{
    cptr_t cptr_bufferPtr;
    cap_t bufferCap;
    cte_t *bufferSlot;

    if (length < 1 || current_extra_caps.excaprefs[0] == NULL) {
        userError("TCB SetIPCBuffer: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    cptr_bufferPtr = getSyscallArg(0, buffer);
    bufferSlot = current_extra_caps.excaprefs[0];
    bufferCap = current_extra_caps.excaprefs[0]->cap;

    if (cptr_bufferPtr == 0) {
        bufferSlot = NULL;
    } else {
        exception_t e;
        deriveCap_ret_t dc_ret;

        dc_ret = deriveCap(bufferSlot, bufferCap);
        if (dc_ret.status != EXCEPTION_NONE) {
            return dc_ret.status;
        }
        bufferCap = dc_ret.cap;
        e = checkValidIPCBuffer(cptr_bufferPtr, bufferCap);
        if (e != EXCEPTION_NONE) {
            return e;
        }
    }

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
#ifdef CONFIG_KERNEL_MCS
    return invokeTCB_ThreadControlCaps(
               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), slot,
               cap_null_cap_new(), NULL,
               cap_null_cap_new(), NULL,
               cap_null_cap_new(), NULL,
               cap_null_cap_new(), NULL,
               cptr_bufferPtr, bufferCap,
               bufferSlot, thread_control_caps_update_ipc_buffer);
#else
    return invokeTCB_ThreadControl(
               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), slot,
               0, NULL_PRIO, NULL_PRIO,
               cap_null_cap_new(), NULL,
               cap_null_cap_new(), NULL,
               cptr_bufferPtr, bufferCap,
               bufferSlot, thread_control_update_ipc_buffer);
#endif
}

#ifdef CONFIG_KERNEL_MCS
#define DECODE_SET_SPACE_PARAMS 2
#else
#define DECODE_SET_SPACE_PARAMS 3
#endif
exception_t decodeSetSpace(cap_t cap, word_t length, cte_t *slot, word_t *buffer)
{
    word_t cRootData, vRootData;
    cte_t *cRootSlot, *vRootSlot;
    cap_t cRootCap, vRootCap;
    deriveCap_ret_t dc_ret;

    if (length < DECODE_SET_SPACE_PARAMS || current_extra_caps.excaprefs[0] == NULL
        || current_extra_caps.excaprefs[1] == NULL
#ifdef CONFIG_KERNEL_MCS
        || current_extra_caps.excaprefs[2] == NULL
#endif
       ) {
        userError("TCB SetSpace: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

#ifdef CONFIG_KERNEL_MCS
    cRootData = getSyscallArg(0, buffer);
    vRootData = getSyscallArg(1, buffer);

    cte_t *fhSlot = current_extra_caps.excaprefs[0];
    cap_t fhCap = current_extra_caps.excaprefs[0]->cap;
    cRootSlot = current_extra_caps.excaprefs[1];
    cRootCap = current_extra_caps.excaprefs[1]->cap;
    vRootSlot = current_extra_caps.excaprefs[2];
    vRootCap = current_extra_caps.excaprefs[2]->cap;
#else
    cptr_t faultEP = getSyscallArg(0, buffer);
    cRootData = getSyscallArg(1, buffer);
    vRootData = getSyscallArg(2, buffer);

    cRootSlot = current_extra_caps.excaprefs[0];
    cRootCap = current_extra_caps.excaprefs[0]->cap;
    vRootSlot = current_extra_caps.excaprefs[1];
    vRootCap = current_extra_caps.excaprefs[1]->cap;
#endif

    if (slotCapLongRunningDelete(
            TCB_PTR_CTE_PTR(cap_thread_cap_get_capTCBPtr(cap), tcbCTable)) ||
        slotCapLongRunningDelete(
            TCB_PTR_CTE_PTR(cap_thread_cap_get_capTCBPtr(cap), tcbVTable))) {
        userError("TCB SetSpace: CSpace or VSpace currently being deleted.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (cRootData != 0) {
        cRootCap = updateCapData(false, cRootData, cRootCap);
    }

    dc_ret = deriveCap(cRootSlot, cRootCap);
    if (dc_ret.status != EXCEPTION_NONE) {
        return dc_ret.status;
    }
    cRootCap = dc_ret.cap;

    if (cap_get_capType(cRootCap) != cap_cnode_cap) {
        userError("TCB SetSpace: Invalid CNode cap.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (vRootData != 0) {
        vRootCap = updateCapData(false, vRootData, vRootCap);
    }

    dc_ret = deriveCap(vRootSlot, vRootCap);
    if (dc_ret.status != EXCEPTION_NONE) {
        return dc_ret.status;
    }
    vRootCap = dc_ret.cap;

    if (!isValidVTableRoot(vRootCap)) {
        userError("TCB SetSpace: Invalid VSpace cap.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

#ifdef CONFIG_KERNEL_MCS
    /* fault handler */
    if (!validFaultHandler(fhCap)) {
        userError("TCB SetSpace: fault endpoint cap invalid.");
        current_syscall_error.invalidCapNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }
#endif

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
#ifdef CONFIG_KERNEL_MCS
    return invokeTCB_ThreadControlCaps(
               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), slot,
               fhCap, fhSlot,
               cap_null_cap_new(), NULL,
               cRootCap, cRootSlot,
               vRootCap, vRootSlot,
               0, cap_null_cap_new(), NULL, thread_control_caps_update_space | thread_control_caps_update_fault);
#else
    return invokeTCB_ThreadControl(
               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), slot,
               faultEP,
               NULL_PRIO, NULL_PRIO,
               cRootCap, cRootSlot,
               vRootCap, vRootSlot,
               0, cap_null_cap_new(), NULL, thread_control_update_space);
#endif
}

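/* The only domain invocation is DomainSetSet, which moves the thread named by
 * the extra thread cap into the given scheduling domain */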
exception_t decodeDomainInvocation(word_t invLabel, word_t length, word_t *buffer)
{
    word_t domain;
    cap_t tcap;

    if (unlikely(invLabel != DomainSetSet)) {
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (unlikely(length == 0)) {
        userError("Domain Configure: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    } else {
        domain = getSyscallArg(0, buffer);
        if (domain >= CONFIG_NUM_DOMAINS) {
            userError("Domain Configure: invalid domain (%lu >= %u).",
                      domain, CONFIG_NUM_DOMAINS);
            current_syscall_error.type = seL4_InvalidArgument;
            current_syscall_error.invalidArgumentNumber = 0;
            return EXCEPTION_SYSCALL_ERROR;
        }
    }

    if (unlikely(current_extra_caps.excaprefs[0] == NULL)) {
        userError("Domain Configure: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    tcap = current_extra_caps.excaprefs[0]->cap;
    if (unlikely(cap_get_capType(tcap) != cap_thread_cap)) {
        userError("Domain Configure: thread cap required.");
        current_syscall_error.type = seL4_InvalidArgument;
        current_syscall_error.invalidArgumentNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    setDomain(TCB_PTR(cap_thread_cap_get_capTCBPtr(tcap)), domain);
    return EXCEPTION_NONE;
}

exception_t decodeBindNotification(cap_t cap)
{
    notification_t *ntfnPtr;
    tcb_t *tcb;
    cap_t ntfn_cap;

    if (current_extra_caps.excaprefs[0] == NULL) {
        userError("TCB BindNotification: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    tcb = TCB_PTR(cap_thread_cap_get_capTCBPtr(cap));

    if (tcb->tcbBoundNotification) {
        userError("TCB BindNotification: TCB already has a bound notification.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    ntfn_cap = current_extra_caps.excaprefs[0]->cap;

    if (cap_get_capType(ntfn_cap) == cap_notification_cap) {
        ntfnPtr = NTFN_PTR(cap_notification_cap_get_capNtfnPtr(ntfn_cap));
    } else {
        userError("TCB BindNotification: Notification is invalid.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (!cap_notification_cap_get_capNtfnCanReceive(ntfn_cap)) {
        userError("TCB BindNotification: Insufficient access rights.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if ((tcb_t *)notification_ptr_get_ntfnQueue_head(ntfnPtr)
        || (tcb_t *)notification_ptr_get_ntfnBoundTCB(ntfnPtr)) {
        userError("TCB BindNotification: Notification cannot be bound.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return invokeTCB_NotificationControl(tcb, ntfnPtr);
}

exception_t decodeUnbindNotification(cap_t cap)
{
    tcb_t *tcb;

    tcb = TCB_PTR(cap_thread_cap_get_capTCBPtr(cap));

    if (!tcb->tcbBoundNotification) {
        userError("TCB UnbindNotification: TCB has no bound notification.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return invokeTCB_NotificationControl(tcb, NULL);
}
1655
1656 /* The following functions sit in the preemption monad and implement the
1657 * preemptible, non-faulting bottom end of a TCB invocation. */
invokeTCB_Suspend(tcb_t * thread)1658 exception_t invokeTCB_Suspend(tcb_t *thread)
1659 {
1660 suspend(thread);
1661 return EXCEPTION_NONE;
1662 }
1663
invokeTCB_Resume(tcb_t * thread)1664 exception_t invokeTCB_Resume(tcb_t *thread)
1665 {
1666 restart(thread);
1667 return EXCEPTION_NONE;
1668 }
1669
1670 #ifdef CONFIG_KERNEL_MCS
static inline exception_t installTCBCap(tcb_t *target, cap_t tCap, cte_t *slot,
                                        tcb_cnode_index_t index, cap_t newCap, cte_t *srcSlot)
{
    cte_t *rootSlot = TCB_PTR_CTE_PTR(target, index);
    UNUSED exception_t e = cteDelete(rootSlot, true);
    if (e != EXCEPTION_NONE) {
        return e;
    }

    /* cteDelete on a cap installed in the tcb cannot fail */
    if (sameObjectAs(newCap, srcSlot->cap) &&
        sameObjectAs(tCap, slot->cap)) {
        cteInsert(newCap, srcSlot, rootSlot);
    }
    return e;
}
#endif

#ifdef CONFIG_KERNEL_MCS
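/* Update the target thread's fault handler, timeout handler, CSpace and
 * VSpace roots and IPC buffer, as selected by updateFlags. Each cap
 * update deletes the old cap first and may therefore be preempted. */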
exception_t invokeTCB_ThreadControlCaps(tcb_t *target, cte_t *slot,
                                        cap_t fh_newCap, cte_t *fh_srcSlot,
                                        cap_t th_newCap, cte_t *th_srcSlot,
                                        cap_t cRoot_newCap, cte_t *cRoot_srcSlot,
                                        cap_t vRoot_newCap, cte_t *vRoot_srcSlot,
                                        word_t bufferAddr, cap_t bufferCap,
                                        cte_t *bufferSrcSlot,
                                        thread_control_flag_t updateFlags)
{
    exception_t e;
    cap_t tCap = cap_thread_cap_new((word_t)target);

    if (updateFlags & thread_control_caps_update_fault) {
        e = installTCBCap(target, tCap, slot, tcbFaultHandler, fh_newCap, fh_srcSlot);
        if (e != EXCEPTION_NONE) {
            return e;
        }
    }

    if (updateFlags & thread_control_caps_update_timeout) {
        e = installTCBCap(target, tCap, slot, tcbTimeoutHandler, th_newCap, th_srcSlot);
        if (e != EXCEPTION_NONE) {
            return e;
        }
    }

    if (updateFlags & thread_control_caps_update_space) {
        e = installTCBCap(target, tCap, slot, tcbCTable, cRoot_newCap, cRoot_srcSlot);
        if (e != EXCEPTION_NONE) {
            return e;
        }

        e = installTCBCap(target, tCap, slot, tcbVTable, vRoot_newCap, vRoot_srcSlot);
        if (e != EXCEPTION_NONE) {
            return e;
        }
    }

    if (updateFlags & thread_control_caps_update_ipc_buffer) {
        cte_t *bufferSlot;

        bufferSlot = TCB_PTR_CTE_PTR(target, tcbBuffer);
        e = cteDelete(bufferSlot, true);
        if (e != EXCEPTION_NONE) {
            return e;
        }
        target->tcbIPCBuffer = bufferAddr;

        if (bufferSrcSlot && sameObjectAs(bufferCap, bufferSrcSlot->cap) &&
            sameObjectAs(tCap, slot->cap)) {
            cteInsert(bufferCap, bufferSrcSlot, bufferSlot);
        }

        if (target == NODE_STATE(ksCurThread)) {
            rescheduleRequired();
        }
    }

    return EXCEPTION_NONE;
}
#else
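/* Non-MCS variant: update the target thread's fault endpoint, MCP,
 * priority, CSpace and VSpace roots and IPC buffer, as selected by
 * updateFlags. */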
exception_t invokeTCB_ThreadControl(tcb_t *target, cte_t *slot,
                                    cptr_t faultep, prio_t mcp, prio_t priority,
                                    cap_t cRoot_newCap, cte_t *cRoot_srcSlot,
                                    cap_t vRoot_newCap, cte_t *vRoot_srcSlot,
                                    word_t bufferAddr, cap_t bufferCap,
                                    cte_t *bufferSrcSlot,
                                    thread_control_flag_t updateFlags)
{
    exception_t e;
    cap_t tCap = cap_thread_cap_new((word_t)target);

    if (updateFlags & thread_control_update_space) {
        target->tcbFaultHandler = faultep;
    }

    if (updateFlags & thread_control_update_mcp) {
        setMCPriority(target, mcp);
    }

    if (updateFlags & thread_control_update_space) {
        cte_t *rootSlot;

        rootSlot = TCB_PTR_CTE_PTR(target, tcbCTable);
        e = cteDelete(rootSlot, true);
        if (e != EXCEPTION_NONE) {
            return e;
        }
        if (sameObjectAs(cRoot_newCap, cRoot_srcSlot->cap) &&
            sameObjectAs(tCap, slot->cap)) {
            cteInsert(cRoot_newCap, cRoot_srcSlot, rootSlot);
        }
    }

    if (updateFlags & thread_control_update_space) {
        cte_t *rootSlot;

        rootSlot = TCB_PTR_CTE_PTR(target, tcbVTable);
        e = cteDelete(rootSlot, true);
        if (e != EXCEPTION_NONE) {
            return e;
        }
        if (sameObjectAs(vRoot_newCap, vRoot_srcSlot->cap) &&
            sameObjectAs(tCap, slot->cap)) {
            cteInsert(vRoot_newCap, vRoot_srcSlot, rootSlot);
        }
    }

    if (updateFlags & thread_control_update_ipc_buffer) {
        cte_t *bufferSlot;

        bufferSlot = TCB_PTR_CTE_PTR(target, tcbBuffer);
        e = cteDelete(bufferSlot, true);
        if (e != EXCEPTION_NONE) {
            return e;
        }
        target->tcbIPCBuffer = bufferAddr;

        if (bufferSrcSlot && sameObjectAs(bufferCap, bufferSrcSlot->cap) &&
            sameObjectAs(tCap, slot->cap)) {
            cteInsert(bufferCap, bufferSrcSlot, bufferSlot);
        }

        if (target == NODE_STATE(ksCurThread)) {
            rescheduleRequired();
        }
    }

    if (updateFlags & thread_control_update_priority) {
        setPriority(target, priority);
    }

    return EXCEPTION_NONE;
}
#endif

#ifdef CONFIG_KERNEL_MCS
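/* Update the target thread's fault handler, MCP, priority and scheduling
 * context binding, as selected by updateFlags. */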
exception_t invokeTCB_ThreadControlSched(tcb_t *target, cte_t *slot,
                                         cap_t fh_newCap, cte_t *fh_srcSlot,
                                         prio_t mcp, prio_t priority,
                                         sched_context_t *sc,
                                         thread_control_flag_t updateFlags)
{
    if (updateFlags & thread_control_sched_update_fault) {
        cap_t tCap = cap_thread_cap_new((word_t)target);
        exception_t e = installTCBCap(target, tCap, slot, tcbFaultHandler, fh_newCap, fh_srcSlot);
        if (e != EXCEPTION_NONE) {
            return e;
        }
    }

    if (updateFlags & thread_control_sched_update_mcp) {
        setMCPriority(target, mcp);
    }

    if (updateFlags & thread_control_sched_update_priority) {
        setPriority(target, priority);
    }

    if (updateFlags & thread_control_sched_update_sc) {
        if (sc != NULL && sc != target->tcbSchedContext) {
            schedContext_bindTCB(sc, target);
        } else if (sc == NULL && target->tcbSchedContext != NULL) {
            schedContext_unbindTCB(target->tcbSchedContext, target);
        }
    }

    return EXCEPTION_NONE;
}
#endif

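/* Copy the frame and/or general-purpose registers of tcb_src into dest,
 * optionally suspending the source and resuming the destination, then
 * hand any architecture-specific state over to Arch_performTransfer. */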
exception_t invokeTCB_CopyRegisters(tcb_t *dest, tcb_t *tcb_src,
                                    bool_t suspendSource, bool_t resumeTarget,
                                    bool_t transferFrame, bool_t transferInteger,
                                    word_t transferArch)
{
    if (suspendSource) {
        suspend(tcb_src);
    }

    if (resumeTarget) {
        restart(dest);
    }

    if (transferFrame) {
        word_t i;
        word_t v;
        word_t pc;

        for (i = 0; i < n_frameRegisters; i++) {
            v = getRegister(tcb_src, frameRegisters[i]);
            setRegister(dest, frameRegisters[i], v);
        }

        pc = getRestartPC(dest);
        setNextPC(dest, pc);
    }

    if (transferInteger) {
        word_t i;
        word_t v;

        for (i = 0; i < n_gpRegisters; i++) {
            v = getRegister(tcb_src, gpRegisters[i]);
            setRegister(dest, gpRegisters[i], v);
        }
    }

    Arch_postModifyRegisters(dest);

    if (dest == NODE_STATE(ksCurThread)) {
        /* If we modified the current thread we may need to reschedule,
         * because changed registers are only reloaded in
         * Arch_switchToThread. */
        rescheduleRequired();
    }

    return Arch_performTransfer(transferArch, tcb_src, dest);
}

/* ReadRegisters is a special case: replyFromKernel & setMRs are
 * unfolded here, in order to avoid passing the large reply message up
 * to the top level in a global (and double-copying). We prevent the
 * top-level replyFromKernel_success_empty() from running by setting the
 * thread state. Retype does this too.
 */
exception_t invokeTCB_ReadRegisters(tcb_t *tcb_src, bool_t suspendSource,
                                    word_t n, word_t arch, bool_t call)
{
    word_t i, j;
    exception_t e;
    tcb_t *thread;

    thread = NODE_STATE(ksCurThread);

    if (suspendSource) {
        suspend(tcb_src);
    }

    e = Arch_performTransfer(arch, tcb_src, NODE_STATE(ksCurThread));
    if (e != EXCEPTION_NONE) {
        return e;
    }

    if (call) {
        word_t *ipcBuffer;

        ipcBuffer = lookupIPCBuffer(true, thread);

        setRegister(thread, badgeRegister, 0);

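        /* Registers that do not fit in the message registers spill into
         * the IPC buffer; word 0 of the buffer holds the message tag, so
         * MR i lives at ipcBuffer[i + 1]. */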
        for (i = 0; i < n && i < n_frameRegisters && i < n_msgRegisters; i++) {
            setRegister(thread, msgRegisters[i],
                        getRegister(tcb_src, frameRegisters[i]));
        }

        if (ipcBuffer != NULL && i < n && i < n_frameRegisters) {
            for (; i < n && i < n_frameRegisters; i++) {
                ipcBuffer[i + 1] = getRegister(tcb_src, frameRegisters[i]);
            }
        }

        j = i;

        for (i = 0; i < n_gpRegisters && i + n_frameRegisters < n
             && i + n_frameRegisters < n_msgRegisters; i++) {
            setRegister(thread, msgRegisters[i + n_frameRegisters],
                        getRegister(tcb_src, gpRegisters[i]));
        }

        if (ipcBuffer != NULL && i < n_gpRegisters
            && i + n_frameRegisters < n) {
            for (; i < n_gpRegisters && i + n_frameRegisters < n; i++) {
                ipcBuffer[i + n_frameRegisters + 1] =
                    getRegister(tcb_src, gpRegisters[i]);
            }
        }

        setRegister(thread, msgInfoRegister, wordFromMessageInfo(
                        seL4_MessageInfo_new(0, 0, 0, i + j)));
    }
    setThreadState(thread, ThreadState_Running);

    return EXCEPTION_NONE;
}

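/* Write up to n of dest's registers from the syscall message, sanitising
 * each value, then reset dest's restart PC and optionally resume it. */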
exception_t invokeTCB_WriteRegisters(tcb_t *dest, bool_t resumeTarget,
                                     word_t n, word_t arch, word_t *buffer)
{
    word_t i;
    word_t pc;
    exception_t e;
    bool_t archInfo;

    e = Arch_performTransfer(arch, NODE_STATE(ksCurThread), dest);
    if (e != EXCEPTION_NONE) {
        return e;
    }

    if (n > n_frameRegisters + n_gpRegisters) {
        n = n_frameRegisters + n_gpRegisters;
    }

    archInfo = Arch_getSanitiseRegisterInfo(dest);

    for (i = 0; i < n_frameRegisters && i < n; i++) {
        /* Offset of 2 to get past the initial syscall arguments */
        setRegister(dest, frameRegisters[i],
                    sanitiseRegister(frameRegisters[i],
                                     getSyscallArg(i + 2, buffer), archInfo));
    }

    for (i = 0; i < n_gpRegisters && i + n_frameRegisters < n; i++) {
        setRegister(dest, gpRegisters[i],
                    sanitiseRegister(gpRegisters[i],
                                     getSyscallArg(i + n_frameRegisters + 2,
                                                   buffer), archInfo));
    }

    pc = getRestartPC(dest);
    setNextPC(dest, pc);

    Arch_postModifyRegisters(dest);

    if (resumeTarget) {
        restart(dest);
    }

    if (dest == NODE_STATE(ksCurThread)) {
        /* If we modified the current thread we may need to reschedule,
         * because changed registers are only reloaded in
         * Arch_switchToThread. */
        rescheduleRequired();
    }

    return EXCEPTION_NONE;
}

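/* Bind the given notification to the TCB, or, if ntfnPtr is NULL, unbind
 * the TCB's currently bound notification. */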
exception_t invokeTCB_NotificationControl(tcb_t *tcb, notification_t *ntfnPtr)
{
    if (ntfnPtr) {
        bindNotification(tcb, ntfnPtr);
    } else {
        unbindNotification(tcb);
    }

    return EXCEPTION_NONE;
}

#ifdef CONFIG_DEBUG_BUILD
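/* Debug builds only: copy name into the TCB's debug name field,
 * truncating to TCB_NAME_LENGTH; strlcpy guarantees NUL-termination. */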
void setThreadName(tcb_t *tcb, const char *name)
{
    strlcpy(TCB_PTR_DEBUG_PTR(tcb)->tcbName, name, TCB_NAME_LENGTH);
}
#endif

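/* Marshal the pending syscall error (current_syscall_error) into the
 * receiver's message registers and return the number of MRs written. */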
word_t setMRs_syscall_error(tcb_t *thread, word_t *receiveIPCBuffer)
{
    switch (current_syscall_error.type) {
    case seL4_InvalidArgument:
        return setMR(thread, receiveIPCBuffer, 0,
                     current_syscall_error.invalidArgumentNumber);

    case seL4_InvalidCapability:
        return setMR(thread, receiveIPCBuffer, 0,
                     current_syscall_error.invalidCapNumber);

    case seL4_IllegalOperation:
        return 0;

    case seL4_RangeError:
        setMR(thread, receiveIPCBuffer, 0,
              current_syscall_error.rangeErrorMin);
        return setMR(thread, receiveIPCBuffer, 1,
                     current_syscall_error.rangeErrorMax);

    case seL4_AlignmentError:
        return 0;

    case seL4_FailedLookup:
        setMR(thread, receiveIPCBuffer, 0,
              current_syscall_error.failedLookupWasSource ? 1 : 0);
        return setMRs_lookup_failure(thread, receiveIPCBuffer,
                                     current_lookup_fault, 1);

    case seL4_TruncatedMessage:
    case seL4_DeleteFirst:
    case seL4_RevokeFirst:
        return 0;

    case seL4_NotEnoughMemory:
        return setMR(thread, receiveIPCBuffer, 0,
                     current_syscall_error.memoryLeft);

    default:
        fail("Invalid syscall error");
    }
}