/*
 * Copyright 2014, General Dynamics C4 Systems
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */
6
7 #include <config.h>
8 #include <types.h>
9 #include <benchmark/benchmark.h>
10 #include <arch/benchmark.h>
11 #include <benchmark/benchmark_track.h>
12 #include <benchmark/benchmark_utilisation.h>
13 #include <api/syscall.h>
14 #include <api/failures.h>
15 #include <api/faults.h>
16 #include <kernel/cspace.h>
17 #include <kernel/faulthandler.h>
18 #include <kernel/thread.h>
19 #include <kernel/vspace.h>
20 #include <machine/io.h>
21 #include <plat/machine/hardware.h>
22 #include <object/interrupt.h>
23 #include <model/statedata.h>
24 #include <string.h>
25 #include <kernel/traps.h>
26 #include <arch/machine.h>
27
28 #ifdef CONFIG_DEBUG_BUILD
29 #include <arch/machine/capdl.h>
30 #endif
31
/* The haskell function 'handleEvent' is split into 'handleXXX' variants
 * for each event causing a kernel entry */
34
/* Kernel entry point for a hardware interrupt.
 *
 * Reads the active IRQ, dispatches it to handleInterrupt() (or the spurious
 * IRQ path if no IRQ is pending), then reschedules and activates the next
 * thread. Always returns EXCEPTION_NONE.
 */
exception_t handleInterruptEntry(void)
{
    irq_t irq;

    irq = getActiveIRQ();
#ifdef CONFIG_KERNEL_MCS
    /* NOTE(review): on SMP this only charges budget when this core is in the
     * lock queue (clh_is_self_in_queue()); presumably the timestamp/budget
     * state must only be touched while holding the big kernel lock — confirm. */
    if (SMP_TERNARY(clh_is_self_in_queue(), 1)) {
        updateTimestamp();
        checkBudget();
    }
#endif

    if (IRQT_TO_IRQ(irq) != IRQT_TO_IRQ(irqInvalid)) {
        handleInterrupt(irq);
    } else {
        /* getActiveIRQ() reported no pending interrupt */
#ifdef CONFIG_IRQ_REPORTING
        userError("Spurious interrupt!");
#endif
        handleSpuriousIRQ();
    }

#ifdef CONFIG_KERNEL_MCS
    /* Same lock-queue guard as above: only reschedule when it applies */
    if (SMP_TERNARY(clh_is_self_in_queue(), 1)) {
#endif
        schedule();
        activateThread();
#ifdef CONFIG_KERNEL_MCS
    }
#endif

    return EXCEPTION_NONE;
}
67
/* Handle a syscall number outside the standard seL4 syscall range.
 *
 * Depending on kernel configuration this services debug syscalls
 * (SysDebug*), dangerous code-injection / MSR access, and benchmarking
 * syscalls (SysBenchmark*). Any syscall number not recognised by the
 * enabled configuration falls through to the bottom of the function and is
 * delivered to the current thread's fault handler as an UnknownSyscall
 * fault.
 *
 * @param w the syscall number invoked by the current thread
 * @return EXCEPTION_NONE in almost all cases; EXCEPTION_SYSCALL_ERROR for
 *         benchmark log-buffer argument errors (error code is also written
 *         to the thread's capRegister in those paths).
 */
exception_t handleUnknownSyscall(word_t w)
{
#ifdef CONFIG_PRINTING
    if (w == SysDebugPutChar) {
        kernel_putchar(getRegister(NODE_STATE(ksCurThread), capRegister));
        return EXCEPTION_NONE;
    }
    if (w == SysDebugDumpScheduler) {
#ifdef CONFIG_DEBUG_BUILD
        debug_dumpScheduler();
#endif
        return EXCEPTION_NONE;
    }
#endif
#ifdef CONFIG_DEBUG_BUILD
    if (w == SysDebugHalt) {
        tcb_t *UNUSED tptr = NODE_STATE(ksCurThread);
        printf("Debug halt syscall from user thread %p \"%s\"\n", tptr, TCB_PTR_DEBUG_PTR(tptr)->tcbName);
        halt();
    }
    if (w == SysDebugSnapshot) {
        tcb_t *UNUSED tptr = NODE_STATE(ksCurThread);
        printf("Debug snapshot syscall from user thread %p \"%s\"\n",
               tptr, TCB_PTR_DEBUG_PTR(tptr)->tcbName);
        debug_capDL();
        return EXCEPTION_NONE;
    }
    if (w == SysDebugCapIdentify) {
        /* Return the capType of the cap at the given cptr in capRegister */
        word_t cptr = getRegister(NODE_STATE(ksCurThread), capRegister);
        lookupCapAndSlot_ret_t lu_ret = lookupCapAndSlot(NODE_STATE(ksCurThread), cptr);
        word_t cap_type = cap_get_capType(lu_ret.cap);
        setRegister(NODE_STATE(ksCurThread), capRegister, cap_type);
        return EXCEPTION_NONE;
    }

    if (w == SysDebugNameThread) {
        /* This is a syscall meant to aid debugging, so if anything goes wrong
         * then assume the system is completely misconfigured and halt */
        const char *name;
        word_t len;
        word_t cptr = getRegister(NODE_STATE(ksCurThread), capRegister);
        lookupCapAndSlot_ret_t lu_ret = lookupCapAndSlot(NODE_STATE(ksCurThread), cptr);
        /* ensure we got a TCB cap */
        word_t cap_type = cap_get_capType(lu_ret.cap);
        if (cap_type != cap_thread_cap) {
            userError("SysDebugNameThread: cap is not a TCB, halting");
            halt();
        }
        /* Add 1 to the IPC buffer to skip the message info word */
        name = (const char *)(lookupIPCBuffer(true, NODE_STATE(ksCurThread)) + 1);
        if (!name) {
            userError("SysDebugNameThread: Failed to lookup IPC buffer, halting");
            halt();
        }
        /* ensure the name isn't too long */
        len = strnlen(name, seL4_MsgMaxLength * sizeof(word_t));
        if (len == seL4_MsgMaxLength * sizeof(word_t)) {
            userError("SysDebugNameThread: Name too long, halting");
            halt();
        }
        setThreadName(TCB_PTR(cap_thread_cap_get_capTCBPtr(lu_ret.cap)), name);
        return EXCEPTION_NONE;
    }
#if defined ENABLE_SMP_SUPPORT && defined CONFIG_ARCH_ARM
    if (w == SysDebugSendIPI) {
        /* Send a software-generated interrupt (SGI 0-15) to another core */
        seL4_Word target = getRegister(NODE_STATE(ksCurThread), capRegister);
        seL4_Word irq = getRegister(NODE_STATE(ksCurThread), msgInfoRegister);

        if (target > CONFIG_MAX_NUM_NODES) {
            userError("SysDebugSendIPI: Invalid target, halting");
            halt();
        }
        if (irq > 15) {
            userError("SysDebugSendIPI: Invalid IRQ, not a SGI, halting");
            halt();
        }

        ipi_send_target(CORE_IRQ_TO_IRQT(0, irq), BIT(target));
        return EXCEPTION_NONE;
    }
#endif /* ENABLE_SMP_SUPPORT && CONFIG_ARCH_ARM */
#endif /* CONFIG_DEBUG_BUILD */

#ifdef CONFIG_DANGEROUS_CODE_INJECTION
    /* Jump to an arbitrary user-supplied address in kernel mode:
     * capRegister holds the function pointer, msgInfoRegister its argument */
    if (w == SysDebugRun) {
        ((void (*)(void *))getRegister(NODE_STATE(ksCurThread), capRegister))((void *)getRegister(NODE_STATE(ksCurThread),
                                                                                                  msgInfoRegister));
        return EXCEPTION_NONE;
    }
#endif

#ifdef CONFIG_KERNEL_X86_DANGEROUS_MSR
    if (w == SysX86DangerousWRMSR) {
        uint64_t val;
        uint32_t reg = getRegister(NODE_STATE(ksCurThread), capRegister);
        if (CONFIG_WORD_SIZE == 32) {
            /* 64-bit MSR value is split across two 32-bit message registers */
            val = (uint64_t)getSyscallArg(0, NULL) | ((uint64_t)getSyscallArg(1, NULL) << 32);
        } else {
            val = getSyscallArg(0, NULL);
        }
        x86_wrmsr(reg, val);
        return EXCEPTION_NONE;
    } else if (w == SysX86DangerousRDMSR) {
        uint64_t val;
        uint32_t reg = getRegister(NODE_STATE(ksCurThread), capRegister);
        val = x86_rdmsr(reg);
        int num = 1;
        if (CONFIG_WORD_SIZE == 32) {
            /* Return the 64-bit value in two message registers on 32-bit */
            setMR(NODE_STATE(ksCurThread), NULL, 0, val & 0xffffffff);
            setMR(NODE_STATE(ksCurThread), NULL, 1, val >> 32);
            num++;
        } else {
            setMR(NODE_STATE(ksCurThread), NULL, 0, val);
        }
        setRegister(NODE_STATE(ksCurThread), msgInfoRegister, wordFromMessageInfo(seL4_MessageInfo_new(0, 0, 0, num)));
        return EXCEPTION_NONE;
    }
#endif

#ifdef CONFIG_ENABLE_BENCHMARKS
    if (w == SysBenchmarkFlushCaches) {
#ifdef CONFIG_ARCH_ARM
        /* On ARM, a non-zero capRegister selects an L1-only clean/invalidate */
        tcb_t *thread = NODE_STATE(ksCurThread);
        if (getRegister(thread, capRegister)) {
            arch_clean_invalidate_L1_caches(getRegister(thread, msgInfoRegister));
        } else {
            arch_clean_invalidate_caches();
        }
#else
        arch_clean_invalidate_caches();
#endif
        return EXCEPTION_NONE;
    } else if (w == SysBenchmarkResetLog) {
#ifdef CONFIG_KERNEL_LOG_BUFFER
        /* A user log buffer must have been mapped first via
         * SysBenchmarkSetLogBuffer */
        if (ksUserLogBuffer == 0) {
            userError("A user-level buffer has to be set before resetting benchmark.\
 Use seL4_BenchmarkSetLogBuffer\n");
            setRegister(NODE_STATE(ksCurThread), capRegister, seL4_IllegalOperation);
            return EXCEPTION_SYSCALL_ERROR;
        }

        ksLogIndex = 0;
#endif /* CONFIG_KERNEL_LOG_BUFFER */
#ifdef CONFIG_BENCHMARK_TRACK_UTILISATION
        /* Start utilisation accounting from the time of this kernel entry */
        NODE_STATE(benchmark_log_utilisation_enabled) = true;
        benchmark_track_reset_utilisation(NODE_STATE(ksIdleThread));
        NODE_STATE(ksCurThread)->benchmark.schedule_start_time = ksEnter;
        NODE_STATE(ksCurThread)->benchmark.number_schedules++;

        NODE_STATE(benchmark_start_time) = ksEnter;
        NODE_STATE(benchmark_kernel_time) = 0;
        NODE_STATE(benchmark_kernel_number_entries) = 0;
        NODE_STATE(benchmark_kernel_number_schedules) = 1;
        benchmark_arch_utilisation_reset();
#endif /* CONFIG_BENCHMARK_TRACK_UTILISATION */
        setRegister(NODE_STATE(ksCurThread), capRegister, seL4_NoError);
        return EXCEPTION_NONE;
    } else if (w == SysBenchmarkFinalizeLog) {
#ifdef CONFIG_KERNEL_LOG_BUFFER
        /* Report the number of valid log entries back in capRegister */
        ksLogIndexFinalized = ksLogIndex;
        setRegister(NODE_STATE(ksCurThread), capRegister, ksLogIndexFinalized);
#endif /* CONFIG_KERNEL_LOG_BUFFER */
#ifdef CONFIG_BENCHMARK_TRACK_UTILISATION
        benchmark_utilisation_finalise();
#endif /* CONFIG_BENCHMARK_TRACK_UTILISATION */
        return EXCEPTION_NONE;
    } else if (w == SysBenchmarkSetLogBuffer) {
#ifdef CONFIG_KERNEL_LOG_BUFFER
        /* capRegister holds a cptr to a user frame to map as the log buffer.
         * Note: without CONFIG_KERNEL_LOG_BUFFER this case falls through to
         * the unknown-syscall fault path at the bottom of the function. */
        word_t cptr_userFrame = getRegister(NODE_STATE(ksCurThread), capRegister);

        if (benchmark_arch_map_logBuffer(cptr_userFrame) != EXCEPTION_NONE) {
            setRegister(NODE_STATE(ksCurThread), capRegister, seL4_IllegalOperation);
            return EXCEPTION_SYSCALL_ERROR;
        }

        setRegister(NODE_STATE(ksCurThread), capRegister, seL4_NoError);
        return EXCEPTION_NONE;
#endif /* CONFIG_KERNEL_LOG_BUFFER */
    }

#ifdef CONFIG_BENCHMARK_TRACK_UTILISATION
    else if (w == SysBenchmarkGetThreadUtilisation) {
        benchmark_track_utilisation_dump();
        return EXCEPTION_NONE;
    } else if (w == SysBenchmarkResetThreadUtilisation) {
        word_t tcb_cptr = getRegister(NODE_STATE(ksCurThread), capRegister);
        lookupCap_ret_t lu_ret;
        word_t cap_type;

        lu_ret = lookupCap(NODE_STATE(ksCurThread), tcb_cptr);
        /* ensure we got a TCB cap */
        cap_type = cap_get_capType(lu_ret.cap);
        if (cap_type != cap_thread_cap) {
            userError("SysBenchmarkResetThreadUtilisation: cap is not a TCB, halting");
            return EXCEPTION_NONE;
        }

        tcb_t *tcb = TCB_PTR(cap_thread_cap_get_capTCBPtr(lu_ret.cap));

        benchmark_track_reset_utilisation(tcb);
        return EXCEPTION_NONE;
    }
#ifdef CONFIG_DEBUG_BUILD
    else if (w == SysBenchmarkDumpAllThreadsUtilisation) {
        /* Emit per-thread utilisation counters as JSON over kernel printf */
        printf("{\n");
        printf("  \"BENCHMARK_TOTAL_UTILISATION\":%lu,\n",
               (word_t)(NODE_STATE(benchmark_end_time) - NODE_STATE(benchmark_start_time)));
        printf("  \"BENCHMARK_TOTAL_KERNEL_UTILISATION\":%lu,\n", (word_t) NODE_STATE(benchmark_kernel_time));
        printf("  \"BENCHMARK_TOTAL_NUMBER_KERNEL_ENTRIES\":%lu,\n", (word_t) NODE_STATE(benchmark_kernel_number_entries));
        printf("  \"BENCHMARK_TOTAL_NUMBER_SCHEDULES\":%lu,\n", (word_t) NODE_STATE(benchmark_kernel_number_schedules));
        printf("  \"BENCHMARK_TCB_\": [\n");
        for (tcb_t *curr = NODE_STATE(ksDebugTCBs); curr != NULL; curr = TCB_PTR_DEBUG_PTR(curr)->tcbDebugNext) {
            printf("    {\n");
            printf("      \"NAME\":\"%s\",\n", TCB_PTR_DEBUG_PTR(curr)->tcbName);
            printf("      \"UTILISATION\":%lu,\n", (word_t) curr->benchmark.utilisation);
            printf("      \"NUMBER_SCHEDULES\":%lu,\n", (word_t) curr->benchmark.number_schedules);
            printf("      \"KERNEL_UTILISATION\":%lu,\n", (word_t) curr->benchmark.kernel_utilisation);
            printf("      \"NUMBER_KERNEL_ENTRIES\":%lu\n", (word_t) curr->benchmark.number_kernel_entries);
            printf("    }");
            if (TCB_PTR_DEBUG_PTR(curr)->tcbDebugNext != NULL) {
                printf(",\n");
            } else {
                printf("\n");
            }
        }
        printf("  ]\n}\n");
        return EXCEPTION_NONE;
    } else if (w == SysBenchmarkResetAllThreadsUtilisation) {
        for (tcb_t *curr = NODE_STATE(ksDebugTCBs); curr != NULL; curr = TCB_PTR_DEBUG_PTR(curr)->tcbDebugNext) {
            benchmark_track_reset_utilisation(curr);
        }
        return EXCEPTION_NONE;
    }
#endif /* CONFIG_DEBUG_BUILD */
#endif /* CONFIG_BENCHMARK_TRACK_UTILISATION */

    else if (w == SysBenchmarkNullSyscall) {
        /* Used to measure raw kernel entry/exit cost */
        return EXCEPTION_NONE;
    }
#endif /* CONFIG_ENABLE_BENCHMARKS */

    MCS_DO_IF_BUDGET({
#ifdef CONFIG_SET_TLS_BASE_SELF
        if (w == SysSetTLSBase)
        {
            word_t tls_base = getRegister(NODE_STATE(ksCurThread), capRegister);
            /*
             * This updates the real register as opposed to the thread state
             * value. For many architectures, the TLS variables only get
             * updated on a thread switch.
             */
            return Arch_setTLSRegister(tls_base);
        }
#endif
        /* Nothing above recognised the syscall number: raise an
         * UnknownSyscall fault on the current thread */
        current_fault = seL4_Fault_UnknownSyscall_new(w);
        handleFault(NODE_STATE(ksCurThread));
    })

    schedule();
    activateThread();

    return EXCEPTION_NONE;
}
331
/* Kernel entry point for a user-level exception (e.g. undefined instruction).
 *
 * Wraps the two architecture-supplied fault words in a UserException fault,
 * delivers it to the current thread's fault handler, then reschedules.
 * Always returns EXCEPTION_NONE.
 */
exception_t handleUserLevelFault(word_t w_a, word_t w_b)
{
    MCS_DO_IF_BUDGET({
        current_fault = seL4_Fault_UserException_new(w_a, w_b);
        handleFault(NODE_STATE(ksCurThread));
    })
    schedule();
    activateThread();

    return EXCEPTION_NONE;
}
343
/* Kernel entry point for a virtual-memory fault taken by the current thread.
 *
 * Delegates to the architecture's handleVMFault(); if that does not resolve
 * the fault (non-EXCEPTION_NONE status), the fault recorded in current_fault
 * is delivered to the thread's fault handler. Always returns EXCEPTION_NONE.
 */
exception_t handleVMFaultEvent(vm_fault_type_t vm_faultType)
{
    MCS_DO_IF_BUDGET({

        exception_t status = handleVMFault(NODE_STATE(ksCurThread), vm_faultType);
        if (status != EXCEPTION_NONE)
        {
            handleFault(NODE_STATE(ksCurThread));
        }
    })

    schedule();
    activateThread();

    return EXCEPTION_NONE;
}
360
361 #ifdef CONFIG_KERNEL_MCS
/* Perform a capability invocation on behalf of the current thread.
 *
 * Looks up the invoked capability (and any extra caps named in the IPC
 * buffer), then decodes and executes the invocation.
 *
 * @param isCall     reply to the invoker on error or on kernel-handled
 *                   success (seL4_Call semantics)
 * @param isBlocking on lookup failure, deliver a CapFault to the thread
 *                   (non-blocking sends silently drop the error)
 * MCS only:
 * @param canDonate  whether scheduling-context donation is allowed
 * @param firstPhase whether this is the first phase of a combined syscall
 * @param cptr       capability pointer to invoke (non-MCS reads it from
 *                   capRegister instead)
 *
 * @return EXCEPTION_PREEMPTED if decoding was interrupted; otherwise
 *         EXCEPTION_NONE (user-visible errors are reported via reply
 *         messages or faults, not the return value).
 */
static exception_t handleInvocation(bool_t isCall, bool_t isBlocking, bool_t canDonate, bool_t firstPhase, cptr_t cptr)
#else
static exception_t handleInvocation(bool_t isCall, bool_t isBlocking)
#endif
{
    seL4_MessageInfo_t info;
    lookupCapAndSlot_ret_t lu_ret;
    word_t *buffer;
    exception_t status;
    word_t length;
    tcb_t *thread;

    thread = NODE_STATE(ksCurThread);

    info = messageInfoFromWord(getRegister(thread, msgInfoRegister));
#ifndef CONFIG_KERNEL_MCS
    cptr_t cptr = getRegister(thread, capRegister);
#endif

    /* faulting section */
    lu_ret = lookupCapAndSlot(thread, cptr);

    if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
        userError("Invocation of invalid cap #%lu.", cptr);
        current_fault = seL4_Fault_CapFault_new(cptr, false);

        if (isBlocking) {
            handleFault(thread);
        }

        return EXCEPTION_NONE;
    }

    buffer = lookupIPCBuffer(false, thread);

    status = lookupExtraCaps(thread, buffer, info);

    if (unlikely(status != EXCEPTION_NONE)) {
        userError("Lookup of extra caps failed.");
        if (isBlocking) {
            handleFault(thread);
        }
        return EXCEPTION_NONE;
    }

    /* Syscall error/Preemptible section */
    length = seL4_MessageInfo_get_length(info);
    /* Without an IPC buffer only the message registers can carry payload */
    if (unlikely(length > n_msgRegisters && !buffer)) {
        length = n_msgRegisters;
    }
#ifdef CONFIG_KERNEL_MCS
    status = decodeInvocation(seL4_MessageInfo_get_label(info), length,
                              cptr, lu_ret.slot, lu_ret.cap,
                              isBlocking, isCall,
                              canDonate, firstPhase, buffer);
#else
    status = decodeInvocation(seL4_MessageInfo_get_label(info), length,
                              cptr, lu_ret.slot, lu_ret.cap,
                              isBlocking, isCall, buffer);
#endif

    if (unlikely(status == EXCEPTION_PREEMPTED)) {
        return status;
    }

    if (unlikely(status == EXCEPTION_SYSCALL_ERROR)) {
        if (isCall) {
            replyFromKernel_error(thread);
        }
        return EXCEPTION_NONE;
    }

    /* Restart state means the invocation was handled entirely in the kernel;
     * resume the thread (with an empty success reply if it was a Call) */
    if (unlikely(
            thread_state_get_tsType(thread->tcbState) == ThreadState_Restart)) {
        if (isCall) {
            replyFromKernel_success_empty(thread);
        }
        setThreadState(thread, ThreadState_Running);
    }

    return EXCEPTION_NONE;
}
444
445 #ifdef CONFIG_KERNEL_MCS
/* (MCS) Look up the reply capability named in the current thread's
 * replyRegister.
 *
 * On lookup failure, or if the slot does not hold a reply cap, a CapFault
 * is raised on the current thread and the returned status is not
 * EXCEPTION_NONE (lookupCap's own status in the first case, EXCEPTION_FAULT
 * in the second). On success, lu_ret.cap holds the reply cap.
 */
static inline lookupCap_ret_t lookupReply(void)
{
    word_t replyCPtr = getRegister(NODE_STATE(ksCurThread), replyRegister);
    lookupCap_ret_t lu_ret = lookupCap(NODE_STATE(ksCurThread), replyCPtr);
    if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
        userError("Reply cap lookup failed");
        current_fault = seL4_Fault_CapFault_new(replyCPtr, true);
        handleFault(NODE_STATE(ksCurThread));
        return lu_ret;
    }

    if (unlikely(cap_get_capType(lu_ret.cap) != cap_reply_cap)) {
        userError("Cap in reply slot is not a reply");
        current_fault = seL4_Fault_CapFault_new(replyCPtr, true);
        handleFault(NODE_STATE(ksCurThread));
        /* lookupCap succeeded, so force a non-NONE status for the caller */
        lu_ret.status = EXCEPTION_FAULT;
        return lu_ret;
    }

    return lu_ret;
}
467 #else
/* (non-MCS) Handle SysReply: reply through the reply cap stored in the
 * current thread's tcbCaller slot.
 *
 * A master reply cap or a null cap makes the reply a silent no-op (a thread
 * may legitimately reply when no caller is waiting); any other cap type in
 * the caller slot is a kernel invariant violation and fails hard.
 */
static void handleReply(void)
{
    cte_t *callerSlot;
    cap_t callerCap;

    callerSlot = TCB_PTR_CTE_PTR(NODE_STATE(ksCurThread), tcbCaller);
    callerCap = callerSlot->cap;

    switch (cap_get_capType(callerCap)) {
    case cap_reply_cap: {
        tcb_t *caller;

        /* Master reply caps are not usable from the caller slot */
        if (cap_reply_cap_get_capReplyMaster(callerCap)) {
            break;
        }
        caller = TCB_PTR(cap_reply_cap_get_capTCBPtr(callerCap));
        /* Haskell error:
         * "handleReply: caller must not be the current thread" */
        assert(caller != NODE_STATE(ksCurThread));
        doReplyTransfer(NODE_STATE(ksCurThread), caller, callerSlot,
                        cap_reply_cap_get_capReplyCanGrant(callerCap));
        return;
    }

    case cap_null_cap:
        /* No caller waiting — reply is a no-op */
        userError("Attempted reply operation when no reply cap present.");
        return;

    default:
        break;
    }

    fail("handleReply: invalid caller cap");
}
502 #endif
503
504 #ifdef CONFIG_KERNEL_MCS
/* Handle a receive-class syscall on the cap named in capRegister.
 *
 * Receives from an endpoint or a notification, depending on the cap type.
 * Any lookup failure, missing receive right, or receive from a notification
 * bound to a different TCB raises a CapFault on the current thread.
 *
 * @param isBlocking block waiting for a message vs. poll (SysNBRecv/SysNBWait)
 * @param canReply   (MCS only) whether a reply cap from replyRegister should
 *                   be looked up and passed to receiveIPC
 */
static void handleRecv(bool_t isBlocking, bool_t canReply)
#else
static void handleRecv(bool_t isBlocking)
#endif
{
    word_t epCPtr;
    lookupCap_ret_t lu_ret;

    epCPtr = getRegister(NODE_STATE(ksCurThread), capRegister);

    lu_ret = lookupCap(NODE_STATE(ksCurThread), epCPtr);

    if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
        /* current_lookup_fault has been set by lookupCap */
        current_fault = seL4_Fault_CapFault_new(epCPtr, true);
        handleFault(NODE_STATE(ksCurThread));
        return;
    }

    switch (cap_get_capType(lu_ret.cap)) {
    case cap_endpoint_cap:
        if (unlikely(!cap_endpoint_cap_get_capCanReceive(lu_ret.cap))) {
            current_lookup_fault = lookup_fault_missing_capability_new(0);
            current_fault = seL4_Fault_CapFault_new(epCPtr, true);
            handleFault(NODE_STATE(ksCurThread));
            break;
        }

#ifdef CONFIG_KERNEL_MCS
        cap_t ep_cap = lu_ret.cap;
        cap_t reply_cap = cap_null_cap_new();
        if (canReply) {
            /* lookupReply raises the fault itself on failure */
            lu_ret = lookupReply();
            if (lu_ret.status != EXCEPTION_NONE) {
                return;
            } else {
                reply_cap = lu_ret.cap;
            }
        }
        receiveIPC(NODE_STATE(ksCurThread), ep_cap, isBlocking, reply_cap);
#else
        /* Drop any outstanding reply cap before waiting for a new message */
        deleteCallerCap(NODE_STATE(ksCurThread));
        receiveIPC(NODE_STATE(ksCurThread), lu_ret.cap, isBlocking);
#endif
        break;

    case cap_notification_cap: {
        notification_t *ntfnPtr;
        tcb_t *boundTCB;
        ntfnPtr = NTFN_PTR(cap_notification_cap_get_capNtfnPtr(lu_ret.cap));
        boundTCB = (tcb_t *)notification_ptr_get_ntfnBoundTCB(ntfnPtr);
        /* Receiving on a notification bound to another TCB is not allowed */
        if (unlikely(!cap_notification_cap_get_capNtfnCanReceive(lu_ret.cap)
                     || (boundTCB && boundTCB != NODE_STATE(ksCurThread)))) {
            current_lookup_fault = lookup_fault_missing_capability_new(0);
            current_fault = seL4_Fault_CapFault_new(epCPtr, true);
            handleFault(NODE_STATE(ksCurThread));
            break;
        }

        receiveSignal(NODE_STATE(ksCurThread), lu_ret.cap, isBlocking);
        break;
    }
    default:
        /* Neither an endpoint nor a notification cap */
        current_lookup_fault = lookup_fault_missing_capability_new(0);
        current_fault = seL4_Fault_CapFault_new(epCPtr, true);
        handleFault(NODE_STATE(ksCurThread));
        break;
    }
}
574
575 #ifdef CONFIG_KERNEL_MCS
/* (MCS) Account for consumed time at a preemption point after a long-running
 * (preemptible) invocation was interrupted. The current thread and/or its
 * scheduling context may have been deleted by the interrupted operation, so
 * each case below charges time only to state that is still valid.
 */
static inline void mcsPreemptionPoint(irq_t irq)
{
    /* at this point we could be handling a timer interrupt which actually ends the current
     * threads timeslice. However, preemption is possible on revoke, which could have deleted
     * the current thread and/or the current scheduling context, rendering them invalid. */
    if (isSchedulable(NODE_STATE(ksCurThread))) {
        /* if the thread is schedulable, the tcb and scheduling context are still valid */
        checkBudget();
    } else if (NODE_STATE(ksCurSC)->scRefillMax) {
        /* otherwise, if the thread is not schedulable, the SC could be valid - charge it if so */
        chargeBudget(NODE_STATE(ksConsumed), false, CURRENT_CPU_INDEX(), true);
    } else {
        /* If the current SC is no longer configured the time can no
         * longer be charged to it. Simply dropping the consumed time
         * here is equivalent to having charged the consumed time and
         * then having cleared the SC. */
        NODE_STATE(ksConsumed) = 0;
    }
}
595 #else
596 #define handleRecv(isBlocking, canReply) handleRecv(isBlocking)
597 #define mcsPreemptionPoint(irq)
598 #define handleInvocation(isCall, isBlocking, canDonate, firstPhase, cptr) handleInvocation(isCall, isBlocking)
599 #endif
600
/* Handle SysYield.
 *
 * MCS: give up the remainder of the current budget by charging the full head
 * refill, while restoring scConsumed so only time actually consumed (not the
 * whole forfeited timeslice) is recorded against the scheduling context.
 * Non-MCS: move the current thread to the back of its scheduling queue and
 * force a reschedule.
 */
static void handleYield(void)
{
#ifdef CONFIG_KERNEL_MCS
    /* Yield the current remaining budget */
    ticks_t consumed = NODE_STATE(ksCurSC)->scConsumed + NODE_STATE(ksConsumed);
    chargeBudget(refill_head(NODE_STATE(ksCurSC))->rAmount, false, CURRENT_CPU_INDEX(), true);
    /* Manually updated the scConsumed so that the full timeslice isn't added, just what was consumed */
    NODE_STATE(ksCurSC)->scConsumed = consumed;
#else
    tcbSchedDequeue(NODE_STATE(ksCurThread));
    SCHED_APPEND_CURRENT_TCB;
    rescheduleRequired();
#endif
}
615
/* Kernel entry point for a standard seL4 syscall.
 *
 * Dispatches to the appropriate invocation/receive/reply/yield handler for
 * the given syscall number, then reschedules and activates the next thread.
 * If an invocation returns non-EXCEPTION_NONE it was preempted: the pending
 * IRQ is fetched, budget is accounted (MCS), and the interrupt is handled
 * before returning to the scheduler. Always returns EXCEPTION_NONE.
 *
 * The handleInvocation/handleRecv/mcsPreemptionPoint names resolve to the
 * MCS variants or to the non-MCS adapter macros defined above this function.
 */
exception_t handleSyscall(syscall_t syscall)
{
    exception_t ret;
    irq_t irq;
    MCS_DO_IF_BUDGET({
        switch (syscall)
        {
        case SysSend:
            /* blocking send, no call semantics, no donation */
            ret = handleInvocation(false, true, false, false, getRegister(NODE_STATE(ksCurThread), capRegister));
            if (unlikely(ret != EXCEPTION_NONE)) {
                /* invocation was preempted: account time and service the IRQ */
                irq = getActiveIRQ();
                mcsPreemptionPoint(irq);
                if (IRQT_TO_IRQ(irq) != IRQT_TO_IRQ(irqInvalid)) {
                    handleInterrupt(irq);
                }
            }

            break;

        case SysNBSend:
            /* non-blocking send */
            ret = handleInvocation(false, false, false, false, getRegister(NODE_STATE(ksCurThread), capRegister));
            if (unlikely(ret != EXCEPTION_NONE)) {
                irq = getActiveIRQ();
                mcsPreemptionPoint(irq);
                if (IRQT_TO_IRQ(irq) != IRQT_TO_IRQ(irqInvalid)) {
                    handleInterrupt(irq);
                }
            }
            break;

        case SysCall:
            /* blocking call; on MCS the SC may be donated to the receiver */
            ret = handleInvocation(true, true, true, false, getRegister(NODE_STATE(ksCurThread), capRegister));
            if (unlikely(ret != EXCEPTION_NONE)) {
                irq = getActiveIRQ();
                mcsPreemptionPoint(irq);
                if (IRQT_TO_IRQ(irq) != IRQT_TO_IRQ(irqInvalid)) {
                    handleInterrupt(irq);
                }
            }
            break;

        case SysRecv:
            handleRecv(true, true);
            break;
#ifndef CONFIG_KERNEL_MCS
        case SysReply:
            handleReply();
            break;

        case SysReplyRecv:
            handleReply();
            handleRecv(true, true);
            break;

#else /* CONFIG_KERNEL_MCS */
        case SysWait:
            /* like SysRecv but without supplying a reply cap */
            handleRecv(true, false);
            break;

        case SysNBWait:
            handleRecv(false, false);
            break;
        case SysReplyRecv: {
            /* reply phase: non-blocking invocation of the reply cap */
            cptr_t reply = getRegister(NODE_STATE(ksCurThread), replyRegister);
            ret = handleInvocation(false, false, true, true, reply);
            /* reply cannot error and is not preemptible */
            assert(ret == EXCEPTION_NONE);
            handleRecv(true, true);
            break;
        }

        case SysNBSendRecv: {
            cptr_t dest = getNBSendRecvDest();
            ret = handleInvocation(false, false, true, true, dest);
            if (unlikely(ret != EXCEPTION_NONE)) {
                irq = getActiveIRQ();
                mcsPreemptionPoint(irq);
                if (IRQT_TO_IRQ(irq) != IRQT_TO_IRQ(irqInvalid)) {
                    handleInterrupt(irq);
                }
                break;
            }
            handleRecv(true, true);
            break;
        }

        case SysNBSendWait:
            ret = handleInvocation(false, false, true, true, getRegister(NODE_STATE(ksCurThread), replyRegister));
            if (unlikely(ret != EXCEPTION_NONE)) {
                irq = getActiveIRQ();
                mcsPreemptionPoint(irq);
                if (IRQT_TO_IRQ(irq) != IRQT_TO_IRQ(irqInvalid)) {
                    handleInterrupt(irq);
                }
                break;
            }
            handleRecv(true, false);
            break;
#endif
        case SysNBRecv:
            handleRecv(false, true);
            break;

        case SysYield:
            handleYield();
            break;

        default:
            fail("Invalid syscall");
        }

    })

    schedule();
    activateThread();

    return EXCEPTION_NONE;
}
734