/*
 * Copyright 2014, General Dynamics C4 Systems
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include <config.h>
#include <object.h>
#include <util.h>
#include <api/faults.h>
#include <api/types.h>
#include <kernel/cspace.h>
#include <kernel/thread.h>
#include <kernel/vspace.h>
#ifdef CONFIG_KERNEL_MCS
#include <object/schedcontext.h>
#endif
#include <model/statedata.h>
#include <arch/machine.h>
#include <arch/kernel/thread.h>
#include <machine/registerset.h>
#include <linker.h>

static seL4_MessageInfo_t
transferCaps(seL4_MessageInfo_t info,
             endpoint_t *endpoint, tcb_t *receiver,
             word_t *receiveBuffer);

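/* Boot-time initialisation of the idle thread: perform the architecture-specific
 * setup and place the TCB in the IdleThreadState. */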
BOOT_CODE void configureIdleThread(tcb_t *tcb)
{
    Arch_configureIdleThread(tcb);
    setThreadState(tcb, ThreadState_IdleThreadState);
}

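/* Prepare the current thread to resume at user level: complete any pending
 * yield-to (MCS), load the restart PC for restarted threads, or activate the
 * idle thread. Blocked threads must never reach this point. */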
void activateThread(void)
{
#ifdef CONFIG_KERNEL_MCS
    if (unlikely(NODE_STATE(ksCurThread)->tcbYieldTo)) {
        schedContext_completeYieldTo(NODE_STATE(ksCurThread));
        assert(thread_state_get_tsType(NODE_STATE(ksCurThread)->tcbState) == ThreadState_Running);
    }
#endif

    switch (thread_state_get_tsType(NODE_STATE(ksCurThread)->tcbState)) {
    case ThreadState_Running:
#ifdef CONFIG_VTX
    case ThreadState_RunningVM:
#endif
        break;

    case ThreadState_Restart: {
        word_t pc;

        pc = getRestartPC(NODE_STATE(ksCurThread));
        setNextPC(NODE_STATE(ksCurThread), pc);
        setThreadState(NODE_STATE(ksCurThread), ThreadState_Running);
        break;
    }

    case ThreadState_IdleThreadState:
        Arch_activateIdleThread(NODE_STATE(ksCurThread));
        break;

    default:
        fail("Current thread is blocked");
    }
}

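/* Make a thread inactive: cancel any IPC it is involved in, preserve its
 * restart PC if it was running, and remove it from the scheduler queues (and,
 * on MCS, from the release queue and any yield-to relationship). */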
void suspend(tcb_t *target)
{
    cancelIPC(target);
    if (thread_state_get_tsType(target->tcbState) == ThreadState_Running) {
        /* While in the Running state it is possible that the restart PC of a
         * thread is incorrect. As we do not know what state this thread will
         * transition to after we make it inactive, we update its restart PC so
         * that the thread next runs at the correct address, whether it is
         * restarted or moved directly to Running. */
        updateRestartPC(target);
    }
    setThreadState(target, ThreadState_Inactive);
    tcbSchedDequeue(target);
#ifdef CONFIG_KERNEL_MCS
    tcbReleaseRemove(target);
    schedContext_cancelYieldTo(target);
#endif
}

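/* Resume a stopped thread: cancel any outstanding IPC, set it to Restart and
 * make it schedulable again (resuming its scheduling context on MCS, or
 * setting up its reply master cap otherwise). */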
void restart(tcb_t *target)
{
    if (isStopped(target)) {
        cancelIPC(target);
#ifdef CONFIG_KERNEL_MCS
        setThreadState(target, ThreadState_Restart);
        if (sc_sporadic(target->tcbSchedContext) && sc_active(target->tcbSchedContext)
            && target->tcbSchedContext != NODE_STATE(ksCurSC)) {
            refill_unblock_check(target->tcbSchedContext);
        }
        schedContext_resume(target->tcbSchedContext);
        if (isSchedulable(target)) {
            possibleSwitchTo(target);
        }
#else
        setupReplyMaster(target);
        setThreadState(target, ThreadState_Restart);
        SCHED_ENQUEUE(target);
        possibleSwitchTo(target);
#endif
    }
}

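/* Transfer a message from sender to receiver across an endpoint: a normal
 * message transfer when the sender has no pending fault, otherwise a fault
 * message transfer. */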
void doIPCTransfer(tcb_t *sender, endpoint_t *endpoint, word_t badge,
                   bool_t grant, tcb_t *receiver)
{
    void *receiveBuffer, *sendBuffer;

    receiveBuffer = lookupIPCBuffer(true, receiver);

    if (likely(seL4_Fault_get_seL4_FaultType(sender->tcbFault) == seL4_Fault_NullFault)) {
        sendBuffer = lookupIPCBuffer(false, sender);
        doNormalTransfer(sender, sendBuffer, endpoint, badge, grant,
                         receiver, receiveBuffer);
    } else {
        doFaultTransfer(badge, sender, receiver, receiveBuffer);
    }
}

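/* Deliver a reply to a thread blocked on a reply object (MCS) or via a reply
 * cap slot (non-MCS). A faulting receiver is either restarted or made inactive
 * depending on the fault reply; on MCS the receiver may additionally receive a
 * timeout fault or be postponed if its budget is not ready. */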
#ifdef CONFIG_KERNEL_MCS
void doReplyTransfer(tcb_t *sender, reply_t *reply, bool_t grant)
#else
void doReplyTransfer(tcb_t *sender, tcb_t *receiver, cte_t *slot, bool_t grant)
#endif
{
#ifdef CONFIG_KERNEL_MCS
    if (reply->replyTCB == NULL ||
        thread_state_get_tsType(reply->replyTCB->tcbState) != ThreadState_BlockedOnReply) {
        /* nothing to do */
        return;
    }

    tcb_t *receiver = reply->replyTCB;
    reply_remove(reply, receiver);
    assert(thread_state_get_replyObject(receiver->tcbState) == REPLY_REF(0));
    assert(reply->replyTCB == NULL);

    if (sc_sporadic(receiver->tcbSchedContext) && sc_active(receiver->tcbSchedContext)
        && receiver->tcbSchedContext != NODE_STATE_ON_CORE(ksCurSC, receiver->tcbSchedContext->scCore)) {
        refill_unblock_check(receiver->tcbSchedContext);
    }
#else
    assert(thread_state_get_tsType(receiver->tcbState) ==
           ThreadState_BlockedOnReply);
#endif

    word_t fault_type = seL4_Fault_get_seL4_FaultType(receiver->tcbFault);
    if (likely(fault_type == seL4_Fault_NullFault)) {
        doIPCTransfer(sender, NULL, 0, grant, receiver);
#ifdef CONFIG_KERNEL_MCS
        setThreadState(receiver, ThreadState_Running);
#else
        /** GHOSTUPD: "(True, gs_set_assn cteDeleteOne_'proc (ucast cap_reply_cap))" */
        cteDeleteOne(slot);
        setThreadState(receiver, ThreadState_Running);
        possibleSwitchTo(receiver);
#endif
    } else {
#ifndef CONFIG_KERNEL_MCS
        /** GHOSTUPD: "(True, gs_set_assn cteDeleteOne_'proc (ucast cap_reply_cap))" */
        cteDeleteOne(slot);
#endif
        bool_t restart = handleFaultReply(receiver, sender);
        receiver->tcbFault = seL4_Fault_NullFault_new();
        if (restart) {
            setThreadState(receiver, ThreadState_Restart);
#ifndef CONFIG_KERNEL_MCS
            possibleSwitchTo(receiver);
#endif
        } else {
            setThreadState(receiver, ThreadState_Inactive);
        }
    }

#ifdef CONFIG_KERNEL_MCS
    if (receiver->tcbSchedContext && isRunnable(receiver)) {
        if ((refill_ready(receiver->tcbSchedContext) && refill_sufficient(receiver->tcbSchedContext, 0))) {
            possibleSwitchTo(receiver);
        } else {
            if (validTimeoutHandler(receiver) && fault_type != seL4_Fault_Timeout) {
                current_fault = seL4_Fault_Timeout_new(receiver->tcbSchedContext->scBadge);
                handleTimeout(receiver);
            } else {
                postpone(receiver->tcbSchedContext);
            }
        }
    }
#endif
}

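/* Copy a normal (non-fault) message: transfer the message registers and, if
 * the sender may grant, any extra caps, then write the resulting message info
 * and badge into the receiver's registers. */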
void doNormalTransfer(tcb_t *sender, word_t *sendBuffer, endpoint_t *endpoint,
                      word_t badge, bool_t canGrant, tcb_t *receiver,
                      word_t *receiveBuffer)
{
    word_t msgTransferred;
    seL4_MessageInfo_t tag;
    exception_t status;

    tag = messageInfoFromWord(getRegister(sender, msgInfoRegister));

    if (canGrant) {
        status = lookupExtraCaps(sender, sendBuffer, tag);
        if (unlikely(status != EXCEPTION_NONE)) {
            current_extra_caps.excaprefs[0] = NULL;
        }
    } else {
        current_extra_caps.excaprefs[0] = NULL;
    }

    msgTransferred = copyMRs(sender, sendBuffer, receiver, receiveBuffer,
                             seL4_MessageInfo_get_length(tag));

    tag = transferCaps(tag, endpoint, receiver, receiveBuffer);

    tag = seL4_MessageInfo_set_length(tag, msgTransferred);
    setRegister(receiver, msgInfoRegister, wordFromMessageInfo(tag));
    setRegister(receiver, badgeRegister, badge);
}

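/* Marshal the sender's pending fault into the receiver's message registers and
 * IPC buffer; the fault type is encoded in the label of the message info. */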
void doFaultTransfer(word_t badge, tcb_t *sender, tcb_t *receiver,
                     word_t *receiverIPCBuffer)
{
    word_t sent;
    seL4_MessageInfo_t msgInfo;

    sent = setMRs_fault(sender, receiver, receiverIPCBuffer);
    msgInfo = seL4_MessageInfo_new(
                  seL4_Fault_get_seL4_FaultType(sender->tcbFault), 0, 0, sent);
    setRegister(receiver, msgInfoRegister, wordFromMessageInfo(msgInfo));
    setRegister(receiver, badgeRegister, badge);
}

/* Like getReceiveSlots, this is specialised for single-cap transfer. */
static seL4_MessageInfo_t transferCaps(seL4_MessageInfo_t info,
                                       endpoint_t *endpoint, tcb_t *receiver,
                                       word_t *receiveBuffer)
{
    word_t i;
    cte_t *destSlot;

    info = seL4_MessageInfo_set_extraCaps(info, 0);
    info = seL4_MessageInfo_set_capsUnwrapped(info, 0);

    if (likely(!current_extra_caps.excaprefs[0] || !receiveBuffer)) {
        return info;
    }

    destSlot = getReceiveSlots(receiver, receiveBuffer);

    for (i = 0; i < seL4_MsgMaxExtraCaps && current_extra_caps.excaprefs[i] != NULL; i++) {
        cte_t *slot = current_extra_caps.excaprefs[i];
        cap_t cap = slot->cap;

        if (cap_get_capType(cap) == cap_endpoint_cap &&
            EP_PTR(cap_endpoint_cap_get_capEPPtr(cap)) == endpoint) {
            /* If this is a cap to the endpoint on which the message was sent,
             * only transfer the badge, not the cap. */
            setExtraBadge(receiveBuffer,
                          cap_endpoint_cap_get_capEPBadge(cap), i);

            info = seL4_MessageInfo_set_capsUnwrapped(info,
                                                      seL4_MessageInfo_get_capsUnwrapped(info) | (1 << i));

        } else {
            deriveCap_ret_t dc_ret;

            if (!destSlot) {
                break;
            }

            dc_ret = deriveCap(slot, cap);

            if (dc_ret.status != EXCEPTION_NONE) {
                break;
            }
            if (cap_get_capType(dc_ret.cap) == cap_null_cap) {
                break;
            }

            cteInsert(dc_ret.cap, slot, destSlot);

            destSlot = NULL;
        }
    }

    return seL4_MessageInfo_set_extraCaps(info, i);
}

void doNBRecvFailedTransfer(tcb_t *thread)
{
    /* Set the badge register to 0 to indicate there was no message. */
    setRegister(thread, badgeRegister, 0);
}

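/* Advance to the next entry in the domain schedule (wrapping around) and reset
 * the remaining domain time and work-unit counter. */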
static void nextDomain(void)
{
    ksDomScheduleIdx++;
    if (ksDomScheduleIdx >= ksDomScheduleLength) {
        ksDomScheduleIdx = 0;
    }
#ifdef CONFIG_KERNEL_MCS
    NODE_STATE(ksReprogram) = true;
#endif
    ksWorkUnitsCompleted = 0;
    ksCurDomain = ksDomSchedule[ksDomScheduleIdx].domain;
#ifdef CONFIG_KERNEL_MCS
    ksDomainTime = usToTicks(ksDomSchedule[ksDomScheduleIdx].length * US_IN_MS);
#else
    ksDomainTime = ksDomSchedule[ksDomScheduleIdx].length;
#endif
}

#ifdef CONFIG_KERNEL_MCS
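/* Switch the current scheduling context to that of the current thread,
 * committing consumed time whenever the timer must be reprogrammed. */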
static void switchSchedContext(void)
{
    if (unlikely(NODE_STATE(ksCurSC) != NODE_STATE(ksCurThread)->tcbSchedContext)) {
        NODE_STATE(ksReprogram) = true;
        if (sc_constant_bandwidth(NODE_STATE(ksCurThread)->tcbSchedContext)) {
            refill_unblock_check(NODE_STATE(ksCurThread)->tcbSchedContext);
        }

        assert(refill_ready(NODE_STATE(ksCurThread)->tcbSchedContext));
        assert(refill_sufficient(NODE_STATE(ksCurThread)->tcbSchedContext, 0));
    }

    if (NODE_STATE(ksReprogram)) {
        /* If we are reprogramming, we have acted on the new kernel time and
         * cannot roll back, so charge the current thread. */
        commitTime();
    }

    NODE_STATE(ksCurSC) = NODE_STATE(ksCurThread)->tcbSchedContext;
}
#endif

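/* Pick a new thread to run, first advancing the domain schedule if the current
 * domain's time has been exhausted. */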
static void scheduleChooseNewThread(void)
{
    if (ksDomainTime == 0) {
        nextDomain();
    }
    chooseThread();
}

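/* The main scheduling decision point. Acts on ksSchedulerAction: either resume
 * the current thread, switch directly to a designated candidate, or fall back
 * to choosing the highest-priority runnable thread. On MCS, this also wakes
 * released threads, switches scheduling contexts and reprograms the timer
 * when required. */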
void schedule(void)
{
#ifdef CONFIG_KERNEL_MCS
    awaken();
    checkDomainTime();
#endif

    if (NODE_STATE(ksSchedulerAction) != SchedulerAction_ResumeCurrentThread) {
        bool_t was_runnable;
        if (isSchedulable(NODE_STATE(ksCurThread))) {
            was_runnable = true;
            SCHED_ENQUEUE_CURRENT_TCB;
        } else {
            was_runnable = false;
        }

        if (NODE_STATE(ksSchedulerAction) == SchedulerAction_ChooseNewThread) {
            scheduleChooseNewThread();
        } else {
            tcb_t *candidate = NODE_STATE(ksSchedulerAction);
            assert(isSchedulable(candidate));
            /* Avoid checking the bitmap when ksCurThread is higher priority,
             * to match the fast path.
             * Don't look at the ksCurThread priority when it's idle, to
             * respect information flow in non-fastpath cases. */
            bool_t fastfail =
                NODE_STATE(ksCurThread) == NODE_STATE(ksIdleThread)
                || (candidate->tcbPriority < NODE_STATE(ksCurThread)->tcbPriority);
            if (fastfail &&
                !isHighestPrio(ksCurDomain, candidate->tcbPriority)) {
                SCHED_ENQUEUE(candidate);
                /* we can't switch to the candidate directly, need to reschedule */
                NODE_STATE(ksSchedulerAction) = SchedulerAction_ChooseNewThread;
                scheduleChooseNewThread();
            } else if (was_runnable && candidate->tcbPriority == NODE_STATE(ksCurThread)->tcbPriority) {
                /* Append the candidate to the end of the scheduling queue so
                 * that the current thread, which was enqueued at the head of
                 * the queue, gets picked during chooseNewThread. */
                SCHED_APPEND(candidate);
                NODE_STATE(ksSchedulerAction) = SchedulerAction_ChooseNewThread;
                scheduleChooseNewThread();
            } else {
                assert(candidate != NODE_STATE(ksCurThread));
                switchToThread(candidate);
            }
        }
    }
    NODE_STATE(ksSchedulerAction) = SchedulerAction_ResumeCurrentThread;
#ifdef ENABLE_SMP_SUPPORT
    doMaskReschedule(ARCH_NODE_STATE(ipiReschedulePending));
    ARCH_NODE_STATE(ipiReschedulePending) = 0;
#endif /* ENABLE_SMP_SUPPORT */

#ifdef CONFIG_KERNEL_MCS
    switchSchedContext();

    if (NODE_STATE(ksReprogram)) {
        setNextInterrupt();
        NODE_STATE(ksReprogram) = false;
    }
#endif
}

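/* Switch to the highest-priority schedulable thread in the current domain's
 * ready queues, or to the idle thread if none is runnable. */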
void chooseThread(void)
{
    word_t prio;
    word_t dom;
    tcb_t *thread;

    if (CONFIG_NUM_DOMAINS > 1) {
        dom = ksCurDomain;
    } else {
        dom = 0;
    }

    if (likely(NODE_STATE(ksReadyQueuesL1Bitmap[dom]))) {
        prio = getHighestPrio(dom);
        thread = NODE_STATE(ksReadyQueues)[ready_queues_index(dom, prio)].head;
        assert(thread);
        assert(isSchedulable(thread));
#ifdef CONFIG_KERNEL_MCS
        assert(refill_sufficient(thread->tcbSchedContext, 0));
        assert(refill_ready(thread->tcbSchedContext));
#endif
        switchToThread(thread);
    } else {
        switchToIdleThread();
    }
}

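/* Make the given thread the current thread: perform the architecture-specific
 * context switch and remove it from the ready queues. */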
void switchToThread(tcb_t *thread)
{
#ifdef CONFIG_KERNEL_MCS
    assert(thread->tcbSchedContext != NULL);
    assert(!thread_state_get_tcbInReleaseQueue(thread->tcbState));
    assert(refill_sufficient(thread->tcbSchedContext, 0));
    assert(refill_ready(thread->tcbSchedContext));
#endif

#ifdef CONFIG_BENCHMARK_TRACK_UTILISATION
    benchmark_utilisation_switch(NODE_STATE(ksCurThread), thread);
#endif
    Arch_switchToThread(thread);
    tcbSchedDequeue(thread);
    NODE_STATE(ksCurThread) = thread;
}

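/* Switch to this node's idle thread. */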
void switchToIdleThread(void)
{
#ifdef CONFIG_BENCHMARK_TRACK_UTILISATION
    benchmark_utilisation_switch(NODE_STATE(ksCurThread), NODE_STATE(ksIdleThread));
#endif
    Arch_switchToIdleThread();
    NODE_STATE(ksCurThread) = NODE_STATE(ksIdleThread);
}

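/* Move a thread to a new scheduling domain, re-enqueueing it if it is
 * schedulable and requesting a reschedule if it is the current thread. */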
void setDomain(tcb_t *tptr, dom_t dom)
{
    tcbSchedDequeue(tptr);
    tptr->tcbDomain = dom;
    if (isSchedulable(tptr)) {
        SCHED_ENQUEUE(tptr);
    }
    if (tptr == NODE_STATE(ksCurThread)) {
        rescheduleRequired();
    }
}

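/* Set a thread's maximum controlled priority (MCP). */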
void setMCPriority(tcb_t *tptr, prio_t mcp)
{
    tptr->tcbMCP = mcp;
}
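
/* Change a thread's priority. On MCS, a thread blocked on an endpoint or
 * notification is additionally reordered in the queue it is waiting on; a
 * queued or currently running thread is re-enqueued and a reschedule is
 * requested. */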
#ifdef CONFIG_KERNEL_MCS
void setPriority(tcb_t *tptr, prio_t prio)
{
    switch (thread_state_get_tsType(tptr->tcbState)) {
    case ThreadState_Running:
    case ThreadState_Restart:
        if (thread_state_get_tcbQueued(tptr->tcbState) || tptr == NODE_STATE(ksCurThread)) {
            tcbSchedDequeue(tptr);
            tptr->tcbPriority = prio;
            SCHED_ENQUEUE(tptr);
            rescheduleRequired();
        } else {
            tptr->tcbPriority = prio;
        }
        break;
    case ThreadState_BlockedOnReceive:
    case ThreadState_BlockedOnSend:
        tptr->tcbPriority = prio;
        reorderEP(EP_PTR(thread_state_get_blockingObject(tptr->tcbState)), tptr);
        break;
    case ThreadState_BlockedOnNotification:
        tptr->tcbPriority = prio;
        reorderNTFN(NTFN_PTR(thread_state_get_blockingObject(tptr->tcbState)), tptr);
        break;
    default:
        tptr->tcbPriority = prio;
        break;
    }
}
#else
void setPriority(tcb_t *tptr, prio_t prio)
{
    tcbSchedDequeue(tptr);
    tptr->tcbPriority = prio;
    if (isRunnable(tptr)) {
        if (tptr == NODE_STATE(ksCurThread)) {
            rescheduleRequired();
        } else {
            possibleSwitchTo(tptr);
        }
    }
}
#endif

/* Note that this thread will possibly continue at the end of this kernel
 * entry. Do not queue it yet, since a queue+unqueue operation is wasteful
 * if it will be picked. Instead, it waits in the 'ksSchedulerAction' site
 * on which the scheduler will take action. */
void possibleSwitchTo(tcb_t *target)
{
#ifdef CONFIG_KERNEL_MCS
    if (target->tcbSchedContext != NULL && !thread_state_get_tcbInReleaseQueue(target->tcbState)) {
#endif
        if (ksCurDomain != target->tcbDomain
            SMP_COND_STATEMENT( || target->tcbAffinity != getCurrentCPUIndex())) {
            SCHED_ENQUEUE(target);
        } else if (NODE_STATE(ksSchedulerAction) != SchedulerAction_ResumeCurrentThread) {
            /* Too many threads want special treatment, use regular queues. */
            rescheduleRequired();
            SCHED_ENQUEUE(target);
        } else {
            NODE_STATE(ksSchedulerAction) = target;
        }
#ifdef CONFIG_KERNEL_MCS
    }
#endif

}

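/* Set a thread's state and trigger a reschedule if this leaves the current
 * thread unschedulable. */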
void setThreadState(tcb_t *tptr, _thread_state_t ts)
{
    thread_state_ptr_set_tsType(&tptr->tcbState, ts);
    scheduleTCB(tptr);
}

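/* Request a reschedule if the given thread is the current thread, the
 * scheduler would otherwise resume it, and it is no longer schedulable. */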
void scheduleTCB(tcb_t *tptr)
{
    if (tptr == NODE_STATE(ksCurThread) &&
        NODE_STATE(ksSchedulerAction) == SchedulerAction_ResumeCurrentThread &&
        !isSchedulable(tptr)) {
        rescheduleRequired();
    }
}

#ifdef CONFIG_KERNEL_MCS
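/* Defer a scheduling context's thread until its head refill becomes ready by
 * moving it from the ready queues to the release queue. */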
void postpone(sched_context_t *sc)
{
    tcbSchedDequeue(sc->scTcb);
    tcbReleaseEnqueue(sc->scTcb);
    NODE_STATE_ON_CORE(ksReprogram, sc->scCore) = true;
}

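/* Program the timer for the next kernel preemption point: the earliest of the
 * current thread's budget expiry, the end of the current domain's time
 * (if domains are in use) and the head of the release queue. */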
void setNextInterrupt(void)
{
    time_t next_interrupt = NODE_STATE(ksCurTime) +
                            refill_head(NODE_STATE(ksCurThread)->tcbSchedContext)->rAmount;

    if (CONFIG_NUM_DOMAINS > 1) {
        next_interrupt = MIN(next_interrupt, NODE_STATE(ksCurTime) + ksDomainTime);
    }

    if (NODE_STATE(ksReleaseHead) != NULL) {
        next_interrupt = MIN(refill_head(NODE_STATE(ksReleaseHead)->tcbSchedContext)->rTime, next_interrupt);
    }

    setDeadline(next_interrupt - getTimerPrecision());
}

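/* Charge the consumed time to the current scheduling context on the given
 * core: round-robin contexts recharge their head refill in place, other
 * contexts go through the refill budget check. If this core's current thread
 * is affected and schedulable, end its timeslice and force a reschedule. */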
void chargeBudget(ticks_t consumed, bool_t canTimeoutFault, word_t core, bool_t isCurCPU)
{

    if (isRoundRobin(NODE_STATE_ON_CORE(ksCurSC, core))) {
        assert(refill_size(NODE_STATE_ON_CORE(ksCurSC, core)) == MIN_REFILLS);
        refill_head(NODE_STATE_ON_CORE(ksCurSC, core))->rAmount += refill_tail(NODE_STATE_ON_CORE(ksCurSC, core))->rAmount;
        refill_tail(NODE_STATE_ON_CORE(ksCurSC, core))->rAmount = 0;
    } else {
        refill_budget_check(consumed);
    }

    assert(refill_head(NODE_STATE_ON_CORE(ksCurSC, core))->rAmount >= MIN_BUDGET);
    NODE_STATE_ON_CORE(ksCurSC, core)->scConsumed += consumed;
    NODE_STATE_ON_CORE(ksConsumed, core) = 0;
    if (isCurCPU && likely(isSchedulable(NODE_STATE_ON_CORE(ksCurThread, core)))) {
        assert(NODE_STATE(ksCurThread)->tcbSchedContext == NODE_STATE(ksCurSC));
        endTimeslice(canTimeoutFault);
        rescheduleRequired();
        NODE_STATE(ksReprogram) = true;
    }
}

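/* Handle expiry of the current thread's budget: raise a timeout fault if the
 * thread has a valid timeout handler, otherwise append it to its ready queue
 * if it still has a ready, sufficient refill, or postpone it until its next
 * refill. */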
void endTimeslice(bool_t can_timeout_fault)
{
    if (can_timeout_fault && !isRoundRobin(NODE_STATE(ksCurSC)) && validTimeoutHandler(NODE_STATE(ksCurThread))) {
        current_fault = seL4_Fault_Timeout_new(NODE_STATE(ksCurSC)->scBadge);
        handleTimeout(NODE_STATE(ksCurThread));
    } else if (refill_ready(NODE_STATE(ksCurSC)) && refill_sufficient(NODE_STATE(ksCurSC), 0)) {
        /* apply round robin */
        assert(refill_sufficient(NODE_STATE(ksCurSC), 0));
        assert(!thread_state_get_tcbQueued(NODE_STATE(ksCurThread)->tcbState));
        SCHED_APPEND_CURRENT_TCB;
    } else {
        /* postpone until ready */
        postpone(NODE_STATE(ksCurSC));
    }
}
#else

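/* Non-MCS timer tick: decrement the current thread's timeslice, rotating it to
 * the back of its ready queue when the timeslice is exhausted, and trigger a
 * reschedule when the domain's time runs out. */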
void timerTick(void)
{
    if (likely(thread_state_get_tsType(NODE_STATE(ksCurThread)->tcbState) ==
               ThreadState_Running)
#ifdef CONFIG_VTX
        || thread_state_get_tsType(NODE_STATE(ksCurThread)->tcbState) ==
        ThreadState_RunningVM
#endif
       ) {
        if (NODE_STATE(ksCurThread)->tcbTimeSlice > 1) {
            NODE_STATE(ksCurThread)->tcbTimeSlice--;
        } else {
            NODE_STATE(ksCurThread)->tcbTimeSlice = CONFIG_TIME_SLICE;
            SCHED_APPEND_CURRENT_TCB;
            rescheduleRequired();
        }
    }

    if (CONFIG_NUM_DOMAINS > 1) {
        ksDomainTime--;
        if (ksDomainTime == 0) {
            rescheduleRequired();
        }
    }
}
#endif

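/* Force the scheduler to choose a new thread on the next schedule() call,
 * re-enqueueing any thread previously designated for a direct switch. */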
void rescheduleRequired(void)
{
    if (NODE_STATE(ksSchedulerAction) != SchedulerAction_ResumeCurrentThread
        && NODE_STATE(ksSchedulerAction) != SchedulerAction_ChooseNewThread
#ifdef CONFIG_KERNEL_MCS
        && isSchedulable(NODE_STATE(ksSchedulerAction))
#endif
       ) {
#ifdef CONFIG_KERNEL_MCS
        assert(refill_sufficient(NODE_STATE(ksSchedulerAction)->tcbSchedContext, 0));
        assert(refill_ready(NODE_STATE(ksSchedulerAction)->tcbSchedContext));
#endif
        SCHED_ENQUEUE(NODE_STATE(ksSchedulerAction));
    }
    NODE_STATE(ksSchedulerAction) = SchedulerAction_ChooseNewThread;
}

#ifdef CONFIG_KERNEL_MCS
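/* Wake all threads at the head of the release queue whose scheduling contexts
 * now have a ready refill, making each a candidate to run and marking the
 * timer for reprogramming. */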
void awaken(void)
{
    while (unlikely(NODE_STATE(ksReleaseHead) != NULL && refill_ready(NODE_STATE(ksReleaseHead)->tcbSchedContext))) {
        tcb_t *awakened = tcbReleaseDequeue();
        /* the currently running thread cannot have just woken up */
        assert(awakened != NODE_STATE(ksCurThread));
        /* round-robin threads should not be in the release queue */
        assert(!isRoundRobin(awakened->tcbSchedContext));
        /* threads should wake up on the correct core */
        SMP_COND_STATEMENT(assert(awakened->tcbAffinity == getCurrentCPUIndex()));
        /* a thread's head refill should always be >= MIN_BUDGET */
        assert(refill_sufficient(awakened->tcbSchedContext, 0));
        possibleSwitchTo(awakened);
        /* changed head of release queue -> need to reprogram */
        NODE_STATE(ksReprogram) = true;
    }
}
#endif