/*
 * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include <machine/timer.h>
#include <kernel/sporadic.h>
#include <kernel/thread.h>
#include <object/structures.h>
#include <object/schedcontext.h>

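/* Unbind the TCB or notification referenced by cap from sc. The decode step
 * has already established that the object is in fact bound to sc. */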
static exception_t invokeSchedContext_UnbindObject(sched_context_t *sc, cap_t cap)
{
    switch (cap_get_capType(cap)) {
    case cap_thread_cap:
        schedContext_unbindTCB(sc, sc->scTcb);
        break;
    case cap_notification_cap:
        schedContext_unbindNtfn(sc);
        break;
    default:
        fail("invalid cap type");
    }

    return EXCEPTION_NONE;
}

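/* Check that the first extra cap names a TCB or notification that is
 * currently bound to sc; unbinding the sc of the calling thread is refused. */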
static exception_t decodeSchedContext_UnbindObject(sched_context_t *sc)
{
    if (current_extra_caps.excaprefs[0] == NULL) {
        userError("SchedContext_UnbindObject: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    cap_t cap = current_extra_caps.excaprefs[0]->cap;
    switch (cap_get_capType(cap)) {
    case cap_thread_cap:
        if (sc->scTcb != TCB_PTR(cap_thread_cap_get_capTCBPtr(cap))) {
            userError("SchedContext_UnbindObject: object not bound");
            current_syscall_error.type = seL4_IllegalOperation;
            return EXCEPTION_SYSCALL_ERROR;
        }
        if (sc->scTcb == NODE_STATE(ksCurThread)) {
            userError("SchedContext_UnbindObject: cannot unbind sc of current thread");
            current_syscall_error.type = seL4_IllegalOperation;
            return EXCEPTION_SYSCALL_ERROR;
        }
        break;
    case cap_notification_cap:
        if (sc->scNotification != NTFN_PTR(cap_notification_cap_get_capNtfnPtr(cap))) {
            userError("SchedContext_UnbindObject: object not bound");
            current_syscall_error.type = seL4_IllegalOperation;
            return EXCEPTION_SYSCALL_ERROR;
        }
        break;
    default:
        userError("SchedContext_UnbindObject: invalid cap");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return invokeSchedContext_UnbindObject(sc, cap);
}

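/* Bind the TCB or notification referenced by the (already validated) cap to sc. */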
static exception_t invokeSchedContext_Bind(sched_context_t *sc, cap_t cap)
{
    switch (cap_get_capType(cap)) {
    case cap_thread_cap:
        schedContext_bindTCB(sc, TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)));
        break;
    case cap_notification_cap:
        schedContext_bindNtfn(sc, NTFN_PTR(cap_notification_cap_get_capNtfnPtr(cap)));
        break;
    default:
        fail("invalid cap type");
    }

    return EXCEPTION_NONE;
}

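/* Check that both sc and the TCB or notification referenced by the first
 * extra cap are currently unbound, and that a blocked TCB is only given a
 * scheduling context that can be scheduled immediately. */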
static exception_t decodeSchedContext_Bind(sched_context_t *sc)
{
    if (current_extra_caps.excaprefs[0] == NULL) {
        userError("SchedContext_Bind: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    cap_t cap = current_extra_caps.excaprefs[0]->cap;

    switch (cap_get_capType(cap)) {
    case cap_thread_cap:
        if (sc->scTcb != NULL) {
            userError("SchedContext_Bind: sched context already bound.");
            current_syscall_error.type = seL4_IllegalOperation;
            return EXCEPTION_SYSCALL_ERROR;
        }

        if (TCB_PTR(cap_thread_cap_get_capTCBPtr(cap))->tcbSchedContext != NULL) {
            userError("SchedContext_Bind: tcb already bound.");
            current_syscall_error.type = seL4_IllegalOperation;
            return EXCEPTION_SYSCALL_ERROR;
        }

        if (isBlocked(TCB_PTR(cap_thread_cap_get_capTCBPtr(cap))) && !sc_released(sc)) {
            userError("SchedContext_Bind: tcb blocked and scheduling context not schedulable.");
            current_syscall_error.type = seL4_IllegalOperation;
            return EXCEPTION_SYSCALL_ERROR;
        }

        break;
    case cap_notification_cap:
        if (sc->scNotification != NULL) {
            userError("SchedContext_Bind: sched context already bound.");
            current_syscall_error.type = seL4_IllegalOperation;
            return EXCEPTION_SYSCALL_ERROR;
        }

        if (notification_ptr_get_ntfnSchedContext(NTFN_PTR(cap_notification_cap_get_capNtfnPtr(cap)))) {
            userError("SchedContext_Bind: notification already bound.");
            current_syscall_error.type = seL4_IllegalOperation;
            return EXCEPTION_SYSCALL_ERROR;
        }
        break;
    default:
        userError("SchedContext_Bind: invalid cap.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return invokeSchedContext_Bind(sc, cap);
}

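/* Unbind everything from sc: its TCB and notification, if any, and detach
 * sc from the reply object at the head of its call stack. */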
static exception_t invokeSchedContext_Unbind(sched_context_t *sc)
{
    schedContext_unbindAllTCBs(sc);
    schedContext_unbindNtfn(sc);
    if (sc->scReply) {
        /* detach sc from the reply object at the head of its call stack */
        sc->scReply->replyNext = call_stack_new(0, false);
        sc->scReply = NULL;
    }
    return EXCEPTION_NONE;
}

#ifdef ENABLE_SMP_SUPPORT
static inline void maybeStallSC(sched_context_t *sc)
{
    if (sc->scTcb) {
        remoteTCBStall(sc->scTcb);
    }
}
#endif

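/* Report the time consumed by sc to the current thread: deliver it as the
 * message of the current syscall and set the message info register to match. */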
static inline void setConsumed(sched_context_t *sc, word_t *buffer)
{
    time_t consumed = schedContext_updateConsumed(sc);
    word_t length = mode_setTimeArg(0, consumed, buffer, NODE_STATE(ksCurThread));
    setRegister(NODE_STATE(ksCurThread), msgInfoRegister, wordFromMessageInfo(seL4_MessageInfo_new(0, 0, 0, length)));
}

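/* Return the accumulated consumed time of sc to the caller. */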
static exception_t invokeSchedContext_Consumed(sched_context_t *sc, word_t *buffer)
{
    setConsumed(sc, buffer);
    return EXCEPTION_NONE;
}

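/* Yield to the thread bound to sc. Any yield already in progress for sc is
 * completed first and sc's thread is made runnable if possible. If that
 * thread is schedulable on this core with priority at least the caller's,
 * it is scheduled ahead of the caller and reporting the consumed time is
 * deferred until the caller runs again; otherwise the consumed time is
 * reported immediately. */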
static exception_t invokeSchedContext_YieldTo(sched_context_t *sc, word_t *buffer)
{
    if (sc->scYieldFrom) {
        schedContext_completeYieldTo(sc->scYieldFrom);
        assert(sc->scYieldFrom == NULL);
    }

    /* If the tcb is in the scheduler, it is ready and sufficient.
     * Otherwise, check that it is ready and sufficient and, if not,
     * place the thread in the release queue. This way, from this point,
     * if the thread isSchedulable, it is ready and sufficient. */
    schedContext_resume(sc);

    bool_t return_now = true;
    if (isSchedulable(sc->scTcb)) {
        if (SMP_COND_STATEMENT(sc->scCore != getCurrentCPUIndex() ||)
            sc->scTcb->tcbPriority < NODE_STATE(ksCurThread)->tcbPriority) {
            tcbSchedDequeue(sc->scTcb);
            SCHED_ENQUEUE(sc->scTcb);
        } else {
            NODE_STATE(ksCurThread)->tcbYieldTo = sc;
            sc->scYieldFrom = NODE_STATE(ksCurThread);
            tcbSchedDequeue(sc->scTcb);
            tcbSchedEnqueue(NODE_STATE(ksCurThread));
            tcbSchedEnqueue(sc->scTcb);
            rescheduleRequired();

            /* We are scheduling the thread associated with sc,
             * so we don't need to write to the IPC buffer
             * until the caller is scheduled again. */
            return_now = false;
        }
    }

    if (return_now) {
        setConsumed(sc, buffer);
    }

    return EXCEPTION_NONE;
}

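/* A YieldTo is only legal if sc has a bound TCB other than the caller, the
 * caller's MCP is at least the target's priority, and the caller has no
 * YieldTo outstanding. */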
static exception_t decodeSchedContext_YieldTo(sched_context_t *sc, word_t *buffer)
{
    if (sc->scTcb == NULL) {
        userError("SchedContext_YieldTo: cannot yield to an inactive sched context");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (sc->scTcb == NODE_STATE(ksCurThread)) {
        userError("SchedContext_YieldTo: cannot seL4_SchedContext_YieldTo on self");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (sc->scTcb->tcbPriority > NODE_STATE(ksCurThread)->tcbMCP) {
        userError("SchedContext_YieldTo: insufficient mcp (%lu) to yield to a thread with prio (%lu)",
                  (unsigned long) NODE_STATE(ksCurThread)->tcbMCP, (unsigned long) sc->scTcb->tcbPriority);
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    // This should not be possible, as the currently running thread
    // should never have a non-null tcbYieldTo; verifying this
    // invariant is left to future work.
    assert(NODE_STATE(ksCurThread)->tcbYieldTo == NULL);
    if (NODE_STATE(ksCurThread)->tcbYieldTo != NULL) {
        userError("SchedContext_YieldTo: cannot seL4_SchedContext_YieldTo on more than one SC at a time");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return invokeSchedContext_YieldTo(sc, buffer);
}

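/* Top-level decode for scheduling context invocations: dispatch on the
 * invocation label, performing whatever validation the operation needs. */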
exception_t decodeSchedContextInvocation(word_t label, cap_t cap, word_t *buffer)
{
    sched_context_t *sc = SC_PTR(cap_sched_context_cap_get_capSCPtr(cap));

    SMP_COND_STATEMENT(maybeStallSC(sc));

    switch (label) {
    case SchedContextConsumed:
        /* no decode */
        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return invokeSchedContext_Consumed(sc, buffer);
    case SchedContextBind:
        return decodeSchedContext_Bind(sc);
    case SchedContextUnbindObject:
        return decodeSchedContext_UnbindObject(sc);
    case SchedContextUnbind:
        /* no decode */
        if (sc->scTcb == NODE_STATE(ksCurThread)) {
            userError("SchedContext_Unbind: cannot unbind sc of current thread");
            current_syscall_error.type = seL4_IllegalOperation;
            return EXCEPTION_SYSCALL_ERROR;
        }
        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return invokeSchedContext_Unbind(sc);
    case SchedContextYieldTo:
        return decodeSchedContext_YieldTo(sc, buffer);
    default:
        userError("SchedContext invocation: Illegal operation attempted.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }
}

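/* If sc's thread is schedulable but sc does not have a ready and sufficient
 * refill, place the thread in the release queue to wake once its next
 * refill becomes available. */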
void schedContext_resume(sched_context_t *sc)
{
    assert(!sc || sc->scTcb != NULL);
    if (likely(sc) && isSchedulable(sc->scTcb)) {
        if (!(refill_ready(sc) && refill_sufficient(sc, 0))) {
            assert(!thread_state_get_tcbQueued(sc->scTcb->tcbState));
            postpone(sc);
        }
    }
}

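/* Bind tcb to sc. On SMP the TCB is migrated to sc's core. Refills of an
 * active sporadic sc (other than the current one) are unblocked, and if the
 * thread becomes schedulable it is enqueued and a reschedule is triggered. */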
void schedContext_bindTCB(sched_context_t *sc, tcb_t *tcb)
{
    assert(sc->scTcb == NULL);
    assert(tcb->tcbSchedContext == NULL);

    tcb->tcbSchedContext = sc;
    sc->scTcb = tcb;

    SMP_COND_STATEMENT(migrateTCB(tcb, sc->scCore));

    if (sc_sporadic(sc) && sc_active(sc) && sc != NODE_STATE(ksCurSC)) {
        refill_unblock_check(sc);
    }
    schedContext_resume(sc);
    if (isSchedulable(tcb)) {
        SCHED_ENQUEUE(tcb);
        rescheduleRequired();
        // TODO -- at some stage we should take this call out of any TCB invocations that
        // alter capabilities, so that we can do a direct switch. The preference here is to
        // remove seL4_SetSchedParams from using ThreadControl. It's currently out of scope for
        // verification work, so the workaround is to use rescheduleRequired().
        //possibleSwitchTo(tcb);
    }
}

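/* Undo schedContext_bindTCB: dequeue the thread from the scheduler and
 * release queues and clear the binding in both directions. */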
void schedContext_unbindTCB(sched_context_t *sc, tcb_t *tcb)
{
    assert(sc->scTcb == tcb);

    /* tcb must already be stalled at this point */
    if (tcb == NODE_STATE(ksCurThread)) {
        rescheduleRequired();
    }

    tcbSchedDequeue(sc->scTcb);
    tcbReleaseRemove(sc->scTcb);

    sc->scTcb->tcbSchedContext = NULL;
    sc->scTcb = NULL;
}

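/* Unbind the TCB bound to sc, if any, stalling it first on SMP. */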
void schedContext_unbindAllTCBs(sched_context_t *sc)
{
    if (sc->scTcb) {
        SMP_COND_STATEMENT(remoteTCBStall(sc->scTcb));
        schedContext_unbindTCB(sc, sc->scTcb);
    }
}

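/* Move sc from its current TCB, if any, to the TCB 'to'. The previous
 * thread loses its scheduling context and is descheduled if it was running
 * or about to run; 'to' is migrated to sc's core on SMP. */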
void schedContext_donate(sched_context_t *sc, tcb_t *to)
{
    assert(sc != NULL);
    assert(to != NULL);
    assert(to->tcbSchedContext == NULL);

    tcb_t *from = sc->scTcb;
    if (from) {
        SMP_COND_STATEMENT(remoteTCBStall(from));
        tcbSchedDequeue(from);
        tcbReleaseRemove(from);
        from->tcbSchedContext = NULL;
        if (from == NODE_STATE(ksCurThread) || from == NODE_STATE(ksSchedulerAction)) {
            rescheduleRequired();
        }
    }
    sc->scTcb = to;
    to->tcbSchedContext = sc;

    SMP_COND_STATEMENT(migrateTCB(to, sc->scCore));
}

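/* Associate the notification ntfn with sc in both directions. */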
void schedContext_bindNtfn(sched_context_t *sc, notification_t *ntfn)
{
    notification_ptr_set_ntfnSchedContext(ntfn, SC_REF(sc));
    sc->scNotification = ntfn;
}

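/* Clear the association between sc and its notification, if any. */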
void schedContext_unbindNtfn(sched_context_t *sc)
{
    if (sc && sc->scNotification) {
        notification_ptr_set_ntfnSchedContext(sc->scNotification, SC_REF(0));
        sc->scNotification = NULL;
    }
}

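/* Convert sc's consumed ticks to microseconds and deduct the amount
 * reported. A single query is capped at the largest tick count that
 * converts without overflow; any excess is left for a later query. */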
time_t schedContext_updateConsumed(sched_context_t *sc)
{
    ticks_t consumed = sc->scConsumed;
    if (consumed >= getMaxTicksToUs()) {
        sc->scConsumed -= getMaxTicksToUs();
        return ticksToUs(getMaxTicksToUs());
    } else {
        sc->scConsumed = 0;
        return ticksToUs(consumed);
    }
}

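/* Break the yieldTo link between tcb and the scheduling context it yielded
 * to, if any, without reporting consumed time. */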
void schedContext_cancelYieldTo(tcb_t *tcb)
{
    if (tcb && tcb->tcbYieldTo) {
        tcb->tcbYieldTo->scYieldFrom = NULL;
        tcb->tcbYieldTo = NULL;
    }
}

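/* Finish an in-progress yieldTo by 'yielder': report the consumed time of
 * the target sc into the yielder's IPC buffer and clear the link. */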
void schedContext_completeYieldTo(tcb_t *yielder)
{
    if (yielder && yielder->tcbYieldTo) {
        setConsumed(yielder->tcbYieldTo, lookupIPCBuffer(true, yielder));
        schedContext_cancelYieldTo(yielder);
    }
}