/*
 * Copyright 2014, General Dynamics C4 Systems
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include <assert.h>

#include <types.h>
#include <kernel/thread.h>
#include <object/structures.h>
#include <object/tcb.h>
#include <object/endpoint.h>
#include <model/statedata.h>
#include <machine/io.h>

#include <object/notification.h>

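/* Read the head and tail pointers of a notification's wait queue into a
 * tcb_queue_t so it can be manipulated with the generic TCB queue helpers. */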
static inline tcb_queue_t PURE ntfn_ptr_get_queue(notification_t *ntfnPtr)
{
    tcb_queue_t ntfn_queue;

    ntfn_queue.head = (tcb_t *)notification_ptr_get_ntfnQueue_head(ntfnPtr);
    ntfn_queue.end = (tcb_t *)notification_ptr_get_ntfnQueue_tail(ntfnPtr);

    return ntfn_queue;
}

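/* Write a tcb_queue_t back into the notification's head and tail fields. */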
static inline void ntfn_ptr_set_queue(notification_t *ntfnPtr, tcb_queue_t ntfn_queue)
{
    notification_ptr_set_ntfnQueue_head(ntfnPtr, (word_t)ntfn_queue.head);
    notification_ptr_set_ntfnQueue_tail(ntfnPtr, (word_t)ntfn_queue.end);
}

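/* Mark the notification as active and record the badge to deliver later. */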
static inline void ntfn_set_active(notification_t *ntfnPtr, word_t badge)
{
    notification_ptr_set_state(ntfnPtr, NtfnState_Active);
    notification_ptr_set_ntfnMsgIdentifier(ntfnPtr, badge);
}

#ifdef CONFIG_KERNEL_MCS
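/* If the thread has no scheduling context, donate the notification's bound
 * scheduling context to it (when that context exists and is unassigned) and
 * resume the donated context. */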
static inline void maybeDonateSchedContext(tcb_t *tcb, notification_t *ntfnPtr)
{
    if (tcb->tcbSchedContext == NULL) {
        sched_context_t *sc = SC_PTR(notification_ptr_get_ntfnSchedContext(ntfnPtr));
        if (sc != NULL && sc->scTcb == NULL) {
            schedContext_donate(sc, tcb);
            schedContext_resume(sc);
        }
    }
}

#endif

#ifdef CONFIG_KERNEL_MCS
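/* On MCS, attempt scheduling-context donation first and only run _block if
 * the thread ends up schedulable; on non-MCS configurations _block runs
 * unconditionally. */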
#define MCS_DO_IF_SC(tcb, ntfnPtr, _block) \
    maybeDonateSchedContext(tcb, ntfnPtr); \
    if (isSchedulable(tcb)) { \
        _block \
    }
#else
#define MCS_DO_IF_SC(tcb, ntfnPtr, _block) \
    { \
        _block \
    }
#endif

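/* Signal a notification object with the given badge. Depending on the
 * object's state this either wakes a waiting thread (the bound TCB or the
 * head of the wait queue) and delivers the badge, or records the badge in
 * the notification word, ORing it with any badge already pending. */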
void sendSignal(notification_t *ntfnPtr, word_t badge)
{
    switch (notification_ptr_get_state(ntfnPtr)) {
    case NtfnState_Idle: {
        tcb_t *tcb = (tcb_t *)notification_ptr_get_ntfnBoundTCB(ntfnPtr);
        /* Check if we are bound and that thread is waiting for a message */
        if (tcb) {
            if (thread_state_ptr_get_tsType(&tcb->tcbState) == ThreadState_BlockedOnReceive) {
                /* Send and start thread running */
                cancelIPC(tcb);
                setThreadState(tcb, ThreadState_Running);
                setRegister(tcb, badgeRegister, badge);
                MCS_DO_IF_SC(tcb, ntfnPtr, {
                    possibleSwitchTo(tcb);
                })
#ifdef CONFIG_KERNEL_MCS
                if (sc_sporadic(tcb->tcbSchedContext) && sc_active(tcb->tcbSchedContext)) {
                    /* We know that the tcb can't have the current SC
                     * as its own SC at this point as it should still be
                     * associated with the current thread, or no thread.
                     * This check is added here to reduce the cost of
                     * proving this to be true as a short-term stop-gap. */
                    assert(tcb->tcbSchedContext != NODE_STATE(ksCurSC));
                    if (tcb->tcbSchedContext != NODE_STATE(ksCurSC)) {
                        refill_unblock_check(tcb->tcbSchedContext);
                    }
                }
#endif
#ifdef CONFIG_VTX
            } else if (thread_state_ptr_get_tsType(&tcb->tcbState) == ThreadState_RunningVM) {
#ifdef ENABLE_SMP_SUPPORT
                if (tcb->tcbAffinity != getCurrentCPUIndex()) {
                    ntfn_set_active(ntfnPtr, badge);
                    doRemoteVMCheckBoundNotification(tcb->tcbAffinity, tcb);
                } else
#endif /* ENABLE_SMP_SUPPORT */
                {
                    setThreadState(tcb, ThreadState_Running);
                    setRegister(tcb, badgeRegister, badge);
                    Arch_leaveVMAsyncTransfer(tcb);
                    MCS_DO_IF_SC(tcb, ntfnPtr, {
                        possibleSwitchTo(tcb);
                    })
#ifdef CONFIG_KERNEL_MCS
                    if (tcb->tcbSchedContext != NULL && sc_active(tcb->tcbSchedContext)) {
                        sched_context_t *sc = SC_PTR(notification_ptr_get_ntfnSchedContext(ntfnPtr));
                        if (tcb->tcbSchedContext == sc && sc_sporadic(sc) && tcb->tcbSchedContext != NODE_STATE(ksCurSC)) {
                            /* We know that the tcb can't have the current SC
                             * as its own SC at this point as it should still be
                             * associated with the current thread, or no thread.
                             * This check is added here to reduce the cost of
                             * proving this to be true as a short-term stop-gap. */
                            /* Only unblock if the SC was donated from the
                             * notification */
                            refill_unblock_check(tcb->tcbSchedContext);
                        }
                    }
#endif
                }
#endif /* CONFIG_VTX */
            } else {
                /* In particular, this path is taken when a thread
                 * is waiting on a reply cap since BlockedOnReply
                 * would also trigger this path. I.e., a thread
                 * with a bound notification will not be awakened
                 * by signals on that bound notification if it is
                 * in the middle of an seL4_Call.
                 */
                ntfn_set_active(ntfnPtr, badge);
            }
        } else {
            ntfn_set_active(ntfnPtr, badge);
        }
        break;
    }
    case NtfnState_Waiting: {
        tcb_queue_t ntfn_queue;
        tcb_t *dest;

        ntfn_queue = ntfn_ptr_get_queue(ntfnPtr);
        dest = ntfn_queue.head;

        /* Haskell error "WaitingNtfn Notification must have non-empty queue" */
        assert(dest);

        /* Dequeue TCB */
        ntfn_queue = tcbEPDequeue(dest, ntfn_queue);
        ntfn_ptr_set_queue(ntfnPtr, ntfn_queue);

        /* set the notification state to idle if the queue is empty */
        if (!ntfn_queue.head) {
            notification_ptr_set_state(ntfnPtr, NtfnState_Idle);
        }

        setThreadState(dest, ThreadState_Running);
        setRegister(dest, badgeRegister, badge);
        MCS_DO_IF_SC(dest, ntfnPtr, {
            possibleSwitchTo(dest);
        })

#ifdef CONFIG_KERNEL_MCS
        if (sc_sporadic(dest->tcbSchedContext) && sc_active(dest->tcbSchedContext)) {
            /* We know that the receiver can't have the current SC
             * as its own SC at this point as it should still be
             * associated with the current thread.
             * This check is added here to reduce the cost of
             * proving this to be true as a short-term stop-gap. */
            assert(dest->tcbSchedContext != NODE_STATE(ksCurSC));
            if (dest->tcbSchedContext != NODE_STATE(ksCurSC)) {
                refill_unblock_check(dest->tcbSchedContext);
            }
        }
#endif
        break;
    }

    case NtfnState_Active: {
        word_t badge2;

        badge2 = notification_ptr_get_ntfnMsgIdentifier(ntfnPtr);
        badge2 |= badge;

        notification_ptr_set_ntfnMsgIdentifier(ntfnPtr, badge2);
        break;
    }
    }
}

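/* Wait on a notification object. A pending signal is delivered immediately;
 * otherwise the thread either blocks on the notification's wait queue or, for
 * a non-blocking receive, completes with a failed transfer. */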
void receiveSignal(tcb_t *thread, cap_t cap, bool_t isBlocking)
{
    notification_t *ntfnPtr;

    ntfnPtr = NTFN_PTR(cap_notification_cap_get_capNtfnPtr(cap));

    switch (notification_ptr_get_state(ntfnPtr)) {
    case NtfnState_Idle:
    case NtfnState_Waiting: {
        tcb_queue_t ntfn_queue;

        if (isBlocking) {
            /* Block thread on notification object */
            thread_state_ptr_set_tsType(&thread->tcbState,
                                        ThreadState_BlockedOnNotification);
            thread_state_ptr_set_blockingObject(&thread->tcbState,
                                                NTFN_REF(ntfnPtr));
            scheduleTCB(thread);

            /* Enqueue TCB */
            ntfn_queue = ntfn_ptr_get_queue(ntfnPtr);
            ntfn_queue = tcbEPAppend(thread, ntfn_queue);

            notification_ptr_set_state(ntfnPtr, NtfnState_Waiting);
            ntfn_ptr_set_queue(ntfnPtr, ntfn_queue);

#ifdef CONFIG_KERNEL_MCS
            maybeReturnSchedContext(ntfnPtr, thread);
#endif
        } else {
            doNBRecvFailedTransfer(thread);
        }

        break;
    }

    case NtfnState_Active:
        setRegister(
            thread, badgeRegister,
            notification_ptr_get_ntfnMsgIdentifier(ntfnPtr));
        notification_ptr_set_state(ntfnPtr, NtfnState_Idle);
#ifdef CONFIG_KERNEL_MCS
        maybeDonateSchedContext(thread, ntfnPtr);
        /* If the SC has been donated to the current thread (in a reply_recv,
         * send_recv scenario) then we may need to perform a
         * refill_unblock_check when the SC becomes active. */
        if (thread->tcbSchedContext != NODE_STATE(ksCurSC) && sc_sporadic(thread->tcbSchedContext)) {
            refill_unblock_check(thread->tcbSchedContext);
        }
#endif
        break;
    }
}

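/* Wake all threads blocked on the notification, setting each to Restart, and
 * leave the object idle with an empty queue. */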
void cancelAllSignals(notification_t *ntfnPtr)
{
    if (notification_ptr_get_state(ntfnPtr) == NtfnState_Waiting) {
        tcb_t *thread = TCB_PTR(notification_ptr_get_ntfnQueue_head(ntfnPtr));

        notification_ptr_set_state(ntfnPtr, NtfnState_Idle);
        notification_ptr_set_ntfnQueue_head(ntfnPtr, 0);
        notification_ptr_set_ntfnQueue_tail(ntfnPtr, 0);

        /* Set all waiting threads to Restart */
        for (; thread; thread = thread->tcbEPNext) {
            setThreadState(thread, ThreadState_Restart);
#ifdef CONFIG_KERNEL_MCS
            if (sc_sporadic(thread->tcbSchedContext)) {
                /* We know that the thread can't have the current SC
                 * as its own SC at this point as it should still be
                 * associated with the current thread, or no thread.
                 * This check is added here to reduce the cost of
                 * proving this to be true as a short-term stop-gap. */
                assert(thread->tcbSchedContext != NODE_STATE(ksCurSC));
                if (thread->tcbSchedContext != NODE_STATE(ksCurSC)) {
                    refill_unblock_check(thread->tcbSchedContext);
                }
            }
            possibleSwitchTo(thread);
#else
            SCHED_ENQUEUE(thread);
#endif
        }
        rescheduleRequired();
    }
}

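/* Remove a single blocked thread from the notification's wait queue and make
 * it inactive; the object becomes idle if the queue is now empty. */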
void cancelSignal(tcb_t *threadPtr, notification_t *ntfnPtr)
{
    tcb_queue_t ntfn_queue;

    /* Haskell error "cancelSignal: notification object must be in a waiting state" */
    assert(notification_ptr_get_state(ntfnPtr) == NtfnState_Waiting);

    /* Dequeue TCB */
    ntfn_queue = ntfn_ptr_get_queue(ntfnPtr);
    ntfn_queue = tcbEPDequeue(threadPtr, ntfn_queue);
    ntfn_ptr_set_queue(ntfnPtr, ntfn_queue);

    /* Make notification object idle */
    if (!ntfn_queue.head) {
        notification_ptr_set_state(ntfnPtr, NtfnState_Idle);
    }

    /* Make thread inactive */
    setThreadState(threadPtr, ThreadState_Inactive);
}

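/* Complete a signal that is pending on a bound notification: transfer the
 * badge to the thread and return the notification to the idle state. */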
void completeSignal(notification_t *ntfnPtr, tcb_t *tcb)
{
    word_t badge;

    if (likely(tcb && notification_ptr_get_state(ntfnPtr) == NtfnState_Active)) {
        badge = notification_ptr_get_ntfnMsgIdentifier(ntfnPtr);
        setRegister(tcb, badgeRegister, badge);
        notification_ptr_set_state(ntfnPtr, NtfnState_Idle);
#ifdef CONFIG_KERNEL_MCS
        maybeDonateSchedContext(tcb, ntfnPtr);
        if (sc_sporadic(tcb->tcbSchedContext) && sc_active(tcb->tcbSchedContext)) {
            sched_context_t *sc = SC_PTR(notification_ptr_get_ntfnSchedContext(ntfnPtr));
            if (tcb->tcbSchedContext == sc && tcb->tcbSchedContext != NODE_STATE(ksCurSC)) {
                /* We know that the tcb can't have the current SC
                 * as its own SC at this point as it should still be
                 * associated with the current thread, or no thread.
                 * This check is added here to reduce the cost of
                 * proving this to be true as a short-term stop-gap. */
                /* Only unblock if the SC was donated from the
                 * notification */
                refill_unblock_check(tcb->tcbSchedContext);
            }
        }
#endif
    } else {
        fail("tried to complete signal with inactive notification object");
    }
}

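/* Clear the binding between a notification object and its TCB in both
 * directions. */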
static inline void doUnbindNotification(notification_t *ntfnPtr, tcb_t *tcbptr)
{
    notification_ptr_set_ntfnBoundTCB(ntfnPtr, (word_t) 0);
    tcbptr->tcbBoundNotification = NULL;
}

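/* Unbind the notification from its bound TCB, if it has one. */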
void unbindMaybeNotification(notification_t *ntfnPtr)
{
    tcb_t *boundTCB;
    boundTCB = (tcb_t *)notification_ptr_get_ntfnBoundTCB(ntfnPtr);

    if (boundTCB) {
        doUnbindNotification(ntfnPtr, boundTCB);
    }
}

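/* Unbind the TCB from its bound notification, if it has one. */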
void unbindNotification(tcb_t *tcb)
{
    notification_t *ntfnPtr;
    ntfnPtr = tcb->tcbBoundNotification;

    if (ntfnPtr) {
        doUnbindNotification(ntfnPtr, tcb);
    }
}

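/* Bind a notification object to a TCB so that signals on the notification
 * can be delivered to the thread while it waits on an endpoint. */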
void bindNotification(tcb_t *tcb, notification_t *ntfnPtr)
{
    notification_ptr_set_ntfnBoundTCB(ntfnPtr, (word_t)tcb);
    tcb->tcbBoundNotification = ntfnPtr;
}

#ifdef CONFIG_KERNEL_MCS
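/* Requeue a blocked thread at the end of the notification's wait queue. */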
void reorderNTFN(notification_t *ntfnPtr, tcb_t *thread)
{
    tcb_queue_t queue = ntfn_ptr_get_queue(ntfnPtr);
    queue = tcbEPDequeue(thread, queue);
    queue = tcbEPAppend(thread, queue);
    ntfn_ptr_set_queue(ntfnPtr, queue);
}
#endif