/*
 * Copyright 2014, General Dynamics C4 Systems
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include <types.h>
#include <string.h>
#include <sel4/constants.h>
#include <kernel/thread.h>
#include <kernel/vspace.h>
#include <machine/registerset.h>
#include <model/statedata.h>
#include <object/notification.h>
#include <object/cnode.h>
#include <object/endpoint.h>
#include <object/tcb.h>

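/* Write both ends of a TCB queue back into the head and tail fields of an
 * endpoint object. */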
static inline void ep_ptr_set_queue(endpoint_t *epptr, tcb_queue_t queue)
{
    endpoint_ptr_set_epQueue_head(epptr, (word_t)queue.head);
    endpoint_ptr_set_epQueue_tail(epptr, (word_t)queue.end);
}

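/* Send a message from 'thread' over the endpoint 'epptr'. If no receiver is
 * waiting, the sender is either queued on the endpoint (when 'blocking' is
 * set) or the message is dropped. If a receiver is waiting, the message is
 * transferred immediately; for calls and faulting senders, reply state is
 * set up so the receiver can later reply. */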
#ifdef CONFIG_KERNEL_MCS
void sendIPC(bool_t blocking, bool_t do_call, word_t badge,
             bool_t canGrant, bool_t canGrantReply, bool_t canDonate, tcb_t *thread, endpoint_t *epptr)
#else
void sendIPC(bool_t blocking, bool_t do_call, word_t badge,
             bool_t canGrant, bool_t canGrantReply, tcb_t *thread, endpoint_t *epptr)
#endif
{
    switch (endpoint_ptr_get_state(epptr)) {
    case EPState_Idle:
    case EPState_Send:
        if (blocking) {
            tcb_queue_t queue;

            /* Set thread state to BlockedOnSend */
            thread_state_ptr_set_tsType(&thread->tcbState,
                                        ThreadState_BlockedOnSend);
            thread_state_ptr_set_blockingObject(
                &thread->tcbState, EP_REF(epptr));
            thread_state_ptr_set_blockingIPCBadge(
                &thread->tcbState, badge);
            thread_state_ptr_set_blockingIPCCanGrant(
                &thread->tcbState, canGrant);
            thread_state_ptr_set_blockingIPCCanGrantReply(
                &thread->tcbState, canGrantReply);
            thread_state_ptr_set_blockingIPCIsCall(
                &thread->tcbState, do_call);

            scheduleTCB(thread);

            /* Place calling thread in endpoint queue */
            queue = ep_ptr_get_queue(epptr);
            queue = tcbEPAppend(thread, queue);
            endpoint_ptr_set_state(epptr, EPState_Send);
            ep_ptr_set_queue(epptr, queue);
        }
        break;

    case EPState_Recv: {
        tcb_queue_t queue;
        tcb_t *dest;

        /* Get the head of the endpoint queue. */
        queue = ep_ptr_get_queue(epptr);
        dest = queue.head;

        /* Haskell error "Receive endpoint queue must not be empty" */
        assert(dest);

        /* Dequeue the first TCB */
        queue = tcbEPDequeue(dest, queue);
        ep_ptr_set_queue(epptr, queue);

        if (!queue.head) {
            endpoint_ptr_set_state(epptr, EPState_Idle);
        }

        /* Do the transfer */
        doIPCTransfer(thread, epptr, badge, canGrant, dest);

#ifdef CONFIG_KERNEL_MCS
        reply_t *reply = REPLY_PTR(thread_state_get_replyObject(dest->tcbState));
        if (reply) {
            reply_unlink(reply, dest);
        }

        if (do_call ||
            seL4_Fault_ptr_get_seL4_FaultType(&thread->tcbFault) != seL4_Fault_NullFault) {
            if (reply != NULL && (canGrant || canGrantReply)) {
                reply_push(thread, dest, reply, canDonate);
            } else {
                setThreadState(thread, ThreadState_Inactive);
            }
        } else if (canDonate && dest->tcbSchedContext == NULL) {
            schedContext_donate(thread->tcbSchedContext, dest);
        }

        /* blocked threads should have enough budget to get out of the kernel */
        assert(dest->tcbSchedContext == NULL || refill_sufficient(dest->tcbSchedContext, 0));
        assert(dest->tcbSchedContext == NULL || refill_ready(dest->tcbSchedContext));
        setThreadState(dest, ThreadState_Running);
        if (sc_sporadic(dest->tcbSchedContext) && dest->tcbSchedContext != NODE_STATE(ksCurSC)) {
            refill_unblock_check(dest->tcbSchedContext);
        }
        possibleSwitchTo(dest);
#else
        bool_t replyCanGrant = thread_state_ptr_get_blockingIPCCanGrant(&dest->tcbState);

        setThreadState(dest, ThreadState_Running);
        possibleSwitchTo(dest);

        if (do_call) {
            if (canGrant || canGrantReply) {
                setupCallerCap(thread, dest, replyCanGrant);
            } else {
                setThreadState(thread, ThreadState_Inactive);
            }
        }
#endif
        break;
    }
    }
}

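/* Receive a message on the endpoint referred to by 'cap'. A pending signal on
 * the thread's bound notification takes priority. Otherwise, if a sender is
 * queued, its message is transferred immediately; if not, the thread either
 * blocks on the endpoint (when 'isBlocking' is set) or performs a failed
 * non-blocking transfer. */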
#ifdef CONFIG_KERNEL_MCS
void receiveIPC(tcb_t *thread, cap_t cap, bool_t isBlocking, cap_t replyCap)
#else
void receiveIPC(tcb_t *thread, cap_t cap, bool_t isBlocking)
#endif
{
    endpoint_t *epptr;
    notification_t *ntfnPtr;

    /* Haskell error "receiveIPC: invalid cap" */
    assert(cap_get_capType(cap) == cap_endpoint_cap);

    epptr = EP_PTR(cap_endpoint_cap_get_capEPPtr(cap));

#ifdef CONFIG_KERNEL_MCS
    reply_t *replyPtr = NULL;
    if (cap_get_capType(replyCap) == cap_reply_cap) {
        replyPtr = REPLY_PTR(cap_reply_cap_get_capReplyPtr(replyCap));
        if (unlikely(replyPtr->replyTCB != NULL && replyPtr->replyTCB != thread)) {
            userError("Reply object already has unexecuted reply!");
            cancelIPC(replyPtr->replyTCB);
        }
    }
#endif

    /* Check for anything waiting in the notification */
    ntfnPtr = thread->tcbBoundNotification;
    if (ntfnPtr && notification_ptr_get_state(ntfnPtr) == NtfnState_Active) {
        completeSignal(ntfnPtr, thread);
    } else {
#ifdef CONFIG_KERNEL_MCS
        /* If this is a blocking recv and we didn't have a pending notification,
         * and we are running on an SC from a bound notification, we need to
         * return that SC so that we can passively wait on the EP for potential
         * SC donations from client threads.
         */
        if (ntfnPtr && isBlocking) {
            maybeReturnSchedContext(ntfnPtr, thread);
        }
#endif
        switch (endpoint_ptr_get_state(epptr)) {
        case EPState_Idle:
        case EPState_Recv: {
            tcb_queue_t queue;

            if (isBlocking) {
                /* Set thread state to BlockedOnReceive */
                thread_state_ptr_set_tsType(&thread->tcbState,
                                            ThreadState_BlockedOnReceive);
                thread_state_ptr_set_blockingObject(
                    &thread->tcbState, EP_REF(epptr));
#ifdef CONFIG_KERNEL_MCS
                thread_state_ptr_set_replyObject(&thread->tcbState, REPLY_REF(replyPtr));
                if (replyPtr) {
                    replyPtr->replyTCB = thread;
                }
#else
                thread_state_ptr_set_blockingIPCCanGrant(
                    &thread->tcbState, cap_endpoint_cap_get_capCanGrant(cap));
#endif
                scheduleTCB(thread);

                /* Place calling thread in endpoint queue */
                queue = ep_ptr_get_queue(epptr);
                queue = tcbEPAppend(thread, queue);
                endpoint_ptr_set_state(epptr, EPState_Recv);
                ep_ptr_set_queue(epptr, queue);
            } else {
                doNBRecvFailedTransfer(thread);
            }
            break;
        }

        case EPState_Send: {
            tcb_queue_t queue;
            tcb_t *sender;
            word_t badge;
            bool_t canGrant;
            bool_t canGrantReply;
            bool_t do_call;

            /* Get the head of the endpoint queue. */
            queue = ep_ptr_get_queue(epptr);
            sender = queue.head;

            /* Haskell error "Send endpoint queue must not be empty" */
            assert(sender);

            /* Dequeue the first TCB */
            queue = tcbEPDequeue(sender, queue);
            ep_ptr_set_queue(epptr, queue);

            if (!queue.head) {
                endpoint_ptr_set_state(epptr, EPState_Idle);
            }

            /* Get sender IPC details */
            badge = thread_state_ptr_get_blockingIPCBadge(&sender->tcbState);
            canGrant =
                thread_state_ptr_get_blockingIPCCanGrant(&sender->tcbState);
            canGrantReply =
                thread_state_ptr_get_blockingIPCCanGrantReply(&sender->tcbState);

            /* Do the transfer */
            doIPCTransfer(sender, epptr, badge,
                          canGrant, thread);

            do_call = thread_state_ptr_get_blockingIPCIsCall(&sender->tcbState);

#ifdef CONFIG_KERNEL_MCS
            if (sc_sporadic(sender->tcbSchedContext)) {
                /* We know that the sender can't have the current SC as
                 * its own SC at this point as it should still be
                 * associated with the current thread, no thread, or a
                 * thread that isn't blocked. This check is added here
                 * to reduce the cost of proving this to be true as a
                 * short-term stop-gap. */
                assert(sender->tcbSchedContext != NODE_STATE(ksCurSC));
                if (sender->tcbSchedContext != NODE_STATE(ksCurSC)) {
                    refill_unblock_check(sender->tcbSchedContext);
                }
            }

            if (do_call ||
                seL4_Fault_get_seL4_FaultType(sender->tcbFault) != seL4_Fault_NullFault) {
                if ((canGrant || canGrantReply) && replyPtr != NULL) {
                    bool_t canDonate = sender->tcbSchedContext != NULL
                                       && seL4_Fault_get_seL4_FaultType(sender->tcbFault) != seL4_Fault_Timeout;
                    reply_push(sender, thread, replyPtr, canDonate);
                } else {
                    setThreadState(sender, ThreadState_Inactive);
                }
            } else {
                setThreadState(sender, ThreadState_Running);
                possibleSwitchTo(sender);
                assert(sender->tcbSchedContext == NULL || refill_sufficient(sender->tcbSchedContext, 0));
            }
#else
            if (do_call) {
                if (canGrant || canGrantReply) {
                    setupCallerCap(sender, thread, cap_endpoint_cap_get_capCanGrant(cap));
                } else {
                    setThreadState(sender, ThreadState_Inactive);
                }
            } else {
                setThreadState(sender, ThreadState_Running);
                possibleSwitchTo(sender);
            }
#endif
            break;
        }
        }
    }
}

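/* Reply to 'thread' from the kernel with the current syscall error, writing
 * the error details into its message registers and IPC buffer. */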
void replyFromKernel_error(tcb_t *thread)
{
    word_t len;
    word_t *ipcBuffer;

    ipcBuffer = lookupIPCBuffer(true, thread);
    setRegister(thread, badgeRegister, 0);
    len = setMRs_syscall_error(thread, ipcBuffer);

#ifdef CONFIG_KERNEL_INVOCATION_REPORT_ERROR_IPC
    char *debugBuffer = (char *)(ipcBuffer + DEBUG_MESSAGE_START + 1);
    word_t add = strlcpy(debugBuffer, (char *)current_debug_error.errorMessage,
                         DEBUG_MESSAGE_MAXLEN * sizeof(word_t));

    len += (add / sizeof(word_t)) + 1;
#endif

    setRegister(thread, msgInfoRegister, wordFromMessageInfo(
                    seL4_MessageInfo_new(current_syscall_error.type, 0, 0, len)));
}

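/* Reply to 'thread' from the kernel with an empty, zero-length success
 * message. */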
void replyFromKernel_success_empty(tcb_t *thread)
{
    setRegister(thread, badgeRegister, 0);
    setRegister(thread, msgInfoRegister, wordFromMessageInfo(
                    seL4_MessageInfo_new(0, 0, 0, 0)));
}

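/* Cancel whatever IPC operation 'tptr' is currently blocked on: a send or
 * receive on an endpoint, a wait on a notification, or a pending reply. */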
void cancelIPC(tcb_t *tptr)
{
    thread_state_t *state = &tptr->tcbState;

#ifdef CONFIG_KERNEL_MCS
    /* cancel ipc cancels all faults */
    seL4_Fault_NullFault_ptr_new(&tptr->tcbFault);
#endif

    switch (thread_state_ptr_get_tsType(state)) {
    case ThreadState_BlockedOnSend:
    case ThreadState_BlockedOnReceive: {
        /* blockedIPCCancel state */
        endpoint_t *epptr;
        tcb_queue_t queue;

        epptr = EP_PTR(thread_state_ptr_get_blockingObject(state));

        /* Haskell error "blockedIPCCancel: endpoint must not be idle" */
        assert(endpoint_ptr_get_state(epptr) != EPState_Idle);

        /* Dequeue TCB */
        queue = ep_ptr_get_queue(epptr);
        queue = tcbEPDequeue(tptr, queue);
        ep_ptr_set_queue(epptr, queue);

        if (!queue.head) {
            endpoint_ptr_set_state(epptr, EPState_Idle);
        }

#ifdef CONFIG_KERNEL_MCS
        reply_t *reply = REPLY_PTR(thread_state_get_replyObject(tptr->tcbState));
        if (reply != NULL) {
            reply_unlink(reply, tptr);
        }
#endif
        setThreadState(tptr, ThreadState_Inactive);
        break;
    }

    case ThreadState_BlockedOnNotification:
        cancelSignal(tptr,
                     NTFN_PTR(thread_state_ptr_get_blockingObject(state)));
        break;

    case ThreadState_BlockedOnReply: {
#ifdef CONFIG_KERNEL_MCS
        reply_remove_tcb(tptr);
#else
        cte_t *slot, *callerCap;

        tptr->tcbFault = seL4_Fault_NullFault_new();

        /* Get the reply cap slot */
        slot = TCB_PTR_CTE_PTR(tptr, tcbReply);

        callerCap = CTE_PTR(mdb_node_get_mdbNext(slot->cteMDBNode));
        if (callerCap) {
            /** GHOSTUPD: "(True,
                gs_set_assn cteDeleteOne_'proc (ucast cap_reply_cap))" */
            cteDeleteOne(callerCap);
        }
#endif

        break;
    }
    }
}

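/* Cancel all operations pending on 'epptr': make the endpoint idle and take
 * every queued thread out of its blocked state (restarting it, or making it
 * inactive if it has a pending fault under MCS). */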
void cancelAllIPC(endpoint_t *epptr)
{
    switch (endpoint_ptr_get_state(epptr)) {
    case EPState_Idle:
        break;

    default: {
        tcb_t *thread = TCB_PTR(endpoint_ptr_get_epQueue_head(epptr));

        /* Make endpoint idle */
        endpoint_ptr_set_state(epptr, EPState_Idle);
        endpoint_ptr_set_epQueue_head(epptr, 0);
        endpoint_ptr_set_epQueue_tail(epptr, 0);

        /* Set all blocked threads to restart */
        for (; thread; thread = thread->tcbEPNext) {
#ifdef CONFIG_KERNEL_MCS
            reply_t *reply = REPLY_PTR(thread_state_get_replyObject(thread->tcbState));
            if (reply != NULL) {
                reply_unlink(reply, thread);
            }
            if (seL4_Fault_get_seL4_FaultType(thread->tcbFault) == seL4_Fault_NullFault) {
                setThreadState(thread, ThreadState_Restart);
                if (sc_sporadic(thread->tcbSchedContext)) {
                    /* We know that the thread can't have the current SC
                     * as its own SC at this point as it should still be
                     * associated with the current thread, or no thread.
                     * This check is added here to reduce the cost of
                     * proving this to be true as a short-term stop-gap. */
                    assert(thread->tcbSchedContext != NODE_STATE(ksCurSC));
                    if (thread->tcbSchedContext != NODE_STATE(ksCurSC)) {
                        refill_unblock_check(thread->tcbSchedContext);
                    }
                }
                possibleSwitchTo(thread);
            } else {
                setThreadState(thread, ThreadState_Inactive);
            }
#else
            setThreadState(thread, ThreadState_Restart);
            SCHED_ENQUEUE(thread);
#endif
        }

        rescheduleRequired();
        break;
    }
    }
}

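/* Cancel every pending send on 'epptr' whose badge matches 'badge': matching
 * senders are dequeued and restarted (or made inactive if they have a pending
 * fault under MCS), while senders with other badges remain queued. */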
void cancelBadgedSends(endpoint_t *epptr, word_t badge)
{
    switch (endpoint_ptr_get_state(epptr)) {
    case EPState_Idle:
    case EPState_Recv:
        break;

    case EPState_Send: {
        tcb_t *thread, *next;
        tcb_queue_t queue = ep_ptr_get_queue(epptr);

        /* This is a de-optimisation for verification
         * reasons. It allows the contents of the endpoint
         * queue to be ignored during the for loop. */
        endpoint_ptr_set_state(epptr, EPState_Idle);
        endpoint_ptr_set_epQueue_head(epptr, 0);
        endpoint_ptr_set_epQueue_tail(epptr, 0);

        for (thread = queue.head; thread; thread = next) {
            word_t b = thread_state_ptr_get_blockingIPCBadge(
                           &thread->tcbState);
            next = thread->tcbEPNext;
#ifdef CONFIG_KERNEL_MCS
            /* senders do not have reply objects in their state, and we are only cancelling sends */
            assert(REPLY_PTR(thread_state_get_replyObject(thread->tcbState)) == NULL);
            if (b == badge) {
                if (seL4_Fault_get_seL4_FaultType(thread->tcbFault) ==
                    seL4_Fault_NullFault) {
                    setThreadState(thread, ThreadState_Restart);
                    if (sc_sporadic(thread->tcbSchedContext)) {
                        /* We know that the thread can't have the current SC
                         * as its own SC at this point as it should still be
                         * associated with the current thread, or no thread.
                         * This check is added here to reduce the cost of
                         * proving this to be true as a short-term stop-gap. */
                        assert(thread->tcbSchedContext != NODE_STATE(ksCurSC));
                        if (thread->tcbSchedContext != NODE_STATE(ksCurSC)) {
                            refill_unblock_check(thread->tcbSchedContext);
                        }
                    }
                    possibleSwitchTo(thread);
                } else {
                    setThreadState(thread, ThreadState_Inactive);
                }
                queue = tcbEPDequeue(thread, queue);
            }
#else
            if (b == badge) {
                setThreadState(thread, ThreadState_Restart);
                SCHED_ENQUEUE(thread);
                queue = tcbEPDequeue(thread, queue);
            }
#endif
        }
        ep_ptr_set_queue(epptr, queue);

        if (queue.head) {
            endpoint_ptr_set_state(epptr, EPState_Send);
        }

        rescheduleRequired();

        break;
    }

    default:
        fail("invalid EP state");
    }
}

#ifdef CONFIG_KERNEL_MCS
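/* Requeue 'thread' on the endpoint by dequeueing it and appending it again,
 * so its position in the queue can be recomputed. */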
void reorderEP(endpoint_t *epptr, tcb_t *thread)
{
    tcb_queue_t queue = ep_ptr_get_queue(epptr);
    queue = tcbEPDequeue(thread, queue);
    queue = tcbEPAppend(thread, queue);
    ep_ptr_set_queue(epptr, queue);
}
#endif