/*
 * Copyright 2014, General Dynamics C4 Systems
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#pragma once

#include <types.h>
#include <util.h>
#include <object/structures.h>
#include <arch/machine.h>
#ifdef CONFIG_KERNEL_MCS
#include <kernel/sporadic.h>
#include <machine/timer.h>
#include <mode/machine.h>
#endif

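/* Map a (domain, priority) pair to an index into the flat array of ready
 * queues, one queue per (domain, priority). With a single configured domain
 * the priority indexes the array directly. For illustration, a caller would
 * look up a queue roughly as:
 *     NODE_STATE(ksReadyQueues)[ready_queues_index(dom, prio)]
 */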
static inline CONST word_t ready_queues_index(word_t dom, word_t prio)
{
    if (CONFIG_NUM_DOMAINS > 1) {
        return dom * CONFIG_NUM_PRIORITIES + prio;
    } else {
        assert(dom == 0);
        return prio;
    }
}

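/* The ready-queue bitmap is two levels deep: each priority maps to bit
 * (prio & MASK(wordRadix)) in one word of the L2 bitmap, and that word's
 * position in turn maps to one bit of the L1 bitmap. The two helpers below
 * convert between a priority and its L1 index (the priority's high bits).
 */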
static inline CONST word_t prio_to_l1index(word_t prio)
{
    return (prio >> wordRadix);
}

static inline CONST word_t l1index_to_prio(word_t l1index)
{
    return (l1index << wordRadix);
}

static inline bool_t PURE isRunnable(const tcb_t *thread)
{
    switch (thread_state_get_tsType(thread->tcbState)) {
    case ThreadState_Running:
    case ThreadState_Restart:
#ifdef CONFIG_VTX
    case ThreadState_RunningVM:
#endif
        return true;

    default:
        return false;
    }
}

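/* The L1 index is inverted when indexing the second level of the bitmap.
 * This is presumably so that the L2 word holding the highest priorities is
 * likely to share a cache line with the L1 word, keeping the hot path of
 * getHighestPrio cheap.
 */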
static inline CONST word_t invert_l1index(word_t l1index)
{
    word_t inverted = (L2_BITMAP_SIZE - 1 - l1index);
    assert(inverted < L2_BITMAP_SIZE);
    return inverted;
}

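/* Find the highest runnable priority in a domain: locate the most
 * significant set bit of the L1 bitmap with clzl, then the most significant
 * set bit of the corresponding (inverted) L2 word, and recombine the two
 * indices into a priority.
 */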
static inline prio_t getHighestPrio(word_t dom)
{
    word_t l1index;
    word_t l2index;
    word_t l1index_inverted;

    /* it's undefined to call clzl on 0 */
    assert(NODE_STATE(ksReadyQueuesL1Bitmap)[dom] != 0);

    l1index = wordBits - 1 - clzl(NODE_STATE(ksReadyQueuesL1Bitmap)[dom]);
    l1index_inverted = invert_l1index(l1index);
    assert(NODE_STATE(ksReadyQueuesL2Bitmap)[dom][l1index_inverted] != 0);
    l2index = wordBits - 1 - clzl(NODE_STATE(ksReadyQueuesL2Bitmap)[dom][l1index_inverted]);
    return (l1index_to_prio(l1index) | l2index);
}

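/* True if prio is at least as high as every runnable priority in the domain,
 * or if the domain has no runnable threads at all.
 */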
static inline bool_t isHighestPrio(word_t dom, prio_t prio)
{
    return NODE_STATE(ksReadyQueuesL1Bitmap)[dom] == 0 ||
           prio >= getHighestPrio(dom);
}

static inline bool_t PURE isBlocked(const tcb_t *thread)
{
    switch (thread_state_get_tsType(thread->tcbState)) {
    case ThreadState_BlockedOnReceive:
    case ThreadState_BlockedOnSend:
    case ThreadState_BlockedOnNotification:
    case ThreadState_BlockedOnReply:
        return true;

    default:
        return false;
    }
}

static inline bool_t PURE isStopped(const tcb_t *thread)
{
    switch (thread_state_get_tsType(thread->tcbState)) {
    case ThreadState_Inactive:
    case ThreadState_BlockedOnReceive:
    case ThreadState_BlockedOnSend:
    case ThreadState_BlockedOnNotification:
    case ThreadState_BlockedOnReply:
        return true;

    default:
        return false;
    }
}

#ifdef CONFIG_KERNEL_MCS
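/* Under MCS, a scheduling context with a period of zero is treated as
 * round-robin rather than sporadic: consumed budget moves from the head
 * refill to the tail (see commitTime below) and is never delayed for a
 * recharge.
 */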
static inline bool_t PURE isRoundRobin(sched_context_t *sc)
{
    return sc->scPeriod == 0;
}

static inline bool_t isCurDomainExpired(void)
{
    return CONFIG_NUM_DOMAINS > 1 &&
           ksDomainTime == 0;
}

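/* Charge the time accumulated in ksConsumed to the current scheduling
 * context's refills and reset ksConsumed to zero.
 */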
static inline void commitTime(void)
{
    if (NODE_STATE(ksCurSC)->scRefillMax) {
        if (likely(NODE_STATE(ksConsumed) > 0)) {
            /* if this function is called, the head refill must be sufficient to
             * charge ksConsumed */
            assert(refill_sufficient(NODE_STATE(ksCurSC), NODE_STATE(ksConsumed)));
            /* and it must be ready to use */
            assert(refill_ready(NODE_STATE(ksCurSC)));

            if (isRoundRobin(NODE_STATE(ksCurSC))) {
                /* for round robin threads, there are only two refills: the head, which is
                 * what we are consuming, and the tail, which is what we have consumed */
                assert(refill_size(NODE_STATE(ksCurSC)) == MIN_REFILLS);
                refill_head(NODE_STATE(ksCurSC))->rAmount -= NODE_STATE(ksConsumed);
                refill_tail(NODE_STATE(ksCurSC))->rAmount += NODE_STATE(ksConsumed);
            } else {
                refill_budget_check(NODE_STATE(ksConsumed));
            }
            assert(refill_sufficient(NODE_STATE(ksCurSC), 0));
            assert(refill_ready(NODE_STATE(ksCurSC)));
        }
        NODE_STATE(ksCurSC)->scConsumed += NODE_STATE(ksConsumed);
    }

    NODE_STATE(ksConsumed) = 0llu;
}

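/* A thread can be picked by the scheduler only if it is runnable, has a
 * scheduling context with a non-zero number of refills, and is not parked
 * in the release queue waiting for a budget recharge.
 */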
static inline bool_t PURE isSchedulable(const tcb_t *thread)
{
    return isRunnable(thread) &&
           thread->tcbSchedContext != NULL &&
           thread->tcbSchedContext->scRefillMax > 0 &&
           !thread_state_get_tcbInReleaseQueue(thread->tcbState);
}
#else
#define isSchedulable isRunnable
#endif

void configureIdleThread(tcb_t *tcb);
void activateThread(void);
void suspend(tcb_t *target);
void restart(tcb_t *target);
void doIPCTransfer(tcb_t *sender, endpoint_t *endpoint,
                   word_t badge, bool_t grant, tcb_t *receiver);
#ifdef CONFIG_KERNEL_MCS
void doReplyTransfer(tcb_t *sender, reply_t *reply, bool_t grant);
#else
void doReplyTransfer(tcb_t *sender, tcb_t *receiver, cte_t *slot, bool_t grant);
void timerTick(void);
#endif
void doNormalTransfer(tcb_t *sender, word_t *sendBuffer, endpoint_t *endpoint,
                      word_t badge, bool_t canGrant, tcb_t *receiver,
                      word_t *receiveBuffer);
void doFaultTransfer(word_t badge, tcb_t *sender, tcb_t *receiver,
                     word_t *receiverIPCBuffer);
void doNBRecvFailedTransfer(tcb_t *thread);
void schedule(void);
void chooseThread(void);
void switchToThread(tcb_t *thread);
void switchToIdleThread(void);
void setDomain(tcb_t *tptr, dom_t dom);
void setPriority(tcb_t *tptr, prio_t prio);
void setMCPriority(tcb_t *tptr, prio_t mcp);
void scheduleTCB(tcb_t *tptr);
void possibleSwitchTo(tcb_t *tptr);
void setThreadState(tcb_t *tptr, _thread_state_t ts);
void rescheduleRequired(void);

/* declare that the thread has had its registers (in its user_context_t) modified and it
 * should ignore any 'efficient' restores next time it is run, and instead restore all
 * registers into their correct place */
void Arch_postModifyRegisters(tcb_t *tptr);

/* Updates a thread's FaultIP to match its NextIP. This is used to indicate that a
 * thread has completed its fault; updating the restartPC means that if the thread
 * is restarted in the future for any reason, it will not cause the fault again. */
static inline void updateRestartPC(tcb_t *tcb)
{
    setRegister(tcb, FaultIP, getRegister(tcb, NextIP));
}

#ifdef CONFIG_KERNEL_MCS
/* End the timeslice for the current thread.
 * This will recharge the thread's timeslice and place it at the
 * end of the scheduling queue for its priority.
 */
void endTimeslice(bool_t can_timeout_fault);

/* called when a thread has used up its head refill */
void chargeBudget(ticks_t consumed, bool_t canTimeoutFault, word_t core, bool_t isCurCPU);

/* Update the kernel's timestamp and store it in ksCurTime.
 * The difference between the previous kernel timestamp and the one just read
 * is added to ksConsumed.
 *
 * Should be called on every kernel entry where threads can be billed.
 */
static inline void updateTimestamp(void)
{
    time_t prev = NODE_STATE(ksCurTime);
    NODE_STATE(ksCurTime) = getCurrentTime();
    assert(NODE_STATE(ksCurTime) < MAX_RELEASE_TIME);
    time_t consumed = (NODE_STATE(ksCurTime) - prev);
    NODE_STATE(ksConsumed) += consumed;
    if (CONFIG_NUM_DOMAINS > 1) {
        if ((consumed + MIN_BUDGET) >= ksDomainTime) {
            ksDomainTime = 0;
        } else {
            ksDomainTime -= consumed;
        }
    }
}

/*
 * Check if domain time has expired
 */
static inline void checkDomainTime(void)
{
    if (unlikely(isCurDomainExpired())) {
        NODE_STATE(ksReprogram) = true;
        rescheduleRequired();
    }
}

/* Check if the current thread/domain budget has expired.
 * If it has, bill the thread, add it to the scheduler and
 * set up a reschedule.
 *
 * @return true if the thread/domain has enough budget to
 * get through the current kernel operation.
 */
static inline bool_t checkBudget(void)
{
    /* currently running thread must have available capacity */
    assert(refill_ready(NODE_STATE(ksCurSC)));

    /* if the budget isn't enough, the timeslice for this SC is over. */
    if (likely(refill_sufficient(NODE_STATE(ksCurSC), NODE_STATE(ksConsumed)))) {
        if (unlikely(isCurDomainExpired())) {
            return false;
        }
        return true;
    }

    chargeBudget(NODE_STATE(ksConsumed), true, CURRENT_CPU_INDEX(), true);
    return false;
}

/* Everything checkBudget does, but also sets the thread
 * state to ThreadState_Restart. To be called from kernel entries
 * where the operation should be restarted once the current thread
 * has budget again.
 */
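/* A rough sketch of the intended call pattern on a billable kernel entry
 * (handleTheOperation is a hypothetical placeholder, not a kernel symbol):
 *
 *     updateTimestamp();
 *     if (checkBudgetRestart()) {
 *         handleTheOperation();
 *     }
 */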
static inline bool_t checkBudgetRestart(void)
{
    assert(isRunnable(NODE_STATE(ksCurThread)));
    bool_t result = checkBudget();
    if (!result && isRunnable(NODE_STATE(ksCurThread))) {
        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    }
    return result;
}

/* Set the next kernel tick, which is either the end of the current
 * domain's timeslice OR the end of the current thread's timeslice.
 */
void setNextInterrupt(void);

/* Wake any periodic threads that are ready for budget recharge */
void awaken(void);
/* Place the thread bound to this scheduling context in the release queue
 * of periodic threads waiting for budget recharge */
void postpone(sched_context_t *sc);
#endif