/* --------------------------------------------------------------------------
 * Copyright (c) 2013-2019 Arm Limited. All rights reserved.
 * Copyright (c) 2020-2021, Renesas Electronics Corporation. All rights
 * reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Name:    cmsis_os2_tyny4scp.c
 * Purpose: CMSIS RTOS2 wrapper for FreeRTOS
 *
 *---------------------------------------------------------------------------*/

#include "FreeRTOS.h"  // ARM.FreeRTOS::RTOS:Core
#include "cmsis_os2.h" // ::CMSIS:RTOS2
#include "task.h"      // ARM.FreeRTOS::RTOS:Core

#include <string.h>

#define __ARM_ARCH_8A__ (1)

#define IS_IRQ() is_irq()

/* Kernel version and identification string definition
   (major.minor.rev: mmnnnrrrr dec) */
#define KERNEL_VERSION \
    (((uint32_t)tskKERNEL_VERSION_MAJOR * 10000000UL) | \
     ((uint32_t)tskKERNEL_VERSION_MINOR * 10000UL) | \
     ((uint32_t)tskKERNEL_VERSION_BUILD * 1UL))

#define KERNEL_ID ("FreeRTOS " tskKERNEL_VERSION_NUMBER)

#define portYIELD_FROM_ISR(n)
#define __STATIC_INLINE static inline

/* Limits */
#define MAX_BITS_TASK_NOTIFY  31U
#define MAX_BITS_EVENT_GROUPS 24U

#define THREAD_FLAGS_INVALID_BITS (~((1UL << MAX_BITS_TASK_NOTIFY) - 1U))
#define EVENT_FLAGS_INVALID_BITS  (~((1UL << MAX_BITS_EVENT_GROUPS) - 1U))
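/* Note (illustrative, derived from the masks above): thread flags may only
   use bits 0..30 and event flags bits 0..23. A call with any higher bit set,
   e.g. osThreadFlagsSet(id, 0x80000000U), is rejected and returns
   (uint32_t)osErrorParameter. */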

/* Detect interrupt context: read the AArch64 SPSel register and return its
   SP bit, which is set while the dedicated exception stack pointer (SP_ELx)
   is selected. Used by the IS_IRQ() checks below. */
static inline uint32_t is_irq(void)
{
    uint32_t val;
    __asm__ volatile("mrs %0, spsel" : "=r"(val));
    return val & 0x01;
}

/*
   Setup SVC to reset value.
*/
__STATIC_INLINE void SVC_Setup(void)
{
#if ((__ARM_ARCH_7A__ == 0U) && (__ARM_ARCH_8A__ == 0U))
    /* Service Call interrupt might be configured before kernel start, */
    /* and when its priority is lower than or equal to BASEPRI, the    */
    /* SVC instruction causes a Hard Fault.                            */
    NVIC_SetPriority(SVCall_IRQn, 0U);
#endif
}

/* Kernel initialization state */
static osKernelState_t KernelState = osKernelInactive;

osStatus_t osKernelInitialize(void)
{
    osStatus_t stat;

    if (IS_IRQ()) {
        stat = osErrorISR;
    } else {
        if (KernelState == osKernelInactive) {
#if defined(RTE_Compiler_EventRecorder)
            EvrFreeRTOSSetup(0U);
#endif
#if defined(RTE_RTOS_FreeRTOS_HEAP_5) && (HEAP_5_REGION_SETUP == 1)
            vPortDefineHeapRegions(configHEAP_5_REGIONS);
#endif
            KernelState = osKernelReady;
            stat = osOK;
        } else {
            stat = osError;
        }
    }

    return (stat);
}

osStatus_t osKernelGetInfo(osVersion_t *version, char *id_buf, uint32_t id_size)
{
    if (version != NULL) {
        /* Version encoding is major.minor.rev: mmnnnrrrr dec */
        version->api = KERNEL_VERSION;
        version->kernel = KERNEL_VERSION;
    }

    if ((id_buf != NULL) && (id_size != 0U)) {
        if (id_size > sizeof(KERNEL_ID)) {
            id_size = sizeof(KERNEL_ID);
        }
        memcpy(id_buf, KERNEL_ID, id_size);
    }

    return (osOK);
}
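
/* Usage sketch (illustrative; variable names are placeholders):

       osVersion_t ver;
       char        id[32];
       osStatus_t  st = osKernelGetInfo(&ver, id, sizeof(id));

   On osOK, id holds the "FreeRTOS <version>" identification string and
   ver.kernel / ver.api hold the encoded version number. */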

osKernelState_t osKernelGetState(void)
{
    osKernelState_t state;

    switch (xTaskGetSchedulerState()) {
    case taskSCHEDULER_RUNNING:
        state = osKernelRunning;
        break;

    case taskSCHEDULER_SUSPENDED:
        state = osKernelLocked;
        break;

    case taskSCHEDULER_NOT_STARTED:
    default:
        if (KernelState == osKernelReady) {
            state = osKernelReady;
        } else {
            state = osKernelInactive;
        }
        break;
    }

    return (state);
}

osStatus_t osKernelStart(void)
{
    osStatus_t stat;

    if (IS_IRQ()) {
        stat = osErrorISR;
    } else {
        if (KernelState == osKernelReady) {
            /* Ensure SVC priority is at the reset value */
            SVC_Setup();
            /* Change state to enable IRQ masking check */
            KernelState = osKernelRunning;
            /* Start the kernel scheduler */
            vTaskStartScheduler();
            stat = osOK;
        } else {
            stat = osError;
        }
    }

    return (stat);
}
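
/* Typical start-up sequence (illustrative sketch; app_main is a placeholder
   for the first application thread and is not part of this file):
   osKernelInitialize() moves the kernel to osKernelReady, threads are created
   while the kernel is not yet running, and osKernelStart() starts the
   scheduler and does not return on success.

       int main(void)
       {
           osKernelInitialize();
           osThreadNew(app_main, NULL, NULL);
           osKernelStart();
           for (;;) { }
       }
*/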

int32_t osKernelLock(void)
{
    int32_t lock;

    if (IS_IRQ()) {
        lock = (int32_t)osErrorISR;
    } else {
        switch (xTaskGetSchedulerState()) {
        case taskSCHEDULER_SUSPENDED:
            lock = 1;
            break;

        case taskSCHEDULER_RUNNING:
            vTaskSuspendAll();
            lock = 0;
            break;

        case taskSCHEDULER_NOT_STARTED:
        default:
            lock = (int32_t)osError;
            break;
        }
    }

    return (lock);
}

int32_t osKernelUnlock(void)
{
    int32_t lock;

    if (IS_IRQ()) {
        lock = (int32_t)osErrorISR;
    } else {
        switch (xTaskGetSchedulerState()) {
        case taskSCHEDULER_SUSPENDED:
            lock = 1;

            if (xTaskResumeAll() != pdTRUE) {
                if (xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED) {
                    lock = (int32_t)osError;
                }
            }
            break;

        case taskSCHEDULER_RUNNING:
            lock = 0;
            break;

        case taskSCHEDULER_NOT_STARTED:
        default:
            lock = (int32_t)osError;
            break;
        }
    }

    return (lock);
}

int32_t osKernelRestoreLock(int32_t lock)
{
    if (IS_IRQ()) {
        lock = (int32_t)osErrorISR;
    } else {
        switch (xTaskGetSchedulerState()) {
        case taskSCHEDULER_SUSPENDED:
        case taskSCHEDULER_RUNNING:
            if (lock == 1) {
                vTaskSuspendAll();
            } else {
                if (lock != 0) {
                    lock = (int32_t)osError;
                } else {
                    if (xTaskResumeAll() != pdTRUE) {
                        if (xTaskGetSchedulerState() != taskSCHEDULER_RUNNING) {
                            lock = (int32_t)osError;
                        }
                    }
                }
            }
            break;

        case taskSCHEDULER_NOT_STARTED:
        default:
            lock = (int32_t)osError;
            break;
        }
    }

    return (lock);
}
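
/* Usage sketch (illustrative): guard a short thread-level critical section by
   saving and restoring the previous scheduler lock state. Note that this only
   suspends the scheduler; it does not mask interrupts.

       int32_t lock = osKernelLock();
       ... access data shared with other threads ...
       osKernelRestoreLock(lock);
*/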

uint32_t osKernelGetTickCount(void)
{
    TickType_t ticks;

    if (IS_IRQ()) {
        ticks = xTaskGetTickCountFromISR();
    } else {
        ticks = xTaskGetTickCount();
    }

    return (ticks);
}

uint32_t osKernelGetTickFreq(void)
{
    return (configTICK_RATE_HZ);
}
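
/* Usage sketch (illustrative): convert a millisecond interval into kernel
   ticks before calling a timeout-based API (watch for overflow with large
   values).

       uint32_t timeout_ms    = 10U;
       uint32_t timeout_ticks = (timeout_ms * osKernelGetTickFreq()) / 1000U;
*/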

/*---------------------------------------------------------------------------*/

osThreadId_t osThreadNew(
    osThreadFunc_t func,
    void *argument,
    const osThreadAttr_t *attr)
{
    const char *name;
    uint32_t stack;
    TaskHandle_t hTask;
    UBaseType_t prio;
    int32_t mem;

    hTask = NULL;

    if (!IS_IRQ() && (func != NULL)) {
        stack = configMINIMAL_STACK_SIZE;
        prio = (UBaseType_t)osPriorityNormal;

        name = NULL;
        mem = -1;

        if (attr != NULL) {
            if (attr->name != NULL) {
                name = attr->name;
            }
            if (attr->priority != osPriorityNone) {
                prio = (UBaseType_t)attr->priority;
            }

            if ((prio < osPriorityIdle) || (prio > osPriorityISR) ||
                ((attr->attr_bits & osThreadJoinable) == osThreadJoinable)) {
                return (NULL);
            }

            if (attr->stack_size > 0U) {
                /* In FreeRTOS the stack size is not given in bytes but in  */
                /* units of sizeof(StackType_t), which is 4 on Arm ports.   */
                /* The stack size should therefore be 4-byte aligned to     */
                /* avoid side effects from the division below.              */
                stack = attr->stack_size / sizeof(StackType_t);
            }

            if ((attr->cb_mem != NULL) &&
                (attr->cb_size >= sizeof(StaticTask_t)) &&
                (attr->stack_mem != NULL) && (attr->stack_size > 0U)) {
                mem = 1;
            } else {
                if ((attr->cb_mem == NULL) && (attr->cb_size == 0U) &&
                    (attr->stack_mem == NULL)) {
                    mem = 0;
                }
            }
        } else {
            mem = 0;
        }

        if (mem == 1) {
            hTask = xTaskCreateStatic(
                (TaskFunction_t)func,
                name,
                stack,
                argument,
                prio,
                (StackType_t *)attr->stack_mem,
                (StaticTask_t *)attr->cb_mem);
        } else {
            if (mem == 0) {
                if (xTaskCreate(
                        (TaskFunction_t)func,
                        name,
                        (uint16_t)stack,
                        argument,
                        prio,
                        &hTask) != pdPASS) {
                    hTask = NULL;
                }
            }
        }
    }

    return ((osThreadId_t)hTask);
}
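
/* Usage sketch (illustrative; "worker" and the attribute values are
   placeholders): create a thread with a dynamically allocated stack. This
   wrapper rejects osThreadJoinable, and stack_size is given in bytes and
   should be a multiple of sizeof(StackType_t).

       static const osThreadAttr_t worker_attr = {
           .name       = "worker",
           .stack_size = 1024U,
           .priority   = osPriorityNormal,
       };

       osThreadId_t tid = osThreadNew(worker_func, NULL, &worker_attr);
       if (tid == NULL) {
           ... creation failed: bad attributes or allocation failure ...
       }
*/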

uint32_t osThreadFlagsSet(osThreadId_t thread_id, uint32_t flags)
{
    TaskHandle_t hTask = (TaskHandle_t)thread_id;
    uint32_t rflags;
    BaseType_t yield;

    if ((hTask == NULL) || ((flags & THREAD_FLAGS_INVALID_BITS) != 0U)) {
        rflags = (uint32_t)osErrorParameter;
    } else {
        rflags = (uint32_t)osError;

        if (IS_IRQ()) {
            yield = pdFALSE;

            (void)xTaskNotifyFromISR(hTask, flags, eSetBits, &yield);
            (void)xTaskNotifyAndQueryFromISR(
                hTask, 0, eNoAction, &rflags, NULL);

            portYIELD_FROM_ISR(yield);
        } else {
            (void)xTaskNotify(hTask, flags, eSetBits);
            (void)xTaskNotifyAndQuery(hTask, 0, eNoAction, &rflags);
        }
    }
    /* Return flags after setting */
    return (rflags);
}

uint32_t osThreadFlagsClear(uint32_t flags)
{
    TaskHandle_t hTask;
    uint32_t rflags, cflags;

    if (IS_IRQ()) {
        rflags = (uint32_t)osErrorISR;
    } else if ((flags & THREAD_FLAGS_INVALID_BITS) != 0U) {
        rflags = (uint32_t)osErrorParameter;
    } else {
        hTask = xTaskGetCurrentTaskHandle();

        if (xTaskNotifyAndQuery(hTask, 0, eNoAction, &cflags) == pdPASS) {
            rflags = cflags;
            cflags &= ~flags;

            if (xTaskNotify(hTask, cflags, eSetValueWithOverwrite) != pdPASS) {
                rflags = (uint32_t)osError;
            }
        } else {
            rflags = (uint32_t)osError;
        }
    }

    /* Return flags before clearing */
    return (rflags);
}

uint32_t osThreadFlagsWait(uint32_t flags, uint32_t options, uint32_t timeout)
{
    uint32_t rflags, nval;
    uint32_t clear;
    TickType_t t0, td, tout;
    BaseType_t rval;

    if (IS_IRQ()) {
        rflags = (uint32_t)osErrorISR;
    } else if ((flags & THREAD_FLAGS_INVALID_BITS) != 0U) {
        rflags = (uint32_t)osErrorParameter;
    } else {
        if ((options & osFlagsNoClear) == osFlagsNoClear) {
            clear = 0U;
        } else {
            clear = flags;
        }

        rflags = 0U;
        tout = timeout;

        t0 = xTaskGetTickCount();
        do {
            rval = xTaskNotifyWait(0, clear, &nval, tout);

            if (rval == pdPASS) {
                rflags &= flags;
                rflags |= nval;

                if ((options & osFlagsWaitAll) == osFlagsWaitAll) {
                    if ((flags & rflags) == flags) {
                        break;
                    } else {
                        if (timeout == 0U) {
                            rflags = (uint32_t)osErrorResource;
                            break;
                        }
                    }
                } else {
                    if ((flags & rflags) != 0) {
                        break;
                    } else {
                        if (timeout == 0U) {
                            rflags = (uint32_t)osErrorResource;
                            break;
                        }
                    }
                }

                /* Update timeout */
                td = xTaskGetTickCount() - t0;

                if (td > tout) {
                    tout = 0;
                } else {
                    tout -= td;
                }
            } else {
                if (timeout == 0) {
                    rflags = (uint32_t)osErrorResource;
                } else {
                    rflags = (uint32_t)osErrorTimeout;
                }
            }
        } while (rval != pdFAIL);
    }

    /* Return flags before clearing */
    return (rflags);
}
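
/* Usage sketch (illustrative; worker_id and the flag value are placeholders):
   signal a waiting thread from an interrupt handler or from another thread.

   In the waiting thread:

       uint32_t flags = osThreadFlagsWait(0x01U, osFlagsWaitAny, osWaitForever);
       if ((flags & osFlagsError) == 0U) {
           ... flag 0x01 was set, handle the event ...
       }

   In the ISR or in the signalling thread:

       (void)osThreadFlagsSet(worker_id, 0x01U);
*/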

/*---------------------------------------------------------------------------*/

/* External Idle and Timer task static memory allocation functions */
extern void vApplicationGetIdleTaskMemory(
    StaticTask_t **ppxIdleTaskTCBBuffer,
    StackType_t **ppxIdleTaskStackBuffer,
    uint32_t *pulIdleTaskStackSize);
extern void vApplicationGetTimerTaskMemory(
    StaticTask_t **ppxTimerTaskTCBBuffer,
    StackType_t **ppxTimerTaskStackBuffer,
    uint32_t *pulTimerTaskStackSize);

/* Idle task control block and stack */
static StaticTask_t Idle_TCB;
static StackType_t Idle_Stack[configMINIMAL_STACK_SIZE];

/* Timer task control block and stack */
static StaticTask_t Timer_TCB;
static StackType_t Timer_Stack[configTIMER_TASK_STACK_DEPTH];

/*
   vApplicationGetIdleTaskMemory is called when configSUPPORT_STATIC_ALLOCATION
   is set to 1 and is required for static memory allocation support.
*/
void vApplicationGetIdleTaskMemory(
    StaticTask_t **ppxIdleTaskTCBBuffer,
    StackType_t **ppxIdleTaskStackBuffer,
    uint32_t *pulIdleTaskStackSize)
{
    *ppxIdleTaskTCBBuffer = &Idle_TCB;
    *ppxIdleTaskStackBuffer = &Idle_Stack[0];
    *pulIdleTaskStackSize = (uint32_t)configMINIMAL_STACK_SIZE;
}

/*
   vApplicationGetTimerTaskMemory is called when configSUPPORT_STATIC_ALLOCATION
   is set to 1 and is required for static memory allocation support.
*/
void vApplicationGetTimerTaskMemory(
    StaticTask_t **ppxTimerTaskTCBBuffer,
    StackType_t **ppxTimerTaskStackBuffer,
    uint32_t *pulTimerTaskStackSize)
{
    *ppxTimerTaskTCBBuffer = &Timer_TCB;
    *ppxTimerTaskStackBuffer = &Timer_Stack[0];
    *pulTimerTaskStackSize = (uint32_t)configTIMER_TASK_STACK_DEPTH;
}