/*
 * FreeRTOS Kernel V10.3.1
 * Copyright (C) 2020 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * http://www.FreeRTOS.org
 * http://aws.amazon.com/freertos
 *
 * 1 tab == 4 spaces!
 */

#include <stdlib.h>
#include <string.h>

/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
all the API functions to use the MPU wrappers.  That should only be done when
task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE

#include "FreeRTOS.h"
#include "queue.h"
#include "task.h"

#if (configUSE_CO_ROUTINES == 1)
#    include "croutine.h"
#endif

/* Lint e9021, e961 and e750 are suppressed as a MISRA exception, justified
because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
for the header files above, but not in this file, in order to generate the
correct privileged vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */

/* Constants used with the cRxLock and cTxLock structure members. */
#define queueUNLOCKED ((int8_t)-1)
#define queueLOCKED_UNMODIFIED ((int8_t)0)

/* When the Queue_t structure is used to represent a base queue its pcHead and
pcTail members are used as pointers into the queue storage area.  When the
Queue_t structure is used to represent a mutex the pcHead and pcTail pointers
are not necessary, and the pcHead pointer is set to NULL to indicate that the
structure instead holds a pointer to the mutex holder (if any).  Map an
alternative name to the pcHead structure member to ensure the readability of
the code is maintained.  The QueuePointers_t and SemaphoreData_t types are used
to form a union as their usage is mutually exclusive, dependent on what the
queue is being used for. */
#define uxQueueType pcHead
#define queueQUEUE_IS_MUTEX NULL

typedef struct QueuePointers {
    int8_t *pcTail; /*< Points to the byte at the end of the queue storage area.
                       One more byte is allocated than necessary to store the
                       queue items, and this byte is used as a marker. */
    int8_t *pcReadFrom; /*< Points to the last place that a queued item was read
                           from when the structure is used as a queue. */
} QueuePointers_t;

typedef struct SemaphoreData {
    TaskHandle_t
        xMutexHolder; /*< The handle of the task that holds the mutex. */
    UBaseType_t
        uxRecursiveCallCount; /*< Maintains a count of the number of times a
                                 recursive mutex has been recursively 'taken'
                                 when the structure is used as a mutex. */
} SemaphoreData_t;

/* Semaphores do not actually store or copy data, so have an item size of
zero. */
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH ((UBaseType_t)0)
#define queueMUTEX_GIVE_BLOCK_TIME ((TickType_t)0U)

#if (configUSE_PREEMPTION == 0)
/* If the cooperative scheduler is being used then a yield should not be
performed just because a higher priority task has been woken. */
#    define queueYIELD_IF_USING_PREEMPTION()
#else
#    define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
#endif

/*
 * Definition of the queue used by the scheduler.
 * Items are queued by copy, not reference.  See the following link for the
 * rationale: https://www.freertos.org/Embedded-RTOS-Queues.html
 */
typedef struct QueueDefinition /* The old naming convention is used to prevent
                                  breaking kernel aware debuggers. */
{
    int8_t *pcHead; /*< Points to the beginning of the queue storage area. */
    int8_t *pcWriteTo; /*< Points to the next free place in the storage area. */

    union {
        QueuePointers_t xQueue; /*< Data required exclusively when this
                                   structure is used as a queue. */
        SemaphoreData_t xSemaphore; /*< Data required exclusively when this
                                       structure is used as a semaphore. */
    } u;

    List_t
        xTasksWaitingToSend; /*< List of tasks that are blocked waiting to post
                                onto this queue.  Stored in priority order. */
    List_t xTasksWaitingToReceive; /*< List of tasks that are blocked waiting to
                                      read from this queue.  Stored in priority
                                      order. */

    volatile UBaseType_t
        uxMessagesWaiting; /*< The number of items currently in the queue. */
    UBaseType_t uxLength; /*< The length of the queue, defined as the number of
                             items it will hold, not the number of bytes. */
    UBaseType_t
        uxItemSize; /*< The size of each item that the queue will hold. */

    volatile int8_t
        cRxLock; /*< Stores the number of items received from the queue (removed
                    from the queue) while the queue was locked.  Set to
                    queueUNLOCKED when the queue is not locked. */
    volatile int8_t
        cTxLock; /*< Stores the number of items transmitted to the queue (added
                    to the queue) while the queue was locked.  Set to
                    queueUNLOCKED when the queue is not locked. */

#if ( \
    (configSUPPORT_STATIC_ALLOCATION == 1) && \
    (configSUPPORT_DYNAMIC_ALLOCATION == 1))
    uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the memory used by the
                                      queue was statically allocated to ensure
                                      no attempt is made to free the memory. */
#endif

#if (configUSE_QUEUE_SETS == 1)
    struct QueueDefinition *pxQueueSetContainer;
#endif

#if (configUSE_TRACE_FACILITY == 1)
    UBaseType_t uxQueueNumber;
    uint8_t ucQueueType;
#endif

} xQUEUE;

/* The old xQUEUE name is maintained above then typedefed to the new Queue_t
name below to enable the use of older kernel aware debuggers. */
typedef xQUEUE Queue_t;

/*-----------------------------------------------------------*/

/*
 * The queue registry is just a means for kernel aware debuggers to locate
 * queue structures.  It has no other purpose so is an optional component.
 */
#if (configQUEUE_REGISTRY_SIZE > 0)

/* The type stored within the queue registry array.  This allows a name
to be assigned to each queue making kernel aware debugging a little
more user friendly. */
typedef struct QUEUE_REGISTRY_ITEM {
    const char *pcQueueName; /*lint !e971 Unqualified char types are allowed for
                                strings and single characters only. */
    QueueHandle_t xHandle;
} xQueueRegistryItem;

/* The old xQueueRegistryItem name is maintained above then typedefed to the
new QueueRegistryItem_t name below to enable the use of older kernel aware
debuggers. */
typedef xQueueRegistryItem QueueRegistryItem_t;

/* The queue registry is simply an array of QueueRegistryItem_t structures.
The pcQueueName member of a structure being NULL is indicative of the
array position being vacant. */
PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[configQUEUE_REGISTRY_SIZE];

#endif /* configQUEUE_REGISTRY_SIZE */

/*
 * Unlocks a queue locked by a call to prvLockQueue.  Locking a queue does not
 * prevent an ISR from adding items to or removing items from the queue, but
 * does prevent an ISR from removing tasks from the queue event lists.  If an
 * ISR finds a queue is locked it will instead increment the appropriate queue
 * lock count to indicate that a task may require unblocking.  When the queue
 * is unlocked these lock counts are inspected, and the appropriate action
 * taken.
 */
static void prvUnlockQueue(Queue_t *const pxQueue) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any data in a queue.
 *
 * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueEmpty(const Queue_t *pxQueue) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any space in a queue.
 *
 * @return pdTRUE if there is no space, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueFull(const Queue_t *pxQueue) PRIVILEGED_FUNCTION;

/*
 * Copies an item into the queue, either at the front of the queue or the
 * back of the queue.
 */
static BaseType_t prvCopyDataToQueue(
    Queue_t *const pxQueue,
    const void *pvItemToQueue,
    const BaseType_t xPosition) PRIVILEGED_FUNCTION;

/*
 * Copies an item out of a queue.
 */
static void prvCopyDataFromQueue(Queue_t *const pxQueue, void *const pvBuffer)
    PRIVILEGED_FUNCTION;

#if (configUSE_QUEUE_SETS == 1)
/*
 * Checks to see if a queue is a member of a queue set, and if so, notifies
 * the queue set that the queue contains data.
 */
static BaseType_t prvNotifyQueueSetContainer(const Queue_t *const pxQueue)
    PRIVILEGED_FUNCTION;
#endif

/*
 * Called after a Queue_t structure has been allocated either statically or
 * dynamically to fill in the structure's members.
 */
static void prvInitialiseNewQueue(
    const UBaseType_t uxQueueLength,
    const UBaseType_t uxItemSize,
    uint8_t *pucQueueStorage,
    const uint8_t ucQueueType,
    Queue_t *pxNewQueue) PRIVILEGED_FUNCTION;

/*
 * Mutexes are a special type of queue.  When a mutex is created, first the
 * queue is created, then prvInitialiseMutex() is called to configure the queue
 * as a mutex.
 */
#if (configUSE_MUTEXES == 1)
static void prvInitialiseMutex(Queue_t *pxNewQueue) PRIVILEGED_FUNCTION;
#endif

#if (configUSE_MUTEXES == 1)
/*
 * If a task waiting for a mutex causes the mutex holder to inherit a
 * priority, but the waiting task times out, then the holder should
 * disinherit the priority - but only down to the highest priority of any
 * other tasks that are waiting for the same mutex.  This function returns
 * that priority.
 */
static UBaseType_t prvGetDisinheritPriorityAfterTimeout(
    const Queue_t *const pxQueue) PRIVILEGED_FUNCTION;
#endif
/*-----------------------------------------------------------*/

/*
 * Macro to mark a queue as locked.  Locking a queue prevents an ISR from
 * accessing the queue event lists.
 */
#define prvLockQueue(pxQueue) \
    taskENTER_CRITICAL(); \
    { \
        if ((pxQueue)->cRxLock == queueUNLOCKED) { \
            (pxQueue)->cRxLock = queueLOCKED_UNMODIFIED; \
        } \
        if ((pxQueue)->cTxLock == queueUNLOCKED) { \
            (pxQueue)->cTxLock = queueLOCKED_UNMODIFIED; \
        } \
    } \
    taskEXIT_CRITICAL()
/*-----------------------------------------------------------*/

BaseType_t xQueueGenericReset(QueueHandle_t xQueue, BaseType_t xNewQueue)
{
    Queue_t *const pxQueue = xQueue;

    configASSERT(pxQueue);

    taskENTER_CRITICAL();
    {
        pxQueue->u.xQueue.pcTail = pxQueue->pcHead +
            (pxQueue->uxLength *
             pxQueue->uxItemSize); /*lint !e9016 Pointer arithmetic allowed on
                                      char types, especially when it assists
                                      conveying intent. */
        pxQueue->uxMessagesWaiting = (UBaseType_t)0U;
        pxQueue->pcWriteTo = pxQueue->pcHead;
        pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead +
            ((pxQueue->uxLength - 1U) *
             pxQueue->uxItemSize); /*lint !e9016 Pointer arithmetic allowed on
                                      char types, especially when it assists
                                      conveying intent. */
        pxQueue->cRxLock = queueUNLOCKED;
        pxQueue->cTxLock = queueUNLOCKED;

        if (xNewQueue == pdFALSE) {
            /* If there are tasks blocked waiting to read from the queue, then
            the tasks will remain blocked as after this function exits the queue
            will still be empty.  If there are tasks blocked waiting to write to
            the queue, then one should be unblocked as after this function exits
            it will be possible to write to it. */
            if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToSend)) == pdFALSE) {
                if (xTaskRemoveFromEventList(&(pxQueue->xTasksWaitingToSend)) !=
                    pdFALSE) {
                    queueYIELD_IF_USING_PREEMPTION();
                } else {
                    mtCOVERAGE_TEST_MARKER();
                }
            } else {
                mtCOVERAGE_TEST_MARKER();
            }
        } else {
            /* Ensure the event queues start in the correct state. */
            vListInitialise(&(pxQueue->xTasksWaitingToSend));
            vListInitialise(&(pxQueue->xTasksWaitingToReceive));
        }
    }
    taskEXIT_CRITICAL();

    /* A value is returned for calling semantic consistency with previous
    versions. */
    return pdPASS;
}
/*-----------------------------------------------------------*/
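
/* Example usage sketch (not part of the kernel source).  Application code
reaches xQueueGenericReset() through the xQueueReset() macro in queue.h, which
passes pdFALSE for xNewQueue; the queue handle below is an illustrative
placeholder:

    QueueHandle_t xEventQueue;  // Created elsewhere with xQueueCreate().

    void vFlushEvents( void )
    {
        // Discard any queued items.  A task blocked waiting to send is
        // unblocked because space becomes available; tasks blocked waiting
        // to receive remain blocked because the queue is still empty.
        ( void ) xQueueReset( xEventQueue );
    }
*/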

#if (configSUPPORT_STATIC_ALLOCATION == 1)

QueueHandle_t xQueueGenericCreateStatic(
    const UBaseType_t uxQueueLength,
    const UBaseType_t uxItemSize,
    uint8_t *pucQueueStorage,
    StaticQueue_t *pxStaticQueue,
    const uint8_t ucQueueType)
{
    Queue_t *pxNewQueue;

    configASSERT(uxQueueLength > (UBaseType_t)0);

    /* The StaticQueue_t structure and the queue storage area must be
    supplied. */
    configASSERT(pxStaticQueue != NULL);

    /* A queue storage area should be provided if the item size is not 0, and
    should not be provided if the item size is 0. */
    configASSERT(!((pucQueueStorage != NULL) && (uxItemSize == 0)));
    configASSERT(!((pucQueueStorage == NULL) && (uxItemSize != 0)));

#    if (configASSERT_DEFINED == 1)
    {
        /* Sanity check that the size of the structure used to declare a
        variable of type StaticQueue_t or StaticSemaphore_t equals the size of
        the real queue and semaphore structures. */
        volatile size_t xSize = sizeof(StaticQueue_t);
        configASSERT(xSize == sizeof(Queue_t));
        (void)xSize; /* Keeps lint quiet when configASSERT() is not defined. */
    }
#    endif /* configASSERT_DEFINED */

    /* The address of a statically allocated queue was passed in, use it.
    The address of a statically allocated storage area was also passed in
    but is already set. */
    pxNewQueue = (Queue_t *)
        pxStaticQueue; /*lint !e740 !e9087 Unusual cast is ok as the structures
                          are designed to have the same alignment, and the size
                          is checked by an assert. */

    if (pxNewQueue != NULL) {
#    if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
        {
            /* Queues can be allocated either statically or dynamically, so
            note this queue was allocated statically in case the queue is
            later deleted. */
            pxNewQueue->ucStaticallyAllocated = pdTRUE;
        }
#    endif /* configSUPPORT_DYNAMIC_ALLOCATION */

        prvInitialiseNewQueue(
            uxQueueLength,
            uxItemSize,
            pucQueueStorage,
            ucQueueType,
            pxNewQueue);
    } else {
        traceQUEUE_CREATE_FAILED(ucQueueType);
        mtCOVERAGE_TEST_MARKER();
    }

    return pxNewQueue;
}

#endif /* configSUPPORT_STATIC_ALLOCATION */
/*-----------------------------------------------------------*/
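
/* Example usage sketch (not part of the kernel source).  Application code
reaches xQueueGenericCreateStatic() through the xQueueCreateStatic() macro in
queue.h; the buffer names below are illustrative placeholders:

    #define QUEUE_LENGTH 10
    #define ITEM_SIZE sizeof( uint32_t )

    // The memory that will actually hold the queued items.
    static uint8_t ucQueueStorage[ QUEUE_LENGTH * ITEM_SIZE ];

    // The variable that will hold the queue's control structure.
    static StaticQueue_t xQueueBuffer;

    void vCreateQueueStatically( void )
    {
        QueueHandle_t xQueue = xQueueCreateStatic(
            QUEUE_LENGTH, ITEM_SIZE, ucQueueStorage, &xQueueBuffer );

        // No dynamic allocation is performed, so creation can only fail if
        // the supplied buffers are invalid.
        configASSERT( xQueue != NULL );
    }
*/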

#if (configSUPPORT_DYNAMIC_ALLOCATION == 1)

QueueHandle_t xQueueGenericCreate(
    const UBaseType_t uxQueueLength,
    const UBaseType_t uxItemSize,
    const uint8_t ucQueueType)
{
    Queue_t *pxNewQueue;
    size_t xQueueSizeInBytes;
    uint8_t *pucQueueStorage;

    configASSERT(uxQueueLength > (UBaseType_t)0);

    /* Allocate enough space to hold the maximum number of items that
    can be in the queue at any time.  It is valid for uxItemSize to be
    zero in the case the queue is used as a semaphore. */
    xQueueSizeInBytes = (size_t)(
        uxQueueLength * uxItemSize); /*lint !e961 MISRA exception as the casts
                                        are only redundant for some ports. */

    /* Allocate the queue and storage area.  Justification for MISRA
    deviation as follows:  pvPortMalloc() always ensures returned memory
    blocks are aligned per the requirements of the MCU stack.  In this case
    pvPortMalloc() must return a pointer that is guaranteed to meet the
    alignment requirements of the Queue_t structure - which in this case
    is an int8_t *.  Therefore, whenever the stack alignment requirements
    are greater than or equal to the pointer to char requirements the cast
    is safe.  In other cases alignment requirements are not strict (one or
    two bytes). */
    pxNewQueue = (Queue_t *)pvPortMalloc(
        sizeof(Queue_t) +
        xQueueSizeInBytes); /*lint !e9087 !e9079 see comment above. */

    if (pxNewQueue != NULL) {
        /* Jump past the queue structure to find the location of the queue
        storage area. */
        pucQueueStorage = (uint8_t *)pxNewQueue;
        pucQueueStorage += sizeof(
            Queue_t); /*lint !e9016 Pointer arithmetic allowed on char types,
                         especially when it assists conveying intent. */

#    if (configSUPPORT_STATIC_ALLOCATION == 1)
        {
            /* Queues can be created either statically or dynamically, so
            note this queue was created dynamically in case it is later
            deleted. */
            pxNewQueue->ucStaticallyAllocated = pdFALSE;
        }
#    endif /* configSUPPORT_STATIC_ALLOCATION */

        prvInitialiseNewQueue(
            uxQueueLength,
            uxItemSize,
            pucQueueStorage,
            ucQueueType,
            pxNewQueue);
    } else {
        traceQUEUE_CREATE_FAILED(ucQueueType);
        mtCOVERAGE_TEST_MARKER();
    }

    return pxNewQueue;
}

#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
/*-----------------------------------------------------------*/
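
/* Example usage sketch (not part of the kernel source).  Application code
reaches xQueueGenericCreate() through the xQueueCreate() macro in queue.h;
the message type below is an illustrative placeholder:

    typedef struct
    {
        uint8_t ucMessageID;
        uint32_t ulPayload;
    } Message_t;

    void vCreateQueueDynamically( void )
    {
        // Room for 5 Message_t items, each copied by value into the storage
        // area that is allocated immediately after the Queue_t structure.
        QueueHandle_t xQueue = xQueueCreate( 5, sizeof( Message_t ) );

        if( xQueue == NULL )
        {
            // Not enough FreeRTOS heap remained for the queue plus storage.
        }
    }
*/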

static void prvInitialiseNewQueue(
    const UBaseType_t uxQueueLength,
    const UBaseType_t uxItemSize,
    uint8_t *pucQueueStorage,
    const uint8_t ucQueueType,
    Queue_t *pxNewQueue)
{
    /* Remove compiler warnings about unused parameters should
    configUSE_TRACE_FACILITY not be set to 1. */
    (void)ucQueueType;

    if (uxItemSize == (UBaseType_t)0) {
        /* No RAM was allocated for the queue storage area, but pcHead cannot
        be set to NULL because NULL is used as a key to say the queue is used as
        a mutex.  Therefore just set pcHead to point to the queue as a benign
        value that is known to be within the memory map. */
        pxNewQueue->pcHead = (int8_t *)pxNewQueue;
    } else {
        /* Set the head to the start of the queue storage area. */
        pxNewQueue->pcHead = (int8_t *)pucQueueStorage;
    }

    /* Initialise the queue members as described where the queue type is
    defined. */
    pxNewQueue->uxLength = uxQueueLength;
    pxNewQueue->uxItemSize = uxItemSize;
    (void)xQueueGenericReset(pxNewQueue, pdTRUE);

#if (configUSE_TRACE_FACILITY == 1)
    {
        pxNewQueue->ucQueueType = ucQueueType;
    }
#endif /* configUSE_TRACE_FACILITY */

#if (configUSE_QUEUE_SETS == 1)
    {
        pxNewQueue->pxQueueSetContainer = NULL;
    }
#endif /* configUSE_QUEUE_SETS */

    traceQUEUE_CREATE(pxNewQueue);
}
/*-----------------------------------------------------------*/

#if (configUSE_MUTEXES == 1)

static void prvInitialiseMutex(Queue_t *pxNewQueue)
{
    if (pxNewQueue != NULL) {
        /* The queue create function will set all the queue structure members
        correctly for a generic queue, but this function is creating a
        mutex.  Overwrite those members that need to be set differently -
        in particular the information required for priority inheritance. */
        pxNewQueue->u.xSemaphore.xMutexHolder = NULL;
        pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;

        /* In case this is a recursive mutex. */
        pxNewQueue->u.xSemaphore.uxRecursiveCallCount = 0;

        traceCREATE_MUTEX(pxNewQueue);

        /* Start with the semaphore in the expected state. */
        (void)xQueueGenericSend(
            pxNewQueue, NULL, (TickType_t)0U, queueSEND_TO_BACK);
    } else {
        traceCREATE_MUTEX_FAILED();
    }
}

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/

#if ((configUSE_MUTEXES == 1) && (configSUPPORT_DYNAMIC_ALLOCATION == 1))

QueueHandle_t xQueueCreateMutex(const uint8_t ucQueueType)
{
    QueueHandle_t xNewQueue;
    const UBaseType_t uxMutexLength = (UBaseType_t)1,
                      uxMutexSize = (UBaseType_t)0;

    xNewQueue = xQueueGenericCreate(uxMutexLength, uxMutexSize, ucQueueType);
    prvInitialiseMutex((Queue_t *)xNewQueue);

    return xNewQueue;
}

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
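
/* Example usage sketch (not part of the kernel source).  Application code
reaches xQueueCreateMutex() through the xSemaphoreCreateMutex() macro in
semphr.h; vGuardedFunction is an illustrative placeholder:

    SemaphoreHandle_t xMutex = NULL;

    void vGuardedFunction( void )
    {
        if( xMutex == NULL )
        {
            xMutex = xSemaphoreCreateMutex();
        }

        if( xSemaphoreTake( xMutex, pdMS_TO_TICKS( 100 ) ) == pdPASS )
        {
            // Access the guarded resource here, then release the mutex.
            ( void ) xSemaphoreGive( xMutex );
        }
    }
*/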

#if ((configUSE_MUTEXES == 1) && (configSUPPORT_STATIC_ALLOCATION == 1))

QueueHandle_t xQueueCreateMutexStatic(
    const uint8_t ucQueueType,
    StaticQueue_t *pxStaticQueue)
{
    QueueHandle_t xNewQueue;
    const UBaseType_t uxMutexLength = (UBaseType_t)1,
                      uxMutexSize = (UBaseType_t)0;

    /* Prevent compiler warnings about unused parameters if
    configUSE_TRACE_FACILITY does not equal 1. */
    (void)ucQueueType;

    xNewQueue = xQueueGenericCreateStatic(
        uxMutexLength, uxMutexSize, NULL, pxStaticQueue, ucQueueType);
    prvInitialiseMutex((Queue_t *)xNewQueue);

    return xNewQueue;
}

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
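
/* Example usage sketch (not part of the kernel source).  Application code
reaches xQueueCreateMutexStatic() through the xSemaphoreCreateMutexStatic()
macro in semphr.h.  No storage area is needed because mutexes carry no data:

    // The variable that will hold the mutex's control structure.
    static StaticSemaphore_t xMutexBuffer;

    void vCreateMutexStatically( void )
    {
        SemaphoreHandle_t xMutex = xSemaphoreCreateMutexStatic( &xMutexBuffer );

        // No dynamic allocation is performed, so the handle can only be
        // NULL if the buffer pointer itself was invalid.
        configASSERT( xMutex != NULL );
    }
*/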

#if ((configUSE_MUTEXES == 1) && (INCLUDE_xSemaphoreGetMutexHolder == 1))

TaskHandle_t xQueueGetMutexHolder(QueueHandle_t xSemaphore)
{
    TaskHandle_t pxReturn;
    Queue_t *const pxSemaphore = (Queue_t *)xSemaphore;

    /* This function is called by xSemaphoreGetMutexHolder(), and should not
    be called directly.  Note:  This is a good way of determining if the
    calling task is the mutex holder, but not a good way of determining the
    identity of the mutex holder, as the holder may change between the
    following critical section exiting and the function returning. */
    taskENTER_CRITICAL();
    {
        if (pxSemaphore->uxQueueType == queueQUEUE_IS_MUTEX) {
            pxReturn = pxSemaphore->u.xSemaphore.xMutexHolder;
        } else {
            pxReturn = NULL;
        }
    }
    taskEXIT_CRITICAL();

    return pxReturn;
} /*lint !e818 xSemaphore cannot be a pointer to const because it is a
     typedef. */

#endif
/*-----------------------------------------------------------*/
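
/* Example usage sketch (not part of the kernel source).  Application code
reaches xQueueGetMutexHolder() through the xSemaphoreGetMutexHolder() macro in
semphr.h.  As the comment above notes, the reliable use is checking whether
the *calling* task is the holder; the function name below is an illustrative
placeholder and assumes INCLUDE_xTaskGetCurrentTaskHandle is set to 1:

    BaseType_t xCallerHoldsMutex( SemaphoreHandle_t xMutex )
    {
        // The holder can change at any time, but it can only equal the
        // calling task's own handle while the calling task holds the mutex.
        return ( xSemaphoreGetMutexHolder( xMutex ) ==
                 xTaskGetCurrentTaskHandle() ) ? pdTRUE : pdFALSE;
    }
*/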

#if ((configUSE_MUTEXES == 1) && (INCLUDE_xSemaphoreGetMutexHolder == 1))

TaskHandle_t xQueueGetMutexHolderFromISR(QueueHandle_t xSemaphore)
{
    TaskHandle_t pxReturn;

    configASSERT(xSemaphore);

    /* Mutexes cannot be used in interrupt service routines, so the mutex
    holder should not change in an ISR, and therefore a critical section is
    not required here. */
    if (((Queue_t *)xSemaphore)->uxQueueType == queueQUEUE_IS_MUTEX) {
        pxReturn = ((Queue_t *)xSemaphore)->u.xSemaphore.xMutexHolder;
    } else {
        pxReturn = NULL;
    }

    return pxReturn;
} /*lint !e818 xSemaphore cannot be a pointer to const because it is a
     typedef. */

#endif
/*-----------------------------------------------------------*/

#if (configUSE_RECURSIVE_MUTEXES == 1)

BaseType_t xQueueGiveMutexRecursive(QueueHandle_t xMutex)
{
    BaseType_t xReturn;
    Queue_t *const pxMutex = (Queue_t *)xMutex;

    configASSERT(pxMutex);

    /* If this is the task that holds the mutex then xMutexHolder will not
    change outside of this task.  If this task does not hold the mutex then
    pxMutexHolder can never coincidentally equal the task's handle, and as
    this is the only condition we are interested in it does not matter if
    pxMutexHolder is accessed simultaneously by another task.  Therefore no
    mutual exclusion is required to test the pxMutexHolder variable. */
    if (pxMutex->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle()) {
        traceGIVE_MUTEX_RECURSIVE(pxMutex);

        /* uxRecursiveCallCount cannot be zero if xMutexHolder is equal to
        the task handle, therefore no underflow check is required.  Also,
        uxRecursiveCallCount is only modified by the mutex holder, and as
        there can only be one, no mutual exclusion is required to modify the
        uxRecursiveCallCount member. */
        (pxMutex->u.xSemaphore.uxRecursiveCallCount)--;

        /* Has the recursive call count unwound to 0? */
        if (pxMutex->u.xSemaphore.uxRecursiveCallCount == (UBaseType_t)0) {
            /* Return the mutex.  This will automatically unblock any other
            task that might be waiting to access the mutex. */
            (void)xQueueGenericSend(
                pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK);
        } else {
            mtCOVERAGE_TEST_MARKER();
        }

        xReturn = pdPASS;
    } else {
        /* The mutex cannot be given because the calling task is not the
        holder. */
        xReturn = pdFAIL;

        traceGIVE_MUTEX_RECURSIVE_FAILED(pxMutex);
    }

    return xReturn;
}

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/

#if (configUSE_RECURSIVE_MUTEXES == 1)

BaseType_t xQueueTakeMutexRecursive(
    QueueHandle_t xMutex,
    TickType_t xTicksToWait)
{
    BaseType_t xReturn;
    Queue_t *const pxMutex = (Queue_t *)xMutex;

    configASSERT(pxMutex);

    /* Comments regarding mutual exclusion as per those within
    xQueueGiveMutexRecursive(). */

    traceTAKE_MUTEX_RECURSIVE(pxMutex);

    if (pxMutex->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle()) {
        (pxMutex->u.xSemaphore.uxRecursiveCallCount)++;
        xReturn = pdPASS;
    } else {
        xReturn = xQueueSemaphoreTake(pxMutex, xTicksToWait);

        /* pdPASS will only be returned if the mutex was successfully
        obtained.  The calling task may have entered the Blocked state
        before reaching here. */
        if (xReturn != pdFAIL) {
            (pxMutex->u.xSemaphore.uxRecursiveCallCount)++;
        } else {
            traceTAKE_MUTEX_RECURSIVE_FAILED(pxMutex);
        }
    }

    return xReturn;
}

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/
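
/* Example usage sketch (not part of the kernel source).  Application code
reaches the two functions above through the xSemaphoreTakeRecursive() and
xSemaphoreGiveRecursive() macros in semphr.h.  Every successful take must be
balanced by a give before the mutex becomes available to other tasks:

    SemaphoreHandle_t xRecMutex;  // Created with xSemaphoreCreateRecursiveMutex().

    void vNestedFunction( void )
    {
        if( xSemaphoreTakeRecursive( xRecMutex, portMAX_DELAY ) == pdPASS )
        {
            // Safe to call other functions that also take xRecMutex; the
            // holder just increments uxRecursiveCallCount on re-entry.
            ( void ) xSemaphoreGiveRecursive( xRecMutex );
        }
    }
*/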

#if ( \
    (configUSE_COUNTING_SEMAPHORES == 1) && \
    (configSUPPORT_STATIC_ALLOCATION == 1))

QueueHandle_t xQueueCreateCountingSemaphoreStatic(
    const UBaseType_t uxMaxCount,
    const UBaseType_t uxInitialCount,
    StaticQueue_t *pxStaticQueue)
{
    QueueHandle_t xHandle;

    configASSERT(uxMaxCount != 0);
    configASSERT(uxInitialCount <= uxMaxCount);

    xHandle = xQueueGenericCreateStatic(
        uxMaxCount,
        queueSEMAPHORE_QUEUE_ITEM_LENGTH,
        NULL,
        pxStaticQueue,
        queueQUEUE_TYPE_COUNTING_SEMAPHORE);

    if (xHandle != NULL) {
        ((Queue_t *)xHandle)->uxMessagesWaiting = uxInitialCount;

        traceCREATE_COUNTING_SEMAPHORE();
    } else {
        traceCREATE_COUNTING_SEMAPHORE_FAILED();
    }

    return xHandle;
}

#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( \
          configSUPPORT_STATIC_ALLOCATION == 1 ) ) */
/*-----------------------------------------------------------*/

#if ( \
    (configUSE_COUNTING_SEMAPHORES == 1) && \
    (configSUPPORT_DYNAMIC_ALLOCATION == 1))

QueueHandle_t xQueueCreateCountingSemaphore(
    const UBaseType_t uxMaxCount,
    const UBaseType_t uxInitialCount)
{
    QueueHandle_t xHandle;

    configASSERT(uxMaxCount != 0);
    configASSERT(uxInitialCount <= uxMaxCount);

    xHandle = xQueueGenericCreate(
        uxMaxCount,
        queueSEMAPHORE_QUEUE_ITEM_LENGTH,
        queueQUEUE_TYPE_COUNTING_SEMAPHORE);

    if (xHandle != NULL) {
        ((Queue_t *)xHandle)->uxMessagesWaiting = uxInitialCount;

        traceCREATE_COUNTING_SEMAPHORE();
    } else {
        traceCREATE_COUNTING_SEMAPHORE_FAILED();
    }

    return xHandle;
}

#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( \
          configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
/*-----------------------------------------------------------*/
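
/* Example usage sketch (not part of the kernel source).  Application code
reaches xQueueCreateCountingSemaphore() through the xSemaphoreCreateCounting()
macro in semphr.h.  A counting semaphore is just a queue whose item size is
zero, so only uxMessagesWaiting (the count) changes on give and take:

    void vCreateResourcePool( void )
    {
        // Count starts at 5 and can never exceed 5, e.g. to guard a pool
        // of five identical DMA buffers.
        SemaphoreHandle_t xPool = xSemaphoreCreateCounting( 5, 5 );

        if( xPool != NULL )
        {
            if( xSemaphoreTake( xPool, pdMS_TO_TICKS( 10 ) ) == pdPASS )
            {
                // One buffer claimed; return it later with xSemaphoreGive().
            }
        }
    }
*/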

BaseType_t xQueueGenericSend(
    QueueHandle_t xQueue,
    const void *const pvItemToQueue,
    TickType_t xTicksToWait,
    const BaseType_t xCopyPosition)
{
    BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
    TimeOut_t xTimeOut;
    Queue_t *const pxQueue = xQueue;

    configASSERT(pxQueue);
    configASSERT(
        !((pvItemToQueue == NULL) && (pxQueue->uxItemSize != (UBaseType_t)0U)));
    configASSERT(
        !((xCopyPosition == queueOVERWRITE) && (pxQueue->uxLength != 1)));
#if ((INCLUDE_xTaskGetSchedulerState == 1) || (configUSE_TIMERS == 1))
    {
        configASSERT(
            !((xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED) &&
              (xTicksToWait != 0)));
    }
#endif

    /*lint -save -e904 This function relaxes the coding standard somewhat to
    allow return statements within the function itself.  This is done in the
    interest of execution time efficiency. */
    for (;;) {
        taskENTER_CRITICAL();
        {
            /* Is there room on the queue now?  The running task must be the
            highest priority task wanting to access the queue.  If the head item
            in the queue is to be overwritten then it does not matter if the
            queue is full. */
            if ((pxQueue->uxMessagesWaiting < pxQueue->uxLength) ||
                (xCopyPosition == queueOVERWRITE)) {
                traceQUEUE_SEND(pxQueue);

#if (configUSE_QUEUE_SETS == 1)
                {
                    const UBaseType_t uxPreviousMessagesWaiting =
                        pxQueue->uxMessagesWaiting;

                    xYieldRequired = prvCopyDataToQueue(
                        pxQueue, pvItemToQueue, xCopyPosition);

                    if (pxQueue->pxQueueSetContainer != NULL) {
                        if ((xCopyPosition == queueOVERWRITE) &&
                            (uxPreviousMessagesWaiting != (UBaseType_t)0)) {
                            /* Do not notify the queue set as an existing item
                            was overwritten in the queue so the number of items
                            in the queue has not changed. */
                            mtCOVERAGE_TEST_MARKER();
                        } else if (
                            prvNotifyQueueSetContainer(pxQueue) != pdFALSE) {
                            /* The queue is a member of a queue set, and posting
                            to the queue set caused a higher priority task to
                            unblock. A context switch is required. */
                            queueYIELD_IF_USING_PREEMPTION();
                        } else {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    } else {
                        /* If there was a task waiting for data to arrive on the
                        queue then unblock it now. */
                        if (listLIST_IS_EMPTY(&(
                                pxQueue->xTasksWaitingToReceive)) == pdFALSE) {
                            if (xTaskRemoveFromEventList(
                                    &(pxQueue->xTasksWaitingToReceive)) !=
                                pdFALSE) {
                                /* The unblocked task has a priority higher than
                                our own so yield immediately.  Yes it is ok to
                                do this from within the critical section - the
                                kernel takes care of that. */
                                queueYIELD_IF_USING_PREEMPTION();
                            } else {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        } else if (xYieldRequired != pdFALSE) {
                            /* This path is a special case that will only get
                            executed if the task was holding multiple mutexes
                            and the mutexes were given back in an order that is
                            different to that in which they were taken. */
                            queueYIELD_IF_USING_PREEMPTION();
                        } else {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
#else /* configUSE_QUEUE_SETS */
                {
                    xYieldRequired = prvCopyDataToQueue(
                        pxQueue, pvItemToQueue, xCopyPosition);

                    /* If there was a task waiting for data to arrive on the
                    queue then unblock it now. */
                    if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToReceive)) ==
                        pdFALSE) {
                        if (xTaskRemoveFromEventList(&(
                                pxQueue->xTasksWaitingToReceive)) != pdFALSE) {
                            /* The unblocked task has a priority higher than
                            our own so yield immediately.  Yes it is ok to do
                            this from within the critical section - the kernel
                            takes care of that. */
                            queueYIELD_IF_USING_PREEMPTION();
                        } else {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    } else if (xYieldRequired != pdFALSE) {
                        /* This path is a special case that will only get
                        executed if the task was holding multiple mutexes and
                        the mutexes were given back in an order that is
                        different to that in which they were taken. */
                        queueYIELD_IF_USING_PREEMPTION();
                    } else {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
#endif /* configUSE_QUEUE_SETS */

                taskEXIT_CRITICAL();
                return pdPASS;
            } else {
                if (xTicksToWait == (TickType_t)0) {
                    /* The queue was full and no block time is specified (or
                    the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();

                    /* Return to the original privilege level before exiting
                    the function. */
                    traceQUEUE_SEND_FAILED(pxQueue);
                    return errQUEUE_FULL;
                } else if (xEntryTimeSet == pdFALSE) {
                    /* The queue was full and a block time was specified so
                    configure the timeout structure. */
                    vTaskInternalSetTimeOutState(&xTimeOut);
                    xEntryTimeSet = pdTRUE;
                } else {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
        now the critical section has been exited. */

        vTaskSuspendAll();
        prvLockQueue(pxQueue);

        /* Update the timeout state to see if it has expired yet. */
        if (xTaskCheckForTimeOut(&xTimeOut, &xTicksToWait) == pdFALSE) {
            if (prvIsQueueFull(pxQueue) != pdFALSE) {
                traceBLOCKING_ON_QUEUE_SEND(pxQueue);
                vTaskPlaceOnEventList(
                    &(pxQueue->xTasksWaitingToSend), xTicksToWait);

                /* Unlocking the queue means queue events can affect the
                event list.  It is possible that interrupts occurring now
                remove this task from the event list again - but as the
                scheduler is suspended the task will go onto the pending
                ready list instead of the actual ready list. */
                prvUnlockQueue(pxQueue);

                /* Resuming the scheduler will move tasks from the pending
                ready list into the ready list - so it is feasible that this
                task is already in a ready list before it yields - in which
                case the yield will not cause a context switch unless there
                is also a higher priority task in the pending ready list. */
                if (xTaskResumeAll() == pdFALSE) {
                    portYIELD_WITHIN_API();
                }
            } else {
                /* Try again. */
                prvUnlockQueue(pxQueue);
                (void)xTaskResumeAll();
            }
        } else {
            /* The timeout has expired. */
            prvUnlockQueue(pxQueue);
            (void)xTaskResumeAll();

            traceQUEUE_SEND_FAILED(pxQueue);
            return errQUEUE_FULL;
        }
    } /*lint -restore */
}
/*-----------------------------------------------------------*/
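
/* Example usage sketch (not part of the kernel source).  Application code
reaches xQueueGenericSend() through the xQueueSend(), xQueueSendToBack(),
xQueueSendToFront() and xQueueOverwrite() macros in queue.h; the task and
queue below are illustrative placeholders:

    void vProducerTask( void *pvParameters )
    {
        QueueHandle_t xQueue = ( QueueHandle_t ) pvParameters;
        uint32_t ulValue = 0;

        for( ;; )
        {
            // Copy ulValue into the queue, blocking for at most 100ms if
            // the queue is full.  errQUEUE_FULL is returned on timeout.
            if( xQueueSend( xQueue, &ulValue, pdMS_TO_TICKS( 100 ) ) == pdPASS )
            {
                ulValue++;
            }
        }
    }
*/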

BaseType_t xQueueGenericSendFromISR(
    QueueHandle_t xQueue,
    const void *const pvItemToQueue,
    BaseType_t *const pxHigherPriorityTaskWoken,
    const BaseType_t xCopyPosition)
{
    BaseType_t xReturn;
    UBaseType_t uxSavedInterruptStatus;
    Queue_t *const pxQueue = xQueue;

    configASSERT(pxQueue);
    configASSERT(
        !((pvItemToQueue == NULL) && (pxQueue->uxItemSize != (UBaseType_t)0U)));
    configASSERT(
        !((xCopyPosition == queueOVERWRITE) && (pxQueue->uxLength != 1)));

    /* RTOS ports that support interrupt nesting have the concept of a maximum
    system call (or maximum API call) interrupt priority.  Interrupts that are
    above the maximum system call priority are kept permanently enabled, even
    when the RTOS kernel is in a critical section, but cannot make any calls to
    FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
    then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
    failure if a FreeRTOS API function is called from an interrupt that has been
    assigned a priority above the configured maximum system call priority.
    Only FreeRTOS functions that end in FromISR can be called from interrupts
    that have been assigned a priority at or (logically) below the maximum
    system call interrupt priority.  FreeRTOS maintains a separate interrupt
    safe API to ensure interrupt entry is as fast and as simple as possible.
    More information (albeit Cortex-M specific) is provided on the following
    link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    /* Similar to xQueueGenericSend, except without blocking if there is no room
    in the queue.  Also don't directly wake a task that was blocked on a queue
    read, instead return a flag to say whether a context switch is required or
    not (i.e. has a task with a higher priority than us been woken by this
    post). */
    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        if ((pxQueue->uxMessagesWaiting < pxQueue->uxLength) ||
            (xCopyPosition == queueOVERWRITE)) {
            const int8_t cTxLock = pxQueue->cTxLock;
            const UBaseType_t uxPreviousMessagesWaiting =
                pxQueue->uxMessagesWaiting;

            traceQUEUE_SEND_FROM_ISR(pxQueue);

            /* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a
            semaphore or mutex.  That means prvCopyDataToQueue() cannot result
            in a task disinheriting a priority and prvCopyDataToQueue() can be
            called here even though the disinherit function does not check if
            the scheduler is suspended before accessing the ready lists. */
            (void)prvCopyDataToQueue(pxQueue, pvItemToQueue, xCopyPosition);

            /* The event list is not altered if the queue is locked.  This will
            be done when the queue is unlocked later. */
            if (cTxLock == queueUNLOCKED) {
#if (configUSE_QUEUE_SETS == 1)
                {
                    if (pxQueue->pxQueueSetContainer != NULL) {
                        if ((xCopyPosition == queueOVERWRITE) &&
                            (uxPreviousMessagesWaiting != (UBaseType_t)0)) {
                            /* Do not notify the queue set as an existing item
                            was overwritten in the queue so the number of items
                            in the queue has not changed. */
                            mtCOVERAGE_TEST_MARKER();
                        } else if (
                            prvNotifyQueueSetContainer(pxQueue) != pdFALSE) {
                            /* The queue is a member of a queue set, and posting
                            to the queue set caused a higher priority task to
                            unblock.  A context switch is required. */
                            if (pxHigherPriorityTaskWoken != NULL) {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            } else {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        } else {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    } else {
                        if (listLIST_IS_EMPTY(&(
                                pxQueue->xTasksWaitingToReceive)) == pdFALSE) {
                            if (xTaskRemoveFromEventList(
                                    &(pxQueue->xTasksWaitingToReceive)) !=
                                pdFALSE) {
                                /* The task waiting has a higher priority so
                                record that a context switch is required. */
                                if (pxHigherPriorityTaskWoken != NULL) {
                                    *pxHigherPriorityTaskWoken = pdTRUE;
                                } else {
                                    mtCOVERAGE_TEST_MARKER();
                                }
                            } else {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        } else {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
#else /* configUSE_QUEUE_SETS */
                {
                    if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToReceive)) ==
                        pdFALSE) {
                        if (xTaskRemoveFromEventList(&(
                                pxQueue->xTasksWaitingToReceive)) != pdFALSE) {
                            /* The task waiting has a higher priority so record
                            that a context switch is required. */
                            if (pxHigherPriorityTaskWoken != NULL) {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            } else {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        } else {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    } else {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    /* Not used in this path. */
                    (void)uxPreviousMessagesWaiting;
                }
#endif /* configUSE_QUEUE_SETS */
            } else {
                /* Increment the lock count so the task that unlocks the queue
                knows that data was posted while it was locked. */
                pxQueue->cTxLock = (int8_t)(cTxLock + 1);
            }

            xReturn = pdPASS;
        } else {
            traceQUEUE_SEND_FROM_ISR_FAILED(pxQueue);
            xReturn = errQUEUE_FULL;
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR(uxSavedInterruptStatus);

    return xReturn;
}
/*-----------------------------------------------------------*/
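
/* Example usage sketch (not part of the kernel source).  Application code
reaches xQueueGenericSendFromISR() through the xQueueSendFromISR() macro in
queue.h; the handler, queue handle and data source below are illustrative
placeholders:

    void vRxInterruptHandler( void )
    {
        BaseType_t xHigherPriorityTaskWoken = pdFALSE;
        uint32_t ulReceived = ulReadDataFromPeripheral();  // Placeholder.

        // Never block in an ISR - the call fails with errQUEUE_FULL if the
        // queue has no space.
        ( void ) xQueueSendFromISR(
            xRxQueue, &ulReceived, &xHigherPriorityTaskWoken );

        // Request a context switch on exit if a higher priority task was
        // unblocked by the send.
        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
    }
*/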
1129 
xQueueGiveFromISR(QueueHandle_t xQueue,BaseType_t * const pxHigherPriorityTaskWoken)1130 BaseType_t xQueueGiveFromISR(
1131     QueueHandle_t xQueue,
1132     BaseType_t *const pxHigherPriorityTaskWoken)
1133 {
1134     BaseType_t xReturn;
1135     UBaseType_t uxSavedInterruptStatus;
1136     Queue_t *const pxQueue = xQueue;
1137 
1138     /* Similar to xQueueGenericSendFromISR() but used with semaphores where the
1139     item size is 0.  Don't directly wake a task that was blocked on a queue
1140     read, instead return a flag to say whether a context switch is required or
1141     not (i.e. has a task with a higher priority than us been woken by this
1142     post). */
1143 
1144     configASSERT(pxQueue);
1145 
1146     /* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR()
1147     if the item size is not 0. */
1148     configASSERT(pxQueue->uxItemSize == 0);
1149 
1150     /* Normally a mutex would not be given from an interrupt, especially if
1151     there is a mutex holder, as priority inheritance makes no sense for an
1152     interrupts, only tasks. */
1153     configASSERT(
1154         !((pxQueue->uxQueueType == queueQUEUE_IS_MUTEX) &&
1155           (pxQueue->u.xSemaphore.xMutexHolder != NULL)));
1156 
1157     /* RTOS ports that support interrupt nesting have the concept of a maximum
1158     system call (or maximum API call) interrupt priority.  Interrupts that are
1159     above the maximum system call priority are kept permanently enabled, even
1160     when the RTOS kernel is in a critical section, but cannot make any calls to
1161     FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
1162     then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
1163     failure if a FreeRTOS API function is called from an interrupt that has been
1164     assigned a priority above the configured maximum system call priority.
1165     Only FreeRTOS functions that end in FromISR can be called from interrupts
1166     that have been assigned a priority at or (logically) below the maximum
1167     system call	interrupt priority.  FreeRTOS maintains a separate interrupt
1168     safe API to ensure interrupt entry is as fast and as simple as possible.
1169     More information (albeit Cortex-M specific) is provided on the following
1170     link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
1171     portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
1172 
1173     uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
1174     {
1175         const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
1176 
1177         /* When the queue is used to implement a semaphore no data is ever
1178         moved through the queue but it is still valid to see if the queue 'has
1179         space'. */
1180         if (uxMessagesWaiting < pxQueue->uxLength) {
1181             const int8_t cTxLock = pxQueue->cTxLock;
1182 
1183             traceQUEUE_SEND_FROM_ISR(pxQueue);
1184 
1185             /* A task can only have an inherited priority if it is a mutex
1186             holder - and if there is a mutex holder then the mutex cannot be
1187             given from an ISR.  As this is the ISR version of the function it
1188             can be assumed there is no mutex holder and no need to determine if
1189             priority disinheritance is needed.  Simply increase the count of
1190             messages (semaphores) available. */
1191             pxQueue->uxMessagesWaiting = uxMessagesWaiting + (UBaseType_t)1;
1192 
1193             /* The event list is not altered if the queue is locked.  This will
1194             be done when the queue is unlocked later. */
1195             if (cTxLock == queueUNLOCKED) {
1196 #if (configUSE_QUEUE_SETS == 1)
1197                 {
1198                     if (pxQueue->pxQueueSetContainer != NULL) {
1199                         if (prvNotifyQueueSetContainer(pxQueue) != pdFALSE) {
1200                             /* The semaphore is a member of a queue set, and
1201                             posting	to the queue set caused a higher priority
1202                             task to	unblock.  A context switch is required. */
1203                             if (pxHigherPriorityTaskWoken != NULL) {
1204                                 *pxHigherPriorityTaskWoken = pdTRUE;
1205                             } else {
1206                                 mtCOVERAGE_TEST_MARKER();
1207                             }
1208                         } else {
1209                             mtCOVERAGE_TEST_MARKER();
1210                         }
1211                     } else {
1212                         if (listLIST_IS_EMPTY(&(
1213                                 pxQueue->xTasksWaitingToReceive)) == pdFALSE) {
1214                             if (xTaskRemoveFromEventList(
1215                                     &(pxQueue->xTasksWaitingToReceive)) !=
1216                                 pdFALSE) {
1217                                 /* The task waiting has a higher priority so
1218                                 record that a context switch is required. */
1219                                 if (pxHigherPriorityTaskWoken != NULL) {
1220                                     *pxHigherPriorityTaskWoken = pdTRUE;
1221                                 } else {
1222                                     mtCOVERAGE_TEST_MARKER();
1223                                 }
1224                             } else {
1225                                 mtCOVERAGE_TEST_MARKER();
1226                             }
1227                         } else {
1228                             mtCOVERAGE_TEST_MARKER();
1229                         }
1230                     }
1231                 }
1232 #else /* configUSE_QUEUE_SETS */
1233                 {
1234                     if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToReceive)) ==
1235                         pdFALSE) {
1236                         if (xTaskRemoveFromEventList(&(
1237                                 pxQueue->xTasksWaitingToReceive)) != pdFALSE) {
1238                             /* The task waiting has a higher priority so record
1239                             that a context	switch is required. */
1240                             if (pxHigherPriorityTaskWoken != NULL) {
1241                                 *pxHigherPriorityTaskWoken = pdTRUE;
1242                             } else {
1243                                 mtCOVERAGE_TEST_MARKER();
1244                             }
1245                         } else {
1246                             mtCOVERAGE_TEST_MARKER();
1247                         }
1248                     } else {
1249                         mtCOVERAGE_TEST_MARKER();
1250                     }
1251                 }
1252 #endif /* configUSE_QUEUE_SETS */
1253             } else {
1254                 /* Increment the lock count so the task that unlocks the queue
1255                 knows that data was posted while it was locked. */
1256                 pxQueue->cTxLock = (int8_t)(cTxLock + 1);
1257             }
1258 
1259             xReturn = pdPASS;
1260         } else {
1261             traceQUEUE_SEND_FROM_ISR_FAILED(pxQueue);
1262             xReturn = errQUEUE_FULL;
1263         }
1264     }
1265     portCLEAR_INTERRUPT_MASK_FROM_ISR(uxSavedInterruptStatus);
1266 
1267     return xReturn;
1268 }
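/* Illustrative use of the ISR give path above.  Application code normally
reaches it through the xSemaphoreGiveFromISR() macro in semphr.h.  A minimal
sketch, assuming xBinarySemaphore is a hypothetical handle created elsewhere
with xSemaphoreCreateBinary():

    void vExampleInterruptHandler( void )
    {
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

        // 'Give' the semaphore to unblock a waiting task.  An ISR cannot
        // block, so the give either succeeds immediately or fails because
        // the semaphore is already available.
        ( void ) xSemaphoreGiveFromISR( xBinarySemaphore, &xHigherPriorityTaskWoken );

        // If giving the semaphore unblocked a task with a priority above
        // that of the interrupted task, request a context switch on exit.
        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
    }
*/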
1269 /*-----------------------------------------------------------*/
1270 
1271 BaseType_t xQueueReceive(
1272     QueueHandle_t xQueue,
1273     void *const pvBuffer,
1274     TickType_t xTicksToWait)
1275 {
1276     BaseType_t xEntryTimeSet = pdFALSE;
1277     TimeOut_t xTimeOut;
1278     Queue_t *const pxQueue = xQueue;
1279 
1280     /* Check the pointer is not NULL. */
1281     configASSERT((pxQueue));
1282 
1283     /* The buffer into which data is received can only be NULL if the data size
1284     is zero (so no data is copied into the buffer). */
1285     configASSERT(
1286         !(((pvBuffer) == NULL) && ((pxQueue)->uxItemSize != (UBaseType_t)0U)));
1287 
1288 /* Cannot block if the scheduler is suspended. */
1289 #if ((INCLUDE_xTaskGetSchedulerState == 1) || (configUSE_TIMERS == 1))
1290     {
1291         configASSERT(
1292             !((xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED) &&
1293               (xTicksToWait != 0)));
1294     }
1295 #endif
1296 
1297     /*lint -save -e904  This function relaxes the coding standard somewhat to
1298     allow return statements within the function itself.  This is done in the
1299     interest of execution time efficiency. */
1300     for (;;) {
1301         taskENTER_CRITICAL();
1302         {
1303             const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
1304 
1305             /* Is there data in the queue now?  To be running the calling task
1306             must be the highest priority task wanting to access the queue. */
1307             if (uxMessagesWaiting > (UBaseType_t)0) {
1308                 /* Data available, remove one item. */
1309                 prvCopyDataFromQueue(pxQueue, pvBuffer);
1310                 traceQUEUE_RECEIVE(pxQueue);
1311                 pxQueue->uxMessagesWaiting = uxMessagesWaiting - (UBaseType_t)1;
1312 
1313                 /* There is now space in the queue, were any tasks waiting to
1314                 post to the queue?  If so, unblock the highest priority waiting
1315                 task. */
1316                 if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToSend)) ==
1317                     pdFALSE) {
1318                     if (xTaskRemoveFromEventList(
1319                             &(pxQueue->xTasksWaitingToSend)) != pdFALSE) {
1320                         queueYIELD_IF_USING_PREEMPTION();
1321                     } else {
1322                         mtCOVERAGE_TEST_MARKER();
1323                     }
1324                 } else {
1325                     mtCOVERAGE_TEST_MARKER();
1326                 }
1327 
1328                 taskEXIT_CRITICAL();
1329                 return pdPASS;
1330             } else {
1331                 if (xTicksToWait == (TickType_t)0) {
1332                     /* The queue was empty and no block time is specified (or
1333                     the block time has expired) so leave now. */
1334                     taskEXIT_CRITICAL();
1335                     traceQUEUE_RECEIVE_FAILED(pxQueue);
1336                     return errQUEUE_EMPTY;
1337                 } else if (xEntryTimeSet == pdFALSE) {
1338                     /* The queue was empty and a block time was specified so
1339                     configure the timeout structure. */
1340                     vTaskInternalSetTimeOutState(&xTimeOut);
1341                     xEntryTimeSet = pdTRUE;
1342                 } else {
1343                     /* Entry time was already set. */
1344                     mtCOVERAGE_TEST_MARKER();
1345                 }
1346             }
1347         }
1348         taskEXIT_CRITICAL();
1349 
1350         /* Interrupts and other tasks can send to and receive from the queue
1351         now the critical section has been exited. */
1352 
1353         vTaskSuspendAll();
1354         prvLockQueue(pxQueue);
1355 
1356         /* Update the timeout state to see if it has expired yet. */
1357         if (xTaskCheckForTimeOut(&xTimeOut, &xTicksToWait) == pdFALSE) {
1358             /* The timeout has not expired.  If the queue is still empty place
1359             the task on the list of tasks waiting to receive from the queue. */
1360             if (prvIsQueueEmpty(pxQueue) != pdFALSE) {
1361                 traceBLOCKING_ON_QUEUE_RECEIVE(pxQueue);
1362                 vTaskPlaceOnEventList(
1363                     &(pxQueue->xTasksWaitingToReceive), xTicksToWait);
1364                 prvUnlockQueue(pxQueue);
1365                 if (xTaskResumeAll() == pdFALSE) {
1366                     portYIELD_WITHIN_API();
1367                 } else {
1368                     mtCOVERAGE_TEST_MARKER();
1369                 }
1370             } else {
1371                 /* The queue contains data again.  Loop back to try and read the
1372                 data. */
1373                 prvUnlockQueue(pxQueue);
1374                 (void)xTaskResumeAll();
1375             }
1376         } else {
1377             /* Timed out.  If there is no data in the queue exit, otherwise loop
1378             back and attempt to read the data. */
1379             prvUnlockQueue(pxQueue);
1380             (void)xTaskResumeAll();
1381 
1382             if (prvIsQueueEmpty(pxQueue) != pdFALSE) {
1383                 traceQUEUE_RECEIVE_FAILED(pxQueue);
1384                 return errQUEUE_EMPTY;
1385             } else {
1386                 mtCOVERAGE_TEST_MARKER();
1387             }
1388         }
1389     } /*lint -restore */
1390 }
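/* Illustrative use of xQueueReceive() from a task.  xExampleQueue is a
hypothetical handle, assumed to have been created elsewhere with
xQueueCreate( 10, sizeof( uint32_t ) ):

    void vConsumerTask( void *pvParameters )
    {
    uint32_t ulReceived;

        for( ;; )
        {
            // Block for up to 100 ticks waiting for an item to arrive.  The
            // item is copied into ulReceived and removed from the queue.
            if( xQueueReceive( xExampleQueue, &ulReceived, ( TickType_t ) 100 ) == pdPASS )
            {
                // Process ulReceived.
            }
            else
            {
                // Timed out - the errQUEUE_EMPTY path.
            }
        }
    }
*/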
1391 /*-----------------------------------------------------------*/
1392 
1393 BaseType_t xQueueSemaphoreTake(QueueHandle_t xQueue, TickType_t xTicksToWait)
1394 {
1395     BaseType_t xEntryTimeSet = pdFALSE;
1396     TimeOut_t xTimeOut;
1397     Queue_t *const pxQueue = xQueue;
1398 
1399 #if (configUSE_MUTEXES == 1)
1400     BaseType_t xInheritanceOccurred = pdFALSE;
1401 #endif
1402 
1403     /* Check the queue pointer is not NULL. */
1404     configASSERT((pxQueue));
1405 
1406     /* Check this really is a semaphore, in which case the item size will be
1407     0. */
1408     configASSERT(pxQueue->uxItemSize == 0);
1409 
1410 /* Cannot block if the scheduler is suspended. */
1411 #if ((INCLUDE_xTaskGetSchedulerState == 1) || (configUSE_TIMERS == 1))
1412     {
1413         configASSERT(
1414             !((xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED) &&
1415               (xTicksToWait != 0)));
1416     }
1417 #endif
1418 
1419     /*lint -save -e904 This function relaxes the coding standard somewhat to
1420     allow return statements within the function itself.  This is done in the
1421     interest of execution time efficiency. */
1422     for (;;) {
1423         taskENTER_CRITICAL();
1424         {
1425             /* Semaphores are queues with an item size of 0, and where the
1426             number of messages in the queue is the semaphore's count value. */
1427             const UBaseType_t uxSemaphoreCount = pxQueue->uxMessagesWaiting;
1428 
1429             /* Is there data in the queue now?  To be running the calling task
1430             must be the highest priority task wanting to access the queue. */
1431             if (uxSemaphoreCount > (UBaseType_t)0) {
1432                 traceQUEUE_RECEIVE(pxQueue);
1433 
1434                 /* Semaphores are queues with a data size of zero and where the
1435                 messages waiting is the semaphore's count.  Reduce the count. */
1436                 pxQueue->uxMessagesWaiting = uxSemaphoreCount - (UBaseType_t)1;
1437 
1438 #if (configUSE_MUTEXES == 1)
1439                 {
1440                     if (pxQueue->uxQueueType == queueQUEUE_IS_MUTEX) {
1441                         /* Record the information required to implement
1442                         priority inheritance should it become necessary. */
1443                         pxQueue->u.xSemaphore.xMutexHolder =
1444                             pvTaskIncrementMutexHeldCount();
1445                     } else {
1446                         mtCOVERAGE_TEST_MARKER();
1447                     }
1448                 }
1449 #endif /* configUSE_MUTEXES */
1450 
1451                 /* Check to see if other tasks are blocked waiting to give the
1452                 semaphore, and if so, unblock the highest priority such task. */
1453                 if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToSend)) ==
1454                     pdFALSE) {
1455                     if (xTaskRemoveFromEventList(
1456                             &(pxQueue->xTasksWaitingToSend)) != pdFALSE) {
1457                         queueYIELD_IF_USING_PREEMPTION();
1458                     } else {
1459                         mtCOVERAGE_TEST_MARKER();
1460                     }
1461                 } else {
1462                     mtCOVERAGE_TEST_MARKER();
1463                 }
1464 
1465                 taskEXIT_CRITICAL();
1466                 return pdPASS;
1467             } else {
1468                 if (xTicksToWait == (TickType_t)0) {
1469 /* For inheritance to have occurred there must have been an
1470 initial timeout, and an adjusted timeout cannot become 0, as
1471 if it were 0 the function would have exited. */
1472 #if (configUSE_MUTEXES == 1)
1473                     {
1474                         configASSERT(xInheritanceOccurred == pdFALSE);
1475                     }
1476 #endif /* configUSE_MUTEXES */
1477 
1478                     /* The semaphore count was 0 and no block time is specified
1479                     (or the block time has expired) so exit now. */
1480                     taskEXIT_CRITICAL();
1481                     traceQUEUE_RECEIVE_FAILED(pxQueue);
1482                     return errQUEUE_EMPTY;
1483                 } else if (xEntryTimeSet == pdFALSE) {
1484                     /* The semaphore count was 0 and a block time was specified
1485                     so configure the timeout structure ready to block. */
1486                     vTaskInternalSetTimeOutState(&xTimeOut);
1487                     xEntryTimeSet = pdTRUE;
1488                 } else {
1489                     /* Entry time was already set. */
1490                     mtCOVERAGE_TEST_MARKER();
1491                 }
1492             }
1493         }
1494         taskEXIT_CRITICAL();
1495 
1496         /* Interrupts and other tasks can give to and take from the semaphore
1497         now the critical section has been exited. */
1498 
1499         vTaskSuspendAll();
1500         prvLockQueue(pxQueue);
1501 
1502         /* Update the timeout state to see if it has expired yet. */
1503         if (xTaskCheckForTimeOut(&xTimeOut, &xTicksToWait) == pdFALSE) {
1504             /* A block time is specified and not expired.  If the semaphore
1505             count is 0 then enter the Blocked state to wait for a semaphore to
1506             become available.  As semaphores are implemented with queues the
1507             queue being empty is equivalent to the semaphore count being 0. */
1508             if (prvIsQueueEmpty(pxQueue) != pdFALSE) {
1509                 traceBLOCKING_ON_QUEUE_RECEIVE(pxQueue);
1510 
1511 #if (configUSE_MUTEXES == 1)
1512                 {
1513                     if (pxQueue->uxQueueType == queueQUEUE_IS_MUTEX) {
1514                         taskENTER_CRITICAL();
1515                         {
1516                             xInheritanceOccurred = xTaskPriorityInherit(
1517                                 pxQueue->u.xSemaphore.xMutexHolder);
1518                         }
1519                         taskEXIT_CRITICAL();
1520                     } else {
1521                         mtCOVERAGE_TEST_MARKER();
1522                     }
1523                 }
1524 #endif
1525 
1526                 vTaskPlaceOnEventList(
1527                     &(pxQueue->xTasksWaitingToReceive), xTicksToWait);
1528                 prvUnlockQueue(pxQueue);
1529                 if (xTaskResumeAll() == pdFALSE) {
1530                     portYIELD_WITHIN_API();
1531                 } else {
1532                     mtCOVERAGE_TEST_MARKER();
1533                 }
1534             } else {
1535                 /* There was no timeout and the semaphore count was not 0, so
1536                 attempt to take the semaphore again. */
1537                 prvUnlockQueue(pxQueue);
1538                 (void)xTaskResumeAll();
1539             }
1540         } else {
1541             /* Timed out. */
1542             prvUnlockQueue(pxQueue);
1543             (void)xTaskResumeAll();
1544 
1545             /* If the semaphore count is 0 exit now as the timeout has
1546             expired.  Otherwise return to attempt to take the semaphore that is
1547             known to be available.  As semaphores are implemented by queues the
1548             queue being empty is equivalent to the semaphore count being 0. */
1549             if (prvIsQueueEmpty(pxQueue) != pdFALSE) {
1550 #if (configUSE_MUTEXES == 1)
1551                 {
1552                     /* xInheritanceOccurred could only have been set if
1553                     pxQueue->uxQueueType == queueQUEUE_IS_MUTEX, so there is no
1554                     need to test the mutex type again to check it is actually a
1555                     mutex. */
1556                     if (xInheritanceOccurred != pdFALSE) {
1557                         taskENTER_CRITICAL();
1558                         {
1559                             UBaseType_t uxHighestWaitingPriority;
1560 
1561                             /* This task blocking on the mutex caused another
1562                             task to inherit this task's priority.  Now this task
1563                             has timed out the priority should be disinherited
1564                             again, but only as low as the next highest priority
1565                             task that is waiting for the same mutex. */
1566                             uxHighestWaitingPriority =
1567                                 prvGetDisinheritPriorityAfterTimeout(pxQueue);
1568                             vTaskPriorityDisinheritAfterTimeout(
1569                                 pxQueue->u.xSemaphore.xMutexHolder,
1570                                 uxHighestWaitingPriority);
1571                         }
1572                         taskEXIT_CRITICAL();
1573                     }
1574                 }
1575 #endif /* configUSE_MUTEXES */
1576 
1577                 traceQUEUE_RECEIVE_FAILED(pxQueue);
1578                 return errQUEUE_EMPTY;
1579             } else {
1580                 mtCOVERAGE_TEST_MARKER();
1581             }
1582         }
1583     } /*lint -restore */
1584 }
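/* xQueueSemaphoreTake() is normally reached through the xSemaphoreTake()
macro in semphr.h.  A minimal sketch, assuming xMutex is a hypothetical
handle created elsewhere with xSemaphoreCreateMutex():

    void vAccessSharedResource( void )
    {
        // Block for up to 10 ticks waiting for the mutex.  While this task
        // is blocked a lower priority holder may temporarily inherit this
        // task's priority, as implemented above.
        if( xSemaphoreTake( xMutex, ( TickType_t ) 10 ) == pdTRUE )
        {
            // Access the resource guarded by the mutex...

            // ...then give the mutex back, which disinherits any inherited
            // priority.
            ( void ) xSemaphoreGive( xMutex );
        }
    }
*/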
1585 /*-----------------------------------------------------------*/
1586 
1587 BaseType_t xQueuePeek(
1588     QueueHandle_t xQueue,
1589     void *const pvBuffer,
1590     TickType_t xTicksToWait)
1591 {
1592     BaseType_t xEntryTimeSet = pdFALSE;
1593     TimeOut_t xTimeOut;
1594     int8_t *pcOriginalReadPosition;
1595     Queue_t *const pxQueue = xQueue;
1596 
1597     /* Check the pointer is not NULL. */
1598     configASSERT((pxQueue));
1599 
1600     /* The buffer into which data is received can only be NULL if the data size
1601     is zero (so no data is copied into the buffer). */
1602     configASSERT(
1603         !(((pvBuffer) == NULL) && ((pxQueue)->uxItemSize != (UBaseType_t)0U)));
1604 
1605 /* Cannot block if the scheduler is suspended. */
1606 #if ((INCLUDE_xTaskGetSchedulerState == 1) || (configUSE_TIMERS == 1))
1607     {
1608         configASSERT(
1609             !((xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED) &&
1610               (xTicksToWait != 0)));
1611     }
1612 #endif
1613 
1614     /*lint -save -e904  This function relaxes the coding standard somewhat to
1615     allow return statements within the function itself.  This is done in the
1616     interest of execution time efficiency. */
1617     for (;;) {
1618         taskENTER_CRITICAL();
1619         {
1620             const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
1621 
1622             /* Is there data in the queue now?  To be running the calling task
1623             must be the highest priority task wanting to access the queue. */
1624             if (uxMessagesWaiting > (UBaseType_t)0) {
1625                 /* Remember the read position so it can be reset after the data
1626                 is read from the queue as this function is only peeking the
1627                 data, not removing it. */
1628                 pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom;
1629 
1630                 prvCopyDataFromQueue(pxQueue, pvBuffer);
1631                 traceQUEUE_PEEK(pxQueue);
1632 
1633                 /* The data is not being removed, so reset the read pointer. */
1634                 pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition;
1635 
1636                 /* The data is being left in the queue, so see if there are
1637                 any other tasks waiting for the data. */
1638                 if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToReceive)) ==
1639                     pdFALSE) {
1640                     if (xTaskRemoveFromEventList(
1641                             &(pxQueue->xTasksWaitingToReceive)) != pdFALSE) {
1642                         /* The task waiting has a higher priority than this
1643                          * task. */
1644                         queueYIELD_IF_USING_PREEMPTION();
1645                     } else {
1646                         mtCOVERAGE_TEST_MARKER();
1647                     }
1648                 } else {
1649                     mtCOVERAGE_TEST_MARKER();
1650                 }
1651 
1652                 taskEXIT_CRITICAL();
1653                 return pdPASS;
1654             } else {
1655                 if (xTicksToWait == (TickType_t)0) {
1656                     /* The queue was empty and no block time is specified (or
1657                     the block time has expired) so leave now. */
1658                     taskEXIT_CRITICAL();
1659                     traceQUEUE_PEEK_FAILED(pxQueue);
1660                     return errQUEUE_EMPTY;
1661                 } else if (xEntryTimeSet == pdFALSE) {
1662                     /* The queue was empty and a block time was specified so
1663                     configure the timeout structure ready to enter the blocked
1664                     state. */
1665                     vTaskInternalSetTimeOutState(&xTimeOut);
1666                     xEntryTimeSet = pdTRUE;
1667                 } else {
1668                     /* Entry time was already set. */
1669                     mtCOVERAGE_TEST_MARKER();
1670                 }
1671             }
1672         }
1673         taskEXIT_CRITICAL();
1674 
1675         /* Interrupts and other tasks can send to and receive from the queue
1676         now the critical section has been exited. */
1677 
1678         vTaskSuspendAll();
1679         prvLockQueue(pxQueue);
1680 
1681         /* Update the timeout state to see if it has expired yet. */
1682         if (xTaskCheckForTimeOut(&xTimeOut, &xTicksToWait) == pdFALSE) {
1683             /* Timeout has not expired yet, check to see if there is data in the
1684             queue now, and if not enter the Blocked state to wait for data. */
1685             if (prvIsQueueEmpty(pxQueue) != pdFALSE) {
1686                 traceBLOCKING_ON_QUEUE_PEEK(pxQueue);
1687                 vTaskPlaceOnEventList(
1688                     &(pxQueue->xTasksWaitingToReceive), xTicksToWait);
1689                 prvUnlockQueue(pxQueue);
1690                 if (xTaskResumeAll() == pdFALSE) {
1691                     portYIELD_WITHIN_API();
1692                 } else {
1693                     mtCOVERAGE_TEST_MARKER();
1694                 }
1695             } else {
1696                 /* There is data in the queue now, so don't enter the blocked
1697                 state, instead return to try and obtain the data. */
1698                 prvUnlockQueue(pxQueue);
1699                 (void)xTaskResumeAll();
1700             }
1701         } else {
1702             /* The timeout has expired.  If there is still no data in the queue
1703             exit, otherwise go back and try to read the data again. */
1704             prvUnlockQueue(pxQueue);
1705             (void)xTaskResumeAll();
1706 
1707             if (prvIsQueueEmpty(pxQueue) != pdFALSE) {
1708                 traceQUEUE_PEEK_FAILED(pxQueue);
1709                 return errQUEUE_EMPTY;
1710             } else {
1711                 mtCOVERAGE_TEST_MARKER();
1712             }
1713         }
1714     } /*lint -restore */
1715 }
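/* Illustrative use of xQueuePeek().  The peeked item is copied out but left
on the queue, so a later xQueueReceive() (by this or another task) returns
the same item.  xDataQueue is a hypothetical handle created elsewhere:

    uint32_t ulHeadValue;

    // Wait up to 10 ticks for an item to be present, without removing it.
    if( xQueuePeek( xDataQueue, &ulHeadValue, ( TickType_t ) 10 ) == pdPASS )
    {
        // ulHeadValue holds a copy of the item still at the queue head.
    }
*/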
1716 /*-----------------------------------------------------------*/
1717 
1718 BaseType_t xQueueReceiveFromISR(
1719     QueueHandle_t xQueue,
1720     void *const pvBuffer,
1721     BaseType_t *const pxHigherPriorityTaskWoken)
1722 {
1723     BaseType_t xReturn;
1724     UBaseType_t uxSavedInterruptStatus;
1725     Queue_t *const pxQueue = xQueue;
1726 
1727     configASSERT(pxQueue);
1728     configASSERT(
1729         !((pvBuffer == NULL) && (pxQueue->uxItemSize != (UBaseType_t)0U)));
1730 
1731     /* RTOS ports that support interrupt nesting have the concept of a maximum
1732     system call (or maximum API call) interrupt priority.  Interrupts that are
1733     above the maximum system call priority are kept permanently enabled, even
1734     when the RTOS kernel is in a critical section, but cannot make any calls to
1735     FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
1736     then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
1737     failure if a FreeRTOS API function is called from an interrupt that has been
1738     assigned a priority above the configured maximum system call priority.
1739     Only FreeRTOS functions that end in FromISR can be called from interrupts
1740     that have been assigned a priority at or (logically) below the maximum
1741     system call interrupt priority.  FreeRTOS maintains a separate interrupt
1742     safe API to ensure interrupt entry is as fast and as simple as possible.
1743     More information (albeit Cortex-M specific) is provided on the following
1744     link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
1745     portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
1746 
1747     uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
1748     {
1749         const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
1750 
1751         /* Cannot block in an ISR, so check there is data available. */
1752         if (uxMessagesWaiting > (UBaseType_t)0) {
1753             const int8_t cRxLock = pxQueue->cRxLock;
1754 
1755             traceQUEUE_RECEIVE_FROM_ISR(pxQueue);
1756 
1757             prvCopyDataFromQueue(pxQueue, pvBuffer);
1758             pxQueue->uxMessagesWaiting = uxMessagesWaiting - (UBaseType_t)1;
1759 
1760             /* If the queue is locked the event list will not be modified.
1761             Instead update the lock count so the task that unlocks the queue
1762             will know that an ISR has removed data while the queue was
1763             locked. */
1764             if (cRxLock == queueUNLOCKED) {
1765                 if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToSend)) ==
1766                     pdFALSE) {
1767                     if (xTaskRemoveFromEventList(
1768                             &(pxQueue->xTasksWaitingToSend)) != pdFALSE) {
1769                         /* The task waiting has a higher priority than us so
1770                         force a context switch. */
1771                         if (pxHigherPriorityTaskWoken != NULL) {
1772                             *pxHigherPriorityTaskWoken = pdTRUE;
1773                         } else {
1774                             mtCOVERAGE_TEST_MARKER();
1775                         }
1776                     } else {
1777                         mtCOVERAGE_TEST_MARKER();
1778                     }
1779                 } else {
1780                     mtCOVERAGE_TEST_MARKER();
1781                 }
1782             } else {
1783                 /* Increment the lock count so the task that unlocks the queue
1784                 knows that data was removed while it was locked. */
1785                 pxQueue->cRxLock = (int8_t)(cRxLock + 1);
1786             }
1787 
1788             xReturn = pdPASS;
1789         } else {
1790             xReturn = pdFAIL;
1791             traceQUEUE_RECEIVE_FROM_ISR_FAILED(pxQueue);
1792         }
1793     }
1794     portCLEAR_INTERRUPT_MASK_FROM_ISR(uxSavedInterruptStatus);
1795 
1796     return xReturn;
1797 }
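/* Illustrative use of xQueueReceiveFromISR(), for example draining queued
bytes into a transmit peripheral.  xTxQueue and vWriteByteToTxRegister() are
hypothetical application code:

    void vTxInterruptHandler( void )
    {
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;
    uint8_t ucByteToSend;

        // Keep transmitting while the queue holds data; an ISR cannot
        // block, so the call simply returns pdFAIL once the queue is empty.
        while( xQueueReceiveFromISR( xTxQueue, &ucByteToSend, &xHigherPriorityTaskWoken ) == pdPASS )
        {
            vWriteByteToTxRegister( ucByteToSend );
        }

        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
    }
*/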
1798 /*-----------------------------------------------------------*/
1799 
1800 BaseType_t xQueuePeekFromISR(QueueHandle_t xQueue, void *const pvBuffer)
1801 {
1802     BaseType_t xReturn;
1803     UBaseType_t uxSavedInterruptStatus;
1804     int8_t *pcOriginalReadPosition;
1805     Queue_t *const pxQueue = xQueue;
1806 
1807     configASSERT(pxQueue);
1808     configASSERT(
1809         !((pvBuffer == NULL) && (pxQueue->uxItemSize != (UBaseType_t)0U)));
1810     configASSERT(pxQueue->uxItemSize != 0); /* Can't peek a semaphore. */
1811 
1812     /* RTOS ports that support interrupt nesting have the concept of a maximum
1813     system call (or maximum API call) interrupt priority.  Interrupts that are
1814     above the maximum system call priority are kept permanently enabled, even
1815     when the RTOS kernel is in a critical section, but cannot make any calls to
1816     FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
1817     then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
1818     failure if a FreeRTOS API function is called from an interrupt that has been
1819     assigned a priority above the configured maximum system call priority.
1820     Only FreeRTOS functions that end in FromISR can be called from interrupts
1821     that have been assigned a priority at or (logically) below the maximum
1822     system call interrupt priority.  FreeRTOS maintains a separate interrupt
1823     safe API to ensure interrupt entry is as fast and as simple as possible.
1824     More information (albeit Cortex-M specific) is provided on the following
1825     link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
1826     portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
1827 
1828     uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
1829     {
1830         /* Cannot block in an ISR, so check there is data available. */
1831         if (pxQueue->uxMessagesWaiting > (UBaseType_t)0) {
1832             traceQUEUE_PEEK_FROM_ISR(pxQueue);
1833 
1834             /* Remember the read position so it can be reset as nothing is
1835             actually being removed from the queue. */
1836             pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom;
1837             prvCopyDataFromQueue(pxQueue, pvBuffer);
1838             pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition;
1839 
1840             xReturn = pdPASS;
1841         } else {
1842             xReturn = pdFAIL;
1843             traceQUEUE_PEEK_FROM_ISR_FAILED(pxQueue);
1844         }
1845     }
1846     portCLEAR_INTERRUPT_MASK_FROM_ISR(uxSavedInterruptStatus);
1847 
1848     return xReturn;
1849 }
1850 /*-----------------------------------------------------------*/
1851 
1852 UBaseType_t uxQueueMessagesWaiting(const QueueHandle_t xQueue)
1853 {
1854     UBaseType_t uxReturn;
1855 
1856     configASSERT(xQueue);
1857 
1858     taskENTER_CRITICAL();
1859     {
1860         uxReturn = ((Queue_t *)xQueue)->uxMessagesWaiting;
1861     }
1862     taskEXIT_CRITICAL();
1863 
1864     return uxReturn;
1865 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef, not a
1866      pointer. */
1867 /*-----------------------------------------------------------*/
1868 
1869 UBaseType_t uxQueueSpacesAvailable(const QueueHandle_t xQueue)
1870 {
1871     UBaseType_t uxReturn;
1872     Queue_t *const pxQueue = xQueue;
1873 
1874     configASSERT(pxQueue);
1875 
1876     taskENTER_CRITICAL();
1877     {
1878         uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;
1879     }
1880     taskEXIT_CRITICAL();
1881 
1882     return uxReturn;
1883 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef, not a
1884      pointer. */
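/* Illustrative use of the two query functions, assuming a hypothetical
handle xQueue:

    UBaseType_t uxUsed, uxFree;

    uxUsed = uxQueueMessagesWaiting( xQueue );
    uxFree = uxQueueSpacesAvailable( xQueue );

    // Each value is read inside its own critical section, so uxUsed and
    // uxFree only sum to the queue's length if no other task or interrupt
    // sent to or received from the queue between the two calls.
*/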
1885 /*-----------------------------------------------------------*/
1886 
1887 UBaseType_t uxQueueMessagesWaitingFromISR(const QueueHandle_t xQueue)
1888 {
1889     UBaseType_t uxReturn;
1890     Queue_t *const pxQueue = xQueue;
1891 
1892     configASSERT(pxQueue);
1893     uxReturn = pxQueue->uxMessagesWaiting;
1894 
1895     return uxReturn;
1896 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef, not a
1897      pointer. */
1898 /*-----------------------------------------------------------*/
1899 
1900 void vQueueDelete(QueueHandle_t xQueue)
1901 {
1902     Queue_t *const pxQueue = xQueue;
1903 
1904     configASSERT(pxQueue);
1905     traceQUEUE_DELETE(pxQueue);
1906 
1907 #if (configQUEUE_REGISTRY_SIZE > 0)
1908     {
1909         vQueueUnregisterQueue(pxQueue);
1910     }
1911 #endif
1912 
1913 #if ( \
1914     (configSUPPORT_DYNAMIC_ALLOCATION == 1) && \
1915     (configSUPPORT_STATIC_ALLOCATION == 0))
1916     {
1917         /* The queue can only have been allocated dynamically - free it
1918         again. */
1919         vPortFree(pxQueue);
1920     }
1921 #elif ( \
1922     (configSUPPORT_DYNAMIC_ALLOCATION == 1) && \
1923     (configSUPPORT_STATIC_ALLOCATION == 1))
1924     {
1925         /* The queue could have been allocated statically or dynamically, so
1926         check before attempting to free the memory. */
1927         if (pxQueue->ucStaticallyAllocated == (uint8_t)pdFALSE) {
1928             vPortFree(pxQueue);
1929         } else {
1930             mtCOVERAGE_TEST_MARKER();
1931         }
1932     }
1933 #else
1934     {
1935         /* The queue must have been statically allocated, so is not going to be
1936         deleted.  Avoid compiler warnings about the unused parameter. */
1937         (void)pxQueue;
1938     }
1939 #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
1940 }
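/* Illustrative deletion, assuming the handle came from xQueueCreate() (i.e.
configSUPPORT_DYNAMIC_ALLOCATION is 1):

    QueueHandle_t xQueue = xQueueCreate( 8, sizeof( uint32_t ) );

    if( xQueue != NULL )
    {
        // ... use the queue ...

        // Free the queue's control structure and storage.  The handle must
        // not be used afterwards, and no tasks should be blocked on the
        // queue when it is deleted.
        vQueueDelete( xQueue );
    }
*/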
1941 /*-----------------------------------------------------------*/
1942 
1943 #if (configUSE_TRACE_FACILITY == 1)
1944 
1945 UBaseType_t uxQueueGetQueueNumber(QueueHandle_t xQueue)
1946 {
1947     return ((Queue_t *)xQueue)->uxQueueNumber;
1948 }
1949 
1950 #endif /* configUSE_TRACE_FACILITY */
1951 /*-----------------------------------------------------------*/
1952 
1953 #if (configUSE_TRACE_FACILITY == 1)
1954 
1955 void vQueueSetQueueNumber(QueueHandle_t xQueue, UBaseType_t uxQueueNumber)
1956 {
1957     ((Queue_t *)xQueue)->uxQueueNumber = uxQueueNumber;
1958 }
1959 
1960 #endif /* configUSE_TRACE_FACILITY */
1961 /*-----------------------------------------------------------*/
1962 
1963 #if (configUSE_TRACE_FACILITY == 1)
1964 
1965 uint8_t ucQueueGetQueueType(QueueHandle_t xQueue)
1966 {
1967     return ((Queue_t *)xQueue)->ucQueueType;
1968 }
1969 
1970 #endif /* configUSE_TRACE_FACILITY */
1971 /*-----------------------------------------------------------*/
1972 
1973 #if (configUSE_MUTEXES == 1)
1974 
1975 static UBaseType_t prvGetDisinheritPriorityAfterTimeout(
1976     const Queue_t *const pxQueue)
1977 {
1978     UBaseType_t uxHighestPriorityOfWaitingTasks;
1979 
1980     /* If a task waiting for a mutex causes the mutex holder to inherit a
1981     priority, but the waiting task times out, then the holder should
1982     disinherit the priority - but only down to the highest priority of any
1983     other tasks that are waiting for the same mutex.  For this purpose,
1984     return the priority of the highest priority task that is waiting for the
1985     mutex. */
1986     if (listCURRENT_LIST_LENGTH(&(pxQueue->xTasksWaitingToReceive)) > 0U) {
1987         uxHighestPriorityOfWaitingTasks = (UBaseType_t)configMAX_PRIORITIES -
1988             (UBaseType_t)listGET_ITEM_VALUE_OF_HEAD_ENTRY(&(
1989                 pxQueue->xTasksWaitingToReceive));
1990     } else {
1991         uxHighestPriorityOfWaitingTasks = tskIDLE_PRIORITY;
1992     }
1993 
1994     return uxHighestPriorityOfWaitingTasks;
1995 }
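/* Worked example of the encoding used above, assuming configMAX_PRIORITIES
is 5:  when a task of priority 3 blocks, its event list item value is set to
5 - 3 = 2 (see the task's event list item initialisation in tasks.c), and
event lists are kept ordered so the smallest item value (the highest
priority) is at the head.  Reading a head item value of 2 therefore recovers
the highest waiting priority as 5 - 2 = 3. */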
1996 
1997 #endif /* configUSE_MUTEXES */
1998 /*-----------------------------------------------------------*/
1999 
2000 static BaseType_t prvCopyDataToQueue(
2001     Queue_t *const pxQueue,
2002     const void *pvItemToQueue,
2003     const BaseType_t xPosition)
2004 {
2005     BaseType_t xReturn = pdFALSE;
2006     UBaseType_t uxMessagesWaiting;
2007 
2008     /* This function is called from a critical section. */
2009 
2010     uxMessagesWaiting = pxQueue->uxMessagesWaiting;
2011 
2012     if (pxQueue->uxItemSize == (UBaseType_t)0) {
2013 #if (configUSE_MUTEXES == 1)
2014         {
2015             if (pxQueue->uxQueueType == queueQUEUE_IS_MUTEX) {
2016                 /* The mutex is no longer being held. */
2017                 xReturn =
2018                     xTaskPriorityDisinherit(pxQueue->u.xSemaphore.xMutexHolder);
2019                 pxQueue->u.xSemaphore.xMutexHolder = NULL;
2020             } else {
2021                 mtCOVERAGE_TEST_MARKER();
2022             }
2023         }
2024 #endif /* configUSE_MUTEXES */
2025     } else if (xPosition == queueSEND_TO_BACK) {
2026         (void)memcpy(
2027             (void *)pxQueue->pcWriteTo,
2028             pvItemToQueue,
2029             (size_t)pxQueue
2030                 ->uxItemSize); /*lint !e961 !e418 !e9087 MISRA exception as the
2031                                   casts are only redundant for some ports, plus
2032                                   previous logic ensures a null pointer can only
2033                                   be passed to memcpy() if the copy size is 0.
2034                                   Cast to void required by function signature
2035                                   and safe as no alignment requirement and copy
2036                                   length specified in bytes. */
2037         pxQueue->pcWriteTo +=
2038             pxQueue->uxItemSize; /*lint !e9016 Pointer arithmetic on char types
2039                                     ok, especially in this use case where it is
2040                                     the clearest way of conveying intent. */
2041         if (pxQueue->pcWriteTo >=
2042             pxQueue->u.xQueue
2043                 .pcTail) /*lint !e946 MISRA exception justified as comparison of
2044                             pointers is the cleanest solution. */
2045         {
2046             pxQueue->pcWriteTo = pxQueue->pcHead;
2047         } else {
2048             mtCOVERAGE_TEST_MARKER();
2049         }
2050     } else {
2051         (void)memcpy(
2052             (void *)pxQueue->u.xQueue.pcReadFrom,
2053             pvItemToQueue,
2054             (size_t)pxQueue
2055                 ->uxItemSize); /*lint !e961 !e9087 !e418 MISRA exception as the
2056                                   casts are only redundant for some ports.  Cast
2057                                   to void required by function signature and
2058                                   safe as no alignment requirement and copy
2059                                   length specified in bytes.  Assert checks null
2060                                   pointer only used when length is 0. */
2061         pxQueue->u.xQueue.pcReadFrom -= pxQueue->uxItemSize;
2062         if (pxQueue->u.xQueue.pcReadFrom <
2063             pxQueue
2064                 ->pcHead) /*lint !e946 MISRA exception justified as comparison
2065                              of pointers is the cleanest solution. */
2066         {
2067             pxQueue->u.xQueue.pcReadFrom =
2068                 (pxQueue->u.xQueue.pcTail - pxQueue->uxItemSize);
2069         } else {
2070             mtCOVERAGE_TEST_MARKER();
2071         }
2072 
2073         if (xPosition == queueOVERWRITE) {
2074             if (uxMessagesWaiting > (UBaseType_t)0) {
2075                 /* An item is not being added but overwritten, so subtract
2076                 one from the recorded number of items in the queue so when
2077                 one is added again below the number of recorded items remains
2078                 correct. */
2079                 --uxMessagesWaiting;
2080             } else {
2081                 mtCOVERAGE_TEST_MARKER();
2082             }
2083         } else {
2084             mtCOVERAGE_TEST_MARKER();
2085         }
2086     }
2087 
2088     pxQueue->uxMessagesWaiting = uxMessagesWaiting + (UBaseType_t)1;
2089 
2090     return xReturn;
2091 }
2092 /*-----------------------------------------------------------*/
2093 
2094 static void prvCopyDataFromQueue(Queue_t *const pxQueue, void *const pvBuffer)
2095 {
2096     if (pxQueue->uxItemSize != (UBaseType_t)0) {
2097         pxQueue->u.xQueue.pcReadFrom +=
2098             pxQueue->uxItemSize; /*lint !e9016 Pointer arithmetic on char types
2099                                     ok, especially in this use case where it is
2100                                     the clearest way of conveying intent. */
2101         if (pxQueue->u.xQueue.pcReadFrom >=
2102             pxQueue->u.xQueue
2103                 .pcTail) /*lint !e946 MISRA exception justified as use of the
2104                             relational operator is the cleanest solution. */
2105         {
2106             pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
2107         } else {
2108             mtCOVERAGE_TEST_MARKER();
2109         }
2110         (void)memcpy(
2111             (void *)pvBuffer,
2112             (void *)pxQueue->u.xQueue.pcReadFrom,
2113             (size_t)pxQueue
2114                 ->uxItemSize); /*lint !e961 !e418 !e9087 MISRA exception as the
2115                                   casts are only redundant for some ports.  Also
2116                                   previous logic ensures a null pointer can only
2117                                   be passed to memcpy() when the count is 0.
2118                                   Cast to void required by function signature
2119                                   and safe as no alignment requirement and copy
2120                                   length specified in bytes. */
2121     }
2122 }
2123 /*-----------------------------------------------------------*/
2124 
2125 static void prvUnlockQueue(Queue_t *const pxQueue)
2126 {
2127     /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */
2128 
2129     /* The lock counts contain the number of extra data items placed or
2130     removed from the queue while the queue was locked.  When a queue is
2131     locked items can be added or removed, but the event lists cannot be
2132     updated. */
2133     taskENTER_CRITICAL();
2134     {
2135         int8_t cTxLock = pxQueue->cTxLock;
2136 
2137         /* See if data was added to the queue while it was locked. */
2138         while (cTxLock > queueLOCKED_UNMODIFIED) {
2139 /* Data was posted while the queue was locked.  Are any tasks
2140 blocked waiting for data to become available? */
2141 #if (configUSE_QUEUE_SETS == 1)
2142             {
2143                 if (pxQueue->pxQueueSetContainer != NULL) {
2144                     if (prvNotifyQueueSetContainer(pxQueue) != pdFALSE) {
2145                         /* The queue is a member of a queue set, and posting to
2146                         the queue set caused a higher priority task to unblock.
2147                         A context switch is required. */
2148                         vTaskMissedYield();
2149                     } else {
2150                         mtCOVERAGE_TEST_MARKER();
2151                     }
2152                 } else {
2153                     /* Tasks that are removed from the event list will get
2154                     added to the pending ready list as the scheduler is still
2155                     suspended. */
2156                     if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToReceive)) ==
2157                         pdFALSE) {
2158                         if (xTaskRemoveFromEventList(&(
2159                                 pxQueue->xTasksWaitingToReceive)) != pdFALSE) {
2160                             /* The task waiting has a higher priority so record
2161                             that a context switch is required. */
2162                             vTaskMissedYield();
2163                         } else {
2164                             mtCOVERAGE_TEST_MARKER();
2165                         }
2166                     } else {
2167                         break;
2168                     }
2169                 }
2170             }
2171 #else /* configUSE_QUEUE_SETS */
2172             {
2173                 /* Tasks that are removed from the event list will get added to
2174                 the pending ready list as the scheduler is still suspended. */
2175                 if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToReceive)) ==
2176                     pdFALSE) {
2177                     if (xTaskRemoveFromEventList(
2178                             &(pxQueue->xTasksWaitingToReceive)) != pdFALSE) {
2179                         /* The task waiting has a higher priority so record that
2180                         a context switch is required. */
2181                         vTaskMissedYield();
2182                     } else {
2183                         mtCOVERAGE_TEST_MARKER();
2184                     }
2185                 } else {
2186                     break;
2187                 }
2188             }
2189 #endif /* configUSE_QUEUE_SETS */
2190 
2191             --cTxLock;
2192         }
2193 
2194         pxQueue->cTxLock = queueUNLOCKED;
2195     }
2196     taskEXIT_CRITICAL();
2197 
2198     /* Do the same for the Rx lock. */
2199     taskENTER_CRITICAL();
2200     {
2201         int8_t cRxLock = pxQueue->cRxLock;
2202 
2203         while (cRxLock > queueLOCKED_UNMODIFIED) {
2204             if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToSend)) == pdFALSE) {
2205                 if (xTaskRemoveFromEventList(&(pxQueue->xTasksWaitingToSend)) !=
2206                     pdFALSE) {
2207                     vTaskMissedYield();
2208                 } else {
2209                     mtCOVERAGE_TEST_MARKER();
2210                 }
2211 
2212                 --cRxLock;
2213             } else {
2214                 break;
2215             }
2216         }
2217 
2218         pxQueue->cRxLock = queueUNLOCKED;
2219     }
2220     taskEXIT_CRITICAL();
2221 }
2222 /*-----------------------------------------------------------*/
2223 
2224 static BaseType_t prvIsQueueEmpty(const Queue_t *pxQueue)
2225 {
2226     BaseType_t xReturn;
2227 
2228     taskENTER_CRITICAL();
2229     {
2230         if (pxQueue->uxMessagesWaiting == (UBaseType_t)0) {
2231             xReturn = pdTRUE;
2232         } else {
2233             xReturn = pdFALSE;
2234         }
2235     }
2236     taskEXIT_CRITICAL();
2237 
2238     return xReturn;
2239 }
2240 /*-----------------------------------------------------------*/
2241 
2242 BaseType_t xQueueIsQueueEmptyFromISR(const QueueHandle_t xQueue)
2243 {
2244     BaseType_t xReturn;
2245     Queue_t *const pxQueue = xQueue;
2246 
2247     configASSERT(pxQueue);
2248     if (pxQueue->uxMessagesWaiting == (UBaseType_t)0) {
2249         xReturn = pdTRUE;
2250     } else {
2251         xReturn = pdFALSE;
2252     }
2253 
2254     return xReturn;
2255 } /*lint !e818 xQueue could not be a pointer to const because it is a typedef. */
2256 /*-----------------------------------------------------------*/
2257 
2258 static BaseType_t prvIsQueueFull(const Queue_t *pxQueue)
2259 {
2260     BaseType_t xReturn;
2261 
2262     taskENTER_CRITICAL();
2263     {
2264         if (pxQueue->uxMessagesWaiting == pxQueue->uxLength) {
2265             xReturn = pdTRUE;
2266         } else {
2267             xReturn = pdFALSE;
2268         }
2269     }
2270     taskEXIT_CRITICAL();
2271 
2272     return xReturn;
2273 }
2274 /*-----------------------------------------------------------*/
2275 
2276 BaseType_t xQueueIsQueueFullFromISR(const QueueHandle_t xQueue)
2277 {
2278     BaseType_t xReturn;
2279     Queue_t *const pxQueue = xQueue;
2280 
2281     configASSERT(pxQueue);
2282     if (pxQueue->uxMessagesWaiting == pxQueue->uxLength) {
2283         xReturn = pdTRUE;
2284     } else {
2285         xReturn = pdFALSE;
2286     }
2287 
2288     return xReturn;
2289 } /*lint !e818 xQueue could not be a pointer to const because it is a typedef. */
2290 /*-----------------------------------------------------------*/
2291 
2292 #if (configUSE_CO_ROUTINES == 1)
2293 
2294 BaseType_t xQueueCRSend(
2295     QueueHandle_t xQueue,
2296     const void *pvItemToQueue,
2297     TickType_t xTicksToWait)
2298 {
2299     BaseType_t xReturn;
2300     Queue_t *const pxQueue = xQueue;
2301 
2302     /* If the queue is already full we may have to block.  A critical section
2303     is required to prevent an interrupt removing something from the queue
2304     between the check to see if the queue is full and blocking on the queue. */
2305     portDISABLE_INTERRUPTS();
2306     {
2307         if (prvIsQueueFull(pxQueue) != pdFALSE) {
2308             /* The queue is full - do we want to block or just leave without
2309             posting? */
2310             if (xTicksToWait > (TickType_t)0) {
2311                 /* As this is called from a coroutine we cannot block directly,
2312                 but return indicating that we need to block. */
2313                 vCoRoutineAddToDelayedList(
2314                     xTicksToWait, &(pxQueue->xTasksWaitingToSend));
2315                 portENABLE_INTERRUPTS();
2316                 return errQUEUE_BLOCKED;
2317             } else {
2318                 portENABLE_INTERRUPTS();
2319                 return errQUEUE_FULL;
2320             }
2321         }
2322     }
2323     portENABLE_INTERRUPTS();
2324 
2325     portDISABLE_INTERRUPTS();
2326     {
2327         if (pxQueue->uxMessagesWaiting < pxQueue->uxLength) {
2328             /* There is room in the queue, copy the data into the queue. */
2329             prvCopyDataToQueue(pxQueue, pvItemToQueue, queueSEND_TO_BACK);
2330             xReturn = pdPASS;
2331 
2332             /* Were any co-routines waiting for data to become available? */
2333             if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToReceive)) ==
2334                 pdFALSE) {
2335                 /* In this instance the co-routine could be placed directly
2336                 into the ready list as we are within a critical section.
2337                 Instead the same pending ready list mechanism is used as if
2338                 the event were caused from within an interrupt. */
2339                 if (xCoRoutineRemoveFromEventList(
2340                         &(pxQueue->xTasksWaitingToReceive)) != pdFALSE) {
2341                     /* The co-routine waiting has a higher priority so record
2342                     that a yield might be appropriate. */
2343                     xReturn = errQUEUE_YIELD;
2344                 } else {
2345                     mtCOVERAGE_TEST_MARKER();
2346                 }
2347             } else {
2348                 mtCOVERAGE_TEST_MARKER();
2349             }
2350         } else {
2351             xReturn = errQUEUE_FULL;
2352         }
2353     }
2354     portENABLE_INTERRUPTS();
2355 
2356     return xReturn;
2357 }
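/* Co-routines normally reach xQueueCRSend() through the crQUEUE_SEND()
macro in queue.h.  A minimal sketch, assuming xCoRoutineQueue is a
hypothetical handle created elsewhere:

    void vSenderCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
    {
    // Co-routine stack variables do not survive a blocking call, so
    // variables that must persist are declared static.
    static BaseType_t xResult;
    static uint32_t ulValueToPost = 0;

        crSTART( xHandle );

        for( ;; )
        {
            // Post to the queue, blocking this co-routine for up to 10
            // ticks if the queue is full.
            crQUEUE_SEND( xHandle, xCoRoutineQueue, &ulValueToPost, ( TickType_t ) 10, &xResult );
            ulValueToPost++;
        }

        crEND();
    }
*/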
2358 
2359 #endif /* configUSE_CO_ROUTINES */
2360 /*-----------------------------------------------------------*/
2361 
2362 #if (configUSE_CO_ROUTINES == 1)
2363 
2364 BaseType_t xQueueCRReceive(
2365     QueueHandle_t xQueue,
2366     void *pvBuffer,
2367     TickType_t xTicksToWait)
2368 {
2369     BaseType_t xReturn;
2370     Queue_t *const pxQueue = xQueue;
2371 
2372     /* If the queue is already empty we may have to block.  A critical section
2373     is required to prevent an interrupt adding something to the queue
2374     between the check to see if the queue is empty and blocking on the queue. */
2375     portDISABLE_INTERRUPTS();
2376     {
2377         if (pxQueue->uxMessagesWaiting == (UBaseType_t)0) {
2378             /* There are no messages in the queue, do we want to block or just
2379             leave with nothing? */
2380             if (xTicksToWait > (TickType_t)0) {
2381                 /* As this is a co-routine we cannot block directly, but return
2382                 indicating that we need to block. */
2383                 vCoRoutineAddToDelayedList(
2384                     xTicksToWait, &(pxQueue->xTasksWaitingToReceive));
2385                 portENABLE_INTERRUPTS();
2386                 return errQUEUE_BLOCKED;
2387             } else {
2388                 portENABLE_INTERRUPTS();
2389                 return errQUEUE_FULL;
2390             }
2391         } else {
2392             mtCOVERAGE_TEST_MARKER();
2393         }
2394     }
2395     portENABLE_INTERRUPTS();
2396 
2397     portDISABLE_INTERRUPTS();
2398     {
2399         if (pxQueue->uxMessagesWaiting > (UBaseType_t)0) {
2400             /* Data is available from the queue. */
2401             pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize;
2402             if (pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail) {
2403                 pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
2404             } else {
2405                 mtCOVERAGE_TEST_MARKER();
2406             }
2407             --(pxQueue->uxMessagesWaiting);
2408             (void)memcpy(
2409                 (void *)pvBuffer,
2410                 (void *)pxQueue->u.xQueue.pcReadFrom,
2411                 (unsigned)pxQueue->uxItemSize);
2412 
2413             xReturn = pdPASS;
2414 
2415             /* Were any co-routines waiting for space to become available? */
2416             if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToSend)) == pdFALSE) {
2417                 /* In this instance the co-routine could be placed directly
2418                 into the ready list as we are within a critical section.
2419                 Instead the same pending ready list mechanism is used as if
2420                 the event were caused from within an interrupt. */
2421                 if (xCoRoutineRemoveFromEventList(
2422                         &(pxQueue->xTasksWaitingToSend)) != pdFALSE) {
2423                     xReturn = errQUEUE_YIELD;
2424                 } else {
2425                     mtCOVERAGE_TEST_MARKER();
2426                 }
2427             } else {
2428                 mtCOVERAGE_TEST_MARKER();
2429             }
2430         } else {
2431             xReturn = pdFAIL;
2432         }
2433     }
2434     portENABLE_INTERRUPTS();
2435 
2436     return xReturn;
2437 }
2438 
2439 #endif /* configUSE_CO_ROUTINES */
2440 /*-----------------------------------------------------------*/
2441 
2442 #if (configUSE_CO_ROUTINES == 1)
2443 
2444 BaseType_t xQueueCRSendFromISR(
2445     QueueHandle_t xQueue,
2446     const void *pvItemToQueue,
2447     BaseType_t xCoRoutinePreviouslyWoken)
2448 {
2449     Queue_t *const pxQueue = xQueue;
2450 
2451     /* Cannot block within an ISR so if there is no space on the queue then
2452     exit without doing anything. */
2453     if (pxQueue->uxMessagesWaiting < pxQueue->uxLength) {
2454         prvCopyDataToQueue(pxQueue, pvItemToQueue, queueSEND_TO_BACK);
2455 
2456         /* We only want to wake one co-routine per ISR, so check that a
2457         co-routine has not already been woken. */
2458         if (xCoRoutinePreviouslyWoken == pdFALSE) {
2459             if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToReceive)) ==
2460                 pdFALSE) {
2461                 if (xCoRoutineRemoveFromEventList(
2462                         &(pxQueue->xTasksWaitingToReceive)) != pdFALSE) {
2463                     return pdTRUE;
2464                 } else {
2465                     mtCOVERAGE_TEST_MARKER();
2466                 }
2467             } else {
2468                 mtCOVERAGE_TEST_MARKER();
2469             }
2470         } else {
2471             mtCOVERAGE_TEST_MARKER();
2472         }
2473     } else {
2474         mtCOVERAGE_TEST_MARKER();
2475     }
2476 
2477     return xCoRoutinePreviouslyWoken;
2478 }
2479 
2480 #endif /* configUSE_CO_ROUTINES */
2481 /*-----------------------------------------------------------*/
2482 
2483 #if (configUSE_CO_ROUTINES == 1)
2484 
2485 BaseType_t xQueueCRReceiveFromISR(
2486     QueueHandle_t xQueue,
2487     void *pvBuffer,
2488     BaseType_t *pxCoRoutineWoken)
2489 {
2490     BaseType_t xReturn;
2491     Queue_t *const pxQueue = xQueue;
2492 
2493     /* We cannot block from an ISR, so check there is data available. If
2494     not then just leave without doing anything. */
2495     if (pxQueue->uxMessagesWaiting > (UBaseType_t)0) {
2496         /* Copy the data from the queue. */
2497         pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize;
2498         if (pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail) {
2499             pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
2500         } else {
2501             mtCOVERAGE_TEST_MARKER();
2502         }
2503         --(pxQueue->uxMessagesWaiting);
2504         (void)memcpy(
2505             (void *)pvBuffer,
2506             (void *)pxQueue->u.xQueue.pcReadFrom,
2507             (unsigned)pxQueue->uxItemSize);
2508 
2509         if ((*pxCoRoutineWoken) == pdFALSE) {
2510             if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToSend)) == pdFALSE) {
2511                 if (xCoRoutineRemoveFromEventList(
2512                         &(pxQueue->xTasksWaitingToSend)) != pdFALSE) {
2513                     *pxCoRoutineWoken = pdTRUE;
2514                 } else {
2515                     mtCOVERAGE_TEST_MARKER();
2516                 }
2517             } else {
2518                 mtCOVERAGE_TEST_MARKER();
2519             }
2520         } else {
2521             mtCOVERAGE_TEST_MARKER();
2522         }
2523 
2524         xReturn = pdPASS;
2525     } else {
2526         xReturn = pdFAIL;
2527     }
2528 
2529     return xReturn;
2530 }

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/

#if (configQUEUE_REGISTRY_SIZE > 0)

void vQueueAddToRegistry(
    QueueHandle_t xQueue,
    const char *pcQueueName) /*lint !e971 Unqualified char types are allowed for
                                strings and single characters only. */
{
    UBaseType_t ux;

    /* See if there is an empty space in the registry.  A NULL name denotes
    a free slot. */
    for (ux = (UBaseType_t)0U; ux < (UBaseType_t)configQUEUE_REGISTRY_SIZE;
         ux++) {
        if (xQueueRegistry[ux].pcQueueName == NULL) {
            /* Store the information on this queue. */
            xQueueRegistry[ux].pcQueueName = pcQueueName;
            xQueueRegistry[ux].xHandle = xQueue;

            traceQUEUE_REGISTRY_ADD(xQueue, pcQueueName);
            break;
        } else {
            mtCOVERAGE_TEST_MARKER();
        }
    }
}
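/* Illustrative usage sketch (not part of the kernel): registering a queue so
a kernel-aware debugger can display it by name.  The handle and name below
are hypothetical.  Note the registry stores only the pointer to the name, so
the string must remain valid (a string literal is simplest) while the queue
is registered.

    QueueHandle_t xCommandQueue = xQueueCreate(10, sizeof(uint32_t));

    if (xCommandQueue != NULL) {
        vQueueAddToRegistry(xCommandQueue, "CmdQ");
    }
*/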

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/

#if (configQUEUE_REGISTRY_SIZE > 0)

const char *pcQueueGetName(
    QueueHandle_t xQueue) /*lint !e971 Unqualified char types are allowed for
                             strings and single characters only. */
{
    UBaseType_t ux;
    const char *pcReturn = NULL; /*lint !e971 Unqualified char types are allowed
                                    for strings and single characters only. */

    /* Note there is nothing here to protect against another task adding or
    removing entries from the registry while it is being searched. */
    for (ux = (UBaseType_t)0U; ux < (UBaseType_t)configQUEUE_REGISTRY_SIZE;
         ux++) {
        if (xQueueRegistry[ux].xHandle == xQueue) {
            pcReturn = xQueueRegistry[ux].pcQueueName;
            break;
        } else {
            mtCOVERAGE_TEST_MARKER();
        }
    }

    return pcReturn;
} /*lint !e818 xQueue cannot be a pointer to const because it is a typedef. */
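/* Illustrative usage sketch (not part of the kernel), continuing the
hypothetical handle registered above:

    const char *pcName = pcQueueGetName(xCommandQueue);

    // pcName is "CmdQ" if the queue is registered, or NULL otherwise.
*/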

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/

#if (configQUEUE_REGISTRY_SIZE > 0)

void vQueueUnregisterQueue(QueueHandle_t xQueue)
{
    UBaseType_t ux;

    /* See if the handle of the queue being unregistered is actually in the
    registry. */
    for (ux = (UBaseType_t)0U; ux < (UBaseType_t)configQUEUE_REGISTRY_SIZE;
         ux++) {
        if (xQueueRegistry[ux].xHandle == xQueue) {
            /* Set the name to NULL to show that this slot is free again. */
            xQueueRegistry[ux].pcQueueName = NULL;

            /* Set the handle to NULL to ensure the same queue handle cannot
            appear in the registry twice if it is added, removed, then
            added again. */
            xQueueRegistry[ux].xHandle = (QueueHandle_t)0;
            break;
        } else {
            mtCOVERAGE_TEST_MARKER();
        }
    }

} /*lint !e818 xQueue could not be a pointer to const because it is a typedef. */
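/* Illustrative usage sketch (not part of the kernel): a registered queue
should be unregistered before it is deleted, otherwise the registry is left
holding a stale handle.  The handle is the hypothetical one used above.

    vQueueUnregisterQueue(xCommandQueue);
    vQueueDelete(xCommandQueue);
*/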

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/

#if (configUSE_TIMERS == 1)

void vQueueWaitForMessageRestricted(
    QueueHandle_t xQueue,
    TickType_t xTicksToWait,
    const BaseType_t xWaitIndefinitely)
{
    Queue_t *const pxQueue = xQueue;

    /* This function should not be called by application code, hence the
    'Restricted' in its name.  It is not part of the public API.  It is
    designed for use by kernel code, and has special calling requirements.
    It can result in vListInsert() being called on a list that can only
    possibly ever have one item in it, so the list will be fast, but even
    so it should be called with the scheduler locked and not from a critical
    section. */

    /* Only do anything if there are no messages in the queue.  This function
    will not actually cause the task to block, just place it on a blocked
    list.  It will not block until the scheduler is unlocked - at which
    time a yield will be performed.  If an item is added to the queue while
    the queue is locked, and the calling task blocks on the queue, then the
    calling task will be immediately unblocked when the queue is unlocked. */
    prvLockQueue(pxQueue);
    if (pxQueue->uxMessagesWaiting == (UBaseType_t)0U) {
        /* There is nothing in the queue, block for the specified period. */
        vTaskPlaceOnEventListRestricted(
            &(pxQueue->xTasksWaitingToReceive),
            xTicksToWait,
            xWaitIndefinitely);
    } else {
        mtCOVERAGE_TEST_MARKER();
    }
    prvUnlockQueue(pxQueue);
}
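/* For reference: within the kernel this is used by the timer service task in
timers.c to wait on the timer command queue.  A simplified sketch of that
calling pattern (details such as computing the block time omitted):

    vTaskSuspendAll();
    {
        // ... work out xTimeToWait from the next timer expiry ...
        vQueueWaitForMessageRestricted(
            xTimerQueue, xTimeToWait, xListWasEmpty);
    }
    // The matching xTaskResumeAll() then performs the actual block/yield.
*/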

#endif /* configUSE_TIMERS */
/*-----------------------------------------------------------*/

#if ((configUSE_QUEUE_SETS == 1) && (configSUPPORT_DYNAMIC_ALLOCATION == 1))

QueueSetHandle_t xQueueCreateSet(const UBaseType_t uxEventQueueLength)
{
    QueueSetHandle_t pxQueue;

    pxQueue = xQueueGenericCreate(
        uxEventQueueLength,
        (UBaseType_t)sizeof(Queue_t *),
        queueQUEUE_TYPE_SET);

    return pxQueue;
}
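/* Illustrative usage sketch (not part of the kernel): a set must be large
enough to hold an event for every item in every member, so its length is the
sum of the member lengths.  The lengths and handle are hypothetical.

    #define QUEUE_LENGTH    10
    #define SEM_COUNT       1

    QueueSetHandle_t xQueueSet = xQueueCreateSet(QUEUE_LENGTH + SEM_COUNT);
*/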

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/

#if (configUSE_QUEUE_SETS == 1)

BaseType_t xQueueAddToSet(
    QueueSetMemberHandle_t xQueueOrSemaphore,
    QueueSetHandle_t xQueueSet)
{
    BaseType_t xReturn;

    taskENTER_CRITICAL();
    {
        if (((Queue_t *)xQueueOrSemaphore)->pxQueueSetContainer != NULL) {
            /* Cannot add a queue/semaphore to more than one queue set. */
            xReturn = pdFAIL;
        } else if (
            ((Queue_t *)xQueueOrSemaphore)->uxMessagesWaiting !=
            (UBaseType_t)0) {
            /* Cannot add a queue/semaphore to a queue set if there are already
            items in the queue/semaphore. */
            xReturn = pdFAIL;
        } else {
            ((Queue_t *)xQueueOrSemaphore)->pxQueueSetContainer = xQueueSet;
            xReturn = pdPASS;
        }
    }
    taskEXIT_CRITICAL();

    return xReturn;
}
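/* Illustrative usage sketch (not part of the kernel): members must be empty,
and not already in a set, at the time they are added.  The member handles are
hypothetical, created with lengths matching the set sizing above.

    if (xQueueAddToSet(xQueue1, xQueueSet) != pdPASS) {
        // The member was not empty, or is already in another set.
    }

    if (xQueueAddToSet(xBinarySemaphore, xQueueSet) != pdPASS) {
        // As above.
    }
*/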

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/

#if (configUSE_QUEUE_SETS == 1)

BaseType_t xQueueRemoveFromSet(
    QueueSetMemberHandle_t xQueueOrSemaphore,
    QueueSetHandle_t xQueueSet)
{
    BaseType_t xReturn;
    Queue_t *const pxQueueOrSemaphore = (Queue_t *)xQueueOrSemaphore;

    if (pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet) {
        /* The queue was not a member of the set. */
        xReturn = pdFAIL;
    } else if (pxQueueOrSemaphore->uxMessagesWaiting != (UBaseType_t)0) {
        /* It is dangerous to remove a queue from a set when the queue is
        not empty because the queue set will still hold pending events for
        the queue. */
        xReturn = pdFAIL;
    } else {
        taskENTER_CRITICAL();
        {
            /* The queue is no longer contained in the set. */
            pxQueueOrSemaphore->pxQueueSetContainer = NULL;
        }
        taskEXIT_CRITICAL();
        xReturn = pdPASS;
    }

    return xReturn;
} /*lint !e818 xQueueSet could not be declared as pointing to const as it is a
     typedef. */
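/* Illustrative usage sketch (not part of the kernel): a member must be
drained before it can be removed, otherwise pdFAIL is returned.  The handles
and item type are hypothetical.

    uint32_t ulDiscard;

    while (xQueueReceive(xQueue1, &ulDiscard, 0) == pdPASS) {
        // Drain any queued items so the member is empty.
    }

    if (xQueueRemoveFromSet(xQueue1, xQueueSet) != pdPASS) {
        // Still not empty, or not a member of this set.
    }
*/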

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/

#if (configUSE_QUEUE_SETS == 1)

QueueSetMemberHandle_t xQueueSelectFromSet(
    QueueSetHandle_t xQueueSet,
    TickType_t const xTicksToWait)
{
    QueueSetMemberHandle_t xReturn = NULL;

    (void)xQueueReceive(
        (QueueHandle_t)xQueueSet,
        &xReturn,
        xTicksToWait); /*lint !e961 Casting from one typedef to another is not
                          redundant. */
    return xReturn;
}
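/* Illustrative usage sketch (not part of the kernel): block on the set, then
read from whichever member signalled.  A block time of zero is safe on the
second read because the returned member is known to contain data.  The
handles and item type are hypothetical.

    QueueSetMemberHandle_t xActivated;
    uint32_t ulReceived;

    xActivated = xQueueSelectFromSet(xQueueSet, portMAX_DELAY);

    if (xActivated == xQueue1) {
        (void)xQueueReceive(xQueue1, &ulReceived, 0);
    } else if (xActivated == xBinarySemaphore) {
        (void)xSemaphoreTake(xBinarySemaphore, 0);
    }
*/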

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/

#if (configUSE_QUEUE_SETS == 1)

QueueSetMemberHandle_t xQueueSelectFromSetFromISR(QueueSetHandle_t xQueueSet)
{
    QueueSetMemberHandle_t xReturn = NULL;

    (void)xQueueReceiveFromISR(
        (QueueHandle_t)xQueueSet,
        &xReturn,
        NULL); /*lint !e961 Casting from one typedef to another is not
                  redundant. */
    return xReturn;
}
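/* Illustrative usage sketch (not part of the kernel): the ISR variant never
blocks and returns NULL when no member has signalled.  The ISR name and
handles are hypothetical.

    void vExampleISR(void)
    {
        QueueSetMemberHandle_t xActivated;
        BaseType_t xHigherPriorityTaskWoken = pdFALSE;
        uint32_t ulReceived;

        xActivated = xQueueSelectFromSetFromISR(xQueueSet);

        if (xActivated == xQueue1) {
            (void)xQueueReceiveFromISR(
                xQueue1, &ulReceived, &xHigherPriorityTaskWoken);
        }

        portYIELD_FROM_ISR(xHigherPriorityTaskWoken);
    }
*/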

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/

#if (configUSE_QUEUE_SETS == 1)

static BaseType_t prvNotifyQueueSetContainer(const Queue_t *const pxQueue)
{
    Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer;
    BaseType_t xReturn = pdFALSE;

    /* This function must be called from a critical section. */

    configASSERT(pxQueueSetContainer);
    configASSERT(
        pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength);

    if (pxQueueSetContainer->uxMessagesWaiting <
        pxQueueSetContainer->uxLength) {
        const int8_t cTxLock = pxQueueSetContainer->cTxLock;

        traceQUEUE_SEND(pxQueueSetContainer);

        /* The data copied is the handle of the queue that contains data. */
        xReturn = prvCopyDataToQueue(
            pxQueueSetContainer, &pxQueue, queueSEND_TO_BACK);

        if (cTxLock == queueUNLOCKED) {
            if (listLIST_IS_EMPTY(&(
                    pxQueueSetContainer->xTasksWaitingToReceive)) == pdFALSE) {
                if (xTaskRemoveFromEventList(
                        &(pxQueueSetContainer->xTasksWaitingToReceive)) !=
                    pdFALSE) {
                    /* The task waiting has a higher priority. */
                    xReturn = pdTRUE;
                } else {
                    mtCOVERAGE_TEST_MARKER();
                }
            } else {
                mtCOVERAGE_TEST_MARKER();
            }
        } else {
            pxQueueSetContainer->cTxLock = (int8_t)(cTxLock + 1);
        }
    } else {
        mtCOVERAGE_TEST_MARKER();
    }

    return xReturn;
}

#endif /* configUSE_QUEUE_SETS */