/*
 * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#pragma once

#include <config.h>
#include <types.h>
#include <util.h>
#include <mode/machine.h>
#include <arch/model/statedata.h>
#include <smp/ipi.h>

#ifdef ENABLE_SMP_SUPPORT

/* The CLH lock is a FIFO lock for machines with coherent caches (coherent-FIFO lock).
 * See ftp://ftp.cs.washington.edu/tr/1993/02/UW-CSE-93-02-02.pdf */
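/* Each core owns one queue node. To acquire the lock, a core marks its node
 * CLHState_Pending, atomically swaps it into the lock's 'head' to become the new
 * tail, and then spins on the node of its predecessor (the previous tail) until
 * that node becomes CLHState_Granted. To release, the core marks its own node
 * CLHState_Granted and takes over its predecessor's node for its next
 * acquisition. */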

typedef enum {
    CLHState_Granted = 0,
    CLHState_Pending
} clh_qnode_state_t;

typedef struct clh_qnode {
    clh_qnode_state_t value;

    PAD_TO_NEXT_CACHE_LN(sizeof(clh_qnode_state_t));
} clh_qnode_t;

typedef struct clh_qnode_p {
    clh_qnode_t *node;
    clh_qnode_t *next;
    /* This is the software IPI flag */
    word_t ipi;

    PAD_TO_NEXT_CACHE_LN(sizeof(clh_qnode_t *) +
                         sizeof(clh_qnode_t *) +
                         sizeof(word_t));
} clh_qnode_p_t;

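/* The lock itself: a backing array of CONFIG_MAX_NUM_NODES + 1 queue nodes
 * (presumably one per core plus the node initially installed as 'head', since a
 * releasing core hands its own node to its successor and adopts its
 * predecessor's node), the per-core node pointers, and 'head', which points at
 * the most recently queued node (the CLH queue tail). Cache-line padding keeps
 * the hot fields from false-sharing across cores. */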
typedef struct clh_lock {
    clh_qnode_t nodes[CONFIG_MAX_NUM_NODES + 1];
    clh_qnode_p_t node_owners[CONFIG_MAX_NUM_NODES];

    clh_qnode_t *head;
    PAD_TO_NEXT_CACHE_LN(sizeof(clh_qnode_t *));
} clh_lock_t;

extern clh_lock_t big_kernel_lock;
BOOT_CODE void clh_lock_init(void);

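/* Check the per-core software IPI flag. This is polled while spinning on the
 * lock so that a waiting core can still service remote-call IPIs. */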
static inline bool_t FORCE_INLINE clh_is_ipi_pending(word_t cpu)
{
    return big_kernel_lock.node_owners[cpu].ipi == 1;
}

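/* Swap this core's queue node into the lock's 'head' pointer with the requested
 * memory ordering and return the previous value (this core's predecessor). The
 * relaxed exchange is retried in a loop; while retrying, pending remote-call
 * IPIs are serviced, since their sender may be waiting synchronously. Note that
 * the 'ptr' argument is currently unused; the exchange always targets
 * big_kernel_lock.head. */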
static inline void *sel4_atomic_exchange(void *ptr, bool_t irqPath, word_t cpu, int memorder)
{
    clh_qnode_t *prev;

    if (memorder == __ATOMIC_RELEASE || memorder == __ATOMIC_ACQ_REL) {
        __atomic_thread_fence(__ATOMIC_RELEASE);
    } else if (memorder == __ATOMIC_SEQ_CST) {
        __atomic_thread_fence(__ATOMIC_SEQ_CST);
    }

    while (!try_arch_atomic_exchange_rlx(&big_kernel_lock.head,
                                         (void *) big_kernel_lock.node_owners[cpu].node,
                                         (void **) &prev)) {
        if (clh_is_ipi_pending(cpu)) {
            /* we only handle irq_remote_call_ipi here as other types of IPI
             * are async and could be delayed. 'handleIPI' may not return,
             * depending on the value of 'irqPath'. */
            handleIPI(CORE_IRQ_TO_IRQT(cpu, irq_remote_call_ipi), irqPath);
        }

        arch_pause();
    }

    if (memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_ACQ_REL) {
        __atomic_thread_fence(__ATOMIC_ACQUIRE);
    } else if (memorder == __ATOMIC_SEQ_CST) {
        __atomic_thread_fence(__ATOMIC_SEQ_CST);
    }

    return prev;
}

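/* Join the CLH queue for the big kernel lock and spin until our predecessor
 * grants it. Remote-call IPIs are still serviced while spinning; 'irqPath' is
 * forwarded to handleIPI and reflects whether the lock is being taken on the
 * interrupt path (see NODE_LOCK_IRQ/NODE_LOCK_SYS below). */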
static inline void FORCE_INLINE clh_lock_acquire(word_t cpu, bool_t irqPath)
{
    clh_qnode_t *prev;
    big_kernel_lock.node_owners[cpu].node->value = CLHState_Pending;

    prev = sel4_atomic_exchange(&big_kernel_lock.head, irqPath, cpu, __ATOMIC_ACQ_REL);

    big_kernel_lock.node_owners[cpu].next = prev;

    /* We do not need an __atomic_thread_fence here as this is already handled by the
     * sel4_atomic_exchange just above. */
    while (big_kernel_lock.node_owners[cpu].next->value != CLHState_Granted) {
        /* As we are in a loop, we need to ensure that the loads of future iterations of
         * the loop are performed after this one. */
        __atomic_thread_fence(__ATOMIC_ACQUIRE);
        if (clh_is_ipi_pending(cpu)) {
            /* we only handle irq_remote_call_ipi here as other types of IPI
             * are async and could be delayed. 'handleIPI' may not return,
             * depending on the value of 'irqPath'. */
            handleIPI(CORE_IRQ_TO_IRQT(cpu, irq_remote_call_ipi), irqPath);
            /* We do not need to perform a memory release here as we would have only
             * modified local state that we do not need to make visible. */
        }
        arch_pause();
    }

    /* make sure no access to the protected resources is reordered before this point */
    __atomic_thread_fence(__ATOMIC_ACQUIRE);
}

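/* Grant the lock to this core's successor (which is spinning on our node) and
 * adopt the predecessor's node as this core's node for the next acquisition. */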
static inline void FORCE_INLINE clh_lock_release(word_t cpu)
{
    /* make sure no access to the protected resources is reordered past this point */
    __atomic_thread_fence(__ATOMIC_RELEASE);

    big_kernel_lock.node_owners[cpu].node->value = CLHState_Granted;
    big_kernel_lock.node_owners[cpu].node =
        big_kernel_lock.node_owners[cpu].next;
}

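/* True while the current core's queue node is still Pending, i.e. the core
 * holds the big kernel lock or is queued waiting for it. */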
static inline bool_t FORCE_INLINE clh_is_self_in_queue(void)
{
    return big_kernel_lock.node_owners[getCurrentCPUIndex()].node->value == CLHState_Pending;
}

#define NODE_LOCK(_irqPath) do {                      \
    clh_lock_acquire(getCurrentCPUIndex(), _irqPath); \
} while(0)

#define NODE_UNLOCK do {                              \
    clh_lock_release(getCurrentCPUIndex());           \
} while(0)

#define NODE_LOCK_IF(_cond, _irqPath) do {            \
    if((_cond)) {                                     \
        NODE_LOCK(_irqPath);                          \
    }                                                 \
} while(0)

#define NODE_UNLOCK_IF_HELD do {                      \
    if(clh_is_self_in_queue()) {                      \
        NODE_UNLOCK;                                  \
    }                                                 \
} while(0)

#else
#define NODE_LOCK(_irq) do {} while (0)
#define NODE_UNLOCK do {} while (0)
#define NODE_LOCK_IF(_cond, _irq) do {} while (0)
#define NODE_UNLOCK_IF_HELD do {} while (0)
#endif /* ENABLE_SMP_SUPPORT */

#define NODE_LOCK_SYS NODE_LOCK(false)
#define NODE_LOCK_IRQ NODE_LOCK(true)
#define NODE_LOCK_SYS_IF(_cond) NODE_LOCK_IF(_cond, false)
#define NODE_LOCK_IRQ_IF(_cond) NODE_LOCK_IF(_cond, true)
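
/* Illustrative usage sketch only; the real call sites are elsewhere in the
 * kernel, not in this header:
 *
 *     NODE_LOCK_SYS;        // syscall path: take the big kernel lock
 *     ...                   // perform the kernel operation
 *     NODE_UNLOCK_IF_HELD;  // release only if this core actually queued
 *
 * NODE_LOCK_IRQ is the interrupt-path variant; the boolean it passes through
 * ends up as the 'irqPath' argument to handleIPI. */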