/*
 * SPDX-License-Identifier: BSD-3-Clause
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 */

#ifndef GRANULE_H
#define GRANULE_H

#include <assert.h>
#include <atomics.h>
#include <buffer.h>
#include <granule_types.h>
#include <memory.h>
#include <spinlock.h>
#include <status.h>

static inline unsigned long granule_refcount_read_relaxed(struct granule *g)
{
	return __sca_read64(&g->refcount);
}

static inline unsigned long granule_refcount_read_acquire(struct granule *g)
{
	return __sca_read64_acquire(&g->refcount);
}
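
/*
 * Note: the relaxed reader is enough for local sanity checks such as the
 * assertions below, whereas the acquire variant is used when the value read
 * gates access to data that another CPU may publish lock-free (see
 * find_lock_unused_granule() at the end of this header).
 */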

/*
 * Sanity-check unlocked granule invariants.
 *
 * These invariants must hold for any granule which is unlocked.
 *
 * These invariants may not hold transiently while a granule is locked (e.g.
 * when transitioning to/from delegated state).
 *
 * Note: this function is purely for debug/documentation purposes, and is not
 * intended as a mechanism to ensure correctness.
 */
static inline void __granule_assert_unlocked_invariants(struct granule *g,
							enum granule_state state)
{
	switch (state) {
	case GRANULE_STATE_NS:
		assert(granule_refcount_read_relaxed(g) == 0UL);
		break;
	case GRANULE_STATE_DELEGATED:
		assert(g->refcount == 0UL);
		break;
	case GRANULE_STATE_RD:
		/*
		 * refcount is used to check if RD and associated granules can
		 * be freed because they're no longer referenced by any other
		 * object. Can be any non-negative number.
		 */
		break;
	case GRANULE_STATE_REC:
		assert(granule_refcount_read_relaxed(g) <= 1UL);
		break;
	case GRANULE_STATE_DATA:
		assert(g->refcount == 0UL);
		break;
	case GRANULE_STATE_RTT:
		/*
		 * As in the RD case, the refcount can be any non-negative
		 * number (which is trivially true for an unsigned counter).
		 */
		assert(g->refcount >= 0UL);
		break;
	case GRANULE_STATE_REC_AUX:
		assert(g->refcount == 0UL);
		break;
	default:
		/* Unknown granule state */
		assert(false);
	}
}

/* Must be called with g->lock held */
static inline enum granule_state granule_get_state(struct granule *g)
{
	return g->state;
}

/* Must be called with g->lock held */
static inline void granule_set_state(struct granule *g,
				     enum granule_state state)
{
	g->state = state;
}

/*
 * Acquire the spinlock and then check the expected state.
 * Fails (and releases the lock) if the granule is not in the expected state,
 * i.e. an unexpected locking sequence was detected.
 * Also asserts that the unlocked-state invariants hold.
 */
static inline bool granule_lock_on_state_match(struct granule *g,
					       enum granule_state expected_state)
{
	spinlock_acquire(&g->lock);

	if (granule_get_state(g) != expected_state) {
		spinlock_release(&g->lock);
		return false;
	}

	__granule_assert_unlocked_invariants(g, expected_state);
	return true;
}

/*
 * Used when we're certain of the type of an object (e.g. because we hold a
 * reference to it). In these cases we should never fail to acquire the lock.
 */
static inline void granule_lock(struct granule *g,
				enum granule_state expected_state)
{
	__unused bool locked = granule_lock_on_state_match(g, expected_state);

	assert(locked);
}

static inline void granule_unlock(struct granule *g)
{
	__granule_assert_unlocked_invariants(g, granule_get_state(g));
	spinlock_release(&g->lock);
}
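
/*
 * Illustrative sketch (not a specific caller): when a reference already
 * guarantees the state of a granule, lock it with granule_lock() and release
 * it with granule_unlock() once done. Addresses received from an untrusted
 * caller instead go through find_lock_granule(), declared further below,
 * which validates the address and the expected state before returning a
 * locked granule.
 *
 *	granule_lock(g_rec, GRANULE_STATE_REC);
 *	... operate on the REC granule ...
 *	granule_unlock(g_rec);
 */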

/* Transition state to @new_state and unlock the granule */
static inline void granule_unlock_transition(struct granule *g,
					     enum granule_state new_state)
{
	granule_set_state(g, new_state);
	granule_unlock(g);
}
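
/*
 * Illustrative sketch of a state transition under the lock, loosely modelled
 * on an undelegation-style flow (the slot name and error handling are
 * assumptions, not copied from a real caller):
 *
 *	g = find_lock_granule(addr, GRANULE_STATE_DELEGATED);
 *	if (g == NULL) {
 *		return RMI_ERROR_INPUT;
 *	}
 *	granule_memzero(g, SLOT_DELEGATED);	(slot name assumed)
 *	granule_unlock_transition(g, GRANULE_STATE_NS);
 */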

unsigned long granule_addr(struct granule *g);
struct granule *addr_to_granule(unsigned long addr);
struct granule *find_granule(unsigned long addr);
struct granule *find_lock_granule(unsigned long addr,
				  enum granule_state expected_state);

bool find_lock_two_granules(unsigned long addr1,
			    enum granule_state expected_state1,
			    struct granule **g1,
			    unsigned long addr2,
			    enum granule_state expected_state2,
			    struct granule **g2);
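
/*
 * Illustrative sketch of locking two granules in one call (argument names are
 * placeholders); the call fails if either granule does not pass its checks:
 *
 *	struct granule *g_rtt, *g_data;
 *
 *	if (!find_lock_two_granules(rtt_addr, GRANULE_STATE_RTT, &g_rtt,
 *				    data_addr, GRANULE_STATE_DATA, &g_data)) {
 *		return RMI_ERROR_INPUT;
 *	}
 *	... operate on both granules ...
 *	granule_unlock(g_data);
 *	granule_unlock(g_rtt);
 */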

void granule_memzero(struct granule *g, enum buffer_slot slot);

void granule_memzero_mapped(void *buf);

/* Must be called with g->lock held */
static inline void __granule_get(struct granule *g)
{
	g->refcount++;
}

/* Must be called with g->lock held */
static inline void __granule_put(struct granule *g)
{
	assert(g->refcount > 0UL);
	g->refcount--;
}

/* Must be called with g->lock held */
static inline void __granule_refcount_inc(struct granule *g, unsigned long val)
{
	g->refcount += val;
}

/* Must be called with g->lock held */
static inline void __granule_refcount_dec(struct granule *g, unsigned long val)
{
	assert(g->refcount >= val);
	g->refcount -= val;
}
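
/*
 * Illustrative sketch of the locked reference counting above (granule names
 * are placeholders): an RD's refcount is raised for each object that starts
 * referencing it and lowered again when that reference goes away, always
 * while holding the RD lock:
 *
 *	granule_lock(g_rd, GRANULE_STATE_RD);
 *	__granule_get(g_rd);		(a new object now references the RD)
 *	granule_unlock(g_rd);
 *	...
 *	granule_lock(g_rd, GRANULE_STATE_RD);
 *	__granule_put(g_rd);		(that reference has been dropped)
 *	granule_unlock(g_rd);
 */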

/*
 * Atomically increments the reference counter of the granule.
 */
static inline void atomic_granule_get(struct granule *g)
{
	atomic_add_64(&g->refcount, 1UL);
}

/*
 * Atomically decrements the reference counter of the granule.
 */
static inline void atomic_granule_put(struct granule *g)
{
	atomic_add_64(&g->refcount, -1L);
}

/*
 * Atomically decrements the reference counter of the granule.
 * Stores to memory with release semantics.
 */
static inline void atomic_granule_put_release(struct granule *g)
{
	unsigned long old_refcount __unused;

	old_refcount = atomic_load_add_release_64(&g->refcount, -1L);
	assert(old_refcount > 0UL);
}
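
/*
 * The atomic helpers above exist because some granules (e.g. a REC, as noted
 * in find_lock_unused_granule() below) have their refcount manipulated
 * without holding the granule lock. Illustrative sketch (not a real caller):
 *
 *	atomic_granule_get(g_rec);		(lock-free: REC is now in use)
 *	... run without holding g_rec->lock ...
 *	atomic_granule_put_release(g_rec);	(publish that it is free again)
 */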

/*
 * Obtain a pointer to a locked unused granule at @addr if @addr is a valid
 * granule physical address, the state of the granule at @addr is
 * @expected_state, and the granule at @addr is unused.
 *
 * Returns:
 * Pointer to the locked granule if all of the above checks pass.
 * Otherwise, an error code encoded into the return value with status_ptr():
 * RMI_ERROR_INPUT if @addr is not aligned to the size of a granule.
 * RMI_ERROR_INPUT if @addr is out of range.
 * RMI_ERROR_INPUT if the state of the granule at @addr is not
 * @expected_state.
 * RMI_ERROR_IN_USE if the granule at @addr has a non-zero
 * reference count.
 */
static inline
struct granule *find_lock_unused_granule(unsigned long addr,
					 enum granule_state expected_state)
{
	struct granule *g;

	g = find_lock_granule(addr, expected_state);
	if (g == NULL) {
		return status_ptr(RMI_ERROR_INPUT);
	}

	/*
	 * Granules can have lock-free access (e.g. REC), thus using acquire
	 * semantics to avoid race conditions.
	 */
	if (granule_refcount_read_acquire(g) != 0UL) {
		granule_unlock(g);
		return status_ptr(RMI_ERROR_IN_USE);
	}

	return g;
}
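
/*
 * Illustrative sketch of a caller; how the encoded error is unpacked depends
 * on what status.h provides, so the helpers named below are assumptions:
 *
 *	g = find_lock_unused_granule(addr, GRANULE_STATE_DELEGATED);
 *	if (is_error_ptr(g)) {			(hypothetical helper)
 *		return error_from_ptr(g);	(hypothetical helper)
 *	}
 *	... repurpose the granule ...
 *	granule_unlock(g);
 */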

#endif /* GRANULE_H */