/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/mpool.h"

#include <stdbool.h>

#include "hf/arch/std.h"

/*
 * A contiguous run of free memory. The header lives in-place at the start
 * of the run; `limit` points one past its end.
 */
struct mpool_chunk {
	struct mpool_chunk *next_chunk;
	struct mpool_chunk *limit;
};

/* A single free entry, linked in-place through the pool's free list. */
struct mpool_entry {
	struct mpool_entry *next;
};

static bool mpool_locks_enabled = false;

/**
 * Enables the locks protecting memory pools. Before this function is called,
 * the locks are disabled, that is, acquiring/releasing them is a no-op.
 */
void mpool_enable_locks(void)
{
	mpool_locks_enabled = true;
}
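
/*
 * Example: a plausible boot-time pattern (a sketch; `ppool`, `heap` and the
 * 64-byte entry size are illustrative, not defined in this file). Early boot
 * runs on a single CPU, so a pool can be populated lock-free and the locks
 * enabled only once concurrency begins:
 *
 *	mpool_init(&ppool, 64);
 *	mpool_add_chunk(&ppool, heap, sizeof(heap));
 *	mpool_enable_locks();
 */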

/**
 * Acquires the lock protecting the given memory pool, if locks are enabled.
 */
static void mpool_lock(struct mpool *p)
{
	if (mpool_locks_enabled) {
		sl_lock(&p->lock);
	}
}

/**
 * Releases the lock protecting the given memory pool, if locks are enabled.
 */
static void mpool_unlock(struct mpool *p)
{
	if (mpool_locks_enabled) {
		sl_unlock(&p->lock);
	}
}

/**
 * Initialises the given memory pool with the given entry size, which must be
 * at least the size of two pointers.
 *
 * All entries stored in the memory pool will be aligned to at least the entry
 * size.
 */
void mpool_init(struct mpool *p, size_t entry_size)
{
	p->entry_size = entry_size;
	p->chunk_list = NULL;
	p->entry_list = NULL;
	p->fallback = NULL;
	sl_init(&p->lock);
}
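
/*
 * Example: setting up a pool over a static buffer (a sketch; the 64-byte
 * entry size and `buf` are illustrative):
 *
 *	static struct mpool p;
 *	static char buf[64 * 128];
 *
 *	mpool_init(&p, 64);
 *	mpool_add_chunk(&p, buf, sizeof(buf));
 *	void *e = mpool_alloc(&p);	// one 64-byte-aligned entry
 *	mpool_free(&p, e);
 */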

/**
 * Initialises the given memory pool by replicating the properties of `from`.
 * It also pulls the chunk and free lists from `from`, consuming all its
 * resources and making them available via the new memory pool.
 */
void mpool_init_from(struct mpool *p, struct mpool *from)
{
	mpool_init(p, from->entry_size);

	mpool_lock(from);
	p->chunk_list = from->chunk_list;
	p->entry_list = from->entry_list;
	p->fallback = from->fallback;

	from->chunk_list = NULL;
	from->entry_list = NULL;
	from->fallback = NULL;
	mpool_unlock(from);
}
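
/*
 * Example: transferring ownership of a pool (a sketch; `boot_pool` is
 * illustrative):
 *
 *	struct mpool main_pool;
 *	mpool_init_from(&main_pool, &boot_pool);
 *	// boot_pool is now empty; main_pool owns its chunks, entries and
 *	// fallback.
 */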

/**
 * Initialises the given memory pool with a fallback memory pool to use if this
 * pool runs out of memory. The entry size is inherited from the fallback.
 */
void mpool_init_with_fallback(struct mpool *p, struct mpool *fallback)
{
	mpool_init(p, fallback->entry_size);
	p->fallback = fallback;
}

/**
 * Finishes the given memory pool, giving all free memory to the fallback pool
 * if there is one.
 */
void mpool_fini(struct mpool *p)
{
	struct mpool_entry *entry;
	struct mpool_chunk *chunk;

	if (!p->fallback) {
		return;
	}

	mpool_lock(p);

	/* Merge the freelist into the fallback. */
	entry = p->entry_list;
	while (entry != NULL) {
		void *ptr = entry;

		entry = entry->next;
		mpool_free(p->fallback, ptr);
	}

	/* Merge the chunk list into the fallback. */
	chunk = p->chunk_list;
	while (chunk != NULL) {
		void *ptr = chunk;
		size_t size = (uintptr_t)chunk->limit - (uintptr_t)chunk;

		chunk = chunk->next_chunk;
		mpool_add_chunk(p->fallback, ptr, size);
	}

	p->chunk_list = NULL;
	p->entry_list = NULL;
	p->fallback = NULL;

	mpool_unlock(p);
}
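
/*
 * Example: a short-lived local pool that borrows from a parent and returns
 * everything on exit (a sketch; `parent` is illustrative):
 *
 *	struct mpool local;
 *	mpool_init_with_fallback(&local, &parent);
 *	void *e = mpool_alloc(&local);	// satisfied by `parent` via fallback
 *	mpool_free(&local, e);		// freed into `local`, not `parent`
 *	mpool_fini(&local);		// hands free memory back to `parent`
 */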

/**
 * Adds a contiguous chunk of memory to the given memory pool. The chunk will
 * eventually be broken up into entries of the size held by the memory pool.
 *
 * Only the portions aligned to the entry size will be added to the pool.
 *
 * Returns true if at least a portion of the chunk was added to the pool, or
 * false if none of the buffer was usable in the pool.
 */
bool mpool_add_chunk(struct mpool *p, void *begin, size_t size)
{
	struct mpool_chunk *chunk;
	char *new_begin;
	char *new_end;

	/* Round begin address up, and end address down. */
	new_begin = (void *)align_up((char *)begin, p->entry_size);
	new_end = (void *)align_down((char *)begin + size, p->entry_size);

	/* Nothing to do if there isn't enough room for an entry. */
	if (new_begin >= new_end || new_end - new_begin < p->entry_size) {
		return false;
	}

	chunk = (struct mpool_chunk *)new_begin;
	chunk->limit = (struct mpool_chunk *)new_end;

	mpool_lock(p);
	chunk->next_chunk = p->chunk_list;
	p->chunk_list = chunk;
	mpool_unlock(p);

	return true;
}
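
/*
 * Example: only the aligned interior of a buffer enters the pool (a sketch,
 * assuming a 16-byte entry size and a 16-byte-aligned `raw`):
 *
 *	char raw[100];
 *	mpool_add_chunk(&p, raw + 3, 90);
 *	// begin is rounded up to raw + 16 and end down to raw + 80,
 *	// so 64 of the 90 bytes are actually added.
 */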

/**
 * Allocates an entry from the given memory pool, if one is available. The
 * fallback will not be used even if there is one.
 */
static void *mpool_alloc_no_fallback(struct mpool *p)
{
	void *ret;
	struct mpool_chunk *chunk;
	struct mpool_chunk *new_chunk;

	/* Fetch an entry from the free list if one is available. */
	mpool_lock(p);
	if (p->entry_list != NULL) {
		struct mpool_entry *entry = p->entry_list;

		p->entry_list = entry->next;
		ret = entry;
		goto exit;
	}

	/* The free list is empty; try a chunk instead. */
	chunk = p->chunk_list;
	if (chunk == NULL) {
		/* The chunk list is also empty, we're out of entries. */
		ret = NULL;
		goto exit;
	}

	/*
	 * Carve one entry off the front of the chunk: if that exhausts the
	 * chunk, unlink it, otherwise move its header forward by one entry.
	 */
	new_chunk = (struct mpool_chunk *)((char *)chunk + p->entry_size);
	if (new_chunk >= chunk->limit) {
		p->chunk_list = chunk->next_chunk;
	} else {
		*new_chunk = *chunk;
		p->chunk_list = new_chunk;
	}

	ret = chunk;

exit:
	mpool_unlock(p);

	return ret;
}

/**
 * Allocates an entry from the given memory pool, if one is available. If none
 * is available, it tries to allocate from the fallback, if there is one.
 */
void *mpool_alloc(struct mpool *p)
{
	do {
		void *ret = mpool_alloc_no_fallback(p);

		if (ret != NULL) {
			return ret;
		}

		p = p->fallback;
	} while (p != NULL);

	return NULL;
}
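
/*
 * Example: fallbacks chain, so allocation walks the pools in order (a
 * sketch; `l1` has already been initialised and given memory):
 *
 *	mpool_init_with_fallback(&l2, &l1);	// l2 -> l1
 *	mpool_init_with_fallback(&l3, &l2);	// l3 -> l2 -> l1
 *	void *e = mpool_alloc(&l3);	// tries l3, then l2, then l1
 */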

/**
 * Frees an entry back into the memory pool, making it available for reuse.
 *
 * This is meant to be used for freeing single entries. To free multiple
 * entries, one must call mpool_add_chunk instead.
 */
void mpool_free(struct mpool *p, void *ptr)
{
	struct mpool_entry *e = ptr;

	/* Store the newly freed entry at the front of the free list. */
	mpool_lock(p);
	e->next = p->entry_list;
	p->entry_list = e;
	mpool_unlock(p);
}
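
/*
 * Example: the free list is LIFO, so the most recently freed entry is the
 * next one returned (a sketch):
 *
 *	void *a = mpool_alloc(&p);
 *	mpool_free(&p, a);
 *	void *b = mpool_alloc(&p);	// b == a
 */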

/**
 * Allocates a number of contiguous and aligned entries. The fallback will not
 * be used, even if there is one.
 */
void *mpool_alloc_contiguous_no_fallback(struct mpool *p, size_t count,
					 size_t align)
{
	struct mpool_chunk **prev;
	void *ret = NULL;

	align *= p->entry_size;

	mpool_lock(p);

	/*
	 * Go through the chunk list in search of one with enough room for the
	 * requested allocation.
	 */
	prev = &p->chunk_list;
	while (*prev != NULL) {
		char *start;
		struct mpool_chunk *new_chunk;
		struct mpool_chunk *chunk = *prev;

		/* Round start address up to the required alignment. */
		start = (void *)align_up((char *)chunk, align);

		/*
		 * Calculate where the new chunk would be if we consume the
		 * requested number of entries. Then check if this chunk is big
		 * enough to satisfy the request.
		 */
		new_chunk =
			(struct mpool_chunk *)(start + (count * p->entry_size));
		if (new_chunk <= chunk->limit) {
			/* Remove the consumed area. */
			if (new_chunk == chunk->limit) {
				*prev = chunk->next_chunk;
			} else {
				*new_chunk = *chunk;
				*prev = new_chunk;
			}

			/*
			 * Add back the space consumed by the alignment
			 * requirement, if it's big enough to fit an entry.
			 */
			if (start - (char *)chunk >= p->entry_size) {
				chunk->next_chunk = *prev;
				*prev = chunk;
				chunk->limit = (struct mpool_chunk *)start;
			}

			ret = (void *)start;
			break;
		}

		prev = &chunk->next_chunk;
	}

	mpool_unlock(p);

	return ret;
}

/**
 * Allocates a number of contiguous and aligned entries. This is a best-effort
 * operation and only succeeds if such entries can be found in the chunk list
 * or the chunks of the fallbacks (i.e., the entry list is never used to
 * satisfy these allocations).
 *
 * The alignment is specified as the number of entries, that is, if `align` is
 * 4, the alignment in bytes will be 4 * entry_size.
 *
 * The caller can eventually free the returned entries by calling
 * mpool_add_chunk.
 */
void *mpool_alloc_contiguous(struct mpool *p, size_t count, size_t align)
{
	do {
		void *ret = mpool_alloc_contiguous_no_fallback(p, count, align);

		if (ret != NULL) {
			return ret;
		}

		p = p->fallback;
	} while (p != NULL);

	return NULL;
}
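
/*
 * Example: grabbing an aligned run of entries and handing it back later (a
 * sketch; with a 64-byte entry size, align = 4 means 256-byte alignment):
 *
 *	void *run = mpool_alloc_contiguous(&p, 8, 4);
 *	if (run != NULL) {
 *		// ... use 8 * 64 contiguous bytes ...
 *		mpool_add_chunk(&p, run, 8 * 64);	// free the whole run
 *	}
 */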