/*
 * SPDX-License-Identifier: Apache-2.0
 * SPDX-FileCopyrightText: Copyright The Mbed TLS Contributors
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <arch_helpers.h>
#include <assert.h>
#include <cpuid.h>
#include <debug.h>
#include <errno.h>
#include <mbedtls/memory_buffer_alloc.h>
#include <mbedtls/platform.h>
#include <memory_alloc.h>
#include <sizes.h>
#include <string.h>

#define MAGIC1		UL(0xFF00AA55)
#define MAGIC2		UL(0xEE119966)
#define MAX_BT		20

#if defined(MBEDTLS_MEMORY_DEBUG)
#error MBEDTLS_MEMORY_DEBUG is not supported by this allocator.
#endif

#if defined(MBEDTLS_MEMORY_BUFFER_ALLOC_C)
/*
 * If MBEDTLS_MEMORY_BUFFER_ALLOC_C is defined, the buffer allocator from
 * Mbed TLS would be used instead of this one, which is not desired.
 */
#error MBEDTLS_MEMORY_BUFFER_ALLOC_C is defined
#endif /* MBEDTLS_MEMORY_BUFFER_ALLOC_C */

#if defined(MBEDTLS_MEMORY_BACKTRACE)
#error MBEDTLS_MEMORY_BACKTRACE is not supported by this allocator.
#endif /* MBEDTLS_MEMORY_BACKTRACE */

#if defined(MBEDTLS_THREADING_C)
/*
 * This allocator does not support multithreading. It does, however,
 * support multiple heaps (one per CPU).
 */
#error MBEDTLS_THREADING_C is not supported by this allocator.
#endif /* MBEDTLS_THREADING_C */

#if defined(MBEDTLS_SELF_TEST)
#error MBEDTLS_SELF_TEST is not supported by this allocator.
#endif /* MBEDTLS_SELF_TEST */

/* Array of heaps per CPU */
static struct buffer_alloc_ctx *ctx_per_cpu[MAX_CPUS];

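/*
 * Return the heap context assigned to the calling CPU.
 * It is a programming error to call this before a heap has been assigned
 * with buffer_alloc_ctx_assign(); in that case the function panics.
 */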
static inline struct buffer_alloc_ctx *get_heap_ctx(void)
{
	struct buffer_alloc_ctx *ctx;
	unsigned int cpu_id = my_cpuid();

	assert(cpu_id < MAX_CPUS);

	ctx = ctx_per_cpu[cpu_id];
	/* Programming error if heap is not assigned */
	if (ctx == NULL) {
		ERROR("No heap assigned to CPU %u\n", cpu_id);
		panic();
	}

	return ctx;
}

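/*
 * Sanity-check a single block header: magic values, allocation flag and
 * basic consistency of the list pointers. Returns 0 if the header looks
 * valid, 1 otherwise.
 */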
static int verify_header(struct memory_header_s *hdr)
{
	if (hdr->magic1 != MAGIC1) {
		return 1;
	}

	if (hdr->magic2 != MAGIC2) {
		return 1;
	}

	if (hdr->alloc > 1UL) {
		return 1;
	}

	if (hdr->prev != NULL && hdr->prev == hdr->next) {
		return 1;
	}

	if (hdr->prev_free != NULL && hdr->prev_free == hdr->next_free) {
		return 1;
	}

	return 0;
}

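/*
 * Walk the full block list of a heap, verifying every header and the
 * consistency of the prev/next links. Returns 0 if the chain is intact,
 * 1 if any corruption is detected.
 */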
static int verify_chain(struct buffer_alloc_ctx *heap)
{
	struct memory_header_s *prv = heap->first;
	struct memory_header_s *cur;

	if (prv == NULL || verify_header(prv) != 0) {
		return 1;
	}

	if (heap->first->prev != NULL) {
		return 1;
	}

	cur = heap->first->next;

	while (cur != NULL) {
		if (verify_header(cur) != 0) {
			return 1;
		}

		if (cur->prev != prv) {
			return 1;
		}

		prv = cur;
		cur = cur->next;
	}

	return 0;
}

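/*
 * Allocate a zero-initialised block of n * size bytes from the given heap
 * using a first-fit search of the free list. The request is rounded up to
 * MBEDTLS_MEMORY_ALIGN_MULTIPLE and the chosen free block is split when
 * enough room is left for another header plus an aligned payload.
 * Returns NULL on overflow or if no suitable block is found.
 */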
static void *buffer_alloc_calloc_with_heap(struct buffer_alloc_ctx *heap,
					   size_t n,
					   size_t size)
{
	struct memory_header_s *new;
	struct memory_header_s *cur = heap->first_free;
	unsigned char *p;
	void *ret;
	size_t original_len, len;

	if (heap->buf == NULL || heap->first == NULL) {
		return NULL;
	}

	original_len = len = n * size;

	if (n == 0UL || size == 0UL || len / n != size) {
		return NULL;
	} else if (len > (size_t)-MBEDTLS_MEMORY_ALIGN_MULTIPLE) {
		return NULL;
	}

	if ((len % MBEDTLS_MEMORY_ALIGN_MULTIPLE) != 0) {
		len -= len % MBEDTLS_MEMORY_ALIGN_MULTIPLE;
		len += MBEDTLS_MEMORY_ALIGN_MULTIPLE;
	}

	/* Find block that fits */
	while (cur != NULL) {
		if (cur->size >= len) {
			break;
		}
		cur = cur->next_free;
	}

	if (cur == NULL) {
		return NULL;
	}

	if (cur->alloc != 0UL) {
		assert(false);
	}

	/*
	 * Found a fitting block. Split it only if at least a header plus one
	 * alignment multiple of room is left over; otherwise hand out the
	 * whole block.
	 */
	if ((cur->size - len) <
	    (sizeof(struct memory_header_s) + MBEDTLS_MEMORY_ALIGN_MULTIPLE)) {
		cur->alloc = 1UL;

		/* Remove from free_list */
		if (cur->prev_free != NULL) {
			cur->prev_free->next_free = cur->next_free;
		} else {
			heap->first_free = cur->next_free;
		}

		if (cur->next_free != NULL) {
			cur->next_free->prev_free = cur->prev_free;
		}

		cur->prev_free = NULL;
		cur->next_free = NULL;

		if ((heap->verify & MBEDTLS_MEMORY_VERIFY_ALLOC) != 0) {
			assert(verify_chain(heap) == 0);
		}

		ret = (unsigned char *) cur + sizeof(struct memory_header_s);
		memset(ret, 0, original_len);

		return ret;
	}

	p = ((unsigned char *) cur) + sizeof(struct memory_header_s) + len;
	new = (struct memory_header_s *) p;

	new->size = cur->size - len - sizeof(struct memory_header_s);
	new->alloc = 0;
	new->prev = cur;
	new->next = cur->next;
	new->magic1 = MAGIC1;
	new->magic2 = MAGIC2;

	if (new->next != NULL) {
		new->next->prev = new;
	}

	/* Replace cur with new in free_list */
	new->prev_free = cur->prev_free;
	new->next_free = cur->next_free;
	if (new->prev_free != NULL) {
		new->prev_free->next_free = new;
	} else {
		heap->first_free = new;
	}

	if (new->next_free != NULL) {
		new->next_free->prev_free = new;
	}

	cur->alloc = 1;
	cur->size = len;
	cur->next = new;
	cur->prev_free = NULL;
	cur->next_free = NULL;

	if ((heap->verify & MBEDTLS_MEMORY_VERIFY_ALLOC) != 0) {
		assert(verify_chain(heap) == 0);
	}

	ret = (unsigned char *) cur + sizeof(struct memory_header_s);
	memset(ret, 0, original_len);

	return ret;
}

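/*
 * calloc()-style entry point: allocate a zero-initialised block of
 * n * size bytes from the heap assigned to the calling CPU. Presumably
 * intended to be plugged into Mbed TLS as its calloc() implementation
 * (an assumption based on the mbedtls/platform.h include; any such
 * registration happens outside this file).
 */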
void *buffer_alloc_calloc(size_t n, size_t size)
{
	struct buffer_alloc_ctx *heap = get_heap_ctx();

	assert(heap);
	return buffer_alloc_calloc_with_heap(heap, n, size);
}

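/*
 * Return a block previously obtained from buffer_alloc_calloc_with_heap()
 * to the given heap. Adjacent free blocks are coalesced and the block is
 * put back on the free list. Passing a pointer that does not belong to the
 * heap, or freeing a block that is not allocated, trips an assertion.
 */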
static void buffer_alloc_free_with_heap(struct buffer_alloc_ctx *heap,
					void *ptr)
{
	struct memory_header_s *hdr;
	struct memory_header_s *old = NULL;
	unsigned char *p = (unsigned char *) ptr;

	if (ptr == NULL || heap->buf == NULL || heap->first == NULL) {
		return;
	}

	if (p < heap->buf || p >= heap->buf + heap->len) {
		assert(0);
	}

	p -= sizeof(struct memory_header_s);
	hdr = (struct memory_header_s *) p;

	assert(verify_header(hdr) == 0);

	if (hdr->alloc != 1) {
		assert(0);
	}

	hdr->alloc = 0;

	/* Regroup with block before */
	if (hdr->prev != NULL && hdr->prev->alloc == 0UL) {
		hdr->prev->size += sizeof(struct memory_header_s) + hdr->size;
		hdr->prev->next = hdr->next;
		old = hdr;
		hdr = hdr->prev;

		if (hdr->next != NULL) {
			hdr->next->prev = hdr;
		}

		memset(old, 0, sizeof(struct memory_header_s));
	}

	/* Regroup with block after */
	if (hdr->next != NULL && hdr->next->alloc == 0UL) {
		hdr->size += sizeof(struct memory_header_s) + hdr->next->size;
		old = hdr->next;
		hdr->next = hdr->next->next;

		if (hdr->prev_free != NULL || hdr->next_free != NULL) {
			if (hdr->prev_free != NULL) {
				hdr->prev_free->next_free = hdr->next_free;
			} else {
				heap->first_free = hdr->next_free;
			}
			if (hdr->next_free != NULL) {
				hdr->next_free->prev_free = hdr->prev_free;
			}
		}

		hdr->prev_free = old->prev_free;
		hdr->next_free = old->next_free;

		if (hdr->prev_free != NULL) {
			hdr->prev_free->next_free = hdr;
		} else {
			heap->first_free = hdr;
		}

		if (hdr->next_free != NULL) {
			hdr->next_free->prev_free = hdr;
		}

		if (hdr->next != NULL) {
			hdr->next->prev = hdr;
		}

		memset(old, 0, sizeof(struct memory_header_s));
	}

	/*
	 * Prepend to free_list if we have not merged
	 * (Does not have to stay in same order as prev / next list)
	 */
	if (old == NULL) {
		hdr->next_free = heap->first_free;
		if (heap->first_free != NULL) {
			heap->first_free->prev_free = hdr;
		}
		heap->first_free = hdr;
	}

	if ((heap->verify & MBEDTLS_MEMORY_VERIFY_FREE) != 0) {
		assert(verify_chain(heap) == 0);
	}
}

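/*
 * free()-style counterpart of buffer_alloc_calloc(): return the block to
 * the heap assigned to the calling CPU.
 */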
void buffer_alloc_free(void *ptr)
{
	struct buffer_alloc_ctx *heap = get_heap_ctx();

	assert(heap);
	buffer_alloc_free_with_heap(heap, ptr);
}

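/*
 * Assign a heap context to the calling CPU. Returns 0 on success, or
 * -EINVAL if ctx is NULL or a heap is already assigned to this CPU.
 *
 * The expected call sequence (a sketch; the actual callers live outside
 * this file) is:
 *
 *	(void)buffer_alloc_ctx_assign(ctx);
 *	mbedtls_memory_buffer_alloc_init(buf, len);
 *	... buffer_alloc_calloc() / buffer_alloc_free() ...
 *	buffer_alloc_ctx_unassign();
 */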
int buffer_alloc_ctx_assign(struct buffer_alloc_ctx *ctx)
{
	unsigned int cpuid = my_cpuid();

	assert(cpuid < MAX_CPUS);

	if (ctx == NULL) {
		return -EINVAL;
	}

	if (ctx_per_cpu[cpuid] != NULL) {
		/* multiple assign */
		return -EINVAL;
	}

	ctx_per_cpu[cpuid] = ctx;

	return 0;
}

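/*
 * Remove the heap context assigned to the calling CPU. Unassigning a CPU
 * that has no heap assigned is treated as a programming error.
 */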
void buffer_alloc_ctx_unassign(void)
{
	unsigned int cpuid = my_cpuid();

	assert(cpuid < MAX_CPUS);

	/* multiple unassign */
	assert(ctx_per_cpu[cpuid] != NULL);

	ctx_per_cpu[cpuid] = NULL;
}

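/*
 * Set the MBEDTLS_MEMORY_VERIFY_* mode of the heap assigned to the calling
 * CPU. Presumably provided only to match the interface declared in
 * mbedtls/memory_buffer_alloc.h; it is not expected to be reached at
 * runtime (see the assert below).
 */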
void mbedtls_memory_buffer_set_verify(int verify)
{
	struct buffer_alloc_ctx *heap = get_heap_ctx();

	/* This function is not expected to be called; trap any use of it. */
	assert(false);

	assert(heap);
	heap->verify = verify;
}

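/*
 * Verify the consistency of the heap assigned to the calling CPU.
 * Returns 0 if the chain of headers is intact, 1 otherwise.
 */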
int mbedtls_memory_buffer_alloc_verify(void)
{
	struct buffer_alloc_ctx *heap = get_heap_ctx();

	assert(heap);
	return verify_chain(heap);
}

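/*
 * Initialise the heap assigned to the calling CPU with the given buffer.
 * The buffer start is aligned up to MBEDTLS_MEMORY_ALIGN_MULTIPLE if
 * needed, and a single free block spanning the whole (aligned) buffer is
 * created. If the buffer is too small to hold one header plus an aligned
 * payload, the heap is left empty.
 */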
void mbedtls_memory_buffer_alloc_init(unsigned char *buf, size_t len)
{
	/*
	 * The heap structure is obtained from the REC while the buffer is
	 * passed in the init function. This way the interface can remain
	 * the same.
	 */
	struct buffer_alloc_ctx *heap = get_heap_ctx();

	assert(heap);

	memset(heap, 0, sizeof(struct buffer_alloc_ctx));

	if (len < sizeof(struct memory_header_s) +
	    MBEDTLS_MEMORY_ALIGN_MULTIPLE) {
		return;
	} else if ((size_t)buf % MBEDTLS_MEMORY_ALIGN_MULTIPLE) {
		/* Adjust len first since buf is used in the computation */
		len -= MBEDTLS_MEMORY_ALIGN_MULTIPLE
			- ((size_t)buf % MBEDTLS_MEMORY_ALIGN_MULTIPLE);
		buf += MBEDTLS_MEMORY_ALIGN_MULTIPLE
			- ((size_t)buf % MBEDTLS_MEMORY_ALIGN_MULTIPLE);
	}

	memset(buf, 0, len);

	heap->buf = buf;
	heap->len = len;

	heap->first = (struct memory_header_s *)buf;
	heap->first->size = len - sizeof(struct memory_header_s);
	heap->first->magic1 = MAGIC1;
	heap->first->magic2 = MAGIC2;
	heap->first_free = heap->first;
}

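/*
 * Tear down the heap assigned to the calling CPU by clearing its context.
 * The backing buffer itself is not touched.
 */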
void mbedtls_memory_buffer_alloc_free(void)
{
	struct buffer_alloc_ctx *heap = get_heap_ctx();

	assert(heap);
	memset(heap, 0, sizeof(struct buffer_alloc_ctx));
}