1 /*
2  * SPDX-License-Identifier: BSD-3-Clause
3  * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
4  * SPDX-FileCopyrightText: Copyright Arm Limited and Contributors.
5  */
6 
7 /* This file is derived from xlat_table_v2 library in TF-A project */
8 
9 #include <arch_features.h>
10 #include <arch_helpers.h>
11 #include <debug.h>
12 #include <errno.h>
13 #include <limits.h>
14 #include <stdbool.h>
15 #include <stdint.h>
16 #include <string.h>
17 #include <utils_def.h>
18 #include <xlat_contexts.h>
19 #include "xlat_defs_private.h"
20 #include <xlat_tables.h>
21 #include "xlat_tables_private.h"
22 
23 /*
 * Enumeration of actions that can be taken when mapping table entries,
 * depending on the previous value in that entry and information about the
 * region being mapped.
27  */
28 typedef enum {
29 
30 	/* Do nothing */
31 	ACTION_NONE,
32 
33 	/* Write a block (or page, if in level 3) entry. */
34 	ACTION_WRITE_BLOCK_ENTRY,
35 
36 	/*
37 	 * Create a new table and write a table entry pointing to it. Recurse
38 	 * into it for further processing.
39 	 */
40 	ACTION_CREATE_NEW_TABLE,
41 
42 	/*
43 	 * There is a table descriptor in this entry, read it and recurse into
44 	 * that table for further processing.
45 	 */
46 	ACTION_RECURSE_INTO_TABLE,
47 
48 } action_t;
49 
50 /* Returns a pointer to the first empty translation table. */
static inline uint64_t *xlat_table_get_empty(struct xlat_ctx *ctx)
52 {
53 	assert(ctx->tbls->next_table < ctx->tbls->tables_num);
54 	return ctx->tbls->tables[ctx->tbls->next_table++];
55 }
56 
57 /*
 * Function that returns the VA of the first table entry affected by the
 * specified mmap region.
60  */
static uintptr_t xlat_tables_find_start_va(struct xlat_mmap_region *mm,
62 					   const uintptr_t table_base_va,
63 					   const unsigned int level)
64 {
65 	uintptr_t table_idx_va;
66 
67 	if (mm->base_va > table_base_va) {
68 		/* Find the first index of the table affected by the region. */
69 		table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);
70 	} else {
71 		/* Start from the beginning of the table. */
72 		table_idx_va = table_base_va;
73 	}
74 
75 	return table_idx_va;
76 }
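
/*
 * Illustrative example for the helper above (hypothetical addresses, assuming
 * a 4KB translation granule so that level 2 blocks cover 2MB): for a region
 * with base_va = 0x40280000 handled by a level 2 table whose table_base_va is
 * 0x40000000, the first entry affected is the one covering the 2MB block that
 * contains base_va:
 *
 *	table_idx_va = 0x40280000 & ~XLAT_BLOCK_MASK(2U) = 0x40200000
 *
 * If base_va were below table_base_va, the walk would simply start at
 * table_base_va instead.
 */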
77 
78 /*
79  * Function that returns table index for the given VA and level arguments.
80  */
static inline unsigned int xlat_tables_va_to_index(const uintptr_t table_base_va,
82 						    const uintptr_t va,
83 						    const unsigned int level)
84 {
85 	return (unsigned int)((va - table_base_va) >> XLAT_ADDR_SHIFT(level));
86 }
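
/*
 * Illustrative example for the helper above (hypothetical addresses, again
 * assuming a 4KB granule): with table_base_va = 0x40000000, va = 0x40400000
 * and level = 2, the returned entry index is:
 *
 *	(0x40400000 - 0x40000000) >> XLAT_ADDR_SHIFT(2U) = 0x400000 >> 21 = 2
 */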
87 
88 /*
89  * From the given arguments, it decides which action to take when mapping the
90  * specified region.
91  */
static action_t xlat_tables_map_region_action(const struct xlat_mmap_region *mm,
93 			unsigned int desc_type, uintptr_t dest_pa,
94 			uintptr_t table_entry_base_va, unsigned int level)
95 {
96 	uintptr_t mm_end_va = mm->base_va + mm->size - 1UL;
97 	uintptr_t table_entry_end_va =
98 			table_entry_base_va + XLAT_BLOCK_SIZE(level) - 1UL;
99 
100 	/*
101 	 * The descriptor types allowed depend on the current table level.
102 	 */
103 
104 	if ((mm->base_va <= table_entry_base_va) &&
105 	    (mm_end_va >= table_entry_end_va)) {
106 
107 		/*
108 		 * Table entry is covered by region
109 		 * --------------------------------
110 		 *
111 		 * This means that this table entry can describe the whole
112 		 * translation with this granularity in principle.
113 		 */
114 
115 		if (level == 3U) {
116 			/*
117 			 * Last level, only page descriptors are allowed.
118 			 */
119 			if (desc_type == PAGE_DESC) {
120 				/*
121 				 * There's another region mapped here, don't
122 				 * overwrite.
123 				 */
124 				return ACTION_NONE;
125 			} else {
126 				if (desc_type != INVALID_DESC) {
127 					ERROR("%s (%u): Expected invalid descriptor\n",
128 						__func__, __LINE__);
129 					panic();
130 				}
131 				return ACTION_WRITE_BLOCK_ENTRY;
132 			}
133 
134 		} else {
135 
136 			/*
137 			 * Other levels. Table descriptors are allowed. Block
138 			 * descriptors too, but they have some limitations.
139 			 */
140 
141 			if (desc_type == TABLE_DESC) {
142 				/* There's already a table, recurse into it. */
143 				return ACTION_RECURSE_INTO_TABLE;
144 
145 			} else if (desc_type == INVALID_DESC) {
146 				/*
147 				 * There's nothing mapped here, create a new
148 				 * entry.
149 				 *
150 				 * Check if the destination granularity allows
151 				 * us to use a block descriptor or we need a
152 				 * finer table for it.
153 				 *
154 				 * Also, check if the current level allows block
155 				 * descriptors. If not, create a table instead.
156 				 */
157 				if (((dest_pa & XLAT_BLOCK_MASK(level)) != 0U)
158 				    || (level < MIN_LVL_BLOCK_DESC) ||
159 				    (mm->granularity < XLAT_BLOCK_SIZE(level))) {
160 					return ACTION_CREATE_NEW_TABLE;
161 				} else {
162 					return ACTION_WRITE_BLOCK_ENTRY;
163 				}
164 
165 			} else {
166 				/*
167 				 * There's another region mapped here, don't
168 				 * overwrite.
169 				 */
170 				if (desc_type != BLOCK_DESC) {
				ERROR("%s (%u): Expected block descriptor\n",
172 						__func__, __LINE__);
173 					panic();
174 				}
175 
176 				return ACTION_NONE;
177 			}
178 		}
179 
180 	} else if ((mm->base_va <= table_entry_end_va) ||
181 		   (mm_end_va >= table_entry_base_va)) {
182 
183 		/*
184 		 * Region partially covers table entry
185 		 * -----------------------------------
186 		 *
187 		 * This means that this table entry can't describe the whole
188 		 * translation, a finer table is needed.
		 *
190 		 * There cannot be partial block overlaps in level 3. If that
191 		 * happens, some of the preliminary checks when adding the
192 		 * mmap region failed to detect that PA and VA must at least be
193 		 * aligned to PAGE_SIZE.
194 		 */
195 		if (level >= 3U) {
196 			ERROR("%s (%u): Expected table level below 3\n",
197 				__func__, __LINE__);
198 			panic();
199 		}
200 
201 		if (desc_type == INVALID_DESC) {
202 			/*
203 			 * The block is not fully covered by the region. Create
204 			 * a new table, recurse into it and try to map the
205 			 * region with finer granularity.
206 			 */
207 			return ACTION_CREATE_NEW_TABLE;
208 
209 		} else {
210 			if (desc_type != TABLE_DESC) {
211 				ERROR("%s (%u): Expected table descriptor\n",
212 					__func__, __LINE__);
213 				panic();
214 			}
215 			/*
216 			 * The block is not fully covered by the region, but
217 			 * there is already a table here. Recurse into it and
218 			 * try to map with finer granularity.
219 			 *
220 			 * PAGE_DESC for level 3 has the same value as
221 			 * TABLE_DESC, but this code can't run on a level 3
222 			 * table because there can't be overlaps in level 3.
223 			 */
224 			return ACTION_RECURSE_INTO_TABLE;
225 		}
226 	} else {
227 
228 		/*
229 		 * This table entry is outside of the region specified in the
230 		 * arguments, don't write anything to it.
231 		 */
232 		return ACTION_NONE;
233 	}
234 }
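
/*
 * Summary of the decision logic above (derived from the code, for reference):
 *
 *   Entry coverage by region | Current descriptor | Action
 *   -------------------------+--------------------+--------------------------
 *   Fully covered, level 3   | INVALID_DESC       | ACTION_WRITE_BLOCK_ENTRY
 *   Fully covered, level 3   | PAGE_DESC          | ACTION_NONE
 *   Fully covered, level < 3 | INVALID_DESC       | block entry or new table (*)
 *   Fully covered, level < 3 | TABLE_DESC         | ACTION_RECURSE_INTO_TABLE
 *   Fully covered, level < 3 | BLOCK_DESC         | ACTION_NONE
 *   Partially covered        | INVALID_DESC       | ACTION_CREATE_NEW_TABLE
 *   Partially covered        | TABLE_DESC         | ACTION_RECURSE_INTO_TABLE
 *   Not covered              | any                | ACTION_NONE
 *
 * (*) A block entry is only written when the destination PA is aligned to the
 *     block size, the level allows block descriptors and the requested mapping
 *     granularity is not finer than the block size; otherwise a new table is
 *     created.
 */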
235 
236 /*
237  * Recursive function that writes to the translation tables and maps the
238  * specified region. On success, it returns the VA of the last byte that was
239  * successfully mapped. On error, it returns the VA of the next entry that
240  * should have been mapped.
241  *
 * NOTE: This function violates misra-c2012-17.2 due to its use of recursion.
243  */
static uintptr_t xlat_tables_map_region(struct xlat_ctx *ctx,
245 					struct xlat_mmap_region *mm,
246 					uintptr_t table_base_va,
247 					uintptr_t *const table_base,
248 					unsigned int table_entries,
249 					unsigned int level)
250 {
251 	uintptr_t table_idx_va;
252 	unsigned int table_idx;
253 	uintptr_t mm_end_va;
254 	struct xlat_ctx_cfg *ctx_cfg;
255 
256 	assert(mm != NULL);
257 	assert(ctx != NULL);
258 	ctx_cfg = ctx->cfg;
259 
260 	assert(ctx_cfg != NULL);
261 	assert((level >= ctx_cfg->base_level) &&
262 					(level <= XLAT_TABLE_LEVEL_MAX));
263 
264 	mm_end_va = mm->base_va + mm->size - 1U;
265 
266 	if ((level < ctx_cfg->base_level) || (level > XLAT_TABLE_LEVEL_MAX)) {
267 		ERROR("%s (%u): Level out of boundaries (%u)\n",
268 			__func__, __LINE__, level);
269 		panic();
270 	}
271 
272 	table_idx_va = xlat_tables_find_start_va(mm, table_base_va, level);
273 	table_idx = xlat_tables_va_to_index(table_base_va, table_idx_va, level);
274 
275 	while (table_idx < table_entries) {
276 		uintptr_t table_idx_pa;
277 		uint64_t *subtable;
278 		uint64_t desc;
279 
280 		desc = table_base[table_idx];
281 
282 		table_idx_pa = mm->base_pa + table_idx_va - mm->base_va;
283 
284 		action_t action = xlat_tables_map_region_action(mm,
285 			(uint32_t)(desc & DESC_MASK), table_idx_pa,
286 			table_idx_va, level);
287 
288 		if (action == ACTION_WRITE_BLOCK_ENTRY) {
289 
290 			table_base[table_idx] =
291 				xlat_desc(mm->attr, table_idx_pa, level);
292 
293 		} else if (action == ACTION_CREATE_NEW_TABLE) {
294 			uintptr_t end_va;
295 
296 			subtable = xlat_table_get_empty(ctx);
297 			if (subtable == NULL) {
298 				/* Not enough free tables to map this region */
299 				ERROR("%s (%u): Not enough free tables to map region\n",
300 					__func__, __LINE__);
301 				panic();
302 			}
303 
304 			/* Point to new subtable from this one. */
305 			table_base[table_idx] =
306 				TABLE_DESC | (uintptr_t)(void *)subtable;
307 
308 			/* Recurse to write into subtable */
309 			/* FIXME: This violates misra-c2012-17.2 */
310 			end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
311 					       subtable, XLAT_TABLE_ENTRIES,
312 					       level + 1U);
313 			if (end_va !=
314 				(table_idx_va + XLAT_BLOCK_SIZE(level) - 1UL)) {
315 				return end_va;
316 			}
317 
318 		} else if (action == ACTION_RECURSE_INTO_TABLE) {
319 			uintptr_t end_va;
320 
321 			subtable = (uint64_t *)(void *)(desc & TABLE_ADDR_MASK);
322 			/* Recurse to write into subtable */
323 			/* FIXME: This violates misra-c2012-17.2 */
324 			end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
325 					       subtable, XLAT_TABLE_ENTRIES,
326 					       level + 1U);
327 			if (end_va !=
328 				(table_idx_va + XLAT_BLOCK_SIZE(level) - 1UL)) {
329 				return end_va;
330 			}
331 
332 		} else {
333 			if (action != ACTION_NONE) {
334 				ERROR("%s (%u): Unexpected action: %u\n",
335 					__func__, __LINE__, action);
336 				panic();
337 			}
338 		}
339 
340 		table_idx++;
341 		table_idx_va += XLAT_BLOCK_SIZE(level);
342 
343 		/* If reached the end of the region, exit */
344 		if (mm_end_va <= table_idx_va) {
345 			break;
346 		}
347 	}
348 
349 	return table_idx_va - 1U;
350 }
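
/*
 * Illustrative walk-through (hypothetical region, assuming a 4KB granule, a
 * base translation level of 1 and a region granularity of at least
 * XLAT_BLOCK_SIZE(2U)): mapping VA 0x40200000 -> PA 0x40200000, size 4MB,
 * attr MT_MEMORY | MT_RW.
 *
 *  - Level 1 (1GB blocks): the region only partially covers entry 1, so a new
 *    level 2 table is allocated and the walk recurses into it
 *    (ACTION_CREATE_NEW_TABLE).
 *  - Level 2 (2MB blocks): the entries covering 0x40200000 and 0x40400000 are
 *    fully covered and the PA is 2MB aligned, so two block descriptors are
 *    written (ACTION_WRITE_BLOCK_ENTRY) and the function returns 0x405FFFFF,
 *    the VA of the last byte that was mapped.
 */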
351 
352 /*
353  * Function that verifies that a region can be mapped.
354  * Returns:
 *        0: Success, the mapping is allowed.
 *   EFAULT: The base PA, base VA or size are not aligned to the page size.
 *   EINVAL: Invalid values were used as arguments.
 *   ERANGE: The memory limits were surpassed.
 *   ENOMEM: There is not enough memory in the mmap array.
 *    EPERM: Region overlaps another one in an invalid way.
361  */
static int mmap_add_region_check(const struct xlat_ctx *ctx,
363 				 const struct xlat_mmap_region *mm)
364 {
365 	uintptr_t base_pa = mm->base_pa;
366 	uintptr_t base_va = mm->base_va;
367 	size_t size = mm->size;
368 	size_t granularity = mm->granularity;
369 	uintptr_t end_pa = base_pa + size - 1UL;
370 	uintptr_t end_va = base_va + size - 1UL;
371 	unsigned int index;
372 	struct xlat_ctx_cfg *ctx_cfg = ctx->cfg;
373 
374 	if (!IS_PAGE_ALIGNED(base_pa) || !IS_PAGE_ALIGNED(base_va) ||
375 			!IS_PAGE_ALIGNED(size)) {
376 		return -EFAULT;
377 	}
378 
379 	if ((granularity != XLAT_BLOCK_SIZE(1U)) &&
380 	    (granularity != XLAT_BLOCK_SIZE(2U)) &&
381 	    (granularity != XLAT_BLOCK_SIZE(3U))) {
382 		return -EINVAL;
383 	}
384 
385 	/* Check for overflows */
386 	if ((base_pa > end_pa) || (base_va > end_va)) {
387 		return -ERANGE;
388 	}
389 
390 	/*
	 * end_va is calculated as an offset relative to the base address
392 	 * for the current context, so compare it against max_va_size to ensure
393 	 * we are within the allowed range.
394 	 */
395 	if (end_va > ctx_cfg->max_va_size) {
396 		return -ERANGE;
397 	}
398 
399 	if (end_pa > xlat_arch_get_max_supported_pa()) {
400 		return -ERANGE;
401 	}
402 
403 	/* Check that there is space in the ctx->mmap array */
404 	if (ctx_cfg->mmap[ctx_cfg->mmap_num - 1U].size != 0UL) {
405 		return -ENOMEM;
406 	}
407 
	/* Check for PA and VA overlaps with all other regions in this context */
409 	index = 0U;
410 	while ((index < ctx_cfg->mmap_num) &&
411 	       (ctx_cfg->mmap[index].size != 0UL)) {
412 		uintptr_t mm_cursor_end_va = ctx_cfg->mmap[index].base_va +
413 					     ctx_cfg->mmap[index].size - 1UL;
414 
415 		unsigned long long mm_cursor_end_pa =
416 				ctx_cfg->mmap[index].base_pa
417 				+ ctx_cfg->mmap[index].size - 1UL;
418 
419 		bool separated_pa = (end_pa < ctx_cfg->mmap[index].base_pa) ||
420 						(base_pa > mm_cursor_end_pa);
421 		bool separated_va = (end_va < ctx_cfg->mmap[index].base_va) ||
422 						(base_va > mm_cursor_end_va);
423 
424 		if (!separated_va || !separated_pa) {
425 			return -EPERM;
426 		}
427 		++index;
428 	}
429 
430 	return 0;
431 }
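
/*
 * Examples of mappings rejected by the check above (hypothetical values,
 * assuming a 4KB page size):
 *
 *  - base_va = 0x1000, size = 0x1800: the size is not page aligned -> -EFAULT.
 *  - Two regions whose VA ranges are [0x2000, 0x3FFF] and [0x3000, 0x4FFF]:
 *    the VA ranges overlap, so adding the second one returns -EPERM.
 */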
432 
433 /*
434  * Returns a block/page table descriptor for the given level and attributes.
435  */
uint64_t xlat_desc(uint64_t attr, uintptr_t addr_pa, unsigned int level)
437 {
438 	uint64_t desc;
439 	uint32_t mem_type;
440 	uint32_t shareability_type;
441 
442 	if ((MT_TYPE(attr) == MT_TRANSIENT)) {
443 		/* Transient entry requested. */
444 		desc = 0ULL;
445 		return desc;
446 	}
447 
448 	/* Make sure that the granularity is fine enough to map this address. */
	if ((addr_pa & XLAT_BLOCK_MASK(level)) != 0U) {
		ERROR("%s (%u): 0x%lx has incorrect granularity\n",
			__func__, __LINE__, addr_pa);
		panic();
	}
453 
454 	desc = addr_pa;
455 	/*
456 	 * There are different translation table descriptors for level 3 and the
457 	 * rest.
458 	 */
459 	desc |= (level == XLAT_TABLE_LEVEL_MAX) ? PAGE_DESC : BLOCK_DESC;
460 	/*
461 	 * Always set the access flag, as this library assumes access flag
462 	 * faults aren't managed.
463 	 */
464 	desc |= LOWER_ATTRS(ACCESS_FLAG);
465 
466 	/* Determine the physical address space this region belongs to. */
467 	desc |= xlat_arch_get_pas(attr);
468 
469 	/*
470 	 * Deduce other fields of the descriptor based on the MT_RW memory
471 	 * region attributes.
472 	 */
473 	desc |= ((attr & MT_RW) != 0UL) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
474 
475 	if ((attr & MT_CONT) != 0UL) {
476 		desc |= XLAT_GET_CONT_HINT();
477 	}
478 
479 	if ((attr & MT_NG) != 0UL) {
480 		desc |= XLAT_GET_NG_HINT();
481 	}
482 
483 	/*
	 * Mark this area as non-executable for unprivileged exception levels.
485 	 */
486 	desc |= XLAT_GET_UXN_DESC();
487 
488 	/*
489 	 * Deduce shareability domain and executability of the memory region
490 	 * from the memory type of the attributes (MT_TYPE).
491 	 *
492 	 * Data accesses to device memory and non-cacheable normal memory are
493 	 * coherent for all observers in the system, and correspondingly are
494 	 * always treated as being Outer Shareable. Therefore, for these 2 types
495 	 * of memory, it is not strictly needed to set the shareability field
496 	 * in the translation tables.
497 	 */
498 	mem_type = MT_TYPE(attr);
499 	if (mem_type == MT_DEVICE) {
500 		desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH);
501 		/*
502 		 * Always map device memory as execute-never.
503 		 * This is to avoid the possibility of a speculative instruction
504 		 * fetch, which could be an issue if this memory region
505 		 * corresponds to a read-sensitive peripheral.
506 		 */
507 		desc |= XLAT_GET_PXN_DESC();
508 
509 	} else { /* Normal memory */
510 		/*
511 		 * Always map read-write normal memory as execute-never.
512 		 * This library assumes that it is used by software that does
513 		 * not self-modify its code, therefore R/W memory is reserved
514 		 * for data storage, which must not be executable.
515 		 *
516 		 * Note that setting the XN bit here is for consistency only.
517 		 * The function that enables the MMU sets the SCTLR_ELx.WXN bit,
		 * which causes any writable memory region to be treated as
519 		 * execute-never, regardless of the value of the XN bit in the
520 		 * translation table.
521 		 *
522 		 * For read-only memory, rely on the MT_EXECUTE/MT_EXECUTE_NEVER
523 		 * attribute to figure out the value of the PXN bit.  The actual
524 		 * XN bit(s) to set in the descriptor depends on the context's
525 		 * translation regime and the policy applied in
526 		 * XLAT_GET_PXN_DESC().
527 		 */
528 		if (((attr & MT_RW) != 0UL) || ((attr & MT_EXECUTE_NEVER) != 0UL)) {
529 			desc |= XLAT_GET_PXN_DESC();
530 		}
531 
532 		shareability_type = MT_SHAREABILITY(attr);
533 		if (mem_type == MT_MEMORY) {
534 			desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX);
535 			if (shareability_type == MT_SHAREABILITY_NSH) {
536 				desc |= LOWER_ATTRS(NSH);
537 			} else if (shareability_type == MT_SHAREABILITY_OSH) {
538 				desc |= LOWER_ATTRS(OSH);
539 			} else {
540 				desc |= LOWER_ATTRS(ISH);
541 			}
542 
			/*
			 * TODO: If the BTI mechanism is implemented and
			 * enabled, set the GP bit for block and page entries
			 * that map executable memory. Revisit this code when
			 * adding BTI support.
			 */
548 		} else {
549 			if (mem_type != MT_NON_CACHEABLE) {
550 				/* Only non cacheable memory at this point */
551 				panic();
552 			}
553 			desc |= LOWER_ATTRS(ATTR_NON_CACHEABLE_INDEX | OSH);
554 		}
555 	}
556 
557 	return desc;
558 }
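
/*
 * Illustrative composition of a descriptor (symbolic, following the code
 * above): for a level 3 mapping of normal memory with attr = MT_MEMORY | MT_RW
 * and default (inner shareable) shareability, the returned value is:
 *
 *	desc = addr_pa | PAGE_DESC
 *	       | LOWER_ATTRS(ACCESS_FLAG)
 *	       | xlat_arch_get_pas(attr)
 *	       | LOWER_ATTRS(AP_RW)
 *	       | XLAT_GET_UXN_DESC()
 *	       | XLAT_GET_PXN_DESC()	(R/W normal memory is execute-never)
 *	       | LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX)
 *	       | LOWER_ATTRS(ISH);
 */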
559 
560 /*****************************************************************************
561  * Public part of the core translation library.
562  ****************************************************************************/
563 
564 /*
565  * Add a memory region with defined base PA and base VA. This function can only
566  * be used before marking the xlat_ctx_cfg for the current xlat_ctx as
567  * initialized.
568  *
569  * The region cannot be removed once added.
570  *
571  * This function returns 0 on success or an error code otherwise.
572  */
int xlat_mmap_add_region_ctx(struct xlat_ctx *ctx,
574 			     struct xlat_mmap_region *mm)
575 {
576 	unsigned int mm_last_idx = 0U;
577 	unsigned int mm_cursor_idx = 0U;
578 	uintptr_t end_pa;
579 	uintptr_t end_va;
580 	struct xlat_ctx_cfg *ctx_cfg;
581 	struct xlat_ctx_tbls *ctx_tbls;
582 	int ret;
583 
584 	if (ctx == NULL) {
585 		return -EINVAL;
586 	}
587 
588 	ctx_cfg = ctx->cfg;
589 	ctx_tbls = ctx->tbls;
590 
591 	if (ctx_cfg == NULL || ctx_tbls == NULL) {
592 		return -EINVAL;
593 	}
594 
595 	if (mm == NULL) {
596 		return -EINVAL;
597 	}
598 
	/* The context configuration must not be marked as initialized yet */
600 	if (xlat_ctx_cfg_initialized(ctx) == true) {
601 		return -EINVAL;
602 	}
603 
604 	/* Memory regions must be added before initializing the xlat tables. */
605 	assert(ctx_tbls->initialized == false);
606 
607 	/* Ignore empty regions */
608 	if (mm->size == 0UL) {
609 		return 0;
610 	}
611 
612 	if (ctx_cfg->region == VA_LOW_REGION) {
613 		/*
614 		 * Initialize the base_va for the current context if not
615 		 * initialized yet.
616 		 *
617 		 * For the low region, the architecture mandates that
618 		 * base_va has to be 0.
619 		 *
620 		 * Overwriting this field should not be a problem as its value
621 		 * is expected to be always the same.
622 		 */
623 		ctx_cfg->base_va = 0ULL;
624 
625 		if ((mm->base_va & HIGH_REGION_MASK) ||
626 		     ((mm->base_va + mm->size) & HIGH_REGION_MASK)) {
627 			ERROR("%s (%u): Base VA and address space do not match: ",
628 							__func__, __LINE__);
629 			ERROR("Base va = 0x%lx, Address space = Low region\n",
630 				mm->base_va);
631 			return -EINVAL;
632 		}
633 	} else {
634 		/*
635 		 * Initialize the base_va for the current context if not
636 		 * initialized yet.
637 		 *
638 		 * For the high region, the architecture mandates that
639 		 * base_va has to be 0xFFFF-FFFF-FFFF-FFFF minus the VA space
640 		 * size plus one.
641 		 *
642 		 * Overwriting this field should not be a problem as its value
643 		 * is expected to be always the same.
644 		 */
645 		ctx_cfg->base_va = (ULONG_MAX - ctx_cfg->max_va_size + 1ULL);
646 
647 		if (mm->base_va < ctx_cfg->base_va) {
			ERROR("%s (%u): Base VA is below the high region start: ",
649 							__func__, __LINE__);
650 			ERROR("Base VA = 0x%lx, high region start VA = 0x%lx\n",
651 				mm->base_va, ctx_cfg->base_va);
652 			return -EINVAL;
653 		}
654 
655 		/*
656 		 * If this context is handling the high half region of the VA,
		 * adjust the start address of this area by subtracting the
658 		 * start address of the region as the table entries are
659 		 * relative to the latter. Once ttbr1_el2 is configured, the
660 		 * MMU will translate the addresses properly.
661 		 */
662 		mm->base_va -= ctx_cfg->base_va;
663 	}
664 
665 	end_pa = mm->base_pa + mm->size - 1UL;
666 	end_va = mm->base_va + mm->size - 1UL;
667 
668 	ret = mmap_add_region_check(ctx, mm);
669 	if (ret != 0) {
670 		ERROR("%s (%u): mmap_add_region_check() failed. error %d\n",
671 					__func__, __LINE__, ret);
672 		return ret;
673 	}
674 
675 	/*
676 	 * Find correct place in mmap to insert new region.
677 	 * Overlapping is not allowed.
678 	 */
679 	while (((ctx_cfg->mmap[mm_cursor_idx].base_va) < mm->base_va)
680 	       && (ctx_cfg->mmap[mm_cursor_idx].size != 0UL)
681 	       && (mm_cursor_idx < ctx_cfg->mmap_num)) {
682 		++mm_cursor_idx;
683 	}
684 
685 	/*
686 	 * Find the last entry marker in the mmap
687 	 */
688 	while ((mm_last_idx < ctx_cfg->mmap_num) &&
689 	       (ctx_cfg->mmap[mm_last_idx].size != 0UL)) {
690 		++mm_last_idx;
691 	}
692 
693 	/*
694 	 * Check if we have enough space in the memory mapping table.
695 	 * This shouldn't happen as we have checked in mmap_add_region_check
696 	 * that there is free space.
697 	 */
698 	assert(ctx_cfg->mmap[mm_last_idx].size == 0UL);
699 
700 	/*
701 	 * Make room for new region by moving other regions up by one place.
702 	 */
703 	(void)memmove((void *)(&ctx_cfg->mmap[mm_cursor_idx + 1U]),
704 		      (void *)(&ctx_cfg->mmap[mm_cursor_idx]),
705 		      sizeof(struct xlat_mmap_region) *
706 						(mm_last_idx - mm_cursor_idx));
707 
708 	/* Store the memory mapping information into the context. */
709 	(void)memcpy((void *)(&ctx_cfg->mmap[mm_cursor_idx]), (void *)mm,
710 						sizeof(struct xlat_mmap_region));
711 
712 	if (end_pa > ctx_cfg->max_mapped_pa) {
713 		ctx_cfg->max_mapped_pa = end_pa;
714 	}
715 
716 	if (end_va > ctx_cfg->max_mapped_va_offset) {
717 		ctx_cfg->max_mapped_va_offset = end_va;
718 	}
719 
720 	return 0;
721 }
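
/*
 * Usage sketch (hypothetical addresses and context name; the context's
 * configuration must not be marked as initialized yet):
 *
 *	struct xlat_mmap_region rgn = {
 *		.base_pa = 0x88000000UL,
 *		.base_va = 0x88000000UL,
 *		.size = 0x200000UL,
 *		.attr = MT_MEMORY | MT_RW,
 *		.granularity = XLAT_BLOCK_SIZE(2U)
 *	};
 *
 *	int ret = xlat_mmap_add_region_ctx(&low_va_ctx, &rgn);
 *
 * A zero return value means the region was inserted into the context's mmap
 * array, which is kept sorted by base_va.
 */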
722 
723 /*
724  * Add an array of memory regions with defined base PA and base VA.
 * This function needs to be called before initializing the xlat_ctx_cfg.
 * Setting the `last` argument to true will initialize the xlat_ctx_cfg.
727  *
728  * The regions cannot be removed once added.
729  *
730  * Return 0 on success or a negative error code otherwise.
731  */
int xlat_mmap_add_ctx(struct xlat_ctx *ctx,
733 		      struct xlat_mmap_region *mm,
734 		      bool last)
735 {
736 	if ((ctx == NULL) || (mm == NULL)) {
737 		return -EINVAL;
738 	}
739 
740 	struct xlat_mmap_region *mm_cursor = mm;
741 
742 	while (mm_cursor->size != 0UL) {
743 		int retval;
744 
745 		retval = xlat_mmap_add_region_ctx(ctx, mm_cursor);
746 		if (retval != 0) {
747 			/*
			 * In case of error, stop and return.
			 * Note that the context might be left in an invalid
			 * state and will need to be restarted.
751 			 */
752 			return retval;
753 		}
754 		mm_cursor++;
755 	}
756 
757 	if (last) {
758 		/*
759 		 * Mark the configuration part of the context as initialized.
760 		 * From this point on, no more memory mapping areas can be
761 		 * added to this context (or any other sharing the same
762 		 * configuration).
763 		 */
764 		ctx->cfg->initialized = true;
765 		flush_dcache_range((uintptr_t)(void *)ctx->cfg,
766 				   sizeof(struct xlat_ctx_cfg));
767 
768 	}
769 
770 #if LOG_LEVEL >= LOG_LEVEL_VERBOSE
	VERBOSE("Runtime mappings ");
772 	if (ctx->cfg->region == VA_LOW_REGION) {
773 		VERBOSE("(Low Region):\n");
774 	} else {
775 		VERBOSE("(High Region):\n");
776 	}
777 
778 	for (unsigned int i = 0U; i < ctx->cfg->mmap_num; i++) {
779 		VERBOSE("\tRegion: 0x%lx - 0x%lx has attributes 0x%lx\n",
780 			ctx->cfg->mmap[i].base_va,
781 			ctx->cfg->mmap[i].base_va + ctx->cfg->mmap[i].size - 1U,
782 			ctx->cfg->mmap[i].attr);
783 	}
#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
785 
786 	return 0;
787 }
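
/*
 * Usage sketch (hypothetical regions and context name): the array must be
 * terminated by an all-zero entry (size == 0), and passing `last` as true
 * prevents any further regions from being added to this configuration:
 *
 *	struct xlat_mmap_region regions[] = {
 *		{ .base_pa = 0x80000000UL, .base_va = 0x80000000UL,
 *		  .size = 0x400000UL, .attr = MT_MEMORY | MT_RW,
 *		  .granularity = XLAT_BLOCK_SIZE(2U) },
 *		{ 0 }
 *	};
 *
 *	int ret = xlat_mmap_add_ctx(&low_va_ctx, regions, true);
 */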
788 
789 /*
790  * Initialize translation tables (and mark xlat_ctx_cfg as initialized if
791  * not already initialized) associated to the current context.
792  *
793  * The struct xlat_ctx_cfg of the context might be shared with other
794  * contexts that might have already initialized it. This is expected and
795  * should not cause any problem.
796  *
797  * This function assumes that the xlat_ctx_cfg field of the context has been
798  * properly configured by previous calls to xlat_mmap_add_region_ctx().
799  *
800  * This function returns 0 on success or an error code otherwise.
801  */
int xlat_init_tables_ctx(struct xlat_ctx *ctx)
803 {
804 	struct xlat_ctx_cfg *ctx_cfg;
805 	struct xlat_ctx_tbls *ctx_tbls;
806 	unsigned int index;
807 
808 	if (ctx == NULL) {
809 		return -EINVAL;
810 	}
811 
812 	ctx_cfg = ctx->cfg;
813 	ctx_tbls = ctx->tbls;
814 
815 	if (ctx_cfg == NULL || ctx_tbls == NULL) {
816 		return -EINVAL;
817 	}
818 
819 	if (xlat_ctx_tbls_initialized(ctx)) {
820 		VERBOSE("%s (%u): Translation tables already initialized\n",
821 					__func__, __LINE__);
822 		return -EALREADY;
823 	}
824 
825 	if (!xlat_ctx_cfg_initialized(ctx)) {
826 		VERBOSE("%s (%u): Translation context configuration not initialized\n",
827 					__func__, __LINE__);
828 		return -EINVAL;
829 	}
830 
831 	if (is_mmu_enabled() == true) {
832 		ERROR("%s (%u): MMU is already enabled\n", __func__, __LINE__);
833 		return -EINVAL;
834 	}
835 
836 	xlat_mmap_print(ctx);
837 
838 	/*
839 	 * All tables must be zeroed/initialized before mapping any region
840 	 * as they are allocated outside the .bss area.
841 	 */
842 	for (unsigned int i = 0U; i < XLAT_TABLE_ENTRIES; i++) {
843 		ctx_tbls->base_table[i] = INVALID_DESC;
844 	}
845 
846 	for (unsigned int j = 0; j < ctx_tbls->tables_num; j++) {
847 		for (unsigned int i = 0U; i < XLAT_TABLE_ENTRIES; i++) {
848 			ctx_tbls->tables[j][i] = INVALID_DESC;
849 		}
850 	}
851 
852 	index = 0U;
853 	while ((index < ctx_cfg->mmap_num) &&
854 	       (ctx_cfg->mmap[index].size != 0UL)) {
855 		uintptr_t end_va = xlat_tables_map_region(ctx,
856 						&ctx_cfg->mmap[index],
857 						0U,
858 						ctx_tbls->base_table,
859 						ctx_tbls->max_base_table_entries,
860 						ctx_cfg->base_level);
861 		if (end_va != (ctx_cfg->mmap[index].base_va +
862 					ctx_cfg->mmap[index].size - 1UL)) {
863 			ERROR("%s (%u): Not enough memory to map region: "
864 			      " VA:0x%lx  PA:0x%lx  size:0x%zx  attr:0x%lx\n",
865 			      __func__, __LINE__, ctx_cfg->mmap[index].base_va,
866 						  ctx_cfg->mmap[index].base_pa,
867 						  ctx_cfg->mmap[index].size,
868 						  ctx_cfg->mmap[index].attr);
869 			return -ENOMEM;
870 		}
871 
872 		++index;
873 	}
874 
875 	/* Flush the cache as a good measure */
876 	flush_dcache_range((uintptr_t)(void *)ctx_tbls->base_table,
877 			 sizeof(uint64_t) * XLAT_TABLE_ENTRIES);
878 	flush_dcache_range((uintptr_t)(void *)ctx_tbls->tables,
879 			 sizeof(uint64_t) * (unsigned long)ctx_tbls->tables_num
880 						* XLAT_TABLE_ENTRIES);
881 
882 	ctx_tbls->initialized = true;
883 
884 	flush_dcache_range((uintptr_t)(void *)ctx_tbls,
885 			   sizeof(struct xlat_ctx_tbls));
886 	flush_dcache_range((uintptr_t)(void *)ctx, sizeof(struct xlat_ctx));
887 
888 	xlat_tables_print(ctx);
889 
890 	return 0;
891 }
892 
893 /*
894  * Initialize a context dynamically at runtime using the given xlat_ctx_cfg
895  * and xlat_ctx_tbls structures.
896  *
 * Return 0 on success or a negative POSIX error code otherwise.
898  */
int xlat_ctx_create_dynamic(struct xlat_ctx *ctx,
900 			    struct xlat_ctx_cfg *cfg,
901 			    struct xlat_ctx_tbls *tbls,
902 			    void *base_tables,
903 			    unsigned int base_level_entries,
904 			    void *tables_ptr,
905 			    unsigned int ntables)
906 {
907 	if (ctx == NULL) {
908 		return -EINVAL;
909 	}
910 
911 	if (XLAT_TABLES_CTX_CFG_VALID(ctx) &&
912 	    XLAT_TABLES_CTX_TBL_VALID(ctx)) {
913 		return -EALREADY;
914 	}
915 
916 	/* Add the configuration to the context */
917 	XLAT_SETUP_CTX_CFG(ctx, cfg);
918 
919 	/* Initialize the tables structure */
920 	XLAT_INIT_CTX_TBLS(tbls, tables_ptr, ntables,
921 			   base_tables, base_level_entries);
922 
923 	/* Add the tables to the context */
924 	XLAT_SETUP_CTX_TBLS(ctx, tbls);
925 
926 	return 0;
927 }
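
/*
 * Typical setup sequence (sketch only; variable names, the table count and
 * the `regions` array are hypothetical, error checking is omitted, the tables
 * must satisfy the architectural alignment requirements, and the xlat_ctx_cfg
 * fields such as the mmap array, VA size and region are assumed to be
 * populated by means not shown in this file):
 *
 *	static uint64_t base_tbl[XLAT_TABLE_ENTRIES];
 *	static uint64_t pool[2U][XLAT_TABLE_ENTRIES];
 *	static struct xlat_ctx ctx;
 *	static struct xlat_ctx_cfg cfg;
 *	static struct xlat_ctx_tbls tbls;
 *
 *	(void)xlat_ctx_create_dynamic(&ctx, &cfg, &tbls,
 *				      base_tbl, XLAT_TABLE_ENTRIES,
 *				      pool, 2U);
 *	(void)xlat_mmap_add_ctx(&ctx, regions, true);
 *	(void)xlat_init_tables_ctx(&ctx);
 */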
928 
929 /*
930  * Returns true if the context is already initialized and false otherwise.
931  * This function only takes into account whether xlat_ctx_cfg is initialized.
932  */
bool xlat_ctx_cfg_initialized(const struct xlat_ctx * const ctx)
934 {
935 	assert(ctx != NULL);
936 	assert(ctx->cfg != NULL);
937 	return ctx->cfg->initialized;
938 }
939 
940 /*
941  * Returns true if the translation tables on the current context are already
942  * initialized or false otherwise.
943  */
bool xlat_ctx_tbls_initialized(const struct xlat_ctx * const ctx)
945 {
946 	assert(ctx != NULL);
947 	assert(ctx->tbls != NULL);
948 	return ctx->tbls->initialized;
949 }
950