/*
 * SPDX-License-Identifier: BSD-3-Clause
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 * SPDX-FileCopyrightText: Copyright Arm Limited and Contributors.
 */

/* This file is derived from the xlat_table_v2 library in the TF-A project */

#ifndef XLAT_TABLES_H
#define XLAT_TABLES_H

#ifndef __ASSEMBLER__

#include <memory.h>
#include <stddef.h>
#include <stdint.h>

#endif

#include <xlat_contexts.h>
#include <xlat_defs.h>

#ifndef __ASSEMBLER__

/*
 * Default granularity size for a struct xlat_mmap_region.
 * Useful when no specific granularity is required.
 *
 * By default, choose the biggest possible block size allowed by the
 * architectural state and granule size in order to minimize the number of page
 * tables required for the mapping.
 */
#define REGION_DEFAULT_GRANULARITY	XLAT_BLOCK_SIZE(MIN_LVL_BLOCK_DESC)

/*
 * Helper macro to define a struct xlat_mmap_region. This macro allows all the
 * fields of the structure to be specified, but its parameter list is not
 * guaranteed to remain stable as members are added to struct xlat_mmap_region.
 */
#define MAP_REGION_FULL_SPEC(_pa, _va, _sz, _attr, _gr)		\
	{							\
		.base_pa = (_pa),				\
		.base_va = (_va),				\
		.size = (_sz),					\
		.attr = (_attr),				\
		.granularity = (_gr),				\
	}

/* Helper macro to define a struct xlat_mmap_region. */
#define MAP_REGION(_pa, _va, _sz, _attr)	\
	MAP_REGION_FULL_SPEC(_pa, _va, _sz, _attr, REGION_DEFAULT_GRANULARITY)

/* Helper macro to define a struct xlat_mmap_region with an identity mapping. */
#define MAP_REGION_FLAT(_adr, _sz, _attr)			\
	MAP_REGION(_adr, _adr, _sz, _attr)
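
/*
 * Example usage of the helpers above (a minimal sketch; RMM_CODE_BASE,
 * RMM_CODE_SIZE, DEVICE0_BASE, DEVICE0_VA and DEVICE0_SIZE are hypothetical
 * platform symbols, not provided by this library; the MT_* attributes used
 * here are defined further down in this header):
 *
 *	struct xlat_mmap_region regions[] = {
 *		MAP_REGION_FLAT(RMM_CODE_BASE, RMM_CODE_SIZE, MT_CODE),
 *		MAP_REGION(DEVICE0_BASE, DEVICE0_VA, DEVICE0_SIZE,
 *			   MT_DEVICE | MT_RW),
 *	};
 *
 * The first entry identity-maps the code region as inner-shareable, read-only
 * executable memory; the second maps a device region at a different VA as
 * read-write, execute-never memory.
 */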

/*
 * Helper macro to define a struct xlat_mmap_region to map with the desired
 * granularity of translation tables but with invalid page descriptors.
 *
 * The granularity value passed to this macro must be a valid block or page
 * size. When using a 4KB translation granule, this might be 4KB, 2MB or 1GB.
 * Passing REGION_DEFAULT_GRANULARITY is also allowed and means that the library
 * is free to choose the granularity for this region.
 *
 * This macro can be used to define transient regions, where VA space is
 * reserved so that a PA can be assigned to it dynamically at a later point.
 * Accesses to such a VA will fault until a valid PA is assigned to it.
 */

#define MAP_REGION_TRANSIENT(_va, _sz, _gr)			\
	MAP_REGION_FULL_SPEC(ULL(0), _va, _sz, MT_TRANSIENT, _gr)
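
/*
 * Example (a minimal sketch; SLOT_VA_BASE is a hypothetical VA reserved by
 * the caller, and PAGE_SIZE is assumed to be the 4KB granule size provided
 * by xlat_defs.h): reserve a single page of VA space that can later be
 * mapped to a PA with xlat_map_memory_page_with_attrs() and unmapped again
 * with xlat_unmap_memory_page(), both declared further down in this header.
 *
 *	struct xlat_mmap_region slot =
 *		MAP_REGION_TRANSIENT(SLOT_VA_BASE, PAGE_SIZE, PAGE_SIZE);
 */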

/* Definition of an invalid descriptor */
#define INVALID_DESC		UL(0x0)

/*
 * Shifts and masks to access fields of an mmap attribute
 */
#define MT_TYPE_SHIFT		UL(0)
#define MT_TYPE_WIDTH		UL(4)
#define MT_TYPE_MASK		MASK(MT_TYPE)
#define MT_TYPE(_attr)		((_attr) & MT_TYPE_MASK)
/* Access permissions (RO/RW) */
#define MT_PERM_SHIFT		(MT_TYPE_SHIFT + MT_TYPE_WIDTH)
/* Access permissions for instruction execution (EXECUTE/EXECUTE_NEVER) */
#define MT_EXECUTE_FLAG_SHIFT	(MT_PERM_SHIFT + 1UL)

/* Contiguous descriptor flag */
#define MT_CONT_SHIFT		(MT_EXECUTE_FLAG_SHIFT + 1UL)

/* Not-Global (nG) flag */
#define MT_NG_SHIFT		(MT_CONT_SHIFT + 1UL)

/* Shareability attribute for the memory region */
#define MT_SHAREABILITY_SHIFT	(MT_NG_SHIFT + 1UL)
#define MT_SHAREABILITY_WIDTH	UL(2)
#define MT_SHAREABILITY_MASK	MASK(MT_SHAREABILITY)
#define MT_SHAREABILITY(_attr)	((_attr) & MT_SHAREABILITY_MASK)

/* Physical address space (REALM/NS, as ROOT/SECURE do not apply to R-EL2) */
#define MT_PAS_SHIFT		(MT_SHAREABILITY_SHIFT + MT_SHAREABILITY_WIDTH)
#define MT_PAS_WIDTH		UL(1)
#define MT_PAS_MASK		MASK(MT_PAS)
#define MT_PAS(_attr)		((_attr) & MT_PAS_MASK)

/* All other bits are reserved */

/*
 * Memory mapping attributes
 */

/*
 * Memory types supported.
 * These are organised so that, going down the list, the memory types get
 * weaker; conversely, going up the list, the memory types get stronger.
 */
#define MT_DEVICE		UL(0)
#define MT_NON_CACHEABLE	UL(1)
#define MT_MEMORY		UL(2)
#define MT_TRANSIENT		UL(3)
/* Values up to 7 are reserved to add new memory types in the future */

#define MT_RO			INPLACE(MT_PERM, 0UL)
#define MT_RW			INPLACE(MT_PERM, 1UL)

#define MT_REALM		INPLACE(MT_PAS, 0UL)
#define MT_NS			INPLACE(MT_PAS, 1UL)

/*
 * Access permissions for instruction execution are only relevant for normal
 * read-only memory, i.e. MT_MEMORY | MT_RO. They are ignored (and potentially
 * overridden) otherwise:
 *  - Device memory is always marked as execute-never.
 *  - Read-write normal memory is always marked as execute-never.
 */
#define MT_EXECUTE		INPLACE(MT_EXECUTE_FLAG, 0UL)
#define MT_EXECUTE_NEVER	INPLACE(MT_EXECUTE_FLAG, 1UL)

/*
 * Shareability defines the visibility of any cache changes to
 * all masters belonging to a shareable domain.
 *
 * MT_SHAREABILITY_ISH: For the inner shareable domain
 * MT_SHAREABILITY_OSH: For the outer shareable domain
 * MT_SHAREABILITY_NSH: For the non-shareable domain
 */
#define MT_SHAREABILITY_ISH	INPLACE(MT_SHAREABILITY, 1UL)
#define MT_SHAREABILITY_OSH	INPLACE(MT_SHAREABILITY, 2UL)
#define MT_SHAREABILITY_NSH	INPLACE(MT_SHAREABILITY, 3UL)

#define MT_CONT			INPLACE(MT_CONT, 1UL)
#define MT_NG			INPLACE(MT_NG, 1UL)

/* Compound attributes for most common usages */
#define MT_CODE			(MT_MEMORY | MT_SHAREABILITY_ISH \
				 | MT_RO | MT_EXECUTE)
#define MT_RO_DATA		(MT_MEMORY | MT_SHAREABILITY_ISH \
				 | MT_RO | MT_EXECUTE_NEVER)
#define MT_RW_DATA		(MT_MEMORY | MT_SHAREABILITY_ISH \
				 | MT_RW | MT_EXECUTE_NEVER)
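
/*
 * Example (a minimal sketch): the attribute value for a non-secure,
 * non-global, read-write data buffer shared with the NS world could be
 * composed from the macros above as:
 *
 *	uint64_t attrs = MT_RW_DATA | MT_NS | MT_NG;
 */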

/*
 * Structure for specifying a single region of memory.
 */
struct xlat_mmap_region {
	uintptr_t	base_pa;	/* Base PA for the current region. */
	uintptr_t	base_va;	/* Base VA for the current region. */
	size_t		size;		/* Size of the current region. */
	uint64_t	attr;		/* Attrs for the current region. */
	size_t		granularity;	/* Region granularity. */
};

/*
 * Structure containing a table entry and its related information.
 */
struct xlat_table_entry {
	uint64_t *table;	/* Pointer to the translation table. */
	uintptr_t base_va;	/* Context base VA for the current entry. */
	unsigned int level;	/* Table level of the current entry. */
	unsigned int entries;	/* Number of entries used by this table. */
};

/******************************************************************************
 * Generic translation table APIs.
 *****************************************************************************/

static inline void xlat_write_descriptor(uint64_t *entry, uint64_t desc)
{
	SCA_WRITE64(entry, desc);
}

static inline uint64_t xlat_read_descriptor(uint64_t *entry)
{
	return SCA_READ64(entry);
}

/*
 * Initialize the translation tables associated with the current context (and
 * mark its xlat_ctx_cfg as initialized if it is not already).
 *
 * The struct xlat_ctx_cfg of the context might be shared with other
 * contexts that might have already initialized it. This is expected and
 * should not cause any problem.
 *
 * This function assumes that the xlat_ctx_cfg field of the context has been
 * properly configured by previous calls to xlat_mmap_add_region_ctx().
 *
 * This function returns 0 on success or an error code otherwise.
 */
int xlat_init_tables_ctx(struct xlat_ctx *ctx);

/*
 * Add a memory region with defined base PA and base VA. This function can only
 * be used before marking the xlat_ctx_cfg for the current xlat_ctx as
 * initialized.
 *
 * The region cannot be removed once added.
 *
 * This function returns 0 on success or an error code otherwise.
 */
int xlat_mmap_add_region_ctx(struct xlat_ctx *ctx,
			     struct xlat_mmap_region *mm);

/*
 * Add an array of memory regions with defined base PA and base VA.
 * This function needs to be called before initializing the xlat_ctx_cfg.
 * Setting the `last` argument to true will initialize the xlat_ctx_cfg.
 *
 * The regions cannot be removed once added.
 *
 * Return 0 on success or a negative error code otherwise.
 */
int xlat_mmap_add_ctx(struct xlat_ctx *ctx,
		      struct xlat_mmap_region *mm,
		      bool last);
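
/*
 * Example call sequence (a minimal sketch; `ctx` is a context set up by the
 * caller, e.g. with xlat_ctx_create_dynamic() declared further down, and
 * `regions` is an array of struct xlat_mmap_region such as the ones built
 * with the MAP_REGION helpers above):
 *
 *	int rc;
 *
 *	rc = xlat_mmap_add_ctx(ctx, regions, true);
 *	if (rc != 0) {
 *		return rc;
 *	}
 *
 *	rc = xlat_init_tables_ctx(ctx);
 *	if (rc != 0) {
 *		return rc;
 *	}
 */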

/*
 * Return a table entry structure given a context and a VA.
 * The result is populated in the structure pointed to by retval.
 *
 * This function returns 0 on success or a negative error code otherwise.
 */
int xlat_get_table_from_va(struct xlat_table_entry * const retval,
			   const struct xlat_ctx * const ctx,
			   const uintptr_t va);

/*
 * Function to unmap a physical memory page from the given descriptor entry
 * and VA.
 * This function implements the "Break" part of the Break-Before-Make semantics
 * mandated by the Armv8.x architecture in order to update the page descriptors.
 *
 * This function returns 0 on success or a negative error code otherwise.
 */
int xlat_unmap_memory_page(struct xlat_table_entry * const table,
			   const uintptr_t va);

/*
 * Function to map a physical memory page to the given descriptor table entry
 * and VA. This function implements the "Make" part of the
 * Break-Before-Make semantics mandated by the Armv8.x architecture in order
 * to update the page descriptors.
 *
 * This function returns 0 on success or a negative error code otherwise.
 */
int xlat_map_memory_page_with_attrs(const struct xlat_table_entry * const table,
				    const uintptr_t va,
				    const uintptr_t pa,
				    const uint64_t attrs);
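
/*
 * Example Break-Before-Make sequence using the three functions above
 * (a minimal sketch; `ctx`, `va`, `new_pa` and `attrs` are assumed to be
 * provided by the caller):
 *
 *	struct xlat_table_entry entry;
 *	int rc;
 *
 *	rc = xlat_get_table_from_va(&entry, ctx, va);
 *	if (rc != 0) {
 *		return rc;
 *	}
 *
 *	rc = xlat_unmap_memory_page(&entry, va);
 *	if (rc != 0) {
 *		return rc;
 *	}
 *
 *	rc = xlat_map_memory_page_with_attrs(&entry, va, new_pa, attrs);
 */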

/*
 * This function finds the descriptor entry in a table, given the corresponding
 * table entry structure and the VA for that descriptor.
 */
uint64_t *xlat_get_pte_from_table(const struct xlat_table_entry * const table,
				    const uintptr_t va);

/*
 * Set up the MMU configuration registers for the specified platform parameters.
 *
 * This function must be called for each context, as it configures the
 * appropriate TTBRx register depending on the context.
 *
 * This function also assumes that the contexts for the high and low VA halves
 * share the same virtual address space as well as the same physical address
 * space, so it is safe to call it for each context initialization.
 *
 * Returns 0 on success or a negative error code otherwise.
 */
int xlat_arch_setup_mmu_cfg(struct xlat_ctx * const ctx);

/* MMU control */
void xlat_enable_mmu_el2(void);
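
/*
 * Example MMU bring-up sequence (a minimal sketch; `ctx` is assumed to be a
 * context whose tables have already been initialized, and the MMU is assumed
 * to be enabled only after all the contexts for this PE have been configured):
 *
 *	int rc = xlat_arch_setup_mmu_cfg(ctx);
 *
 *	if (rc != 0) {
 *		return rc;
 *	}
 *	xlat_enable_mmu_el2();
 */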

/*
 * Returns true if the xlat_ctx_cfg field in the xlat_ctx is initialized.
 */
bool xlat_ctx_cfg_initialized(const struct xlat_ctx * const ctx);

/*
 * Returns true if the translation tables on the current context are already
 * initialized or false otherwise.
 */
bool xlat_ctx_tbls_initialized(const struct xlat_ctx * const ctx);

/*
 * Initialize a context dynamically at runtime using the given xlat_ctx_cfg
 * and xlat_ctx_tbls structures.
 *
 * Return 0 on success or a POSIX error code otherwise.
 */
int xlat_ctx_create_dynamic(struct xlat_ctx *ctx,
			    struct xlat_ctx_cfg *cfg,
			    struct xlat_ctx_tbls *tbls,
			    void *base_tables,
			    unsigned int base_level_entries,
			    void *tables_ptr,
			    unsigned int ntables);
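
/*
 * Example (a minimal sketch; BASE_LVL_ENTRIES, ENTRIES_PER_TABLE and
 * NUM_TABLES are hypothetical constants chosen by the caller, and the table
 * buffers must satisfy whatever size and alignment constraints the library
 * implementation imposes):
 *
 *	static uint64_t base_table[BASE_LVL_ENTRIES];
 *	static uint64_t tables[NUM_TABLES][ENTRIES_PER_TABLE];
 *	static struct xlat_ctx ctx;
 *	static struct xlat_ctx_cfg cfg;
 *	static struct xlat_ctx_tbls tbls;
 *
 *	int rc = xlat_ctx_create_dynamic(&ctx, &cfg, &tbls,
 *					 base_table, BASE_LVL_ENTRIES,
 *					 tables, NUM_TABLES);
 */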

#endif /*__ASSEMBLER__*/
#endif /* XLAT_TABLES_H */