1 /*
2  * SPDX-License-Identifier: BSD-3-Clause
3  * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
4  * SPDX-FileCopyrightText: Copyright Arm Limited and Contributors.
5  */
6 
7 /* This file is derived from xlat_table_v2 library in TF-A project */
8 
9 #include <arch_features.h>
10 #include <debug.h>
11 #include <errno.h>
12 #include <xlat_contexts.h>
13 #include <xlat_defs_private.h>
14 #include <xlat_tables.h>
15 #include <xlat_tables_private.h>
16 
17 /*
18  * Encode a Physical Address Space size for its use in TCR_ELx.
19  */
tcr_physical_addr_size_bits(uintptr_t max_addr)20 static unsigned long long tcr_physical_addr_size_bits(uintptr_t max_addr)
21 {
22 	if ((max_addr & ADDR_MASK_48_TO_63) != 0U) {
23 		/* Physical address can't exceed 48 bits */
24 		panic();
25 	}
26 
27 	/* 48 bits address */
28 	if ((max_addr & ADDR_MASK_44_TO_47) != 0U) {
29 		return TCR_PS_BITS_256TB;
30 	}
31 
32 	/* 44 bits address */
33 	if ((max_addr & ADDR_MASK_42_TO_43) != 0U) {
34 		return TCR_PS_BITS_16TB;
35 	}
36 
37 	/* 42 bits address */
38 	if ((max_addr & ADDR_MASK_40_TO_41) != 0U) {
39 		return TCR_PS_BITS_4TB;
40 	}
41 
42 	/* 40 bits address */
43 	if ((max_addr & ADDR_MASK_36_TO_39) != 0U) {
44 		return TCR_PS_BITS_1TB;
45 	}
46 
47 	/* 36 bits address */
48 	if ((max_addr & ADDR_MASK_32_TO_35) != 0U) {
49 		return TCR_PS_BITS_64GB;
50 	}
51 
52 	return TCR_PS_BITS_4GB;
53 }
54 
55 /*
56  * Configure MMU registers. This function assumes that all the contexts use the
57  * same limits for VA and PA spaces.
58  */
xlat_arch_setup_mmu_cfg(struct xlat_ctx * const ctx)59 int xlat_arch_setup_mmu_cfg(struct xlat_ctx * const ctx)
60 {
61 	uint64_t mair;
62 	uint64_t tcr;
63 	uint64_t ttbrx;
64 	uintptr_t va_space_size;
65 	struct xlat_ctx_cfg *ctx_cfg;
66 	struct xlat_ctx_tbls *ctx_tbls;
67 	unsigned int txsz;
68 	unsigned int t0sz;
69 	unsigned int t1sz;
70 
71 	if (ctx == NULL) {
72 		return -EINVAL;
73 	}
74 
75 	ctx_cfg = ctx->cfg;
76 	ctx_tbls = ctx->tbls;
77 
78 	if (ctx_cfg == NULL || ctx_tbls == NULL) {
79 		return -EINVAL;
80 	}
81 
82 	if (xlat_ctx_cfg_initialized(ctx) == false) {
83 		return -EINVAL;
84 	}
85 
86 	/* MMU cannot be enabled at this point */
87 	if (is_mmu_enabled() == true) {
88 		return -EPERM;
89 	}
90 
91 	/* Set attributes in the right indices of the MAIR. */
92 	mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
93 	mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX);
94 	mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE, ATTR_NON_CACHEABLE_INDEX);
95 
96 	va_space_size = (uintptr_t)ctx_cfg->max_va_size;
97 
98 	/*
99 	 * __builtin_ctzll(0) is undefined but here we are guaranteed that
100 	 * va_space_size is in the range [1,UINTPTR_MAX].
101 	 */
102 	txsz = 64 - __builtin_ctzll(va_space_size);
103 
104 	/*
105 	 * Read TCR_EL2 in order to extract t0sz and t1sz. So we can update the right
106 	 * field depending on which context we are configuring and leave the other one
107 	 * untouched.
108 	 * It will not be a problem if TCR_EL2 was previoulsy configured, as the new
109 	 * value of it will be the same with the only difference of the txsz field we
110 	 * want to update.
111 	 */
112 	tcr = read_tcr_el2();
113 	if (ctx_cfg->region == VA_LOW_REGION) {
114 		t0sz = txsz;
115 		t1sz = (tcr >> TCR_EL2_T1SZ_SHIFT) & TCR_EL2_T1SZ_MASK;
116 	} else {
117 		t0sz = (tcr >> TCR_EL2_T0SZ_SHIFT) & TCR_EL2_T0SZ_MASK;
118 		t1sz = txsz;
119 	}
120 
121 	/* Recompute the value for TCR_EL2 */
122 	tcr = (uint64_t)t0sz << TCR_EL2_T0SZ_SHIFT;
123 	tcr |= (uint64_t)t1sz << TCR_EL2_T1SZ_SHIFT;
124 
125 	/*
126 	 * Set the cacheability and shareability attributes for memory
127 	 * associated with translation table walks.
128 	 */
129 	/* Inner & outer WBWA & shareable for both halfs. */
130 	tcr |= TCR_EL2_IRGN0_WBWA | TCR_EL2_ORGN0_WBWA | TCR_EL2_SH0_IS;
131 	tcr |= TCR_EL2_IRGN1_WBWA | TCR_EL2_ORGN1_WBWA | TCR_EL2_SH1_IS;
132 
133 	/*
134 	 * ASID and hierarchical permissions.
135 	 */
136 	tcr |= TCR_EL2_AS | TCR_EL2_HPD0 | TCR_EL2_HPD1;
137 
138 	/*
139 	 * Granule size. Only 4K supported on both halfs.
140 	 */
141 	tcr |= TCR_EL2_TG0_4K | TCR_EL2_TG1_4K;
142 
143 	/*
144 	 * Set physical address size to the limit supported by the PE.
145 	 */
146 	tcr |= tcr_physical_addr_size_bits(xlat_arch_get_max_supported_pa());
147 
148 	write_mair_el2(mair);
149 	write_tcr_el2(tcr);
150 
151 	/*
152 	 * Set TTBR bits as well and enable CnP bit so as to share page
153 	 * tables with all PEs.
154 	 */
155 	ttbrx = (uint64_t)(void *)ctx_tbls->base_table;
156 
157 	/*
158 	 * The VA region is not common for the HIGH region as it is used
159 	 * by slot buffer.
160 	 */
161 	if (ctx_cfg->region == VA_HIGH_REGION) {
162 		ttbrx &= ~TTBR_CNP_BIT;
163 	} else {
164 		ttbrx |= TTBR_CNP_BIT;
165 	}
166 
167 	if (ctx_cfg->region == VA_LOW_REGION) {
168 		write_ttbr0_el2(ttbrx);
169 	} else {
170 		write_ttbr1_el2(ttbrx);
171 	}
172 
173 	return 0;
174 }
175 
xlat_arch_get_max_supported_pa(void)176 uintptr_t xlat_arch_get_max_supported_pa(void)
177 {
178 	return (1UL << arch_feat_get_pa_width()) - 1UL;
179 }
180 
/*
 * Issue a broadcast (Inner Shareable) TLB invalidation for the EL2
 * translation of 'va'.
 *
 * Completion is not guaranteed on return; callers must follow up with
 * xlat_arch_tlbi_va_sync() before relying on the invalidation.
 */
void xlat_arch_tlbi_va(uintptr_t va)
{
	/*
	 * Ensure the translation table write has drained into memory before
	 * invalidating the TLB entry.
	 */
	dsb(ishst);

	tlbivae2is(TLBI_ADDR(va));
}
191 
/*
 * Wait for previously issued TLB maintenance (e.g. xlat_arch_tlbi_va())
 * to complete and make its effects visible to this PE.
 */
void xlat_arch_tlbi_va_sync(void)
{
	/*
	 * A TLB maintenance instruction can complete at any time after
	 * it is issued, but is only guaranteed to be complete after the
	 * execution of DSB by the PE that executed the TLB maintenance
	 * instruction. After the TLB invalidate instruction is
	 * complete, no new memory accesses using the invalidated TLB
	 * entries will be observed by any observer of the system
	 * domain. See section D4.8.2 of the ARMv8 (issue k), paragraph
	 * "Ordering and completion of TLB maintenance instructions".
	 */
	dsb(ish);

	/*
	 * The effects of a completed TLB maintenance instruction are
	 * only guaranteed to be visible on the PE that executed the
	 * instruction after the execution of an ISB instruction by the
	 * PE that executed the TLB maintenance instruction.
	 */
	isb();
}
214 
215 /*
216  * Determine the physical address space encoded in the 'attr' parameter.
217  */
xlat_arch_get_pas(uint64_t attr)218 uint64_t xlat_arch_get_pas(uint64_t attr)
219 {
220 	uint64_t pas = MT_PAS(attr);
221 
222 	switch (pas) {
223 	case MT_REALM:
224 		return 0U;
225 	case MT_NS:
226 		return LOWER_ATTRS(NS);
227 	default:
228 		panic();
229 	}
230 
231 	/* Avoid -Werror=return-type. Should never reach here. */
232 	return 0U;
233 }
234