1 /*
2  * SPDX-License-Identifier: BSD-3-Clause
3  * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
4  */
5 
6 #include <assert.h>
7 #include <buffer.h>
8 #include <feature.h>
9 #include <granule.h>
10 #include <measurement.h>
11 #include <realm.h>
12 #include <smc-handler.h>
13 #include <smc-rmi.h>
14 #include <smc.h>
15 #include <stddef.h>
16 #include <string.h>
17 #include <table.h>
18 #include <vmid.h>
19 
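/*
 * Handler for RMI_REALM_ACTIVATE: transitions the Realm described by the RD
 * granule at 'rd_addr' from the NEW state to the ACTIVE state. Returns
 * RMI_ERROR_INPUT if 'rd_addr' does not refer to an RD granule, and
 * RMI_ERROR_REALM if the Realm is not in the NEW state.
 */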
unsigned long smc_realm_activate(unsigned long rd_addr)
{
	struct rd *rd;
	struct granule *g_rd;
	unsigned long ret;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);
	if (get_rd_state_locked(rd) == REALM_STATE_NEW) {
		set_rd_state(rd, REALM_STATE_ACTIVE);
		ret = RMI_SUCCESS;
	} else {
		ret = RMI_ERROR_REALM;
	}
	buffer_unmap(rd);

	granule_unlock(g_rd);

	return ret;
}

static bool get_realm_params(struct rmi_realm_params *realm_params,
				unsigned long realm_params_addr)
{
	bool ns_access_ok;
	struct granule *g_realm_params;

	g_realm_params = find_granule(realm_params_addr);
	if ((g_realm_params == NULL) || (g_realm_params->state != GRANULE_STATE_NS)) {
		return false;
	}

	ns_access_ok = ns_buffer_read(SLOT_NS, g_realm_params, 0U,
				      sizeof(*realm_params), realm_params);

	return ns_access_ok;
}

/*
 * See the library pseudocode
 * aarch64/translation/vmsa_faults/AArch64.S2InconsistentSL on which this is
 * modeled.
 */
static bool s2_inconsistent_sl(unsigned int ipa_bits, int sl)
{
	int levels = RTT_PAGE_LEVEL - sl;
	unsigned int sl_min_ipa_bits, sl_max_ipa_bits;

	/*
	 * The maximum number of concatenated tables is 16,
	 * hence we are adding 4 to the 'sl_max_ipa_bits'.
	 */
	sl_min_ipa_bits = levels * S2TTE_STRIDE + GRANULE_SHIFT + 1U;
	sl_max_ipa_bits = sl_min_ipa_bits + (S2TTE_STRIDE - 1U) + 4U;

	return ((ipa_bits < sl_min_ipa_bits) || (ipa_bits > sl_max_ipa_bits));
}
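
/*
 * Worked example for s2_inconsistent_sl(), assuming 4KB granules
 * (GRANULE_SHIFT == 12), S2TTE_STRIDE == 9 and RTT_PAGE_LEVEL == 3:
 *
 *   sl == 0: levels = 3, sl_min_ipa_bits = 3 * 9 + 12 + 1 = 40,
 *            sl_max_ipa_bits = 40 + 8 + 4 = 52,
 *            so SL0 is consistent with IPA sizes of 40 to 52 bits.
 *   sl == 1: levels = 2, sl_min_ipa_bits = 2 * 9 + 12 + 1 = 31,
 *            sl_max_ipa_bits = 31 + 8 + 4 = 43,
 *            so SL1 is consistent with IPA sizes of 31 to 43 bits.
 */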

static bool validate_ipa_bits_and_sl(unsigned int ipa_bits, long sl)
{
	if ((ipa_bits < MIN_IPA_BITS) || (ipa_bits > MAX_IPA_BITS)) {
		return false;
	}

	if ((sl < MIN_STARTING_LEVEL) || (sl > RTT_PAGE_LEVEL)) {
		return false;
	}

	/*
	 * We assume ARMv8.4-TTST is supported with RME, so the only SL
	 * configuration we need to check with 4K granules is SL == 0, following
	 * the library pseudocode aarch64/translation/vmsa_faults/AArch64.S2InvalidSL.
	 *
	 * Note that this only checks invalid SL values against the properties
	 * of the hardware platform; other misconfigurations between IPA size
	 * and SL are checked in s2_inconsistent_sl().
	 */
	if ((sl == 0L) && (max_ipa_size() < 44U)) {
		return false;
	}

	return !s2_inconsistent_sl(ipa_bits, (int)sl);
}

static unsigned int requested_ipa_bits(struct rmi_realm_params *p)
{
	return (unsigned int)EXTRACT(RMM_FEATURE_REGISTER_0_S2SZ, p->features_0);
}

static unsigned int s2_num_root_rtts(unsigned int ipa_bits, int sl)
{
	unsigned int levels = (unsigned int)(RTT_PAGE_LEVEL - sl);
	unsigned int sl_ipa_bits;

	/* First calculate how many bits can be resolved without concatenation */
	sl_ipa_bits = levels * S2TTE_STRIDE /* Bits resolved by table walk without SL */
		      + GRANULE_SHIFT	    /* Bits directly mapped to OA */
		      + S2TTE_STRIDE;	    /* Bits resolved by single SL */

	if (sl_ipa_bits >= ipa_bits) {
		return 1U;
	}

	return (1U << (ipa_bits - sl_ipa_bits));
}
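
/*
 * Worked example for s2_num_root_rtts(), assuming 4KB granules
 * (GRANULE_SHIFT == 12), S2TTE_STRIDE == 9 and RTT_PAGE_LEVEL == 3:
 *
 *   With sl == 0, a single root RTT resolves up to
 *   sl_ipa_bits = 3 * 9 + 12 + 9 = 48 bits, so ipa_bits == 48 needs one
 *   root RTT, while ipa_bits == 50 needs 1 << (50 - 48) = 4 concatenated
 *   root RTTs.
 */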

static bool validate_realm_params(struct rmi_realm_params *p)
{
	if (!validate_feature_register(RMM_FEATURE_REGISTER_0_INDEX,
					p->features_0)) {
		return false;
	}

	if (!validate_ipa_bits_and_sl(requested_ipa_bits(p),
					p->rtt_level_start)) {
		return false;
	}

	if (s2_num_root_rtts(requested_ipa_bits(p),
				p->rtt_level_start) != p->rtt_num_start) {
		return false;
	}

	/*
	 * TODO: Check the VMSA configuration, which is either static for the
	 * RMM or per-Realm from the supplied parameters. Store the
	 * configuration in the RD so that it can potentially be copied into
	 * RECs later.
	 */

	switch (p->hash_algo) {
	case RMI_HASH_ALGO_SHA256:
	case RMI_HASH_ALGO_SHA512:
		break;
	default:
		return false;
	}

	/* Check for VMID collision and reserve the VMID atomically if available */
	return vmid_reserve((unsigned int)p->vmid);
}

/*
 * Update the realm measurement with the realm parameters.
 */
static void realm_params_measure(struct rd *rd,
				 struct rmi_realm_params *realm_params)
{
	/* By specification, the realm params occupy a 4KB granule */
	unsigned char buffer[SZ_4K] = {0};
	struct rmi_realm_params *realm_params_measured =
		(struct rmi_realm_params *)&buffer[0];

	realm_params_measured->hash_algo = realm_params->hash_algo;
	/* TODO: Add later */
	/* realm_params_measured->features_0 = realm_params->features_0; */

	/* Measure the relevant realm params; this is the initial value of the RIM */
	measurement_hash_compute(rd->algorithm,
				 buffer,
				 sizeof(buffer),
				 rd->measurement[RIM_MEASUREMENT_SLOT]);
}

static void free_sl_rtts(struct granule *g_rtt, unsigned int num_rtts)
{
	unsigned int i;

	for (i = 0U; i < num_rtts; i++) {
		struct granule *g = g_rtt + i;

		granule_lock(g, GRANULE_STATE_RTT);
		granule_memzero(g, SLOT_RTT);
		granule_unlock_transition(g, GRANULE_STATE_DELEGATED);
	}
}

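/*
 * Lock the RD granule and the starting level RTT granule(s), all of which
 * are expected to be in the DELEGATED state. The granules are locked in
 * ascending address order to keep a single, deadlock-free locking order:
 * the RD is locked before the RTTs if rd_addr is below rtt_base_addr, and
 * after them otherwise. On failure, any granules locked so far are unlocked
 * and false is returned.
 */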
static bool find_lock_rd_granules(unsigned long rd_addr,
				  struct granule **p_g_rd,
				  unsigned long rtt_base_addr,
				  unsigned int num_rtts,
				  struct granule **p_g_rtt_base)
{
	struct granule *g_rd = NULL, *g_rtt_base = NULL;
	int i = 0;

	if (rd_addr < rtt_base_addr) {
		g_rd = find_lock_granule(rd_addr, GRANULE_STATE_DELEGATED);
		if (g_rd == NULL) {
			goto out_err;
		}
	}

	for (; i < num_rtts; i++) {
		unsigned long rtt_addr = rtt_base_addr + i * GRANULE_SIZE;
		struct granule *g_rtt;

		g_rtt = find_lock_granule(rtt_addr, GRANULE_STATE_DELEGATED);
		if (g_rtt == NULL) {
			goto out_err;
		}

		if (i == 0) {
			g_rtt_base = g_rtt;
		}
	}

	if (g_rd == NULL) {
		g_rd = find_lock_granule(rd_addr, GRANULE_STATE_DELEGATED);
		if (g_rd == NULL) {
			goto out_err;
		}
	}

	*p_g_rd = g_rd;
	*p_g_rtt_base = g_rtt_base;

	return true;

out_err:
	for (i = i - 1; i >= 0; i--) {
		granule_unlock(g_rtt_base + i);
	}

	if (g_rd != NULL) {
		granule_unlock(g_rd);
	}

	return false;
}

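/*
 * Handler for RMI_REALM_CREATE: reads the Realm parameters from the NS
 * granule at 'realm_params_addr', validates them and reserves the VMID.
 * On success, the granule at 'rd_addr' becomes the Realm Descriptor (RD),
 * the starting level RTT granules transition from DELEGATED to RTT and the
 * initial Realm measurement (RIM) is computed.
 */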
unsigned long smc_realm_create(unsigned long rd_addr,
			       unsigned long realm_params_addr)
{
	struct granule *g_rd, *g_rtt_base;
	struct rd *rd;
	struct rmi_realm_params p;
	unsigned int i;

	if (!get_realm_params(&p, realm_params_addr)) {
		return RMI_ERROR_INPUT;
	}

	if (!validate_realm_params(&p)) {
		return RMI_ERROR_INPUT;
	}

	/*
	 * At this point the VMID is reserved for the Realm.
	 *
	 * Check for aliasing between rd_addr and the
	 * starting level RTT address(es).
	 */
	if (addr_is_contained(p.rtt_base,
			      p.rtt_base + p.rtt_num_start * GRANULE_SIZE,
			      rd_addr)) {

		/* Free the reserved VMID before returning */
		vmid_free((unsigned int)p.vmid);
		return RMI_ERROR_INPUT;
	}

	if (!find_lock_rd_granules(rd_addr, &g_rd, p.rtt_base,
				  p.rtt_num_start, &g_rtt_base)) {
		/* Free the reserved VMID */
		vmid_free((unsigned int)p.vmid);
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);
	set_rd_state(rd, REALM_STATE_NEW);
	set_rd_rec_count(rd, 0UL);
	rd->s2_ctx.g_rtt = find_granule(p.rtt_base);
	rd->s2_ctx.ipa_bits = requested_ipa_bits(&p);
	rd->s2_ctx.s2_starting_level = p.rtt_level_start;
	rd->s2_ctx.num_root_rtts = p.rtt_num_start;

	rd->s2_ctx.vmid = (unsigned int)p.vmid;

	rd->num_rec_aux = MAX_REC_AUX_GRANULES;

	(void)memcpy(&rd->rpv[0], &p.rpv[0], RPV_SIZE);

	switch (p.hash_algo) {
	case RMI_HASH_ALGO_SHA256:
		rd->algorithm = HASH_ALGO_SHA256;
		break;
	case RMI_HASH_ALGO_SHA512:
		rd->algorithm = HASH_ALGO_SHA512;
		break;
	default:
		/* Unreachable: p.hash_algo was checked in validate_realm_params() */
		assert(false);
	}
	realm_params_measure(rd, &p);

	buffer_unmap(rd);

	granule_unlock_transition(g_rd, GRANULE_STATE_RD);

	for (i = 0U; i < p.rtt_num_start; i++) {
		granule_unlock_transition(g_rtt_base + i, GRANULE_STATE_RTT);
	}

	return RMI_SUCCESS;
}

static unsigned long total_root_rtt_refcount(struct granule *g_rtt,
					     unsigned int num_rtts)
{
	unsigned long refcount = 0UL;
	unsigned int i;

	for (i = 0U; i < num_rtts; i++) {
		struct granule *g = g_rtt + i;

		/*
		 * Lock starting from the RTT root.
		 * Enforcing the locking order RD->RTT is enough to ensure
		 * a deadlock-free locking guarantee.
		 */
		granule_lock(g, GRANULE_STATE_RTT);
		refcount += g->refcount;
		granule_unlock(g);
	}

	return refcount;
}

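/*
 * Handler for RMI_REALM_DESTROY: tears down the Realm described by the RD
 * granule at 'rd_addr'. The RD must be unused and its root RTTs must have a
 * zero refcount (i.e. hold no live mappings). The VMID is released, and the
 * root RTTs and the RD are scrubbed and returned to the DELEGATED state.
 */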
unsigned long smc_realm_destroy(unsigned long rd_addr)
{
	struct granule *g_rd;
	struct granule *g_rtt;
	struct rd *rd;
	unsigned int num_rtts;

	/* RD should not be destroyed if refcount != 0. */
	g_rd = find_lock_unused_granule(rd_addr, GRANULE_STATE_RD);
	if (ptr_is_err(g_rd)) {
		return (unsigned long)ptr_status(g_rd);
	}

	rd = granule_map(g_rd, SLOT_RD);
	g_rtt = rd->s2_ctx.g_rtt;
	num_rtts = rd->s2_ctx.num_root_rtts;

	/*
	 * All the mappings in the Realm have been removed and the TLB caches
	 * are invalidated. Therefore, there are no TLB entries tagged with
	 * this Realm's VMID (in this security state).
	 * Just release the VMID value so it can be used in another Realm.
	 */
	vmid_free(rd->s2_ctx.vmid);
	buffer_unmap(rd);

	/* Check that the root RTT granules are unused */
	if (total_root_rtt_refcount(g_rtt, num_rtts) != 0UL) {
		granule_unlock(g_rd);
		return RMI_ERROR_IN_USE;
	}

	free_sl_rtts(g_rtt, num_rtts);

	/* This implicitly destroys the measurement */
	granule_memzero(g_rd, SLOT_RD);
	granule_unlock_transition(g_rd, GRANULE_STATE_DELEGATED);

	return RMI_SUCCESS;
}