/*
 * SPDX-License-Identifier: BSD-3-Clause
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 */

#include <arch.h>
#include <arch_features.h>
#include <attestation.h>
#include <buffer.h>
#include <cpuid.h>
#include <gic.h>
#include <granule.h>
#include <mbedtls/memory_buffer_alloc.h>
#include <measurement.h>
#include <memory_alloc.h>
#include <psci.h>
#include <realm.h>
#include <rec.h>
#include <smc-handler.h>
#include <smc-rmi.h>
#include <smc.h>
#include <spinlock.h>
#include <stddef.h>
#include <string.h>
/*
 * Allocate a dummy rec_params structure per CPU, used to stage the
 * parameters that are included in the measurement.
 */
static struct rmi_rec_params rec_params_per_cpu[MAX_CPUS];

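/*
 * Measure the relevant REC creation parameters and extend the Realm
 * Initial Measurement (RIM) with the result.
 */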
static void rec_params_measure(struct rd *rd, struct rmi_rec_params *rec_params)
{
	struct measurement_desc_rec measure_desc = {0};
	struct rmi_rec_params *rec_params_measured =
		&(rec_params_per_cpu[my_cpuid()]);

	memset(rec_params_measured, 0, sizeof(*rec_params_measured));

	/*
	 * Copy the relevant parts of the rmi_rec_params structure to be
	 * measured.
	 */
	rec_params_measured->pc = rec_params->pc;
	rec_params_measured->flags = rec_params->flags;
	memcpy(rec_params_measured->gprs,
	       rec_params->gprs,
	       sizeof(rec_params->gprs));
	/* Initialize the measurement descriptor structure */
	measure_desc.desc_type = MEASURE_DESC_TYPE_REC;
	measure_desc.len = sizeof(struct measurement_desc_rec);
	memcpy(measure_desc.rim,
	       &rd->measurement[RIM_MEASUREMENT_SLOT],
	       measurement_get_size(rd->algorithm));

	/*
	 * Hash the REC params structure and store the result in the
	 * measurement descriptor structure.
	 */
	measurement_hash_compute(rd->algorithm,
				 rec_params_measured,
				 sizeof(*rec_params_measured),
				 measure_desc.content);

	/*
	 * Hash the measurement descriptor structure; the result is the
	 * updated RIM.
	 */
	measurement_hash_compute(rd->algorithm,
				 &measure_desc,
				 sizeof(measure_desc),
				 rd->measurement[RIM_MEASUREMENT_SLOT]);
}

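/*
 * Program the initial per-REC system register state; VMPIDR_EL2 is
 * derived from the MPIDR value provided at REC creation.
 */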
static void init_rec_sysregs(struct rec *rec, unsigned long mpidr)
{
	/* Set non-zero values only */
	rec->sysregs.pmcr_el0 = PMCR_EL0_RES1;
	rec->sysregs.sctlr_el1 = SCTLR_EL1_FLAGS;
	rec->sysregs.mdscr_el1 = MDSCR_EL1_TDCC_BIT;
	rec->sysregs.vmpidr_el2 = mpidr | VMPIDR_EL2_RES1;

	rec->sysregs.cnthctl_el2 = CNTHCTL_EL2_NO_TRAPS;
}

/*
 * Map the starting level of the stage 2 translation
 * lookup to the VTCR_EL2.SL0[7:6] field encoding.
 */
static const unsigned long sl0_val[] = {
	VTCR_SL0_4K_L0,
	VTCR_SL0_4K_L1,
	VTCR_SL0_4K_L2,
	VTCR_SL0_4K_L3
};

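/*
 * Compute the VTCR_EL2 value for the realm: the common flags (plus
 * 16-bit VMID support when available), T0SZ derived from the realm's
 * IPA width, and the SL0 encoding of the stage 2 starting level.
 */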
static unsigned long realm_vtcr(struct rd *rd)
{
	unsigned long t0sz, sl0;
	unsigned long vtcr = is_feat_vmid16_present() ?
				(VTCR_FLAGS | VTCR_VS) : VTCR_FLAGS;
	int s2_starting_level = realm_rtt_starting_level(rd);

	/* TODO: Support LPA2 with -1 */
	assert((s2_starting_level >= 0) && (s2_starting_level <= 3));
	sl0 = sl0_val[s2_starting_level];

	t0sz = 64UL - realm_ipa_bits(rd);
	t0sz &= MASK(VTCR_T0SZ);

	vtcr |= t0sz;
	vtcr |= sl0;

	return vtcr;
}

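/*
 * Initialize the system registers common to all RECs of the realm:
 * HCR_EL2, VTCR_EL2 and VTTBR_EL2 (root RTT address plus the realm's
 * VMID).
 */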
static void init_common_sysregs(struct rec *rec, struct rd *rd)
{
	/* Set non-zero values only */
	rec->common_sysregs.hcr_el2 = HCR_FLAGS;
	rec->common_sysregs.vtcr_el2 = realm_vtcr(rd);
	rec->common_sysregs.vttbr_el2 = granule_addr(rd->s2_ctx.g_rtt);
	rec->common_sysregs.vttbr_el2 &= MASK(TTBRx_EL2_BADDR);
	rec->common_sysregs.vttbr_el2 |= INPLACE(VTTBR_EL2_VMID, rd->s2_ctx.vmid);
}

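/*
 * Initialize the REC's general purpose registers, PC and PSTATE from
 * the creation parameters, then program the per-REC and realm-common
 * system registers. The REC enters at EL1h (AArch64) with the D, A, I
 * and F exceptions masked.
 */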
static void init_rec_regs(struct rec *rec,
			  struct rmi_rec_params *rec_params,
			  struct rd *rd)
{
	unsigned int i;

	/*
	 * We only need to set non-zero values here because we're initializing
	 * data structures in the rec granule which was just converted from
	 * the DELEGATED state to REC state, and we can rely on the RMM
	 * invariant that DELEGATED granules are always zero-filled.
	 */

	for (i = 0U; i < REC_CREATE_NR_GPRS; i++) {
		rec->regs[i] = rec_params->gprs[i];
	}

	rec->pc = rec_params->pc;
	rec->pstate = SPSR_EL2_MODE_EL1h |
		      SPSR_EL2_nRW_AARCH64 |
		      SPSR_EL2_F_BIT |
		      SPSR_EL2_I_BIT |
		      SPSR_EL2_A_BIT |
		      SPSR_EL2_D_BIT;

	init_rec_sysregs(rec, rec_params->mpidr);
	init_common_sysregs(rec, rd);
}

/*
 * This function is only invoked when REC creation fails or when a REC
 * is being destroyed. Hence the REC is not in use when this function is
 * called, and therefore no lock is acquired before its invocation.
 */
static void free_rec_aux_granules(struct granule *rec_aux[],
				  unsigned int cnt, bool scrub)
{
	for (unsigned int i = 0U; i < cnt; i++) {
		struct granule *g_rec_aux = rec_aux[i];

		granule_lock(g_rec_aux, GRANULE_STATE_REC_AUX);
		if (scrub) {
			granule_memzero(g_rec_aux, SLOT_REC_AUX0 + i);
		}
		granule_unlock_transition(g_rec_aux, GRANULE_STATE_DELEGATED);
	}
}

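/*
 * Handler for RMI_REC_CREATE: read the REC parameters from the NS
 * buffer, transition the auxiliary granules to REC_AUX, initialize the
 * REC from the parameters and extend the realm's RIM with them. On
 * failure, the auxiliary granules are returned to the DELEGATED state.
 */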
unsigned long smc_rec_create(unsigned long rec_addr,
			     unsigned long rd_addr,
			     unsigned long rec_params_addr)
{
	struct granule *g_rd;
	struct granule *g_rec;
	struct granule *rec_aux_granules[MAX_REC_AUX_GRANULES];
	struct granule *g_rec_params;
	struct rec *rec;
	struct rd *rd;
	struct rmi_rec_params rec_params;
	unsigned long rec_idx;
	enum granule_state new_rec_state = GRANULE_STATE_DELEGATED;
	unsigned long ret;
	bool ns_access_ok;
	unsigned int num_rec_aux;

	g_rec_params = find_granule(rec_params_addr);
	if ((g_rec_params == NULL) || (g_rec_params->state != GRANULE_STATE_NS)) {
		return RMI_ERROR_INPUT;
	}

	ns_access_ok = ns_buffer_read(SLOT_NS, g_rec_params, 0U,
				      sizeof(rec_params), &rec_params);

	if (!ns_access_ok) {
		return RMI_ERROR_INPUT;
	}

	num_rec_aux = (unsigned int)rec_params.num_aux;
	if (num_rec_aux > MAX_REC_AUX_GRANULES) {
		return RMI_ERROR_INPUT;
	}

	/* Loop through the auxiliary granules and transition them to REC_AUX */
	for (unsigned int i = 0U; i < num_rec_aux; i++) {
		struct granule *g_rec_aux = find_lock_granule(
						rec_params.aux[i],
						GRANULE_STATE_DELEGATED);
		if (g_rec_aux == NULL) {
			free_rec_aux_granules(rec_aux_granules, i, false);
			return RMI_ERROR_INPUT;
		}
		granule_unlock_transition(g_rec_aux, GRANULE_STATE_REC_AUX);
		rec_aux_granules[i] = g_rec_aux;
	}

	if (!find_lock_two_granules(rec_addr,
				    GRANULE_STATE_DELEGATED,
				    &g_rec,
				    rd_addr,
				    GRANULE_STATE_RD,
				    &g_rd)) {
		ret = RMI_ERROR_INPUT;
		goto out_free_aux;
	}

	rec = granule_map(g_rec, SLOT_REC);
	rd = granule_map(g_rd, SLOT_RD);

	if (get_rd_state_locked(rd) != REALM_STATE_NEW) {
		ret = RMI_ERROR_REALM;
		goto out_unmap;
	}

	rec_idx = get_rd_rec_count_locked(rd);
	if (!mpidr_is_valid(rec_params.mpidr) ||
	   (rec_idx != mpidr_to_rec_idx(rec_params.mpidr))) {
		ret = RMI_ERROR_INPUT;
		goto out_unmap;
	}

	/* Verify the auxiliary granule count with rd lock held */
	if (num_rec_aux != rd->num_rec_aux) {
		ret = RMI_ERROR_INPUT;
		goto out_unmap;
	}

	rec->g_rec = g_rec;
	rec->rec_idx = rec_idx;

	init_rec_regs(rec, &rec_params, rd);
	gic_cpu_state_init(&rec->sysregs.gicstate);

	/* Copy addresses of auxiliary granules */
	(void)memcpy(rec->g_aux, rec_aux_granules,
			num_rec_aux * sizeof(rec->g_aux[0]));

	rec->num_rec_aux = num_rec_aux;

	rec->realm_info.ipa_bits = realm_ipa_bits(rd);
	rec->realm_info.s2_starting_level = realm_rtt_starting_level(rd);
	rec->realm_info.g_rtt = rd->s2_ctx.g_rtt;
	rec->realm_info.g_rd = g_rd;

	rec_params_measure(rd, &rec_params);

	/*
	 * The RD is accessed without a lock from RMI_REC_DESTROY, hence
	 * increment the refcount atomically. Also, since the granule is
	 * only used for the refcount update, a plain atomic operation
	 * suffices and release/acquire semantics are not required.
	 */
	atomic_granule_get(g_rd);
	new_rec_state = GRANULE_STATE_REC;
	rec->runnable = rec_params.flags & REC_PARAMS_FLAG_RUNNABLE;

	rec->alloc_info.ctx_initialised = false;
	/* Initialize attestation state */
	rec->token_sign_ctx.state = ATTEST_SIGN_NOT_STARTED;

	set_rd_rec_count(rd, rec_idx + 1U);

	ret = RMI_SUCCESS;

out_unmap:
	buffer_unmap(rd);
	buffer_unmap(rec);

	granule_unlock(g_rd);
	granule_unlock_transition(g_rec, new_rec_state);

out_free_aux:
	if (ret != RMI_SUCCESS) {
		free_rec_aux_granules(rec_aux_granules, num_rec_aux, false);
	}
	return ret;
}

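/*
 * Handler for RMI_REC_DESTROY: scrub and free the auxiliary granules,
 * zero the REC granule, return it to the DELEGATED state and drop the
 * reference held on the owning RD.
 */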
unsigned long smc_rec_destroy(unsigned long rec_addr)
{
	struct granule *g_rec;
	struct granule *g_rd;
	struct rec *rec;

	/* REC should not be destroyed if refcount != 0 */
	g_rec = find_lock_unused_granule(rec_addr, GRANULE_STATE_REC);
	if (ptr_is_err(g_rec)) {
		return (unsigned long)ptr_status(g_rec);
	}

	rec = granule_map(g_rec, SLOT_REC);

	g_rd = rec->realm_info.g_rd;

	/* Free and scrub the auxiliary granules */
	free_rec_aux_granules(rec->g_aux, rec->num_rec_aux, true);

	granule_memzero_mapped(rec);
	buffer_unmap(rec);

	granule_unlock_transition(g_rec, GRANULE_STATE_DELEGATED);

	/*
	 * Decrement the refcount. The refcount should be balanced before
	 * RMI_REC_DESTROY returns; until this occurs, a transient
	 * over-estimate of the refcount (between the unlock and the
	 * decrement) is legitimate. Also, since the granule is only used
	 * for the refcount update, a plain atomic operation suffices and
	 * release/acquire semantics are not required.
	 */
	atomic_granule_put(g_rd);

	return RMI_SUCCESS;
}

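/*
 * Handler for RMI_REC_AUX_COUNT: return in x[1] the number of auxiliary
 * granules required for each REC of the given realm.
 */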
void smc_rec_aux_count(unsigned long rd_addr, struct smc_result *ret_struct)
{
	unsigned int num_rec_aux;
	struct granule *g_rd;
	struct rd *rd;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		ret_struct->x[0] = RMI_ERROR_INPUT;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);
	num_rec_aux = rd->num_rec_aux;
	buffer_unmap(rd);
	granule_unlock(g_rd);

	ret_struct->x[0] = RMI_SUCCESS;
	ret_struct->x[1] = (unsigned long)num_rec_aux;
}

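/*
 * Complete a pending PSCI request: both the calling and the target RECs
 * are locked, the calling REC must not be running on another PE, and
 * the request is forwarded to the PSCI layer.
 */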
unsigned long smc_psci_complete(unsigned long calling_rec_addr,
				unsigned long target_rec_addr)
{
	struct granule *g_calling_rec, *g_target_rec;
	struct rec *calling_rec, *target_rec;
	unsigned long ret;

	assert(calling_rec_addr != 0UL);
	assert(target_rec_addr != 0UL);

	if (!GRANULE_ALIGNED(calling_rec_addr)) {
		return RMI_ERROR_INPUT;
	}

	if (!GRANULE_ALIGNED(target_rec_addr)) {
		return RMI_ERROR_INPUT;
	}

	if (!find_lock_two_granules(calling_rec_addr,
				    GRANULE_STATE_REC,
				    &g_calling_rec,
				    target_rec_addr,
				    GRANULE_STATE_REC,
				    &g_target_rec)) {
		return RMI_ERROR_INPUT;
	}

	/*
	 * Access to a REC from RMI_REC_ENTER is only protected by the
	 * reference counter. Here, we may access the volatile (non-constant)
	 * members of the REC structure (such as rec->running) only if the
	 * counter is zero.
	 */
	if (granule_refcount_read_acquire(g_calling_rec) != 0UL) {
		/*
		 * The `calling` REC is running on another PE and therefore it
		 * may not have a pending PSCI request.
		 */
		ret = RMI_ERROR_INPUT;
		goto out_unlock;
	}

	calling_rec = granule_map(g_calling_rec, SLOT_REC);
	target_rec = granule_map(g_target_rec, SLOT_REC2);

	ret = psci_complete_request(calling_rec, target_rec);

	buffer_unmap(target_rec);
	buffer_unmap(calling_rec);
out_unlock:
	granule_unlock(g_calling_rec);
	granule_unlock(g_target_rec);

	return ret;
}