/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <attestation_token.h>
#include <buffer.h>
#include <esr.h>
#include <exit.h>
#include <fpu_helpers.h>
#include <gic.h>
#include <granule.h>
#include <inject_exp.h>
#include <memory_alloc.h>
#include <psci.h>
#include <realm.h>
#include <realm_attest.h>
#include <rec.h>
#include <rsi-config.h>
#include <rsi-handler.h>
#include <rsi-host-call.h>
#include <rsi-logger.h>
#include <rsi-memory.h>
#include <rsi-walk.h>
#include <smc-rmi.h>
#include <smc-rsi.h>
#include <status.h>
#include <sve.h>
#include <sysreg_traps.h>
#include <table.h>

void save_fpu_state(struct fpu_state *fpu);
void restore_fpu_state(struct fpu_state *fpu);

static void system_abort(void)
{
	/*
	 * TODO: report the abort to EL3.
	 * We need to establish the exact EL3 API first.
	 */
	assert(false);
}

static bool fixup_aarch32_data_abort(struct rec *rec, unsigned long *esr)
{
	unsigned long spsr = read_spsr_el2();

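	/*
	 * SPSR_EL2.M[4] (nRW) is set to 1 when the exception was taken from
	 * AArch32 execution state.
	 */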
	if ((spsr & SPSR_EL2_nRW_AARCH32) != 0UL) {
		/*
		 * MMIO emulation of AArch32 reads/writes is not supported.
		 */
		*esr &= ~ESR_EL2_ABORT_ISV_BIT;
		return true;
	}
	return false;
}

static unsigned long get_dabt_write_value(struct rec *rec, unsigned long esr)
{
	unsigned int rt = esr_srt(esr);

	/* SRT == 31 encodes the zero register (xzr) */
	if (rt == 31U) {
		return 0UL;
	}
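	/*
	 * Mask the value to the access size recorded in the abort syndrome
	 * so that an 8/16/32-bit store does not expose the upper bits of Xt.
	 */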
	return rec->regs[rt] & access_mask(esr);
}

/*
 * Returns 'true' if access from @rec to @addr is within the Protected IPA
 * space.
 */
static bool access_in_rec_par(struct rec *rec, unsigned long addr)
{
	/*
	 * It is OK to check only the base address of the access because:
	 * - The Protected IPA space starts at address zero.
	 * - The IPA width is below 64 bits, therefore the access cannot
	 *   wrap around.
	 */
	return addr_in_rec_par(rec, addr);
}

/*
 * Returns 'true' if the @ipa is in PAR and its RIPAS is 'empty'.
 *
 * @ipa must be aligned to the granule size.
 */
static bool ipa_is_empty(unsigned long ipa, struct rec *rec)
{
	unsigned long s2tte, *ll_table;
	struct rtt_walk wi;
	enum ripas ripas;
	bool ret;

	assert(GRANULE_ALIGNED(ipa));

	if (!addr_in_rec_par(rec, ipa)) {
		return false;
	}
	granule_lock(rec->realm_info.g_rtt, GRANULE_STATE_RTT);

	rtt_walk_lock_unlock(rec->realm_info.g_rtt,
			     rec->realm_info.s2_starting_level,
			     rec->realm_info.ipa_bits,
			     ipa, RTT_PAGE_LEVEL, &wi);
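	/*
	 * The walk releases the root lock taken above and returns with the
	 * last-level table granule (wi.g_llt) locked.
	 */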

	ll_table = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&ll_table[wi.index]);

	if (s2tte_is_destroyed(s2tte)) {
		ret = false;
		goto out_unmap_ll_table;
	}
	ripas = s2tte_get_ripas(s2tte);
	ret = (ripas == RMI_EMPTY);

out_unmap_ll_table:
	buffer_unmap(ll_table);
	granule_unlock(wi.g_llt);
	return ret;
}

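/*
 * ESR_EL2.xFSC reports a synchronous external abort either directly (SEA)
 * or as an abort on a translation table walk (SEA_TTW).
 */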
static bool fsc_is_external_abort(unsigned long fsc)
{
	if (fsc == ESR_EL2_ABORT_FSC_SEA) {
		return true;
	}

	if ((fsc >= ESR_EL2_ABORT_FSC_SEA_TTW_START) &&
	    (fsc <= ESR_EL2_ABORT_FSC_SEA_TTW_END)) {
		return true;
	}

	return false;
}

/*
 * Handles Data/Instruction Aborts at a lower EL with an External Abort
 * fault status code (D/IFSC).
 * Returns 'true' if the exception is an external abort and the `rec_exit`
 * structure has been populated, 'false' otherwise.
 */
static bool handle_sync_external_abort(struct rec *rec,
				       struct rmi_rec_exit *rec_exit,
				       unsigned long esr)
{
	unsigned long fsc = esr & ESR_EL2_ABORT_FSC_MASK;
	unsigned long set = esr & ESR_EL2_ABORT_SET_MASK;

	if (!fsc_is_external_abort(fsc)) {
		return false;
	}

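	/*
	 * ESR_EL2.SET classifies the severity of the synchronous external
	 * abort: recoverable (UER), restartable (UEO) or uncontainable (UC).
	 */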
	switch (set) {
	case ESR_EL2_ABORT_SET_UER:
		/*
		 * Recoverable SEA.
		 * Inject the sync. abort into the Realm.
		 * Report the exception to the host.
		 */
		inject_sync_idabort(ESR_EL2_ABORT_FSC_SEA);
		/*
		 * Fall through.
		 */
	case ESR_EL2_ABORT_SET_UEO:
		/*
		 * Restartable SEA.
		 * Report the exception to the host.
		 * The REC restarts the same instruction.
		 */
		rec_exit->esr = esr & ESR_NONEMULATED_ABORT_MASK;

		/*
		 * The value of HPFAR_EL2 is not provided to the host as
		 * it is undefined for external aborts.
		 *
		 * We also don't provide the content of FAR_EL2 because it
		 * has no practical value to the host without HPFAR_EL2.
		 */
		break;
	case ESR_EL2_ABORT_SET_UC:
		/*
		 * Uncontainable SEA.
		 * Fatal to the system.
		 */
		system_abort();
		break;
	default:
		assert(false);
	}

	return true;
}

void emulate_stage2_data_abort(struct rec *rec,
			       struct rmi_rec_exit *rec_exit,
			       unsigned long rtt_level)
{
	unsigned long fipa = rec->regs[1];

	assert(rtt_level <= RTT_PAGE_LEVEL);

	/*
	 * Set up the Exception Syndrome Register to emulate a real data
	 * abort and return to the NS host to handle it.
	 */
	rec_exit->esr = (ESR_EL2_EC_DATA_ABORT |
			(ESR_EL2_ABORT_FSC_TRANSLATION_FAULT_L0 + rtt_level));
	rec_exit->far = 0UL;
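	/*
	 * HPFAR_EL2 reports the faulting IPA through its FIPA field, hence
	 * the shift right by HPFAR_EL2_FIPA_OFFSET; handle_data_abort()
	 * performs the inverse shift when decoding the register.
	 */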
	rec_exit->hpfar = fipa >> HPFAR_EL2_FIPA_OFFSET;
	rec_exit->exit_reason = RMI_EXIT_SYNC;
}

/*
 * Returns 'true' if the abort is handled and the RMM should return to the
 * Realm, and returns 'false' if the exception should be reported to the
 * NS host.
 */
static bool handle_data_abort(struct rec *rec, struct rmi_rec_exit *rec_exit,
			      unsigned long esr)
{
	unsigned long far = 0UL;
	unsigned long hpfar = read_hpfar_el2();
	unsigned long fipa = (hpfar & HPFAR_EL2_FIPA_MASK) << HPFAR_EL2_FIPA_OFFSET;
	unsigned long write_val = 0UL;

	if (handle_sync_external_abort(rec, rec_exit, esr)) {
		/*
		 * All external aborts are immediately reported to the host.
		 */
		return false;
	}

	/*
	 * A memory access that crosses a page boundary may cause two aborts
	 * with `hpfar_el2` values referring to two consecutive pages.
	 *
	 * Insert the SEA and return to the Realm if the granule's RIPAS is
	 * EMPTY.
	 */
	if (ipa_is_empty(fipa, rec)) {
		inject_sync_idabort(ESR_EL2_ABORT_FSC_SEA);
		return true;
	}

	if (fixup_aarch32_data_abort(rec, &esr) ||
	    access_in_rec_par(rec, fipa)) {
		esr &= ESR_NONEMULATED_ABORT_MASK;
		goto end;
	}

	if (esr_is_write(esr)) {
		write_val = get_dabt_write_value(rec, esr);
	}

	far = read_far_el2() & ~GRANULE_MASK;
	esr &= ESR_EMULATED_ABORT_MASK;

end:
	rec_exit->esr = esr;
	rec_exit->far = far;
	rec_exit->hpfar = hpfar;
	rec_exit->gprs[0] = write_val;

	return false;
}

/*
 * Returns 'true' if the abort is handled and the RMM should return to the
 * Realm, and returns 'false' if the exception should be reported to the
 * NS host.
 */
static bool handle_instruction_abort(struct rec *rec, struct rmi_rec_exit *rec_exit,
				     unsigned long esr)
{
	unsigned long fsc = esr & ESR_EL2_ABORT_FSC_MASK;
	unsigned long fsc_type = fsc & ~ESR_EL2_ABORT_FSC_LEVEL_MASK;
	unsigned long hpfar = read_hpfar_el2();
	unsigned long fipa = (hpfar & HPFAR_EL2_FIPA_MASK) << HPFAR_EL2_FIPA_OFFSET;

	if (handle_sync_external_abort(rec, rec_exit, esr)) {
		/*
		 * All external aborts are immediately reported to the host.
		 */
		return false;
	}

	/*
	 * Insert the SEA and return to the Realm if:
	 * - The instruction abort is at an Unprotected IPA, or
	 * - The granule's RIPAS is EMPTY.
	 */
	if (!access_in_rec_par(rec, fipa) || ipa_is_empty(fipa, rec)) {
		inject_sync_idabort(ESR_EL2_ABORT_FSC_SEA);
		return true;
	}

	if (fsc_type != ESR_EL2_ABORT_FSC_TRANSLATION_FAULT) {
		unsigned long far = read_far_el2();

		/*
		 * TODO: Should this ever happen, or is it an indication of an
		 * internal consistency failure in the RMM which should lead
		 * to a panic instead?
		 */

		ERROR("Unhandled instruction abort:\n");
		ERROR("    FSC: %12s0x%02lx\n", " ", fsc);
		ERROR("    FAR: %16lx\n", far);
		ERROR("  HPFAR: %16lx\n", hpfar);
		return false;
	}

	rec_exit->hpfar = hpfar;
	rec_exit->esr = esr & ESR_NONEMULATED_ABORT_MASK;

	return false;
}

/*
 * Return 'false' if no IRQ is pending,
 * return 'true' if an IRQ is pending and we need to return to the host.
 */
static bool check_pending_irq(void)
{
	unsigned long pending_irq;

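	/* ISR_EL1 reports pending physical IRQ, FIQ and SError interrupts. */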
	pending_irq = read_isr_el1();

	return (pending_irq != 0UL);
}

static void advance_pc(void)
{
	unsigned long pc = read_elr_el2();

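	/*
	 * Note: this assumes the trapped instruction is 4 bytes wide, which
	 * always holds for A64 code; ESR_EL2.IL would need to be consulted
	 * to skip a 16-bit T32 instruction.
	 */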
	write_elr_el2(pc + 4UL);
}

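/*
 * Copy the first four result registers of an RSI/PSCI call back into the
 * REC's saved GPRs; results are returned to the Realm in X0-X3.
 */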
static void return_result_to_realm(struct rec *rec, struct smc_result result)
{
	rec->regs[0] = result.x[0];
	rec->regs[1] = result.x[1];
	rec->regs[2] = result.x[2];
	rec->regs[3] = result.x[3];
}

/*
 * Return 'true' if execution should continue in the REC, otherwise return
 * 'false' to go back to the NS caller of REC.Enter.
 */
static bool handle_realm_rsi(struct rec *rec, struct rmi_rec_exit *rec_exit)
{
	bool ret_to_rec = true;	/* Return to Realm */
	unsigned int function_id = rec->regs[0];

	RSI_LOG_SET(rec->regs[1], rec->regs[2],
		    rec->regs[3], rec->regs[4], rec->regs[5]);

	if (!IS_SMC32_PSCI_FID(function_id) && !IS_SMC64_PSCI_FID(function_id)
	    && !IS_SMC64_RSI_FID(function_id)) {

		ERROR("Invalid RSI function_id = %x\n", function_id);
		rec->regs[0] = SMC_UNKNOWN;
		return true;
	}

	switch (function_id) {
	case SMCCC_VERSION:
		rec->regs[0] = SMCCC_VERSION_NUMBER;
		break;
	case SMC_RSI_ABI_VERSION:
		rec->regs[0] = system_rsi_abi_version();
		break;
	case SMC32_PSCI_FID_MIN ... SMC32_PSCI_FID_MAX:
	case SMC64_PSCI_FID_MIN ... SMC64_PSCI_FID_MAX: {
		struct psci_result res;

		res = psci_rsi(rec,
			       function_id,
			       rec->regs[1],
			       rec->regs[2],
			       rec->regs[3]);

		if (!rec->psci_info.pending) {
			rec->regs[0] = res.smc_res.x[0];
			rec->regs[1] = res.smc_res.x[1];
			rec->regs[2] = res.smc_res.x[2];
			rec->regs[3] = res.smc_res.x[3];
		}

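		/*
		 * Some PSCI calls (e.g. CPU_ON targeting another REC) cannot
		 * be completed by the RMM alone, so they are forwarded to the
		 * NS host via an RMI_EXIT_PSCI exit.
		 */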
		if (res.hvc_forward.forward_psci_call) {
			unsigned int i;

			rec_exit->exit_reason = RMI_EXIT_PSCI;
			rec_exit->gprs[0] = function_id;
			rec_exit->gprs[1] = res.hvc_forward.x1;
			rec_exit->gprs[2] = res.hvc_forward.x2;
			rec_exit->gprs[3] = res.hvc_forward.x3;

			for (i = 4U; i < REC_EXIT_NR_GPRS; i++) {
				rec_exit->gprs[i] = 0UL;
			}

			advance_pc();
			ret_to_rec = false;
		}
		break;
	}
	case SMC_RSI_ATTEST_TOKEN_INIT:
		rec->regs[0] = handle_rsi_attest_token_init(rec);
		break;
	case SMC_RSI_ATTEST_TOKEN_CONTINUE: {
		struct attest_result res;

		attest_realm_token_sign_continue_start();
		while (true) {
			/*
			 * Possible outcomes:
			 *     if res.incomplete is true
			 *         if IRQ pending
			 *             check for pending IRQ and return to host
			 *         else try a new iteration
			 *     else
			 *         if RTT table walk has failed,
			 *             emulate data abort back to host
			 *         otherwise
			 *             return to realm because the token
			 *             creation is complete or input parameter
			 *             validation failed.
			 */
			handle_rsi_attest_token_continue(rec, &res);

			if (res.incomplete) {
				if (check_pending_irq()) {
					rec_exit->exit_reason = RMI_EXIT_IRQ;
					/* Return to NS host to handle IRQ. */
					ret_to_rec = false;
					break;
				}
			} else {
				if (res.walk_result.abort) {
					emulate_stage2_data_abort(
						rec, rec_exit,
						res.walk_result.rtt_level);
					ret_to_rec = false; /* Exit to Host */
					break;
				}

				/* Return to Realm */
				return_result_to_realm(rec, res.smc_res);
				break;
			}
		}
		attest_realm_token_sign_continue_finish();
		break;
	}
	case SMC_RSI_MEASUREMENT_READ:
		rec->regs[0] = handle_rsi_read_measurement(rec);
		break;
	case SMC_RSI_MEASUREMENT_EXTEND:
		rec->regs[0] = handle_rsi_extend_measurement(rec);
		break;
	case SMC_RSI_REALM_CONFIG: {
		struct rsi_walk_smc_result res;

		res = handle_rsi_realm_config(rec);
		if (res.walk_result.abort) {
			emulate_stage2_data_abort(rec, rec_exit,
						  res.walk_result.rtt_level);
			ret_to_rec = false; /* Exit to Host */
		} else {
			/* Return to Realm */
			return_result_to_realm(rec, res.smc_res);
		}
		break;
	}
	case SMC_RSI_IPA_STATE_SET:
		if (handle_rsi_ipa_state_set(rec, rec_exit)) {
			rec->regs[0] = RSI_ERROR_INPUT;
		} else {
			advance_pc();
			ret_to_rec = false; /* Return to Host */
		}
		break;
	case SMC_RSI_IPA_STATE_GET: {
		struct rsi_walk_smc_result res;

		res = handle_rsi_ipa_state_get(rec);
		if (res.walk_result.abort) {
			emulate_stage2_data_abort(rec, rec_exit,
						  res.walk_result.rtt_level);
			/* Exit to Host */
			ret_to_rec = false;
		} else {
			/* Return to Realm */
			return_result_to_realm(rec, res.smc_res);
		}
		break;
	}
	case SMC_RSI_HOST_CALL: {
		struct rsi_host_call_result res;

		res = handle_rsi_host_call(rec, rec_exit);

		if (res.walk_result.abort) {
			emulate_stage2_data_abort(rec, rec_exit,
						  res.walk_result.rtt_level);
			/* Exit to Host */
			ret_to_rec = false;
		} else {
			rec->regs[0] = res.smc_result;

			/*
			 * In case of an error we return to the Realm;
			 * the parent function calls advance_pc().
			 */
			if (rec->regs[0] == RSI_SUCCESS) {
				advance_pc();

				/* Exit to Host */
				rec->host_call = true;
				rec_exit->exit_reason = RMI_EXIT_HOST_CALL;
				ret_to_rec = false;
			}
		}
		break;
	}
	default:
		rec->regs[0] = SMC_UNKNOWN;
		break;
	}

	/* Log RSI call */
	RSI_LOG_EXIT(function_id, rec->regs[0], ret_to_rec);
	return ret_to_rec;
}

/*
 * Return 'true' if the RMM handled the exception,
 * 'false' to return to the Non-secure host.
 */
static bool handle_exception_sync(struct rec *rec, struct rmi_rec_exit *rec_exit)
{
	const unsigned long esr = read_esr_el2();

	switch (esr & ESR_EL2_EC_MASK) {
	case ESR_EL2_EC_WFX:
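		/*
		 * Report the trapped WFI/WFE to the host; ESR_EL2.TI records
		 * which of the two instructions was executed.
		 */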
		rec_exit->esr = esr & (ESR_EL2_EC_MASK | ESR_EL2_WFx_TI_BIT);
		advance_pc();
		return false;
	case ESR_EL2_EC_HVC:
		realm_inject_undef_abort();
		return true;
	case ESR_EL2_EC_SMC:
		if (!handle_realm_rsi(rec, rec_exit)) {
			return false;
		}
		/*
		 * Advance the PC.
		 * HCR_EL2.TSC traps execution of the SMC instruction.
		 * It is not a routing control for the SMC exception.
		 * Trap exceptions and SMC exceptions have different
		 * preferred return addresses.
		 */
		advance_pc();
		return true;
	case ESR_EL2_EC_SYSREG: {
		bool ret = handle_sysreg_access_trap(rec, rec_exit, esr);

		advance_pc();
		return ret;
	}
	case ESR_EL2_EC_INST_ABORT:
		return handle_instruction_abort(rec, rec_exit, esr);
	case ESR_EL2_EC_DATA_ABORT:
		return handle_data_abort(rec, rec_exit, esr);
	case ESR_EL2_EC_FPU: {
		unsigned long cptr;

		/*
		 * The Realm has requested FPU/SIMD access. Start by disabling
		 * the traps so that the NS state can be saved and the Realm
		 * state loaded.
		 */
		cptr = read_cptr_el2();
		cptr &= ~(CPTR_EL2_FPEN_MASK << CPTR_EL2_FPEN_SHIFT);
		cptr |= (CPTR_EL2_FPEN_NO_TRAP_11 << CPTR_EL2_FPEN_SHIFT);
		cptr &= ~(CPTR_EL2_ZEN_MASK << CPTR_EL2_ZEN_SHIFT);
		cptr |= (CPTR_EL2_ZEN_NO_TRAP_11 << CPTR_EL2_ZEN_SHIFT);
		write_cptr_el2(cptr);

		/*
		 * Save the NS state, restore the Realm state, and set a flag
		 * indicating the Realm has used the FPU so we know to save
		 * and restore the NS state at Realm exit.
		 */
		if (rec->ns->sve != NULL) {
			save_sve_state(rec->ns->sve);
		} else {
			assert(rec->ns->fpu != NULL);
			fpu_save_state(rec->ns->fpu);
		}
		fpu_restore_state(&rec->fpu_ctx.fpu);
		rec->fpu_ctx.used = true;

		/*
		 * Disable SVE for now, until per-rec save/restore is
		 * implemented.
		 */
		cptr = read_cptr_el2();
		cptr &= ~(CPTR_EL2_ZEN_MASK << CPTR_EL2_ZEN_SHIFT);
		cptr |= (CPTR_EL2_ZEN_TRAP_ALL_00 << CPTR_EL2_ZEN_SHIFT);
		write_cptr_el2(cptr);

		/*
		 * Return 'true' indicating that this exception
		 * has been handled and execution can continue.
		 */
		return true;
	}
	default:
		/*
		 * TODO: Check if there are other exit reasons we could
		 * encounter here and handle them appropriately.
		 */
		break;
	}

	VERBOSE("Unhandled sync exit ESR: %08lx (EC: %lx ISS: %lx)\n",
		esr,
		(esr & ESR_EL2_EC_MASK) >> ESR_EL2_EC_SHIFT,
		(esr & ESR_EL2_ISS_MASK) >> ESR_EL2_ISS_SHIFT);

	/*
	 * Zero values in the esr, far and hpfar fields of the 'rec_exit'
	 * structure are returned to the NS host.
	 * The only information that may leak is that there was some
	 * unhandled/unknown reason for the exception.
	 */
	return false;
}

/*
 * Return 'true' if the RMM handled the exception, 'false' to return to the
 * Non-secure host.
 */
static bool handle_exception_serror_lel(struct rec *rec, struct rmi_rec_exit *rec_exit)
{
	const unsigned long esr = read_esr_el2();

	if ((esr & ESR_EL2_SERROR_IDS_BIT) != 0UL) {
		/*
		 * Implementation-defined content of the ESR.
		 */
		system_abort();
	}

	if ((esr & ESR_EL2_SERROR_DFSC_MASK) != ESR_EL2_SERROR_DFSC_ASYNC) {
		/*
		 * Either an Uncategorized or a Reserved fault status code.
		 */
		system_abort();
	}

	switch (esr & ESR_EL2_SERROR_AET_MASK) {
	case ESR_EL2_SERROR_AET_UEU:	/* Unrecoverable RAS Error */
	case ESR_EL2_SERROR_AET_UER:	/* Recoverable RAS Error */
		/*
		 * The abort is fatal to the current S/W. Inject the SError
		 * into the Realm so it can e.g. shut down gracefully or
		 * localize the problem to the specific EL0 application.
		 *
		 * Note: consider shutting down the Realm here to prevent the
		 * host from attacking an unstable Realm.
		 */
		inject_serror(rec, esr);
		/*
		 * Fall through.
		 */
	case ESR_EL2_SERROR_AET_CE:	/* Corrected RAS Error */
	case ESR_EL2_SERROR_AET_UEO:	/* Restartable RAS Error */
		/*
		 * Report the exception to the host.
		 */
		rec_exit->esr = esr & ESR_SERROR_MASK;
		break;
	case ESR_EL2_SERROR_AET_UC:	/* Uncontainable RAS Error */
		system_abort();
		break;
	default:
		/*
		 * Unrecognized Asynchronous Error Type.
		 */
		assert(false);
	}

	return false;
}

static bool handle_exception_irq_lel(struct rec *rec, struct rmi_rec_exit *rec_exit)
{
	(void)rec;

	rec_exit->exit_reason = RMI_EXIT_IRQ;

	/*
	 * With the GIC, all virtual interrupt programming
	 * must go via the NS hypervisor.
	 */
	return false;
}

/* Returns 'true' when returning to the Realm (S) and 'false' when returning to NS */
bool handle_realm_exit(struct rec *rec, struct rmi_rec_exit *rec_exit, int exception)
{
	switch (exception) {
	case ARM_EXCEPTION_SYNC_LEL: {
		bool ret;

		/*
		 * TODO: Sanitize ESR to ensure it doesn't leak sensitive
		 * information.
		 */
		rec_exit->exit_reason = RMI_EXIT_SYNC;
		ret = handle_exception_sync(rec, rec_exit);
		if (!ret) {
			rec->last_run_info.esr = read_esr_el2();
			rec->last_run_info.far = read_far_el2();
			rec->last_run_info.hpfar = read_hpfar_el2();
		}
		return ret;

		/*
		 * TODO: Much more detailed handling of exit reasons.
		 */
	}
	case ARM_EXCEPTION_IRQ_LEL:
		return handle_exception_irq_lel(rec, rec_exit);
	case ARM_EXCEPTION_FIQ_LEL:
		rec_exit->exit_reason = RMI_EXIT_FIQ;
		break;
	case ARM_EXCEPTION_SERROR_LEL: {
		const unsigned long esr = read_esr_el2();
		bool ret;

		/*
		 * TODO: Sanitize ESR to ensure it doesn't leak sensitive
		 * information.
		 */
		rec_exit->exit_reason = RMI_EXIT_SERROR;
		ret = handle_exception_serror_lel(rec, rec_exit);
		if (!ret) {
			rec->last_run_info.esr = esr;
			rec->last_run_info.far = read_far_el2();
			rec->last_run_info.hpfar = read_hpfar_el2();
		}
		return ret;
	}
	default:
		INFO("Unrecognized exit reason: %d\n", exception);
		break;
	}

	return false;
}