/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include <stdnoreturn.h>

#include "hf/arch/barriers.h"
#include "hf/arch/init.h"
#include "hf/arch/mmu.h"
#include "hf/arch/plat/ffa.h"
#include "hf/arch/plat/smc.h"

#include "hf/api.h"
#include "hf/check.h"
#include "hf/cpu.h"
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/ffa_internal.h"
#include "hf/panic.h"
#include "hf/plat/interrupts.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"

#include "debug_el1.h"
#include "feature_id.h"
#include "msr.h"
#include "perfmon.h"
#include "psci.h"
#include "psci_handler.h"
#include "smc.h"
#include "sysregs.h"

/**
 * Hypervisor IPA Fault Address Register (HPFAR_EL2), NS bit.
 */
#define HPFAR_EL2_NS (UINT64_C(0x1) << 63)

/**
 * Hypervisor IPA Fault Address Register (HPFAR_EL2), Faulting IPA field.
 */
#define HPFAR_EL2_FIPA (UINT64_C(0xFFFFFFFFFF0))
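/*
 * Illustration (not a new code path): fault_info_init() below recovers the
 * faulting IPA from HPFAR_EL2 by masking the FIPA field and shifting it up by
 * 8 bits, i.e.
 *
 *	ipa = (read_msr(hpfar_el2) & HPFAR_EL2_FIPA) << 8;
 */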

/**
 * Gets the value to increment for the next PC.
 * The ESR encodes whether the instruction is 2 bytes or 4 bytes long.
 */
#define GET_NEXT_PC_INC(esr) (GET_ESR_IL(esr) ? 4 : 2)
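/*
 * Typical use, as in the exception handlers below: skip the trapped
 * instruction by advancing the PC by its encoded length, e.g.
 *
 *	vcpu->regs.pc += GET_NEXT_PC_INC(esr);
 */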

/**
 * The Client ID field within X7 for an SMC64 call.
 */
#define CLIENT_ID_MASK UINT64_C(0xffff)

/*
 * Target function IDs for framework messages from the SPMD.
 */
#define SPMD_FWK_MSG_BIT (UINT64_C(1) << 31)
#define SPMD_FWK_MSG_FUNC_MASK UINT64_C(0xFF)
#define SPMD_FWK_MSG_PSCI UINT8_C(0)
#define SPMD_FWK_MSG_FFA_VERSION_REQ UINT8_C(0x8)
#define SPMD_FWK_MSG_FFA_VERSION_RESP UINT8_C(0x9)
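/*
 * Framework message layout assumed by spmd_handler() below: bit 31 of the
 * message word flags a framework message and the low 8 bits carry the
 * function ID, so a message is matched roughly as
 *
 *	if ((msg & SPMD_FWK_MSG_BIT) != 0) {
 *		func = msg & SPMD_FWK_MSG_FUNC_MASK;
 *	}
 */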

/**
 * Returns a reference to the currently executing vCPU.
 */
static struct vcpu *current(void)
{
	// NOLINTNEXTLINE(performance-no-int-to-ptr)
	return (struct vcpu *)read_msr(tpidr_el2);
}
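/*
 * Note: this relies on TPIDR_EL2 holding a pointer to the vCPU that is
 * currently running on this physical CPU; it is assumed to be set up by the
 * context-switch/entry code before guest execution begins.
 */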

/**
 * Saves the state of per-vCPU peripherals, such as the virtual timer, and
 * informs the arch-independent sections that registers have been saved.
 */
void complete_saving_state(struct vcpu *vcpu)
{
	if (has_vhe_support()) {
		vcpu->regs.peripherals.cntv_cval_el0 =
			read_msr(MSR_CNTV_CVAL_EL02);
		vcpu->regs.peripherals.cntv_ctl_el0 =
			read_msr(MSR_CNTV_CTL_EL02);
	} else {
		vcpu->regs.peripherals.cntv_cval_el0 = read_msr(cntv_cval_el0);
		vcpu->regs.peripherals.cntv_ctl_el0 = read_msr(cntv_ctl_el0);
	}

	api_regs_state_saved(vcpu);

	/*
	 * If switching away from the primary, copy the current EL0 virtual
	 * timer registers to the corresponding EL2 physical timer registers.
	 * This is used to emulate the virtual timer for the primary in case it
	 * should fire while the secondary is running.
	 */
	if (vcpu->vm->id == HF_PRIMARY_VM_ID) {
		/*
		 * Clear timer control register before copying compare value,
		 * to avoid a spurious timer interrupt. This could be a problem
		 * if the interrupt is configured as edge-triggered, as it
		 * would then be latched in.
		 */
		write_msr(cnthp_ctl_el2, 0);

		if (has_vhe_support()) {
			write_msr(cnthp_cval_el2, read_msr(MSR_CNTV_CVAL_EL02));
			write_msr(cnthp_ctl_el2, read_msr(MSR_CNTV_CTL_EL02));
		} else {
			write_msr(cnthp_cval_el2, read_msr(cntv_cval_el0));
			write_msr(cnthp_ctl_el2, read_msr(cntv_ctl_el0));
		}
	}
}

/**
 * Restores the state of per-vCPU peripherals, such as the virtual timer.
 */
void begin_restoring_state(struct vcpu *vcpu)
{
	/*
	 * Clear timer control register before restoring compare value, to
	 * avoid a spurious timer interrupt. This could be a problem if the
	 * interrupt is configured as edge-triggered, as it would then be
	 * latched in.
	 */
	if (has_vhe_support()) {
		write_msr(MSR_CNTV_CTL_EL02, 0);
		write_msr(MSR_CNTV_CVAL_EL02,
			  vcpu->regs.peripherals.cntv_cval_el0);
		write_msr(MSR_CNTV_CTL_EL02,
			  vcpu->regs.peripherals.cntv_ctl_el0);
	} else {
		write_msr(cntv_ctl_el0, 0);
		write_msr(cntv_cval_el0, vcpu->regs.peripherals.cntv_cval_el0);
		write_msr(cntv_ctl_el0, vcpu->regs.peripherals.cntv_ctl_el0);
	}

	/*
	 * If we are switching (back) to the primary, disable the EL2 physical
	 * timer which was being used to emulate the EL0 virtual timer, as the
	 * virtual timer is now running for the primary again.
	 */
	if (vcpu->vm->id == HF_PRIMARY_VM_ID) {
		write_msr(cnthp_ctl_el2, 0);
		write_msr(cnthp_cval_el2, 0);
	}
}

/**
 * Invalidates all stage 1 TLB entries on the current (physical) CPU for the
 * current VMID.
 */
static void invalidate_vm_tlb(void)
{
	/*
	 * Ensure that the last VTTBR write has taken effect so we invalidate
	 * the right set of TLB entries.
	 */
	isb();

	__asm__ volatile("tlbi vmalle1");

	/*
	 * Ensure that no instructions are fetched for the VM until after the
	 * TLB invalidation has taken effect.
	 */
	isb();

	/*
	 * Ensure that no data reads or writes for the VM happen until after
	 * the TLB invalidation has taken effect. Non-shareable is enough
	 * because the TLB is local to the CPU.
	 */
	dsb(nsh);
}
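/*
 * Design note: "tlbi vmalle1" (rather than the inner-shareable "vmalle1is")
 * is sufficient here because, as the comment above notes, only the local
 * CPU's TLB needs invalidating; each CPU tracks its own last-run vCPU in
 * maybe_invalidate_tlb() below.
 */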

/**
 * Invalidates the TLB if a different vCPU is being run than the last vCPU of
 * the same VM which was run on the current pCPU.
 *
 * This is necessary because VMs may (contrary to the architecture
 * specification) use inconsistent ASIDs across vCPUs. c.f. KVM's similar
 * workaround:
 * https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=94d0e5980d6791b9
 */
void maybe_invalidate_tlb(struct vcpu *vcpu)
{
	size_t current_cpu_index = cpu_index(vcpu->cpu);
	ffa_vcpu_index_t new_vcpu_index = vcpu_index(vcpu);

	if (vcpu->vm->arch.last_vcpu_on_cpu[current_cpu_index] !=
	    new_vcpu_index) {
		/*
		 * The vCPU has changed since the last time this VM was run on
		 * this pCPU, so we need to invalidate the TLB.
		 */
		invalidate_vm_tlb();

		/* Record the fact that this vCPU is now running on this CPU. */
		vcpu->vm->arch.last_vcpu_on_cpu[current_cpu_index] =
			new_vcpu_index;
	}
}

noreturn void irq_current_exception_noreturn(uintreg_t elr, uintreg_t spsr)
{
	(void)elr;
	(void)spsr;

	panic("IRQ from current exception level.");
}

noreturn void fiq_current_exception_noreturn(uintreg_t elr, uintreg_t spsr)
{
	(void)elr;
	(void)spsr;

	panic("FIQ from current exception level.");
}

noreturn void serr_current_exception_noreturn(uintreg_t elr, uintreg_t spsr)
{
	(void)elr;
	(void)spsr;

	panic("SError from current exception level.");
}

noreturn void sync_current_exception_noreturn(uintreg_t elr, uintreg_t spsr)
{
	uintreg_t esr = read_msr(esr_el2);
	uintreg_t ec = GET_ESR_EC(esr);

	(void)spsr;

	switch (ec) {
	case EC_DATA_ABORT_SAME_EL:
		if (!(esr & (1U << 10))) { /* Check FnV bit. */
			dlog_error(
				"Data abort: pc=%#x, esr=%#x, ec=%#x, "
				"far=%#x\n",
				elr, esr, ec, read_msr(far_el2));
		} else {
			dlog_error(
				"Data abort: pc=%#x, esr=%#x, ec=%#x, "
				"far=invalid\n",
				elr, esr, ec);
		}

		break;

	default:
		dlog_error(
			"Unknown current sync exception pc=%#x, esr=%#x, "
			"ec=%#x\n",
			elr, esr, ec);
		break;
	}

	panic("EL2 exception");
}

/**
 * Sets or clears the VI bit in the HCR_EL2 register saved in the given
 * arch_regs.
 */
static void set_virtual_irq(struct arch_regs *r, bool enable)
{
	if (enable) {
		r->hyp_state.hcr_el2 |= HCR_EL2_VI;
	} else {
		r->hyp_state.hcr_el2 &= ~HCR_EL2_VI;
	}
}
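/*
 * Note: set_virtual_irq()/set_virtual_fiq() only touch the saved copy of
 * HCR_EL2 in arch_regs, so the change is assumed to become visible to the
 * vCPU when those registers are next restored on entry; the *_current()
 * variants below perform the same update on the vCPU returned by current().
 */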

/**
 * Sets or clears the VI bit in the HCR_EL2 register.
 */
static void set_virtual_irq_current(bool enable)
{
	struct vcpu *vcpu = current();
	uintreg_t hcr_el2 = vcpu->regs.hyp_state.hcr_el2;

	if (enable) {
		hcr_el2 |= HCR_EL2_VI;
	} else {
		hcr_el2 &= ~HCR_EL2_VI;
	}
	vcpu->regs.hyp_state.hcr_el2 = hcr_el2;
}

/**
 * Sets or clears the VF bit in the HCR_EL2 register saved in the given
 * arch_regs.
 */
static void set_virtual_fiq(struct arch_regs *r, bool enable)
{
	if (enable) {
		r->hyp_state.hcr_el2 |= HCR_EL2_VF;
	} else {
		r->hyp_state.hcr_el2 &= ~HCR_EL2_VF;
	}
}

/**
 * Sets or clears the VF bit in the HCR_EL2 register.
 */
static void set_virtual_fiq_current(bool enable)
{
	struct vcpu *vcpu = current();
	uintreg_t hcr_el2 = vcpu->regs.hyp_state.hcr_el2;

	if (enable) {
		hcr_el2 |= HCR_EL2_VF;
	} else {
		hcr_el2 &= ~HCR_EL2_VF;
	}
	vcpu->regs.hyp_state.hcr_el2 = hcr_el2;
}

#if SECURE_WORLD == 1

/**
 * Handles special direct messages from the SPMD to the SPMC. For now these
 * relate to power management only.
 */
static bool spmd_handler(struct ffa_value *args, struct vcpu *current)
{
	ffa_vm_id_t sender = ffa_sender(*args);
	ffa_vm_id_t receiver = ffa_receiver(*args);
	ffa_vm_id_t current_vm_id = current->vm->id;
	uint32_t fwk_msg = ffa_fwk_msg(*args);
	uint8_t fwk_msg_func_id = fwk_msg & SPMD_FWK_MSG_FUNC_MASK;

	/*
	 * Check that the direct message request originates from the SPMD, is
	 * directed to the SPMC and is a framework message.
	 */
	if (!(sender == HF_SPMD_VM_ID && receiver == HF_SPMC_VM_ID &&
	      current_vm_id == HF_OTHER_WORLD_ID) ||
	    (fwk_msg & SPMD_FWK_MSG_BIT) == 0) {
		return false;
	}

	switch (fwk_msg_func_id) {
	case SPMD_FWK_MSG_PSCI: {
		switch (args->arg3) {
		case PSCI_CPU_OFF: {
			struct vm *vm = vm_get_first_boot();
			struct vcpu *vcpu =
				vm_get_vcpu(vm, vcpu_index(current));

			/*
			 * TODO: the PM event reached the SPMC. In a later
			 * iteration, the PM event can be passed to the SP by
			 * resuming it.
			 */
			*args = (struct ffa_value){
				.func = FFA_MSG_SEND_DIRECT_RESP_32,
				.arg1 = ((uint64_t)HF_SPMC_VM_ID << 16) |
					HF_SPMD_VM_ID,
				.arg2 = 0U};

			dlog_verbose("%s cpu off notification cpuid %#x\n",
				     __func__, vcpu->cpu->id);
			cpu_off(vcpu->cpu);
			break;
		}
		default:
			dlog_verbose("%s PSCI message not handled %#x\n",
				     __func__, args->arg3);
			return false;
		}
		/* Do not fall through into the FF-A version handling below. */
		break;
	}
	case SPMD_FWK_MSG_FFA_VERSION_REQ: {
		struct ffa_value ret = api_ffa_version(current, args->arg3);
		*args = (struct ffa_value){
			.func = FFA_MSG_SEND_DIRECT_RESP_32,
			.arg1 = ((uint64_t)HF_SPMC_VM_ID << 16) | HF_SPMD_VM_ID,
			/* Set bit 31 since this is a framework message. */
			.arg2 = SPMD_FWK_MSG_BIT |
				SPMD_FWK_MSG_FFA_VERSION_RESP,
			.arg3 = ret.func};
		break;
	}
	default:
		dlog_verbose("%s message not handled %#x\n", __func__, fwk_msg);
		*args = (struct ffa_value){
			.func = FFA_MSG_SEND_DIRECT_RESP_32,
			.arg1 = ((uint64_t)HF_SPMC_VM_ID << 16) | HF_SPMD_VM_ID,
			/* Set bit 31 since this is a framework message. */
			.arg2 = SPMD_FWK_MSG_BIT | fwk_msg_func_id};
	}

	return true;
}

#endif

/**
 * Checks whether to block an SMC being forwarded from a VM.
 */
static bool smc_is_blocked(const struct vm *vm, uint32_t func)
{
	bool block_by_default = !vm->smc_whitelist.permissive;

	for (size_t i = 0; i < vm->smc_whitelist.smc_count; ++i) {
		if (func == vm->smc_whitelist.smcs[i]) {
			return false;
		}
	}

	dlog_notice("SMC %#010x attempted from VM %#x, blocked=%u\n", func,
		    vm->id, block_by_default);

	/* Access is still allowed in permissive mode. */
	return block_by_default;
}
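/*
 * In other words: an SMC on the VM's whitelist is always forwarded; an SMC
 * that is not on the whitelist is still forwarded (but logged) when the
 * manifest marks the whitelist as permissive, and blocked otherwise.
 */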

/**
 * Applies SMC access control according to the manifest and forwards the call
 * if access is granted.
 */
static void smc_forwarder(const struct vm *vm, struct ffa_value *args)
{
	struct ffa_value ret;
	uint32_t client_id = vm->id;
	uintreg_t arg7 = args->arg7;

	if (smc_is_blocked(vm, args->func)) {
		args->func = SMCCC_ERROR_UNKNOWN;
		return;
	}

	/*
	 * Set the Client ID but keep the existing Secure OS ID and anything
	 * else (currently unspecified) that the client may have passed in the
	 * upper bits.
	 */
	args->arg7 = client_id | (arg7 & ~CLIENT_ID_MASK);
	ret = smc_forward(args->func, args->arg1, args->arg2, args->arg3,
			  args->arg4, args->arg5, args->arg6, args->arg7);

	/*
	 * Preserve the value passed by the caller, rather than the generated
	 * client_id. Note that this would also overwrite any return value that
	 * may be in x7, but the SMCs that we are forwarding are legacy calls
	 * from before SMCCC 1.2 so won't have more than 4 return values anyway.
	 */
	ret.arg7 = arg7;

	plat_smc_post_forward(*args, &ret);

	*args = ret;
}
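/*
 * SMCCC note: the optional Client ID occupies the low 16 bits of register W7,
 * which is why only the CLIENT_ID_MASK bits are rewritten above while the
 * rest of X7 (for example a Secure OS ID) is preserved for the callee, and
 * the caller's original X7 value is restored in the result.
 */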

/**
 * In the normal world, ffa_handler is always called from the virtual FF-A
 * instance (from a VM in EL1). In the secure world, ffa_handler may be called
 * from the virtual (a secure partition in S-EL1) or physical FF-A instance
 * (from the normal world via EL3). The function returns true when the call is
 * handled. The *next pointer is updated to the next vCPU to run, which might
 * be the 'other world' vCPU if the call originated from the virtual FF-A
 * instance and has to be forwarded down to EL3, or left as is to resume the
 * current vCPU.
 */
static bool ffa_handler(struct ffa_value *args, struct vcpu *current,
			struct vcpu **next)
474 uint32_t func = args->func;
475
476 /*
477 * NOTE: When adding new methods to this handler update
478 * api_ffa_features accordingly.
479 */
480 switch (func) {
481 case FFA_VERSION_32:
482 *args = api_ffa_version(current, args->arg1);
483 return true;
484 case FFA_PARTITION_INFO_GET_32: {
485 struct ffa_uuid uuid;
486
487 ffa_uuid_init(args->arg1, args->arg2, args->arg3, args->arg4,
488 &uuid);
489 *args = api_ffa_partition_info_get(current, &uuid, args->arg5);
490 return true;
491 }
492 case FFA_ID_GET_32:
493 *args = api_ffa_id_get(current);
494 return true;
495 case FFA_SPM_ID_GET_32:
496 *args = api_ffa_spm_id_get();
497 return true;
498 case FFA_FEATURES_32:
499 *args = api_ffa_features(args->arg1);
500 return true;
501 case FFA_RX_RELEASE_32:
502 *args = api_ffa_rx_release(ffa_receiver(*args), current, next);
503 return true;
504 case FFA_RXTX_MAP_64:
505 *args = api_ffa_rxtx_map(ipa_init(args->arg1),
506 ipa_init(args->arg2), args->arg3,
507 current);
508 return true;
509 case FFA_RXTX_UNMAP_32:
510 *args = api_ffa_rxtx_unmap(args->arg1, current);
511 return true;
512 case FFA_RX_ACQUIRE_32:
513 *args = api_ffa_rx_acquire(ffa_receiver(*args), current);
514 return true;
515 case FFA_YIELD_32:
516 *args = api_yield(current, next);
517 return true;
518 case FFA_MSG_SEND_32:
519 *args = api_ffa_msg_send(ffa_sender(*args), ffa_receiver(*args),
520 ffa_msg_send_size(*args), current,
521 next);
522 return true;
523 case FFA_MSG_SEND2_32:
524 *args = api_ffa_msg_send2(ffa_sender(*args),
525 ffa_msg_send2_flags(*args), current);
526 return true;
527 case FFA_MSG_WAIT_32:
528 *args = api_ffa_msg_wait(current, next, args);
529 return true;
530 case FFA_MSG_POLL_32:
531 *args = api_ffa_msg_recv(false, current, next);
532 return true;
533 case FFA_RUN_32:
534 *args = api_ffa_run(ffa_vm_id(*args), ffa_vcpu_index(*args),
535 current, next);
536 return true;
	case FFA_MEM_DONATE_32:
	case FFA_MEM_LEND_32:
	case FFA_MEM_SHARE_32:
		*args = api_ffa_mem_send(func, args->arg1, args->arg2,
					 ipa_init(args->arg3), args->arg4,
					 current);
		return true;
	case FFA_MEM_RETRIEVE_REQ_32:
		*args = api_ffa_mem_retrieve_req(args->arg1, args->arg2,
						 ipa_init(args->arg3),
						 args->arg4, current);
		return true;
	case FFA_MEM_RELINQUISH_32:
		*args = api_ffa_mem_relinquish(current);
		return true;
	case FFA_MEM_RECLAIM_32:
		*args = api_ffa_mem_reclaim(
			ffa_assemble_handle(args->arg1, args->arg2), args->arg3,
			current);
		return true;
	case FFA_MEM_FRAG_RX_32:
		*args = api_ffa_mem_frag_rx(ffa_frag_handle(*args), args->arg3,
					    (args->arg4 >> 16) & 0xffff,
					    current);
		return true;
	case FFA_MEM_FRAG_TX_32:
		*args = api_ffa_mem_frag_tx(ffa_frag_handle(*args), args->arg3,
					    (args->arg4 >> 16) & 0xffff,
					    current);
		return true;
	case FFA_MSG_SEND_DIRECT_REQ_64:
	case FFA_MSG_SEND_DIRECT_REQ_32: {
#if SECURE_WORLD == 1
		if (spmd_handler(args, current)) {
			return true;
		}
#endif
		*args = api_ffa_msg_send_direct_req(ffa_sender(*args),
						    ffa_receiver(*args), *args,
						    current, next);
		return true;
	}
	case FFA_MSG_SEND_DIRECT_RESP_64:
	case FFA_MSG_SEND_DIRECT_RESP_32:
		*args = api_ffa_msg_send_direct_resp(ffa_sender(*args),
						     ffa_receiver(*args), *args,
						     current, next);
		return true;
	case FFA_SECONDARY_EP_REGISTER_64:
		/*
		 * DEN0077A FF-A v1.1 Beta0 section 18.3.2.1.1
		 * The callee must return NOT_SUPPORTED if this function is
		 * invoked by a caller that implements version v1.0 of
		 * the Framework.
		 */
		*args = api_ffa_secondary_ep_register(ipa_init(args->arg1),
						      current);
		return true;
	case FFA_NOTIFICATION_BITMAP_CREATE_32:
		*args = api_ffa_notification_bitmap_create(
			(ffa_vm_id_t)args->arg1, (ffa_vcpu_count_t)args->arg2,
			current);
		return true;
	case FFA_NOTIFICATION_BITMAP_DESTROY_32:
		*args = api_ffa_notification_bitmap_destroy(
			(ffa_vm_id_t)args->arg1, current);
		return true;
	case FFA_NOTIFICATION_BIND_32:
		*args = api_ffa_notification_update_bindings(
			ffa_sender(*args), ffa_receiver(*args), args->arg2,
			ffa_notifications_bitmap(args->arg3, args->arg4), true,
			current);
		return true;
	case FFA_NOTIFICATION_UNBIND_32:
		*args = api_ffa_notification_update_bindings(
			ffa_sender(*args), ffa_receiver(*args), 0,
			ffa_notifications_bitmap(args->arg3, args->arg4), false,
			current);
		return true;
	case FFA_MEM_PERM_SET_32:
	case FFA_MEM_PERM_SET_64:
		*args = api_ffa_mem_perm_set(va_init(args->arg1), args->arg2,
					     args->arg3, current);
		return true;
	case FFA_MEM_PERM_GET_32:
	case FFA_MEM_PERM_GET_64:
		*args = api_ffa_mem_perm_get(va_init(args->arg1), current);
		return true;
	case FFA_NOTIFICATION_SET_32:
		*args = api_ffa_notification_set(
			ffa_sender(*args), ffa_receiver(*args), args->arg2,
			ffa_notifications_bitmap(args->arg3, args->arg4),
			current);
		return true;
	case FFA_NOTIFICATION_GET_32:
		*args = api_ffa_notification_get(
			ffa_receiver(*args), ffa_notifications_get_vcpu(*args),
			args->arg2, current);
		return true;
	case FFA_NOTIFICATION_INFO_GET_64:
		*args = api_ffa_notification_info_get(current);
		return true;
	case FFA_INTERRUPT_32:
		*args = plat_ffa_handle_secure_interrupt(current, next, true);
		return true;
	case FFA_CONSOLE_LOG_32:
	case FFA_CONSOLE_LOG_64:
		*args = api_ffa_console_log(*args, current);
		return true;
	}

	return false;
}

/**
 * Sets or clears the VI/VF bits according to pending interrupts.
 */
static void vcpu_update_virtual_interrupts(struct vcpu *next)
{
	struct vcpu_locked vcpu_locked;

	if (next == NULL) {
		if (current()->vm->el0_partition) {
			return;
		}

		/*
		 * Not switching vCPUs, set the bit for the current vCPU
		 * directly in the register.
		 */
		vcpu_locked = vcpu_lock(current());
		set_virtual_irq_current(
			vcpu_interrupt_irq_count_get(vcpu_locked) > 0);
		set_virtual_fiq_current(
			vcpu_interrupt_fiq_count_get(vcpu_locked) > 0);
		vcpu_unlock(&vcpu_locked);
	} else if (vm_id_is_current_world(next->vm->id)) {
		if (next->vm->el0_partition) {
			return;
		}
		/*
		 * About to switch vCPUs, set the bit for the vCPU to which we
		 * are switching in the saved copy of the register.
		 */

		vcpu_locked = vcpu_lock(next);
		set_virtual_irq(&next->regs,
				vcpu_interrupt_irq_count_get(vcpu_locked) > 0);
		set_virtual_fiq(&next->regs,
				vcpu_interrupt_fiq_count_get(vcpu_locked) > 0);
		vcpu_unlock(&vcpu_locked);
	}
}

/**
 * Handles PSCI and FF-A calls and writes the return value back to the
 * registers of the vCPU. This is shared between smc_handler and hvc_handler.
 *
 * Returns true if the call was handled.
 */
static bool hvc_smc_handler(struct ffa_value args, struct vcpu *vcpu,
			    struct vcpu **next)
{
	/* Do not expect PSCI calls emitted from within the secure world. */
#if SECURE_WORLD == 0
	if (psci_handler(vcpu, args.func, args.arg1, args.arg2, args.arg3,
			 &vcpu->regs.r[0], next)) {
		return true;
	}
#endif

	if (ffa_handler(&args, vcpu, next)) {
#if SECURE_WORLD == 1
		/*
		 * If giving back execution to the NWd, check if the Schedule
		 * Receiver Interrupt has been delayed, and trigger it on the
		 * current core if so.
		 */
		if ((*next != NULL && (*next)->vm->id == HF_OTHER_WORLD_ID) ||
		    (*next == NULL && vcpu->vm->id == HF_OTHER_WORLD_ID)) {
			plat_ffa_sri_trigger_if_delayed(vcpu->cpu);
		}
#endif
		arch_regs_set_retval(&vcpu->regs, args);
		vcpu_update_virtual_interrupts(*next);
		return true;
	}

	return false;
}
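/*
 * Call-path note: hvc_smc_handler() is the common front end for both trap
 * types, so an FF-A call such as FFA_ID_GET takes the same route whether a
 * partition issues it with an SMC or an HVC instruction; only the unhandled
 * remainder diverges into smc_forwarder() or the HF_* hypercalls below.
 */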

/**
 * Processes SMC instruction calls.
 */
static struct vcpu *smc_handler(struct vcpu *vcpu)
{
	struct ffa_value args = arch_regs_get_args(&vcpu->regs);
	struct vcpu *next = NULL;

	if (hvc_smc_handler(args, vcpu, &next)) {
		return next;
	}

	switch (args.func & ~SMCCC_CONVENTION_MASK) {
	case HF_DEBUG_LOG:
		vcpu->regs.r[0] = api_debug_log(args.arg1, vcpu);
		return NULL;
	}

	smc_forwarder(vcpu->vm, &args);
	arch_regs_set_retval(&vcpu->regs, args);
	return NULL;
}

#if SECURE_WORLD == 1

/**
 * Called when the other_world_loop returns from an SMC.
 * Processes SMC calls originating from the NWd.
 */
struct vcpu *smc_handler_from_nwd(struct vcpu *vcpu)
{
	struct ffa_value args = arch_regs_get_args(&vcpu->regs);
	struct vcpu *next = NULL;

	if (hvc_smc_handler(args, vcpu, &next)) {
		return next;
	}

	/*
	 * If the SMC emitted by the normal world is not handled in the secure
	 * world then return an error stating such ABI is not supported. Only
	 * FF-A calls are supported. We cannot return SMCCC_ERROR_UNKNOWN
	 * directly because the SPMD smc handler would not recognize it as a
	 * standard FF-A call returning from the SPMC.
	 */
	arch_regs_set_retval(&vcpu->regs, ffa_error(FFA_NOT_SUPPORTED));

	return NULL;
}

#endif

/*
 * Exception vector offsets.
 * See Arm Architecture Reference Manual Armv8-A, D1.10.2.
 */

/**
 * Offset for synchronous exceptions at current EL with SPx.
 */
#define OFFSET_CURRENT_SPX UINT64_C(0x200)

/**
 * Offset for synchronous exceptions at lower EL using AArch64.
 */
#define OFFSET_LOWER_EL_64 UINT64_C(0x400)

/**
 * Offset for synchronous exceptions at lower EL using AArch32.
 */
#define OFFSET_LOWER_EL_32 UINT64_C(0x600)
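/*
 * For reference, the start of each group of vectors relative to VBAR_EL1 (the
 * synchronous entry is the first in each group):
 *
 *	0x000	current EL with SP_EL0
 *	0x200	current EL with SP_ELx
 *	0x400	lower EL using AArch64
 *	0x600	lower EL using AArch32
 */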

/**
 * Returns the address for the exception handler at EL1.
 */
static uintreg_t get_el1_exception_handler_addr(const struct vcpu *vcpu)
{
	uintreg_t base_addr = has_vhe_support() ? read_msr(MSR_VBAR_EL12)
						: read_msr(vbar_el1);
	uintreg_t pe_mode = vcpu->regs.spsr & PSR_PE_MODE_MASK;
	bool is_arch32 = vcpu->regs.spsr & PSR_ARCH_MODE_32;

	if (pe_mode == PSR_PE_MODE_EL0T) {
		if (is_arch32) {
			base_addr += OFFSET_LOWER_EL_32;
		} else {
			base_addr += OFFSET_LOWER_EL_64;
		}
	} else {
		CHECK(!is_arch32);
		base_addr += OFFSET_CURRENT_SPX;
	}

	return base_addr;
}

/**
 * Injects an exception with the specified Exception Syndrome Register value
 * into EL1.
 *
 * NOTE: This function assumes that the lazy registers haven't been saved, and
 * writes to the lazy registers of the CPU directly instead of the vCPU.
 */
static void inject_el1_exception(struct vcpu *vcpu, uintreg_t esr_el1_value,
				 uintreg_t far_el1_value)
{
	uintreg_t handler_address = get_el1_exception_handler_addr(vcpu);

	/* Update the CPU state to inject the exception. */
	if (has_vhe_support()) {
		write_msr(MSR_ESR_EL12, esr_el1_value);
		write_msr(MSR_FAR_EL12, far_el1_value);
		write_msr(MSR_ELR_EL12, vcpu->regs.pc);
		write_msr(MSR_SPSR_EL12, vcpu->regs.spsr);
	} else {
		write_msr(esr_el1, esr_el1_value);
		write_msr(far_el1, far_el1_value);
		write_msr(elr_el1, vcpu->regs.pc);
		write_msr(spsr_el1, vcpu->regs.spsr);
	}

	/*
	 * Mask (disable) interrupts and run in EL1h mode.
	 * EL1h mode is used because, by default, taking an exception selects
	 * the stack pointer for the target exception level. The software can
	 * change that later in the handler if needed.
	 */
	vcpu->regs.spsr = PSR_D | PSR_A | PSR_I | PSR_F | PSR_PE_MODE_EL1H;

	/* Transfer control to the exception handler. */
	vcpu->regs.pc = handler_address;
}
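/*
 * Net effect, roughly what the hardware would do if EL1 took the exception
 * itself: ESR_EL1/FAR_EL1 describe the fault, ELR_EL1/SPSR_EL1 hold the
 * interrupted state, and the vCPU resumes at the appropriate EL1 vector
 * (selected by get_el1_exception_handler_addr() above) with all DAIF
 * interrupts masked.
 */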

/**
 * Injects a Data Abort exception (same exception level).
 */
static void inject_el1_data_abort_exception(struct vcpu *vcpu,
					    uintreg_t esr_el2,
					    uintreg_t far_el2)
{
	/*
	 * ISS encoding remains the same, but the EC is changed to reflect
	 * where the exception came from.
	 * See Arm Architecture Reference Manual Armv8-A, pages D13-2943/2982.
	 */
	uintreg_t esr_el1_value = GET_ESR_ISS(esr_el2) | GET_ESR_IL(esr_el2) |
				  (EC_DATA_ABORT_SAME_EL << ESR_EC_OFFSET);

	dlog_notice("Injecting Data Abort exception into VM %#x.\n",
		    vcpu->vm->id);

	inject_el1_exception(vcpu, esr_el1_value, far_el2);
}

/**
 * Injects an Instruction Abort exception (same exception level).
 */
static void inject_el1_instruction_abort_exception(struct vcpu *vcpu,
						    uintreg_t esr_el2,
						    uintreg_t far_el2)
{
	/*
	 * ISS encoding remains the same, but the EC is changed to reflect
	 * where the exception came from.
	 * See Arm Architecture Reference Manual Armv8-A, pages D13-2941/2980.
	 */
	uintreg_t esr_el1_value =
		GET_ESR_ISS(esr_el2) | GET_ESR_IL(esr_el2) |
		(EC_INSTRUCTION_ABORT_SAME_EL << ESR_EC_OFFSET);

	dlog_notice("Injecting Instruction Abort exception into VM %#x.\n",
		    vcpu->vm->id);

	inject_el1_exception(vcpu, esr_el1_value, far_el2);
}

/**
 * Injects an exception with an unknown reason into EL1.
 */
static void inject_el1_unknown_exception(struct vcpu *vcpu, uintreg_t esr_el2)
{
	uintreg_t esr_el1_value =
		GET_ESR_IL(esr_el2) | (EC_UNKNOWN << ESR_EC_OFFSET);

	dlog_notice("Injecting Unknown Reason exception into VM %#x.\n",
		    vcpu->vm->id);

	/*
	 * The value of the far_el2 register is UNKNOWN in this case,
	 * therefore, don't propagate it to avoid leaking sensitive
	 * information.
	 */
	inject_el1_exception(vcpu, esr_el1_value, 0);
}

/**
 * Injects an exception because of a system register trap.
 */
static void inject_el1_sysreg_trap_exception(struct vcpu *vcpu,
					     uintreg_t esr_el2)
{
	char *direction_str = ISS_IS_READ(esr_el2) ? "read" : "write";

	dlog_notice(
		"Trapped access to system register %s: op0=%d, op1=%d, crn=%d, "
		"crm=%d, op2=%d, rt=%d.\n",
		direction_str, GET_ISS_OP0(esr_el2), GET_ISS_OP1(esr_el2),
		GET_ISS_CRN(esr_el2), GET_ISS_CRM(esr_el2),
		GET_ISS_OP2(esr_el2), GET_ISS_RT(esr_el2));

	inject_el1_unknown_exception(vcpu, esr_el2);
}

static struct vcpu *hvc_handler(struct vcpu *vcpu)
{
	struct ffa_value args = arch_regs_get_args(&vcpu->regs);
	struct vcpu *next = NULL;

	if (hvc_smc_handler(args, vcpu, &next)) {
		return next;
	}

	switch (args.func) {
	case HF_MAILBOX_WRITABLE_GET:
		vcpu->regs.r[0] = api_mailbox_writable_get(vcpu);
		break;

	case HF_MAILBOX_WAITER_GET:
		vcpu->regs.r[0] = api_mailbox_waiter_get(args.arg1, vcpu);
		break;

	case HF_INTERRUPT_ENABLE:
		vcpu->regs.r[0] = api_interrupt_enable(args.arg1, args.arg2,
						       args.arg3, vcpu);
		break;

	case HF_INTERRUPT_GET:
		vcpu->regs.r[0] = api_interrupt_get(vcpu);
		break;

	case HF_INTERRUPT_INJECT:
		vcpu->regs.r[0] = api_interrupt_inject(args.arg1, args.arg2,
						       args.arg3, vcpu, &next);
		break;

	case HF_DEBUG_LOG:
		vcpu->regs.r[0] = api_debug_log(args.arg1, vcpu);
		break;

#if SECURE_WORLD == 1
	case HF_INTERRUPT_DEACTIVATE:
		vcpu->regs.r[0] = plat_ffa_interrupt_deactivate(
			args.arg1, args.arg2, vcpu);
		break;
#endif

	default:
		vcpu->regs.r[0] = SMCCC_ERROR_UNKNOWN;
	}

	vcpu_update_virtual_interrupts(next);

	return next;
}

struct vcpu *irq_lower(void)
{
#if SECURE_WORLD == 1
	struct vcpu *next = NULL;

	plat_ffa_handle_secure_interrupt(current(), &next, false);

	/*
	 * Since we are in interrupt context, set the bit for the
	 * next vCPU directly in the register.
	 */
	vcpu_update_virtual_interrupts(next);

	return next;
#else
	/*
	 * Switch back to primary VM, interrupts will be handled there.
	 *
	 * If the VM has aborted, this vCPU will be aborted when the scheduler
	 * tries to run it again. This means the interrupt will not be delayed
	 * by the aborted VM.
	 *
	 * TODO: Only switch when the interrupt isn't for the current VM.
	 */
	return api_preempt(current());
#endif
}

struct vcpu *fiq_lower(void)
{
#if SECURE_WORLD == 1
	struct vcpu_locked current_locked;
	struct vcpu *current_vcpu = current();
	int64_t ret;

	assert(current_vcpu->vm->ns_interrupts_action != NS_ACTION_QUEUED);

	if (plat_ffa_vm_managed_exit_supported(current_vcpu->vm)) {
		uint8_t pmr = plat_interrupts_get_priority_mask();

		/* Mask all interrupts */
		plat_interrupts_set_priority_mask(0x0);

		current_locked = vcpu_lock(current_vcpu);
		current_vcpu->priority_mask = pmr;
		ret = api_interrupt_inject_locked(current_locked,
						  HF_MANAGED_EXIT_INTID,
						  current_vcpu, NULL);
		if (ret != 0) {
			panic("Failed to inject managed exit interrupt\n");
		}

		/* Entering managed exit sequence. */
		current_vcpu->processing_managed_exit = true;

		vcpu_unlock(&current_locked);

		/*
		 * Since we are in interrupt context, set the bit for the
		 * current vCPU directly in the register.
		 */
		vcpu_update_virtual_interrupts(NULL);

		/* Resume current vCPU. */
		return NULL;
	}

	/*
	 * Unwind Normal World Scheduled Call chain in response to NS
	 * Interrupt.
	 */
	return plat_ffa_unwind_nwd_call_chain_interrupt(current_vcpu);
#else
	return irq_lower();
#endif
}

noreturn struct vcpu *serr_lower(void)
{
	/*
	 * SError exceptions should be isolated and handled by the responsible
	 * VM/exception level. Getting here indicates a bug, that isolation is
	 * not working, or a processor that does not support ARMv8.2-IESB, in
	 * which case Hafnium routes SError exceptions to EL2 (here).
	 */
	panic("SError from a lower exception level.");
}

/**
 * Initialises a fault info structure. It assumes that an FnV bit exists at
 * bit offset 10 of the ESR, and that it is only valid when the bottom 6 bits
 * of the ESR (the fault status code) are 010000; this is the case for both
 * instruction and data aborts, but not necessarily for other exception
 * reasons.
 */
static struct vcpu_fault_info fault_info_init(uintreg_t esr,
					      const struct vcpu *vcpu,
					      uint32_t mode)
{
	uint32_t fsc = esr & 0x3f;
	struct vcpu_fault_info r;
	uint64_t hpfar_el2_val;
	uint64_t hpfar_el2_fipa;

	r.mode = mode;
	r.pc = va_init(vcpu->regs.pc);

	/* Get Hypervisor IPA Fault Address value. */
	hpfar_el2_val = read_msr(hpfar_el2);

	/* Extract Faulting IPA. */
	hpfar_el2_fipa = (hpfar_el2_val & HPFAR_EL2_FIPA) << 8;

#if SECURE_WORLD == 1

	/**
	 * Determine if the faulting IPA targets NS space.
	 * At NS-EL2, hpfar_el2 bit 63 is RES0. At S-EL2, this bit determines
	 * whether the faulting Stage-1 address output is a secure or
	 * non-secure IPA.
	 */
	if ((hpfar_el2_val & HPFAR_EL2_NS) != 0) {
		r.mode |= MM_MODE_NS;
	}

#endif

	/*
	 * Check the FnV bit, which is only valid if dfsc/ifsc is 010000. It
	 * indicates that we cannot rely on far_el2.
	 */
	if (fsc == 0x10 && esr & (1U << 10)) {
		r.vaddr = va_init(0);
		r.ipaddr = ipa_init(hpfar_el2_fipa);
	} else {
		r.vaddr = va_init(read_msr(far_el2));
		r.ipaddr = ipa_init(hpfar_el2_fipa |
				    (read_msr(far_el2) & (PAGE_SIZE - 1)));
	}

	return r;
}
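/*
 * Worked example for the address reconstruction above, assuming 4 KiB pages
 * and illustrative register values: if HPFAR_EL2 reads as 0x901230 (FIPA
 * field = 0x90123) and FAR_EL2 reads as 0x80123456, then hpfar_el2_fipa =
 * 0x901230 << 8 = 0x90123000 and the reported IPA is 0x90123000 | 0x456 =
 * 0x90123456: the page-aligned IPA from HPFAR_EL2 combined with the page
 * offset from FAR_EL2.
 */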

struct vcpu *sync_lower_exception(uintreg_t esr, uintreg_t far)
{
	struct vcpu *vcpu = current();
	struct vcpu_fault_info info;
	struct vcpu *new_vcpu = NULL;
	uintreg_t ec = GET_ESR_EC(esr);
	bool is_el0_partition = vcpu->vm->el0_partition;
	bool resume = false;

	switch (ec) {
	case EC_WFI_WFE:
		/* Skip the instruction. */
		vcpu->regs.pc += GET_NEXT_PC_INC(esr);

		/*
		 * For EL0 partitions, treat both WFI and WFE the same way so
		 * that FFA_RUN can be called on the partition to resume it. If
		 * we treated WFI using api_wait_for_interrupt, the vCPU would
		 * be blocked waiting for an interrupt, but we cannot inject
		 * interrupts into EL0 partitions.
		 */
		if (is_el0_partition) {
			api_yield(vcpu, &new_vcpu);
			return new_vcpu;
		}

		/* Check TI bit of ISS, 0 = WFI, 1 = WFE. */
		if (esr & 1) {
			/* WFE */
			/*
			 * TODO: consider giving the scheduler more context,
			 * somehow.
			 */
			api_yield(vcpu, &new_vcpu);
			return new_vcpu;
		}
		/* WFI */
		return api_wait_for_interrupt(vcpu);

	case EC_DATA_ABORT_LOWER_EL:
		info = fault_info_init(
			esr, vcpu, (esr & (1U << 6)) ? MM_MODE_W : MM_MODE_R);

		resume = vcpu_handle_page_fault(vcpu, &info);
		if (is_el0_partition) {
			dlog_warning("Data abort on EL0 partition\n");
			/*
			 * Abort the EL0 context if we should not resume the
			 * context, or if it is an alignment fault.
			 * vcpu_handle_page_fault() only checks the mode of the
			 * page in an architecture-agnostic way, but alignment
			 * faults on aarch64 can happen on a correctly mapped
			 * page.
			 */
			if (!resume || ((esr & 0x3f) == 0x21)) {
				return api_abort(vcpu);
			}
		}

		if (resume) {
			return NULL;
		}

		/* Inform the EL1 of the data abort. */
		inject_el1_data_abort_exception(vcpu, esr, far);

		/* Schedule the same VM to continue running. */
		return NULL;

	case EC_INSTRUCTION_ABORT_LOWER_EL:
		info = fault_info_init(esr, vcpu, MM_MODE_X);

		if (vcpu_handle_page_fault(vcpu, &info)) {
			return NULL;
		}

		if (is_el0_partition) {
			dlog_warning("Instruction abort on EL0 partition\n");
			return api_abort(vcpu);
		}

		/* Inform the EL1 of the instruction abort. */
		inject_el1_instruction_abort_exception(vcpu, esr, far);

		/* Schedule the same VM to continue running. */
		return NULL;
	case EC_SVC:
		CHECK(is_el0_partition);
		return hvc_handler(vcpu);
	case EC_HVC:
		if (is_el0_partition) {
			dlog_warning("Unexpected HVC Trap on EL0 partition\n");
			return api_abort(vcpu);
		}
		return hvc_handler(vcpu);

	case EC_SMC: {
		uintreg_t smc_pc = vcpu->regs.pc;
		struct vcpu *next = smc_handler(vcpu);

		/* Skip the SMC instruction. */
		vcpu->regs.pc = smc_pc + GET_NEXT_PC_INC(esr);

		return next;
	}

	case EC_MSR:
		/*
		 * NOTE: This should never be reached because it goes through a
		 * separate path handled by handle_system_register_access().
		 */
		panic("Handled by handle_system_register_access().");

	default:
		dlog_notice(
			"Unknown lower sync exception pc=%#x, esr=%#x, "
			"ec=%#x\n",
			vcpu->regs.pc, esr, ec);
		break;
	}

	if (is_el0_partition) {
		return api_abort(vcpu);
	}

	/*
	 * The exception wasn't handled. Inject it into the VM to give it a
	 * chance to handle it as an unknown exception.
	 */
	inject_el1_unknown_exception(vcpu, esr);

	/* Schedule the same VM to continue running. */
	return NULL;
}

/**
 * Handles EC = 011000, MSR, MRS instruction traps.
 * Injects an exception into EL1 if the access is not allowed; otherwise
 * emulates the access and skips the trapped instruction.
 */
void handle_system_register_access(uintreg_t esr_el2)
{
	struct vcpu *vcpu = current();
	ffa_vm_id_t vm_id = vcpu->vm->id;
	uintreg_t ec = GET_ESR_EC(esr_el2);

	CHECK(ec == EC_MSR);
	/*
	 * Handle accesses to debug and performance monitor registers.
	 * Inject an exception for unhandled/unsupported registers.
	 */
	if (debug_el1_is_register_access(esr_el2)) {
		if (!debug_el1_process_access(vcpu, vm_id, esr_el2)) {
			inject_el1_sysreg_trap_exception(vcpu, esr_el2);
			return;
		}
	} else if (perfmon_is_register_access(esr_el2)) {
		if (!perfmon_process_access(vcpu, vm_id, esr_el2)) {
			inject_el1_sysreg_trap_exception(vcpu, esr_el2);
			return;
		}
	} else if (feature_id_is_register_access(esr_el2)) {
		if (!feature_id_process_access(vcpu, esr_el2)) {
			inject_el1_sysreg_trap_exception(vcpu, esr_el2);
			return;
		}
	} else {
		inject_el1_sysreg_trap_exception(vcpu, esr_el2);
		return;
	}

	/* Instruction was fulfilled. Skip it and run the next one. */
	vcpu->regs.pc += GET_NEXT_PC_INC(esr_el2);
}