/*
 * Copyright 2014, General Dynamics C4 Systems
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#pragma once

#include <config.h>

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT

#include <api/failures.h>
#include <linker.h>

#define HCR_RW       BIT(31)     /* Execution state control        */
#define HCR_TRVM     BIT(30)     /* Trap reads of VM controls      */
#define HCR_HCD      BIT(29)     /* Disable HVC                    */
#define HCR_TDZ      BIT(28)     /* Trap DC ZVA, AArch64 only      */
#define HCR_TGE      BIT(27)     /* Trap general exceptions        */
#define HCR_TVM      BIT(26)     /* Trap MMU access                */
#define HCR_TTLB     BIT(25)     /* Trap TLB operations            */
#define HCR_TPU      BIT(24)     /* Trap cache maintenance         */
#define HCR_TPC      BIT(23)     /* Trap cache maintenance PoC     */
#define HCR_TSW      BIT(22)     /* Trap cache maintenance set/way */
#define HCR_TCACHE   (HCR_TPU | HCR_TPC | HCR_TSW)
#define HCR_TAC      BIT(21)     /* Trap ACTLR access              */
#define HCR_TIDCP    BIT(20)     /* Trap lockdown                  */
#define HCR_TSC      BIT(19)     /* Trap SMC instructions          */
#define HCR_TID3     BIT(18)     /* Trap ID register 3             */
#define HCR_TID2     BIT(17)     /* Trap ID register 2             */
#define HCR_TID1     BIT(16)     /* Trap ID register 1             */
#define HCR_TID0     BIT(15)     /* Trap ID register 0             */
#define HCR_TID      (HCR_TID0 | HCR_TID1 | HCR_TID2 | HCR_TID3)
#define HCR_TWE      BIT(14)     /* Trap WFE                       */
#define HCR_TWI      BIT(13)     /* Trap WFI                       */
#define HCR_DC       BIT(12)     /* Default cacheable              */
#define HCR_BSU(x)   ((x) << 10) /* Barrier shareability upgrade   */
#define HCR_FB       BIT( 9)     /* Force broadcast                */
#define HCR_VA       BIT( 8)     /* Virtual async abort            */
#define HCR_VI       BIT( 7)     /* Virtual IRQ                    */
#define HCR_VF       BIT( 6)     /* Virtual FIQ                    */
#define HCR_AMO      BIT( 5)     /* CPSR.A override enable         */
#define HCR_IMO      BIT( 4)     /* CPSR.I override enable         */
#define HCR_FMO      BIT( 3)     /* CPSR.F override enable         */
#define HCR_PTW      BIT( 2)     /* Protected table walk           */
#define HCR_SWIO     BIT( 1)     /* set/way invalidate override    */
#define HCR_VM       BIT( 0)     /* Virtualization MMU enable      */

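/* Saved state of the GIC virtual CPU interface: hypervisor control, VM
 * control, active priorities and the list registers used to present virtual
 * interrupts to the guest. */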
struct gicVCpuIface {
    uint32_t hcr;
    uint32_t vmcr;
    uint32_t apr;
    virq_t lr[GIC_VCPU_MAX_NUM_LR];
};

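/* With CONFIG_VTIMER_UPDATE_VOFFSET enabled, the physical counter value is
 * recorded when a VCPU is switched out so the virtual timer offset can be
 * adjusted for the time the VCPU was not running. */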
#ifdef CONFIG_VTIMER_UPDATE_VOFFSET
struct vTimer {
    uint64_t last_pcount;
};
#endif

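/* Virtual PPI (private peripheral interrupt) events that the kernel handles
 * on behalf of a VCPU; currently only the virtual timer interrupt. */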
enum VPPIEventIRQ {
    VPPIEventIRQ_VTimer,
    n_VPPIEventIRQ,
    VPPIEventIRQ_invalid = n_VPPIEventIRQ,
};
typedef word_t VPPIEventIRQ_t;

struct vcpu {
    /* TCB associated with this VCPU. */
    struct tcb *vcpuTCB;
    struct gicVCpuIface vgic;
    word_t regs[seL4_VCPUReg_Num];
    bool_t vppi_masked[n_VPPIEventIRQ];
#ifdef CONFIG_VTIMER_UPDATE_VOFFSET
    /* struct vTimer holds a 64-bit value and therefore requires 8-byte
     * alignment. On 32-bit platforms, when the preceding members occupy an
     * odd number of 32-bit words, an explicit padding word is needed to keep
     * the struct packed.
     */
    word_t vcpu_padding;
    struct vTimer virtTimer;
#endif
};
typedef struct vcpu vcpu_t;
compile_assert(vcpu_size_correct, sizeof(struct vcpu) <= BIT(VCPU_SIZE_BITS))

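/* Entry points called from the trap and interrupt handlers: VGIC maintenance
 * interrupts, hypervisor-trapped faults (passed the syndrome register value)
 * and virtualised PPI events. */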
void VGICMaintenance(void);
void handleVCPUFault(word_t hsr);
void VPPIEvent(irq_t irq);

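/* Initialises a VCPU object's saved register and VGIC state to defaults. */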
void vcpu_init(vcpu_t *vcpu);

/* Performs one-off initialization of VCPU state and structures. Should be
 * called in boot code before any other VCPU functions. */
BOOT_CODE void vcpu_boot_init(void);

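/* Cleans up a VCPU when its object is finalised, dissociating it from any
 * bound TCB. */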
void vcpu_finalise(vcpu_t *vcpu);

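/* Bind or unbind a VCPU and a TCB; a TCB can be bound to at most one VCPU and
 * vice versa. */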
void associateVCPUTCB(vcpu_t *vcpu, tcb_t *tcb);

void dissociateVCPUTCB(vcpu_t *vcpu, tcb_t *tcb);

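/* Top-level decoder for invocations on a VCPU capability; dispatches to the
 * decodeVCPU* helpers below. */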
exception_t decodeARMVCPUInvocation(
    word_t label,
    unsigned int length,
    cptr_t cptr,
    cte_t *slot,
    cap_t cap,
    bool_t call,
    word_t *buffer
);

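/* vcpu_switch makes the given VCPU (or the native state, when passed NULL)
 * current, saving and restoring hardware state as required; vcpu_restore
 * loads a VCPU's saved state onto the hardware. */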
void vcpu_restore(vcpu_t *cpu);
void vcpu_switch(vcpu_t *cpu);
#ifdef ENABLE_SMP_SUPPORT
void handleVCPUInjectInterruptIPI(vcpu_t *vcpu, unsigned long index, virq_t virq);
#endif /* ENABLE_SMP_SUPPORT */

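/* Decode/invoke pairs for the individual VCPU operations: the decode functions
 * validate the invocation arguments, the invoke functions perform the
 * operation. */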
exception_t decodeVCPUWriteReg(cap_t cap, unsigned int length, word_t *buffer);
exception_t decodeVCPUReadReg(cap_t cap, unsigned int length, bool_t call, word_t *buffer);
exception_t decodeVCPUInjectIRQ(cap_t cap, unsigned int length, word_t *buffer);
exception_t decodeVCPUSetTCB(cap_t cap);
exception_t decodeVCPUAckVPPI(cap_t cap, unsigned int length, word_t *buffer);

exception_t invokeVCPUWriteReg(vcpu_t *vcpu, word_t field, word_t value);
exception_t invokeVCPUReadReg(vcpu_t *vcpu, word_t field, bool_t call);
exception_t invokeVCPUInjectIRQ(vcpu_t *vcpu, unsigned long index, virq_t virq);
exception_t invokeVCPUSetTCB(vcpu_t *vcpu, tcb_t *tcb);
exception_t invokeVCPUAckVPPI(vcpu_t *vcpu, VPPIEventIRQ_t vppi);
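/* Accessors for the hardware registers backing the VCPU register file;
 * defined by the architecture-specific VCPU code. */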
static word_t vcpu_hw_read_reg(word_t reg_index);
static void vcpu_hw_write_reg(word_t reg_index, word_t reg);

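/* Helpers that copy individual guest registers between the VCPU object and
 * the hardware, with bounds and NULL checks. */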
static inline void vcpu_save_reg(vcpu_t *vcpu, word_t reg)
{
    if (reg >= seL4_VCPUReg_Num || vcpu == NULL) {
        fail("ARM/HYP: Invalid register index or NULL VCPU");
        return;
    }
    vcpu->regs[reg] = vcpu_hw_read_reg(reg);
}

static inline void vcpu_save_reg_range(vcpu_t *vcpu, word_t start, word_t end)
{
    for (word_t i = start; i <= end; i++) {
        vcpu_save_reg(vcpu, i);
    }
}

static inline void vcpu_restore_reg(vcpu_t *vcpu, word_t reg)
{
    if (reg >= seL4_VCPUReg_Num || vcpu == NULL) {
        fail("ARM/HYP: Invalid register index or NULL VCPU");
        return;
    }
    vcpu_hw_write_reg(reg, vcpu->regs[reg]);
}

static inline void vcpu_restore_reg_range(vcpu_t *vcpu, word_t start, word_t end)
{
    for (word_t i = start; i <= end; i++) {
        vcpu_restore_reg(vcpu, i);
    }
}

static inline word_t vcpu_read_reg(vcpu_t *vcpu, word_t reg)
{
    if (reg >= seL4_VCPUReg_Num || vcpu == NULL) {
        fail("ARM/HYP: Invalid register index or NULL VCPU");
        return 0;
    }
    return vcpu->regs[reg];
}

static inline void vcpu_write_reg(vcpu_t *vcpu, word_t reg, word_t value)
{
    if (reg >= seL4_VCPUReg_Num || vcpu == NULL) {
        fail("ARM/HYP: Invalid register index or NULL VCPU");
        return;
    }
    vcpu->regs[reg] = value;
}

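/* Maps a hardware IRQ to the corresponding VPPI event, or VPPIEventIRQ_invalid
 * if the IRQ is not a virtualised PPI. */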
static inline VPPIEventIRQ_t irqVPPIEventIndex(irq_t irq)
{
    switch (IRQT_TO_IRQ(irq)) {
    case INTERRUPT_VTIMER_EVENT:
        return VPPIEventIRQ_VTimer;

    default:
        return VPPIEventIRQ_invalid;
    }
}

#else /* end of CONFIG_ARM_HYPERVISOR_SUPPORT */

/* Used in boot.c with a guard; use a macro to avoid exposing vcpu_t. */
#define vcpu_boot_init() do {} while(0)
#define vcpu_switch(x) do {} while(0)
static inline void VGICMaintenance(void) {}

#endif /* end of !CONFIG_ARM_HYPERVISOR_SUPPORT */