1 /*
2  * Copyright 2019 The Hafnium Authors.
3  *
4  * Use of this source code is governed by a BSD-style
5  * license that can be found in the LICENSE file or at
6  * https://opensource.org/licenses/BSD-3-Clause.
7  */
8 
9 #pragma once
10 
11 #include "hf/arch/types.h"
12 
13 #include "hf/addr.h"
14 #include "hf/interrupt_desc.h"
15 #include "hf/spinlock.h"
16 
17 #include "vmapi/hf/ffa.h"
18 
/** Actions the SPMC can take in response to a non-secure interrupt. */
20 #define NS_ACTION_QUEUED 0
21 #define NS_ACTION_ME 1
22 #define NS_ACTION_SIGNALED 2
23 #define NS_ACTION_INVALID 3
24 
/**
 * Execution states of a vCPU, as tracked by the scheduler (see the comment
 * on the 'state' field of struct vcpu).
 */
enum vcpu_state {
	/** The vCPU is switched off. */
	VCPU_STATE_OFF,

	/** The vCPU is currently running. */
	VCPU_STATE_RUNNING,

	/** The vCPU is waiting to be allocated CPU cycles to do work. */
	VCPU_STATE_WAITING,

	/**
	 * The vCPU is blocked and waiting for some work to complete on
	 * its behalf.
	 */
	VCPU_STATE_BLOCKED,

	/** The vCPU has been preempted by an interrupt. */
	VCPU_STATE_PREEMPTED,

	/** The vCPU is waiting for an interrupt. */
	VCPU_STATE_BLOCKED_INTERRUPT,

	/** The vCPU has aborted. */
	VCPU_STATE_ABORTED,
};
50 
51 /** Refer to section 7 of the FF-A v1.1 EAC0 spec. */
/**
 * Runtime models in which a partition's execution context can run.
 * Refer to section 7 of the FF-A v1.1 EAC0 spec.
 */
enum partition_runtime_model {
	/** No runtime model currently in effect. */
	RTM_NONE,
	/** Runtime model for FFA_RUN. */
	RTM_FFA_RUN,
	/** Runtime model for FFA_MSG_SEND_DIRECT_REQUEST. */
	RTM_FFA_DIR_REQ,
	/** Runtime model for Secure Interrupt handling. */
	RTM_SEC_INTERRUPT,
	/** Runtime model for SP Initialization. */
	RTM_SP_INIT,
};
63 
64 /** Refer to section 8.2.3 of the FF-A EAC0 spec. */
/**
 * Who scheduled the CPU cycles the vCPU is currently consuming.
 * Refer to section 8.2.3 of the FF-A EAC0 spec.
 *
 * NOTE(review): 'NONE' is a very generic identifier for the global
 * namespace of a public header; consider a SCHED_MODE_ prefix if callers
 * can be updated.
 */
enum schedule_mode {
	NONE,
	/** Normal world scheduled mode. */
	NWD_MODE,
	/** SPMC scheduled mode. */
	SPMC_MODE,
};
72 
/** Per-vCPU virtual interrupt state: enable/pending bitmaps and counters. */
struct interrupts {
	/** Bitfield keeping track of which interrupts are enabled. */
	struct interrupt_bitmap interrupt_enabled;
	/** Bitfield keeping track of which interrupts are pending. */
	struct interrupt_bitmap interrupt_pending;
	/**
	 * Bitfield recording the interrupt pin configuration: a cleared bit
	 * means IRQ, a set bit means FIQ (see vcpu_virt_interrupt_set_type).
	 */
	struct interrupt_bitmap interrupt_type;
	/**
	 * The number of interrupts which are currently both enabled and
	 * pending. Count independently virtual IRQ and FIQ interrupt types
	 * i.e. the sum of the two counters is the number of bits set in
	 * interrupt_enabled & interrupt_pending.
	 */
	uint32_t enabled_and_pending_irq_count;
	uint32_t enabled_and_pending_fiq_count;
};
89 
/**
 * Information describing a fault taken by a vCPU, passed to
 * vcpu_handle_page_fault().
 */
struct vcpu_fault_info {
	/** Intermediate physical address involved in the fault. */
	ipaddr_t ipaddr;
	/** Virtual address involved in the fault. */
	vaddr_t vaddr;
	/** Program counter at the time of the fault. */
	vaddr_t pc;
	/*
	 * NOTE(review): presumably the access mode bits of the faulting
	 * access (see hf/mm mode flags) - confirm with the implementation
	 * of vcpu_handle_page_fault().
	 */
	uint32_t mode;
};
96 
/** Doubly-linked node in a chain of SPs calling into each other. */
struct call_chain {
	/** Previous node in the SP call chain. */
	struct vcpu *prev_node;

	/** Next node in the SP call chain. */
	struct vcpu *next_node;
};
104 
/** State of a single virtual CPU. Mutable fields are guarded by 'lock'. */
struct vcpu {
	/** Protects this vCPU's state; acquired via vcpu_lock(). */
	struct spinlock lock;

	/*
	 * The state is only changed in the context of the vCPU being run. This
	 * ensures the scheduler can easily keep track of the vCPU state as
	 * transitions are indicated by the return code from the run call.
	 */
	enum vcpu_state state;

	/*
	 * NOTE(review): presumably set once the vCPU has completed its
	 * initial boot sequence - confirm with the implementation.
	 */
	bool is_bootstrapped;
	/** Physical CPU this vCPU is associated with, if any. */
	struct cpu *cpu;
	/** The VM this vCPU belongs to. */
	struct vm *vm;
	/** Saved architecture-specific register state. */
	struct arch_regs regs;
	/** Virtual interrupt enable/pending bitmaps and counters. */
	struct interrupts interrupts;

	/*
	 * Determine whether the 'regs' field is available for use. This is set
	 * to false when a vCPU is about to run on a physical CPU, and is set
	 * back to true when it is descheduled. This is not relevant for the
	 * primary VM vCPUs in the normal world (or the "other world VM" vCPUs
	 * in the secure world) as they are pinned to physical CPUs and there
	 * is no contention to take care of.
	 */
	bool regs_available;

	/*
	 * If the current vCPU is executing as a consequence of a
	 * FFA_MSG_SEND_DIRECT_REQ invocation, then this member holds the
	 * originating VM ID from which the call originated.
	 * The value HF_INVALID_VM_ID implies the vCPU is not executing as
	 * a result of a prior FFA_MSG_SEND_DIRECT_REQ invocation.
	 */
	ffa_vm_id_t direct_request_origin_vm_id;

	/** Determine whether partition is currently handling managed exit. */
	bool processing_managed_exit;

	/**
	 * Determine whether vCPU is currently handling secure interrupt.
	 */
	bool processing_secure_interrupt;
	/*
	 * NOTE(review): presumably records that the current secure interrupt
	 * has already been deactivated at the interrupt controller - confirm
	 * with the secure interrupt handling code.
	 */
	bool secure_interrupt_deactivated;

	/**
	 * INTID of the current secure interrupt being processed by this vCPU.
	 */
	uint32_t current_sec_interrupt_id;

	/**
	 * Track current vCPU which got pre-empted when secure interrupt
	 * triggered.
	 */
	struct vcpu *preempted_vcpu;

	/**
	 * Current value of the Priority Mask register which is saved/restored
	 * during secure interrupt handling.
	 */
	uint8_t priority_mask;

	/**
	 * Per FF-A v1.1-Beta0 spec section 8.3, an SP can use multiple
	 * mechanisms to signal completion of secure interrupt handling. SP
	 * can invoke explicit FF-A ABIs, namely FFA_MSG_WAIT and FFA_RUN,
	 * when in WAITING/BLOCKED state respectively, but has to perform
	 * implicit signal completion mechanism by dropping the priority
	 * of the virtual secure interrupt when SPMC signaled the virtual
	 * interrupt in PREEMPTED state(The vCPU was preempted by a Self S-Int
	 * while running). This variable helps SPMC to keep a track of such
	 * mechanism and perform appropriate bookkeeping.
	 */
	bool implicit_completion_signal;

	/** SP call chain. */
	struct call_chain call_chain;

	/**
	 * Indicates if the current vCPU is running in SPMC scheduled
	 * mode or Normal World scheduled mode.
	 */
	enum schedule_mode scheduling_mode;

	/**
	 * Present action taken by SP in response to a non secure interrupt
	 * based on the precedence rules as specified in section 8.3.1.4 of
	 * the FF-A v1.1 EAC0 spec. One of the NS_ACTION_* values.
	 */
	uint8_t present_action_ns_interrupts;

	/**
	 * If the action in response to a non secure interrupt is to queue it,
	 * this field is used to save and restore the current priority mask.
	 */
	uint8_t mask_ns_interrupts;

	/** Partition Runtime Model. */
	enum partition_runtime_model rt_model;
};
204 
205 /** Encapsulates a vCPU whose lock is held. */
/**
 * Encapsulates a vCPU whose lock is held. Obtained via vcpu_lock() and
 * released via vcpu_unlock().
 */
struct vcpu_locked {
	struct vcpu *vcpu;
};
209 
210 /** Container for two vcpu_locked structures. */
/** Container for two vcpu_locked structures, returned by vcpu_lock_both(). */
struct two_vcpu_locked {
	struct vcpu_locked vcpu1;
	struct vcpu_locked vcpu2;
};
215 
/** Acquires the vCPU's lock, returning a handle that proves it is held. */
struct vcpu_locked vcpu_lock(struct vcpu *vcpu);

/*
 * Acquires the locks of both vCPUs. NOTE(review): presumably taken in a
 * global order to avoid deadlock - confirm with the implementation.
 */
struct two_vcpu_locked vcpu_lock_both(struct vcpu *vcpu1, struct vcpu *vcpu2);

/** Releases the lock obtained via vcpu_lock(). */
void vcpu_unlock(struct vcpu_locked *locked);

/** Initialises the vCPU and associates it with the given VM. */
void vcpu_init(struct vcpu *vcpu, struct vm *vm);

/** Turns the (locked) vCPU on with the given entry point and argument. */
void vcpu_on(struct vcpu_locked vcpu, ipaddr_t entry, uintreg_t arg);

/** Returns the index of the vCPU within its VM. */
ffa_vcpu_index_t vcpu_index(const struct vcpu *vcpu);

/** Returns true if the (locked) vCPU is in the off state. */
bool vcpu_is_off(struct vcpu_locked vcpu);

/*
 * Resets a secondary vCPU and starts it at the given entry point with the
 * given argument; the boolean result's exact contract is defined by the
 * implementation.
 */
bool vcpu_secondary_reset_and_start(struct vcpu_locked vcpu_locked,
				    ipaddr_t entry, uintreg_t arg);

/*
 * Handles a page fault described by 'f' taken by 'current'; returns whether
 * the fault was handled.
 */
bool vcpu_handle_page_fault(const struct vcpu *current,
			    struct vcpu_fault_info *f);

/** Resets the vCPU back to its initial state. */
void vcpu_reset(struct vcpu *vcpu);

/** Records the index of the physical core the vCPU is associated with. */
void vcpu_set_phys_core_idx(struct vcpu *vcpu);
232 
vcpu_is_virt_interrupt_enabled(struct interrupts * interrupts,uint32_t intid)233 static inline bool vcpu_is_virt_interrupt_enabled(struct interrupts *interrupts,
234 						  uint32_t intid)
235 {
236 	return interrupt_bitmap_get_value(&interrupts->interrupt_enabled,
237 					  intid) == 1U;
238 }
239 
vcpu_virt_interrupt_set_enabled(struct interrupts * interrupts,uint32_t intid)240 static inline void vcpu_virt_interrupt_set_enabled(
241 	struct interrupts *interrupts, uint32_t intid)
242 {
243 	interrupt_bitmap_set_value(&interrupts->interrupt_enabled, intid);
244 }
245 
vcpu_virt_interrupt_clear_enabled(struct interrupts * interrupts,uint32_t intid)246 static inline void vcpu_virt_interrupt_clear_enabled(
247 	struct interrupts *interrupts, uint32_t intid)
248 {
249 	interrupt_bitmap_clear_value(&interrupts->interrupt_enabled, intid);
250 }
251 
vcpu_is_virt_interrupt_pending(struct interrupts * interrupts,uint32_t intid)252 static inline bool vcpu_is_virt_interrupt_pending(struct interrupts *interrupts,
253 						  uint32_t intid)
254 {
255 	return interrupt_bitmap_get_value(&interrupts->interrupt_pending,
256 					  intid) == 1U;
257 }
258 
vcpu_virt_interrupt_set_pending(struct interrupts * interrupts,uint32_t intid)259 static inline void vcpu_virt_interrupt_set_pending(
260 	struct interrupts *interrupts, uint32_t intid)
261 {
262 	interrupt_bitmap_set_value(&interrupts->interrupt_pending, intid);
263 }
264 
vcpu_virt_interrupt_clear_pending(struct interrupts * interrupts,uint32_t intid)265 static inline void vcpu_virt_interrupt_clear_pending(
266 	struct interrupts *interrupts, uint32_t intid)
267 {
268 	interrupt_bitmap_clear_value(&interrupts->interrupt_pending, intid);
269 }
270 
vcpu_virt_interrupt_get_type(struct interrupts * interrupts,uint32_t intid)271 static inline enum interrupt_type vcpu_virt_interrupt_get_type(
272 	struct interrupts *interrupts, uint32_t intid)
273 {
274 	return (enum interrupt_type)interrupt_bitmap_get_value(
275 		&interrupts->interrupt_type, intid);
276 }
277 
vcpu_virt_interrupt_set_type(struct interrupts * interrupts,uint32_t intid,enum interrupt_type type)278 static inline void vcpu_virt_interrupt_set_type(struct interrupts *interrupts,
279 						uint32_t intid,
280 						enum interrupt_type type)
281 {
282 	if (type == INTERRUPT_TYPE_IRQ) {
283 		interrupt_bitmap_clear_value(&interrupts->interrupt_type,
284 					     intid);
285 	} else {
286 		interrupt_bitmap_set_value(&interrupts->interrupt_type, intid);
287 	}
288 }
289 
vcpu_irq_count_increment(struct vcpu_locked vcpu_locked)290 static inline void vcpu_irq_count_increment(struct vcpu_locked vcpu_locked)
291 {
292 	vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count++;
293 }
294 
vcpu_irq_count_decrement(struct vcpu_locked vcpu_locked)295 static inline void vcpu_irq_count_decrement(struct vcpu_locked vcpu_locked)
296 {
297 	vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count--;
298 }
299 
vcpu_fiq_count_increment(struct vcpu_locked vcpu_locked)300 static inline void vcpu_fiq_count_increment(struct vcpu_locked vcpu_locked)
301 {
302 	vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count++;
303 }
304 
vcpu_fiq_count_decrement(struct vcpu_locked vcpu_locked)305 static inline void vcpu_fiq_count_decrement(struct vcpu_locked vcpu_locked)
306 {
307 	vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count--;
308 }
309 
vcpu_interrupt_count_increment(struct vcpu_locked vcpu_locked,struct interrupts * interrupts,uint32_t intid)310 static inline void vcpu_interrupt_count_increment(
311 	struct vcpu_locked vcpu_locked, struct interrupts *interrupts,
312 	uint32_t intid)
313 {
314 	if (vcpu_virt_interrupt_get_type(interrupts, intid) ==
315 	    INTERRUPT_TYPE_IRQ) {
316 		vcpu_irq_count_increment(vcpu_locked);
317 	} else {
318 		vcpu_fiq_count_increment(vcpu_locked);
319 	}
320 }
321 
vcpu_interrupt_count_decrement(struct vcpu_locked vcpu_locked,struct interrupts * interrupts,uint32_t intid)322 static inline void vcpu_interrupt_count_decrement(
323 	struct vcpu_locked vcpu_locked, struct interrupts *interrupts,
324 	uint32_t intid)
325 {
326 	if (vcpu_virt_interrupt_get_type(interrupts, intid) ==
327 	    INTERRUPT_TYPE_IRQ) {
328 		vcpu_irq_count_decrement(vcpu_locked);
329 	} else {
330 		vcpu_fiq_count_decrement(vcpu_locked);
331 	}
332 }
333 
vcpu_interrupt_irq_count_get(struct vcpu_locked vcpu_locked)334 static inline uint32_t vcpu_interrupt_irq_count_get(
335 	struct vcpu_locked vcpu_locked)
336 {
337 	return vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count;
338 }
339 
vcpu_interrupt_fiq_count_get(struct vcpu_locked vcpu_locked)340 static inline uint32_t vcpu_interrupt_fiq_count_get(
341 	struct vcpu_locked vcpu_locked)
342 {
343 	return vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count;
344 }
345 
vcpu_interrupt_count_get(struct vcpu_locked vcpu_locked)346 static inline uint32_t vcpu_interrupt_count_get(struct vcpu_locked vcpu_locked)
347 {
348 	return vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count +
349 	       vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count;
350 }
351 
vcpu_call_chain_extend(struct vcpu * vcpu1,struct vcpu * vcpu2)352 static inline void vcpu_call_chain_extend(struct vcpu *vcpu1,
353 					  struct vcpu *vcpu2)
354 {
355 	vcpu1->call_chain.next_node = vcpu2;
356 	vcpu2->call_chain.prev_node = vcpu1;
357 }
358 
vcpu_call_chain_remove_node(struct vcpu * vcpu1,struct vcpu * vcpu2)359 static inline void vcpu_call_chain_remove_node(struct vcpu *vcpu1,
360 					       struct vcpu *vcpu2)
361 {
362 	vcpu1->call_chain.prev_node = NULL;
363 	vcpu2->call_chain.next_node = NULL;
364 }
365