/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Anup Patel <anup.patel@wdc.com>
 */

#ifndef __RISCV_KVM_HOST_H__
#define __RISCV_KVM_HOST_H__

#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/spinlock.h>
#include <asm/hwcap.h>
#include <asm/kvm_vcpu_fp.h>
#include <asm/kvm_vcpu_insn.h>
#include <asm/kvm_vcpu_sbi.h>
#include <asm/kvm_vcpu_timer.h>
#include <asm/kvm_vcpu_pmu.h>

#define KVM_MAX_VCPUS			1024

#define KVM_HALT_POLL_NS_DEFAULT	500000

#define KVM_VCPU_MAX_FEATURES		0

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_VCPU_RESET		KVM_ARCH_REQ(1)
#define KVM_REQ_UPDATE_HGATP		KVM_ARCH_REQ(2)
#define KVM_REQ_FENCE_I \
	KVM_ARCH_REQ_FLAGS(3, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HFENCE_GVMA_VMID_ALL	KVM_REQ_TLB_FLUSH
#define KVM_REQ_HFENCE_VVMA_ALL \
	KVM_ARCH_REQ_FLAGS(4, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HFENCE \
	KVM_ARCH_REQ_FLAGS(5, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
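
/*
 * Illustrative note (not part of the original header): the requests above
 * are delivered through the generic KVM request API from
 * <linux/kvm_host.h>. A hypothetical caller could queue a reset and wake
 * the target VCPU with:
 *
 *	kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
 *	kvm_vcpu_kick(vcpu);
 *
 * and the VCPU run loop would consume it via
 * kvm_check_request(KVM_REQ_VCPU_RESET, vcpu).
 */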

enum kvm_riscv_hfence_type {
	KVM_RISCV_HFENCE_UNKNOWN = 0,
	KVM_RISCV_HFENCE_GVMA_VMID_GPA,
	KVM_RISCV_HFENCE_VVMA_ASID_GVA,
	KVM_RISCV_HFENCE_VVMA_ASID_ALL,
	KVM_RISCV_HFENCE_VVMA_GVA,
};

struct kvm_riscv_hfence {
	enum kvm_riscv_hfence_type type;
	unsigned long asid;
	unsigned long order;
	gpa_t addr;
	gpa_t size;
};
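
/*
 * Illustrative note (assumed semantics): @addr and @size describe the
 * range to fence and @order the log2 granularity used to walk it. A
 * hypothetical entry covering one 4 KiB guest page could look like:
 *
 *	struct kvm_riscv_hfence data = {
 *		.type  = KVM_RISCV_HFENCE_GVMA_VMID_GPA,
 *		.asid  = 0,
 *		.addr  = gpa,
 *		.size  = PAGE_SIZE,
 *		.order = PAGE_SHIFT,
 *	};
 */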

#define KVM_RISCV_VCPU_MAX_HFENCE	64

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 ecall_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 csr_exit_user;
	u64 csr_exit_kernel;
	u64 signal_exits;
	u64 exits;
};

struct kvm_arch_memory_slot {
};

struct kvm_vmid {
	/*
	 * Writes to vmid_version and vmid happen with vmid_lock held
	 * whereas reads happen without any lock held.
	 */
	unsigned long vmid_version;
	unsigned long vmid;
};
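
/*
 * A minimal sketch of the assumed lockless read side (current_version is
 * a hypothetical local variable, not part of this header): readers sample
 * the fields with READ_ONCE(), e.g.
 *
 *	if (READ_ONCE(vmid->vmid_version) != current_version)
 *		... take vmid_lock and refresh ...
 */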

struct kvm_arch {
	/* G-stage vmid */
	struct kvm_vmid vmid;

	/* G-stage page table */
	pgd_t *pgd;
	phys_addr_t pgd_phys;

	/* Guest Timer */
	struct kvm_guest_timer timer;
};

struct kvm_cpu_trap {
	unsigned long sepc;
	unsigned long scause;
	unsigned long stval;
	unsigned long htval;
	unsigned long htinst;
};

struct kvm_cpu_context {
	unsigned long zero;
	unsigned long ra;
	unsigned long sp;
	unsigned long gp;
	unsigned long tp;
	unsigned long t0;
	unsigned long t1;
	unsigned long t2;
	unsigned long s0;
	unsigned long s1;
	unsigned long a0;
	unsigned long a1;
	unsigned long a2;
	unsigned long a3;
	unsigned long a4;
	unsigned long a5;
	unsigned long a6;
	unsigned long a7;
	unsigned long s2;
	unsigned long s3;
	unsigned long s4;
	unsigned long s5;
	unsigned long s6;
	unsigned long s7;
	unsigned long s8;
	unsigned long s9;
	unsigned long s10;
	unsigned long s11;
	unsigned long t3;
	unsigned long t4;
	unsigned long t5;
	unsigned long t6;
	unsigned long sepc;
	unsigned long sstatus;
	unsigned long hstatus;
	union __riscv_fp_state fp;
};

struct kvm_vcpu_csr {
	unsigned long vsstatus;
	unsigned long vsie;
	unsigned long vstvec;
	unsigned long vsscratch;
	unsigned long vsepc;
	unsigned long vscause;
	unsigned long vstval;
	unsigned long hvip;
	unsigned long vsatp;
	unsigned long scounteren;
};

struct kvm_vcpu_arch {
	/* VCPU ran at least once */
	bool ran_atleast_once;

	/* Last Host CPU on which Guest VCPU exited */
	int last_exit_cpu;

	/* ISA feature bits (similar to MISA) */
	DECLARE_BITMAP(isa, RISCV_ISA_EXT_MAX);

	/* Vendor, Arch, and Implementation details */
	unsigned long mvendorid;
	unsigned long marchid;
	unsigned long mimpid;

	/* SSCRATCH, STVEC, and SCOUNTEREN of Host */
	unsigned long host_sscratch;
	unsigned long host_stvec;
	unsigned long host_scounteren;

	/* CPU context of Host */
	struct kvm_cpu_context host_context;

	/* CPU context of Guest VCPU */
	struct kvm_cpu_context guest_context;

	/* CPU CSR context of Guest VCPU */
	struct kvm_vcpu_csr guest_csr;

	/* CPU context upon Guest VCPU reset */
	struct kvm_cpu_context guest_reset_context;

	/* CPU CSR context upon Guest VCPU reset */
	struct kvm_vcpu_csr guest_reset_csr;

	/*
	 * VCPU interrupts
	 *
	 * We have a lockless approach for tracking pending VCPU interrupts,
	 * implemented using atomic bitops. The irqs_pending bitmap represents
	 * pending interrupts, whereas irqs_pending_mask represents the bits
	 * that changed in irqs_pending. The approach is modeled around a
	 * multiple-producer, single-consumer problem where the consumer is
	 * the VCPU itself; see the illustrative sketch after this structure.
	 */
	unsigned long irqs_pending;
	unsigned long irqs_pending_mask;

	/* VCPU Timer */
	struct kvm_vcpu_timer timer;

	/* HFENCE request queue */
	spinlock_t hfence_lock;
	unsigned long hfence_head;
	unsigned long hfence_tail;
	struct kvm_riscv_hfence hfence_queue[KVM_RISCV_VCPU_MAX_HFENCE];

	/* MMIO instruction details */
	struct kvm_mmio_decode mmio_decode;

	/* CSR instruction details */
	struct kvm_csr_decode csr_decode;

	/* SBI context */
	struct kvm_vcpu_sbi_context sbi_context;

	/* Cache pages needed to program page tables with spinlock held */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* VCPU power-off state */
	bool power_off;

	/* Don't run the VCPU (blocked) */
	bool pause;

	/* Performance monitoring context */
	struct kvm_pmu pmu_context;
};
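
/*
 * Illustrative sketch (assumed shape, not the definitive implementation)
 * of the multiple-producer/single-consumer interrupt tracking described
 * in kvm_vcpu_arch above. A producer marks an interrupt pending with
 * atomic bitops:
 *
 *	set_bit(irq, &vcpu->arch.irqs_pending);
 *	smp_mb__before_atomic();
 *	set_bit(irq, &vcpu->arch.irqs_pending_mask);
 *
 * The VCPU, as the only consumer, atomically clears irqs_pending_mask to
 * learn which bits changed before it synchronizes hvip.
 */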

static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}

#define KVM_ARCH_WANT_MMU_NOTIFIER

#define KVM_RISCV_GSTAGE_TLB_MIN_ORDER	12
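
/*
 * Note (added for clarity): 12 is log2(4 KiB), so the fence helpers below
 * presumably never operate at a granularity finer than one 4 KiB page.
 */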

/* Host-local HFENCE helpers; each operates on the current hart only */
void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
					  gpa_t gpa, gpa_t gpsz,
					  unsigned long order);
void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid);
void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
				     unsigned long order);
void kvm_riscv_local_hfence_gvma_all(void);
void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
					  unsigned long asid,
					  unsigned long gva,
					  unsigned long gvsz,
					  unsigned long order);
void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
					  unsigned long asid);
void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
				     unsigned long gva, unsigned long gvsz,
				     unsigned long order);
void kvm_riscv_local_hfence_vvma_all(unsigned long vmid);
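
/*
 * Illustrative usage (an assumption mirroring typical callers): flush the
 * local hart's G-stage translations for one 4 KiB guest page under a
 * given VMID:
 *
 *	kvm_riscv_local_hfence_gvma_vmid_gpa(vmid, gpa, PAGE_SIZE,
 *					     PAGE_SHIFT);
 */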

void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu);

/* Process deferred fence requests queued against a VCPU */
void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu);

/* Issue fences remotely to a set of VCPUs selected by hbase/hmask */
void kvm_riscv_fence_i(struct kvm *kvm,
		       unsigned long hbase, unsigned long hmask);
void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    gpa_t gpa, gpa_t gpsz,
				    unsigned long order);
void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask);
void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long gva, unsigned long gvsz,
				    unsigned long order, unsigned long asid);
void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long asid);
void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask,
			       unsigned long gva, unsigned long gvsz,
			       unsigned long order);
void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask);
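
/*
 * Sketch of the assumed @hbase/@hmask convention (inferred from callers
 * elsewhere in KVM RISC-V, not defined in this header): @hmask selects
 * VCPU ids relative to @hbase, and @hbase == -1UL requests all VCPUs,
 * e.g.
 *
 *	kvm_riscv_hfence_gvma_vmid_all(kvm, -1UL, 0);
 */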

/* G-stage (guest physical address) MMU management */
int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
			     phys_addr_t hpa, unsigned long size,
			     bool writable, bool in_atomic);
void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa,
			      unsigned long size);
int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
			 struct kvm_memory_slot *memslot,
			 gpa_t gpa, unsigned long hva, bool is_write);
int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm);
void kvm_riscv_gstage_free_pgd(struct kvm *kvm);
void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu);
void __init kvm_riscv_gstage_mode_detect(void);
unsigned long __init kvm_riscv_gstage_mode(void);
int kvm_riscv_gstage_gpa_bits(void);

/* G-stage VMID allocation and versioning */
void __init kvm_riscv_gstage_vmid_detect(void);
unsigned long kvm_riscv_gstage_vmid_bits(void);
int kvm_riscv_gstage_vmid_init(struct kvm *kvm);
bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid);
void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu);

void __kvm_riscv_unpriv_trap(void);

/* Unprivileged (guest view) memory access and trap handling */
unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
					 bool read_insn,
					 unsigned long guest_addr,
					 struct kvm_cpu_trap *trap);
void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
				  struct kvm_cpu_trap *trap);
int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
			struct kvm_cpu_trap *trap);

void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch);

/* VCPU interrupt and power-state management */
int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu);
bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, unsigned long mask);
void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);

#endif /* __RISCV_KVM_HOST_H__ */