// SPDX-License-Identifier: GPL-2.0
/*
 * AArch64 code
 *
 * Copyright (C) 2018, Red Hat, Inc.
 */

#include <linux/compiler.h>
#include <assert.h>

#include "guest_modes.h"
#include "kvm_util.h"
#include "processor.h"
#include <linux/bitfield.h>

#define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN	0xac0000

static vm_vaddr_t exception_handlers;

static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
{
	return (v + vm->page_size) & ~(vm->page_size - 1);
}

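/*
 * Index helpers: each returns the index of the 8-byte descriptor that a
 * guest virtual address selects at the PGD, PUD, PMD or PTE level, derived
 * from the VM's page size, VA width and number of page table levels.
 */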
static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
	uint64_t mask = (1UL << (vm->va_bits - shift)) - 1;

	return (gva >> shift) & mask;
}

static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift;
	uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;

	TEST_ASSERT(vm->pgtable_levels == 4,
		    "Mode %d does not have 4 page table levels", vm->mode);

	return (gva >> shift) & mask;
}

static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	unsigned int shift = (vm->page_shift - 3) + vm->page_shift;
	uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;

	TEST_ASSERT(vm->pgtable_levels >= 3,
		    "Mode %d does not have >= 3 page table levels", vm->mode);

	return (gva >> shift) & mask;
}

static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;
	return (gva >> vm->page_shift) & mask;
}

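/*
 * Strip the attribute bits from a descriptor, leaving only the output
 * (next-level table or page) address.
 */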
static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry)
{
	uint64_t mask = ((1UL << (vm->va_bits - vm->page_shift)) - 1) << vm->page_shift;
	return entry & mask;
}

static uint64_t ptrs_per_pgd(struct kvm_vm *vm)
{
	unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
	return 1 << (vm->va_bits - shift);
}

static uint64_t __maybe_unused ptrs_per_pte(struct kvm_vm *vm)
{
	return 1 << (vm->page_shift - 3);
}

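/*
 * Allocate the top-level stage-1 page table for the guest. The PGD may span
 * several pages when the VA width is large, so round its size up to whole
 * pages and allocate them from the page-table memslot. Only done once.
 */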
void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
	size_t nr_pages = page_align(vm, ptrs_per_pgd(vm) * 8) / vm->page_size;

	if (vm->pgd_created)
		return;

	vm->pgd = vm_phy_pages_alloc(vm, nr_pages,
				     KVM_GUEST_PAGE_TABLE_MIN_PADDR,
				     vm->memslots[MEM_REGION_PT]);
	vm->pgd_created = true;
}

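/*
 * Map a single guest page: walk the tables from the PGD, allocating any
 * missing intermediate table (descriptor bits [1:0] = 0b11, a valid table
 * entry), then write the leaf descriptor with the requested MAIR attribute
 * index and the Access Flag set.
 */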
static void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
			 uint64_t flags)
{
	uint8_t attr_idx = flags & 7;
	uint64_t *ptep;

	TEST_ASSERT((vaddr % vm->page_size) == 0,
		    "Virtual address not on page boundary,\n"
		    "  vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
				     (vaddr >> vm->page_shift)),
		    "Invalid virtual address, vaddr: 0x%lx", vaddr);
	TEST_ASSERT((paddr % vm->page_size) == 0,
		    "Physical address not on page boundary,\n"
		    "  paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
		    "Physical address beyond maximum supported,\n"
		    "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		    paddr, vm->max_gfn, vm->page_size);

	ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, vaddr) * 8;
	if (!*ptep)
		*ptep = vm_alloc_page_table(vm) | 3;

	switch (vm->pgtable_levels) {
	case 4:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8;
		if (!*ptep)
			*ptep = vm_alloc_page_table(vm) | 3;
		/* fall through */
	case 3:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8;
		if (!*ptep)
			*ptep = vm_alloc_page_table(vm) | 3;
		/* fall through */
	case 2:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, vaddr) * 8;
		break;
	default:
		TEST_FAIL("Page table levels must be 2, 3, or 4");
	}

	*ptep = paddr | 3;
	*ptep |= (attr_idx << 2) | (1 << 10) /* Access Flag */;
}

void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
	uint64_t attr_idx = MT_NORMAL;

	_virt_pg_map(vm, vaddr, paddr, attr_idx);
}

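/*
 * Walk the guest's page tables through their host mappings and return a
 * host pointer to the leaf descriptor for the given guest virtual address.
 * A missing entry at any level fails the test.
 */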
uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint64_t *ptep;

	if (!vm->pgd_created)
		goto unmapped_gva;

	ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, gva) * 8;
	if (!*ptep)
		goto unmapped_gva;

	switch (vm->pgtable_levels) {
	case 4:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, gva) * 8;
		if (!*ptep)
			goto unmapped_gva;
		/* fall through */
	case 3:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, gva) * 8;
		if (!*ptep)
			goto unmapped_gva;
		/* fall through */
	case 2:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, gva) * 8;
		if (!*ptep)
			goto unmapped_gva;
		break;
	default:
		TEST_FAIL("Page table levels must be 2, 3, or 4");
	}

	return ptep;

unmapped_gva:
	TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva);
	exit(EXIT_FAILURE);
}

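/*
 * Translate a guest virtual address to the guest physical address it maps
 * to, combining the leaf descriptor's output address with the page offset.
 */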
vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint64_t *ptep = virt_get_pte_hva(vm, gva);

	return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));
}

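/*
 * Recursively print the non-empty entries of one page table page and of the
 * tables it points to. Compiled out unless DEBUG is defined.
 */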
static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level)
{
#ifdef DEBUG
	static const char * const type[] = { "", "pud", "pmd", "pte" };
	uint64_t pte, *ptep;

	if (level == 4)
		return;

	for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) {
		ptep = addr_gpa2hva(vm, pte);
		if (!*ptep)
			continue;
		fprintf(stream, "%*s%s: %lx: %lx at %p\n", indent, "", type[level], pte, *ptep, ptep);
		pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level + 1);
	}
#endif
}

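/*
 * Dump the guest's page tables starting from the PGD. The starting level
 * passed to pte_dump() accounts for VMs with fewer than four levels.
 */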
void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	int level = 4 - (vm->pgtable_levels - 1);
	uint64_t pgd, *ptep;

	if (!vm->pgd_created)
		return;

	for (pgd = vm->pgd; pgd < vm->pgd + ptrs_per_pgd(vm) * 8; pgd += 8) {
		ptep = addr_gpa2hva(vm, pgd);
		if (!*ptep)
			continue;
		fprintf(stream, "%*spgd: %lx: %lx at %p\n", indent, "", pgd, *ptep, ptep);
		pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level);
	}
}

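/*
 * Initialize a vCPU for the selftests: run KVM_ARM_VCPU_INIT (falling back
 * to the host's preferred target when the caller passes no init), enable
 * FP/ASIMD, program TCR_EL1 for the VM's granule, VA and IPA sizes, and set
 * up SCTLR_EL1, MAIR_EL1 and TTBR0_EL1 so the guest runs with the MMU on.
 * TPIDR_EL1 carries the vCPU id for guest_get_vcpuid().
 */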
void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init)
{
	struct kvm_vcpu_init default_init = { .target = -1, };
	struct kvm_vm *vm = vcpu->vm;
	uint64_t sctlr_el1, tcr_el1;

	if (!init)
		init = &default_init;

	if (init->target == -1) {
		struct kvm_vcpu_init preferred;
		vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &preferred);
		init->target = preferred.target;
	}

	vcpu_ioctl(vcpu, KVM_ARM_VCPU_INIT, init);

	/*
	 * Enable FP/ASIMD to avoid trapping when accessing Q0-Q15
	 * registers, which the variable argument list macros do.
	 */
	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CPACR_EL1), 3 << 20);

	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), &sctlr_el1);
	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1), &tcr_el1);

	/* Configure base granule size */
	switch (vm->mode) {
	case VM_MODE_P52V48_4K:
		TEST_FAIL("AArch64 does not support 4K sized pages "
			  "with 52-bit physical address ranges");
	case VM_MODE_PXXV48_4K:
		TEST_FAIL("AArch64 does not support 4K sized pages "
			  "with ANY-bit physical address ranges");
	case VM_MODE_P52V48_64K:
	case VM_MODE_P48V48_64K:
	case VM_MODE_P40V48_64K:
	case VM_MODE_P36V48_64K:
		tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
		break;
	case VM_MODE_P48V48_16K:
	case VM_MODE_P40V48_16K:
	case VM_MODE_P36V48_16K:
	case VM_MODE_P36V47_16K:
		tcr_el1 |= 2ul << 14; /* TG0 = 16KB */
		break;
	case VM_MODE_P48V48_4K:
	case VM_MODE_P40V48_4K:
	case VM_MODE_P36V48_4K:
		tcr_el1 |= 0ul << 14; /* TG0 = 4KB */
		break;
	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
	}

	/* Configure output size */
	switch (vm->mode) {
	case VM_MODE_P52V48_64K:
		tcr_el1 |= 6ul << 32; /* IPS = 52 bits */
		break;
	case VM_MODE_P48V48_4K:
	case VM_MODE_P48V48_16K:
	case VM_MODE_P48V48_64K:
		tcr_el1 |= 5ul << 32; /* IPS = 48 bits */
		break;
	case VM_MODE_P40V48_4K:
	case VM_MODE_P40V48_16K:
	case VM_MODE_P40V48_64K:
		tcr_el1 |= 2ul << 32; /* IPS = 40 bits */
		break;
	case VM_MODE_P36V48_4K:
	case VM_MODE_P36V48_16K:
	case VM_MODE_P36V48_64K:
	case VM_MODE_P36V47_16K:
		tcr_el1 |= 1ul << 32; /* IPS = 36 bits */
		break;
	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
	}

	sctlr_el1 |= (1 << 0) | (1 << 2) | (1 << 12) /* M | C | I */;
	/* TCR_EL1 |= IRGN0:WBWA | ORGN0:WBWA | SH0:Inner-Shareable */
	tcr_el1 |= (1 << 8) | (1 << 10) | (3 << 12);
	tcr_el1 |= (64 - vm->va_bits) /* T0SZ */;

	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), sctlr_el1);
	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1), tcr_el1);
	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MAIR_EL1), DEFAULT_MAIR_EL1);
	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TTBR0_EL1), vm->pgd);
	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TPIDR_EL1), vcpu->id);
}

void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
{
	uint64_t pstate, pc;

	vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pstate), &pstate);
	vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc), &pc);

	fprintf(stream, "%*spstate: 0x%.16lx pc: 0x%.16lx\n",
		indent, "", pstate, pc);
}

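/*
 * Add a vCPU and get it ready to run guest code: allocate a guest stack in
 * the data memslot, apply aarch64_vcpu_setup(), then point SP_EL1 at the top
 * of the stack and PC at guest_code.
 */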
struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
				  struct kvm_vcpu_init *init, void *guest_code)
{
	size_t stack_size;
	uint64_t stack_vaddr;
	struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id);

	stack_size = vm->page_size == 4096 ? DEFAULT_STACK_PGS * vm->page_size :
					     vm->page_size;
	stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
				       DEFAULT_ARM64_GUEST_STACK_VADDR_MIN,
				       MEM_REGION_DATA);

	aarch64_vcpu_setup(vcpu, init);

	vcpu_set_reg(vcpu, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size);
	vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);

	return vcpu;
}

struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
				  void *guest_code)
{
	return aarch64_vcpu_add(vm, vcpu_id, NULL, guest_code);
}

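/* Pass up to eight guest-function arguments in x0-x7, per the AAPCS64. */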
void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
{
	va_list ap;
	int i;

	TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args,\n"
		    "  num: %u\n", num);

	va_start(ap, num);

	for (i = 0; i < num; i++) {
		vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.regs[i]),
			     va_arg(ap, uint64_t));
	}

	va_end(ap);
}

void kvm_exit_unexpected_exception(int vector, uint64_t ec, bool valid_ec)
{
	ucall(UCALL_UNHANDLED, 3, vector, ec, valid_ec);
	while (1)
		;
}

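/*
 * Host-side check run after a vCPU exit: if the guest reported an unhandled
 * exception via UCALL_UNHANDLED, fail the test with the vector and, when
 * valid, the exception class.
 */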
void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	if (get_ucall(vcpu, &uc) != UCALL_UNHANDLED)
		return;

	if (uc.args[2]) /* valid_ec */ {
		assert(VECTOR_IS_SYNC(uc.args[0]));
		TEST_FAIL("Unexpected exception (vector:0x%lx, ec:0x%lx)",
			  uc.args[0], uc.args[1]);
	} else {
		assert(!VECTOR_IS_SYNC(uc.args[0]));
		TEST_FAIL("Unexpected exception (vector:0x%lx)",
			  uc.args[0]);
	}
}

struct handlers {
	handler_fn exception_handlers[VECTOR_NUM][ESR_EC_NUM];
};

void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu)
{
	extern char vectors;

	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_VBAR_EL1), (uint64_t)&vectors);
}

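/*
 * Guest-side exception dispatcher, called from the assembly vector stubs.
 * Synchronous exceptions are routed by vector and ESR_EL1 exception class;
 * all other vectors use EC slot 0. Anything without a registered handler is
 * reported back to the host as unexpected.
 */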
void route_exception(struct ex_regs *regs, int vector)
{
	struct handlers *handlers = (struct handlers *)exception_handlers;
	bool valid_ec;
	int ec = 0;

	switch (vector) {
	case VECTOR_SYNC_CURRENT:
	case VECTOR_SYNC_LOWER_64:
		ec = (read_sysreg(esr_el1) >> ESR_EC_SHIFT) & ESR_EC_MASK;
		valid_ec = true;
		break;
	case VECTOR_IRQ_CURRENT:
	case VECTOR_IRQ_LOWER_64:
	case VECTOR_FIQ_CURRENT:
	case VECTOR_FIQ_LOWER_64:
	case VECTOR_ERROR_CURRENT:
	case VECTOR_ERROR_LOWER_64:
		ec = 0;
		valid_ec = false;
		break;
	default:
		valid_ec = false;
		goto unexpected_exception;
	}

	if (handlers && handlers->exception_handlers[vector][ec])
		return handlers->exception_handlers[vector][ec](regs);

unexpected_exception:
	kvm_exit_unexpected_exception(vector, ec, valid_ec);
}

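/*
 * Allocate the guest's handler table and publish its guest virtual address
 * by writing it through the host mapping of the guest's exception_handlers
 * pointer.
 */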
void vm_init_descriptor_tables(struct kvm_vm *vm)
{
	vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers),
					vm->page_size, MEM_REGION_DATA);

	*(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
}

void vm_install_sync_handler(struct kvm_vm *vm, int vector, int ec,
			     void (*handler)(struct ex_regs *))
{
	struct handlers *handlers = addr_gva2hva(vm, vm->handlers);

	assert(VECTOR_IS_SYNC(vector));
	assert(vector < VECTOR_NUM);
	assert(ec < ESR_EC_NUM);
	handlers->exception_handlers[vector][ec] = handler;
}

void vm_install_exception_handler(struct kvm_vm *vm, int vector,
				  void (*handler)(struct ex_regs *))
{
	struct handlers *handlers = addr_gva2hva(vm, vm->handlers);

	assert(!VECTOR_IS_SYNC(vector));
	assert(vector < VECTOR_NUM);
	handlers->exception_handlers[vector][0] = handler;
}

uint32_t guest_get_vcpuid(void)
{
	return read_sysreg(tpidr_el1);
}

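/*
 * Probe which translation granules the host supports at the given IPA size
 * by creating a scratch VM and vCPU and reading the TGRAN4/16/64 fields of
 * ID_AA64MMFR0_EL1.
 */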
void aarch64_get_supported_page_sizes(uint32_t ipa,
				      bool *ps4k, bool *ps16k, bool *ps64k)
{
	struct kvm_vcpu_init preferred_init;
	int kvm_fd, vm_fd, vcpu_fd, err;
	uint64_t val;
	struct kvm_one_reg reg = {
		.id = KVM_ARM64_SYS_REG(SYS_ID_AA64MMFR0_EL1),
		.addr = (uint64_t)&val,
	};

	kvm_fd = open_kvm_dev_path_or_exit();
	vm_fd = __kvm_ioctl(kvm_fd, KVM_CREATE_VM, (void *)(unsigned long)ipa);
	TEST_ASSERT(vm_fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm_fd));

	vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
	TEST_ASSERT(vcpu_fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VCPU, vcpu_fd));

	err = ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &preferred_init);
	TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_ARM_PREFERRED_TARGET, err));
	err = ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &preferred_init);
	TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_ARM_VCPU_INIT, err));

	err = ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_GET_ONE_REG, vcpu_fd));

	*ps4k = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_TGRAN4), val) != 0xf;
	*ps64k = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_TGRAN64), val) == 0;
	*ps16k = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_TGRAN16), val) != 0;

	close(vcpu_fd);
	close(vm_fd);
	close(kvm_fd);
}

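/*
 * Make an SMCCC call from the guest over the HVC conduit: the function ID
 * goes in w0 and the arguments in x1-x7, and the x0-x3 return values are
 * copied into *res.
 */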
void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
	       uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
	       uint64_t arg6, struct arm_smccc_res *res)
{
	asm volatile("mov w0, %w[function_id]\n"
		     "mov x1, %[arg0]\n"
		     "mov x2, %[arg1]\n"
		     "mov x3, %[arg2]\n"
		     "mov x4, %[arg3]\n"
		     "mov x5, %[arg4]\n"
		     "mov x6, %[arg5]\n"
		     "mov x7, %[arg6]\n"
		     "hvc #0\n"
		     "mov %[res0], x0\n"
		     "mov %[res1], x1\n"
		     "mov %[res2], x2\n"
		     "mov %[res3], x3\n"
		     : [res0] "=r"(res->a0), [res1] "=r"(res->a1),
		       [res2] "=r"(res->a2), [res3] "=r"(res->a3)
		     : [function_id] "r"(function_id), [arg0] "r"(arg0),
		       [arg1] "r"(arg1), [arg2] "r"(arg2), [arg3] "r"(arg3),
		       [arg4] "r"(arg4), [arg5] "r"(arg5), [arg6] "r"(arg6)
		     : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7");
}

void kvm_selftest_arch_init(void)
{
	/*
	 * arm64 doesn't have a true default mode, so start by computing the
	 * available IPA space and page sizes early.
	 */
	guest_modes_append_default();
}

void vm_vaddr_populate_bitmap(struct kvm_vm *vm)
{
	/*
	 * arm64 selftests use only TTBR0_EL1, meaning that the valid VA space
	 * is [0, 2^(64 - TCR_EL1.T0SZ)).
	 */
	sparsebit_set_num(vm->vpages_valid, 0,
			  (1ULL << vm->va_bits) >> vm->page_shift);
}