Lines Matching refs:vm
100 int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap) in vm_enable_cap() argument
104 ret = ioctl(vm->fd, KVM_ENABLE_CAP, cap); in vm_enable_cap()
124 int vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id, in vcpu_enable_cap() argument
127 struct vcpu *vcpu = vcpu_find(vm, vcpu_id); in vcpu_enable_cap()
139 void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size) in vm_enable_dirty_ring() argument
145 vm_enable_cap(vm, &cap); in vm_enable_dirty_ring()
146 vm->dirty_ring_size = ring_size; in vm_enable_dirty_ring()
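For context, a minimal sketch of how a test might drive vm_enable_cap() from the lines above; the capability (KVM_CAP_DIRTY_LOG_RING) and the ring-size argument are illustrative assumptions, not taken from the listing, and the usual selftest header (kvm_util.h) is assumed.

static void example_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_DIRTY_LOG_RING,	/* assumed capability */
		.args = { ring_size },		/* assumed: ring size in bytes */
	};

	/* Wraps the KVM_ENABLE_CAP ioctl on vm->fd, as the listing shows. */
	vm_enable_cap(vm, &cap);
}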
149 static void vm_open(struct kvm_vm *vm, int perm) in vm_open() argument
151 vm->kvm_fd = _open_kvm_dev_path_or_exit(perm); in vm_open()
158 vm->fd = ioctl(vm->kvm_fd, KVM_CREATE_VM, vm->type); in vm_open()
159 TEST_ASSERT(vm->fd >= 0, "KVM_CREATE_VM ioctl failed, " in vm_open()
160 "rc: %i errno: %i", vm->fd, errno); in vm_open()
219 struct kvm_vm *vm; in vm_create() local
224 vm = calloc(1, sizeof(*vm)); in vm_create()
225 TEST_ASSERT(vm != NULL, "Insufficient Memory"); in vm_create()
227 INIT_LIST_HEAD(&vm->vcpus); in vm_create()
228 vm->regions.gpa_tree = RB_ROOT; in vm_create()
229 vm->regions.hva_tree = RB_ROOT; in vm_create()
230 hash_init(vm->regions.slot_hash); in vm_create()
232 vm->mode = mode; in vm_create()
233 vm->type = 0; in vm_create()
235 vm->pa_bits = vm_guest_mode_params[mode].pa_bits; in vm_create()
236 vm->va_bits = vm_guest_mode_params[mode].va_bits; in vm_create()
237 vm->page_size = vm_guest_mode_params[mode].page_size; in vm_create()
238 vm->page_shift = vm_guest_mode_params[mode].page_shift; in vm_create()
241 switch (vm->mode) { in vm_create()
243 vm->pgtable_levels = 4; in vm_create()
246 vm->pgtable_levels = 3; in vm_create()
249 vm->pgtable_levels = 4; in vm_create()
252 vm->pgtable_levels = 3; in vm_create()
255 vm->pgtable_levels = 4; in vm_create()
258 vm->pgtable_levels = 3; in vm_create()
262 kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits); in vm_create()
268 TEST_ASSERT(vm->va_bits == 48 || vm->va_bits == 57, in vm_create()
270 vm->va_bits); in vm_create()
272 vm->pa_bits); in vm_create()
273 vm->pgtable_levels = 4; in vm_create()
274 vm->va_bits = 48; in vm_create()
280 vm->pgtable_levels = 5; in vm_create()
283 vm->pgtable_levels = 5; in vm_create()
290 if (vm->pa_bits != 40) in vm_create()
291 vm->type = KVM_VM_TYPE_ARM_IPA_SIZE(vm->pa_bits); in vm_create()
294 vm_open(vm, perm); in vm_create()
297 vm->vpages_valid = sparsebit_alloc(); in vm_create()
298 sparsebit_set_num(vm->vpages_valid, in vm_create()
299 0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift); in vm_create()
300 sparsebit_set_num(vm->vpages_valid, in vm_create()
301 (~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift, in vm_create()
302 (1ULL << (vm->va_bits - 1)) >> vm->page_shift); in vm_create()
305 vm->max_gfn = vm_compute_max_gfn(vm); in vm_create()
308 vm->vpages_mapped = sparsebit_alloc(); in vm_create()
310 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, in vm_create()
313 return vm; in vm_create()
344 struct kvm_vm *vm; in vm_create_with_vcpus() local
366 vm = vm_create(mode, pages, O_RDWR); in vm_create_with_vcpus()
368 kvm_vm_elf_load(vm, program_invocation_name); in vm_create_with_vcpus()
371 vm_create_irqchip(vm); in vm_create_with_vcpus()
377 vm_vcpu_add_default(vm, vcpuid, guest_code); in vm_create_with_vcpus()
380 return vm; in vm_create_with_vcpus()
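A sketch of the setup sequence visible in vm_create_with_vcpus() above: create the VM, load the test ELF, create the irqchip, then add default vCPUs. The page count is a placeholder, guest_code is whatever guest entry point the test provides, and the usual selftest headers are assumed.

static struct kvm_vm *example_create_vm(uint32_t nr_vcpus, void *guest_code)
{
	struct kvm_vm *vm;
	uint32_t vcpuid;

	/* 512 slot-0 pages is a placeholder; real callers size this to need. */
	vm = vm_create(VM_MODE_DEFAULT, 512, O_RDWR);
	kvm_vm_elf_load(vm, program_invocation_name);
	vm_create_irqchip(vm);	/* where an in-kernel irqchip is supported */

	for (vcpuid = 0; vcpuid < nr_vcpus; vcpuid++)
		vm_vcpu_add_default(vm, vcpuid, guest_code);

	return vm;
}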
433 void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log) in kvm_vm_get_dirty_log() argument
438 ret = ioctl(vm->fd, KVM_GET_DIRTY_LOG, &args); in kvm_vm_get_dirty_log()
443 void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log, in kvm_vm_clear_dirty_log() argument
451 ret = ioctl(vm->fd, KVM_CLEAR_DIRTY_LOG, &args); in kvm_vm_clear_dirty_log()
456 uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm) in kvm_vm_reset_dirty_ring() argument
458 return ioctl(vm->fd, KVM_RESET_DIRTY_RINGS); in kvm_vm_reset_dirty_ring()
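Below, a hedged sketch of a dirty-log collection pass over one memslot using the helpers above. The listing truncates kvm_vm_clear_dirty_log()'s prototype; the trailing (first_page, num_pages) arguments used here follow the KVM_CLEAR_DIRTY_LOG semantics and are an assumption. <stdlib.h> and the selftest headers are assumed.

static void example_collect_dirty_log(struct kvm_vm *vm, int slot,
				      uint64_t num_pages)
{
	/* One bit per guest page, 64-bit aligned as KVM expects. */
	uint64_t *bitmap = calloc((num_pages + 63) / 64, sizeof(uint64_t));

	TEST_ASSERT(bitmap, "Failed to allocate dirty bitmap");

	kvm_vm_get_dirty_log(vm, slot, bitmap);
	/* ... walk the bitmap and verify guest writes here ... */
	kvm_vm_clear_dirty_log(vm, slot, bitmap, 0, num_pages);

	free(bitmap);
}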
481 userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end) in userspace_mem_region_find() argument
485 for (node = vm->regions.gpa_tree.rb_node; node; ) { in userspace_mem_region_find()
520 kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start, in kvm_userspace_memory_region_find() argument
525 region = userspace_mem_region_find(vm, start, end); in kvm_userspace_memory_region_find()
548 struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid) in vcpu_find() argument
552 list_for_each_entry(vcpu, &vm->vcpus, list) { in vcpu_find()
572 static void vm_vcpu_rm(struct kvm_vm *vm, struct vcpu *vcpu) in vm_vcpu_rm() argument
577 ret = munmap(vcpu->dirty_gfns, vm->dirty_ring_size); in vm_vcpu_rm()
611 static void __vm_mem_region_delete(struct kvm_vm *vm, in __vm_mem_region_delete() argument
618 rb_erase(&region->gpa_node, &vm->regions.gpa_tree); in __vm_mem_region_delete()
619 rb_erase(&region->hva_node, &vm->regions.hva_tree); in __vm_mem_region_delete()
624 ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region); in __vm_mem_region_delete()
684 int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len) in kvm_memcmp_hva_gva() argument
699 uintptr_t ptr2 = (uintptr_t)addr_gva2hva(vm, gva + offset); in kvm_memcmp_hva_gva()
706 if ((ptr1 >> vm->page_shift) != ((ptr1 + amt) >> vm->page_shift)) in kvm_memcmp_hva_gva()
707 amt = vm->page_size - (ptr1 % vm->page_size); in kvm_memcmp_hva_gva()
708 if ((ptr2 >> vm->page_shift) != ((ptr2 + amt) >> vm->page_shift)) in kvm_memcmp_hva_gva()
709 amt = vm->page_size - (ptr2 % vm->page_size); in kvm_memcmp_hva_gva()
711 assert((ptr1 >> vm->page_shift) == ((ptr1 + amt - 1) >> vm->page_shift)); in kvm_memcmp_hva_gva()
712 assert((ptr2 >> vm->page_shift) == ((ptr2 + amt - 1) >> vm->page_shift)); in kvm_memcmp_hva_gva()
804 void vm_userspace_mem_region_add(struct kvm_vm *vm, in vm_userspace_mem_region_add() argument
814 TEST_ASSERT(vm_adjust_num_guest_pages(vm->mode, npages) == npages, in vm_userspace_mem_region_add()
816 "Try npages=%d", vm_adjust_num_guest_pages(vm->mode, npages)); in vm_userspace_mem_region_add()
818 TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical " in vm_userspace_mem_region_add()
821 guest_paddr, vm->page_size); in vm_userspace_mem_region_add()
822 TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1) in vm_userspace_mem_region_add()
823 <= vm->max_gfn, "Physical range beyond maximum " in vm_userspace_mem_region_add()
827 guest_paddr, npages, vm->max_gfn, vm->page_size); in vm_userspace_mem_region_add()
834 vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1); in vm_userspace_mem_region_add()
841 guest_paddr, npages, vm->page_size, in vm_userspace_mem_region_add()
846 hash_for_each_possible(vm->regions.slot_hash, region, slot_node, in vm_userspace_mem_region_add()
864 region->mmap_size = npages * vm->page_size; in vm_userspace_mem_region_add()
927 ret = madvise(region->host_mem, npages * vm->page_size, in vm_userspace_mem_region_add()
930 region->host_mem, npages * vm->page_size, in vm_userspace_mem_region_add()
936 guest_paddr >> vm->page_shift, npages); in vm_userspace_mem_region_add()
940 region->region.memory_size = npages * vm->page_size; in vm_userspace_mem_region_add()
942 ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region); in vm_userspace_mem_region_add()
951 vm_userspace_mem_region_gpa_insert(&vm->regions.gpa_tree, region); in vm_userspace_mem_region_add()
952 vm_userspace_mem_region_hva_insert(&vm->regions.hva_tree, region); in vm_userspace_mem_region_add()
953 hash_add(vm->regions.slot_hash, &region->slot_node, slot); in vm_userspace_mem_region_add()
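A sketch of adding an extra memslot with vm_userspace_mem_region_add(); the slot number, guest physical address and page count are placeholders, and the argument order (src_type, guest_paddr, slot, npages, flags) is assumed from the in-tree prototype since the listing only shows fragments of it. The identity map via virt_map() (listed further below) is likewise a hedged illustration of its final page-count argument.

	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
				    0x10000000 /* guest_paddr, placeholder */,
				    10         /* slot, placeholder */,
				    64         /* npages, placeholder */,
				    KVM_MEM_LOG_DIRTY_PAGES);

	/* Optionally identity-map the new region into the guest page tables. */
	virt_map(vm, 0x10000000, 0x10000000, 64);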
985 memslot2region(struct kvm_vm *vm, uint32_t memslot) in memslot2region() argument
989 hash_for_each_possible(vm->regions.slot_hash, region, slot_node, in memslot2region()
997 vm_dump(stderr, vm, 2); in memslot2region()
1016 void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags) in vm_mem_region_set_flags() argument
1021 region = memslot2region(vm, slot); in vm_mem_region_set_flags()
1025 ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region); in vm_mem_region_set_flags()
1046 void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa) in vm_mem_region_move() argument
1051 region = memslot2region(vm, slot); in vm_mem_region_move()
1055 ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region); in vm_mem_region_move()
1075 void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot) in vm_mem_region_delete() argument
1077 __vm_mem_region_delete(vm, memslot2region(vm, slot), true); in vm_mem_region_delete()
1123 void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid) in vm_vcpu_add() argument
1128 vcpu = vcpu_find(vm, vcpuid); in vm_vcpu_add()
1140 vcpu->fd = ioctl(vm->fd, KVM_CREATE_VCPU, vcpuid); in vm_vcpu_add()
1153 list_add(&vcpu->list, &vm->vcpus); in vm_vcpu_add()
1176 static vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, in vm_vaddr_unused_gap() argument
1179 uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift; in vm_vaddr_unused_gap()
1182 uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift; in vm_vaddr_unused_gap()
1183 if ((pgidx_start * vm->page_size) < vaddr_min) in vm_vaddr_unused_gap()
1187 if (!sparsebit_is_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
1189 pgidx_start = sparsebit_next_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
1198 if (sparsebit_is_clear_num(vm->vpages_mapped, in vm_vaddr_unused_gap()
1201 pgidx_start = sparsebit_next_clear_num(vm->vpages_mapped, in vm_vaddr_unused_gap()
1210 if (!sparsebit_is_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
1213 vm->vpages_valid, pgidx_start, pages); in vm_vaddr_unused_gap()
1226 TEST_ASSERT(sparsebit_is_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
1232 TEST_ASSERT(sparsebit_is_clear_num(vm->vpages_mapped, in vm_vaddr_unused_gap()
1239 return pgidx_start * vm->page_size; in vm_vaddr_unused_gap()
1263 vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min) in vm_vaddr_alloc() argument
1265 uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0); in vm_vaddr_alloc()
1267 virt_pgd_alloc(vm); in vm_vaddr_alloc()
1268 vm_paddr_t paddr = vm_phy_pages_alloc(vm, pages, in vm_vaddr_alloc()
1269 KVM_UTIL_MIN_PFN * vm->page_size, 0); in vm_vaddr_alloc()
1275 vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min); in vm_vaddr_alloc()
1279 pages--, vaddr += vm->page_size, paddr += vm->page_size) { in vm_vaddr_alloc()
1281 virt_pg_map(vm, vaddr, paddr); in vm_vaddr_alloc()
1283 sparsebit_set(vm->vpages_mapped, in vm_vaddr_alloc()
1284 vaddr >> vm->page_shift); in vm_vaddr_alloc()
1304 vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages) in vm_vaddr_alloc_pages() argument
1306 return vm_vaddr_alloc(vm, nr_pages * getpagesize(), KVM_UTIL_MIN_VADDR); in vm_vaddr_alloc_pages()
1323 vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm) in vm_vaddr_alloc_page() argument
1325 return vm_vaddr_alloc_pages(vm, 1); in vm_vaddr_alloc_page()
1345 void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, in virt_map() argument
1348 size_t page_size = vm->page_size; in virt_map()
1355 virt_pg_map(vm, vaddr, paddr); in virt_map()
1378 void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa) in addr_gpa2hva() argument
1382 region = userspace_mem_region_find(vm, gpa, gpa); in addr_gpa2hva()
1409 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva) in addr_hva2gpa() argument
1413 for (node = vm->regions.hva_tree.rb_node; node; ) { in addr_hva2gpa()
1453 void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa) in addr_gpa2alias() argument
1458 region = userspace_mem_region_find(vm, gpa, gpa); in addr_gpa2alias()
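A short sketch combining the allocation and translation helpers listed here and further below (addr_gva2hva, vm_get_page_size): allocate a page of guest virtual memory, translate it to a host pointer, and initialize it from the host. Assumes the usual selftest headers plus <string.h>.

static vm_vaddr_t example_alloc_and_zero(struct kvm_vm *vm)
{
	vm_vaddr_t gva = vm_vaddr_alloc_page(vm);
	void *hva = addr_gva2hva(vm, gva);	/* gva -> gpa -> hva */

	memset(hva, 0, vm_get_page_size(vm));
	return gva;
}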
1481 void vm_create_irqchip(struct kvm_vm *vm) in vm_create_irqchip() argument
1485 ret = ioctl(vm->fd, KVM_CREATE_IRQCHIP, 0); in vm_create_irqchip()
1489 vm->has_irqchip = true; in vm_create_irqchip()
1507 struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid) in vcpu_state() argument
1509 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_state()
1529 void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid) in vcpu_run() argument
1531 int ret = _vcpu_run(vm, vcpuid); in vcpu_run()
1536 int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid) in _vcpu_run() argument
1538 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in _vcpu_run()
1546 assert_on_unhandled_exception(vm, vcpuid); in _vcpu_run()
1551 int vcpu_get_fd(struct kvm_vm *vm, uint32_t vcpuid) in vcpu_get_fd() argument
1553 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_get_fd()
1560 void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid) in vcpu_run_complete_io() argument
1562 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_run_complete_io()
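A minimal sketch of the run loop these helpers support: vcpu_run() asserts if KVM_RUN fails, and vcpu_state() returns the vCPU's kvm_run structure for inspecting the exit reason. The exit handling is a placeholder; real tests typically key off ucalls from the guest.

static void example_run_vcpu(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct kvm_run *run = vcpu_state(vm, vcpuid);

	for (;;) {
		vcpu_run(vm, vcpuid);

		/* Placeholder exit handling. */
		if (run->exit_reason == KVM_EXIT_IO)
			break;

		TEST_FAIL("Unexpected exit reason: %u", run->exit_reason);
	}
}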
1576 void vcpu_set_guest_debug(struct kvm_vm *vm, uint32_t vcpuid, in vcpu_set_guest_debug() argument
1579 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_set_guest_debug()
1600 void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid, in vcpu_set_mp_state() argument
1603 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_set_mp_state()
1629 struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vm *vm, uint32_t vcpuid) in vcpu_get_reg_list() argument
1634 ret = _vcpu_ioctl(vm, vcpuid, KVM_GET_REG_LIST, &reg_list_n); in vcpu_get_reg_list()
1638 vcpu_ioctl(vm, vcpuid, KVM_GET_REG_LIST, reg_list); in vcpu_get_reg_list()
1657 void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs) in vcpu_regs_get() argument
1659 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_regs_get()
1684 void vcpu_regs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs) in vcpu_regs_set() argument
1686 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_regs_set()
1697 void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid, in vcpu_events_get() argument
1700 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_events_get()
1710 void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid, in vcpu_events_set() argument
1713 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_events_set()
1725 void vcpu_nested_state_get(struct kvm_vm *vm, uint32_t vcpuid, in vcpu_nested_state_get() argument
1728 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_nested_state_get()
1739 int vcpu_nested_state_set(struct kvm_vm *vm, uint32_t vcpuid, in vcpu_nested_state_set() argument
1742 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_nested_state_set()
1773 void vcpu_sregs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs) in vcpu_sregs_get() argument
1775 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_sregs_get()
1800 void vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs) in vcpu_sregs_set() argument
1802 int ret = _vcpu_sregs_set(vm, vcpuid, sregs); in vcpu_sregs_set()
1807 int _vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs) in _vcpu_sregs_set() argument
1809 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in _vcpu_sregs_set()
1816 void vcpu_fpu_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_fpu *fpu) in vcpu_fpu_get() argument
1820 ret = _vcpu_ioctl(vm, vcpuid, KVM_GET_FPU, fpu); in vcpu_fpu_get()
1825 void vcpu_fpu_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_fpu *fpu) in vcpu_fpu_set() argument
1829 ret = _vcpu_ioctl(vm, vcpuid, KVM_SET_FPU, fpu); in vcpu_fpu_set()
1834 void vcpu_get_reg(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_one_reg *reg) in vcpu_get_reg() argument
1838 ret = _vcpu_ioctl(vm, vcpuid, KVM_GET_ONE_REG, reg); in vcpu_get_reg()
1843 void vcpu_set_reg(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_one_reg *reg) in vcpu_set_reg() argument
1847 ret = _vcpu_ioctl(vm, vcpuid, KVM_SET_ONE_REG, reg); in vcpu_set_reg()
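A sketch of a KVM_GET_ONE_REG / KVM_SET_ONE_REG round trip with the wrappers above; the register id is an architecture-specific placeholder and struct kvm_one_reg comes from the KVM UAPI headers.

static void example_one_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t reg_id)
{
	uint64_t val = 0;
	struct kvm_one_reg reg = {
		.id = reg_id,			/* placeholder register id */
		.addr = (uint64_t)&val,
	};

	vcpu_get_reg(vm, vcpuid, &reg);		/* read current value into val */
	val |= 1;				/* placeholder modification */
	vcpu_set_reg(vm, vcpuid, &reg);		/* write it back */
}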
1865 void vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, in vcpu_ioctl() argument
1870 ret = _vcpu_ioctl(vm, vcpuid, cmd, arg); in vcpu_ioctl()
1875 int _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, in _vcpu_ioctl() argument
1878 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in _vcpu_ioctl()
1888 void *vcpu_map_dirty_ring(struct kvm_vm *vm, uint32_t vcpuid) in vcpu_map_dirty_ring() argument
1891 uint32_t size = vm->dirty_ring_size; in vcpu_map_dirty_ring()
1895 vcpu = vcpu_find(vm, vcpuid); in vcpu_map_dirty_ring()
1904 vm->page_size * KVM_DIRTY_LOG_PAGE_OFFSET); in vcpu_map_dirty_ring()
1909 vm->page_size * KVM_DIRTY_LOG_PAGE_OFFSET); in vcpu_map_dirty_ring()
1914 vm->page_size * KVM_DIRTY_LOG_PAGE_OFFSET); in vcpu_map_dirty_ring()
1936 void vm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg) in vm_ioctl() argument
1940 ret = _vm_ioctl(vm, cmd, arg); in vm_ioctl()
1945 int _vm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg) in _vm_ioctl() argument
1947 return ioctl(vm->fd, cmd, arg); in _vm_ioctl()
1962 void kvm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg) in kvm_ioctl() argument
1966 ret = ioctl(vm->kvm_fd, cmd, arg); in kvm_ioctl()
1971 int _kvm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg) in _kvm_ioctl() argument
1973 return ioctl(vm->kvm_fd, cmd, arg); in _kvm_ioctl()
1999 int _kvm_create_device(struct kvm_vm *vm, uint64_t type, bool test, int *fd) in _kvm_create_device() argument
2007 ret = ioctl(vm_get_fd(vm), KVM_CREATE_DEVICE, &create_dev); in _kvm_create_device()
2012 int kvm_create_device(struct kvm_vm *vm, uint64_t type, bool test) in kvm_create_device() argument
2016 ret = _kvm_create_device(vm, type, test, &fd); in kvm_create_device()
2051 int _vcpu_has_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group, in _vcpu_has_device_attr() argument
2054 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in _vcpu_has_device_attr()
2061 int vcpu_has_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group, in vcpu_has_device_attr() argument
2064 int ret = _vcpu_has_device_attr(vm, vcpuid, group, attr); in vcpu_has_device_attr()
2070 int _vcpu_access_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group, in _vcpu_access_device_attr() argument
2073 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in _vcpu_access_device_attr()
2080 int vcpu_access_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group, in vcpu_access_device_attr() argument
2083 int ret = _vcpu_access_device_attr(vm, vcpuid, group, attr, val, write); in vcpu_access_device_attr()
2104 void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) in vm_dump() argument
2110 fprintf(stream, "%*smode: 0x%x\n", indent, "", vm->mode); in vm_dump()
2111 fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd); in vm_dump()
2112 fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size); in vm_dump()
2114 hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) { in vm_dump()
2124 sparsebit_dump(stream, vm->vpages_mapped, indent + 2); in vm_dump()
2126 vm->pgd_created); in vm_dump()
2127 if (vm->pgd_created) { in vm_dump()
2130 virt_dump(stream, vm, indent + 4); in vm_dump()
2133 list_for_each_entry(vcpu, &vm->vcpus, list) in vm_dump()
2134 vcpu_dump(stream, vm, vcpu->id, indent + 2); in vm_dump()
2217 vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, in vm_phy_pages_alloc() argument
2225 TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address " in vm_phy_pages_alloc()
2228 paddr_min, vm->page_size); in vm_phy_pages_alloc()
2230 region = memslot2region(vm, memslot); in vm_phy_pages_alloc()
2231 base = pg = paddr_min >> vm->page_shift; in vm_phy_pages_alloc()
2245 paddr_min, vm->page_size, memslot); in vm_phy_pages_alloc()
2247 vm_dump(stderr, vm, 2); in vm_phy_pages_alloc()
2254 return base * vm->page_size; in vm_phy_pages_alloc()
2257 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, in vm_phy_page_alloc() argument
2260 return vm_phy_pages_alloc(vm, 1, paddr_min, memslot); in vm_phy_page_alloc()
2266 vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm) in vm_alloc_page_table() argument
2268 return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0); in vm_alloc_page_table()
2283 void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva) in addr_gva2hva() argument
2285 return addr_gpa2hva(vm, addr_gva2gpa(vm, gva)); in addr_gva2hva()
2300 bool vm_is_unrestricted_guest(struct kvm_vm *vm) in vm_is_unrestricted_guest() argument
2306 if (vm == NULL) { in vm_is_unrestricted_guest()
2321 unsigned int vm_get_page_size(struct kvm_vm *vm) in vm_get_page_size() argument
2323 return vm->page_size; in vm_get_page_size()
2326 unsigned int vm_get_page_shift(struct kvm_vm *vm) in vm_get_page_shift() argument
2328 return vm->page_shift; in vm_get_page_shift()
2331 unsigned long __attribute__((weak)) vm_compute_max_gfn(struct kvm_vm *vm) in vm_compute_max_gfn() argument
2333 return ((1ULL << vm->pa_bits) >> vm->page_shift) - 1; in vm_compute_max_gfn()
2336 uint64_t vm_get_max_gfn(struct kvm_vm *vm) in vm_get_max_gfn() argument
2338 return vm->max_gfn; in vm_get_max_gfn()
2341 int vm_get_fd(struct kvm_vm *vm) in vm_get_fd() argument
2343 return vm->fd; in vm_get_fd()
2386 int vm_get_stats_fd(struct kvm_vm *vm) in vm_get_stats_fd() argument
2388 return ioctl(vm->fd, KVM_GET_STATS_FD, NULL); in vm_get_stats_fd()
2391 int vcpu_get_stats_fd(struct kvm_vm *vm, uint32_t vcpuid) in vcpu_get_stats_fd() argument
2393 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_get_stats_fd()