/xen/xen/arch/arm/
guest_walk.c
    46: if ( n == 0 || !(gva & mask) )    in guest_walk_sd()
    95: paddr |= (gva & mask) >> 18;    in guest_walk_sd()
   165: *ipa = gva & mask;    in guest_walk_sd()
   374: #define OFFSETS(gva, gran) \    in guest_walk_ld()  (argument)
   376: zeroeth_table_offset_##gran(gva), \    in guest_walk_ld()
   377: first_table_offset_##gran(gva), \    in guest_walk_ld()
   383: OFFSETS(gva, 4K),    in guest_walk_ld()
   384: OFFSETS(gva, 16K),    in guest_walk_ld()
   385: OFFSETS(gva, 64K)    in guest_walk_ld()
   416: topbit = get_top_bit(d, gva, tcr);    in guest_walk_ld()
   [all …]
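The OFFSETS(gva, gran) hits above build, per translation granule, the table index that gva selects at each level of the long-descriptor walk. As an illustration only, not the Xen macros themselves, here is a minimal sketch of how those per-level indices fall out of a 48-bit VA with a 4 KiB granule, assuming 9 index bits per level and a 12-bit page offset:

/*
 * Illustrative sketch only: per-level table indices for a 4 KiB granule,
 * 48-bit VA, 4 translation levels (0..3). Xen derives the equivalent
 * values through the *_table_offset_<granule>() macros used by OFFSETS()
 * in guest_walk_ld(); the constants here are assumptions of this sketch.
 */
#include <stdint.h>
#include <stdio.h>

#define GRANULE_SHIFT   12u                      /* 4 KiB pages           */
#define BITS_PER_LEVEL  9u                       /* 512 entries per table */
#define LEVEL_SHIFT(l)  (GRANULE_SHIFT + BITS_PER_LEVEL * (3u - (l)))
#define TABLE_INDEX(va, l) \
    (((va) >> LEVEL_SHIFT(l)) & ((1u << BITS_PER_LEVEL) - 1))

int main(void)
{
    uint64_t gva = 0x0000ffff8012a345ULL;

    for ( unsigned int lvl = 0; lvl <= 3; lvl++ )
        printf("level %u index: %#x\n", lvl, (unsigned int)TABLE_INDEX(gva, lvl));

    printf("page offset: %#llx\n",
           (unsigned long long)(gva & ((1u << GRANULE_SHIFT) - 1)));
    return 0;
}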
guestcopy.c
    19: } gva;    (member)
    27: #define GVA_INFO(vcpu) ((copy_info_t) { .gva = { vcpu } })
    37: return get_page_from_gva(info.gva.v, addr,    in translate_get_page()
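The gva member and the GVA_INFO() initializer above belong to a small tagged-union pattern: the copy helpers take either a vCPU (for virtual-address copies) or a domain (for physical-address copies) through one copy_info_t argument. A minimal sketch of that pattern; the GVA side mirrors the lines above, while the GPA counterpart is reconstructed from context and may differ in detail:

/*
 * Sketch of the copy_info_t idea in guestcopy.c: one argument that carries
 * either a vcpu (GVA-based copy) or a domain (GPA-based copy), with a flag
 * elsewhere in the call saying which member is valid. Types are forward
 * declarations only, for illustration.
 */
struct vcpu;
struct domain;

typedef union
{
    struct { struct vcpu *v; }   gva;  /* valid for virtual-address copies  */
    struct { struct domain *d; } gpa;  /* valid for physical-address copies */
} copy_info_t;

#define GVA_INFO(vcpu)   ((copy_info_t) { .gva = { vcpu } })
#define GPA_INFO(domain) ((copy_info_t) { .gpa = { domain } })  /* assumed counterpart */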
traps.c
  1807: vaddr_t gva;    in get_hfar()  (local)
  1811: gva = READ_CP32(HDFAR);    in get_hfar()
  1813: gva = READ_CP32(HIFAR);    in get_hfar()
  1815: gva = READ_SYSREG(FAR_EL2);    in get_hfar()
  1818: return gva;    in get_hfar()
  1827: ipa |= gva & ~PAGE_MASK;    in get_faulting_ipa()
  1885: vaddr_t gva;    in do_trap_stage2_abort_guest()  (local)
  1897: gva = get_hfar(is_data);    in do_trap_stage2_abort_guest()
  1900: gpa = get_faulting_ipa(gva);    in do_trap_stage2_abort_guest()
  1912: rc = gva_to_ipa(gva, &gpa, GV2M_READ);    in do_trap_stage2_abort_guest()
  [all …]
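The traps.c hits show the two halves of stage-2 fault decoding: get_hfar() picks the fault address register by abort type (HDFAR or HIFAR on Arm32, FAR_EL2 on Arm64), and get_faulting_ipa() rebuilds the full IPA by taking the page-granular part from HPFAR_EL2 and the in-page offset from the GVA (the "ipa |= gva & ~PAGE_MASK" line). A hedged sketch of that recombination, with the HPFAR field placement written from the Arm architecture rather than copied from Xen:

/*
 * Sketch: rebuild a faulting IPA from HPFAR_EL2 plus the faulting GVA.
 * HPFAR_EL2 carries the faulting IPA's bits [47:12] in register bits
 * [39:4], so shifting the masked value left by 8 restores page alignment;
 * the in-page offset is not reported there and is taken from the GVA.
 * Constants and the accessor shape are illustrative, not Xen's.
 */
#include <stdint.h>

#define PAGE_SHIFT   12u
#define PAGE_MASK    (~((uint64_t)(1u << PAGE_SHIFT) - 1))
#define HPFAR_MASK   0x000000fffffffff0ULL   /* register bits [39:4] */

uint64_t faulting_ipa(uint64_t hpfar_el2, uint64_t gva)
{
    uint64_t ipa = (hpfar_el2 & HPFAR_MASK) << (PAGE_SHIFT - 4);

    ipa |= gva & ~PAGE_MASK;   /* low 12 bits come from the GVA */
    return ipa;
}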
mem_access.c
   102: p2m_mem_access_check_and_get_page(vaddr_t gva, unsigned long flag,    in p2m_mem_access_check_and_get_page()  (argument)
   115: rc = gva_to_ipa(gva, &ipa, flag);    in p2m_mem_access_check_and_get_page()
   129: if ( !guest_walk_tables(v, gva, &ipa, &perms) )    in p2m_mem_access_check_and_get_page()
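The mem_access.c hits outline a two-step translation: first try the hardware-assisted gva_to_ipa(), and if that fails, for example because mem_access restrictions on the stage-2 mapping of the guest's own page tables made the hardware lookup abort, fall back to the software walker guest_walk_tables(). A simplified, pseudocode-level sketch of that control flow; the call shapes follow the lines above, while the perms type, error values, and the omitted permission checks and page refcounting are assumptions:

/*
 * Sketch of the fallback order in p2m_mem_access_check_and_get_page():
 * hardware translation first, software page-table walk second. vaddr_t,
 * paddr_t, struct vcpu, gva_to_ipa() and guest_walk_tables() are Xen
 * declarations and are not redefined here.
 */
static int translate_gva_sketch(struct vcpu *v, vaddr_t gva,
                                unsigned long flag, paddr_t *ipa)
{
    unsigned int perms;                 /* type assumed for this sketch */

    if ( gva_to_ipa(gva, ipa, flag) == 0 )
        return 0;                       /* hardware walk succeeded */

    /*
     * The hardware walk can fail when stage-2 access restrictions cover
     * the guest's page tables themselves; retry with the software walker.
     */
    if ( !guest_walk_tables(v, gva, ipa, &perms) )
        return -1;                      /* both walks failed */

    return 0;
}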
/xen/xen/arch/x86/mm/hap/
private.h
    29: unsigned long gva,
    33: unsigned long gva,
    37: unsigned long gva,
guest_walk.c
    43: struct vcpu *v, struct p2m_domain *p2m, unsigned long gva, uint32_t *pfec)    in hap_gva_to_gfn()
    46: return hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)(v, p2m, cr3, gva, pfec, NULL);    in hap_gva_to_gfn()
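hap_gva_to_gfn() here is not a single function: this guest_walk.c is compiled once per guest paging mode, and the GUEST_PAGING_LEVELS token is pasted into the symbol name so each build emits its own walker (the three "unsigned long gva," declarations in private.h above are those per-level variants). A hedged sketch of the token-pasting pattern, with helper macro names chosen for illustration rather than copied from Xen:

/*
 * Illustrative sketch of per-paging-level instantiation: the same source
 * file is built several times with a different GUEST_PAGING_LEVELS, and
 * token pasting gives each copy a distinct symbol. Macro names here are
 * hypothetical; Xen's actual wrappers may differ.
 */
#include <stdint.h>

struct vcpu;
struct p2m_domain;

#ifndef GUEST_PAGING_LEVELS
#define GUEST_PAGING_LEVELS 4          /* assumed for this sketch */
#endif

#define GVA_TO_GFN_NAME_(levels)  hap_gva_to_gfn_##levels##_levels
#define GVA_TO_GFN_NAME(levels)   GVA_TO_GFN_NAME_(levels)

/* With GUEST_PAGING_LEVELS == 4 this declares hap_gva_to_gfn_4_levels(). */
unsigned long GVA_TO_GFN_NAME(GUEST_PAGING_LEVELS)(
    struct vcpu *v, struct p2m_domain *p2m,
    unsigned long gva, uint32_t *pfec);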
hap.c
   826: struct vcpu *v, struct p2m_domain *p2m, unsigned long gva, uint32_t *pfec)    in hap_gva_to_gfn_real_mode()  (argument)
   828: return ((paddr_t)gva >> PAGE_SHIFT);    in hap_gva_to_gfn_real_mode()
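hap_gva_to_gfn_real_mode() is the degenerate case: with paging disabled in the guest, a "virtual" address is already a physical one, so the GFN is simply the address shifted down by PAGE_SHIFT. A minimal worked example of that shift:

/*
 * Worked example of the real-mode case: gfn = gva >> PAGE_SHIFT.
 * On x86 PAGE_SHIFT is 12, so address 0x12345 lives in frame 0x12
 * at offset 0x345.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
    uint64_t gva = 0x12345;

    printf("gva %#llx -> gfn %#llx, offset %#llx\n",
           (unsigned long long)gva,
           (unsigned long long)(gva >> PAGE_SHIFT),
           (unsigned long long)(gva & ((1ULL << PAGE_SHIFT) - 1)));
    return 0;
}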
/xen/tools/debugger/gdbsx/gx/
gx_main.c
   428: uint64_t gva;
   434: gx_decode_zZ_packet(&rbuf[3], &gva);
   435: if (xg_set_bp(gva, ch1))
   446: uint64_t gva;
   452: gx_decode_zZ_packet(&rbuf[3], &gva);
   453: if (xg_rm_bp(gva, ch1))
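The gx_main.c hits are the GDB remote-protocol breakpoint path: a Z/z packet such as "Z0,ffffffff81000000,1" carries the target address in hex, gx_decode_zZ_packet() turns it into a gva, and xg_set_bp()/xg_rm_bp() plant or remove the breakpoint there. As a hedged illustration of the address decode only, not gdbsx's actual parser, the hex field after the first comma can be pulled out like this:

/*
 * Illustrative decoder for the address field of a GDB "Z0,ADDR,KIND"
 * packet. This is a sketch for exposition; gdbsx's gx_decode_zZ_packet()
 * may parse the buffer differently.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static int decode_z_packet_addr(const char *pkt, uint64_t *gva)
{
    /* pkt points just past "Z0," (cf. &rbuf[3] at the call sites above). */
    char *end;
    uint64_t addr = strtoull(pkt, &end, 16);

    if ( end == pkt || *end != ',' )
        return -1;          /* malformed packet */

    *gva = addr;
    return 0;
}

int main(void)
{
    uint64_t gva;

    if ( decode_z_packet_addr("ffffffff81000000,1", &gva) == 0 )
        printf("breakpoint gva: %#llx\n", (unsigned long long)gva);
    return 0;
}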
/xen/xen/include/asm-x86/hvm/vmx/
vmx.h
   499: static always_inline void __invvpid(unsigned long type, u16 vpid, u64 gva)    in __invvpid()  (argument)
   504: u64 gva;    in __invvpid()  (member)
   505: } operand = {vpid, 0, gva};    in __invvpid()
   537: static inline void vpid_sync_vcpu_gva(struct vcpu *v, unsigned long gva)    in vpid_sync_vcpu_gva()  (argument)
   558: __invvpid(type, v->arch.hvm.n1asid.asid, (u64)gva);    in vpid_sync_vcpu_gva()
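The vmx.h hits show how the INVVPID instruction is fed: it takes a 128-bit in-memory descriptor with the VPID in bits 15:0, bits 63:16 reserved as zero, and the guest linear address in bits 127:64, which is what the anonymous operand struct with its "u64 gva" member packs before the invvpid asm. A sketch of that descriptor layout on its own, outside Xen's helper:

/*
 * Sketch of the INVVPID descriptor layout (Intel SDM): 128 bits, VPID in
 * bits 15:0, bits 63:16 reserved (must be zero), linear address in bits
 * 127:64. Xen builds the same shape inline in __invvpid(); this struct is
 * for illustration only and does not issue the instruction.
 */
#include <stdint.h>

struct invvpid_desc {
    uint64_t vpid:16;
    uint64_t reserved:48;   /* must be zero */
    uint64_t gva;           /* linear address, used by per-address types */
} __attribute__((packed));

struct invvpid_desc make_invvpid_desc(uint16_t vpid, uint64_t gva)
{
    return (struct invvpid_desc){ .vpid = vpid, .reserved = 0, .gva = gva };
}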
/xen/xen/include/asm-arm/
guest_walk.h
     6: vaddr_t gva,
mem_access.h
    41: p2m_mem_access_check_and_get_page(vaddr_t gva, unsigned long flag,
/xen/tools/debugger/gdbsx/xg/
xg_main.c
   782: iop->gva = guestva;    in xg_read_mem()
   815: iop->gva = guestva;    in xg_write_mem()
/xen/xen/arch/x86/
domctl.c
    43: void * __user gva = (void *)iop->gva, * __user uva = (void *)iop->uva;    in gdbsx_guest_mem_io()  (local)
    45: iop->remain = dbg_rw_mem(gva, uva, iop->len, domid,    in gdbsx_guest_mem_io()
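domctl.c is where the debugger's memory request meets the hypervisor: dbg_rw_mem() copies iop->len bytes between the guest virtual address iop->gva and the tool's user-space buffer iop->uva, and whatever could not be copied is reported back in iop->remain. A small sketch of how a caller on the tool side would interpret that result; the field names come from the snippets above, but the reduced struct is a stand-in, not the real domctl payload:

/*
 * Sketch: interpreting the gdbsx memio result. "remain" counts bytes that
 * dbg_rw_mem() could not transfer, so the successful prefix is
 * len - remain. The struct is a reduced stand-in for illustration.
 */
#include <stdint.h>
#include <stdio.h>

struct memio_result {
    uint64_t gva;     /* guest virtual address of the transfer */
    uint32_t len;     /* bytes requested                       */
    uint32_t remain;  /* bytes NOT copied, filled in by Xen    */
};

int check_memio(const struct memio_result *iop)
{
    uint32_t copied = iop->len - iop->remain;

    if ( iop->remain != 0 )
    {
        fprintf(stderr, "partial copy at gva %#llx: %u of %u bytes\n",
                (unsigned long long)iop->gva, copied, iop->len);
        return -1;
    }
    return 0;
}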
/xen/xen/include/public/
domctl.h
   754: uint64_aligned_t gva; /* guest virtual address */    (member)