Lines matching refs:mmu in arch/x86/kvm/mmu/paging_tmpl.h, Linux KVM's guest page-table walker template

34 #define PT_HAVE_ACCESSED_DIRTY(mmu) true argument
54 #define PT_HAVE_ACCESSED_DIRTY(mmu) true argument
67 #define PT_HAVE_ACCESSED_DIRTY(mmu) ((mmu)->ept_ad) argument
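The three hits above are the template's per-mode definitions: paging_tmpl.h is compiled once each for 32-bit, 64-bit and EPT guest page tables, so for the legacy formats the accessed/dirty check folds to a compile-time true, while for EPT it depends on whether A/D bits were enabled for the guest (mmu->ept_ad). A minimal userspace sketch of the same dispatch; everything except the macro shape is a hypothetical stand-in:

    #include <stdbool.h>

    /* Hypothetical stand-in for the relevant part of struct kvm_mmu. */
    struct mmu_model {
            bool ept_ad;    /* guest was given EPT accessed/dirty bits */
    };

    /* 32-bit and 64-bit instantiations: A/D bits always exist. */
    #define PT_HAVE_ACCESSED_DIRTY_LEGACY(mmu)  true

    /* EPT instantiation: A/D bits exist only if enabled per guest. */
    #define PT_HAVE_ACCESSED_DIRTY_EPT(mmu)     ((mmu)->ept_ad)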
104 static inline void FNAME(protect_clean_gpte)(struct kvm_mmu *mmu, unsigned *access, in FNAME()
110 if (!PT_HAVE_ACCESSED_DIRTY(mmu)) in FNAME()
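protect_clean_gpte (line 104) removes write permission from the computed access rights while the guest PTE's dirty bit is still clear, so the first write faults and KVM can set the bit; when the mode has no A/D bits (line 110) the access mask must be left untouched. A simplified model, with the mask values assumed to match the kernel's ACC_* encoding:

    #include <stdbool.h>

    #define ACC_WRITE_MASK  (1u << 1)       /* assumed ACC_* encoding */
    #define PT_DIRTY_MASK   (1ull << 6)     /* x86 PTE dirty bit */

    /* Drop write access for clean gptes so the first write faults and
     * the dirty bit gets set; a no-op when A/D bits don't exist. */
    static void protect_clean_gpte(bool have_ad, unsigned int *access,
                                   unsigned long long gpte)
    {
            if (!have_ad)
                    return;
            if (!(gpte & PT_DIRTY_MASK))
                    *access &= ~ACC_WRITE_MASK;
    }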
140 static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level) in FNAME()
142 return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level) || in FNAME()
143 FNAME(is_bad_mt_xwr)(&mmu->guest_rsvd_check, gpte); in FNAME()
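is_rsvd_bits_set (line 140) rejects a guest PTE that sets bits reserved at its level, and for EPT additionally rejects invalid memtype/XWR combinations (is_bad_mt_xwr). The underlying test is one mask lookup per (PS bit, level) pair; a standalone model assuming the kernel's rsvd_bits_validate layout:

    #include <stdbool.h>
    #include <stdint.h>

    /* One must-be-zero mask per (bit7, level) pair, mirroring the
     * kernel's struct rsvd_bits_validate. */
    struct rsvd_bits_validate {
            uint64_t rsvd_bits_mask[2][5];
    };

    static bool is_rsvd_bits_set(const struct rsvd_bits_validate *rc,
                                 uint64_t gpte, int level)
    {
            int bit7 = (gpte >> 7) & 1;     /* PS bit selects the mask */

            return (gpte & rc->rsvd_bits_mask[bit7][level - 1]) != 0;
    }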
146 static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, in FNAME()
197 if (PT_HAVE_ACCESSED_DIRTY(vcpu->arch.mmu) && in FNAME()
201 if (FNAME(is_rsvd_bits_set)(vcpu->arch.mmu, gpte, PG_LEVEL_4K)) in FNAME()
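Lines 197-201 sit in the gpte-prefetch path: an entry is not worth prefetching if it is non-present, fails the reserved-bit check, or, when A/D bits exist, has never been accessed, since installing it would first require setting the accessed bit. A schematic version (the reserved-bit stub stands in for the per-mode check above):

    #include <stdbool.h>
    #include <stdint.h>

    #define PTE_PRESENT       (1ull << 0)
    #define PT_ACCESSED_MASK  (1ull << 5)   /* x86 PTE accessed bit */

    extern bool rsvd_bits_set(uint64_t gpte);       /* per-mode stub */

    /* True if the gpte must not be prefetched into the shadow table. */
    static bool prefetch_invalid_gpte_model(bool have_ad, uint64_t gpte)
    {
            if (!(gpte & PTE_PRESENT))
                    return true;
            if (have_ad && !(gpte & PT_ACCESSED_MASK))
                    return true;
            return rsvd_bits_set(gpte);
    }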
236 struct kvm_mmu *mmu, in FNAME()
247 if (!PT_HAVE_ACCESSED_DIRTY(mmu)) in FNAME()
287 ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte); in FNAME()
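update_accessed_dirty_bits (lines 236-287) writes A/D bits back into the guest's own page tables and must not lose a concurrent guest update, so cmpxchg_gpte (line 146) installs the new value only if the entry still holds what the walker originally read; on failure the whole walk is retried. A userspace sketch of that discipline in C11 atomics (the kernel operates on guest pages mapped into host userspace and handles faults on that mapping):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Install new_pte (orig_pte plus A/D bits) only if the guest hasn't
     * modified the entry since it was read; false means "retry walk". */
    static bool cmpxchg_gpte_model(_Atomic uint64_t *ptep,
                                   uint64_t orig_pte, uint64_t new_pte)
    {
            return atomic_compare_exchange_strong(ptep, &orig_pte, new_pte);
    }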
308 static inline bool FNAME(is_last_gpte)(struct kvm_mmu *mmu, in FNAME()
326 gpte &= level - (PT32_ROOT_LEVEL + mmu->mmu_role.ext.cr4_pse); in FNAME()
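Line 326 is the branch-free half of the leaf test for 32-bit guest paging: level - (PT32_ROOT_LEVEL + cr4_pse) underflows, and therefore has bit 7 (the PS position) set, exactly for the levels at which PS is honored, so a PDE's PS bit is masked off when CR4.PSE is clear. A standalone model of the 32-bit case, assuming the kernel's follow-up OR that forces a leaf at the 4K level:

    #include <stdbool.h>

    #define PT_PAGE_SIZE_MASK (1u << 7)     /* PS bit */
    #define PT32_ROOT_LEVEL   2
    #define PG_LEVEL_4K       1

    static bool is_last_gpte_32bit(unsigned int cr4_pse,
                                   unsigned int level, unsigned int gpte)
    {
            /* Underflow sets bit 7 iff level < PT32_ROOT_LEVEL + cr4_pse,
             * i.e. iff the PS bit is architecturally honored here. */
            gpte &= level - (PT32_ROOT_LEVEL + cr4_pse);

            /* Underflow sets bit 7 iff level == PG_LEVEL_4K: a 4K-level
             * entry always terminates the walk. */
            gpte |= level - PG_LEVEL_4K - 1;

            return gpte & PT_PAGE_SIZE_MASK;
    }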
341 struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, in FNAME()
364 walker->level = mmu->root_level; in FNAME()
365 pte = mmu->get_guest_pgd(vcpu); in FNAME()
366 have_ad = PT_HAVE_ACCESSED_DIRTY(mmu); in FNAME()
371 pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3); in FNAME()
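Lines 364-371 pick the walk's starting point: the root normally comes from get_guest_pgd, but PAE paging has no full root table, only four PDPTEs cached in the vCPU, so bits 31:30 of the address select which one to start from. A hypothetical helper making the index computation on line 371 explicit:

    #include <stdint.h>

    /* PAE: the two top address bits choose one of the four PDPTEs. */
    static unsigned int pae_pdptr_index(uint64_t addr)
    {
            return (addr >> 30) & 3;
    }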
406 real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn), in FNAME()
444 if (unlikely(FNAME(is_rsvd_bits_set)(mmu, pte, walker->level))) { in FNAME()
453 } while (!FNAME(is_last_gpte)(mmu, walker->level, pte)); in FNAME()
460 errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access); in FNAME()
470 real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access, &walker->fault); in FNAME()
477 FNAME(protect_clean_gpte)(mmu, &walker->pte_access, pte); in FNAME()
488 ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, in FNAME()
503 if (fetch_fault && (is_efer_nx(mmu) || is_cr4_smep(mmu))) in FNAME()
536 walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu; in FNAME()
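Lines 341-536 are walk_addr_generic, the software walk itself: start at the root, and per level translate the table's gfn through the nested-translation hook (translate_gpa, itself a second walk when a nested guest is active), read the gpte, reject non-present or reserved-bit entries, and stop at the first leaf; only then are the accumulated permissions checked (permission_fault, line 460) and A/D bits written back (line 488), and line 536 records whether a failure belongs to the guest walk or to the nested translation. A heavily compressed, compile-only sketch of the loop; every name below is schematic rather than the kernel's:

    #include <stdbool.h>
    #include <stdint.h>

    #define PTE_PRESENT (1ull << 0)

    /* Stubs standing in for guest-memory reads and per-mode checks. */
    extern uint64_t read_gpte(uint64_t table_gpa, unsigned int index);
    extern bool rsvd_bits_set(uint64_t gpte, int level);
    extern bool is_leaf(uint64_t gpte, int level);  /* true at level 1 */

    struct walk_model {
            int level;      /* current level, root_level down to 1 */
            uint64_t pte;   /* last gpte read; the leaf on success */
    };

    /* 64-bit-style walk: 9 index bits per level above a 4K base. */
    static bool walk_model(struct walk_model *w, uint64_t root_gpa,
                           uint64_t addr, int root_level)
    {
            uint64_t table = root_gpa;

            for (w->level = root_level; ; w->level--) {
                    unsigned int idx =
                            (addr >> (12 + 9 * (w->level - 1))) & 0x1ff;

                    w->pte = read_gpte(table, idx);
                    if (!(w->pte & PTE_PRESENT) ||
                        rsvd_bits_set(w->pte, w->level))
                            return false;   /* inject a page fault */
                    if (is_leaf(w->pte, w->level))
                            return true;    /* w->pte maps the gfn */
                    table = w->pte & 0x000ffffffffff000ull;
            }
    }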
546 return FNAME(walk_addr_generic)(walker, vcpu, vcpu->arch.mmu, addr, in FNAME()
576 FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte); in FNAME()
670 top_level = vcpu->arch.mmu->root_level; in FNAME()
682 if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa))) in FNAME()
806 (!is_cr0_wp(vcpu->arch.mmu) && !user_fault))) in FNAME()
898 !is_cr0_wp(vcpu->arch.mmu) && !fault->user && fault->slot) { in FNAME()
908 if (is_cr4_smep(vcpu->arch.mmu)) in FNAME()
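Lines 806-908 handle the CR0.WP=0 quirk: with write protection off, a supervisor write to a read-only page must succeed even though the guest PTE says otherwise, so KVM grants write access but demotes the mapping to supervisor-only, and under SMEP also strips execute, since the entry now looks like a kernel page. A model of the access fixup, assuming the kernel's ACC_* bit encoding:

    #define ACC_EXEC_MASK   (1u << 0)       /* assumed ACC_* encoding */
    #define ACC_WRITE_MASK  (1u << 1)
    #define ACC_USER_MASK   (1u << 2)

    /* CR0.WP=0 supervisor write: make the mapping writable but
     * kernel-only; with SMEP the now-"kernel" page must not execute. */
    static unsigned int wp0_supervisor_write_fixup(unsigned int pte_access,
                                                   int cr4_smep)
    {
            pte_access |= ACC_WRITE_MASK;
            pte_access &= ~ACC_USER_MASK;
            if (cr4_smep)
                    pte_access &= ~ACC_EXEC_MASK;
            return pte_access;
    }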
1060 union kvm_mmu_page_role mmu_role = vcpu->arch.mmu->mmu_role.base; in FNAME()
1118 FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte); in FNAME()
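Lines 1060-1118 belong to sync_page, which revalidates a shadow page against the current guest page tables: the vCPU's base mmu_role is snapshotted first (line 1060) to confirm the page still matches the active MMU context, then each re-read gpte has its access recomputed and clean entries re-write-protected (line 1118) with the same helpers the walker uses. A schematic per-entry step, reusing the hypothetical models sketched above:

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical per-mode helpers, as modeled earlier. */
    extern unsigned int gpte_access(uint64_t gpte);
    extern void protect_clean_gpte(bool have_ad, unsigned int *access,
                                   unsigned long long gpte);
    extern bool prefetch_invalid_gpte_model(bool have_ad, uint64_t gpte);

    /* One sync step: false means the cached spte must be zapped;
     * otherwise *access holds the rights to re-install. */
    static bool sync_one_gpte(bool have_ad, uint64_t gpte,
                              unsigned int *access)
    {
            if (prefetch_invalid_gpte_model(have_ad, gpte))
                    return false;
            *access = gpte_access(gpte);
            protect_clean_gpte(have_ad, access, gpte);
            return true;
    }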