#ifndef __PV_MM_H__
#define __PV_MM_H__

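/*
 * Map the guest l1e that maps @linear.  On success, *gl1mfn is set to the
 * MFN of the containing L1 table and the returned pointer should be released
 * with unmap_domain_page(); returns NULL on failure.
 */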
l1_pgentry_t *map_guest_l1e(unsigned long linear, mfn_t *gl1mfn);

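/*
 * Switch the current vCPU to a new top-level pagetable (the guest's view of
 * %cr3).  Returns 0 on success, negative error code otherwise.
 */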
int new_guest_cr3(mfn_t mfn);

/* Read a PV guest's l1e that maps this linear address. */
static inline l1_pgentry_t guest_get_eff_l1e(unsigned long linear)
{
    l1_pgentry_t l1e;

    ASSERT(!paging_mode_translate(current->domain));
    ASSERT(!paging_mode_external(current->domain));

    if ( unlikely(!__addr_ok(linear)) ||
         __copy_from_user(&l1e,
                          &__linear_l1_table[l1_linear_offset(linear)],
                          sizeof(l1_pgentry_t)) )
        l1e = l1e_empty();

    return l1e;
}
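
/*
 * Illustrative use (hypothetical caller): a fault handler can fetch the
 * guest's view of a faulting address with
 *
 *     l1_pgentry_t l1e = guest_get_eff_l1e(addr);
 *
 * and then test l1e_get_flags(l1e) & _PAGE_PRESENT.  Note that l1e_empty()
 * is returned both for out-of-range addresses and when reading the linear
 * pagetable itself faults, so the two cases are indistinguishable here.
 */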

/*
 * PTE updates can be done with ordinary writes except:
 *  1. Debug builds get extra checking by using CMPXCHG[8B].
 */
#ifndef NDEBUG
#define PTE_UPDATE_WITH_CMPXCHG
#else
#undef PTE_UPDATE_WITH_CMPXCHG
#endif
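
/*
 * In release builds, update_intpte() below therefore uses a plain write
 * unless the Accessed/Dirty bits must be preserved; debug builds always take
 * the cmpxchg path.
 */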

/*
 * How to write an entry to the guest pagetables.
 * Returns false for failure (pointer not valid), true for success.
 */
static inline bool update_intpte(intpte_t *p, intpte_t old, intpte_t new,
                                 mfn_t mfn, struct vcpu *v, bool preserve_ad)
{
    bool rv = true;

#ifndef PTE_UPDATE_WITH_CMPXCHG
    if ( !preserve_ad )
    {
        rv = paging_write_guest_entry(v, p, new, mfn);
    }
    else
#endif
    {
        intpte_t t = old;

        for ( ; ; )
        {
            intpte_t _new = new;

            if ( preserve_ad )
                _new |= old & (_PAGE_ACCESSED | _PAGE_DIRTY);

            rv = paging_cmpxchg_guest_entry(v, p, &t, _new, mfn);
            if ( unlikely(rv == 0) )
            {
                gdprintk(XENLOG_WARNING,
                         "Failed to update %" PRIpte " -> %" PRIpte
                         ": saw %" PRIpte "\n", old, _new, t);
                break;
            }

            if ( t == old )
                break;

            /* Allowed to change in Accessed/Dirty flags only. */
            BUG_ON((t ^ old) & ~(intpte_t)(_PAGE_ACCESSED|_PAGE_DIRTY));

            old = t;
        }
    }
    return rv;
}

/*
 * Macro that wraps the appropriate type-changes around update_intpte().
 * Arguments are: type, ptr, old, new, mfn, vcpu, preserve_ad
 */
#define UPDATE_ENTRY(_t,_p,_o,_n,_m,_v,_ad)                         \
    update_intpte(&_t ## e_get_intpte(*(_p)),                       \
                  _t ## e_get_intpte(_o), _t ## e_get_intpte(_n),   \
                  (_m), (_v), (_ad))
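
/*
 * For example (argument names purely illustrative),
 * UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, v, preserve_ad) expands to
 *
 *     update_intpte(&l1e_get_intpte(*(pl1e)),
 *                   l1e_get_intpte(ol1e), l1e_get_intpte(nl1e),
 *                   (gl1mfn), (v), (preserve_ad));
 */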

static always_inline l1_pgentry_t adjust_guest_l1e(l1_pgentry_t l1e,
                                                   const struct domain *d)
{
    if ( likely(l1e_get_flags(l1e) & _PAGE_PRESENT) &&
         likely(!is_pv_32bit_domain(d)) )
    {
        /* _PAGE_GUEST_KERNEL page cannot have the Global bit set. */
        if ( (l1e_get_flags(l1e) & (_PAGE_GUEST_KERNEL | _PAGE_GLOBAL)) ==
             (_PAGE_GUEST_KERNEL | _PAGE_GLOBAL) )
            gdprintk(XENLOG_WARNING, "Global bit is set in kernel page %lx\n",
                     l1e_get_pfn(l1e));

        if ( !(l1e_get_flags(l1e) & _PAGE_USER) )
            l1e_add_flags(l1e, (_PAGE_GUEST_KERNEL | _PAGE_USER));

        if ( !(l1e_get_flags(l1e) & _PAGE_GUEST_KERNEL) )
            l1e_add_flags(l1e, (_PAGE_GLOBAL | _PAGE_USER));
    }

    return l1e;
}
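
/*
 * In effect, for a 64-bit PV guest adjust_guest_l1e() adds
 * _PAGE_GUEST_KERNEL | _PAGE_USER to a present supervisor-only l1e, and
 * _PAGE_GLOBAL | _PAGE_USER to a present user l1e lacking _PAGE_GUEST_KERNEL.
 * The two branches are mutually exclusive (the second test sees the flags
 * added by the first), so guest kernel mappings never gain _PAGE_GLOBAL.
 */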

static inline l2_pgentry_t adjust_guest_l2e(l2_pgentry_t l2e,
                                            const struct domain *d)
{
    if ( likely(l2e_get_flags(l2e) & _PAGE_PRESENT) &&
         likely(!is_pv_32bit_domain(d)) )
        l2e_add_flags(l2e, _PAGE_USER);

    return l2e;
}

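/*
 * 32-bit PAE guests cannot set _PAGE_USER/_PAGE_RW in their own L3 entries
 * (those bits are reserved in PAE PDPTEs), but Xen runs such guests on
 * 4-level pagetables where the bits are meaningful.  adjust_guest_l3e()
 * therefore grants them on the guest's behalf; unadjust_guest_l3e() strips
 * them (and _PAGE_ACCESSED) again before an entry is handed back to the
 * guest.  64-bit guests only need _PAGE_USER, since PV guests run in user
 * mode.
 */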
static always_inline l3_pgentry_t adjust_guest_l3e(l3_pgentry_t l3e,
                                                   const struct domain *d)
{
    if ( likely(l3e_get_flags(l3e) & _PAGE_PRESENT) )
        l3e_add_flags(l3e, (likely(!is_pv_32bit_domain(d))
                            ? _PAGE_USER : _PAGE_USER | _PAGE_RW));

    return l3e;
}

static inline l3_pgentry_t unadjust_guest_l3e(l3_pgentry_t l3e,
                                              const struct domain *d)
{
    if ( unlikely(is_pv_32bit_domain(d)) &&
         likely(l3e_get_flags(l3e) & _PAGE_PRESENT) )
        l3e_remove_flags(l3e, _PAGE_USER | _PAGE_RW | _PAGE_ACCESSED);

    return l3e;
}

static always_inline l4_pgentry_t adjust_guest_l4e(l4_pgentry_t l4e,
                                                   const struct domain *d)
{
    /*
     * When shadowing an L4 behind the guest's back (e.g. for per-pcpu
     * purposes), we cannot efficiently sync access bit updates from hardware
     * (on the shadow tables) back into the guest view.
     *
     * We therefore unconditionally set _PAGE_ACCESSED even in the guest's
     * view.  This will appear to the guest as a CPU which proactively pulls
     * all valid L4e's into its TLB, which is compatible with the x86 ABI.
     *
     * At the time of writing, all PV guests set the access bit anyway, so
     * this is no actual change in their behaviour.
     */
    if ( likely(l4e_get_flags(l4e) & _PAGE_PRESENT) )
        l4e_add_flags(l4e, (_PAGE_ACCESSED |
                            (is_pv_32bit_domain(d) ? 0 : _PAGE_USER)));

    return l4e;
}

#endif /* __PV_MM_H__ */