/******************************************************************************
 * include/asm-x86/shadow.h
 *
 * Parts of this code are Copyright (c) 2006 by XenSource Inc.
 * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
 * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _XEN_SHADOW_H
#define _XEN_SHADOW_H

#include <xen/sched.h>
#include <xen/perfc.h>
#include <xen/domain_page.h>
#include <asm/flushtlb.h>
#include <asm/paging.h>
#include <asm/p2m.h>
#include <asm/spec_ctrl.h>

#include <public/domctl.h>

/*****************************************************************************
 * Macros to tell which shadow paging mode a domain is in */

#define shadow_mode_enabled(_d)    paging_mode_shadow(_d)
#define shadow_mode_refcounts(_d) (paging_mode_shadow(_d) && \
                                   paging_mode_refcounts(_d))
#define shadow_mode_log_dirty(_d) (paging_mode_shadow(_d) && \
                                   paging_mode_log_dirty(_d))
#define shadow_mode_translate(_d) (paging_mode_shadow(_d) && \
                                   paging_mode_translate(_d))
#define shadow_mode_external(_d)  (paging_mode_shadow(_d) && \
                                   paging_mode_external(_d))

/*****************************************************************************
 * Entry points into the shadow code */

/* Set up the shadow-specific parts of a domain struct at start of day.
 * Called from paging_domain_init(). */
int shadow_domain_init(struct domain *d);

/* Set up the shadow-specific parts of a vcpu struct.  Called by
 * paging_vcpu_init() in paging.c. */
void shadow_vcpu_init(struct vcpu *v);

#ifdef CONFIG_SHADOW_PAGING

/* Enable an arbitrary shadow mode.  Call once at domain creation. */
int shadow_enable(struct domain *d, u32 mode);

/* Enable VRAM dirty bit tracking. */
int shadow_track_dirty_vram(struct domain *d,
                            unsigned long first_pfn,
                            unsigned long nr,
                            XEN_GUEST_HANDLE(void) dirty_bitmap);

/* Handler for shadow control ops: operations from user-space to enable
 * and disable ephemeral shadow modes (test mode and log-dirty mode) and
 * manipulate the log-dirty bitmap. */
int shadow_domctl(struct domain *d,
                  struct xen_domctl_shadow_op *sc,
                  XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl);

/* Call when destroying a domain */
void shadow_teardown(struct domain *d, bool *preempted);

/* Call once all of the references to the domain have gone away */
void shadow_final_teardown(struct domain *d);

void sh_remove_shadows(struct domain *d, mfn_t gmfn, int fast, int all);

/* Adjust shadows ready for a guest page to change its type. */
void shadow_prepare_page_type_change(struct domain *d, struct page_info *page,
                                     unsigned long new_type);

/* Discard _all_ mappings from the domain's shadows. */
void shadow_blow_tables_per_domain(struct domain *d);

/* Set the pool of shadow pages to the required number of pages.
 * Input will be rounded up to at least shadow_min_acceptable_pages(),
 * plus space for the p2m table.
 * Returns 0 for success, non-zero for failure. */
int shadow_set_allocation(struct domain *d, unsigned int pages,
                          bool *preempted);

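/*
 * Illustrative caller sketch for shadow_set_allocation() (hypothetical, not
 * taken from the actual domctl handler): converting a megabyte count 'mb'
 * into 4k pages and handling preemption by retrying.
 *
 *     bool preempted = false;
 *     int rc = shadow_set_allocation(d, mb << (20 - PAGE_SHIFT), &preempted);
 *
 *     if ( preempted )
 *         // arrange for the operation to be restarted later,
 *         // e.g. via a hypercall continuation
 *     else if ( rc )
 *         // the pool could not be resized; propagate rc to the caller
 */
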
#else /* !CONFIG_SHADOW_PAGING */

#define shadow_teardown(d, p) ASSERT(is_pv_domain(d))
#define shadow_final_teardown(d) ASSERT(is_pv_domain(d))
#define shadow_enable(d, mode) \
    ({ ASSERT(is_pv_domain(d)); -EOPNOTSUPP; })
#define shadow_track_dirty_vram(d, begin_pfn, nr, bitmap) \
    ({ ASSERT_UNREACHABLE(); -EOPNOTSUPP; })
#define shadow_set_allocation(d, pages, preempted) \
    ({ ASSERT_UNREACHABLE(); -EOPNOTSUPP; })

static inline void sh_remove_shadows(struct domain *d, mfn_t gmfn,
                                     int fast, int all) {}

static inline void shadow_prepare_page_type_change(struct domain *d,
                                                   struct page_info *page,
                                                   unsigned long new_type) {}

static inline void shadow_blow_tables_per_domain(struct domain *d) {}

static inline int shadow_domctl(struct domain *d,
                                struct xen_domctl_shadow_op *sc,
                                XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
{
    return -EINVAL;
}

#endif /* CONFIG_SHADOW_PAGING */

/*
 * Mitigations for L1TF / CVE-2018-3620 for PV guests.
 *
 * We cannot alter an architecturally-legitimate PTE which a PV guest has
 * chosen to write, as traditional paged-out metadata is L1TF-vulnerable.
 * What we can do is force a PV guest which writes a vulnerable PTE into
 * shadow mode, so Xen controls the pagetables which are reachable by the CPU
 * pagewalk.
 *
 * The core of the L1TF vulnerability is that the address bits of the PTE
 * (accounting for PSE and factoring in the level-relevant part of the linear
 * access) are sent for an L1D lookup (to retrieve the next-level PTE, or
 * eventual memory address) before the Present or reserved bits (which would
 * cause a terminal fault) are accounted for.  If an L1D hit occurs, the
 * resulting data is available for potentially dependent instructions.
 *
 * For Present PTEs, the PV type-count safety logic ensures that the address
 * bits always point at a guest-accessible frame, which is safe WRT L1TF from
 * Xen's point of view.  In practice, a PV guest should be unable to set any
 * reserved bits, so should be unable to create any present L1TF-vulnerable
 * PTEs at all.
 *
 * Therefore, these safety checks apply to Not-Present PTEs only, where
 * traditionally, Xen would have let the guest write any value it chose.
 *
 * The all-zero PTE potentially leaks mfn 0.  All software on the system is
 * expected to cooperate and not put any secrets there.  In a Xen system,
 * neither Xen nor dom0 are expected to touch mfn 0, as it typically contains
 * the real mode IVT and BIOS Data Area.  Therefore, mfn 0 is considered safe.
 *
 * Any PTE whose address is higher than the maximum cacheable address is safe,
 * as it won't get an L1D hit.
 *
 * Speculative superpages also need accounting for, as PSE is considered
 * irrespective of Present.  We disallow PSE being set, as it allows an
 * attacker to leak 2M or 1G of data starting from mfn 0.  Also, because of
 * recursive/linear pagetables, we must consider PSE even at L4, as hardware
 * will interpret an L4e as an L3e during a recursive walk.
 */

static inline bool is_l1tf_safe_maddr(intpte_t pte)
{
    paddr_t maddr = pte & l1tf_addr_mask;

    return maddr == 0 || maddr >= l1tf_safe_maddr;
}

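/*
 * Worked example (illustrative only; the exact values of l1tf_addr_mask and
 * l1tf_safe_maddr are established at boot, via asm/spec_ctrl.h):
 *
 *     is_l1tf_safe_maddr(0) is true: the all-zero PTE refers to mfn 0,
 *     which is considered safe as described above.
 *
 *     A not-present PTE whose masked address falls below l1tf_safe_maddr
 *     (i.e. within potentially-cacheable memory) is not safe, and a PV
 *     guest writing such a PTE is forced into shadow mode by
 *     pv_l1tf_check_pte() below.
 */
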
#ifdef CONFIG_PV

static inline bool pv_l1tf_check_pte(struct domain *d, unsigned int level,
                                     intpte_t pte)
{
    ASSERT(is_pv_domain(d));
    ASSERT(!(pte & _PAGE_PRESENT));

    if ( d->arch.pv.check_l1tf && !paging_mode_sh_forced(d) &&
         (((level > 1) && (pte & _PAGE_PSE)) || !is_l1tf_safe_maddr(pte)) )
    {
#ifdef CONFIG_SHADOW_PAGING
        struct tasklet *t = &d->arch.paging.shadow.pv_l1tf_tasklet;

        printk(XENLOG_G_WARNING
               "d%d L1TF-vulnerable L%ue %016"PRIx64" - Shadowing\n",
               d->domain_id, level, pte);
        /*
         * Safety consideration for accessing tasklet.scheduled_on without the
         * tasklet lock.  This is a singleshot tasklet with the side effect of
         * setting PG_SH_forced (checked just above).  Multiple vcpus can race
         * to schedule the tasklet, but if we observe it scheduled anywhere,
         * that is good enough.
         */
        smp_rmb();
        if ( !tasklet_is_scheduled(t) )
            tasklet_schedule(t);
#else
        printk(XENLOG_G_ERR
               "d%d L1TF-vulnerable L%ue %016"PRIx64" - Crashing\n",
               d->domain_id, level, pte);
        domain_crash(d);
#endif
        return true;
    }

    return false;
}

static inline bool pv_l1tf_check_l1e(struct domain *d, l1_pgentry_t l1e)
{
    return pv_l1tf_check_pte(d, 1, l1e.l1);
}

static inline bool pv_l1tf_check_l2e(struct domain *d, l2_pgentry_t l2e)
{
    return pv_l1tf_check_pte(d, 2, l2e.l2);
}

static inline bool pv_l1tf_check_l3e(struct domain *d, l3_pgentry_t l3e)
{
    return pv_l1tf_check_pte(d, 3, l3e.l3);
}

static inline bool pv_l1tf_check_l4e(struct domain *d, l4_pgentry_t l4e)
{
    return pv_l1tf_check_pte(d, 4, l4e.l4);
}

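/*
 * Illustrative caller sketch (hypothetical; not the actual PV pagetable
 * update code).  A caller validating a guest-supplied, not-present l1e
 * might do:
 *
 *     if ( !(l1e_get_flags(nl1e) & _PAGE_PRESENT) &&
 *          pv_l1tf_check_l1e(d, nl1e) )
 *         return -ERESTART;  // guest is now being shadowed; retry the write
 *
 * Note the Present check comes first: pv_l1tf_check_pte() asserts that it is
 * only handed not-present PTEs.
 */
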
void pv_l1tf_tasklet(void *data);

static inline void pv_l1tf_domain_init(struct domain *d)
{
    d->arch.pv.check_l1tf = is_hardware_domain(d) ? opt_pv_l1tf_hwdom
                                                  : opt_pv_l1tf_domu;

#ifdef CONFIG_SHADOW_PAGING
    tasklet_init(&d->arch.paging.shadow.pv_l1tf_tasklet, pv_l1tf_tasklet, d);
#endif
}

static inline void pv_l1tf_domain_destroy(struct domain *d)
{
#ifdef CONFIG_SHADOW_PAGING
    tasklet_kill(&d->arch.paging.shadow.pv_l1tf_tasklet);
#endif
}

#endif /* CONFIG_PV */

/* Remove all shadows of the guest mfn. */
static inline void shadow_remove_all_shadows(struct domain *d, mfn_t gmfn)
{
    /* See the comment about locking in sh_remove_shadows */
    sh_remove_shadows(d, gmfn, 0 /* Be thorough */, 1 /* Must succeed */);
}

#endif /* _XEN_SHADOW_H */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */