/*
 * hvm.h: Hardware virtual machine assist interface definitions.
 *
 * Leendert van Doorn, leendert@watson.ibm.com
 * Copyright (c) 2005, International Business Machines Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_X86_HVM_HVM_H__
#define __ASM_X86_HVM_HVM_H__

#include <asm/alternative.h>
#include <asm/asm_defns.h>
#include <asm/current.h>
#include <asm/x86_emulate.h>
#include <asm/hvm/asid.h>

#ifdef CONFIG_HVM_FEP
/* Permit use of the Forced Emulation Prefix in HVM guests */
extern bool_t opt_hvm_fep;
#else
#define opt_hvm_fep 0
#endif

/* Interrupt acknowledgement sources. */
enum hvm_intsrc {
    hvm_intsrc_none,
    hvm_intsrc_pic,
    hvm_intsrc_lapic,
    hvm_intsrc_nmi,
    hvm_intsrc_mce,
    hvm_intsrc_vector
};
struct hvm_intack {
    uint8_t source; /* enum hvm_intsrc */
    uint8_t vector;
};
#define hvm_intack(src, vec)   ((struct hvm_intack) { hvm_intsrc_##src, vec })
#define hvm_intack_none        hvm_intack(none, 0)
#define hvm_intack_pic(vec)    hvm_intack(pic, vec)
#define hvm_intack_lapic(vec)  hvm_intack(lapic, vec)
#define hvm_intack_nmi         hvm_intack(nmi, 2)
#define hvm_intack_mce         hvm_intack(mce, 18)
#define hvm_intack_vector(vec) hvm_intack(vector, vec)
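
/*
 * Illustrative example (not part of the interface): acknowledging a LAPIC
 * interrupt on vector 0x30 builds a compound literal:
 *
 *     struct hvm_intack ack = hvm_intack_lapic(0x30);
 *     ... expands to ((struct hvm_intack) { hvm_intsrc_lapic, 0x30 }) ...
 *
 * NMI and MCE use their fixed architectural vectors (2 and 18).
 */
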
enum hvm_intblk {
    hvm_intblk_none,      /* not blocked (deliverable) */
    hvm_intblk_shadow,    /* MOV-SS or STI shadow */
    hvm_intblk_rflags_ie, /* RFLAGS.IE == 0 */
    hvm_intblk_tpr,       /* LAPIC TPR too high */
    hvm_intblk_nmi_iret,  /* NMI blocked until IRET */
    hvm_intblk_arch,      /* SVM/VMX specific reason */
};

/* These happen to be the same as the VMX interrupt shadow definitions. */
#define HVM_INTR_SHADOW_STI    0x00000001
#define HVM_INTR_SHADOW_MOV_SS 0x00000002
#define HVM_INTR_SHADOW_SMI    0x00000004
#define HVM_INTR_SHADOW_NMI    0x00000008

/*
 * HAP super page capabilities:
 * bit0: whether 2MB super pages are allowed
 * bit1: whether 1GB super pages are allowed
 */
#define HVM_HAP_SUPERPAGE_2MB   0x00000001
#define HVM_HAP_SUPERPAGE_1GB   0x00000002
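
/*
 * Illustrative sketch (the caller shown here is an assumption, not code from
 * this header): these bits are tested against hvm_funcs.hap_capabilities,
 * e.g.
 *
 *     if ( hvm_funcs.hap_capabilities & HVM_HAP_SUPERPAGE_1GB )
 *         ... 1GB mappings may be used in the HAP p2m ...
 *
 * The hap_has_1gb/hap_has_2mb macros further down wrap exactly this test.
 */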

#define HVM_EVENT_VECTOR_UNSET    (-1)
#define HVM_EVENT_VECTOR_UPDATING (-2)

/* update_guest_cr() flags. */
#define HVM_UPDATE_GUEST_CR3_NOFLUSH 0x00000001

/*
 * The hardware virtual machine (HVM) interface abstracts away from the
 * x86/x86_64 CPU virtualization assist specifics. Currently this interface
 * supports Intel's VT-x and AMD's SVM extensions.
 */
struct hvm_function_table {
    char *name;

    /* Support Hardware-Assisted Paging? */
    bool_t hap_supported;

    /* Necessary hardware support for alternate p2m's? */
    bool altp2m_supported;

    /* Hardware virtual interrupt delivery enable? */
    bool virtual_intr_delivery_enabled;

    /* Indicate HAP capabilities. */
    unsigned int hap_capabilities;

    /*
     * Initialise/destroy HVM domain/vcpu resources
     */
    int  (*domain_initialise)(struct domain *d);
    void (*domain_relinquish_resources)(struct domain *d);
    void (*domain_destroy)(struct domain *d);
    int  (*vcpu_initialise)(struct vcpu *v);
    void (*vcpu_destroy)(struct vcpu *v);

    /* save and load hvm guest cpu context for save/restore */
    void (*save_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt);
    int (*load_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt);

    /* Examine specifics of the guest state. */
    unsigned int (*get_interrupt_shadow)(struct vcpu *v);
    void (*set_interrupt_shadow)(struct vcpu *v, unsigned int intr_shadow);
    int (*guest_x86_mode)(struct vcpu *v);
    unsigned int (*get_cpl)(struct vcpu *v);
    void (*get_segment_register)(struct vcpu *v, enum x86_segment seg,
                                 struct segment_register *reg);
    void (*set_segment_register)(struct vcpu *v, enum x86_segment seg,
                                 struct segment_register *reg);
    unsigned long (*get_shadow_gs_base)(struct vcpu *v);

    /*
     * Re-set the value of CR3 that Xen runs on when handling VM exits.
     */
    void (*update_host_cr3)(struct vcpu *v);

    /*
     * Called to inform HVM layer that a guest CRn or EFER has changed.
     */
    void (*update_guest_cr)(struct vcpu *v, unsigned int cr,
                            unsigned int flags);
    void (*update_guest_efer)(struct vcpu *v);

    void (*cpuid_policy_changed)(struct vcpu *v);

    void (*fpu_leave)(struct vcpu *v);

    int  (*get_guest_pat)(struct vcpu *v, u64 *);
    int  (*set_guest_pat)(struct vcpu *v, u64);

    bool (*get_guest_bndcfgs)(struct vcpu *v, u64 *);
    bool (*set_guest_bndcfgs)(struct vcpu *v, u64);

    void (*set_tsc_offset)(struct vcpu *v, u64 offset, u64 at_tsc);

    void (*inject_event)(const struct x86_event *event);

    void (*init_hypercall_page)(void *ptr);

    bool (*event_pending)(const struct vcpu *v);
    bool (*get_pending_event)(struct vcpu *v, struct x86_event *info);
    void (*invlpg)(struct vcpu *v, unsigned long linear);

    int  (*cpu_up_prepare)(unsigned int cpu);
    void (*cpu_dead)(unsigned int cpu);

    int  (*cpu_up)(void);
    void (*cpu_down)(void);

    /* Copy up to 15 bytes from cached instruction bytes at current rIP. */
    unsigned int (*get_insn_bytes)(struct vcpu *v, uint8_t *buf);

    /* Instruction intercepts: non-void return values are X86EMUL codes. */
    void (*wbinvd_intercept)(void);
    void (*fpu_dirty_intercept)(void);
    int (*msr_read_intercept)(unsigned int msr, uint64_t *msr_content);
    int (*msr_write_intercept)(unsigned int msr, uint64_t msr_content);
    void (*handle_cd)(struct vcpu *v, unsigned long value);
    void (*set_info_guest)(struct vcpu *v);
    void (*set_rdtsc_exiting)(struct vcpu *v, bool_t);
    void (*set_descriptor_access_exiting)(struct vcpu *v, bool);

    /* Nested HVM */
    int (*nhvm_vcpu_initialise)(struct vcpu *v);
    void (*nhvm_vcpu_destroy)(struct vcpu *v);
    int (*nhvm_vcpu_reset)(struct vcpu *v);
    int (*nhvm_vcpu_vmexit_event)(struct vcpu *v, const struct x86_event *event);
    uint64_t (*nhvm_vcpu_p2m_base)(struct vcpu *v);
    bool_t (*nhvm_vmcx_guest_intercepts_event)(
        struct vcpu *v, unsigned int vector, int errcode);

    bool_t (*nhvm_vmcx_hap_enabled)(struct vcpu *v);

    enum hvm_intblk (*nhvm_intr_blocked)(struct vcpu *v);
    void (*nhvm_domain_relinquish_resources)(struct domain *d);

    /* Virtual interrupt delivery */
    void (*update_eoi_exit_bitmap)(struct vcpu *v, u8 vector, u8 trig);
    void (*process_isr)(int isr, struct vcpu *v);
    void (*deliver_posted_intr)(struct vcpu *v, u8 vector);
    void (*sync_pir_to_irr)(struct vcpu *v);
    bool (*test_pir)(const struct vcpu *v, uint8_t vector);
    void (*handle_eoi)(uint8_t vector, int isr);

    /* Walk nested p2m */
    int (*nhvm_hap_walk_L1_p2m)(struct vcpu *v, paddr_t L2_gpa,
                                paddr_t *L1_gpa, unsigned int *page_order,
                                uint8_t *p2m_acc, bool_t access_r,
                                bool_t access_w, bool_t access_x);

    void (*enable_msr_interception)(struct domain *d, uint32_t msr);
    bool_t (*is_singlestep_supported)(void);

    /* Alternate p2m */
    void (*altp2m_vcpu_update_p2m)(struct vcpu *v);
    void (*altp2m_vcpu_update_vmfunc_ve)(struct vcpu *v);
    bool_t (*altp2m_vcpu_emulate_ve)(struct vcpu *v);
    int (*altp2m_vcpu_emulate_vmfunc)(const struct cpu_user_regs *regs);

    /*
     * Parameters and callbacks for hardware-assisted TSC scaling,
     * which are valid only when the hardware feature is available.
     */
    struct {
        /* number of bits of the fractional part of TSC scaling ratio */
        uint8_t  ratio_frac_bits;
        /* maximum-allowed TSC scaling ratio */
        uint64_t max_ratio;

        /* Architecture function to setup TSC scaling ratio */
        void (*setup)(struct vcpu *v);
    } tsc_scaling;
};
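
/*
 * Illustrative sketch (names and values are invented for the example): each
 * vendor module builds one of these tables and returns it from its start_*()
 * routine, roughly:
 *
 *     static struct hvm_function_table example_function_table = {
 *         .name                  = "EXAMPLE",
 *         .hap_supported         = 1,
 *         .hap_capabilities      = HVM_HAP_SUPERPAGE_2MB,
 *         .guest_x86_mode        = example_guest_x86_mode,
 *         .update_guest_cr       = example_update_guest_cr,
 *         .msr_read_intercept    = example_msr_read_intercept,
 *     };
 *
 * Hooks left unset stay NULL; the wrappers below check optional hooks before
 * calling them.
 */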

extern struct hvm_function_table hvm_funcs;
extern bool_t hvm_enabled;
extern s8 hvm_port80_allowed;

extern const struct hvm_function_table *start_svm(void);
extern const struct hvm_function_table *start_vmx(void);

int hvm_domain_initialise(struct domain *d);
void hvm_domain_relinquish_resources(struct domain *d);
void hvm_domain_destroy(struct domain *d);

int hvm_vcpu_initialise(struct vcpu *v);
void hvm_vcpu_destroy(struct vcpu *v);
void hvm_vcpu_down(struct vcpu *v);
int hvm_vcpu_cacheattr_init(struct vcpu *v);
void hvm_vcpu_cacheattr_destroy(struct vcpu *v);
void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip);

void hvm_get_guest_pat(struct vcpu *v, u64 *guest_pat);
int hvm_set_guest_pat(struct vcpu *v, u64 guest_pat);

u64 hvm_get_guest_tsc_fixed(struct vcpu *v, u64 at_tsc);

u64 hvm_scale_tsc(const struct domain *d, u64 tsc);
u64 hvm_get_tsc_scaling_ratio(u32 gtsc_khz);

void hvm_init_guest_time(struct domain *d);
void hvm_set_guest_time(struct vcpu *v, u64 guest_time);
uint64_t hvm_get_guest_time_fixed(const struct vcpu *v, uint64_t at_tsc);

int vmsi_deliver(
    struct domain *d, int vector,
    uint8_t dest, uint8_t dest_mode,
    uint8_t delivery_mode, uint8_t trig_mode);
struct hvm_pirq_dpci;
void vmsi_deliver_pirq(struct domain *d, const struct hvm_pirq_dpci *);
int hvm_girq_dest_2_vcpu_id(struct domain *d, uint8_t dest, uint8_t dest_mode);

enum hvm_intblk
hvm_interrupt_blocked(struct vcpu *v, struct hvm_intack intack);

void hvm_init_hypercall_page(struct domain *d, void *ptr);

void hvm_get_segment_register(struct vcpu *v, enum x86_segment seg,
                              struct segment_register *reg);
void hvm_set_segment_register(struct vcpu *v, enum x86_segment seg,
                              struct segment_register *reg);

void hvm_set_info_guest(struct vcpu *v);

bool hvm_set_guest_bndcfgs(struct vcpu *v, u64 val);

int hvm_vmexit_cpuid(struct cpu_user_regs *regs, unsigned int inst_len);
void hvm_migrate_timers(struct vcpu *v);
void hvm_do_resume(struct vcpu *v);
void hvm_migrate_pirq(struct hvm_pirq_dpci *pirq_dpci, const struct vcpu *v);
void hvm_migrate_pirqs(struct vcpu *v);

void hvm_inject_event(const struct x86_event *event);

int hvm_event_needs_reinjection(uint8_t type, uint8_t vector);

uint8_t hvm_combine_hw_exceptions(uint8_t vec1, uint8_t vec2);

void hvm_set_rdtsc_exiting(struct domain *d, bool_t enable);

enum hvm_task_switch_reason { TSW_jmp, TSW_iret, TSW_call_or_int };
void hvm_task_switch(
    uint16_t tss_sel, enum hvm_task_switch_reason taskswitch_reason,
    int32_t errcode, unsigned int insn_len, unsigned int extra_eflags);

enum hvm_access_type {
    hvm_access_insn_fetch,
    hvm_access_none,
    hvm_access_read,
    hvm_access_write
};
bool_t hvm_virtual_to_linear_addr(
    enum x86_segment seg,
    const struct segment_register *reg,
    unsigned long offset,
    unsigned int bytes,
    enum hvm_access_type access_type,
    const struct segment_register *active_cs,
    unsigned long *linear_addr);
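
/*
 * Illustrative sketch (local variable names are the example's own): to
 * translate a data read through %ds into a linear address:
 *
 *     struct segment_register dsreg, csreg;
 *     unsigned long linear;
 *
 *     hvm_get_segment_register(curr, x86_seg_ds, &dsreg);
 *     hvm_get_segment_register(curr, x86_seg_cs, &csreg);
 *     if ( hvm_virtual_to_linear_addr(x86_seg_ds, &dsreg, offset, bytes,
 *                                     hvm_access_read, &csreg, &linear) )
 *         ... 'linear' now holds the linear address ...
 *
 * A zero return means the access is not representable (e.g. a segment limit
 * violation).
 */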

void *hvm_map_guest_frame_rw(unsigned long gfn, bool_t permanent,
                             bool_t *writable);
void *hvm_map_guest_frame_ro(unsigned long gfn, bool_t permanent);
void hvm_unmap_guest_frame(void *p, bool_t permanent);
void hvm_mapped_guest_frames_mark_dirty(struct domain *);

int hvm_debug_op(struct vcpu *v, int32_t op);

/* Caller should pause vcpu before calling this function */
void hvm_toggle_singlestep(struct vcpu *v);
void hvm_fast_singlestep(struct vcpu *v, uint16_t p2midx);

struct npfec;
int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
                              struct npfec npfec);

/* Check CR4/EFER values */
const char *hvm_efer_valid(const struct vcpu *v, uint64_t value,
                           signed int cr0_pg);
unsigned long hvm_cr4_guest_valid_bits(const struct domain *d, bool restore);

int hvm_copy_context_and_params(struct domain *src, struct domain *dst);

int hvm_get_param(struct domain *d, uint32_t index, uint64_t *value);

#ifdef CONFIG_HVM

#define hvm_get_guest_tsc(v) hvm_get_guest_tsc_fixed(v, 0)

#define hvm_tsc_scaling_supported \
    (!!hvm_funcs.tsc_scaling.ratio_frac_bits)

#define hvm_default_tsc_scaling_ratio \
    (1ULL << hvm_funcs.tsc_scaling.ratio_frac_bits)

#define hvm_tsc_scaling_ratio(d) \
    ((d)->arch.hvm.tsc_scaling_ratio)
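
/*
 * Illustrative numbers (an assumption about the vendor parameters, which are
 * not defined in this header): with ratio_frac_bits == 32 the ratio is a
 * 32.32 fixed-point value, so the default 1:1 ratio is 1ULL << 32 and a
 * guest whose TSC runs at half the host frequency uses 1ULL << 31.
 */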

#define hvm_get_guest_time(v) hvm_get_guest_time_fixed(v, 0)

#define hvm_paging_enabled(v) \
    (!!((v)->arch.hvm.guest_cr[0] & X86_CR0_PG))
#define hvm_wp_enabled(v) \
    (!!((v)->arch.hvm.guest_cr[0] & X86_CR0_WP))
#define hvm_pcid_enabled(v) \
    (!!((v)->arch.hvm.guest_cr[4] & X86_CR4_PCIDE))
#define hvm_pae_enabled(v) \
    (hvm_paging_enabled(v) && ((v)->arch.hvm.guest_cr[4] & X86_CR4_PAE))
#define hvm_smep_enabled(v) \
    (hvm_paging_enabled(v) && ((v)->arch.hvm.guest_cr[4] & X86_CR4_SMEP))
#define hvm_smap_enabled(v) \
    (hvm_paging_enabled(v) && ((v)->arch.hvm.guest_cr[4] & X86_CR4_SMAP))
#define hvm_nx_enabled(v) \
    ((v)->arch.hvm.guest_efer & EFER_NX)
#define hvm_pku_enabled(v) \
    (hvm_paging_enabled(v) && ((v)->arch.hvm.guest_cr[4] & X86_CR4_PKE))

/* Can we use superpages in the HAP p2m table? */
#define hap_has_1gb (!!(hvm_funcs.hap_capabilities & HVM_HAP_SUPERPAGE_1GB))
#define hap_has_2mb (!!(hvm_funcs.hap_capabilities & HVM_HAP_SUPERPAGE_2MB))

#define hvm_long_mode_active(v) (!!((v)->arch.hvm.guest_efer & EFER_LMA))

static inline bool hvm_has_set_descriptor_access_exiting(void)
{
    return hvm_funcs.set_descriptor_access_exiting;
}

static inline int
hvm_guest_x86_mode(struct vcpu *v)
{
    ASSERT(v == current);
    return alternative_call(hvm_funcs.guest_x86_mode, v);
}

static inline void
hvm_update_host_cr3(struct vcpu *v)
{
    if ( hvm_funcs.update_host_cr3 )
        alternative_vcall(hvm_funcs.update_host_cr3, v);
}

static inline void hvm_update_guest_cr(struct vcpu *v, unsigned int cr)
{
    alternative_vcall(hvm_funcs.update_guest_cr, v, cr, 0);
}

static inline void hvm_update_guest_cr3(struct vcpu *v, bool noflush)
{
    unsigned int flags = noflush ? HVM_UPDATE_GUEST_CR3_NOFLUSH : 0;

    alternative_vcall(hvm_funcs.update_guest_cr, v, 3, flags);
}

static inline void hvm_update_guest_efer(struct vcpu *v)
{
    alternative_vcall(hvm_funcs.update_guest_efer, v);
}

static inline void hvm_cpuid_policy_changed(struct vcpu *v)
{
    alternative_vcall(hvm_funcs.cpuid_policy_changed, v);
}

static inline void hvm_set_tsc_offset(struct vcpu *v, uint64_t offset,
                                      uint64_t at_tsc)
{
    alternative_vcall(hvm_funcs.set_tsc_offset, v, offset, at_tsc);
}

/*
 * Called to ensure that all guest-specific mappings in a tagged TLB are
 * flushed; does *not* flush Xen's TLB entries, and on processors without a
 * tagged TLB it will be a no-op.
 */
static inline void hvm_flush_guest_tlbs(void)
{
    if ( hvm_enabled )
        hvm_asid_flush_core();
}

static inline unsigned int
hvm_get_cpl(struct vcpu *v)
{
    return alternative_call(hvm_funcs.get_cpl, v);
}

static inline unsigned long hvm_get_shadow_gs_base(struct vcpu *v)
{
    return alternative_call(hvm_funcs.get_shadow_gs_base, v);
}

static inline bool hvm_get_guest_bndcfgs(struct vcpu *v, u64 *val)
{
    return hvm_funcs.get_guest_bndcfgs &&
           alternative_call(hvm_funcs.get_guest_bndcfgs, v, val);
}

#define has_hvm_params(d) \
    ((d)->arch.hvm.params != NULL)

#define viridian_feature_mask(d) \
    (has_hvm_params(d) ? (d)->arch.hvm.params[HVM_PARAM_VIRIDIAN] : 0)

#define is_viridian_domain(d) \
    (is_hvm_domain(d) && (viridian_feature_mask(d) & HVMPV_base_freq))

#define is_viridian_vcpu(v) \
    is_viridian_domain((v)->domain)

#define has_viridian_time_ref_count(d) \
    (is_viridian_domain(d) && (viridian_feature_mask(d) & HVMPV_time_ref_count))

#define has_viridian_apic_assist(d) \
    (is_viridian_domain(d) && (viridian_feature_mask(d) & HVMPV_apic_assist))

#define has_viridian_synic(d) \
    (is_viridian_domain(d) && (viridian_feature_mask(d) & HVMPV_synic))

static inline void hvm_inject_exception(
    unsigned int vector, unsigned int type,
    unsigned int insn_len, int error_code)
{
    struct x86_event event = {
        .vector = vector,
        .type = type,
        .insn_len = insn_len,
        .error_code = error_code,
    };

    hvm_inject_event(&event);
}

static inline void hvm_inject_hw_exception(unsigned int vector, int errcode)
{
    struct x86_event event = {
        .vector = vector,
        .type = X86_EVENTTYPE_HW_EXCEPTION,
        .error_code = errcode,
    };

    hvm_inject_event(&event);
}

static inline void hvm_inject_page_fault(int errcode, unsigned long cr2)
{
    struct x86_event event = {
        .vector = TRAP_page_fault,
        .type = X86_EVENTTYPE_HW_EXCEPTION,
        .error_code = errcode,
        .cr2 = cr2,
    };

    hvm_inject_event(&event);
}
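
/*
 * Illustrative sketch: a typical intercept handler raises #GP(0) into the
 * guest and signals the failure to its caller, e.g.
 *
 *     hvm_inject_hw_exception(TRAP_gp_fault, 0);
 *     return X86EMUL_EXCEPTION;
 *
 * (X86EMUL_EXCEPTION is the convention on the emulation paths; other callers
 * may simply return after the injection.)
 */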

static inline bool hvm_event_pending(const struct vcpu *v)
{
    return alternative_call(hvm_funcs.event_pending, v);
}

static inline void hvm_invlpg(struct vcpu *v, unsigned long linear)
{
    alternative_vcall(hvm_funcs.invlpg, v, linear);
}

/* These bits in CR4 are owned by the host. */
#define HVM_CR4_HOST_MASK (mmu_cr4_features & \
    (X86_CR4_VMXE | X86_CR4_PAE | X86_CR4_MCE))

/* These exceptions must always be intercepted. */
#define HVM_TRAP_MASK ((1U << TRAP_debug)           | \
                       (1U << TRAP_alignment_check) | \
                       (1U << TRAP_machine_check))

static inline int hvm_cpu_up(void)
{
    return (hvm_funcs.cpu_up ? hvm_funcs.cpu_up() : 0);
}

static inline void hvm_cpu_down(void)
{
    if ( hvm_funcs.cpu_down )
        hvm_funcs.cpu_down();
}

static inline unsigned int hvm_get_insn_bytes(struct vcpu *v, uint8_t *buf)
{
    return (hvm_funcs.get_insn_bytes
            ? alternative_call(hvm_funcs.get_insn_bytes, v, buf) : 0);
}

static inline void hvm_invalidate_regs_fields(struct cpu_user_regs *regs)
{
#ifndef NDEBUG
    regs->error_code = 0xbeef;
    regs->entry_vector = 0xbeef;
    regs->saved_upcall_mask = 0xbf;
    regs->cs = 0xbeef;
    regs->ss = 0xbeef;
    regs->ds = 0xbeef;
    regs->es = 0xbeef;
    regs->fs = 0xbeef;
    regs->gs = 0xbeef;
#endif
}

/*
 * Nested HVM
 */

/* inject vmexit into l1 guest. l1 guest will see a VMEXIT caused by the
 * given 'event' exception.
 */
static inline int nhvm_vcpu_vmexit_event(
    struct vcpu *v, const struct x86_event *event)
{
    return hvm_funcs.nhvm_vcpu_vmexit_event(v, event);
}

/* returns l1 guest's cr3 that points to the page table used to
 * translate l2 guest physical address to l1 guest physical address.
 */
static inline uint64_t nhvm_vcpu_p2m_base(struct vcpu *v)
{
    return hvm_funcs.nhvm_vcpu_p2m_base(v);
}

/* returns true when the l1 guest intercepts the specified trap */
static inline bool_t nhvm_vmcx_guest_intercepts_event(
    struct vcpu *v, unsigned int vector, int errcode)
{
    return hvm_funcs.nhvm_vmcx_guest_intercepts_event(v, vector, errcode);
}

/* returns true when l1 guest wants to use hap to run l2 guest */
static inline bool_t nhvm_vmcx_hap_enabled(struct vcpu *v)
{
    return hvm_funcs.nhvm_vmcx_hap_enabled(v);
}

/* interrupt */
static inline enum hvm_intblk nhvm_interrupt_blocked(struct vcpu *v)
{
    return hvm_funcs.nhvm_intr_blocked(v);
}

static inline bool_t hvm_enable_msr_interception(struct domain *d, uint32_t msr)
{
    if ( hvm_funcs.enable_msr_interception )
    {
        hvm_funcs.enable_msr_interception(d, msr);
        return 1;
    }

    return 0;
}

static inline bool_t hvm_is_singlestep_supported(void)
{
    return (hvm_funcs.is_singlestep_supported &&
            hvm_funcs.is_singlestep_supported());
}

static inline bool hvm_hap_supported(void)
{
    return hvm_funcs.hap_supported;
}

/* returns true if hardware supports alternate p2m's */
static inline bool hvm_altp2m_supported(void)
{
    return hvm_funcs.altp2m_supported;
}

/* updates the current hardware p2m */
static inline void altp2m_vcpu_update_p2m(struct vcpu *v)
{
    if ( hvm_funcs.altp2m_vcpu_update_p2m )
        hvm_funcs.altp2m_vcpu_update_p2m(v);
}

/* updates VMCS fields related to VMFUNC and #VE */
static inline void altp2m_vcpu_update_vmfunc_ve(struct vcpu *v)
{
    if ( hvm_funcs.altp2m_vcpu_update_vmfunc_ve )
        hvm_funcs.altp2m_vcpu_update_vmfunc_ve(v);
}

/* emulates #VE */
static inline bool altp2m_vcpu_emulate_ve(struct vcpu *v)
{
    if ( hvm_funcs.altp2m_vcpu_emulate_ve )
    {
        hvm_funcs.altp2m_vcpu_emulate_ve(v);
        return true;
    }
    return false;
}

/*
 * This must be defined as a macro instead of an inline function,
 * because it uses 'struct vcpu' and 'struct domain' which have
 * not been defined yet.
 */
#define arch_vcpu_block(v) ({                                   \
    struct vcpu *v_ = (v);                                      \
    struct domain *d_ = v_->domain;                             \
    if ( is_hvm_domain(d_) && d_->arch.hvm.pi_ops.vcpu_block )  \
        d_->arch.hvm.pi_ops.vcpu_block(v_);                     \
})

#else  /* CONFIG_HVM */

#define hvm_enabled false

/*
 * List of inline functions above, of which only declarations are
 * needed because DCE will kick in.
 */
int hvm_guest_x86_mode(struct vcpu *v);
unsigned long hvm_get_shadow_gs_base(struct vcpu *v);
void hvm_cpuid_policy_changed(struct vcpu *v);
void hvm_set_tsc_offset(struct vcpu *v, uint64_t offset, uint64_t at_tsc);
bool hvm_get_guest_bndcfgs(struct vcpu *v, uint64_t *val);

/* End of prototype list */

/* Called by code in other header */
static inline bool hvm_is_singlestep_supported(void)
{
    return false;
}

static inline bool hvm_hap_supported(void)
{
    return false;
}

static inline bool nhvm_vmcx_hap_enabled(const struct vcpu *v)
{
    ASSERT_UNREACHABLE();
    return false;
}

/* Called by common code */
static inline int hvm_cpu_up(void)
{
    return 0;
}

static inline void hvm_cpu_down(void) {}

static inline void hvm_flush_guest_tlbs(void) {}

static inline void hvm_invlpg(const struct vcpu *v, unsigned long linear)
{
    ASSERT_UNREACHABLE();
}

/*
 * Shadow code needs further cleanup to eliminate some HVM-only paths. For
 * now provide the stubs here but assert they will never be reached.
 */
static inline void hvm_update_host_cr3(const struct vcpu *v)
{
    ASSERT_UNREACHABLE();
}

static inline void hvm_update_guest_cr3(const struct vcpu *v, bool noflush)
{
    ASSERT_UNREACHABLE();
}

static inline unsigned int hvm_get_cpl(const struct vcpu *v)
{
    ASSERT_UNREACHABLE();
    return -1;
}

static inline bool hvm_event_pending(const struct vcpu *v)
{
    return false;
}

static inline void hvm_inject_hw_exception(unsigned int vector, int errcode)
{
    ASSERT_UNREACHABLE();
}

static inline bool hvm_has_set_descriptor_access_exiting(void)
{
    return false;
}

#define is_viridian_domain(d) ((void)(d), false)
#define is_viridian_vcpu(v) ((void)(v), false)
#define has_viridian_time_ref_count(d) ((void)(d), false)
#define hvm_long_mode_active(v) ((void)(v), false)
#define hvm_get_guest_time(v) ((void)(v), 0)

#define hvm_tsc_scaling_supported false
#define hap_has_1gb false
#define hap_has_2mb false

#define hvm_paging_enabled(v) ((void)(v), false)
#define hvm_wp_enabled(v) ((void)(v), false)
#define hvm_pcid_enabled(v) ((void)(v), false)
#define hvm_pae_enabled(v) ((void)(v), false)
#define hvm_smep_enabled(v) ((void)(v), false)
#define hvm_smap_enabled(v) ((void)(v), false)
#define hvm_nx_enabled(v) ((void)(v), false)
#define hvm_pku_enabled(v) ((void)(v), false)

#define arch_vcpu_block(v) ((void)(v))

#endif  /* CONFIG_HVM */

#endif /* __ASM_X86_HVM_HVM_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */