/*
 * vvmx.h: Support virtual VMX for nested virtualization.
 *
 * Copyright (c) 2010, Intel Corporation.
 * Author: Qing He <qing.he@intel.com>
 *         Eddie Dong <eddie.dong@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 *
 */
#ifndef __ASM_X86_HVM_VVMX_H__
#define __ASM_X86_HVM_VVMX_H__

struct vvmcs_list {
    unsigned long vvmcs_mfn;
    struct list_head node;
};

struct nestedvmx {
    /*
     * vmxon_region_pa also indicates whether a vcpu is in VMX
     * operation. When a vcpu is not in VMX operation, its
     * vmxon_region_pa is set to the invalid address INVALID_PADDR. We
     * cannot use 0 for this purpose, because 0 is a valid VMXON region
     * address.
     */
    paddr_t    vmxon_region_pa;
    void       *iobitmap[2];		/* map (va) of L1 guest I/O bitmap */
    void       *msrbitmap;		/* map (va) of L1 guest MSR bitmap */
    /* deferred nested interrupt */
    struct {
        unsigned long intr_info;
        u32           error_code;
        u8            source;
    } intr;
    struct {
        bool_t   enabled;
        uint32_t exit_reason;
        uint32_t exit_qual;
    } ept;
    uint32_t guest_vpid;
    struct list_head launched_list;
};

#define vcpu_2_nvmx(v)	(vcpu_nestedhvm(v).u.nvmx)
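
/*
 * Illustrative sketch (not part of the original interface): testing
 * whether a vcpu has entered VMX operation, following the
 * vmxon_region_pa convention documented in struct nestedvmx above.
 */
static inline bool_t nvmx_vcpu_in_vmx(const struct vcpu *v)
{
    return vcpu_2_nvmx(v).vmxon_region_pa != INVALID_PADDR;
}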

/* bits 1, 2, 4 must be 1 */
#define VMX_PINBASED_CTLS_DEFAULT1	0x16
/* bits 1, 4-6, 8, 13-16, 26 must be 1 */
#define VMX_PROCBASED_CTLS_DEFAULT1	0x401e172
/* bits 0-8, 10, 11, 13, 14, 16, 17 must be 1 */
#define VMX_EXIT_CTLS_DEFAULT1		0x36dff
/* bits 0-8 and 12 must be 1 */
#define VMX_ENTRY_CTLS_DEFAULT1		0x11ff
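
/*
 * Usage sketch (hypothetical helper, for illustration only): the
 * DEFAULT1 masks above are bits that must read as 1 in the
 * corresponding VMX control, so a value exposed to the L1 guest would
 * simply OR them in.
 */
static inline u32 vmx_ctls_apply_default1(u32 value, u32 default1)
{
    return value | default1;
}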

/*
 * Encoding of VMX instructions, based on Tables 24-11 & 24-12 of SDM 3B
 */

enum vmx_regs_enc {
    VMX_REG_RAX,
    VMX_REG_RCX,
    VMX_REG_RDX,
    VMX_REG_RBX,
    VMX_REG_RSP,
    VMX_REG_RBP,
    VMX_REG_RSI,
    VMX_REG_RDI,
    VMX_REG_R8,
    VMX_REG_R9,
    VMX_REG_R10,
    VMX_REG_R11,
    VMX_REG_R12,
    VMX_REG_R13,
    VMX_REG_R14,
    VMX_REG_R15,
};

union vmx_inst_info {
    struct {
        unsigned int scaling           :2; /* bits 0-1 */
        unsigned int __rsvd0           :1; /* bit 2 */
        unsigned int reg1              :4; /* bits 3-6 */
        unsigned int addr_size         :3; /* bits 7-9 */
        unsigned int memreg            :1; /* bit 10 */
        unsigned int __rsvd1           :4; /* bits 11-14 */
        unsigned int segment           :3; /* bits 15-17 */
        unsigned int index_reg         :4; /* bits 18-21 */
        unsigned int index_reg_invalid :1; /* bit 22 */
        unsigned int base_reg          :4; /* bits 23-26 */
        unsigned int base_reg_invalid  :1; /* bit 27 */
        unsigned int reg2              :4; /* bits 28-31 */
    } fields;
    u32 word;
};
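
/*
 * Decode sketch (hypothetical helper): distinguishing the register and
 * memory forms of a VMX instruction operand from the VM-exit
 * instruction information field, per the bitfield layout above.
 */
static inline bool_t vmx_inst_is_reg_form(u32 info)
{
    union vmx_inst_info i = { .word = info };

    /*
     * memreg == 1: the operand is the register encoded in reg1 (see
     * enum vmx_regs_enc).  memreg == 0: the operand is in memory,
     * described by segment/base_reg/index_reg/scaling.
     */
    return i.fields.memreg;
}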

int nvmx_vcpu_initialise(struct vcpu *v);
void nvmx_vcpu_destroy(struct vcpu *v);
int nvmx_vcpu_reset(struct vcpu *v);
uint64_t nvmx_vcpu_eptp_base(struct vcpu *v);
enum hvm_intblk nvmx_intr_blocked(struct vcpu *v);
bool_t nvmx_intercepts_exception(
    struct vcpu *v, unsigned int vector, int error_code);
void nvmx_domain_relinquish_resources(struct domain *d);

bool_t nvmx_ept_enabled(struct vcpu *v);

int nvmx_handle_vmxon(struct cpu_user_regs *regs);
int nvmx_handle_vmxoff(struct cpu_user_regs *regs);

#define EPT_TRANSLATE_SUCCEED       0
#define EPT_TRANSLATE_VIOLATION     1
#define EPT_TRANSLATE_MISCONFIG     2
#define EPT_TRANSLATE_RETRY         3

int
nvmx_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
                     unsigned int *page_order, uint8_t *p2m_acc,
                     bool_t access_r, bool_t access_w, bool_t access_x);
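
/*
 * Dispatch sketch (hypothetical caller, for illustration): acting on
 * the EPT_TRANSLATE_* result of an L1 p2m walk such as
 * nvmx_hap_walk_L1_p2m().
 */
static inline bool_t nept_walk_faulted(int rc)
{
    switch ( rc )
    {
    case EPT_TRANSLATE_SUCCEED:   /* L1 GPA obtained; continue */
    case EPT_TRANSLATE_RETRY:     /* transient failure; retry the walk */
        return 0;
    case EPT_TRANSLATE_VIOLATION: /* reflect an EPT violation to L1 */
    case EPT_TRANSLATE_MISCONFIG: /* reflect an EPT misconfig to L1 */
    default:
        return 1;
    }
}
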
/*
 * Virtual VMCS layout
 *
 * Since the physical VMCS layout is unknown, a custom layout is used
 * for the virtual VMCS seen by the guest. It occupies a 4k page, and
 * each field is located by a 9-bit offset into a u64[] array. The
 * offset is laid out as follows, which gives every <width, type> pair
 * a maximum of 32 fields:
 *
 *             9       7      5               0
 *             --------------------------------
 *     offset: | width | type |     index     |
 *             --------------------------------
 *
 * Also, since the lower range <width=0, type={0,1}> has only one
 * field, VPID, that field is moved to a higher offset (63), leaving
 * the lower range to non-indexed fields like the VMCS revision.
 */

struct vvmcs_header {
    u32 revision;
    u32 abort;
};

union vmcs_encoding {
    struct {
        u32 access_type : 1;
        u32 index : 9;
        u32 type : 2;
        u32 rsv1 : 1;
        u32 width : 2;
        u32 rsv2 : 17;
    };
    u32 word;
};

enum vvmcs_encoding_width {
    VVMCS_WIDTH_16 = 0,
    VVMCS_WIDTH_64,
    VVMCS_WIDTH_32,
    VVMCS_WIDTH_NATURAL,
};

enum vvmcs_encoding_type {
    VVMCS_TYPE_CONTROL = 0,
    VVMCS_TYPE_RO,
    VVMCS_TYPE_GSTATE,
    VVMCS_TYPE_HSTATE,
};
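
/*
 * Offset sketch (hypothetical helper; the real lookup lives in the
 * implementation): folding a field's <width, type, index> triple into
 * the 9-bit virtual VMCS offset described above, including the VPID
 * relocation to offset 63.
 */
static inline unsigned int vvmcs_offset_of(u32 width, u32 type, u32 index)
{
    unsigned int offset = (index & 0x1f) | (type << 5) | (width << 7);

    /*
     * <width=0, type=0, index=0> would be offset 0: that slot is VPID,
     * which is relocated to keep the low offsets for non-indexed fields.
     */
    if ( offset == 0 )
        offset = 63;

    return offset;
}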

u64 get_vvmcs_virtual(void *vvmcs, u32 encoding);
u64 get_vvmcs_real(const struct vcpu *, u32 encoding);
void set_vvmcs_virtual(void *vvmcs, u32 encoding, u64 val);
void set_vvmcs_real(const struct vcpu *, u32 encoding, u64 val);
enum vmx_insn_errno get_vvmcs_virtual_safe(void *vvmcs, u32 encoding, u64 *val);
enum vmx_insn_errno get_vvmcs_real_safe(const struct vcpu *, u32 encoding,
                                        u64 *val);
enum vmx_insn_errno set_vvmcs_virtual_safe(void *vvmcs, u32 encoding, u64 val);
enum vmx_insn_errno set_vvmcs_real_safe(const struct vcpu *, u32 encoding,
                                        u64 val);

#define get_vvmcs(vcpu, encoding) \
  (cpu_has_vmx_vmcs_shadowing ? \
   get_vvmcs_real(vcpu, encoding) : \
   get_vvmcs_virtual(vcpu_nestedhvm(vcpu).nv_vvmcx, encoding))

#define set_vvmcs(vcpu, encoding, val) \
  (cpu_has_vmx_vmcs_shadowing ? \
   set_vvmcs_real(vcpu, encoding, val) : \
   set_vvmcs_virtual(vcpu_nestedhvm(vcpu).nv_vvmcx, encoding, val))

#define get_vvmcs_safe(vcpu, encoding, val) \
  (cpu_has_vmx_vmcs_shadowing ? \
   get_vvmcs_real_safe(vcpu, encoding, val) : \
   get_vvmcs_virtual_safe(vcpu_nestedhvm(vcpu).nv_vvmcx, encoding, val))

#define set_vvmcs_safe(vcpu, encoding, val) \
  (cpu_has_vmx_vmcs_shadowing ? \
   set_vvmcs_real_safe(vcpu, encoding, val) : \
   set_vvmcs_virtual_safe(vcpu_nestedhvm(vcpu).nv_vvmcx, encoding, val))
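
/*
 * Usage sketch (illustrative): the macros above transparently pick the
 * shadow-VMCS path when the hardware supports VMCS shadowing and the
 * software virtual VMCS otherwise.  GUEST_RIP is assumed to be the
 * usual VMCS field encoding from vmcs.h.
 */
static inline void vvmcs_skip_instruction(struct vcpu *v, unsigned long ilen)
{
    unsigned long rip = get_vvmcs(v, GUEST_RIP);

    set_vvmcs(v, GUEST_RIP, rip + ilen);
}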

uint64_t get_shadow_eptp(struct vcpu *v);

void nvmx_destroy_vmcs(struct vcpu *v);
int nvmx_handle_vmptrld(struct cpu_user_regs *regs);
int nvmx_handle_vmptrst(struct cpu_user_regs *regs);
int nvmx_handle_vmclear(struct cpu_user_regs *regs);
int nvmx_handle_vmread(struct cpu_user_regs *regs);
int nvmx_handle_vmwrite(struct cpu_user_regs *regs);
int nvmx_handle_vmresume(struct cpu_user_regs *regs);
int nvmx_handle_vmlaunch(struct cpu_user_regs *regs);
int nvmx_handle_invept(struct cpu_user_regs *regs);
int nvmx_handle_invvpid(struct cpu_user_regs *regs);
int nvmx_msr_read_intercept(unsigned int msr, u64 *msr_content);

void nvmx_update_exec_control(struct vcpu *v, u32 value);
void nvmx_update_secondary_exec_control(struct vcpu *v,
                                        unsigned long value);
void nvmx_update_exception_bitmap(struct vcpu *v, unsigned long value);
void nvmx_switch_guest(void);
void nvmx_idtv_handling(void);
u64 nvmx_get_tsc_offset(struct vcpu *v);
int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
                           unsigned int exit_reason);
void nvmx_set_cr_read_shadow(struct vcpu *v, unsigned int cr);

uint64_t nept_get_ept_vpid_cap(void);

int nept_translate_l2ga(struct vcpu *v, paddr_t l2ga,
                        unsigned int *page_order, uint32_t rwx_acc,
                        unsigned long *l1gfn, uint8_t *p2m_acc,
                        uint64_t *exit_qual, uint32_t *exit_reason);
int nvmx_cpu_up_prepare(unsigned int cpu);
void nvmx_cpu_dead(unsigned int cpu);
#endif /* __ASM_X86_HVM_VVMX_H__ */