/*
 * HVM domain specific functions.
 *
 * Copyright (C) 2017 Citrix Systems R&D
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms and conditions of the GNU General Public
 * License, version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; If not, see <http://www.gnu.org/licenses/>.
 */

#include <xen/domain_page.h>
#include <xen/errno.h>
#include <xen/lib.h>
#include <xen/paging.h>
#include <xen/sched.h>

#include <public/hvm/hvm_vcpu.h>

check_segment(struct segment_register * reg,enum x86_segment seg)27 static int check_segment(struct segment_register *reg, enum x86_segment seg)
28 {
29 
30     if ( reg->pad != 0 )
31     {
32         gprintk(XENLOG_ERR, "Segment attribute bits 12-15 are not zero\n");
33         return -EINVAL;
34     }
35 
36     if ( reg->attr == 0 )
37     {
38         if ( seg != x86_seg_ds && seg != x86_seg_es )
39         {
40             gprintk(XENLOG_ERR, "Null selector provided for CS, SS or TR\n");
41             return -EINVAL;
42         }
43         return 0;
44     }
45 
46     if ( seg == x86_seg_tr )
47     {
48         if ( reg->s )
49         {
50             gprintk(XENLOG_ERR, "Code or data segment provided for TR\n");
51             return -EINVAL;
52         }
53 
54         if ( reg->type != SYS_DESC_tss_busy )
55         {
56             gprintk(XENLOG_ERR, "Non-32-bit-TSS segment provided for TR\n");
57             return -EINVAL;
58         }
59     }
60     else if ( !reg->s )
61     {
62         gprintk(XENLOG_ERR,
63                 "System segment provided for a code or data segment\n");
64         return -EINVAL;
65     }
66 
67     if ( !reg->p )
68     {
69         gprintk(XENLOG_ERR, "Non-present segment provided\n");
70         return -EINVAL;
71     }
72 
73     switch ( seg )
74     {
75     case x86_seg_cs:
76         if ( !(reg->type & 0x8) )
77         {
78             gprintk(XENLOG_ERR, "Non-code segment provided for CS\n");
79             return -EINVAL;
80         }
81         break;
82 
83     case x86_seg_ss:
84         if ( (reg->type & 0x8) || !(reg->type & 0x2) )
85         {
86             gprintk(XENLOG_ERR, "Non-writeable segment provided for SS\n");
87             return -EINVAL;
88         }
89         break;
90 
91     case x86_seg_ds:
92     case x86_seg_es:
93         if ( (reg->type & 0x8) && !(reg->type & 0x2) )
94         {
95             gprintk(XENLOG_ERR, "Non-readable segment provided for DS or ES\n");
96             return -EINVAL;
97         }
98         break;
99 
100     case x86_seg_tr:
101         break;
102 
103     default:
104         ASSERT_UNREACHABLE();
105         return -EINVAL;
106     }
107 
108     return 0;
109 }
110 
111 /* Called by VCPUOP_initialise for HVM guests. */
arch_set_info_hvm_guest(struct vcpu * v,const vcpu_hvm_context_t * ctx)112 int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
113 {
114     struct cpu_user_regs *uregs = &v->arch.user_regs;
115     struct segment_register cs, ds, ss, es, tr;
116     const char *errstr;
117     int rc;
118 
119     if ( ctx->pad != 0 )
120         return -EINVAL;
121 
122     switch ( ctx->mode )
123     {
124     default:
125         return -EINVAL;
126 
127     case VCPU_HVM_MODE_32B:
128     {
129         const struct vcpu_hvm_x86_32 *regs = &ctx->cpu_regs.x86_32;
130         uint32_t limit;
131 
132         if ( ctx->cpu_regs.x86_32.pad1 != 0 ||
133              ctx->cpu_regs.x86_32.pad2[0] != 0 ||
134              ctx->cpu_regs.x86_32.pad2[1] != 0 ||
135              ctx->cpu_regs.x86_32.pad2[2] != 0 )
136             return -EINVAL;
137 
138 #define SEG(s, r) ({                                                        \
139     s = (struct segment_register)                                           \
140         { 0, { (r)->s ## _ar }, (r)->s ## _limit, (r)->s ## _base };        \
141     /* Set accessed / busy bit for present segments. */                     \
142     if ( s.p )                                                              \
143         s.type |= (x86_seg_##s != x86_seg_tr ? 1 : 2);                      \
144     check_segment(&s, x86_seg_ ## s); })
145 
146         rc = SEG(cs, regs);
147         rc |= SEG(ds, regs);
148         rc |= SEG(ss, regs);
149         rc |= SEG(es, regs);
150         rc |= SEG(tr, regs);
151 #undef SEG
152 
153         if ( rc != 0 )
154             return rc;
155 
156         /* Basic sanity checks. */
157         limit = cs.limit;
158         if ( cs.g )
159             limit = (limit << 12) | 0xfff;
160         if ( regs->eip > limit )
161         {
162             gprintk(XENLOG_ERR, "EIP (%#08x) outside CS limit (%#08x)\n",
163                     regs->eip, limit);
164             return -EINVAL;
165         }
166 
167         if ( ss.dpl != cs.dpl )
168         {
169             gprintk(XENLOG_ERR, "SS.DPL (%u) is different than CS.DPL (%u)\n",
170                     ss.dpl, cs.dpl);
171             return -EINVAL;
172         }
173 
174         if ( ds.p && ds.dpl > cs.dpl )
175         {
176             gprintk(XENLOG_ERR, "DS.DPL (%u) is greater than CS.DPL (%u)\n",
177                     ds.dpl, cs.dpl);
178             return -EINVAL;
179         }
180 
181         if ( es.p && es.dpl > cs.dpl )
182         {
183             gprintk(XENLOG_ERR, "ES.DPL (%u) is greater than CS.DPL (%u)\n",
184                     es.dpl, cs.dpl);
185             return -EINVAL;
186         }
187 
188         if ( (regs->efer & EFER_LMA) && !(regs->efer & EFER_LME) )
189         {
190             gprintk(XENLOG_ERR, "EFER.LMA set without EFER.LME (%#016lx)\n",
191                     regs->efer);
192             return -EINVAL;
193         }
194 
195         uregs->rax    = regs->eax;
196         uregs->rcx    = regs->ecx;
197         uregs->rdx    = regs->edx;
198         uregs->rbx    = regs->ebx;
199         uregs->rsp    = regs->esp;
200         uregs->rbp    = regs->ebp;
201         uregs->rsi    = regs->esi;
202         uregs->rdi    = regs->edi;
203         uregs->rip    = regs->eip;
204         uregs->rflags = regs->eflags;
205 
206         v->arch.hvm_vcpu.guest_cr[0] = regs->cr0;
207         v->arch.hvm_vcpu.guest_cr[3] = regs->cr3;
208         v->arch.hvm_vcpu.guest_cr[4] = regs->cr4;
209         v->arch.hvm_vcpu.guest_efer  = regs->efer;
210     }
211     break;
212 
213     case VCPU_HVM_MODE_64B:
214     {
215         const struct vcpu_hvm_x86_64 *regs = &ctx->cpu_regs.x86_64;
216 
217         /* Basic sanity checks. */
218         if ( !is_canonical_address(regs->rip) )
219         {
220             gprintk(XENLOG_ERR, "RIP contains a non-canonical address (%#lx)\n",
221                     regs->rip);
222             return -EINVAL;
223         }
224 
225         if ( !(regs->cr0 & X86_CR0_PG) )
226         {
227             gprintk(XENLOG_ERR, "CR0 doesn't have paging enabled (%#016lx)\n",
228                     regs->cr0);
229             return -EINVAL;
230         }
231 
232         if ( !(regs->cr4 & X86_CR4_PAE) )
233         {
234             gprintk(XENLOG_ERR, "CR4 doesn't have PAE enabled (%#016lx)\n",
235                     regs->cr4);
236             return -EINVAL;
237         }
238 
239         if ( !(regs->efer & EFER_LME) )
240         {
241             gprintk(XENLOG_ERR, "EFER doesn't have LME enabled (%#016lx)\n",
242                     regs->efer);
243             return -EINVAL;
244         }
245 
246         uregs->rax    = regs->rax;
247         uregs->rcx    = regs->rcx;
248         uregs->rdx    = regs->rdx;
249         uregs->rbx    = regs->rbx;
250         uregs->rsp    = regs->rsp;
251         uregs->rbp    = regs->rbp;
252         uregs->rsi    = regs->rsi;
253         uregs->rdi    = regs->rdi;
254         uregs->rip    = regs->rip;
255         uregs->rflags = regs->rflags;
256 
257         v->arch.hvm_vcpu.guest_cr[0] = regs->cr0;
258         v->arch.hvm_vcpu.guest_cr[3] = regs->cr3;
259         v->arch.hvm_vcpu.guest_cr[4] = regs->cr4;
260         v->arch.hvm_vcpu.guest_efer  = regs->efer;
261 
262 #define SEG(l, a) (struct segment_register){ 0, { a }, l, 0 }
263         cs = SEG(~0u, 0xa9b); /* 64bit code segment. */
264         ds = ss = es = SEG(~0u, 0xc93);
265         tr = SEG(0x67, 0x8b); /* 64bit TSS (busy). */
266 #undef SEG
267     }
268     break;
269 
270     }
271 
272     if ( v->arch.hvm_vcpu.guest_efer & EFER_LME )
273         v->arch.hvm_vcpu.guest_efer |= EFER_LMA;
274 
275     if ( v->arch.hvm_vcpu.guest_cr[4] & ~hvm_cr4_guest_valid_bits(v, 0) )
276     {
277         gprintk(XENLOG_ERR, "Bad CR4 value: %#016lx\n",
278                 v->arch.hvm_vcpu.guest_cr[4]);
279         return -EINVAL;
280     }
281 
282     errstr = hvm_efer_valid(v, v->arch.hvm_vcpu.guest_efer, -1);
283     if ( errstr )
284     {
285         gprintk(XENLOG_ERR, "Bad EFER value (%#016lx): %s\n",
286                v->arch.hvm_vcpu.guest_efer, errstr);
287         return -EINVAL;
288     }
289 
290     hvm_update_guest_cr(v, 0);
291     hvm_update_guest_cr(v, 3);
292     hvm_update_guest_cr(v, 4);
293     hvm_update_guest_efer(v);
294 
295     if ( hvm_paging_enabled(v) && !paging_mode_hap(v->domain) )
296     {
297         /* Shadow-mode CR3 change. Check PDBR and update refcounts. */
298         struct page_info *page = get_page_from_gfn(v->domain,
299                                  v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT,
300                                  NULL, P2M_ALLOC);
301         if ( !page )
302         {
303             gprintk(XENLOG_ERR, "Invalid CR3: %#lx\n",
304                     v->arch.hvm_vcpu.guest_cr[3]);
305             return -EINVAL;
306         }
307 
308         v->arch.guest_table = pagetable_from_page(page);
309     }
310 
311     hvm_set_segment_register(v, x86_seg_cs, &cs);
312     hvm_set_segment_register(v, x86_seg_ds, &ds);
313     hvm_set_segment_register(v, x86_seg_ss, &ss);
314     hvm_set_segment_register(v, x86_seg_es, &es);
315     hvm_set_segment_register(v, x86_seg_tr, &tr);
316 
317     /* Sync AP's TSC with BSP's. */
318     v->arch.hvm_vcpu.cache_tsc_offset =
319         v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
320     hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset,
321                              v->domain->arch.hvm_domain.sync_tsc);
322 
323     paging_update_paging_modes(v);
324 
325     v->is_initialised = 1;
326     set_bit(_VPF_down, &v->pause_flags);
327 
328     return 0;
329 }

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */