/*
 * Compatibility hypercall routines.
 */

        .file "x86_64/compat/entry.S"

#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/asm_defns.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <public/xen.h>
#include <irq_vectors.h>

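/*
 * int $0x82 hypercall entry point for 32-bit PV guests.  Saves the
 * volatile guest registers, re-establishes Xen's CR4 settings (see
 * cr4_pv32_restore below), and hands the saved register frame to
 * do_entry_int82 before falling through to the event checks below.
 */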
ENTRY(entry_int82)
        ASM_CLAC
        pushq $0
        SAVE_VOLATILE type=HYPERCALL_VECTOR compat=1
        CR4_PV32_RESTORE

        GET_CURRENT(bx)

        mov   %rsp, %rdi
        call  do_entry_int82

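/*
 * Work loop run before returning to a 32-bit PV guest: with interrupts
 * disabled, check in turn for pending softirqs, a pending virtual #MC,
 * a pending virtual NMI, and an unmasked event-channel upcall (the
 * 16-bit load below picks up the pending byte and the mask byte
 * together).  Only once none of these need service do we branch to
 * compat_restore_all_guest.
 */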
/* %rbx: struct vcpu */
ENTRY(compat_test_all_events)
        ASSERT_NOT_IN_ATOMIC
        cli                             # tests must not race interrupts
/*compat_test_softirqs:*/
        movl  VCPU_processor(%rbx),%eax
        shll  $IRQSTAT_shift,%eax
        leaq  irq_stat+IRQSTAT_softirq_pending(%rip),%rcx
        cmpl  $0,(%rcx,%rax,1)
        jne   compat_process_softirqs
        testb $1,VCPU_mce_pending(%rbx)
        jnz   compat_process_mce
.Lcompat_test_guest_nmi:
        testb $1,VCPU_nmi_pending(%rbx)
        jnz   compat_process_nmi
compat_test_guest_events:
        movq  VCPU_vcpu_info(%rbx),%rax
        movzwl COMPAT_VCPUINFO_upcall_pending(%rax),%eax
        decl  %eax
        cmpl  $0xfe,%eax
        ja    compat_restore_all_guest
/*compat_process_guest_events:*/
        sti
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movl  VCPU_event_addr(%rbx),%eax
        movl  %eax,TRAPBOUNCE_eip(%rdx)
        movl  VCPU_event_sel(%rbx),%eax
        movw  %ax,TRAPBOUNCE_cs(%rdx)
        movb  $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
        call  compat_create_bounce_frame
        jmp   compat_test_all_events

        ALIGN
/* %rbx: struct vcpu */
compat_process_softirqs:
        sti
        andl  $~TRAP_regs_partial,UREGS_entry_vector(%rsp)
        call  do_softirq
        jmp   compat_test_all_events

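/*
 * Deliver a pending virtual machine check to the guest.  If #MC
 * delivery is currently masked we skip ahead to the NMI check;
 * otherwise the pending flag is cleared, a trap bounce is set up by
 * set_guest_machinecheck_trapbounce(), and VCPU_TRAP_MCE is folded
 * into the async exception mask (saved for the iret hypercall to
 * restore).  The NMI path below is structured identically.
 */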
        ALIGN
/* %rbx: struct vcpu */
compat_process_mce:
        testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%rbx)
        jnz   .Lcompat_test_guest_nmi
        sti
        movb  $0,VCPU_mce_pending(%rbx)
        call  set_guest_machinecheck_trapbounce
        testl %eax,%eax
        jz    compat_test_all_events
        movzbl VCPU_async_exception_mask(%rbx),%edx # save mask for the
        movb %dl,VCPU_mce_old_mask(%rbx)            # iret hypercall
        orl  $1 << VCPU_TRAP_MCE,%edx
        movb %dl,VCPU_async_exception_mask(%rbx)
        jmp   compat_process_trap

        ALIGN
/* %rbx: struct vcpu */
compat_process_nmi:
        testb $1 << VCPU_TRAP_NMI,VCPU_async_exception_mask(%rbx)
        jnz   compat_test_guest_events
        sti
        movb  $0,VCPU_nmi_pending(%rbx)
        call  set_guest_nmi_trapbounce
        testl %eax,%eax
        jz    compat_test_all_events
        movzbl VCPU_async_exception_mask(%rbx),%edx # save mask for the
        movb %dl,VCPU_nmi_old_mask(%rbx)            # iret hypercall
        orl  $1 << VCPU_TRAP_NMI,%edx
        movb %dl,VCPU_async_exception_mask(%rbx)
        /* FALLTHROUGH */
compat_process_trap:
        leaq  VCPU_trap_bounce(%rbx),%rdx
        call  compat_create_bounce_frame
        jmp   compat_test_all_events

/* %rbx: struct vcpu, interrupts disabled */
ENTRY(compat_restore_all_guest)
        ASSERT_INTERRUPTS_DISABLED
        mov   $~(X86_EFLAGS_IOPL|X86_EFLAGS_NT|X86_EFLAGS_VM),%r11d
        and   UREGS_eflags(%rsp),%r11d
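        /*
         * The NOP-filled area below is patched via the .altinstructions
         * entries further down: on hardware where Xen uses SMEP and/or
         * SMAP, it is replaced with .Lcr4_alt, which drops the PV32 CR4
         * bits again when returning to other than ring-3 guest context
         * (i.e. the 32-bit guest kernel), re-checking the cached value
         * to cope with an intervening NMI/MCE (see the comment in the
         * replacement code).
         */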
.Lcr4_orig:
        .skip .Lcr4_alt_end - .Lcr4_alt, 0x90
.Lcr4_orig_end:
        .pushsection .altinstr_replacement, "ax"
.Lcr4_alt:
        testb $3,UREGS_cs(%rsp)
        jpe   .Lcr4_alt_end
        mov   CPUINFO_cr4-CPUINFO_guest_cpu_user_regs(%rsp), %rax
        and   $~XEN_CR4_PV32_BITS, %rax
1:
        mov   %rax, CPUINFO_cr4-CPUINFO_guest_cpu_user_regs(%rsp)
        mov   %rax, %cr4
        /*
         * An NMI or MCE may have occurred between the previous two
         * instructions, leaving the register and the cached value in a
         * state where the next exit from the guest would trigger the
         * BUG in cr4_pv32_restore.  If that happened, the cached value
         * is no longer what we just set it to, which we can use to
         * detect and correct the situation.  Note that we need not fear
         * this loop causing a live lock: if NMIs/MCEs occurred at that
         * high a rate, we would be live-locked anyway.
         */
        cmp   %rax, CPUINFO_cr4-CPUINFO_guest_cpu_user_regs(%rsp)
        jne   1b
.Lcr4_alt_end:
        .section .altinstructions, "a"
        altinstruction_entry .Lcr4_orig, .Lcr4_orig, X86_FEATURE_ALWAYS, \
                             (.Lcr4_orig_end - .Lcr4_orig), 0
        altinstruction_entry .Lcr4_orig, .Lcr4_alt, X86_FEATURE_XEN_SMEP, \
                             (.Lcr4_orig_end - .Lcr4_orig), \
                             (.Lcr4_alt_end - .Lcr4_alt)
        altinstruction_entry .Lcr4_orig, .Lcr4_alt, X86_FEATURE_XEN_SMAP, \
                             (.Lcr4_orig_end - .Lcr4_orig), \
                             (.Lcr4_alt_end - .Lcr4_alt)
        .popsection
        or    $X86_EFLAGS_IF,%r11
        mov   %r11d,UREGS_eflags(%rsp)
        RESTORE_ALL adj=8 compat=1
.Lft0:  iretq
        _ASM_PRE_EXTABLE(.Lft0, handle_exception)

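/*
 * Re-establish the CR4 bits in cr4_pv32_mask (SMEP/SMAP) after they
 * were dropped for running 32-bit PV guest code; invoked via the
 * CR4_PV32_RESTORE macro at the compat entry points.  The fast path
 * (bits still set in the cached per-CPU CR4 value) returns without
 * touching %cr4.
 */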
/* This mustn't modify registers other than %rax. */
ENTRY(cr4_pv32_restore)
        push  %rdx
        GET_CPUINFO_FIELD(cr4, dx)
        mov   (%rdx), %rax
        test  $XEN_CR4_PV32_BITS, %eax
        jnz   0f
        or    cr4_pv32_mask(%rip), %rax
        mov   %rax, %cr4
        mov   %rax, (%rdx)
        pop   %rdx
        ret
0:
#ifndef NDEBUG
        /* Check that _all_ of the bits intended to be set actually are. */
        mov   %cr4, %rax
        and   cr4_pv32_mask(%rip), %rax
        cmp   cr4_pv32_mask(%rip), %rax
        je    1f
        /* Cause cr4_pv32_mask to be visible in the BUG register dump. */
        mov   cr4_pv32_mask(%rip), %rdx
        /* Avoid coming back here while handling the #UD we cause below. */
        mov   %cr4, %rcx
        or    %rdx, %rcx
        mov   %rcx, %cr4
        BUG
1:
#endif
        pop   %rdx
        xor   %eax, %eax
        ret

/* %rdx: trap_bounce, %rbx: struct vcpu */
ENTRY(compat_post_handle_exception)
        testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%rdx)
        jz    compat_test_all_events
.Lcompat_bounce_exception:
        call  compat_create_bounce_frame
        movb  $0,TRAPBOUNCE_flags(%rdx)
        jmp   compat_test_all_events

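/*
 * SYSCALL entry from 32-bit guest mode (via MSR_CSTAR).  Calls from
 * non-32-bit-PV domains are redirected to switch_to_kernel; otherwise
 * the call is bounced to the guest's registered 32-bit syscall
 * handler, or converted into a #GP fault (pointing back at the
 * SYSCALL instruction) if no handler selector has been registered.
 */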
/* See lstar_enter for entry register state. */
ENTRY(cstar_enter)
        sti
        CR4_PV32_RESTORE
        movq  8(%rsp),%rax /* Restore %rax. */
        movq  $FLAT_KERNEL_SS,8(%rsp)
        pushq %r11
        pushq $FLAT_USER_CS32
        pushq %rcx
        pushq $0
        SAVE_VOLATILE TRAP_syscall
        GET_CURRENT(bx)
        movq  VCPU_domain(%rbx),%rcx
        cmpb  $0,DOMAIN_is_32bit_pv(%rcx)
        je    switch_to_kernel
        cmpb  $0,VCPU_syscall32_disables_events(%rbx)
        movzwl VCPU_syscall32_sel(%rbx),%esi
        movq  VCPU_syscall32_addr(%rbx),%rax
        setne %cl
        leaq  VCPU_trap_bounce(%rbx),%rdx
        testl $~3,%esi
        leal  (,%rcx,TBF_INTERRUPT),%ecx
UNLIKELY_START(z, compat_syscall_gpf)
        movq  VCPU_trap_ctxt(%rbx),%rdi
        movl  $TRAP_gp_fault,UREGS_entry_vector(%rsp)
        subl  $2,UREGS_rip(%rsp)
        movl  $0,TRAPBOUNCE_error_code(%rdx)
        movl  TRAP_gp_fault * TRAPINFO_sizeof + TRAPINFO_eip(%rdi),%eax
        movzwl TRAP_gp_fault * TRAPINFO_sizeof + TRAPINFO_cs(%rdi),%esi
        testb $4,TRAP_gp_fault * TRAPINFO_sizeof + TRAPINFO_flags(%rdi)
        setnz %cl
        leal  TBF_EXCEPTION|TBF_EXCEPTION_ERRCODE(,%rcx,TBF_INTERRUPT),%ecx
UNLIKELY_END(compat_syscall_gpf)
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movw  %si,TRAPBOUNCE_cs(%rdx)
        movb  %cl,TRAPBOUNCE_flags(%rdx)
        jmp   .Lcompat_bounce_exception

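/* %rdx: trap_bounce, %rbx: struct vcpu */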
ENTRY(compat_sysenter)
        CR4_PV32_RESTORE
        movq  VCPU_trap_ctxt(%rbx),%rcx
        cmpb  $TRAP_gp_fault,UREGS_entry_vector(%rsp)
        movzwl VCPU_sysenter_sel(%rbx),%eax
        movzwl TRAP_gp_fault * TRAPINFO_sizeof + TRAPINFO_cs(%rcx),%ecx
        cmovel %ecx,%eax
        testl $~3,%eax
        movl  $FLAT_COMPAT_USER_SS,UREGS_ss(%rsp)
        cmovzl %ecx,%eax
        movw  %ax,TRAPBOUNCE_cs(%rdx)
        call  compat_create_bounce_frame
        jmp   compat_test_all_events

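/* %rdx: trap_bounce, %rbx: struct vcpu */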
ENTRY(compat_int80_direct_trap)
        CR4_PV32_RESTORE
        call  compat_create_bounce_frame
        jmp   compat_test_all_events

/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:            */
/*   {[ERRCODE,] EIP, CS, EFLAGS, [ESP, SS]}                             */
/* %rdx: trap_bounce, %rbx: struct vcpu                                  */
/* On return only %rbx and %rdx are guaranteed non-clobbered.            */
compat_create_bounce_frame:
        ASSERT_INTERRUPTS_ENABLED
        mov   %fs,%edi
        ASM_STAC
        testb $2,UREGS_cs+8(%rsp)
        jz    1f
        /* Push new frame at registered guest-OS stack base. */
        movl  VCPU_kernel_sp(%rbx),%esi
.Lft1:  mov   VCPU_kernel_ss(%rbx),%fs
        subl  $2*4,%esi
        movl  UREGS_rsp+8(%rsp),%eax
.Lft2:  movl  %eax,%fs:(%rsi)
        movl  UREGS_ss+8(%rsp),%eax
.Lft3:  movl  %eax,%fs:4(%rsi)
        jmp   2f
1:      /* In kernel context already: push new frame at existing %rsp. */
        movl  UREGS_rsp+8(%rsp),%esi
.Lft4:  mov   UREGS_ss+8(%rsp),%fs
2:
        movq  VCPU_domain(%rbx),%r8
        subl  $3*4,%esi
        movq  VCPU_vcpu_info(%rbx),%rax
        pushq COMPAT_VCPUINFO_upcall_mask(%rax)
        testb $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
        setnz %ch                       # TBF_INTERRUPT -> set upcall mask
        orb   %ch,COMPAT_VCPUINFO_upcall_mask(%rax)
        popq  %rax
        shll  $16,%eax                  # Bits 16-23: saved_upcall_mask
        movw  UREGS_cs+8(%rsp),%ax      # Bits  0-15: CS
.Lft5:  movl  %eax,%fs:4(%rsi)          # CS / saved_upcall_mask
        shrl  $16,%eax
        testb %al,%al                   # Bits 0-7: saved_upcall_mask
        setz  %ch                       # %ch == !saved_upcall_mask
        movl  UREGS_eflags+8(%rsp),%eax
        andl  $~(X86_EFLAGS_IF|X86_EFLAGS_IOPL),%eax
        addb  %ch,%ch                   # Bit 9 (EFLAGS.IF)
        orb   %ch,%ah                   # Fold EFLAGS.IF into %eax
        xorl  %ecx,%ecx                 # if ( VM_ASSIST(v->domain, architectural_iopl) )
        testb $1 << VMASST_TYPE_architectural_iopl,DOMAIN_vm_assist(%r8)
        cmovnzl VCPU_iopl(%rbx),%ecx    # Bits 13:12 (EFLAGS.IOPL)
        orl   %ecx,%eax                 # Fold EFLAGS.IOPL into %eax
.Lft6:  movl  %eax,%fs:2*4(%rsi)        # EFLAGS
        movl  UREGS_rip+8(%rsp),%eax
.Lft7:  movl  %eax,%fs:(%rsi)           # EIP
        testb $TBF_EXCEPTION_ERRCODE,TRAPBOUNCE_flags(%rdx)
        jz    1f
        subl  $4,%esi
        movl  TRAPBOUNCE_error_code(%rdx),%eax
.Lft8:  movl  %eax,%fs:(%rsi)           # ERROR CODE
1:
        ASM_CLAC
        /* Rewrite our stack frame and return to guest-OS mode. */
        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
        andl  $~(X86_EFLAGS_VM|X86_EFLAGS_RF|\
                 X86_EFLAGS_NT|X86_EFLAGS_TF),UREGS_eflags+8(%rsp)
        mov   %fs,UREGS_ss+8(%rsp)
        movl  %esi,UREGS_rsp+8(%rsp)
.Lft13: mov   %edi,%fs
        movzwl TRAPBOUNCE_cs(%rdx),%eax
        /* Null selectors (0-3) are not allowed. */
        testl $~3,%eax
UNLIKELY_START(z, compat_bounce_null_selector)
        lea   UNLIKELY_DISPATCH_LABEL(compat_bounce_null_selector)(%rip), %rdi
        jmp   asm_domain_crash_synchronous  /* Does not return */
__UNLIKELY_END(compat_bounce_null_selector)
        movl  %eax,UREGS_cs+8(%rsp)
        movl  TRAPBOUNCE_eip(%rdx),%eax
        movl  %eax,UREGS_rip+8(%rsp)
        ret
.section .fixup,"ax"
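        /* Restoring the %fs selector saved on entry faulted: retry with a null selector. */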
.Lfx13:
        xorl  %edi,%edi
        jmp   .Lft13
.previous
        _ASM_EXTABLE(.Lft1,  dom_crash_sync_extable)
        _ASM_EXTABLE(.Lft2,  compat_crash_page_fault)
        _ASM_EXTABLE(.Lft3,  compat_crash_page_fault_4)
        _ASM_EXTABLE(.Lft4,  dom_crash_sync_extable)
        _ASM_EXTABLE(.Lft5,  compat_crash_page_fault_4)
        _ASM_EXTABLE(.Lft6,  compat_crash_page_fault_8)
        _ASM_EXTABLE(.Lft7,  compat_crash_page_fault)
        _ASM_EXTABLE(.Lft8,  compat_crash_page_fault)
        _ASM_EXTABLE(.Lft13, .Lfx13)

compat_crash_page_fault_8:
        addl  $4,%esi
compat_crash_page_fault_4:
        addl  $4,%esi
compat_crash_page_fault:
.Lft14: mov   %edi,%fs
        ASM_CLAC
        movl  %esi,%edi
        call  show_page_walk
        jmp   dom_crash_sync_extable
.section .fixup,"ax"
.Lfx14:
        xorl  %edi,%edi
        jmp   .Lft14
.previous
        _ASM_EXTABLE(.Lft14, .Lfx14)