1/*
2 * entry.S: SVM architecture-specific entry/exit handling.
3 * Copyright (c) 2005-2007, Advanced Micro Devices, Inc.
4 * Copyright (c) 2004, Intel Corporation.
5 * Copyright (c) 2008, Citrix Systems, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; If not, see <http://www.gnu.org/licenses/>.
18 */
19
20        .file "svm/entry.S"
21
22#include <xen/errno.h>
23#include <xen/softirq.h>
24#include <asm/types.h>
25#include <asm/asm_defns.h>
26#include <asm/apicdef.h>
27#include <asm/page.h>
28#include <public/xen.h>
29
/*
 * Raw opcode encodings for the AMD SVM instructions, so this file also
 * assembles with toolchains that do not understand the SVM extension.
 * VMRUN takes the physical address of the VMCB in %rax as an implicit
 * operand; CLGI/STGI clear/set the Global Interrupt Flag (GIF).
 */
30#define VMRUN  .byte 0x0F,0x01,0xD8
31#define STGI   .byte 0x0F,0x01,0xDC
32#define CLGI   .byte 0x0F,0x01,0xDD
33
34ENTRY(svm_asm_do_resume)
        /*
         * Entry point for (re)entering an SVM guest.  GET_CURRENT loads the
         * current struct vcpu pointer into %rbx, where it stays live for the
         * whole resume loop below.  The guest's cpu_user_regs frame sits at
         * the top of the stack (%rsp).
         */
35        GET_CURRENT(bx)
36.Lsvm_do_resume:
        /* Inject any pending interrupts/events into the guest. */
37        call svm_intr_assist
        /* nsvm_vcpu_switch(regs): nested-SVM vmentry/vmexit state switching. */
38        mov  %rsp,%rdi
39        call nsvm_vcpu_switch
40        ASSERT_NOT_IN_ATOMIC
41
        /*
         * Compute the address of this CPU's softirq_pending word:
         * %rdx = &irq_stat[0].softirq_pending, %rax = cpu << IRQSTAT_shift.
         * %ecx is zeroed here and reused below both as the zero to compare
         * against and (via %cl) for the nsvm_hap_enabled byte compare.
         */
42        mov  VCPU_processor(%rbx),%eax
43        lea  irq_stat+IRQSTAT_softirq_pending(%rip),%rdx
44        xor  %ecx,%ecx
45        shl  $IRQSTAT_shift,%eax
        /*
         * Clear GIF *before* testing softirq_pending: once GIF is clear no
         * interrupt can be taken, so a softirq raised after this test cannot
         * be missed — it would have been raised before CLGI and seen here,
         * or will be handled after the next vmexit.
         */
46        CLGI
47        cmp  %ecx,(%rdx,%rax,1)
48        jne  .Lsvm_process_softirqs
49
        /*
         * Nested-HAP sanity check: if nested HAP is enabled for this vCPU
         * (%cl == 0 from above), and the vCPU is in nested guest mode while
         * its nested p2m pointer is NULL, the nested p2m has been torn down
         * under our feet — retry the resume loop so nsvm_vcpu_switch() can
         * reinstate it.  %al = (nhvm_p2m == NULL); branch taken only when
         * guestmode is set AND the pointer is NULL.
         */
50        cmp  %cl,VCPU_nsvm_hap_enabled(%rbx)
51UNLIKELY_START(ne, nsvm_hap)
52        cmp  %rcx,VCPU_nhvm_p2m(%rbx)
53        sete %al
54        test VCPU_nhvm_guestmode(%rbx),%al
55        UNLIKELY_DONE(z, nsvm_hap)
56        /*
57         * Someone shot down our nested p2m table; go round again
58         * and nsvm_vcpu_switch() will fix it for us.
59         */
60        STGI
61        jmp  .Lsvm_do_resume
62__UNLIKELY_END(nsvm_hap)
63
        /* Assign/refresh the guest's ASID (TLB tag) for this VMRUN. */
64        call svm_asid_handle_vmrun
65
        /* Emit a vmentry trace record, but only once tracing is set up. */
66        cmpb $0,tb_init_done(%rip)
67UNLIKELY_START(nz, svm_trace)
68        call svm_trace_vmentry
69UNLIKELY_END(svm_trace)
70
        /*
         * Copy the guest state that VMRUN loads from the VMCB — rax, rip,
         * rsp and rflags — out of the saved cpu_user_regs frame into the
         * VMCB.  %rcx = this vCPU's VMCB (virtual address).
         */
71        mov  VCPU_svm_vmcb(%rbx),%rcx
72        mov  UREGS_rax(%rsp),%rax
73        mov  %rax,VMCB_rax(%rcx)
74        mov  UREGS_rip(%rsp),%rax
75        mov  %rax,VMCB_rip(%rcx)
76        mov  UREGS_rsp(%rsp),%rax
77        mov  %rax,VMCB_rsp(%rcx)
78        mov  UREGS_eflags(%rsp),%rax
        /* X86_EFLAGS_MBS: the architecturally must-be-set RFLAGS bit. */
79        or   $X86_EFLAGS_MBS,%rax
80        mov  %rax,VMCB_rflags(%rcx)
81
        /*
         * Restore the remaining guest GPRs by popping the cpu_user_regs
         * frame in layout order.  %rax is loaded (before %rbx is popped)
         * with the VMCB's machine address — VMRUN's implicit operand.
         */
82        pop  %r15
83        pop  %r14
84        pop  %r13
85        pop  %r12
86        pop  %rbp
87        mov  VCPU_svm_vmcb_pa(%rbx),%rax
88        pop  %rbx
89        pop  %r11
90        pop  %r10
91        pop  %r9
92        pop  %r8
93        add  $8,%rsp /* Skip %rax: restored by VMRUN. */
94        pop  %rcx
95        pop  %rdx
96        pop  %rsi
97        pop  %rdi
98
99        VMRUN
100
        /*
         * #VMEXIT lands here with GIF still clear (no interrupts).  Rebuild
         * the cpu_user_regs frame by pushing the guest GPRs in the reverse
         * of the pop sequence above.  Along the way: %rax is reloaded with
         * the current vcpu (its guest value lives in the VMCB), %rcx gets
         * the VMCB virtual address, and %rbx is re-established as the vcpu
         * pointer once the guest's %rbx has been saved.
         */
101        GET_CURRENT(ax)
102        push %rdi
103        push %rsi
104        push %rdx
105        push %rcx
106        mov  VCPU_svm_vmcb(%rax),%rcx
107        push %rax
108        push %r8
109        push %r9
110        push %r10
111        push %r11
112        push %rbx
113        mov  %rax,%rbx
114        push %rbp
115        push %r12
116        push %r13
117        push %r14
118        push %r15
119
        /* The in-memory VMCB view is now stale w.r.t. hardware state. */
120        movb $0,VCPU_svm_vmcb_in_sync(%rbx)
        /*
         * Pull the VMRUN-saved guest state (rax/rip/rsp/rflags) out of the
         * VMCB into the freshly rebuilt cpu_user_regs frame.
         */
121        mov  VMCB_rax(%rcx),%rax
122        mov  %rax,UREGS_rax(%rsp)
123        mov  VMCB_rip(%rcx),%rax
124        mov  %rax,UREGS_rip(%rsp)
125        mov  VMCB_rsp(%rcx),%rax
126        mov  %rax,UREGS_rsp(%rsp)
127        mov  VMCB_rflags(%rcx),%rax
128        mov  %rax,UREGS_eflags(%rsp)
129
        /* Re-enable global interrupts before entering C exit handling. */
130        STGI
131GLOBAL(svm_stgi_label)
        /* svm_vmexit_handler(regs), then loop back to re-enter the guest. */
132        mov  %rsp,%rdi
133        call svm_vmexit_handler
134        jmp  .Lsvm_do_resume
135
136.Lsvm_process_softirqs:
        /* Softirqs pending: set GIF again, run them, then retry the resume. */
137        STGI
138        call do_softirq
139        jmp  .Lsvm_do_resume
140