/*
 * intr.c: Interrupt handling for SVM.
 * Copyright (c) 2005, AMD Inc.
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 */

#include <xen/init.h>
#include <xen/mm.h>
#include <xen/lib.h>
#include <xen/trace.h>
#include <xen/errno.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/paging.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/io.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vlapic.h>
#include <asm/hvm/svm/svm.h>
#include <asm/hvm/svm/intr.h>
#include <asm/hvm/nestedhvm.h> /* for nestedhvm_vcpu_in_guestmode */
#include <xen/event.h>
#include <xen/kernel.h>
#include <public/hvm/ioreq.h>
#include <xen/domain_page.h>
#include <asm/hvm/trace.h>

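/*
 * Queue an NMI for injection via the VMCB's EVENTINJ field.  NMIs are
 * always delivered through vector 2.
 */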
static void svm_inject_nmi(struct vcpu *v)
{
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb);
    eventinj_t event;

    event.bytes = 0;
    event.fields.v = 1;
    event.fields.type = X86_EVENTTYPE_NMI;
    event.fields.vector = 2;

    ASSERT(vmcb->eventinj.fields.v == 0);
    vmcb->eventinj = event;

    /*
     * SVM does not virtualise the NMI mask, so we emulate it by intercepting
     * the next IRET and blocking NMI injection until the intercept triggers.
     */
    vmcb_set_general1_intercepts(
        vmcb, general1_intercepts | GENERAL1_INTERCEPT_IRET);
}

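/*
 * Queue an external interrupt for injection via EVENTINJ; the processor
 * delivers it to the guest at the next VMRUN.
 */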
static void svm_inject_extint(struct vcpu *v, int vector)
{
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    eventinj_t event;

    event.bytes = 0;
    event.fields.v = 1;
    event.fields.type = X86_EVENTTYPE_EXT_INTR;
    event.fields.vector = vector;

    ASSERT(vmcb->eventinj.fields.v == 0);
    vmcb->eventinj = event;
}

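/*
 * Arrange for a VMEXIT as soon as the guest is able to accept the pending
 * interrupt described by @intack, so that injection can be retried then.
 */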
static void svm_enable_intr_window(struct vcpu *v, struct hvm_intack intack)
{
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    uint32_t general1_intercepts = vmcb_get_general1_intercepts(vmcb);
    vintr_t intr;

    ASSERT(intack.source != hvm_intsrc_none);

    if ( nestedhvm_enabled(v->domain) )
    {
        struct nestedvcpu *nv = &vcpu_nestedhvm(v);

        if ( nv->nv_vmentry_pending )
        {
            struct vmcb_struct *gvmcb = nv->nv_vvmcx;

            /*
             * Check whether the l1 guest is injecting an interrupt into the
             * l2 guest via vintr.  If so, return here; otherwise the l2
             * guest loses interrupts.
             */
            ASSERT(gvmcb != NULL);
            intr = vmcb_get_vintr(gvmcb);
            if ( intr.fields.irq )
                return;
        }
    }

    HVMTRACE_3D(INTR_WINDOW, intack.vector, intack.source,
                vmcb->eventinj.fields.v ? vmcb->eventinj.fields.vector : -1);

    /*
     * Create a dummy virtual interrupt to intercept as soon as the
     * guest can accept the real interrupt.
     *
     * TODO: Better NMI handling. We need a way to skip a MOV SS interrupt
     * shadow. This is hard to do without hardware support. Also we should
     * not be waiting for EFLAGS.IF to become 1.
     */

    /*
     * NMI-blocking window is handled by IRET interception. We should not
     * inject a VINTR in this case as VINTR is unaware of NMI-blocking and
     * hence we can enter an endless loop (VINTR intercept fires, yet
     * hvm_interrupt_blocked() still indicates NMI-blocking is active, so
     * we inject a VINTR, ...).
     */
    if ( (intack.source == hvm_intsrc_nmi) &&
         (general1_intercepts & GENERAL1_INTERCEPT_IRET) )
        return;

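    /*
     * Program the dummy VINTR: its priority is taken from the upper nibble
     * of the pending vector, and only LAPIC-sourced interrupts honour the
     * virtual TPR.
     */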
    intr = vmcb_get_vintr(vmcb);
    intr.fields.irq = 1;
    intr.fields.vector = 0;
    intr.fields.prio = intack.vector >> 4;
    intr.fields.ign_tpr = (intack.source != hvm_intsrc_lapic);
    vmcb_set_vintr(vmcb, intr);
    vmcb_set_general1_intercepts(
        vmcb, general1_intercepts | GENERAL1_INTERCEPT_VINTR);
}

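/*
 * Pick the highest-priority pending interrupt for the current vcpu, if any,
 * and either inject it now or open an interrupt window so that injection
 * can be retried once the guest can accept it.
 */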
void svm_intr_assist(void)
{
    struct vcpu *v = current;
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    struct hvm_intack intack;
    enum hvm_intblk intblk;

    /* Crank the handle on interrupt state. */
    pt_update_irq(v);

    do {
        intack = hvm_vcpu_has_pending_irq(v);
        if ( likely(intack.source == hvm_intsrc_none) )
            return;

        intblk = hvm_interrupt_blocked(v, intack);
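        /*
         * With a clear GIF (only possible with nested SVM) all interrupt
         * sources are held pending, so there is nothing more to do until
         * the l1 guest sets GIF again with STGI.
         */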
        if ( intblk == hvm_intblk_svm_gif )
        {
            ASSERT(nestedhvm_enabled(v->domain));
            return;
        }

        /* Interrupts for the nested guest are already in the vmcb. */
        if ( nestedhvm_enabled(v->domain) && nestedhvm_vcpu_in_guestmode(v) )
        {
            int rc;

            /*
             * The l2 guest was running when an interrupt for the l1 guest
             * occurred.
             */
            rc = nestedsvm_vcpu_interrupt(v, intack);
            switch ( rc )
            {
            case NSVM_INTR_NOTINTERCEPTED:
                /* Inject the interrupt into the l2 guest directly. */
                break;
            case NSVM_INTR_NOTHANDLED:
            case NSVM_INTR_FORCEVMEXIT:
                return;
            case NSVM_INTR_MASKED:
                /* Guest already enabled an interrupt window. */
                return;
            default:
                panic("%s: nestedsvm_vcpu_interrupt can't handle value %#x",
                      __func__, rc);
            }
        }

        /*
         * Pending IRQs must be delayed if:
         * 1. An event is already pending. This is despite the fact that SVM
         *    provides a VINTR delivery method quite separate from the EVENTINJ
         *    mechanism. The event delivery can arbitrarily delay the injection
         *    of the vintr (for example, if the exception is handled via an
         *    interrupt gate, hence zeroing RFLAGS.IF). In the meantime:
         *    - the vTPR could be modified upwards, so we need to wait until
         *      the exception is delivered before we can safely decide that an
         *      interrupt is deliverable; and
         *    - the guest might look at the APIC/PIC state, so we ought not to
         *      have cleared the interrupt out of the IRR.
         * 2. The IRQ is masked.
         */
        if ( unlikely(vmcb->eventinj.fields.v) || intblk )
        {
            svm_enable_intr_window(v, intack);
            return;
        }

        intack = hvm_vcpu_ack_pending_irq(v, intack);
    } while ( intack.source == hvm_intsrc_none );

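    /*
     * The interrupt has now been acknowledged at its source (LAPIC or PIC),
     * so from here on it must be injected rather than re-evaluated.
     */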
    if ( intack.source == hvm_intsrc_nmi )
    {
        svm_inject_nmi(v);
    }
    else
    {
        HVMTRACE_2D(INJ_VIRQ, intack.vector, /*fake=*/ 0);
        svm_inject_extint(v, intack.vector);
        pt_intr_post(v, intack);
    }

    /* Is there another IRQ to queue up behind this one? */
    intack = hvm_vcpu_has_pending_irq(v);
    if ( unlikely(intack.source != hvm_intsrc_none) )
        svm_enable_intr_window(v, intack);
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */