/******************************************************************************
 * evtchn.c
 *
 * A simplified event channel for para-drivers in unmodified linux
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, Intel Corporation <xiaofeng.ling@intel.com>
 *
 * This file may be distributed separately from the Linux kernel, or
 * incorporated into other software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <xen/evtchn.h>
#include <xen/interface/hvm/ioreq.h>
#include <xen/features.h>
#include "platform-pci.h"

#ifdef HAVE_XEN_PLATFORM_COMPAT_H
#include <xen/platform-compat.h>
#endif

void *shared_info_area;

#define is_valid_evtchn(x)	((x) != 0)
#define evtchn_from_irq(x)	(irq_evtchn[(x)].evtchn)

static struct {
	spinlock_t lock;
	irq_handler_t handler;
	void *dev_id;
	int evtchn;
	int close:1; /* close on unbind_from_irqhandler()? */
	int inuse:1;
	int in_handler:1;
} irq_evtchn[256];
static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
	[0 ...  NR_EVENT_CHANNELS-1] = -1 };

static DEFINE_SPINLOCK(irq_alloc_lock);

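/* Allocate a free slot in irq_evtchn[]; IRQ 0 is never handed out. */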
static int alloc_xen_irq(void)
{
	static int warned;
	int irq;

	spin_lock(&irq_alloc_lock);

	for (irq = 1; irq < ARRAY_SIZE(irq_evtchn); irq++) {
		if (irq_evtchn[irq].inuse)
			continue;
		irq_evtchn[irq].inuse = 1;
		spin_unlock(&irq_alloc_lock);
		return irq;
	}

	if (!warned) {
		warned = 1;
		printk(KERN_WARNING "No available IRQ to bind to: "
		       "increase irq_evtchn[] size in evtchn.c.\n");
	}

	spin_unlock(&irq_alloc_lock);

	return -ENOSPC;
}

static void free_xen_irq(int irq)
{
	spin_lock(&irq_alloc_lock);
	irq_evtchn[irq].inuse = 0;
	spin_unlock(&irq_alloc_lock);
}

int irq_to_evtchn_port(int irq)
{
	return irq_evtchn[irq].evtchn;
}
EXPORT_SYMBOL(irq_to_evtchn_port);

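/*
 * Masking is done locally by setting the port's bit in the shared info
 * page.  Unmasking is done via a hypercall so the hypervisor can re-check
 * for, and deliver, any event that became pending while the port was
 * masked.
 */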
void mask_evtchn(int port)
{
	shared_info_t *s = shared_info_area;
	synch_set_bit(port, &s->evtchn_mask[0]);
}
EXPORT_SYMBOL(mask_evtchn);

void unmask_evtchn(int port)
{
	evtchn_unmask_t op = { .port = port };
	VOID(HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &op));
}
EXPORT_SYMBOL(unmask_evtchn);

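/*
 * Allocate an unbound event channel that remote_domain may later connect
 * to, and route it to a newly allocated IRQ.  Returns the IRQ number or a
 * negative errno.  In this simplified implementation the irqflags and
 * devname arguments are accepted for interface compatibility but are not
 * used.
 */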
int bind_listening_port_to_irqhandler(
	unsigned int remote_domain,
	irq_handler_t handler,
	unsigned long irqflags,
	const char *devname,
	void *dev_id)
{
	struct evtchn_alloc_unbound alloc_unbound;
	int err, irq;

	irq = alloc_xen_irq();
	if (irq < 0)
		return irq;

	spin_lock_irq(&irq_evtchn[irq].lock);

	alloc_unbound.dom        = DOMID_SELF;
	alloc_unbound.remote_dom = remote_domain;
	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
					  &alloc_unbound);
	if (err) {
		spin_unlock_irq(&irq_evtchn[irq].lock);
		free_xen_irq(irq);
		return err;
	}

	irq_evtchn[irq].handler = handler;
	irq_evtchn[irq].dev_id  = dev_id;
	irq_evtchn[irq].evtchn  = alloc_unbound.port;
	irq_evtchn[irq].close   = 1;

	evtchn_to_irq[alloc_unbound.port] = irq;

	unmask_evtchn(alloc_unbound.port);

	spin_unlock_irq(&irq_evtchn[irq].lock);

	return irq;
}
EXPORT_SYMBOL(bind_listening_port_to_irqhandler);
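/*
 * Illustrative usage sketch only (my_handler, backend_domid and my_dev are
 * hypothetical names; the handler prototype shown is the pre-2.6.19
 * three-argument form):
 *
 *	static irqreturn_t my_handler(int irq, void *dev_id,
 *				      struct pt_regs *regs)
 *	{
 *		... consume work posted by the remote domain ...
 *		return IRQ_HANDLED;
 *	}
 *
 *	irq = bind_listening_port_to_irqhandler(backend_domid, my_handler,
 *						0, "my-driver", my_dev);
 *	if (irq < 0)
 *		return irq;
 *	... advertise irq_to_evtchn_port(irq) to the peer, e.g. via xenstore ...
 *	notify_remote_via_irq(irq);
 *	...
 *	unbind_from_irqhandler(irq, my_dev);
 */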
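/*
 * Bind an event channel port that already exists in this domain (for
 * example, one whose number was communicated by the peer through xenstore)
 * to a newly allocated IRQ.  Because this code did not allocate the port,
 * it is not closed by unbind_from_irqhandler().
 */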
int bind_caller_port_to_irqhandler(
	unsigned int caller_port,
	irq_handler_t handler,
	unsigned long irqflags,
	const char *devname,
	void *dev_id)
{
	int irq;

	irq = alloc_xen_irq();
	if (irq < 0)
		return irq;

	spin_lock_irq(&irq_evtchn[irq].lock);

	irq_evtchn[irq].handler = handler;
	irq_evtchn[irq].dev_id  = dev_id;
	irq_evtchn[irq].evtchn  = caller_port;
	irq_evtchn[irq].close   = 0;

	evtchn_to_irq[caller_port] = irq;

	unmask_evtchn(caller_port);

	spin_unlock_irq(&irq_evtchn[irq].lock);

	return irq;
}
EXPORT_SYMBOL(bind_caller_port_to_irqhandler);

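/*
 * Tear down an IRQ binding: mask the port, close it if it was allocated by
 * bind_listening_port_to_irqhandler(), and wait for any handler invocation
 * still running on another CPU before releasing the IRQ.
 */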
void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	int evtchn;

	spin_lock_irq(&irq_evtchn[irq].lock);

	evtchn = evtchn_from_irq(irq);

	if (is_valid_evtchn(evtchn)) {
		evtchn_to_irq[evtchn] = -1;
		mask_evtchn(evtchn);
		if (irq_evtchn[irq].close) {
			struct evtchn_close close = { .port = evtchn };
			if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
				BUG();
		}
	}

	irq_evtchn[irq].handler = NULL;
	irq_evtchn[irq].evtchn  = 0;

	spin_unlock_irq(&irq_evtchn[irq].lock);

	while (irq_evtchn[irq].in_handler)
		cpu_relax();

	free_xen_irq(irq);
}
EXPORT_SYMBOL(unbind_from_irqhandler);

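/* Signal the remote end of the event channel bound to this IRQ. */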
void notify_remote_via_irq(int irq)
{
	int evtchn;

	evtchn = evtchn_from_irq(irq);
	if (is_valid_evtchn(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL(notify_remote_via_irq);

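/*
 * Remember where the previous scan of the two-level pending bitmap left
 * off, so successive interrupts service ports in round-robin order instead
 * of always favouring the lowest-numbered ones.
 */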
static DEFINE_PER_CPU(unsigned int, last_processed_l1i) = { BITS_PER_LONG - 1 };
static DEFINE_PER_CPU(unsigned int, last_processed_l2i) = { BITS_PER_LONG - 1 };

static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh,
						unsigned int idx)
{
	return (sh->evtchn_pending[idx] & ~sh->evtchn_mask[idx]);
}

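/*
 * Upcall handler, invoked via the platform PCI device's interrupt line.
 * It scans the two-level pending bitmap in the shared info page: the
 * per-VCPU selector word (level 1) says which words of evtchn_pending[]
 * (level 2) need examining; each pending, unmasked port is cleared and its
 * registered handler is called with interrupts enabled.
 */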
static irqreturn_t evtchn_interrupt(int irq, void *dev_id
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
				    , struct pt_regs *regs
#else
# define handler(irq, dev_id, regs) handler(irq, dev_id)
#endif
				    )
{
	unsigned int l1i, l2i, port;
	unsigned long masked_l1, masked_l2;
	/* XXX: All events are bound to vcpu0 but irq may be redirected. */
	int cpu = 0; /*smp_processor_id();*/
	irq_handler_t handler;
	shared_info_t *s = shared_info_area;
	vcpu_info_t *v = &s->vcpu_info[cpu];
	unsigned long l1, l2;

	v->evtchn_upcall_pending = 0;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
	/* Clear master flag /before/ clearing selector flag. */
	wmb();
#endif
	l1 = xchg(&v->evtchn_pending_sel, 0);

	l1i = per_cpu(last_processed_l1i, cpu);
	l2i = per_cpu(last_processed_l2i, cpu);

	while (l1 != 0) {

		l1i = (l1i + 1) % BITS_PER_LONG;
		masked_l1 = l1 & ((~0UL) << l1i);

		if (masked_l1 == 0) { /* if we masked out all events, wrap around to the beginning */
			l1i = BITS_PER_LONG - 1;
			l2i = BITS_PER_LONG - 1;
			continue;
		}
		l1i = __ffs(masked_l1);

		do {
			l2 = active_evtchns(cpu, s, l1i);

			l2i = (l2i + 1) % BITS_PER_LONG;
			masked_l2 = l2 & ((~0UL) << l2i);

			if (masked_l2 == 0) { /* if we masked out all events, move on */
				l2i = BITS_PER_LONG - 1;
				break;
			}
			l2i = __ffs(masked_l2);

			/* process port */
			port = (l1i * BITS_PER_LONG) + l2i;
			synch_clear_bit(port, &s->evtchn_pending[0]);

			irq = evtchn_to_irq[port];
			if (irq < 0)
				continue;

			spin_lock(&irq_evtchn[irq].lock);
			handler = irq_evtchn[irq].handler;
			dev_id  = irq_evtchn[irq].dev_id;
			if (unlikely(handler == NULL)) {
				printk(KERN_WARNING
				       "Xen IRQ%d (port %d) has no handler!\n",
				       irq, port);
				spin_unlock(&irq_evtchn[irq].lock);
				continue;
			}
			irq_evtchn[irq].in_handler = 1;
			spin_unlock(&irq_evtchn[irq].lock);

			local_irq_enable();
			/* Use the handler/dev_id pair snapshotted under the lock. */
			handler(irq, dev_id, regs);
			local_irq_disable();

			spin_lock(&irq_evtchn[irq].lock);
			irq_evtchn[irq].in_handler = 0;
			spin_unlock(&irq_evtchn[irq].lock);

			/* if this is the final port processed, we'll pick up here+1 next time */
			per_cpu(last_processed_l1i, cpu) = l1i;
			per_cpu(last_processed_l2i, cpu) = l2i;

		} while (l2i != BITS_PER_LONG - 1);

		l2 = active_evtchns(cpu, s, l1i);
		if (l2 == 0) /* we handled all ports, so we can clear the selector bit */
			l1 &= ~(1UL << l1i);
	}

	return IRQ_HANDLED;
}

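/*
 * Called on resume: event channel numbers are not preserved across
 * save/restore, so mask every port and forget all evtchn-to-IRQ mappings;
 * drivers are expected to rebind.
 */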
void irq_resume(void)
{
	int evtchn, irq;

	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++) {
		mask_evtchn(evtchn);
		evtchn_to_irq[evtchn] = -1;
	}

	for (irq = 0; irq < ARRAY_SIZE(irq_evtchn); irq++)
		irq_evtchn[irq].evtchn = 0;
}

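/*
 * Hook the platform PCI device's interrupt line.  All event channel
 * notifications for this domain arrive through this one shared interrupt
 * and are demultiplexed by evtchn_interrupt().
 */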
int xen_irq_init(struct pci_dev *pdev)
{
	int irq;

	for (irq = 0; irq < ARRAY_SIZE(irq_evtchn); irq++)
		spin_lock_init(&irq_evtchn[irq].lock);

	return request_irq(pdev->irq, evtchn_interrupt,
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
			   SA_SHIRQ | SA_SAMPLE_RANDOM | SA_INTERRUPT,
#else
#ifdef IRQF_SAMPLE_RANDOM
			   IRQF_SAMPLE_RANDOM |
#endif
#ifdef IRQF_DISABLED
			   IRQF_DISABLED |
#endif
			   IRQF_SHARED,
#endif
			   "xen-platform-pci", pdev);
}