// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO PCI interrupt handling
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/eventfd.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/file.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/slab.h>

#include "vfio_pci_priv.h"

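/*
 * Per-interrupt context.  INTx uses a single entry at index zero; MSI and
 * MSI-X allocate one entry per vector.
 */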
struct vfio_pci_irq_ctx {
	struct eventfd_ctx	*trigger;
	struct virqfd		*unmask;
	struct virqfd		*mask;
	char			*name;
	bool			masked;
	struct irq_bypass_producer	producer;
};

static bool irq_is(struct vfio_pci_core_device *vdev, int type)
{
	return vdev->irq_type == type;
}

static bool is_intx(struct vfio_pci_core_device *vdev)
{
	return vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX;
}

static bool is_irq_none(struct vfio_pci_core_device *vdev)
{
	return !(vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX ||
		 vdev->irq_type == VFIO_PCI_MSI_IRQ_INDEX ||
		 vdev->irq_type == VFIO_PCI_MSIX_IRQ_INDEX);
}

/*
 * INTx
 */
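/*
 * Note: vdev->pci_2_3 set indicates the device supports the PCI 2.3
 * DisINTx (Interrupt Disable) command register bit, so INTx can be masked
 * at the device itself rather than by disabling the host interrupt line.
 */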
static void vfio_send_intx_eventfd(void *opaque, void *unused)
{
	struct vfio_pci_core_device *vdev = opaque;

	if (likely(is_intx(vdev) && !vdev->virq_disabled))
		eventfd_signal(vdev->ctx[0].trigger, 1);
}

/* Returns true if the INTx vfio_pci_irq_ctx.masked value is changed. */
bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long flags;
	bool masked_changed = false;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Masking can come from interrupt, ioctl, or config space
	 * via INTx disable.  The latter means this can get called
	 * even when not using INTx delivery.  In this case, just
	 * try to have the physical bit follow the virtual bit.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
	} else if (!vdev->ctx[0].masked) {
		/*
		 * Can't use check_and_mask here because we always want to
		 * mask, not just when something is pending.
		 */
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
		else
			disable_irq_nosync(pdev->irq);

		vdev->ctx[0].masked = true;
		masked_changed = true;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);
	return masked_changed;
}

/*
 * If this is triggered by an eventfd, we can't call eventfd_signal
 * or else we'll deadlock on the eventfd wait queue.  Return >0 when
 * a signal is necessary, which can then be handled via a work queue
 * or directly depending on the caller.
 */
static int vfio_pci_intx_unmask_handler(void *opaque, void *unused)
{
	struct vfio_pci_core_device *vdev = opaque;
	struct pci_dev *pdev = vdev->pdev;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Unmasking comes from ioctl or config, so again, have the
	 * physical bit follow the virtual even when not using INTx.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 1);
	} else if (vdev->ctx[0].masked && !vdev->virq_disabled) {
		/*
		 * A pending interrupt here would immediately trigger,
		 * but we can avoid that overhead by just re-sending
		 * the interrupt to the user.
		 */
		if (vdev->pci_2_3) {
			if (!pci_check_and_unmask_intx(pdev))
				ret = 1;
		} else
			enable_irq(pdev->irq);

		vdev->ctx[0].masked = (ret > 0);
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return ret;
}

void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
{
	if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0)
		vfio_send_intx_eventfd(vdev, NULL);
}

static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
{
	struct vfio_pci_core_device *vdev = dev_id;
	unsigned long flags;
	int ret = IRQ_NONE;

	spin_lock_irqsave(&vdev->irqlock, flags);

	if (!vdev->pci_2_3) {
		disable_irq_nosync(vdev->pdev->irq);
		vdev->ctx[0].masked = true;
		ret = IRQ_HANDLED;
	} else if (!vdev->ctx[0].masked &&  /* may be shared */
		   pci_check_and_mask_intx(vdev->pdev)) {
		vdev->ctx[0].masked = true;
		ret = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	if (ret == IRQ_HANDLED)
		vfio_send_intx_eventfd(vdev, NULL);

	return ret;
}

static int vfio_intx_enable(struct vfio_pci_core_device *vdev)
{
	if (!is_irq_none(vdev))
		return -EINVAL;

	if (!vdev->pdev->irq)
		return -ENODEV;

	vdev->ctx = kzalloc(sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL_ACCOUNT);
	if (!vdev->ctx)
		return -ENOMEM;

	vdev->num_ctx = 1;

	/*
	 * If the virtual interrupt is masked, restore it.  Devices
	 * supporting DisINTx can be masked at the hardware level
	 * here, non-PCI-2.3 devices will have to wait until the
	 * interrupt is enabled.
	 */
	vdev->ctx[0].masked = vdev->virq_disabled;
	if (vdev->pci_2_3)
		pci_intx(vdev->pdev, !vdev->ctx[0].masked);

	vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;

	return 0;
}

static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev, int fd)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long irqflags = IRQF_SHARED;
	struct eventfd_ctx *trigger;
	unsigned long flags;
	int ret;

	if (vdev->ctx[0].trigger) {
		free_irq(pdev->irq, vdev);
		kfree(vdev->ctx[0].name);
		eventfd_ctx_put(vdev->ctx[0].trigger);
		vdev->ctx[0].trigger = NULL;
	}

	if (fd < 0) /* Disable only */
		return 0;

	vdev->ctx[0].name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-intx(%s)",
				      pci_name(pdev));
	if (!vdev->ctx[0].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(vdev->ctx[0].name);
		return PTR_ERR(trigger);
	}

	vdev->ctx[0].trigger = trigger;

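	/*
	 * Without DisINTx the handler cannot mask at the device, so the
	 * interrupt line cannot safely be shared; request it exclusively.
	 */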
	if (!vdev->pci_2_3)
		irqflags = 0;

	ret = request_irq(pdev->irq, vfio_intx_handler,
			  irqflags, vdev->ctx[0].name, vdev);
	if (ret) {
		vdev->ctx[0].trigger = NULL;
		kfree(vdev->ctx[0].name);
		eventfd_ctx_put(trigger);
		return ret;
	}

	/*
	 * INTx disable will stick across the new irq setup,
	 * disable_irq won't.
	 */
	spin_lock_irqsave(&vdev->irqlock, flags);
	if (!vdev->pci_2_3 && vdev->ctx[0].masked)
		disable_irq_nosync(pdev->irq);
	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return 0;
}

static void vfio_intx_disable(struct vfio_pci_core_device *vdev)
{
	vfio_virqfd_disable(&vdev->ctx[0].unmask);
	vfio_virqfd_disable(&vdev->ctx[0].mask);
	vfio_intx_set_signal(vdev, -1);
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	kfree(vdev->ctx);
}

/*
 * MSI/MSI-X
 */
static irqreturn_t vfio_msihandler(int irq, void *arg)
{
	struct eventfd_ctx *trigger = arg;

	eventfd_signal(trigger, 1);
	return IRQ_HANDLED;
}

static int vfio_msi_enable(struct vfio_pci_core_device *vdev, int nvec, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
	int ret;
	u16 cmd;

	if (!is_irq_none(vdev))
		return -EINVAL;

	vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx),
			    GFP_KERNEL_ACCOUNT);
	if (!vdev->ctx)
		return -ENOMEM;

	/* return the number of supported vectors if we can't get all: */
	cmd = vfio_pci_memory_lock_and_enable(vdev);
	ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag);
	if (ret < nvec) {
		if (ret > 0)
			pci_free_irq_vectors(pdev);
		vfio_pci_memory_unlock_and_restore(vdev, cmd);
		kfree(vdev->ctx);
		return ret;
	}
	vfio_pci_memory_unlock_and_restore(vdev, cmd);

	vdev->num_ctx = nvec;
	vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
				VFIO_PCI_MSI_IRQ_INDEX;

	if (!msix) {
		/*
		 * Compute the virtual hardware field for max msi vectors -
		 * it is the log base 2 of the number of vectors, rounded up.
		 */
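		/*
		 * For example, nvec = 5 gives fls(9) - 1 = 3, i.e. the
		 * emulated MSI capability advertises up to 2^3 = 8 vectors.
		 */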
		vdev->msi_qmax = fls(nvec * 2 - 1) - 1;
	}

	return 0;
}

static int vfio_msi_set_vector_signal(struct vfio_pci_core_device *vdev,
				      int vector, int fd, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	struct eventfd_ctx *trigger;
	int irq, ret;
	u16 cmd;

	if (vector < 0 || vector >= vdev->num_ctx)
		return -EINVAL;

	irq = pci_irq_vector(pdev, vector);

	if (vdev->ctx[vector].trigger) {
		irq_bypass_unregister_producer(&vdev->ctx[vector].producer);

		cmd = vfio_pci_memory_lock_and_enable(vdev);
		free_irq(irq, vdev->ctx[vector].trigger);
		vfio_pci_memory_unlock_and_restore(vdev, cmd);

		kfree(vdev->ctx[vector].name);
		eventfd_ctx_put(vdev->ctx[vector].trigger);
		vdev->ctx[vector].trigger = NULL;
	}

	if (fd < 0)
		return 0;

	vdev->ctx[vector].name = kasprintf(GFP_KERNEL_ACCOUNT,
					   "vfio-msi%s[%d](%s)",
					   msix ? "x" : "", vector,
					   pci_name(pdev));
	if (!vdev->ctx[vector].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(vdev->ctx[vector].name);
		return PTR_ERR(trigger);
	}

	/*
	 * The MSI-X vector table resides in device memory which may be cleared
	 * via backdoor resets.  We don't allow direct access to the vector
	 * table so even if a userspace driver attempts to save/restore around
	 * such a reset it would be unsuccessful.  To avoid this, restore the
	 * cached value of the message prior to enabling.
	 */
	cmd = vfio_pci_memory_lock_and_enable(vdev);
	if (msix) {
		struct msi_msg msg;

		get_cached_msi_msg(irq, &msg);
		pci_write_msi_msg(irq, &msg);
	}

	ret = request_irq(irq, vfio_msihandler, 0,
			  vdev->ctx[vector].name, trigger);
	vfio_pci_memory_unlock_and_restore(vdev, cmd);
	if (ret) {
		kfree(vdev->ctx[vector].name);
		eventfd_ctx_put(trigger);
		return ret;
	}

	vdev->ctx[vector].producer.token = trigger;
	vdev->ctx[vector].producer.irq = irq;
	ret = irq_bypass_register_producer(&vdev->ctx[vector].producer);
	if (unlikely(ret)) {
		dev_info(&pdev->dev,
			 "irq bypass producer (token %p) registration fails: %d\n",
			 vdev->ctx[vector].producer.token, ret);

		vdev->ctx[vector].producer.token = NULL;
	}
	vdev->ctx[vector].trigger = trigger;

	return 0;
}

static int vfio_msi_set_block(struct vfio_pci_core_device *vdev, unsigned start,
			      unsigned count, int32_t *fds, bool msix)
{
	int i, j, ret = 0;

	if (start >= vdev->num_ctx || start + count > vdev->num_ctx)
		return -EINVAL;

	for (i = 0, j = start; i < count && !ret; i++, j++) {
		int fd = fds ? fds[i] : -1;
		ret = vfio_msi_set_vector_signal(vdev, j, fd, msix);
	}

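	/* On failure, unwind any vectors we managed to set up. */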
	if (ret) {
		for (--j; j >= (int)start; j--)
			vfio_msi_set_vector_signal(vdev, j, -1, msix);
	}

	return ret;
}

static void vfio_msi_disable(struct vfio_pci_core_device *vdev, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int i;
	u16 cmd;

	for (i = 0; i < vdev->num_ctx; i++) {
		vfio_virqfd_disable(&vdev->ctx[i].unmask);
		vfio_virqfd_disable(&vdev->ctx[i].mask);
	}

	vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);

	cmd = vfio_pci_memory_lock_and_enable(vdev);
	pci_free_irq_vectors(pdev);
	vfio_pci_memory_unlock_and_restore(vdev, cmd);

	/*
	 * Both disable paths above use pci_intx_for_msi() to clear DisINTx
	 * via their shutdown paths.  Restore for NoINTx devices.
	 */
	if (vdev->nointx)
		pci_intx(pdev, 0);

	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	kfree(vdev->ctx);
}

/*
 * IOCTL support
 */
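/*
 * Illustrative only: the handlers below are reached from userspace via the
 * VFIO_DEVICE_SET_IRQS ioctl.  A sketch that attaches an eventfd (efd, from
 * eventfd(2)) to MSI vector 0 of an open VFIO device fd might look like:
 *
 *	struct vfio_irq_set *set;
 *	int32_t *pfd;
 *
 *	set = malloc(sizeof(*set) + sizeof(int32_t));
 *	set->argsz = sizeof(*set) + sizeof(int32_t);
 *	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
 *	set->index = VFIO_PCI_MSI_IRQ_INDEX;
 *	set->start = 0;
 *	set->count = 1;
 *	pfd = (int32_t *)&set->data;
 *	*pfd = efd;
 *	ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
 */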
static int vfio_pci_set_intx_unmask(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t unmask = *(uint8_t *)data;
		if (unmask)
			vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;
		if (fd >= 0)
			return vfio_virqfd_enable((void *) vdev,
						  vfio_pci_intx_unmask_handler,
						  vfio_send_intx_eventfd, NULL,
						  &vdev->ctx[0].unmask, fd);

		vfio_virqfd_disable(&vdev->ctx[0].unmask);
	}

	return 0;
}

static int vfio_pci_set_intx_mask(struct vfio_pci_core_device *vdev,
				  unsigned index, unsigned start,
				  unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t mask = *(uint8_t *)data;
		if (mask)
			vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		return -ENOTTY; /* XXX implement me */
	}

	return 0;
}

static int vfio_pci_set_intx_trigger(struct vfio_pci_core_device *vdev,
				     unsigned index, unsigned start,
				     unsigned count, uint32_t flags, void *data)
{
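	/* Zero count with DATA_NONE means disable INTx entirely. */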
	if (is_intx(vdev) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_intx_disable(vdev);
		return 0;
	}

	if (!(is_intx(vdev) || is_irq_none(vdev)) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;
		int ret;

		if (is_intx(vdev))
			return vfio_intx_set_signal(vdev, fd);

		ret = vfio_intx_enable(vdev);
		if (ret)
			return ret;

		ret = vfio_intx_set_signal(vdev, fd);
		if (ret)
			vfio_intx_disable(vdev);

		return ret;
	}

	if (!is_intx(vdev))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_send_intx_eventfd(vdev, NULL);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger = *(uint8_t *)data;
		if (trigger)
			vfio_send_intx_eventfd(vdev, NULL);
	}
	return 0;
}

static int vfio_pci_set_msi_trigger(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	int i;
	bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX);

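	/* Zero count with DATA_NONE means disable MSI/MSI-X entirely. */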
	if (irq_is(vdev, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_msi_disable(vdev, msix);
		return 0;
	}

	if (!(irq_is(vdev, index) || is_irq_none(vdev)))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t *fds = data;
		int ret;

		if (vdev->irq_type == index)
			return vfio_msi_set_block(vdev, start, count,
						  fds, msix);

		ret = vfio_msi_enable(vdev, start + count, msix);
		if (ret)
			return ret;

		ret = vfio_msi_set_block(vdev, start, count, fds, msix);
		if (ret)
			vfio_msi_disable(vdev, msix);

		return ret;
	}

	if (!irq_is(vdev, index) || start + count > vdev->num_ctx)
		return -EINVAL;

	for (i = start; i < start + count; i++) {
		if (!vdev->ctx[i].trigger)
			continue;
		if (flags & VFIO_IRQ_SET_DATA_NONE) {
			eventfd_signal(vdev->ctx[i].trigger, 1);
		} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
			uint8_t *bools = data;
			if (bools[i - start])
				eventfd_signal(vdev->ctx[i].trigger, 1);
		}
	}
	return 0;
}

static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
					   unsigned int count, uint32_t flags,
					   void *data)
{
	/* DATA_NONE/DATA_BOOL enables loopback testing */
	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		if (*ctx) {
			if (count) {
				eventfd_signal(*ctx, 1);
			} else {
				eventfd_ctx_put(*ctx);
				*ctx = NULL;
			}
			return 0;
		}
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger;

		if (!count)
			return -EINVAL;

		trigger = *(uint8_t *)data;
		if (trigger && *ctx)
			eventfd_signal(*ctx, 1);

		return 0;
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd;

		if (!count)
			return -EINVAL;

		fd = *(int32_t *)data;
		if (fd == -1) {
			if (*ctx)
				eventfd_ctx_put(*ctx);
			*ctx = NULL;
		} else if (fd >= 0) {
			struct eventfd_ctx *efdctx;

			efdctx = eventfd_ctx_fdget(fd);
			if (IS_ERR(efdctx))
				return PTR_ERR(efdctx);

			if (*ctx)
				eventfd_ctx_put(*ctx);

			*ctx = efdctx;
		}
		return 0;
	}

	return -EINVAL;
}

static int vfio_pci_set_err_trigger(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (index != VFIO_PCI_ERR_IRQ_INDEX || start != 0 || count > 1)
		return -EINVAL;

	return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger,
					       count, flags, data);
}

static int vfio_pci_set_req_trigger(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count > 1)
		return -EINVAL;

	return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger,
					       count, flags, data);
}

int vfio_pci_set_irqs_ioctl(struct vfio_pci_core_device *vdev, uint32_t flags,
			    unsigned index, unsigned start, unsigned count,
			    void *data)
{
	int (*func)(struct vfio_pci_core_device *vdev, unsigned index,
		    unsigned start, unsigned count, uint32_t flags,
		    void *data) = NULL;

	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = vfio_pci_set_intx_mask;
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = vfio_pci_set_intx_unmask;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_intx_trigger;
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
	case VFIO_PCI_MSIX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_msi_trigger;
			break;
		}
		break;
	case VFIO_PCI_ERR_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			if (pci_is_pcie(vdev->pdev))
				func = vfio_pci_set_err_trigger;
			break;
		}
		break;
	case VFIO_PCI_REQ_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_req_trigger;
			break;
		}
		break;
	}

	if (!func)
		return -ENOTTY;

	return func(vdev, index, start, count, flags, data);
}