Lines matching refs:_vq — cross-reference of every line in drivers/virtio/virtio_ring.c that uses the _vq identifier, either as a macro argument ("argument") or inside the function named after the quoted line ("in <function>()").

19 #define BAD_RING(_vq, fmt, args...)				\  argument
21 dev_err(&(_vq)->vq.vdev->dev, \
22 "%s:"fmt, (_vq)->vq.name, ##args); \
26 #define START_USE(_vq) \ argument
28 if ((_vq)->in_use) \
30 (_vq)->vq.name, (_vq)->in_use); \
31 (_vq)->in_use = __LINE__; \
33 #define END_USE(_vq) \ argument
34 do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
35 #define LAST_ADD_TIME_UPDATE(_vq) \ argument
40 if ((_vq)->last_add_time_valid) \
42 (_vq)->last_add_time)) > 100); \
43 (_vq)->last_add_time = now; \
44 (_vq)->last_add_time_valid = true; \
46 #define LAST_ADD_TIME_CHECK(_vq) \ argument
48 if ((_vq)->last_add_time_valid) { \
50 (_vq)->last_add_time)) > 100); \
53 #define LAST_ADD_TIME_INVALID(_vq) \ argument
54 ((_vq)->last_add_time_valid = false)
56 #define BAD_RING(_vq, fmt, args...) \ argument
58 dev_err(&_vq->vq.vdev->dev, \
59 "%s:"fmt, (_vq)->vq.name, ##args); \
60 (_vq)->broken = true; \
206 #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) argument
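to_vvq() maps the public struct virtqueue handle back to the private struct vring_virtqueue that embeds it. A minimal sketch of the same container_of() idiom, using hypothetical names (struct my_queue, to_myq) purely for illustration:

#include <linux/kernel.h>	/* container_of() */
#include <linux/virtio.h>	/* struct virtqueue */

/* Hypothetical wrapper, mirroring how struct vring_virtqueue embeds vq. */
struct my_queue {
	struct virtqueue vq;	/* embedded, not a pointer */
	bool in_use;		/* private bookkeeping */
};

/* Same pattern as to_vvq(): recover the wrapper from the embedded member. */
static inline struct my_queue *to_myq(struct virtqueue *_vq)
{
	return container_of(_vq, struct my_queue, vq);
}
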
208 static inline bool virtqueue_use_indirect(struct virtqueue *_vq, in virtqueue_use_indirect() argument
211 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_use_indirect()
426 static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq, in alloc_indirect_split() argument
445 desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1); in alloc_indirect_split()
478 static inline int virtqueue_add_split(struct virtqueue *_vq, in virtqueue_add_split() argument
487 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_add_split()
510 if (virtqueue_use_indirect(_vq, total_sg)) in virtqueue_add_split()
511 desc = alloc_indirect_split(_vq, total_sg, gfp); in virtqueue_add_split()
554 i = virtqueue_add_desc_split(_vq, desc, i, addr, sg->length, in virtqueue_add_split()
569 i = virtqueue_add_desc_split(_vq, desc, i, addr, in virtqueue_add_split()
577 desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT); in virtqueue_add_split()
590 virtqueue_add_desc_split(_vq, vq->split.vring.desc, in virtqueue_add_split()
616 vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head); in virtqueue_add_split()
622 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev, in virtqueue_add_split()
632 virtqueue_kick(_vq); in virtqueue_add_split()
649 i = virtio16_to_cpu(_vq->vdev, desc[i].next); in virtqueue_add_split()
661 static bool virtqueue_kick_prepare_split(struct virtqueue *_vq) in virtqueue_kick_prepare_split() argument
663 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_kick_prepare_split()
680 needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, in virtqueue_kick_prepare_split()
685 cpu_to_virtio16(_vq->vdev, in virtqueue_kick_prepare_split()
748 static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq, in virtqueue_get_buf_ctx_split() argument
752 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_buf_ctx_split()
774 i = virtio32_to_cpu(_vq->vdev, in virtqueue_get_buf_ctx_split()
776 *len = virtio32_to_cpu(_vq->vdev, in virtqueue_get_buf_ctx_split()
798 cpu_to_virtio16(_vq->vdev, vq->last_used_idx)); in virtqueue_get_buf_ctx_split()
806 static void virtqueue_disable_cb_split(struct virtqueue *_vq) in virtqueue_disable_cb_split() argument
808 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_disable_cb_split()
817 cpu_to_virtio16(_vq->vdev, in virtqueue_disable_cb_split()
822 static unsigned virtqueue_enable_cb_prepare_split(struct virtqueue *_vq) in virtqueue_enable_cb_prepare_split() argument
824 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_prepare_split()
838 cpu_to_virtio16(_vq->vdev, in virtqueue_enable_cb_prepare_split()
841 vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev, in virtqueue_enable_cb_prepare_split()
847 static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned last_used_idx) in virtqueue_poll_split() argument
849 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_poll_split()
851 return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, in virtqueue_poll_split()
855 static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq) in virtqueue_enable_cb_delayed_split() argument
857 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_delayed_split()
871 cpu_to_virtio16(_vq->vdev, in virtqueue_enable_cb_delayed_split()
879 cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs)); in virtqueue_enable_cb_delayed_split()
881 if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx) in virtqueue_enable_cb_delayed_split()
891 static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq) in virtqueue_detach_unused_buf_split() argument
893 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_detach_unused_buf_split()
906 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev, in virtqueue_detach_unused_buf_split()
1166 static inline int virtqueue_add_packed(struct virtqueue *_vq, in virtqueue_add_packed() argument
1175 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_add_packed()
1197 if (virtqueue_use_indirect(_vq, total_sg)) { in virtqueue_add_packed()
1316 static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq) in virtqueue_kick_prepare_packed() argument
1318 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_kick_prepare_packed()
1430 static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq, in virtqueue_get_buf_ctx_packed() argument
1434 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_buf_ctx_packed()
1495 static void virtqueue_disable_cb_packed(struct virtqueue *_vq) in virtqueue_disable_cb_packed() argument
1497 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_disable_cb_packed()
1506 static unsigned virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq) in virtqueue_enable_cb_prepare_packed() argument
1508 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_prepare_packed()
1542 static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap) in virtqueue_poll_packed() argument
1544 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_poll_packed()
1554 static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq) in virtqueue_enable_cb_delayed_packed() argument
1556 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_delayed_packed()
1613 static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq) in virtqueue_detach_unused_buf_packed() argument
1615 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_detach_unused_buf_packed()
1791 static inline int virtqueue_add(struct virtqueue *_vq, in virtqueue_add() argument
1800 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_add()
1802 return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg, in virtqueue_add()
1804 virtqueue_add_split(_vq, sgs, total_sg, in virtqueue_add()
1822 int virtqueue_add_sgs(struct virtqueue *_vq, in virtqueue_add_sgs() argument
1838 return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, in virtqueue_add_sgs()
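For context, a sketch of how a driver might call virtqueue_add_sgs(); the request layout, names, and GFP choice are illustrative assumptions rather than anything taken from virtio_ring.c:

#include <linux/scatterlist.h>
#include <linux/virtio.h>

/* Hypothetical request: a command word the device reads and a status
 * byte the device writes back. Must not live on the stack. */
struct my_req {
	__le32 cmd;
	u8 status;
};

static int my_submit(struct virtqueue *vq, struct my_req *req)
{
	struct scatterlist cmd_sg, status_sg, *sgs[2];

	sg_init_one(&cmd_sg, &req->cmd, sizeof(req->cmd));
	sg_init_one(&status_sg, &req->status, sizeof(req->status));
	sgs[0] = &cmd_sg;	/* out_sgs (device-readable) come first */
	sgs[1] = &status_sg;	/* in_sgs (device-writable) follow */

	/* req is the cookie that virtqueue_get_buf() hands back later. */
	return virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
}
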
1922 bool virtqueue_kick_prepare(struct virtqueue *_vq) in virtqueue_kick_prepare() argument
1924 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_kick_prepare()
1926 return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) : in virtqueue_kick_prepare()
1927 virtqueue_kick_prepare_split(_vq); in virtqueue_kick_prepare()
1939 bool virtqueue_notify(struct virtqueue *_vq) in virtqueue_notify() argument
1941 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_notify()
1947 if (!vq->notify(_vq)) { in virtqueue_notify()
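virtqueue_kick() is simply virtqueue_kick_prepare() followed by virtqueue_notify() when needed; exposing the two halves lets a driver decide under its own lock and notify after dropping it. A sketch of that split, with the lock assumed for illustration:

#include <linux/spinlock.h>
#include <linux/virtio.h>

static void my_kick(struct virtqueue *vq, spinlock_t *lock)
{
	bool notify;

	spin_lock(lock);
	notify = virtqueue_kick_prepare(vq);	/* decide under the lock */
	spin_unlock(lock);

	if (notify)
		virtqueue_notify(vq);		/* doorbell outside the lock */
}
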
1992 void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len, in virtqueue_get_buf_ctx() argument
1995 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_buf_ctx()
1997 return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) : in virtqueue_get_buf_ctx()
1998 virtqueue_get_buf_ctx_split(_vq, len, ctx); in virtqueue_get_buf_ctx()
2002 void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) in virtqueue_get_buf() argument
2004 return virtqueue_get_buf_ctx(_vq, len, NULL); in virtqueue_get_buf()
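On completion, a driver drains the used ring with virtqueue_get_buf(), which returns the data cookie passed at add time. A sketch under the assumption that the cookie was a kmalloc'ed request (struct my_req is hypothetical):

#include <linux/slab.h>
#include <linux/virtio.h>

struct my_req;	/* hypothetical request type from the submit path */

static void my_drain_used(struct virtqueue *vq)
{
	struct my_req *req;
	unsigned int len;

	/* NULL once the used ring is empty; len is the number of bytes
	 * the device wrote into the in_sgs of this request. */
	while ((req = virtqueue_get_buf(vq, &len)) != NULL)
		kfree(req);	/* stand-in for real per-request completion */
}
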
2016 void virtqueue_disable_cb(struct virtqueue *_vq) in virtqueue_disable_cb() argument
2018 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_disable_cb()
2027 virtqueue_disable_cb_packed(_vq); in virtqueue_disable_cb()
2029 virtqueue_disable_cb_split(_vq); in virtqueue_disable_cb()
2045 unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq) in virtqueue_enable_cb_prepare() argument
2047 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_prepare()
2052 return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) : in virtqueue_enable_cb_prepare()
2053 virtqueue_enable_cb_prepare_split(_vq); in virtqueue_enable_cb_prepare()
2066 bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx) in virtqueue_poll() argument
2068 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_poll()
2074 return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) : in virtqueue_poll()
2075 virtqueue_poll_split(_vq, last_used_idx); in virtqueue_poll()
2090 bool virtqueue_enable_cb(struct virtqueue *_vq) in virtqueue_enable_cb() argument
2092 unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq); in virtqueue_enable_cb()
2094 return !virtqueue_poll(_vq, last_used_idx); in virtqueue_enable_cb()
2111 bool virtqueue_enable_cb_delayed(struct virtqueue *_vq) in virtqueue_enable_cb_delayed() argument
2113 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_delayed()
2118 return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) : in virtqueue_enable_cb_delayed()
2119 virtqueue_enable_cb_delayed_split(_vq); in virtqueue_enable_cb_delayed()
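virtqueue_disable_cb() and virtqueue_enable_cb() are normally paired in the callback to close the race between draining the ring and re-arming interrupts, since virtqueue_enable_cb() returns false when buffers slipped in meanwhile. A sketch of that canonical loop (names assumed; buffer processing omitted):

#include <linux/virtio.h>

static void my_vq_callback(struct virtqueue *vq)
{
	unsigned int len;
	void *buf;

	do {
		/* Suppress further callbacks while draining. */
		virtqueue_disable_cb(vq);

		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
			;	/* process buf here (omitted in this sketch) */

		/* Re-arm; if it reports pending buffers, drain again. */
	} while (!virtqueue_enable_cb(vq));
}
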
2131 void *virtqueue_detach_unused_buf(struct virtqueue *_vq) in virtqueue_detach_unused_buf() argument
2133 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_detach_unused_buf()
2135 return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) : in virtqueue_detach_unused_buf()
2136 virtqueue_detach_unused_buf_split(_vq); in virtqueue_detach_unused_buf()
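virtqueue_detach_unused_buf() is the teardown-side helper: after the device has been reset, a driver calls it in a loop to reclaim buffers it queued but the device never consumed. A sketch, assuming the queued cookies were kmalloc'ed buffers:

#include <linux/slab.h>
#include <linux/virtio.h>

/* Call only after the device has been reset, so nothing is in flight. */
static void my_free_unused_bufs(struct virtqueue *vq)
{
	void *buf;

	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
		kfree(buf);
}
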
2145 irqreturn_t vring_interrupt(int irq, void *_vq) in vring_interrupt() argument
2147 struct vring_virtqueue *vq = to_vvq(_vq); in vring_interrupt()
2303 void vring_del_virtqueue(struct virtqueue *_vq) in vring_del_virtqueue() argument
2305 struct vring_virtqueue *vq = to_vvq(_vq); in vring_del_virtqueue()
2308 list_del(&_vq->list); in vring_del_virtqueue()
2379 unsigned int virtqueue_get_vring_size(struct virtqueue *_vq) in virtqueue_get_vring_size() argument
2382 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_vring_size()
2388 bool virtqueue_is_broken(struct virtqueue *_vq) in virtqueue_is_broken() argument
2390 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_is_broken()
2402 struct virtqueue *_vq; in virtio_break_device() local
2405 list_for_each_entry(_vq, &dev->vqs, list) { in virtio_break_device()
2406 struct vring_virtqueue *vq = to_vvq(_vq); in virtio_break_device()
2415 dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq) in virtqueue_get_desc_addr() argument
2417 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_desc_addr()
2428 dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq) in virtqueue_get_avail_addr() argument
2430 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_avail_addr()
2442 dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq) in virtqueue_get_used_addr() argument
2444 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_used_addr()
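The three accessors above return the DMA addresses of the descriptor table, available ring and used ring, which a transport needs when programming the device. A small hypothetical debug helper showing how they might be read together with virtqueue_get_vring_size():

#include <linux/printk.h>
#include <linux/virtio.h>

static void my_dump_vring(struct virtqueue *vq)
{
	dma_addr_t desc = virtqueue_get_desc_addr(vq);
	dma_addr_t avail = virtqueue_get_avail_addr(vq);
	dma_addr_t used = virtqueue_get_used_addr(vq);

	pr_info("%s: %u entries, desc %pad avail %pad used %pad\n",
		vq->name, virtqueue_get_vring_size(vq), &desc, &avail, &used);
}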