Lines Matching refs:sq

187 	struct send_queue *sq;  member
350 struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi; in skb_xmit_done()
503 struct send_queue *sq, in __virtnet_xdp_xmit_one() argument
519 sg_init_one(sq->sg, xdpf->data, xdpf->len); in __virtnet_xdp_xmit_one()
521 err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf), in __virtnet_xdp_xmit_one()
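Note: the lines above (503-521) reference the single-descriptor XDP transmit helper: the frame is mapped as one scatterlist entry and queued as an out-buffer. A minimal sketch of that pattern, assuming a caller-supplied virtqueue and scatterlist; the real code also tags the cookie (xdp_to_ptr) so completions can tell XDP frames from skbs, which is omitted here.

    #include <linux/scatterlist.h>
    #include <linux/virtio.h>
    #include <net/xdp.h>

    /* Queue one xdp_frame as a single out-buffer; returns 0 on success or a
     * negative error (e.g. ring full).  The frame pointer is the completion
     * cookie later returned by virtqueue_get_buf(). */
    static int xdp_xmit_one_sketch(struct virtqueue *vq, struct scatterlist *sg,
                                   struct xdp_frame *xdpf)
    {
            sg_init_one(sg, xdpf->data, xdpf->len);     /* one contiguous buffer */
            return virtqueue_add_outbuf(vq, sg, 1, xdpf, GFP_ATOMIC);
    }
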
553 v->sq + qp; \
560 txq = netdev_get_tx_queue(v->dev, (q) - v->sq); \
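Note: lines 553-560 map between a send_queue pointer, its index in vi->sq[], and the matching netdev tx queue. A small sketch of the index arithmetic this relies on; the helper names are illustrative, and the even/odd virtqueue numbering below is the usual virtio-net layout assumed here rather than quoted from this file.

    /* Recovering a queue's index: vi->sq[] is a flat array, so pointer
     * subtraction works, e.g. index = sq - vi->sq (compare line 560 above). */

    /* Assumed numbering: rx queue N uses virtqueue 2*N, tx queue N uses
     * virtqueue 2*N + 1, consistent with the txq2vq()/vq2txq() uses listed. */
    static int txq2vq_sketch(int txq)      { return txq * 2 + 1; }
    static int vq2txq_sketch(int vq_index) { return (vq_index - 1) / 2; }
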
573 struct send_queue *sq; in virtnet_xdp_xmit() local
590 sq = virtnet_xdp_get_sq(vi); in virtnet_xdp_xmit()
598 while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { in virtnet_xdp_xmit()
616 if (__virtnet_xdp_xmit_one(vi, sq, xdpf)) in virtnet_xdp_xmit()
623 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) in virtnet_xdp_xmit()
627 u64_stats_update_begin(&sq->stats.syncp); in virtnet_xdp_xmit()
628 sq->stats.bytes += bytes; in virtnet_xdp_xmit()
629 sq->stats.packets += packets; in virtnet_xdp_xmit()
630 sq->stats.xdp_tx += n; in virtnet_xdp_xmit()
631 sq->stats.xdp_tx_drops += n - nxmit; in virtnet_xdp_xmit()
632 sq->stats.kicks += kicks; in virtnet_xdp_xmit()
633 u64_stats_update_end(&sq->stats.syncp); in virtnet_xdp_xmit()
635 virtnet_xdp_put_sq(vi, sq); in virtnet_xdp_xmit()
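Note: lines 573-635 reference the bulk XDP transmit path: reclaim already-used descriptors, queue the new frames, kick the device once for the batch, and fold the counts into per-queue stats under the u64_stats write section. A compressed sketch of that flow with a stand-in struct mirroring the fields visible above; error paths and the skb-vs-frame cookie handling are left out.

    #include <linux/scatterlist.h>
    #include <linux/u64_stats_sync.h>
    #include <linux/virtio.h>
    #include <net/xdp.h>

    struct sq_sketch {                          /* minimal stand-in for send_queue */
            struct virtqueue *vq;
            struct scatterlist sg[1];
            struct u64_stats_sync syncp;
            u64 xdp_tx, xdp_tx_drops, kicks;
    };

    static int xdp_xmit_sketch(struct sq_sketch *sq, int n, struct xdp_frame **frames)
    {
            unsigned int len, kicks = 0;
            int i, nxmit = 0;
            void *ptr;

            /* 1. Reclaim descriptors the device has already consumed. */
            while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL)
                    xdp_return_frame(ptr);      /* real code also handles skb cookies */

            /* 2. Queue the new frames; stop at the first ring-full error. */
            for (i = 0; i < n; i++) {
                    sg_init_one(sq->sg, frames[i]->data, frames[i]->len);
                    if (virtqueue_add_outbuf(sq->vq, sq->sg, 1, frames[i], GFP_ATOMIC))
                            break;
                    nxmit++;
            }

            /* 3. One doorbell for the whole batch. */
            if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
                    kicks = 1;

            /* 4. Publish counters inside the u64_stats write section. */
            u64_stats_update_begin(&sq->syncp);
            sq->xdp_tx += n;
            sq->xdp_tx_drops += n - nxmit;
            sq->kicks += kicks;
            u64_stats_update_end(&sq->syncp);

            return nxmit;
    }
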
1470 static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) in free_old_xmit_skbs() argument
1477 while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { in free_old_xmit_skbs()
1500 u64_stats_update_begin(&sq->stats.syncp); in free_old_xmit_skbs()
1501 sq->stats.bytes += bytes; in free_old_xmit_skbs()
1502 sq->stats.packets += packets; in free_old_xmit_skbs()
1503 u64_stats_update_end(&sq->stats.syncp); in free_old_xmit_skbs()
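Note: lines 1470-1503 reclaim completed transmit buffers and account them. A reduced sketch of that loop, assuming every returned cookie is an sk_buff (the driver additionally recognises XDP frame cookies).

    #include <linux/skbuff.h>
    #include <linux/u64_stats_sync.h>
    #include <linux/virtio.h>

    static void free_old_xmit_sketch(struct virtqueue *vq,
                                     struct u64_stats_sync *syncp,
                                     u64 *tx_bytes, u64 *tx_packets, bool in_napi)
    {
            unsigned int len, packets = 0, bytes = 0;
            struct sk_buff *skb;

            /* get_buf() hands back the cookie given to virtqueue_add_outbuf(). */
            while ((skb = virtqueue_get_buf(vq, &len)) != NULL) {
                    bytes += skb->len;
                    packets++;
                    napi_consume_skb(skb, in_napi); /* bulk-free when in NAPI context */
            }

            if (!packets)
                    return;

            u64_stats_update_begin(syncp);
            *tx_bytes += bytes;
            *tx_packets += packets;
            u64_stats_update_end(syncp);
    }
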
1520 struct send_queue *sq = &vi->sq[index]; in virtnet_poll_cleantx() local
1523 if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index)) in virtnet_poll_cleantx()
1528 virtqueue_disable_cb(sq->vq); in virtnet_poll_cleantx()
1529 free_old_xmit_skbs(sq, true); in virtnet_poll_cleantx()
1530 } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq))); in virtnet_poll_cleantx()
1532 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) in virtnet_poll_cleantx()
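Note: lines 1520-1532 clean the tx ring opportunistically from the rx NAPI handler. The sketch below shows the race-free loop: interrupts are suppressed while cleaning, callbacks are re-armed with virtqueue_enable_cb_delayed(), and if that reports more completions arrived the loop cleans again; the stack is woken only once a worst-case skb fits. Names other than the virtqueue/netdev calls are illustrative, and the reclaim step is elided.

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <linux/virtio.h>

    static void poll_cleantx_sketch(struct virtqueue *vq, struct netdev_queue *txq)
    {
            if (!__netif_tx_trylock(txq))
                    return;

            do {
                    virtqueue_disable_cb(vq);   /* suppress tx interrupts while cleaning */
                    /* ... reclaim completed buffers as in free_old_xmit_skbs() ... */
            } while (unlikely(!virtqueue_enable_cb_delayed(vq)));

            /* Wake the stack only once a worst-case skb is guaranteed to fit. */
            if (vq->num_free >= 2 + MAX_SKB_FRAGS)
                    netif_tx_wake_queue(txq);

            __netif_tx_unlock(txq);
    }
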
1544 struct send_queue *sq; in virtnet_poll() local
1560 sq = virtnet_xdp_get_sq(vi); in virtnet_poll()
1561 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { in virtnet_poll()
1562 u64_stats_update_begin(&sq->stats.syncp); in virtnet_poll()
1563 sq->stats.kicks++; in virtnet_poll()
1564 u64_stats_update_end(&sq->stats.syncp); in virtnet_poll()
1566 virtnet_xdp_put_sq(vi, sq); in virtnet_poll()
1595 virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi); in virtnet_open()
1603 struct send_queue *sq = container_of(napi, struct send_queue, napi); in virtnet_poll_tx() local
1604 struct virtnet_info *vi = sq->vq->vdev->priv; in virtnet_poll_tx()
1605 unsigned int index = vq2txq(sq->vq); in virtnet_poll_tx()
1618 virtqueue_disable_cb(sq->vq); in virtnet_poll_tx()
1619 free_old_xmit_skbs(sq, true); in virtnet_poll_tx()
1621 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) in virtnet_poll_tx()
1624 opaque = virtqueue_enable_cb_prepare(sq->vq); in virtnet_poll_tx()
1629 virtqueue_disable_cb(sq->vq); in virtnet_poll_tx()
1634 if (unlikely(virtqueue_poll(sq->vq, opaque))) { in virtnet_poll_tx()
1637 virtqueue_disable_cb(sq->vq); in virtnet_poll_tx()
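Note: lines 1603-1637 are the dedicated tx NAPI poll. Callbacks are re-armed with virtqueue_enable_cb_prepare()/virtqueue_poll() so a completion that races with napi_complete_done() triggers another schedule instead of being lost. A sketch of that re-arm dance under those assumptions; the reclaim step and the tx-queue locking are elided.

    #include <linux/netdevice.h>
    #include <linux/virtio.h>

    static int poll_tx_sketch(struct napi_struct *napi, struct virtqueue *vq, int budget)
    {
            unsigned int opaque;
            bool done;

            virtqueue_disable_cb(vq);
            /* ... reclaim completed buffers here ... */

            opaque = virtqueue_enable_cb_prepare(vq);   /* re-arm, remember position */
            done = napi_complete_done(napi, 0);         /* tx poll reports zero work */
            if (!done)
                    virtqueue_disable_cb(vq);           /* someone rescheduled us */

            /* If completions slipped in after enable_cb_prepare(), poll again. */
            if (done && unlikely(virtqueue_poll(vq, opaque)) &&
                napi_schedule_prep(napi)) {
                    virtqueue_disable_cb(vq);
                    __napi_schedule(napi);
            }
            return 0;
    }
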
1647 static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) in xmit_skb() argument
1651 struct virtnet_info *vi = sq->vq->vdev->priv; in xmit_skb()
1676 sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2)); in xmit_skb()
1679 num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); in xmit_skb()
1685 sg_set_buf(sq->sg, hdr, hdr_len); in xmit_skb()
1686 num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len); in xmit_skb()
1691 return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); in xmit_skb()
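Note: lines 1647-1691 build the scatterlist for an skb. When there is headroom the virtio-net header is pushed into the skb's linear data so header and payload share one sg chain; otherwise the header gets its own leading sg entry. A sketch of the latter, simpler case; hdr and hdr_len stand in for the driver's virtio_net_hdr.

    #include <linux/scatterlist.h>
    #include <linux/skbuff.h>
    #include <linux/virtio.h>

    static int xmit_skb_sketch(struct virtqueue *vq, struct scatterlist *sg,
                               struct sk_buff *skb, void *hdr, unsigned int hdr_len)
    {
            int num_sg;

            /* One slot for the header, one for the linear area, one per fragment. */
            sg_init_table(sg, skb_shinfo(skb)->nr_frags + 2);
            sg_set_buf(sg, hdr, hdr_len);                    /* sg[0]: virtio-net header */
            num_sg = skb_to_sgvec(skb, sg + 1, 0, skb->len); /* sg[1..]: skb data */
            if (unlikely(num_sg < 0))
                    return num_sg;

            /* The skb is the completion cookie handed back by virtqueue_get_buf(). */
            return virtqueue_add_outbuf(vq, sg, num_sg + 1, skb, GFP_ATOMIC);
    }
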
1698 struct send_queue *sq = &vi->sq[qnum]; in start_xmit() local
1702 bool use_napi = sq->napi.weight; in start_xmit()
1707 virtqueue_disable_cb(sq->vq); in start_xmit()
1709 free_old_xmit_skbs(sq, false); in start_xmit()
1712 unlikely(!virtqueue_enable_cb_delayed(sq->vq))); in start_xmit()
1718 err = xmit_skb(sq, skb); in start_xmit()
1748 if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { in start_xmit()
1751 unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { in start_xmit()
1753 free_old_xmit_skbs(sq, false); in start_xmit()
1754 if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { in start_xmit()
1756 virtqueue_disable_cb(sq->vq); in start_xmit()
1762 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { in start_xmit()
1763 u64_stats_update_begin(&sq->stats.syncp); in start_xmit()
1764 sq->stats.kicks++; in start_xmit()
1765 u64_stats_update_end(&sq->stats.syncp); in start_xmit()
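Note: lines 1698-1765 reference .ndo_start_xmit: reclaim old buffers, queue the skb, stop the subqueue when fewer than 2 + MAX_SKB_FRAGS descriptors remain, re-check after re-arming callbacks to avoid a lost wakeup, then kick the device and count the kick under the stats seqcount. A sketch of the stop/re-check portion only, with illustrative names; the reclaim call is elided.

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <linux/virtio.h>

    /* Called after the skb has been queued on vq.  Stops the stack when the
     * ring may not fit another worst-case skb, then re-checks once callbacks
     * are re-armed so a completion racing with the stop still restarts us. */
    static void maybe_stop_tx_sketch(struct virtqueue *vq, struct net_device *dev,
                                     int qnum, bool use_napi)
    {
            if (vq->num_free >= 2 + MAX_SKB_FRAGS)
                    return;

            netif_stop_subqueue(dev, qnum);
            if (use_napi)
                    return;                 /* tx NAPI will reclaim and wake the queue */

            if (unlikely(!virtqueue_enable_cb_delayed(vq))) {
                    /* More buffers completed meanwhile: reclaim them (as in
                     * free_old_xmit_skbs()) and restart if room opened up. */
                    if (vq->num_free >= 2 + MAX_SKB_FRAGS) {
                            netif_start_subqueue(dev, qnum);
                            virtqueue_disable_cb(vq);
                    }
            }
    }
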
1879 struct send_queue *sq = &vi->sq[i]; in virtnet_stats() local
1882 start = u64_stats_fetch_begin_irq(&sq->stats.syncp); in virtnet_stats()
1883 tpackets = sq->stats.packets; in virtnet_stats()
1884 tbytes = sq->stats.bytes; in virtnet_stats()
1885 terrors = sq->stats.tx_timeouts; in virtnet_stats()
1886 } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start)); in virtnet_stats()
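Note: lines 1879-1886 aggregate per-queue tx counters for .ndo_get_stats64. The reader side of the u64_stats seqcount retries whenever a writer updated the counters mid-read, which is what makes the lockless 64-bit counters safe on 32-bit hosts. A sketch using the same fetch/retry helpers referenced above, with a stand-in stats struct.

    #include <linux/u64_stats_sync.h>

    struct tx_stats_sketch {
            struct u64_stats_sync syncp;
            u64 packets, bytes, tx_timeouts;
    };

    static void read_tx_stats_sketch(const struct tx_stats_sketch *s,
                                     u64 *packets, u64 *bytes, u64 *errors)
    {
            unsigned int start;

            do {
                    start = u64_stats_fetch_begin_irq(&s->syncp);
                    *packets = s->packets;
                    *bytes   = s->bytes;
                    *errors  = s->tx_timeouts;  /* tx timeouts reported as tx errors */
            } while (u64_stats_fetch_retry_irq(&s->syncp, start));
    }
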
1965 virtnet_napi_tx_disable(&vi->sq[i].napi); in virtnet_close()
2078 virtqueue_set_affinity(vi->sq[i].vq, NULL); in virtnet_clean_affinity()
2115 virtqueue_set_affinity(vi->sq[i].vq, mask); in virtnet_set_affinity()
2179 ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq); in virtnet_get_ringparam()
2295 struct send_queue *sq = &vi->sq[i]; in virtnet_get_ethtool_stats() local
2297 stats_base = (u8 *)&sq->stats; in virtnet_get_ethtool_stats()
2299 start = u64_stats_fetch_begin_irq(&sq->stats.syncp); in virtnet_get_ethtool_stats()
2304 } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start)); in virtnet_get_ethtool_stats()
2356 if (napi_weight ^ vi->sq[0].napi.weight) { in virtnet_set_coalesce()
2360 vi->sq[i].napi.weight = napi_weight; in virtnet_set_coalesce()
2379 if (vi->sq[0].napi.weight) in virtnet_get_coalesce()
2445 virtnet_napi_tx_disable(&vi->sq[i].napi); in virtnet_freeze_down()
2470 virtnet_napi_tx_enable(vi, vi->sq[i].vq, in virtnet_restore_up()
2471 &vi->sq[i].napi); in virtnet_restore_up()
2569 virtnet_napi_tx_disable(&vi->sq[i].napi); in virtnet_xdp_set()
2604 virtnet_napi_tx_enable(vi, vi->sq[i].vq, in virtnet_xdp_set()
2605 &vi->sq[i].napi); in virtnet_xdp_set()
2621 virtnet_napi_tx_enable(vi, vi->sq[i].vq, in virtnet_xdp_set()
2622 &vi->sq[i].napi); in virtnet_xdp_set()
2685 struct send_queue *sq = &priv->sq[txqueue]; in virtnet_tx_timeout() local
2688 u64_stats_update_begin(&sq->stats.syncp); in virtnet_tx_timeout()
2689 sq->stats.tx_timeouts++; in virtnet_tx_timeout()
2690 u64_stats_update_end(&sq->stats.syncp); in virtnet_tx_timeout()
2693 txqueue, sq->name, sq->vq->index, sq->vq->name, in virtnet_tx_timeout()
2761 __netif_napi_del(&vi->sq[i].napi); in virtnet_free_queues()
2770 kfree(vi->sq); in virtnet_free_queues()
2811 struct virtqueue *vq = vi->sq[i].vq; in free_unused_bufs()
2907 sprintf(vi->sq[i].name, "output.%d", i); in virtnet_find_vqs()
2909 names[txq2vq(i)] = vi->sq[i].name; in virtnet_find_vqs()
2928 vi->sq[i].vq = vqs[txq2vq(i)]; in virtnet_find_vqs()
2957 vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL); in virtnet_alloc_queues()
2958 if (!vi->sq) in virtnet_alloc_queues()
2969 netif_tx_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx, in virtnet_alloc_queues()
2974 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); in virtnet_alloc_queues()
2977 u64_stats_init(&vi->sq[i].stats.syncp); in virtnet_alloc_queues()
2983 kfree(vi->sq); in virtnet_alloc_queues()
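Note: lines 2957-2983 allocate the send-queue array and initialise each entry's tx NAPI context, scatterlist and stats seqcount, freeing the array on the error path. A compressed sketch of the per-queue setup, using a stand-in struct with the fields visible in this listing and an assumed scatterlist size.

    #include <linux/netdevice.h>
    #include <linux/scatterlist.h>
    #include <linux/skbuff.h>
    #include <linux/slab.h>
    #include <linux/u64_stats_sync.h>
    #include <linux/virtio.h>

    #define SG_ENTRIES_SKETCH (MAX_SKB_FRAGS + 2)   /* header + linear + frags */

    struct send_queue_sketch {                      /* stand-in for send_queue */
            struct virtqueue *vq;
            struct scatterlist sg[SG_ENTRIES_SKETCH];
            struct napi_struct napi;
            struct u64_stats_sync syncp;
    };

    static struct send_queue_sketch *
    alloc_send_queues_sketch(struct net_device *dev, int max_queue_pairs,
                             int (*poll_tx)(struct napi_struct *, int), int tx_weight)
    {
            struct send_queue_sketch *sq;
            int i;

            sq = kcalloc(max_queue_pairs, sizeof(*sq), GFP_KERNEL);
            if (!sq)
                    return NULL;

            for (i = 0; i < max_queue_pairs; i++) {
                    netif_tx_napi_add(dev, &sq[i].napi, poll_tx, tx_weight);
                    sg_init_table(sq[i].sg, ARRAY_SIZE(sq[i].sg));
                    u64_stats_init(&sq[i].syncp);
            }
            return sq;
    }
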