Searched refs: shinfo (Results 1 – 24 of 24), sorted by relevance

/linux/drivers/net/ethernet/mellanox/mlx4/
en_tx.c
610 if (shinfo->nr_frags == 1) { in is_inline()
617 if (shinfo->nr_frags) in is_inline()
634 const struct skb_shared_info *shinfo, in get_real_size() argument
643 if (shinfo->gso_size) { in get_real_size()
665 shinfo, pfrag); in get_real_size()
695 if (shinfo->nr_frags) in build_inline_wqe()
715 if (shinfo->nr_frags) in build_inline_wqe()
799 struct skb_shared_info *shinfo, in mlx4_en_build_dma_wqe() argument
852 while (++i_frag < shinfo->nr_frags) { in mlx4_en_build_dma_wqe()
976 shinfo->tx_flags |= SKBTX_IN_PROGRESS; in mlx4_en_xmit()
[all …]
/linux/drivers/net/xen-netback/
netback.c
379 skb_frag_t *frags = shinfo->frags; in xenvif_get_requests()
385 nr_slots = shinfo->nr_frags; in xenvif_get_requests()
390 for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots; in xenvif_get_requests()
400 shinfo = skb_shinfo(nskb); in xenvif_get_requests()
401 frags = shinfo->frags; in xenvif_get_requests()
403 for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow; in xenvif_get_requests()
461 int nr_frags = shinfo->nr_frags; in xenvif_tx_check_gop()
560 first_shinfo = shinfo; in xenvif_tx_check_gop()
561 shinfo = skb_shinfo(shinfo->frag_list); in xenvif_tx_check_gop()
562 nr_frags = shinfo->nr_frags; in xenvif_tx_check_gop()
[all …]
/linux/include/linux/
virtio_net.h
145 struct skb_shared_info *shinfo = skb_shinfo(skb); in virtio_net_hdr_to_skb() local
153 shinfo->gso_size = gso_size; in virtio_net_hdr_to_skb()
154 shinfo->gso_type = gso_type; in virtio_net_hdr_to_skb()
157 shinfo->gso_type |= SKB_GSO_DODGY; in virtio_net_hdr_to_skb()
158 shinfo->gso_segs = 0; in virtio_net_hdr_to_skb()
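The virtio_net.h hits above (lines 145 – 158) outline how virtio_net_hdr_to_skb() transfers GSO metadata from the device-supplied header into the skb's shared info. A minimal, hedged reconstruction of that step, assuming gso_size and gso_type were already derived from the virtio_net_hdr earlier in the function:

	/* Copy the device-reported GSO parameters, then mark the skb as
	 * untrusted (SKB_GSO_DODGY) so the stack revalidates the header
	 * and recomputes the segment count itself. */
	shinfo->gso_size = gso_size;
	shinfo->gso_type = gso_type;
	shinfo->gso_type |= SKB_GSO_DODGY;
	shinfo->gso_segs = 0;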
skbuff.h
4613 static inline void skb_increase_gso_size(struct skb_shared_info *shinfo, in skb_increase_gso_size() argument
4616 if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS)) in skb_increase_gso_size()
4618 shinfo->gso_size += increment; in skb_increase_gso_size()
4621 static inline void skb_decrease_gso_size(struct skb_shared_info *shinfo, in skb_decrease_gso_size() argument
4624 if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS)) in skb_decrease_gso_size()
4626 shinfo->gso_size -= decrement; in skb_decrease_gso_size()
4635 const struct skb_shared_info *shinfo = skb_shinfo(skb); in skb_warn_if_lro() local
4637 if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 && in skb_warn_if_lro()
4638 unlikely(shinfo->gso_type == 0)) { in skb_warn_if_lro()
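Read together, the skbuff.h hits at lines 4613 – 4626 describe a pair of inline helpers that adjust gso_size while refusing to touch the GSO_BY_FRAGS sentinel. A sketch assembled only from those hit lines (the in-tree definitions may differ in detail):

	static inline void skb_increase_gso_size(struct skb_shared_info *shinfo,
						 unsigned int increment)
	{
		/* GSO_BY_FRAGS is a reserved sentinel, not a real segment size. */
		if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
			return;
		shinfo->gso_size += increment;
	}

	static inline void skb_decrease_gso_size(struct skb_shared_info *shinfo,
						 unsigned int decrement)
	{
		if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
			return;
		shinfo->gso_size -= decrement;
	}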
/linux/drivers/net/ethernet/google/gve/
gve_tx_dqo.c
454 const struct skb_shared_info *shinfo = skb_shinfo(skb); in gve_tx_add_skb_no_copy_dqo() local
507 /*eop=*/shinfo->nr_frags == 0, is_gso); in gve_tx_add_skb_no_copy_dqo()
510 for (i = 0; i < shinfo->nr_frags; i++) { in gve_tx_add_skb_no_copy_dqo()
511 const skb_frag_t *frag = &shinfo->frags[i]; in gve_tx_add_skb_no_copy_dqo()
512 bool is_eop = i == (shinfo->nr_frags - 1); in gve_tx_add_skb_no_copy_dqo()
577 const struct skb_shared_info *shinfo = skb_shinfo(skb); in gve_num_buffer_descs_needed() local
583 for (i = 0; i < shinfo->nr_frags; i++) { in gve_num_buffer_descs_needed()
603 const struct skb_shared_info *shinfo = skb_shinfo(skb); in gve_can_send_tso() local
604 const int gso_size = shinfo->gso_size; in gve_can_send_tso()
612 for (i = 0; i < shinfo->nr_frags; i++) { in gve_can_send_tso()
[all …]
gve_tx.c
494 const struct skb_shared_info *shinfo = skb_shinfo(skb); in gve_tx_add_skb_no_copy() local
526 payload_nfrags = shinfo->nr_frags; in gve_tx_add_skb_no_copy()
545 for (i = 0; i < shinfo->nr_frags; i++) { in gve_tx_add_skb_no_copy()
546 const skb_frag_t *frag = &shinfo->frags[i]; in gve_tx_add_skb_no_copy()
566 i += (payload_nfrags == shinfo->nr_frags ? 1 : 2); in gve_tx_add_skb_no_copy()
gve_rx_dqo.c
608 struct skb_shared_info *shinfo = skb_shinfo(skb); in gve_rx_complete_rsc() local
616 shinfo->gso_type = SKB_GSO_TCPV4; in gve_rx_complete_rsc()
619 shinfo->gso_type = SKB_GSO_TCPV6; in gve_rx_complete_rsc()
625 shinfo->gso_size = le16_to_cpu(desc->rsc_seg_len); in gve_rx_complete_rsc()
/linux/net/core/
skbuff.c
193 struct skb_shared_info *shinfo; in __build_skb_around() local
209 shinfo = skb_shinfo(skb); in __build_skb_around()
211 atomic_set(&shinfo->dataref, 1); in __build_skb_around()
666 &shinfo->dataref)) in skb_release_data()
674 if (shinfo->frag_list) in skb_release_data()
675 kfree_skb_list(shinfo->frag_list); in skb_release_data()
5511 return thlen + shinfo->gso_size; in skb_gso_transport_seglen()
6229 shinfo->frag_list = list->next; in pskb_carve_frag_list()
6235 shinfo->frag_list = clone; in pskb_carve_frag_list()
6250 struct skb_shared_info *shinfo; in pskb_carve_inside_nonlinear() local
[all …]
lwt_bpf.c
527 struct skb_shared_info *shinfo = skb_shinfo(skb); in handle_gso_type() local
530 shinfo->gso_type |= gso_type; in handle_gso_type()
531 skb_decrease_gso_size(shinfo, encap_len); in handle_gso_type()
532 shinfo->gso_segs = 0; in handle_gso_type()
filter.c
3240 if (shinfo->gso_type & SKB_GSO_TCPV4) { in bpf_skb_proto_4_to_6()
3241 shinfo->gso_type &= ~SKB_GSO_TCPV4; in bpf_skb_proto_4_to_6()
3242 shinfo->gso_type |= SKB_GSO_TCPV6; in bpf_skb_proto_4_to_6()
3270 if (shinfo->gso_type & SKB_GSO_TCPV6) { in bpf_skb_proto_6_to_4()
3271 shinfo->gso_type &= ~SKB_GSO_TCPV6; in bpf_skb_proto_6_to_4()
3272 shinfo->gso_type |= SKB_GSO_TCPV4; in bpf_skb_proto_6_to_4()
3474 skb_decrease_gso_size(shinfo, len_diff); in bpf_skb_net_grow()
3477 shinfo->gso_type |= gso_type; in bpf_skb_net_grow()
3478 shinfo->gso_segs = 0; in bpf_skb_net_grow()
3516 shinfo->gso_type |= SKB_GSO_DODGY; in bpf_skb_net_shrink()
[all …]
dev.c
3740 const struct skb_shared_info *shinfo = skb_shinfo(skb); in qdisc_pkt_len_init() local
3747 if (shinfo->gso_size && skb_transport_header_was_set(skb)) { in qdisc_pkt_len_init()
3749 u16 gso_segs = shinfo->gso_segs; in qdisc_pkt_len_init()
3755 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) { in qdisc_pkt_len_init()
3771 if (shinfo->gso_type & SKB_GSO_DODGY) in qdisc_pkt_len_init()
3773 shinfo->gso_size); in qdisc_pkt_len_init()
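The truncated dev.c hits at lines 3771 – 3773 hint that qdisc_pkt_len_init() recomputes the segment count when the producer marked it untrusted. A hedged sketch of that pattern, with hdr_len standing in for the transport header length computed earlier in the function:

	/* If the GSO metadata came from an untrusted source (SKB_GSO_DODGY),
	 * derive the segment count from the payload length instead of
	 * believing shinfo->gso_segs. */
	if (shinfo->gso_type & SKB_GSO_DODGY)
		gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
					shinfo->gso_size);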
/linux/net/ipv4/
tcp_output.c
1499 shinfo->tx_flags &= ~tsflags; in tcp_fragment_tstamp()
1501 swap(shinfo->tskey, shinfo2->tskey); in tcp_fragment_tstamp()
1629 struct skb_shared_info *shinfo; in __pskb_trim_head() local
1641 shinfo = skb_shinfo(skb); in __pskb_trim_head()
1642 for (i = 0; i < shinfo->nr_frags; i++) { in __pskb_trim_head()
1643 int size = skb_frag_size(&shinfo->frags[i]); in __pskb_trim_head()
1649 shinfo->frags[k] = shinfo->frags[i]; in __pskb_trim_head()
1651 skb_frag_off_add(&shinfo->frags[k], eat); in __pskb_trim_head()
1652 skb_frag_size_sub(&shinfo->frags[k], eat); in __pskb_trim_head()
1658 shinfo->nr_frags = k; in __pskb_trim_head()
[all …]
tcp_ipv4.c
1808 struct skb_shared_info *shinfo; in tcp_add_backlog() local
1865 shinfo = skb_shinfo(skb); in tcp_add_backlog()
1866 gso_size = shinfo->gso_size ?: skb->len; in tcp_add_backlog()
1867 gso_segs = shinfo->gso_segs ?: 1; in tcp_add_backlog()
1869 shinfo = skb_shinfo(tail); in tcp_add_backlog()
1870 tail_gso_size = shinfo->gso_size ?: (tail->len - hdrlen); in tcp_add_backlog()
1871 tail_gso_segs = shinfo->gso_segs ?: 1; in tcp_add_backlog()
1899 shinfo->gso_size = max(gso_size, tail_gso_size); in tcp_add_backlog()
1900 shinfo->gso_segs = min_t(u32, gso_segs + tail_gso_segs, 0xFFFF); in tcp_add_backlog()
tcp.c
468 struct skb_shared_info *shinfo = skb_shinfo(skb); in tcp_tx_timestamp() local
471 sock_tx_timestamp(sk, tsflags, &shinfo->tx_flags); in tcp_tx_timestamp()
475 shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1; in tcp_tx_timestamp()
tcp_input.c
3198 const struct skb_shared_info *shinfo; in tcp_ack_tstamp() local
3204 shinfo = skb_shinfo(skb); in tcp_ack_tstamp()
3205 if (!before(shinfo->tskey, prior_snd_una) && in tcp_ack_tstamp()
3206 before(shinfo->tskey, tcp_sk(sk)->snd_una)) { in tcp_ack_tstamp()
/linux/drivers/net/wireless/mediatek/mt76/
dma.c
530 struct skb_shared_info *shinfo = skb_shinfo(skb); in mt76_add_fragment() local
531 int nr_frags = shinfo->nr_frags; in mt76_add_fragment()
533 if (nr_frags < ARRAY_SIZE(shinfo->frags)) { in mt76_add_fragment()
546 if (nr_frags < ARRAY_SIZE(shinfo->frags)) in mt76_add_fragment()
/linux/drivers/net/ethernet/freescale/enetc/
enetc.c
1280 struct skb_shared_info *shinfo; in enetc_xdp_frame_to_xdp_tx_swbd() local
1304 shinfo = xdp_get_shared_info_from_frame(xdp_frame); in enetc_xdp_frame_to_xdp_tx_swbd()
1306 for (f = 0, frag = &shinfo->frags[0]; f < shinfo->nr_frags; in enetc_xdp_frame_to_xdp_tx_swbd()
1387 struct skb_shared_info *shinfo; in enetc_map_rx_buff_to_xdp() local
1395 shinfo = xdp_get_shared_info_from_buff(xdp_buff); in enetc_map_rx_buff_to_xdp()
1396 shinfo->nr_frags = 0; in enetc_map_rx_buff_to_xdp()
1402 struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp_buff); in enetc_add_rx_buff_to_xdp() local
1404 skb_frag_t *frag = &shinfo->frags[shinfo->nr_frags]; in enetc_add_rx_buff_to_xdp()
1413 shinfo->nr_frags++; in enetc_add_rx_buff_to_xdp()
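The enetc.c hits at lines 1402 – 1413 show an RX buffer being appended as a fragment of an xdp_buff's shared info, with the lines that actually fill in the fragment elided from the listing. A hedged sketch of that append pattern, assuming the standard skb_frag_off_set() / skb_frag_size_set() / __skb_frag_set_page() helpers and hypothetical page, offset and size variables:

	struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp_buff);
	skb_frag_t *frag = &shinfo->frags[shinfo->nr_frags];

	/* Describe the new fragment, then publish it by bumping nr_frags. */
	skb_frag_off_set(frag, offset);
	skb_frag_size_set(frag, size);
	__skb_frag_set_page(frag, page);
	shinfo->nr_frags++;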
/linux/net/sched/
sch_cake.c
1351 const struct skb_shared_info *shinfo = skb_shinfo(skb); in cake_overhead() local
1359 if (!shinfo->gso_size) in cake_overhead()
1366 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | in cake_overhead()
1383 if (unlikely(shinfo->gso_type & SKB_GSO_DODGY)) in cake_overhead()
1385 shinfo->gso_size); in cake_overhead()
1387 segs = shinfo->gso_segs; in cake_overhead()
1389 len = shinfo->gso_size + hdr_len; in cake_overhead()
1390 last_len = skb->len - shinfo->gso_size * (segs - 1); in cake_overhead()
/linux/drivers/net/ethernet/hisilicon/hns3/
hns3_enet.h
719 void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size);
hns3_enet.c
1840 void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size) in hns3_shinfo_pack() argument
1845 size[i] = skb_frag_size(&shinfo->frags[i]); in hns3_shinfo_pack()
/linux/drivers/net/ethernet/broadcom/
bnx2.c
2954 struct skb_shared_info *shinfo; in bnx2_reuse_rx_skb_pages() local
2956 shinfo = skb_shinfo(skb); in bnx2_reuse_rx_skb_pages()
2957 shinfo->nr_frags--; in bnx2_reuse_rx_skb_pages()
2958 page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]); in bnx2_reuse_rx_skb_pages()
2959 __skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL); in bnx2_reuse_rx_skb_pages()
/linux/drivers/net/ethernet/realtek/
r8169_main.c
4144 struct skb_shared_info *shinfo = skb_shinfo(skb); in rtl8169_tso_csum_v2() local
4145 u32 mss = shinfo->gso_size; in rtl8169_tso_csum_v2()
4148 if (shinfo->gso_type & SKB_GSO_TCPV4) { in rtl8169_tso_csum_v2()
4150 } else if (shinfo->gso_type & SKB_GSO_TCPV6) { in rtl8169_tso_csum_v2()
/linux/drivers/net/ethernet/intel/e1000e/
netdev.c
1524 struct skb_shared_info *shinfo; in e1000_clean_jumbo_rx_irq() local
1580 shinfo = skb_shinfo(rxtop); in e1000_clean_jumbo_rx_irq()
1581 skb_fill_page_desc(rxtop, shinfo->nr_frags, in e1000_clean_jumbo_rx_irq()
1592 shinfo = skb_shinfo(rxtop); in e1000_clean_jumbo_rx_irq()
1593 skb_fill_page_desc(rxtop, shinfo->nr_frags, in e1000_clean_jumbo_rx_irq()
/linux/drivers/net/ethernet/broadcom/bnxt/
bnxt.c
1080 struct skb_shared_info *shinfo; in bnxt_rx_pages() local
1083 shinfo = skb_shinfo(skb); in bnxt_rx_pages()
1084 nr_frags = --shinfo->nr_frags; in bnxt_rx_pages()
1085 __skb_frag_set_page(&shinfo->frags[nr_frags], NULL); in bnxt_rx_pages()
