Searched refs:tcp_sock (Results 1 – 25 of 65) sorted by relevance

/linux/net/ipv4/
bpf_tcp_ca.c
118 case offsetof(struct tcp_sock, snd_cwnd): in bpf_tcp_ca_btf_struct_access()
119 end = offsetofend(struct tcp_sock, snd_cwnd); in bpf_tcp_ca_btf_struct_access()
121 case offsetof(struct tcp_sock, snd_cwnd_cnt): in bpf_tcp_ca_btf_struct_access()
122 end = offsetofend(struct tcp_sock, snd_cwnd_cnt); in bpf_tcp_ca_btf_struct_access()
124 case offsetof(struct tcp_sock, snd_ssthresh): in bpf_tcp_ca_btf_struct_access()
125 end = offsetofend(struct tcp_sock, snd_ssthresh); in bpf_tcp_ca_btf_struct_access()
127 case offsetof(struct tcp_sock, ecn_flags): in bpf_tcp_ca_btf_struct_access()
128 end = offsetofend(struct tcp_sock, ecn_flags); in bpf_tcp_ca_btf_struct_access()
145 BPF_CALL_2(bpf_tcp_send_ack, struct tcp_sock *, tp, u32, rcv_nxt) in BPF_CALL_2() argument
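Note: the bpf_tcp_ca.c matches above come from the write-access check that decides which tcp_sock fields a BPF congestion-control program may modify. The sketch below reconstructs the shape of that switch from the quoted lines only; the error path and the final bounds check are assumptions, not the verbatim kernel source.

/* Sketch of the whitelist switch in bpf_tcp_ca_btf_struct_access():
 * each writable tcp_sock member is named by its offset range, and any
 * write outside the listed ranges is rejected. */
switch (off) {
case offsetof(struct tcp_sock, snd_cwnd):
	end = offsetofend(struct tcp_sock, snd_cwnd);
	break;
case offsetof(struct tcp_sock, snd_cwnd_cnt):
	end = offsetofend(struct tcp_sock, snd_cwnd_cnt);
	break;
case offsetof(struct tcp_sock, snd_ssthresh):
	end = offsetofend(struct tcp_sock, snd_ssthresh);
	break;
case offsetof(struct tcp_sock, ecn_flags):
	end = offsetofend(struct tcp_sock, ecn_flags);
	break;
default:
	return -EACCES;		/* assumed: unlisted fields are not writable */
}
if (off + size > end)		/* assumed: the write must stay inside the member */
	return -EACCES;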
tcp_recovery.c
12 struct tcp_sock *tp = tcp_sk(sk); in tcp_rack_reo_wnd()
36 s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, u32 reo_wnd) in tcp_rack_skb_timeout()
64 struct tcp_sock *tp = tcp_sk(sk); in tcp_rack_detect_loss()
101 struct tcp_sock *tp = tcp_sk(sk); in tcp_rack_mark_lost()
122 void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq, in tcp_rack_advance()
155 struct tcp_sock *tp = tcp_sk(sk); in tcp_rack_reo_timeout()
193 struct tcp_sock *tp = tcp_sk(sk); in tcp_rack_update_reo_wnd()
223 struct tcp_sock *tp = tcp_sk(sk); in tcp_newreno_mark_lost()
tcp_input.c
340 struct tcp_sock *tp = tcp_sk(sk); in __tcp_ecn_check_ce()
461 struct tcp_sock *tp = tcp_sk(sk); in __tcp_grow_window()
498 struct tcp_sock *tp = tcp_sk(sk); in tcp_grow_window()
538 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_buffer_space()
574 struct tcp_sock *tp = tcp_sk(sk); in tcp_clamp_window()
676 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_rtt_measure_ts()
702 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_space_adjust()
775 struct tcp_sock *tp = tcp_sk(sk); in tcp_event_data_recv()
830 struct tcp_sock *tp = tcp_sk(sk); in tcp_rtt_estimator()
1035 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_sack_reordering()
[all …]
tcp_output.c
68 struct tcp_sock *tp = tcp_sk(sk); in tcp_event_new_data_sent()
122 struct tcp_sock *tp = tcp_sk(sk); in tcp_advertise_mss()
143 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_restart()
185 struct tcp_sock *tp = tcp_sk(sk); in tcp_event_ack_sent()
261 struct tcp_sock *tp = tcp_sk(sk); in tcp_select_window()
324 struct tcp_sock *tp = tcp_sk(sk); in tcp_ecn_send_syn()
368 struct tcp_sock *tp = tcp_sk(sk); in tcp_ecn_send()
763 struct tcp_sock *tp = tcp_sk(sk); in tcp_syn_options()
1047 struct tcp_sock *tp; in tcp_tasklet_func()
1189 struct tcp_sock *tp = container_of(timer, struct tcp_sock, pacing_timer); in tcp_pace_kick()
[all …]
tcp_bbr.c
268 struct tcp_sock *tp = tcp_sk(sk); in bbr_init_pacing_rate_from_rtt()
287 struct tcp_sock *tp = tcp_sk(sk); in bbr_set_pacing_rate()
305 struct tcp_sock *tp = tcp_sk(sk); in bbr_tso_segs_goal()
322 struct tcp_sock *tp = tcp_sk(sk); in bbr_save_cwnd()
333 struct tcp_sock *tp = tcp_sk(sk); in bbr_cwnd_event()
438 struct tcp_sock *tp = tcp_sk(sk); in bbr_packets_in_net_at_edt()
482 struct tcp_sock *tp = tcp_sk(sk); in bbr_set_cwnd_to_recover_or_restore()
521 struct tcp_sock *tp = tcp_sk(sk); in bbr_set_cwnd()
556 struct tcp_sock *tp = tcp_sk(sk); in bbr_is_next_cycle_phase()
592 struct tcp_sock *tp = tcp_sk(sk); in bbr_advance_cycle_phase()
[all …]
tcp_dctcp.c
69 static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca) in dctcp_reset()
79 const struct tcp_sock *tp = tcp_sk(sk); in dctcp_init()
107 struct tcp_sock *tp = tcp_sk(sk); in dctcp_ssthresh()
115 const struct tcp_sock *tp = tcp_sk(sk); in dctcp_update_alpha()
149 struct tcp_sock *tp = tcp_sk(sk); in dctcp_react_to_loss()
187 const struct tcp_sock *tp = tcp_sk(sk); in dctcp_get_info()
tcp_cdg.c
143 struct tcp_sock *tp = tcp_sk(sk); in tcp_cdg_hystart_update()
244 struct tcp_sock *tp = tcp_sk(sk); in tcp_cdg_backoff()
265 struct tcp_sock *tp = tcp_sk(sk); in tcp_cdg_cong_avoid()
302 struct tcp_sock *tp = tcp_sk(sk); in tcp_cdg_acked()
331 struct tcp_sock *tp = tcp_sk(sk); in tcp_cdg_ssthresh()
348 struct tcp_sock *tp = tcp_sk(sk); in tcp_cdg_cwnd_event()
376 struct tcp_sock *tp = tcp_sk(sk); in tcp_cdg_init()
tcp_rate.c
42 struct tcp_sock *tp = tcp_sk(sk); in tcp_rate_skb_sent()
82 struct tcp_sock *tp = tcp_sk(sk); in tcp_rate_skb_delivered()
115 struct tcp_sock *tp = tcp_sk(sk); in tcp_rate_gen()
191 struct tcp_sock *tp = tcp_sk(sk); in tcp_rate_check_app_limited()
tcp_highspeed.c
102 struct tcp_sock *tp = tcp_sk(sk); in hstcp_init()
114 struct tcp_sock *tp = tcp_sk(sk); in hstcp_cong_avoid()
153 const struct tcp_sock *tp = tcp_sk(sk); in hstcp_ssthresh()
tcp_westwood.c
165 const struct tcp_sock *tp = tcp_sk(sk); in westwood_fast_bw()
182 const struct tcp_sock *tp = tcp_sk(sk); in westwood_acked_count()
219 const struct tcp_sock *tp = tcp_sk(sk); in tcp_westwood_bw_rttmin()
242 struct tcp_sock *tp = tcp_sk(sk); in tcp_westwood_event()
tcp_timer.c
104 struct tcp_sock *tp = tcp_sk(sk); in tcp_out_of_resources()
234 struct tcp_sock *tp = tcp_sk(sk); in tcp_write_timeout()
359 struct tcp_sock *tp = tcp_sk(sk); in tcp_probe_timer()
411 struct tcp_sock *tp = tcp_sk(sk); in tcp_fastopen_synack_timer()
450 struct tcp_sock *tp = tcp_sk(sk); in tcp_retransmit_timer()
677 struct tcp_sock *tp = tcp_sk(sk); in tcp_keepalive_timer()
762 struct tcp_sock *tp = container_of(timer, struct tcp_sock, compressed_ack_timer); in tcp_compressed_ack_kick()
tcp_scalable.c
20 struct tcp_sock *tp = tcp_sk(sk); in tcp_scalable_cong_avoid()
36 const struct tcp_sock *tp = tcp_sk(sk); in tcp_scalable_ssthresh()
tcp_illinois.c
59 struct tcp_sock *tp = tcp_sk(sk); in rtt_reset()
224 struct tcp_sock *tp = tcp_sk(sk); in update_params()
262 struct tcp_sock *tp = tcp_sk(sk); in tcp_illinois_cong_avoid()
297 struct tcp_sock *tp = tcp_sk(sk); in tcp_illinois_ssthresh()
tcp_yeah.c
43 struct tcp_sock *tp = tcp_sk(sk); in tcp_yeah_init()
60 struct tcp_sock *tp = tcp_sk(sk); in tcp_yeah_cong_avoid()
189 const struct tcp_sock *tp = tcp_sk(sk); in tcp_yeah_ssthresh()
tcp_cong.c
396 u32 tcp_slow_start(struct tcp_sock *tp, u32 acked) in tcp_slow_start()
410 void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked) in tcp_cong_avoid_ai()
438 struct tcp_sock *tp = tcp_sk(sk); in tcp_reno_cong_avoid()
457 const struct tcp_sock *tp = tcp_sk(sk); in tcp_reno_ssthresh()
465 const struct tcp_sock *tp = tcp_sk(sk); in tcp_reno_undo_cwnd()
tcp_vegas.c
73 const struct tcp_sock *tp = tcp_sk(sk); in vegas_enable()
160 static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp) in tcp_vegas_ssthresh()
167 struct tcp_sock *tp = tcp_sk(sk); in tcp_vegas_cong_avoid()
tcp_cubic.c
120 struct tcp_sock *tp = tcp_sk(sk); in bictcp_hystart_reset()
326 struct tcp_sock *tp = tcp_sk(sk); in cubictcp_cong_avoid()
343 const struct tcp_sock *tp = tcp_sk(sk); in cubictcp_recalc_ssthresh()
388 struct tcp_sock *tp = tcp_sk(sk); in hystart_update()
450 const struct tcp_sock *tp = tcp_sk(sk); in cubictcp_acked()
tcp.c
413 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_sock()
597 struct tcp_sock *tp = tcp_sk(sk); in tcp_ioctl()
654 struct tcp_sock *tp = tcp_sk(sk); in tcp_skb_entail()
697 struct tcp_sock *tp = tcp_sk(sk); in tcp_push()
889 struct tcp_sock *tp = tcp_sk(sk); in tcp_xmit_size_goal()
943 struct tcp_sock *tp = tcp_sk(sk); in tcp_build_frag()
1003 struct tcp_sock *tp = tcp_sk(sk); in do_tcp_sendpages()
1132 struct tcp_sock *tp = tcp_sk(sk); in tcp_sendmsg_fastopen()
1177 struct tcp_sock *tp = tcp_sk(sk); in tcp_sendmsg_locked()
1454 struct tcp_sock *tp = tcp_sk(sk); in tcp_recv_urg()
[all …]
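Note: nearly every hit in this directory shows the same idiom: a congestion-control or timer hook receives a struct sock * and converts it with tcp_sk() (a plain cast, defined in include/linux/tcp.h below) before reading or writing tcp_sock fields. The function below is a hedged sketch of that idiom in the style of tcp_reno_cong_avoid() (tcp_cong.c, line 438 above); it is illustrative rather than the verbatim source, and direct use of tp->snd_cwnd matches the kernel version indexed here rather than every release.

/* Illustrative congestion-avoidance hook built only from helpers that
 * appear in these results: tcp_sk(), tcp_in_slow_start(),
 * tcp_slow_start() and tcp_cong_avoid_ai(). */
static void example_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);	/* sock -> tcp_sock cast */

	if (!tcp_is_cwnd_limited(sk))		/* cwnd is not the bottleneck */
		return;

	if (tcp_in_slow_start(tp)) {		/* cwnd still below ssthresh */
		acked = tcp_slow_start(tp, acked);
		if (!acked)
			return;
	}
	tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);	/* additive increase */
}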
/linux/include/net/
tcp.h
392 void tcp_clear_retrans(struct tcp_sock *tp);
706 struct tcp_sock *tp = tcp_sk(sk); in tcp_fast_path_check()
1232 const struct tcp_sock *tp = tcp_sk(sk); in tcp_current_ssthresh()
1277 const struct tcp_sock *tp = tcp_sk(sk); in tcp_is_cwnd_limited()
1387 struct tcp_sock *tp = tcp_sk(sk); in tcp_slow_start_after_idle_check()
1429 struct tcp_sock *tp = tcp_sk(sk); in tcp_adjust_rcv_ssthresh()
1459 const struct tcp_sock *tp = tcp_sk(sk); in tcp_epollin_ready()
1839 const struct tcp_sock *tp = tcp_sk(sk); in tcp_write_queue_empty()
1895 struct tcp_sock *tp = tcp_sk(sk); in tcp_push_pending_frames()
2146 struct tcp_sock *tp = tcp_sk(sk); in tcp_inq()
[all …]
/linux/tools/testing/selftests/bpf/
bpf_tcp_helpers.h
58 struct tcp_sock { struct
96 static __always_inline struct tcp_sock *tcp_sk(const struct sock *sk) in tcp_sk() argument
98 return (struct tcp_sock *)sk; in tcp_sk()
195 static __always_inline bool tcp_in_slow_start(const struct tcp_sock *tp) in tcp_in_slow_start()
202 const struct tcp_sock *tp = tcp_sk(sk); in tcp_is_cwnd_limited()
225 extern __u32 tcp_slow_start(struct tcp_sock *tp, __u32 acked) __ksym;
226 extern void tcp_cong_avoid_ai(struct tcp_sock *tp, __u32 w, __u32 acked) __ksym;
/linux/tools/testing/selftests/bpf/progs/
bpf_dctcp.c
49 static __always_inline void dctcp_reset(const struct tcp_sock *tp, in dctcp_reset()
61 const struct tcp_sock *tp = tcp_sk(sk); in BPF_PROG()
102 struct tcp_sock *tp = tcp_sk(sk); in BPF_PROG()
111 const struct tcp_sock *tp = tcp_sk(sk); in BPF_PROG()
141 struct tcp_sock *tp = tcp_sk(sk); in dctcp_react_to_loss()
160 struct tcp_sock *tp = tcp_sk(sk); in dctcp_ece_ack_cwr()
bpf_cubic.c
163 struct tcp_sock *tp = tcp_sk(sk); in bictcp_hystart_reset()
383 struct tcp_sock *tp = tcp_sk(sk); in BPF_STRUCT_OPS()
402 const struct tcp_sock *tp = tcp_sk(sk); in BPF_STRUCT_OPS()
449 struct tcp_sock *tp = tcp_sk(sk); in hystart_update()
496 const struct tcp_sock *tp = tcp_sk(sk); in BPF_STRUCT_OPS()
bpf_iter_tcp4.c
69 static bool tcp_in_initial_slowstart(const struct tcp_sock *tcp) in tcp_in_initial_slowstart()
74 static int dump_tcp_sock(struct seq_file *seq, struct tcp_sock *tp, in dump_tcp_sock()
202 struct tcp_sock *tp; in dump_tcp4()
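Note: the selftest programs above (bpf_dctcp.c, bpf_cubic.c) implement the same hooks in BPF against the mirrored struct tcp_sock from bpf_tcp_helpers.h. Below is a rough sketch of how such a hook is declared, assuming the BPF_STRUCT_OPS macro and the __ksym externs from that header; the program name and body are invented for illustration.

/* Hypothetical BPF congestion-avoidance hook in the style of the
 * selftests above.  tcp_sk(), tcp_in_slow_start() and
 * tcp_is_cwnd_limited() are the helper-header copies, while
 * tcp_slow_start() and tcp_cong_avoid_ai() are kernel symbols
 * declared with __ksym. */
void BPF_STRUCT_OPS(example_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	if (tcp_in_slow_start(tp)) {
		acked = tcp_slow_start(tp, acked);
		if (!acked)
			return;
	}
	tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
}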
/linux/include/linux/
tcp.h
145 struct tcp_sock { struct
437 static inline struct tcp_sock *tcp_sk(const struct sock *sk) in tcp_sk() argument
439 return (struct tcp_sock *)sk; in tcp_sk()
479 static inline void tcp_move_syn(struct tcp_sock *tp, in tcp_move_syn()
486 static inline void tcp_saved_syn_free(struct tcp_sock *tp) in tcp_saved_syn_free()
502 static inline u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss) in tcp_mss_clamp()
/linux/include/trace/events/
mptcp.h
39 if (subflow->tcp_sock && sk_fullsock(subflow->tcp_sock))
40 __entry->free = sk_stream_memory_free(subflow->tcp_sock);

Completed in 94 milliseconds
