blob: a118f21dcd586b715c24873156d3eefde2df8fc9 [file] [log] [blame]
lh9ed821d2023-04-07 01:36:19 -07001/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Definitions for the TCP module.
7 *
8 * Version: @(#)tcp.h 1.0.5 05/23/93
9 *
10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version
16 * 2 of the License, or (at your option) any later version.
17 */
18#ifndef _TCP_H
19#define _TCP_H
20
21#define FASTRETRANS_DEBUG 1
22
23#include <linux/list.h>
24#include <linux/tcp.h>
25#include <linux/bug.h>
26#include <linux/slab.h>
27#include <linux/cache.h>
28#include <linux/percpu.h>
29#include <linux/skbuff.h>
30#include <linux/dmaengine.h>
31#include <linux/crypto.h>
32#include <linux/cryptohash.h>
33#include <linux/kref.h>
34
35#include <net/inet_connection_sock.h>
36#include <net/inet_timewait_sock.h>
37#include <net/inet_hashtables.h>
38#include <net/checksum.h>
39#include <net/request_sock.h>
40#include <net/sock.h>
41#include <net/snmp.h>
42#include <net/ip.h>
43#include <net/tcp_states.h>
44#include <net/inet_ecn.h>
45#include <net/dst.h>
46
47#include <linux/seq_file.h>
48#include <linux/memcontrol.h>
49
50extern struct inet_hashinfo tcp_hashinfo;
51
52extern struct percpu_counter tcp_orphan_count;
53extern void tcp_time_wait(struct sock *sk, int state, int timeo);
54
55#define MAX_TCP_HEADER (128 + MAX_HEADER)
56#define MAX_TCP_OPTION_SPACE 40
/* Mitigation for CVE-2019-11477 (SACK panic): enforce a floor on the
 * sender MSS so a peer cannot force pathologically small segments.
 */
58#define TCP_MIN_SND_MSS 48
59#define TCP_MIN_GSO_SIZE (TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)
60
61/*
62 * Never offer a window over 32767 without using window scaling. Some
63 * poor stacks do signed 16bit maths!
64 */
65#define MAX_TCP_WINDOW 32767U
66
67/* Offer an initial receive window of 10 mss. */
68#define TCP_DEFAULT_INIT_RCVWND 10
69
70/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
71#define TCP_MIN_MSS 88U
72
73/* The least MTU to use for probing */
74#define TCP_BASE_MSS 512
75
76/* After receiving this amount of duplicate ACKs fast retransmit starts. */
77#define TCP_FASTRETRANS_THRESH 3
78
79/* Maximal reordering. */
80#define TCP_MAX_REORDERING 127
81
82/* Maximal number of ACKs sent quickly to accelerate slow-start. */
83#define TCP_MAX_QUICKACKS 16U
84
85/* urg_data states */
86#define TCP_URG_VALID 0x0100
87#define TCP_URG_NOTYET 0x0200
88#define TCP_URG_READ 0x0400
89
90#define TCP_RETR1 3 /*
91 * This is how many retries it does before it
92 * tries to figure out if the gateway is
93 * down. Minimal RFC value is 3; it corresponds
94 * to ~3sec-8min depending on RTO.
95 */
96
97#define TCP_RETR2 15 /*
98 * This should take at least
99 * 90 minutes to time out.
100 * RFC1122 says that the limit is 100 sec.
101 * 15 is ~13-30min depending on RTO.
102 */
103
104#define TCP_SYN_RETRIES 5 /* number of times to retry active opening a
105 * connection: ~180sec is RFC minimum */
106
107#define TCP_SYNACK_RETRIES 5 /* number of times to retry passive opening a
108 * connection: ~180sec is RFC minimum */
109
110#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
111 * state, about 60 seconds */
112#define TCP_FIN_TIMEOUT TCP_TIMEWAIT_LEN
113 /* BSD style FIN_WAIT2 deadlock breaker.
114 * It used to be 3min, new value is 60sec,
115 * to combine FIN-WAIT-2 timeout with
116 * TIME-WAIT timer.
117 */
118
119#define TCP_DELACK_MAX ((unsigned)(HZ/5)) /* maximal time to delay before sending an ACK */
120#if HZ >= 100
121#define TCP_DELACK_MIN ((unsigned)(HZ/25)) /* minimal time to delay before sending an ACK */
122#define TCP_ATO_MIN ((unsigned)(HZ/25))
123#else
124#define TCP_DELACK_MIN 4U
125#define TCP_ATO_MIN 4U
126#endif
127#define TCP_RTO_MAX ((unsigned)(120*HZ))
128#define TCP_RTO_MIN ((unsigned)(HZ/5))
129#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ)) /* RFC2988bis initial RTO value */
130#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value, now
131 * used as a fallback RTO for the
132 * initial data transmission if no
133 * valid RTT sample has been acquired,
134 * most likely due to retrans in 3WHS.
135 */
136
137#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
138 * for local resources.
139 */
140
141#define TCP_KEEPALIVE_TIME (120*60*HZ) /* two hours */
142#define TCP_KEEPALIVE_PROBES 9 /* Max of 9 keepalive probes */
143#define TCP_KEEPALIVE_INTVL (75*HZ)
144
145#define MAX_TCP_KEEPIDLE 32767
146#define MAX_TCP_KEEPINTVL 32767
147#define MAX_TCP_KEEPCNT 127
148#define MAX_TCP_SYNCNT 127
149
150#define TCP_SYNQ_INTERVAL (HZ/5) /* Period of SYNACK timer */
151
152#define TCP_PAWS_24DAYS (60 * 60 * 24 * 24)
153#define TCP_PAWS_MSL 60 /* Per-host timestamps are invalidated
154 * after this time. It should be equal
155 * (or greater than) TCP_TIMEWAIT_LEN
156 * to provide reliability equal to one
157 * provided by timewait state.
158 */
159#define TCP_PAWS_WINDOW 1 /* Replay window for per-host
160 * timestamps. It must be less than
161 * minimal timewait lifetime.
162 */
163/*
164 * TCP option
165 */
166
167#define TCPOPT_NOP 1 /* Padding */
168#define TCPOPT_EOL 0 /* End of options */
169#define TCPOPT_MSS 2 /* Segment size negotiating */
170#define TCPOPT_WINDOW 3 /* Window scaling */
171#define TCPOPT_SACK_PERM 4 /* SACK Permitted */
172#define TCPOPT_SACK 5 /* SACK Block */
173#define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */
174#define TCPOPT_MD5SIG 19 /* MD5 Signature (RFC2385) */
175#define TCPOPT_COOKIE 253 /* Cookie extension (experimental) */
176
177/*
178 * TCP option lengths
179 */
180
181#define TCPOLEN_MSS 4
182#define TCPOLEN_WINDOW 3
183#define TCPOLEN_SACK_PERM 2
184#define TCPOLEN_TIMESTAMP 10
185#define TCPOLEN_MD5SIG 18
186#define TCPOLEN_COOKIE_BASE 2 /* Cookie-less header extension */
187#define TCPOLEN_COOKIE_PAIR 3 /* Cookie pair header extension */
188#define TCPOLEN_COOKIE_MIN (TCPOLEN_COOKIE_BASE+TCP_COOKIE_MIN)
189#define TCPOLEN_COOKIE_MAX (TCPOLEN_COOKIE_BASE+TCP_COOKIE_MAX)
190
191/* But this is what stacks really send out. */
192#define TCPOLEN_TSTAMP_ALIGNED 12
193#define TCPOLEN_WSCALE_ALIGNED 4
194#define TCPOLEN_SACKPERM_ALIGNED 4
195#define TCPOLEN_SACK_BASE 2
196#define TCPOLEN_SACK_BASE_ALIGNED 4
197#define TCPOLEN_SACK_PERBLOCK 8
198#define TCPOLEN_MD5SIG_ALIGNED 20
199#define TCPOLEN_MSS_ALIGNED 4
200
201/* Flags in tp->nonagle */
202#define TCP_NAGLE_OFF 1 /* Nagle's algo is disabled */
203#define TCP_NAGLE_CORK 2 /* Socket is corked */
204#define TCP_NAGLE_PUSH 4 /* Cork is overridden for already queued data */
205
206/* TCP thin-stream limits */
207#define TCP_THIN_LINEAR_RETRIES 6 /* After 6 linear retries, do exp. backoff */
208
209/* TCP initial congestion window as per draft-hkchu-tcpm-initcwnd-01 */
210#define TCP_INIT_CWND 10
211
212extern struct inet_timewait_death_row tcp_death_row;
213
214/* sysctl variables for tcp */
215extern int sysctl_tcp_timestamps;
216extern int sysctl_tcp_window_scaling;
217extern int sysctl_tcp_sack;
218extern int sysctl_tcp_fin_timeout;
219extern int sysctl_tcp_keepalive_time;
220extern int sysctl_tcp_keepalive_probes;
221extern int sysctl_tcp_keepalive_intvl;
222extern int sysctl_tcp_syn_retries;
223extern int sysctl_tcp_synack_retries;
224extern int sysctl_tcp_retries1;
225extern int sysctl_tcp_retries2;
226extern int sysctl_tcp_orphan_retries;
227extern int sysctl_tcp_syncookies;
228extern int sysctl_tcp_retrans_collapse;
229extern int sysctl_tcp_stdurg;
230extern int sysctl_tcp_rfc1337;
231extern int sysctl_tcp_abort_on_overflow;
232extern int sysctl_tcp_max_orphans;
233extern int sysctl_tcp_fack;
234extern int sysctl_tcp_reordering;
235extern int sysctl_tcp_ecn;
236extern int sysctl_tcp_dsack;
237extern int sysctl_tcp_wmem[3];
238extern int sysctl_tcp_rmem[3];
239extern int sysctl_tcp_app_win;
240extern int sysctl_tcp_adv_win_scale;
241extern int sysctl_tcp_tw_reuse;
242extern int sysctl_tcp_frto;
243extern int sysctl_tcp_frto_response;
244extern int sysctl_tcp_low_latency;
245extern int sysctl_tcp_dma_copybreak;
246extern int sysctl_tcp_nometrics_save;
247extern int sysctl_tcp_moderate_rcvbuf;
248extern int sysctl_tcp_tso_win_divisor;
249extern int sysctl_tcp_abc;
250extern int sysctl_tcp_mtu_probing;
251extern int sysctl_tcp_base_mss;
252extern int sysctl_tcp_workaround_signed_windows;
253extern int sysctl_tcp_slow_start_after_idle;
254extern int sysctl_tcp_max_ssthresh;
255extern int sysctl_tcp_cookie_size;
256extern int sysctl_tcp_thin_linear_timeouts;
257extern int sysctl_tcp_thin_dupack;
258extern int sysctl_tcp_challenge_ack_limit;
259
260extern atomic_long_t tcp_memory_allocated;
261extern struct percpu_counter tcp_sockets_allocated;
262extern int tcp_memory_pressure;
263
264/*
265 * The next routines deal with comparing 32 bit unsigned ints
266 * and worry about wraparound (automatic with unsigned arithmetic).
267 */
268
269static inline int before(__u32 seq1, __u32 seq2)
270{
271 return (__s32)(seq1-seq2) < 0;
272}
273#define after(seq2, seq1) before(seq1, seq2)
274
275/* is s2<=s1<=s3 ? */
276static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
277{
278 return seq3 - seq2 >= seq1 - seq2;
279}
280
281static inline bool tcp_out_of_memory(struct sock *sk)
282{
283 if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
284 sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
285 return true;
286 return false;
287}
288
/* Return true when the number of orphaned sockets, scaled up by @shift,
 * exceeds sysctl_tcp_max_orphans.  A cheap approximate per-cpu read is
 * tried first; the expensive exact sum is only taken when the
 * approximation already looks over the limit.
 */
static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
{
	struct percpu_counter *ocp = sk->sk_prot->orphan_count;
	int orphans = percpu_counter_read_positive(ocp);

	if (orphans << shift > sysctl_tcp_max_orphans) {
		/* Approximation over limit: recheck with the exact count. */
		orphans = percpu_counter_sum_positive(ocp);
		if (orphans << shift > sysctl_tcp_max_orphans)
			return true;
	}
	return false;
}
301
302extern bool tcp_check_oom(struct sock *sk, int shift);
303
304/* syncookies: remember time of last synqueue overflow */
/* syncookies: remember time of last synqueue overflow.  The listener's
 * ts_recent_stamp field is reused as the overflow timestamp.
 */
static inline void tcp_synq_overflow(struct sock *sk)
{
	tcp_sk(sk)->rx_opt.ts_recent_stamp = jiffies;
}
309
310/* syncookies: no recent synqueue overflow on this listening socket? */
311static inline int tcp_synq_no_recent_overflow(const struct sock *sk)
312{
313 unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
314 return time_after(jiffies, last_overflow + TCP_TIMEOUT_FALLBACK);
315}
316
317extern struct proto tcp_prot;
318
319#define TCP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.tcp_statistics, field)
320#define TCP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
321#define TCP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
322#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
323#define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
324
325extern void tcp_init_mem(struct net *net);
326
327extern void tcp_v4_err(struct sk_buff *skb, u32);
328
329extern void tcp_shutdown (struct sock *sk, int how);
330
331extern int tcp_v4_rcv(struct sk_buff *skb);
332
333extern struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it);
334extern void *tcp_v4_tw_get_peer(struct sock *sk);
335extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
336extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
337 size_t size);
338extern int tcp_sendpage(struct sock *sk, struct page *page, int offset,
339 size_t size, int flags);
340extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
341extern int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
342 const struct tcphdr *th, unsigned int len);
343extern int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
344 const struct tcphdr *th, unsigned int len);
345extern void tcp_rcv_space_adjust(struct sock *sk);
346extern void tcp_cleanup_rbuf(struct sock *sk, int copied);
347extern int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
348extern void tcp_twsk_destructor(struct sock *sk);
349extern ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
350 struct pipe_inode_info *pipe, size_t len,
351 unsigned int flags);
352
353static inline void tcp_dec_quickack_mode(struct sock *sk,
354 const unsigned int pkts)
355{
356 struct inet_connection_sock *icsk = inet_csk(sk);
357
358 if (icsk->icsk_ack.quick) {
359 if (pkts >= icsk->icsk_ack.quick) {
360 icsk->icsk_ack.quick = 0;
361 /* Leaving quickack mode we deflate ATO. */
362 icsk->icsk_ack.ato = TCP_ATO_MIN;
363 } else
364 icsk->icsk_ack.quick -= pkts;
365 }
366}
367
368#define TCP_ECN_OK 1
369#define TCP_ECN_QUEUE_CWR 2
370#define TCP_ECN_DEMAND_CWR 4
371#define TCP_ECN_SEEN 8
372
/* Mark an incoming connection request as ECN-capable when the sysctl
 * allows it and the SYN carried both ECE and CWR (RFC 3168 ECN-setup SYN).
 */
static __inline__ void
TCP_ECN_create_request(struct request_sock *req, struct tcphdr *th)
{
	if (sysctl_tcp_ecn && th->ece && th->cwr)
		inet_rsk(req)->ecn_ok = 1;
}
379
/* Verdicts returned by tcp_timewait_state_process() for a segment
 * received on a TIME-WAIT socket; the caller acts on the verdict
 * (presumably sending a RST/ACK or accepting a new SYN — see the
 * tcp_minisocks implementation for the exact reactions).
 */
enum tcp_tw_status {
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3
};
386
387
388extern enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
389 struct sk_buff *skb,
390 const struct tcphdr *th);
391extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb,
392 struct request_sock *req,
393 struct request_sock **prev);
394extern int tcp_child_process(struct sock *parent, struct sock *child,
395 struct sk_buff *skb);
396extern int tcp_use_frto(struct sock *sk);
397extern void tcp_enter_frto(struct sock *sk);
398extern void tcp_enter_loss(struct sock *sk, int how);
399extern void tcp_clear_retrans(struct tcp_sock *tp);
400extern void tcp_update_metrics(struct sock *sk);
401extern void tcp_close(struct sock *sk, long timeout);
402extern unsigned int tcp_poll(struct file * file, struct socket *sock,
403 struct poll_table_struct *wait);
404extern int tcp_getsockopt(struct sock *sk, int level, int optname,
405 char __user *optval, int __user *optlen);
406extern int tcp_setsockopt(struct sock *sk, int level, int optname,
407 char __user *optval, unsigned int optlen);
408extern int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
409 char __user *optval, int __user *optlen);
410extern int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
411 char __user *optval, unsigned int optlen);
412extern void tcp_set_keepalive(struct sock *sk, int val);
413extern void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
414extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
415 size_t len, int nonblock, int flags, int *addr_len);
416extern void tcp_parse_options(const struct sk_buff *skb,
417 struct tcp_options_received *opt_rx, const u8 **hvpp,
418 int estab);
419extern const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
420
421/*
422 * TCP v4 functions exported for the inet6 API
423 */
424
425extern void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
426extern int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
427extern struct sock * tcp_create_openreq_child(struct sock *sk,
428 struct request_sock *req,
429 struct sk_buff *skb);
430extern struct sock * tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
431 struct request_sock *req,
432 struct dst_entry *dst);
433extern int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
434extern int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
435 int addr_len);
436extern int tcp_connect(struct sock *sk);
437extern struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
438 struct request_sock *req,
439 struct request_values *rvp);
440extern int tcp_disconnect(struct sock *sk, int flags);
441
442
443/* From syncookies.c */
444extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
445extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
446 struct ip_options *opt);
447#ifdef CONFIG_SYN_COOKIES
448extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
449 __u16 *mss);
450#else
451static inline __u32 cookie_v4_init_sequence(struct sock *sk,
452 struct sk_buff *skb,
453 __u16 *mss)
454{
455 return 0;
456}
457#endif
458
459extern __u32 cookie_init_timestamp(struct request_sock *req);
460extern bool cookie_check_timestamp(struct tcp_options_received *opt, bool *);
461
462/* From net/ipv6/syncookies.c */
463extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
464#ifdef CONFIG_SYN_COOKIES
465extern __u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb,
466 __u16 *mss);
467#else
468static inline __u32 cookie_v6_init_sequence(struct sock *sk,
469 struct sk_buff *skb,
470 __u16 *mss)
471{
472 return 0;
473}
474#endif
475/* tcp_output.c */
476
477extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
478 int nonagle);
479extern int tcp_may_send_now(struct sock *sk);
480extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
481extern void tcp_retransmit_timer(struct sock *sk);
482extern void tcp_xmit_retransmit_queue(struct sock *);
483extern void tcp_simple_retransmit(struct sock *);
484extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
485extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
486
487extern void tcp_send_probe0(struct sock *);
488extern void tcp_send_partial(struct sock *);
489extern int tcp_write_wakeup(struct sock *);
490extern void tcp_send_fin(struct sock *sk);
491extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
492extern int tcp_send_synack(struct sock *);
493extern int tcp_syn_flood_action(struct sock *sk,
494 const struct sk_buff *skb,
495 const char *proto);
496extern void tcp_push_one(struct sock *, unsigned int mss_now);
497extern void tcp_send_ack(struct sock *sk);
498extern void tcp_send_delayed_ack(struct sock *sk);
499
500/* tcp_input.c */
501extern void tcp_cwnd_application_limited(struct sock *sk);
502
503/* tcp_timer.c */
504extern void tcp_init_xmit_timers(struct sock *);
/* Stop all transmit-side timers; thin wrapper over the generic
 * inet_connection_sock helper.
 */
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	inet_csk_clear_xmit_timers(sk);
}
509
510extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
511extern unsigned int tcp_current_mss(struct sock *sk);
512
513/* Bound MSS / TSO packet size with the half of the window */
514static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
515{
516 int cutoff;
517
518 /* When peer uses tiny windows, there is no use in packetizing
519 * to sub-MSS pieces for the sake of SWS or making sure there
520 * are enough packets in the pipe for fast recovery.
521 *
522 * On the other hand, for extremely large MSS devices, handling
523 * smaller than MSS windows in this way does make sense.
524 */
525 if (tp->max_window >= 512)
526 cutoff = (tp->max_window >> 1);
527 else
528 cutoff = tp->max_window;
529
530 if (cutoff && pktsize > cutoff)
531 return max_t(int, cutoff, 68U - tp->tcp_header_len);
532 else
533 return pktsize;
534}
535
536/* tcp.c */
537extern void tcp_get_info(const struct sock *, struct tcp_info *);
538
539/* Read 'sendfile()'-style from a TCP socket */
540typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
541 unsigned int, size_t);
542extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
543 sk_read_actor_t recv_actor);
544
545extern void tcp_initialize_rcv_mss(struct sock *sk);
546
547extern int tcp_mtu_to_mss(const struct sock *sk, int pmtu);
548extern int tcp_mss_to_mtu(const struct sock *sk, int mss);
549extern void tcp_mtup_init(struct sock *sk);
550extern void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt);
551
552static inline void tcp_bound_rto(const struct sock *sk)
553{
554 if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
555 inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
556}
557
/* Classic RTO formula: smoothed RTT (srtt is stored <<3) plus the RTT
 * variance term.
 */
static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
	return (tp->srtt >> 3) + tp->rttvar;
}
562
/* Precompute the header-prediction flags: expected header length
 * (in 32-bit words, shifted into the top bits), the ACK flag, and the
 * expected window @snd_wnd, stored in network byte order so incoming
 * headers can be matched with a single comparison.
 */
static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}
569
/* Enable header prediction using the current send window, de-scaled
 * back to on-the-wire units.
 */
static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}
574
575static inline void tcp_fast_path_check(struct sock *sk)
576{
577 struct tcp_sock *tp = tcp_sk(sk);
578
579 if (skb_queue_empty(&tp->out_of_order_queue) &&
580 tp->rcv_wnd &&
581 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
582 !tp->urg_data)
583 tcp_fast_path_on(tp);
584}
585
586/* Compute the actual rto_min value */
587static inline u32 tcp_rto_min(struct sock *sk)
588{
589 const struct dst_entry *dst = __sk_dst_get(sk);
590 u32 rto_min = TCP_RTO_MIN;
591
592 if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
593 rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
594 return rto_min;
595}
596
597/* Compute the actual receive window we are currently advertising.
598 * Rcv_nxt can be after the window if our peer push more data
599 * than the offered window.
600 */
601static inline u32 tcp_receive_window(const struct tcp_sock *tp)
602{
603 s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;
604
605 if (win < 0)
606 win = 0;
607 return (u32) win;
608}
609
610/* Choose a new window, without checks for shrinking, and without
611 * scaling applied to the result. The caller does these things
612 * if necessary. This is a "raw" window selection.
613 */
614extern u32 __tcp_select_window(struct sock *sk);
615
616/* TCP timestamps are only 32-bits, this causes a slight
617 * complication on 64-bit systems since we store a snapshot
618 * of jiffies in the buffer control blocks below. We decided
619 * to use only the low 32-bits of jiffies and hide the ugly
620 * casts with the following macro.
621 */
622#define tcp_time_stamp ((__u32)(jiffies))
623
624#define tcp_flag_byte(th) (((u_int8_t *)th)[13])
625
626#define TCPHDR_FIN 0x01
627#define TCPHDR_SYN 0x02
628#define TCPHDR_RST 0x04
629#define TCPHDR_PSH 0x08
630#define TCPHDR_ACK 0x10
631#define TCPHDR_URG 0x20
632#define TCPHDR_ECE 0x40
633#define TCPHDR_CWR 0x80
634
635/* This is what the send packet queuing engine uses to pass
636 * TCP per-packet control information to the transmission code.
637 * We also store the host-order sequence numbers in here too.
638 * This is 44 bytes if IPV6 is enabled.
639 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
640 */
/* Per-packet TCP control information carried in skb->cb[] by the send
 * packet queuing engine; also stores host-order sequence numbers.
 * This is 44 bytes if IPV6 is enabled.
 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
		struct inet6_skb_parm	h6;
#endif
	} header;	/* For incoming frames */
	__u32		seq;		/* Starting sequence number */
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen */
	__u32		when;		/* used to compute rtt's */
	__u8		tcp_flags;	/* TCP header flags. (tcp[13]) */

	__u8		sacked;		/* State flags for SACK/FACK. */
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block */
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted */
#define TCPCB_LOST		0x04	/* SKB is lost */
#define TCPCB_TAGBITS		0x07	/* All tag bits */
	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield */
	/* 1 byte hole */
#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame */
#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)

	__u32		ack_seq;	/* Sequence number ACK'd */
};
664
665#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
666
667/* Due to TSO, an SKB can be composed of multiple actual
668 * packets. To keep these tracked properly, we use this.
669 */
/* Number of actual packets a (possibly TSO-aggregated) SKB represents. */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_segs;
}
674
675/* This is valid iff tcp_skb_pcount() > 1. */
/* MSS used for the segments inside a TSO SKB.
 * This is valid iff tcp_skb_pcount() > 1.
 */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}
680
681/* Events passed to congestion control interface */
/* Events passed to the congestion control interface via cwnd_event(). */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_FRTO,		/* fast recovery timeout */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_FAST_ACK,	/* in sequence ack */
	CA_EVENT_SLOW_ACK,	/* other ack */
};
691
692/*
693 * Interface for adding new TCP congestion control handlers
694 */
695#define TCP_CA_NAME_MAX 16
696#define TCP_CA_MAX 128
697#define TCP_CA_BUF_MAX (TCP_CA_NAME_MAX*TCP_CA_MAX)
698
699#define TCP_CONG_NON_RESTRICTED 0x1
700#define TCP_CONG_RTT_STAMP 0x2
701
/* Interface implemented by pluggable TCP congestion control modules.
 * ssthresh() and cong_avoid() are required; everything else is
 * optional.  Registered/unregistered via
 * tcp_register_congestion_control()/tcp_unregister_congestion_control().
 */
struct tcp_congestion_ops {
	struct list_head	list;	/* link on the global algorithm list */
	unsigned long		flags;	/* TCP_CONG_* bits */

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data (optional) */
	void (*release)(struct sock *sk);

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);
	/* lower bound for congestion window (optional) */
	u32 (*min_cwnd)(const struct sock *sk);
	/* do new cwnd calculation (required) */
	void (*cong_avoid)(struct sock *sk, u32 ack, u32 in_flight);
	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);
	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
	/* new value of cwnd after loss (optional) */
	u32 (*undo_cwnd)(struct sock *sk);
	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
	/* get info for inet_diag (optional) */
	void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);

	char		name[TCP_CA_NAME_MAX];	/* algorithm name, e.g. "reno" */
	struct module	*owner;
};
731
732extern int tcp_register_congestion_control(struct tcp_congestion_ops *type);
733extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
734
735extern void tcp_init_congestion_control(struct sock *sk);
736extern void tcp_cleanup_congestion_control(struct sock *sk);
737extern int tcp_set_default_congestion_control(const char *name);
738extern void tcp_get_default_congestion_control(char *name);
739extern void tcp_get_available_congestion_control(char *buf, size_t len);
740extern void tcp_get_allowed_congestion_control(char *buf, size_t len);
741extern int tcp_set_allowed_congestion_control(char *allowed);
742extern int tcp_set_congestion_control(struct sock *sk, const char *name);
743extern void tcp_slow_start(struct tcp_sock *tp);
744extern void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
745
746extern struct tcp_congestion_ops tcp_init_congestion_ops;
747extern u32 tcp_reno_ssthresh(struct sock *sk);
748extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight);
749extern u32 tcp_reno_min_cwnd(const struct sock *sk);
750extern struct tcp_congestion_ops tcp_reno;
751
/* Switch the congestion avoidance state machine to @ca_state, giving
 * the congestion control module a chance to react first: set_state()
 * is deliberately called before icsk_ca_state is updated, so the
 * module still sees the old state.
 */
static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->set_state)
		icsk->icsk_ca_ops->set_state(sk, ca_state);
	icsk->icsk_ca_state = ca_state;
}
760
/* Forward a congestion event to the congestion control module, if it
 * registered a cwnd_event() hook.
 */
static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}
768
769/* These functions determine how the current flow behaves in respect of SACK
770 * handling. SACK is negotiated with the peer, and therefore it can vary
771 * between different flows.
772 *
773 * tcp_is_sack - SACK enabled
774 * tcp_is_reno - No SACK
775 * tcp_is_fack - FACK enabled, implies SACK enabled
776 */
/* SACK was negotiated with the peer for this flow. */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok;
}
781
/* No SACK: fall back to plain (New)Reno duplicate-ACK accounting. */
static inline int tcp_is_reno(const struct tcp_sock *tp)
{
	return !tcp_is_sack(tp);
}
786
/* FACK enabled for this flow (implies SACK enabled). */
static inline int tcp_is_fack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok & TCP_FACK_ENABLED;
}
791
/* Turn on FACK for this flow; sack_ok doubles as a flag word. */
static inline void tcp_enable_fack(struct tcp_sock *tp)
{
	tp->rx_opt.sack_ok |= TCP_FACK_ENABLED;
}
796
/* Packets that have left the network but are not yet cumulatively
 * ACKed: SACKed plus presumed-lost segments.
 */
static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}
801
802/* This determines how many packets are "in the network" to the best
803 * of our knowledge. In many cases it is conservative, but where
804 * detailed information is available from the receiver (via SACK
805 * blocks etc.) we can make more aggressive calculations.
806 *
807 * Use this for decisions involving congestion control, use just
808 * tp->packets_out to determine if the send queue is empty or not.
809 *
810 * Read this equation as:
811 *
812 * "Packets sent once on transmission queue" MINUS
813 * "Packets left network, but not honestly ACKed yet" PLUS
814 * "Packets fast retransmitted"
815 */
/* Best-effort estimate of packets currently "in the network":
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 *
 * Use this for congestion control decisions; use tp->packets_out to
 * test whether the send queue is empty.
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}
820
821#define TCP_INFINITE_SSTHRESH 0x7fffffff
822
/* Still in initial slow start: ssthresh has never been reduced below
 * its "infinite" initial value.
 */
static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}
827
828/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
829 * The exception is rate halving phase, when cwnd is decreasing towards
830 * ssthresh.
831 */
832static inline __u32 tcp_current_ssthresh(const struct sock *sk)
833{
834 const struct tcp_sock *tp = tcp_sk(sk);
835
836 if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery))
837 return tp->snd_ssthresh;
838 else
839 return max(tp->snd_ssthresh,
840 ((tp->snd_cwnd >> 1) +
841 (tp->snd_cwnd >> 2)));
842}
843
844/* Use define here intentionally to get WARN_ON location shown at the caller */
845#define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out)
846
847extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
848extern __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
849
850/* The maximum number of MSS of available cwnd for which TSO defers
851 * sending if not using sysctl_tcp_tso_win_divisor.
852 */
/* The maximum number of MSS of available cwnd for which TSO defers
 * sending if not using sysctl_tcp_tso_win_divisor.
 */
static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
{
	return 3;
}
857
858/* Slow start with delack produces 3 packets of burst, so that
859 * it is safe "de facto". This will be the default - same as
860 * the default reordering threshold - but if reordering increases,
861 * we must be able to allow cwnd to burst at least this much in order
862 * to not pull it back when holes are filled.
863 */
/* Slow start with delack produces 3 packets of burst, so that
 * it is safe "de facto". This will be the default - same as
 * the default reordering threshold - but if reordering increases,
 * we must be able to allow cwnd to burst at least this much in order
 * to not pull it back when holes are filled.
 */
static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
{
	return tp->reordering;
}
868
869/* Returns end sequence number of the receiver's advertised window */
/* Returns end sequence number of the receiver's advertised window. */
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
	return tp->snd_una + tp->snd_wnd;
}
874extern int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);
875
/* Record the end of the last sub-MSS ("small") segment sent, used by
 * the Minshall variant of the Nagle test.
 */
static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
				       const struct sk_buff *skb)
{
	if (skb->len < mss)
		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
}
882
883static inline void tcp_check_probe_timer(struct sock *sk)
884{
885 const struct tcp_sock *tp = tcp_sk(sk);
886 const struct inet_connection_sock *icsk = inet_csk(sk);
887
888 if (!tp->packets_out && !icsk->icsk_pending)
889 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
890 icsk->icsk_rto, TCP_RTO_MAX);
891}
892
/* Initialize snd_wl1 (sequence used for window-update decisions) to @seq. */
static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}
897
/* Record @seq as the sequence of the segment that last updated the
 * window (same store as tcp_init_wl; kept separate for call-site intent).
 */
static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}
902
903/*
904 * Calculate(/check) TCP checksum
905 */
906static inline __sum16 tcp_v4_check(int len, __be32 saddr,
907 __be32 daddr, __wsum base)
908{
909 return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
910}
911
/* Unconditionally finish the checksum over the whole skb. */
static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
{
	return __skb_checksum_complete(skb);
}
916
/* Verify the segment checksum, skipping the work when the stack has
 * already validated it (checksum-unnecessary offload). Non-zero means
 * the checksum is bad.
 */
static inline int tcp_checksum_complete(struct sk_buff *skb)
{
	if (skb_csum_unnecessary(skb))
		return 0;
	return __tcp_checksum_complete(skb) != 0;
}
922
/* Prequeue for VJ style copy to user, combined with checksumming. */

/* Reset all user-copy (prequeue) state on the socket. */
static inline void tcp_prequeue_init(struct tcp_sock *tp)
{
	tp->ucopy.task = NULL;
	tp->ucopy.len = 0;
	tp->ucopy.memory = 0;
	skb_queue_head_init(&tp->ucopy.prequeue);
#ifdef CONFIG_NET_DMA
	/* DMA-offloaded copy state, only when NET_DMA is configured. */
	tp->ucopy.dma_chan = NULL;
	tp->ucopy.wakeup = 0;
	tp->ucopy.pinned_list = NULL;
	tp->ucopy.dma_cookie = 0;
#endif
}
938
/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see, why it failed. 8)8) --ANK
 *
 * NOTE: is this not too big to inline?
 *
 * Returns 0 when the caller must process the skb itself, 1 when the
 * skb has been consumed here (queued, or drained to backlog receive).
 */
static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Bypass prequeueing in low-latency mode or with no blocked reader. */
	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return 0;

	skb_dst_force(skb);
	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (tp->ucopy.memory > sk->sk_rcvbuf) {
		/* Prequeue outgrew the receive buffer: drain everything
		 * through the regular backlog receive path right now.
		 */
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
			sk_backlog_rcv(sk, skb1);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPPREQUEUEDROPPED);
		}

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		/* First packet queued: wake the reader, and delay the ACK
		 * so process context has a chance to take over.
		 */
		wake_up_interruptible_sync_poll(sk_sleep(sk),
					   POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return 1;
}
979
980
#undef STATE_TRACE

#ifdef STATE_TRACE
/* Human-readable TCP state names, used only for debug state tracing. */
static const char *statename[]={
	"Unused","Established","Syn Sent","Syn Recv",
	"Fin Wait 1","Fin Wait 2","Time Wait", "Close",
	"Close Wait","Last ACK","Listen","Closing"
};
#endif
990extern void tcp_set_state(struct sock *sk, int state);
991
992extern void tcp_done(struct sock *sk);
993
994static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
995{
996 rx_opt->dsack = 0;
997 rx_opt->num_sacks = 0;
998}
999
1000/* Determine a window scaling and initial window to offer. */
1001extern void tcp_select_initial_window(int __space, __u32 mss,
1002 __u32 *rcv_wnd, __u32 *window_clamp,
1003 int wscale_ok, __u8 *rcv_wscale,
1004 __u32 init_rcv_wnd);
1005
1006static inline int tcp_win_from_space(int space)
1007{
1008 return sysctl_tcp_adv_win_scale<=0 ?
1009 (space>>(-sysctl_tcp_adv_win_scale)) :
1010 space - (space>>sysctl_tcp_adv_win_scale);
1011}
1012
/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
	/* Free receive-buffer space, scaled to an advertisable window. */
	return tcp_win_from_space(sk->sk_rcvbuf -
				  atomic_read(&sk->sk_rmem_alloc));
}
1019
/* Window derived from the whole receive buffer, ignoring current usage. */
static inline int tcp_full_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf);
}
1024
/* Initialize a request sock from the options and ports of a received SYN. */
static inline void tcp_openreq_init(struct request_sock *req,
				    struct tcp_options_received *rx_opt,
				    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
	req->cookie_ts = 0;
	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
	req->mss = rx_opt->mss_clamp;
	/* Timestamp only valid if the SYN actually carried the option. */
	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
	ireq->tstamp_ok = rx_opt->tstamp_ok;
	ireq->sack_ok = rx_opt->sack_ok;
	ireq->snd_wscale = rx_opt->snd_wscale;
	ireq->wscale_ok = rx_opt->wscale_ok;
	ireq->acked = 0;
	ireq->ecn_ok = 0;
	/* Ports taken straight from the TCP header of the SYN. */
	ireq->rmt_port = tcp_hdr(skb)->source;
	ireq->loc_port = tcp_hdr(skb)->dest;
}
1045
1046extern void tcp_enter_memory_pressure(struct sock *sk);
1047
1048static inline int keepalive_intvl_when(const struct tcp_sock *tp)
1049{
1050 return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
1051}
1052
1053static inline int keepalive_time_when(const struct tcp_sock *tp)
1054{
1055 return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
1056}
1057
1058static inline int keepalive_probes(const struct tcp_sock *tp)
1059{
1060 return tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
1061}
1062
/* Elapsed time since the connection last showed signs of life on the
 * receive side: the smaller of the two receive timestamps the stack keeps.
 */
static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
{
	const struct inet_connection_sock *icsk = &tp->inet_conn;

	return min_t(u32, tcp_time_stamp - icsk->icsk_ack.lrcvtime,
			  tcp_time_stamp - tp->rcv_tstamp);
}
1070
/* FIN_WAIT_2 lifetime: linger2 (or the sysctl default), but never less
 * than 3.5 * RTO, i.e. (rto << 2) - (rto >> 1).
 */
static inline int tcp_fin_time(const struct sock *sk)
{
	int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
	const int rto = inet_csk(sk)->icsk_rto;

	if (fin_timeout < (rto << 2) - (rto >> 1))
		fin_timeout = (rto << 2) - (rto >> 1);

	return fin_timeout;
}
1081
/* PAWS timestamp check. Returns 1 when the segment's timestamp is
 * acceptable: not older than ts_recent by more than paws_win, or
 * ts_recent itself is stale (older than TCP_PAWS_24DAYS) or unset.
 */
static inline int tcp_paws_check(const struct tcp_options_received *rx_opt,
				 int paws_win)
{
	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
		return 1;
	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
		return 1;
	/*
	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
	 * then following tcp messages have valid values. Ignore 0 value,
	 * or else 'negative' tsval might forbid us to accept their packets.
	 */
	if (!rx_opt->ts_recent)
		return 1;
	return 0;
}
1098
/* Returns 1 when the segment must be rejected by PAWS. RST segments get
 * a relaxed bound (TCP_PAWS_MSL) so a rebooted peer can still reset us.
 */
static inline int tcp_paws_reject(const struct tcp_options_received *rx_opt,
				  int rst)
{
	if (tcp_paws_check(rx_opt, 0))
		return 0;

	/* RST segments are not recommended to carry timestamp,
	   and, if they do, it is recommended to ignore PAWS because
	   "their cleanup function should take precedence over timestamps."
	   Certainly, it is mistake. It is necessary to understand the reasons
	   of this constraint to relax it: if peer reboots, clock may go
	   out-of-sync and half-open connections will not be reset.
	   Actually, the problem would be not existing if all
	   the implementations followed draft about maintaining clock
	   via reboots. Linux-2.2 DOES NOT!

	   However, we can relax time bounds for RST segments to MSL.
	 */
	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
		return 0;
	return 1;
}
1121
/* Seed the per-namespace TCP MIB counters with their RFC 2012 constants. */
static inline void tcp_mib_init(struct net *net)
{
	/* See RFC 2012 */
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
	TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
}
1130
1131/* from STCP */
1132static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
1133{
1134 tp->lost_skb_hint = NULL;
1135 tp->scoreboard_skb_hint = NULL;
1136}
1137
/* Drop every cached retransmit scan position, including the retransmit hint. */
static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
{
	tcp_clear_retrans_hints_partial(tp);
	tp->retransmit_skb_hint = NULL;
}
1143
/* MD5 Signature - types for the TCP MD5 signature option support. */
struct crypto_hash;

/* Peer address a key is bound to; IPv4 always, IPv6 when enabled. */
union tcp_md5_addr {
	struct in_addr a4;
#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr a6;
#endif
};

/* - key database: one entry per configured peer key, RCU-managed. */
struct tcp_md5sig_key {
	struct hlist_node node;
	u8 keylen;		/* bytes used in key[] */
	u8 family; /* AF_INET or AF_INET6 */
	union tcp_md5_addr addr;
	u8 key[TCP_MD5SIG_MAXKEYLEN];
	struct rcu_head rcu;
};

/* - sock block: per-socket list of keys, RCU-managed. */
struct tcp_md5sig_info {
	struct hlist_head head;
	struct rcu_head rcu;
};

/* - pseudo header, as hashed into the MD5 digest (v4 layout) */
struct tcp4_pseudohdr {
	__be32 saddr;
	__be32 daddr;
	__u8 pad;
	__u8 protocol;
	__be16 len;
};

/* v6 layout of the hashed pseudo header */
struct tcp6_pseudohdr {
	struct in6_addr saddr;
	struct in6_addr daddr;
	__be32 len;
	__be32 protocol; /* including padding */
};

union tcp_md5sum_block {
	struct tcp4_pseudohdr ip4;
#if IS_ENABLED(CONFIG_IPV6)
	struct tcp6_pseudohdr ip6;
#endif
};

/* - pool: digest algorithm, hash description and scratch buffer */
struct tcp_md5sig_pool {
	struct hash_desc md5_desc;
	union tcp_md5sum_block md5_blk;
};
1198
1199/* - functions */
1200extern int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
1201 const struct sock *sk,
1202 const struct request_sock *req,
1203 const struct sk_buff *skb);
1204extern int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1205 int family, const u8 *newkey,
1206 u8 newkeylen, gfp_t gfp);
1207extern int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
1208 int family);
1209extern struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
1210 struct sock *addr_sk);
1211
#ifdef CONFIG_TCP_MD5SIG
extern struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
			const union tcp_md5_addr *addr, int family);
#define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
#else
/* MD5 disabled: lookups never find a key, so callers skip signing. */
static inline struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	return NULL;
}
#define tcp_twsk_md5_key(twsk)	NULL
#endif
1225
1226extern struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *);
1227extern void tcp_free_md5sig_pool(void);
1228
1229extern struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
1230extern void tcp_put_md5sig_pool(void);
1231
1232extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, const struct tcphdr *);
1233extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
1234 unsigned header_len);
1235extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
1236 const struct tcp_md5sig_key *key);
1237
/* write queue abstraction */

/* Free every skb on the write queue, reclaim the socket's send memory,
 * and invalidate all cached retransmit scan hints.
 */
static inline void tcp_write_queue_purge(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
		sk_wmem_free_skb(sk, skb);
	sk_mem_reclaim(sk);
	tcp_clear_all_retrans_hints(tcp_sk(sk));
}
1248
/* Oldest unacked skb on the write queue, or NULL if the queue is empty. */
static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
{
	return skb_peek(&sk->sk_write_queue);
}
1253
/* Newest skb on the write queue, or NULL if the queue is empty. */
static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
{
	return skb_peek_tail(&sk->sk_write_queue);
}
1258
/* Successor of skb on the write queue; caller must know skb is not last. */
static inline struct sk_buff *tcp_write_queue_next(const struct sock *sk,
						   const struct sk_buff *skb)
{
	return skb_queue_next(&sk->sk_write_queue, skb);
}
1264
/* Predecessor of skb on the write queue; caller must know skb is not first. */
static inline struct sk_buff *tcp_write_queue_prev(const struct sock *sk,
						   const struct sk_buff *skb)
{
	return skb_queue_prev(&sk->sk_write_queue, skb);
}
1270
/* Iterate the write queue; the _from variants start at 'skb', and the
 * _safe variant tolerates unlinking the current skb during the walk.
 */
#define tcp_for_write_queue(skb, sk)					\
	skb_queue_walk(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from(skb, sk)				\
	skb_queue_walk_from(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
1279
/* First never-sent skb on the write queue, or NULL if all data is in flight. */
static inline struct sk_buff *tcp_send_head(const struct sock *sk)
{
	return sk->sk_send_head;
}
1284
/* True when skb is the tail of the write queue. */
static inline bool tcp_skb_is_last(const struct sock *sk,
				   const struct sk_buff *skb)
{
	return skb_queue_is_last(&sk->sk_write_queue, skb);
}
1290
1291static inline void tcp_advance_send_head(struct sock *sk, const struct sk_buff *skb)
1292{
1293 if (tcp_skb_is_last(sk, skb))
1294 sk->sk_send_head = NULL;
1295 else
1296 sk->sk_send_head = tcp_write_queue_next(sk, skb);
1297}
1298
/* An skb is being unlinked from the write queue: clear any cached
 * pointers (send head, highest SACKed skb) that would otherwise dangle.
 */
static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
{
	if (sk->sk_send_head == skb_unlinked)
		sk->sk_send_head = NULL;
	if (tcp_sk(sk)->highest_sack == skb_unlinked) //CVE-2016-6828
		tcp_sk(sk)->highest_sack = NULL;
}
1306
/* Reset the send head; the write queue is (or is about to be) empty. */
static inline void tcp_init_send_head(struct sock *sk)
{
	sk->sk_send_head = NULL;
}
1311
/* Low-level enqueue at the tail; does not touch the send head. */
static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_tail(&sk->sk_write_queue, skb);
}
1316
/* Enqueue new data at the tail and, when nothing was pending, make it
 * the next skb to send (and the initial highest-SACK cache entry).
 */
static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__tcp_add_write_queue_tail(sk, skb);

	/* Queue it, remembering where we must start sending. */
	if (sk->sk_send_head == NULL) {
		sk->sk_send_head = skb;

		if (tcp_sk(sk)->highest_sack == NULL)
			tcp_sk(sk)->highest_sack = skb;
	}
}
1329
/* Low-level enqueue at the head; does not touch the send head. */
static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_head(&sk->sk_write_queue, skb);
}
1334
/* Insert buff after skb on the write queue of sk. */
static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
						struct sk_buff *buff,
						struct sock *sk)
{
	__skb_queue_after(&sk->sk_write_queue, skb, buff);
}
1342
/* Insert new before skb on the write queue of sk. */
static inline void tcp_insert_write_queue_before(struct sk_buff *new,
						  struct sk_buff *skb,
						  struct sock *sk)
{
	__skb_queue_before(&sk->sk_write_queue, skb, new);

	/* If skb was next to send, the new (earlier) skb takes that role. */
	if (sk->sk_send_head == skb)
		sk->sk_send_head = new;
}
1353
/* Remove skb from the write queue; caller handles send-head fixup. */
static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
{
	__skb_unlink(skb, &sk->sk_write_queue);
}
1358
/* Non-zero when no data is queued for (re)transmission. */
static inline int tcp_write_queue_empty(struct sock *sk)
{
	return skb_queue_empty(&sk->sk_write_queue);
}
1363
/* Transmit whatever the write queue holds beyond the send head, using
 * the current MSS and the socket's nonagle setting.
 */
static inline void tcp_push_pending_frames(struct sock *sk)
{
	if (tcp_send_head(sk)) {
		struct tcp_sock *tp = tcp_sk(sk);

		__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
	}
}
1372
/* Start sequence of the skb just after the highest skb with SACKed
 * bit, valid only if sacked_out > 0 or when the caller has ensured
 * validity by itself.
 */
static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
{
	/* No SACKs outstanding: nothing above snd_una is SACKed. */
	if (!tp->sacked_out)
		return tp->snd_una;

	/* Cache invalidated: everything up to snd_nxt may be SACKed. */
	if (tp->highest_sack == NULL)
		return tp->snd_nxt;

	return TCP_SKB_CB(tp->highest_sack)->seq;
}
1387
1388static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
1389{
1390 tcp_sk(sk)->highest_sack = tcp_skb_is_last(sk, skb) ? NULL :
1391 tcp_write_queue_next(sk, skb);
1392}
1393
/* Cached pointer to the skb just past the highest SACKed one. */
static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
{
	return tcp_sk(sk)->highest_sack;
}
1398
/* Restart the highest-SACK cache from the head of the write queue. */
static inline void tcp_highest_sack_reset(struct sock *sk)
{
	tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
}
1403
/* Called when old skb is about to be deleted (to be combined with new skb) */
static inline void tcp_highest_sack_combine(struct sock *sk,
					    struct sk_buff *old,
					    struct sk_buff *new)
{
	/* Re-point the cache only while SACKs are outstanding. */
	if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
		tcp_sk(sk)->highest_sack = new;
}
1412
/* Determines whether this is a thin stream (which may suffer from
 * increased latency). Used to trigger latency-reducing mechanisms.
 */
static inline unsigned int tcp_stream_is_thin(struct tcp_sock *tp)
{
	/* Fewer than 4 packets in flight, and past initial slow start. */
	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
}
1420
/* /proc - support for iterating sockets in /proc/net (seq_file). */

/* Which table the iterator is currently walking. */
enum tcp_seq_states {
	TCP_SEQ_STATE_LISTENING,
	TCP_SEQ_STATE_OPENREQ,
	TCP_SEQ_STATE_ESTABLISHED,
	TCP_SEQ_STATE_TIME_WAIT,
};

int tcp_seq_open(struct inode *inode, struct file *file);

/* Per-address-family registration data for the /proc seq files. */
struct tcp_seq_afinfo {
	char				*name;
	sa_family_t			family;
	const struct file_operations	*seq_fops;
	struct seq_operations		seq_ops;
};

/* Iterator cursor: current table, bucket and position within it. */
struct tcp_iter_state {
	struct seq_net_private	p;
	sa_family_t		family;
	enum tcp_seq_states	state;
	struct sock		*syn_wait_sk;
	int			bucket, offset, sbucket, num, uid;
	loff_t			last_pos;
};
1446
1447extern int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
1448extern void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);
1449
1450extern struct request_sock_ops tcp_request_sock_ops;
1451extern struct request_sock_ops tcp6_request_sock_ops;
1452
1453extern void tcp_v4_destroy_sock(struct sock *sk);
1454
1455extern int tcp_v4_gso_send_check(struct sk_buff *skb);
1456extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
1457 netdev_features_t features);
1458extern struct sk_buff **tcp_gro_receive(struct sk_buff **head,
1459 struct sk_buff *skb);
1460extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head,
1461 struct sk_buff *skb);
1462extern int tcp_gro_complete(struct sk_buff *skb);
1463extern int tcp4_gro_complete(struct sk_buff *skb);
1464
1465extern int tcp_nuke_addr(struct net *net, struct sockaddr *addr);
1466
1467#ifdef CONFIG_PROC_FS
1468extern int tcp4_proc_init(void);
1469extern void tcp4_proc_exit(void);
1470#endif
1471
/* TCP af-specific functions - per-family hooks for MD5 key lookup,
 * hashing and setsockopt parsing (full sockets and request socks).
 */
struct tcp_sock_af_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	*(*md5_lookup) (struct sock *sk,
						struct sock *addr_sk);
	int			(*calc_md5_hash) (char *location,
						  struct tcp_md5sig_key *md5,
						  const struct sock *sk,
						  const struct request_sock *req,
						  const struct sk_buff *skb);
	int			(*md5_parse) (struct sock *sk,
					      char __user *optval,
					      int optlen);
#endif
};

/* Same hooks, scoped to a request sock (no setsockopt parsing). */
struct tcp_request_sock_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	*(*md5_lookup) (struct sock *sk,
						struct request_sock *req);
	int			(*calc_md5_hash) (char *location,
						  struct tcp_md5sig_key *md5,
						  const struct sock *sk,
						  const struct request_sock *req,
						  const struct sk_buff *skb);
#endif
};
1499
1500/* Using SHA1 for now, define some constants.
1501 */
1502#define COOKIE_DIGEST_WORDS (SHA_DIGEST_WORDS)
1503#define COOKIE_MESSAGE_WORDS (SHA_MESSAGE_BYTES / 4)
1504#define COOKIE_WORKSPACE_WORDS (COOKIE_DIGEST_WORDS + COOKIE_MESSAGE_WORDS)
1505
1506extern int tcp_cookie_generator(u32 *bakery);
1507
1508/**
1509 * struct tcp_cookie_values - each socket needs extra space for the
1510 * cookies, together with (optional) space for any SYN data.
1511 *
1512 * A tcp_sock contains a pointer to the current value, and this is
1513 * cloned to the tcp_timewait_sock.
1514 *
1515 * @cookie_pair: variable data from the option exchange.
1516 *
1517 * @cookie_desired: user specified tcpct_cookie_desired. Zero
1518 * indicates default (sysctl_tcp_cookie_size).
1519 * After cookie sent, remembers size of cookie.
1520 * Range 0, TCP_COOKIE_MIN to TCP_COOKIE_MAX.
1521 *
1522 * @s_data_desired: user specified tcpct_s_data_desired. When the
1523 * constant payload is specified (@s_data_constant),
1524 * holds its length instead.
1525 * Range 0 to TCP_MSS_DESIRED.
1526 *
1527 * @s_data_payload: constant data that is to be included in the
1528 * payload of SYN or SYNACK segments when the
1529 * cookie option is present.
1530 */
struct tcp_cookie_values {
	struct kref	kref;	/* refcount; released via tcp_cookie_values_release() */
	u8		cookie_pair[TCP_COOKIE_PAIR_SIZE];
	u8		cookie_pair_size;	/* NOTE(review): appears to be used length of cookie_pair - confirm */
	u8		cookie_desired;
	u16		s_data_desired:11,
			s_data_constant:1,
			s_data_in:1,
			s_data_out:1,
			s_data_unused:2;
	u8		s_data_payload[0];	/* flexible trailing constant payload */
};
1543
/* kref release callback: frees the containing tcp_cookie_values. */
static inline void tcp_cookie_values_release(struct kref *kref)
{
	kfree(container_of(kref, struct tcp_cookie_values, kref));
}
1548
1549/* The length of constant payload data. Note that s_data_desired is
1550 * overloaded, depending on s_data_constant: either the length of constant
1551 * data (returned here) or the limit on variable data.
1552 */
1553static inline int tcp_s_data_size(const struct tcp_sock *tp)
1554{
1555 return (tp->cookie_values != NULL && tp->cookie_values->s_data_constant)
1556 ? tp->cookie_values->s_data_desired
1557 : 0;
1558}
1559
1560/**
1561 * struct tcp_extend_values - tcp_ipv?.c to tcp_output.c workspace.
1562 *
1563 * As tcp_request_sock has already been extended in other places, the
1564 * only remaining method is to pass stack values along as function
1565 * parameters. These parameters are not needed after sending SYNACK.
1566 *
1567 * @cookie_bakery: cryptographic secret and message workspace.
1568 *
1569 * @cookie_plus: bytes in authenticator/cookie option, copied from
1570 * struct tcp_options_received (above).
1571 */
struct tcp_extend_values {
	struct request_values		rv;	/* must stay first: tcp_xv() casts through it */
	u32				cookie_bakery[COOKIE_WORKSPACE_WORDS];
	u8				cookie_plus:6,
					cookie_out_never:1,
					cookie_in_always:1;
};
1579
/* Downcast generic request_values to the TCP-specific wrapper; valid
 * because 'rv' is the first member of struct tcp_extend_values.
 */
static inline struct tcp_extend_values *tcp_xv(struct request_values *rvp)
{
	return (struct tcp_extend_values *)rvp;
}
1584
1585extern void tcp_v4_init(void);
1586extern void tcp_init(void);
1587
1588#endif /* _TCP_H */