/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/module.h>
#include <linux/gfp.h>
#include <net/tcp.h>

int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
int sysctl_tcp_keepalive_probes __read_mostly = TCP_KEEPALIVE_PROBES;
int sysctl_tcp_keepalive_intvl __read_mostly = TCP_KEEPALIVE_INTVL;
int sysctl_tcp_retries1 __read_mostly = TCP_RETR1;
int sysctl_tcp_retries2 __read_mostly = TCP_RETR2;
int sysctl_tcp_orphan_retries __read_mostly;
int sysctl_tcp_thin_linear_timeouts __read_mostly;

static void tcp_write_timer(unsigned long);
static void tcp_delack_timer(unsigned long);
static void tcp_keepalive_timer(unsigned long data);

void tcp_init_xmit_timers(struct sock *sk)
{
	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
				  &tcp_keepalive_timer);
}
EXPORT_SYMBOL(tcp_init_xmit_timers);

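/* Abort the connection: report the pending soft error (or ETIMEDOUT) to the
 * application, tear the socket down, and account the timeout-triggered abort.
 */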
static void tcp_write_err(struct sock *sk)
{
	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
	sk->sk_error_report(sk);

	tcp_done(sk);
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}

/* Do not allow orphaned sockets to eat all our resources.
 * This is a direct violation of the TCP specs, but it is required
 * to prevent DoS attacks. It is called when a retransmission timeout
 * or zero probe timeout occurs on an orphaned socket.
 *
 * The criteria are still not confirmed experimentally and may change.
 * We kill the socket if:
 * 1. The number of orphaned sockets exceeds an administratively configured
 *    limit.
 * 2. We are under strong memory pressure.
 */
static int tcp_out_of_resources(struct sock *sk, int do_reset)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int shift = 0;

	/* If the peer does not open its window for a long time, or has not
	 * transmitted anything for a long time, penalize it.
	 */
	if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
		shift++;

	/* If some dubious ICMP arrived, penalize even more. */
	if (sk->sk_err_soft)
		shift++;

	if (tcp_check_oom(sk, shift)) {
		/* Catch exceptional cases where the connection requires a reset:
		 * 1. The last segment was sent recently. */
		if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
		    /* 2. Window is closed. */
		    (!tp->snd_wnd && !tp->packets_out))
			do_reset = 1;
		if (do_reset)
			tcp_send_active_reset(sk, GFP_ATOMIC);
		tcp_done(sk);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
		return 1;
	}
	return 0;
}

/* Calculate the maximum number of retries on an orphaned socket. */
static int tcp_orphan_retries(struct sock *sk, int alive)
{
	int retries = sysctl_tcp_orphan_retries; /* May be zero. */

	/* We know from an ICMP that something is wrong. */
	if (sk->sk_err_soft && !alive)
		retries = 0;

	/* However, if the socket sent something recently, select a safe
	 * number of retries. 8 corresponds to >100 seconds with a minimal
	 * RTO of 200 msec.
	 */
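	/* (With exponential backoff from 200 msec, 8 retries give up after
	 * ((2 << 8) - 1) * 200 msec = 102.2 sec, per the formula in
	 * retransmits_timed_out() below.)
	 */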
	if (retries == 0 && alive)
		retries = 8;
	return retries;
}

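/* Path MTU blackhole workaround: on the first sign of a blackhole, enable
 * MTU probing; on later signs, halve the current probe-floor MSS, clamped
 * between the 68-byte minimum IPv4 MTU (less the TCP header) and
 * sysctl_tcp_base_mss.
 */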
static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
{
	/* Black hole detection */
	if (sysctl_tcp_mtu_probing) {
		if (!icsk->icsk_mtup.enabled) {
			icsk->icsk_mtup.enabled = 1;
			tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		} else {
			struct tcp_sock *tp = tcp_sk(sk);
			int mss;

			mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
			mss = min(sysctl_tcp_base_mss, mss);
			mss = max(mss, 68 - tp->tcp_header_len);
			icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
			tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		}
	}
}

/* This function calculates a "timeout" which is equivalent to the timeout of
 * a TCP connection after "boundary" unsuccessful, exponentially backed-off
 * retransmissions with an initial RTO of TCP_RTO_MIN, or TCP_TIMEOUT_INIT if
 * the syn_set flag is set.
 */
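/* For example, assuming the usual TCP_RTO_MIN of 200 msec and TCP_RTO_MAX of
 * 120 sec: linear_backoff_thresh = ilog2(120000 / 200) = 9, so a boundary of
 * 15 (the conventional tcp_retries2 default) times out after
 * ((2 << 9) - 1) * 200 msec + (15 - 9) * 120 sec, i.e. about 924.6 sec.
 */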
static bool retransmits_timed_out(struct sock *sk,
				  unsigned int boundary,
				  unsigned int timeout,
				  bool syn_set)
{
	unsigned int linear_backoff_thresh, start_ts;
	unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;

	if (!inet_csk(sk)->icsk_retransmits)
		return false;

	if (unlikely(!tcp_sk(sk)->retrans_stamp))
		start_ts = TCP_SKB_CB(tcp_write_queue_head(sk))->when;
	else
		start_ts = tcp_sk(sk)->retrans_stamp;

	if (likely(timeout == 0)) {
		linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);

		if (boundary <= linear_backoff_thresh)
			timeout = ((2 << boundary) - 1) * rto_base;
		else
			timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
				(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
	}
	return (tcp_time_stamp - start_ts) >= timeout;
}


/* A write timeout has occurred. Process the after effects. */
static int tcp_write_timeout(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int retry_until;
	bool do_reset, syn_set = false;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		if (icsk->icsk_retransmits)
			dst_negative_advice(sk);
		retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
		syn_set = true;
	} else {
		if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) {
			/* Black hole detection */
			tcp_mtu_probing(icsk, sk);

			dst_negative_advice(sk);
		}

		retry_until = sysctl_tcp_retries2;
		if (sock_flag(sk, SOCK_DEAD)) {
			const int alive = (icsk->icsk_rto < TCP_RTO_MAX);

			retry_until = tcp_orphan_retries(sk, alive);
			do_reset = alive ||
				!retransmits_timed_out(sk, retry_until, 0, 0);

			if (tcp_out_of_resources(sk, do_reset))
				return 1;
		}
	}

	if (retransmits_timed_out(sk, retry_until,
				  syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
		/* Has it gone just too far? */
		tcp_write_err(sk);
		return 1;
	}
	return 0;
}

static void tcp_delack_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		icsk->icsk_ack.blocked = 1;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
		sk_reset_timer(sk, &icsk->icsk_delack_timer, jiffies + TCP_DELACK_MIN);
		goto out_unlock;
	}

	sk_mem_reclaim_partial(sk);

	if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		goto out;

	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
		goto out;
	}
	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	if (!skb_queue_empty(&tp->ucopy.prequeue)) {
		struct sk_buff *skb;

		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);

		while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
			sk_backlog_rcv(sk, skb);

		tp->ucopy.memory = 0;
	}

	if (inet_csk_ack_scheduled(sk)) {
		if (!icsk->icsk_ack.pingpong) {
			/* Delayed ACK missed: inflate ATO. */
			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
		} else {
			/* Delayed ACK missed: leave pingpong mode and
			 * deflate ATO.
			 */
			icsk->icsk_ack.pingpong = 0;
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		}
		tcp_send_ack(sk);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
	}

out:
	if (sk_under_memory_pressure(sk))
		sk_mem_reclaim(sk);
out_unlock:
	bh_unlock_sock(sk);
	sock_put(sk);
}

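/* Zero window probe timer: fires when the peer keeps its receive window
 * closed. Abort if too many probes went unanswered (or an orphan is out of
 * resources); otherwise send another window probe.
 */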
static void tcp_probe_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int max_probes;

	if (tp->packets_out || !tcp_send_head(sk)) {
		icsk->icsk_probes_out = 0;
		return;
	}

	/* *WARNING* RFC 1122 forbids this
	 *
	 * It doesn't AFAIK, because we kill the retransmit timer -AK
	 *
	 * FIXME: We ought not to do it, Solaris 2.5 actually has fixing
	 * this behaviour in Solaris down as a bug fix. [AC]
	 *
	 * Let me explain. icsk_probes_out is zeroed by incoming ACKs
	 * even if they advertise a zero window. Hence, the connection is
	 * killed only if we received no ACKs for the normal connection
	 * timeout. It is not killed merely because the window stays zero
	 * for some time; the window may be zero until armageddon and even
	 * later. We are in full accordance with the RFCs; only our probe
	 * timer combines both the retransmission timeout and the probe
	 * timeout in one bottle. --ANK
	 */
	max_probes = sysctl_tcp_retries2;

	if (sock_flag(sk, SOCK_DEAD)) {
		const int alive = ((icsk->icsk_rto << icsk->icsk_backoff) < TCP_RTO_MAX);

		max_probes = tcp_orphan_retries(sk, alive);

		if (tcp_out_of_resources(sk, alive || icsk->icsk_probes_out <= max_probes))
			return;
	}

	if (icsk->icsk_probes_out > max_probes) {
		tcp_write_err(sk);
	} else {
		/* Only send another probe if we didn't close things up. */
		tcp_send_probe0(sk);
	}
}

/*
 *	The TCP retransmit timer.
 */

void tcp_retransmit_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (!tp->packets_out)
		goto out;

	WARN_ON(tcp_write_queue_empty(sk));

	if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
	    !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
		/* The receiver has dastardly shrunk the window. Our
		 * retransmits become zero window probes, but we should not
		 * time out this connection. If the socket is an orphan,
		 * time it out; we cannot allow such beasts to hang around
		 * indefinitely.
		 */
		struct inet_sock *inet = inet_sk(sk);
		if (sk->sk_family == AF_INET) {
			LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n"),
				       &inet->inet_daddr,
				       ntohs(inet->inet_dport), inet->inet_num,
				       tp->snd_una, tp->snd_nxt);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (sk->sk_family == AF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);
			LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n"),
				       &np->daddr,
				       ntohs(inet->inet_dport), inet->inet_num,
				       tp->snd_una, tp->snd_nxt);
		}
#endif
		if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
			tcp_write_err(sk);
			goto out;
		}
		tcp_enter_loss(sk, 0);
		tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
		__sk_dst_reset(sk);
		goto out_reset_timer;
	}

	if (tcp_write_timeout(sk))
		goto out;

	if (icsk->icsk_retransmits == 0) {
		int mib_idx;

		if (icsk->icsk_ca_state == TCP_CA_Recovery) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
			else
				mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
		} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
			mib_idx = LINUX_MIB_TCPLOSSFAILURES;
		} else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
			   tp->sacked_out) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKFAILURES;
			else
				mib_idx = LINUX_MIB_TCPRENOFAILURES;
		} else {
			mib_idx = LINUX_MIB_TCPTIMEOUTS;
		}
		NET_INC_STATS_BH(sock_net(sk), mib_idx);
	}

	if (tcp_use_frto(sk)) {
		tcp_enter_frto(sk);
	} else {
		tcp_enter_loss(sk, 0);
	}

	if (tcp_retransmit_skb(sk, tcp_write_queue_head(sk)) > 0) {
		/* Retransmission failed because of local congestion,
		 * do not backoff.
		 */
		if (!icsk->icsk_retransmits)
			icsk->icsk_retransmits = 1;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL),
					  TCP_RTO_MAX);
		goto out;
	}

	/* Increase the timeout each time we retransmit. Note that
	 * we do not increase the rtt estimate. rto is initialized
	 * from rtt, but increases here. Jacobson (SIGCOMM 88) suggests
	 * that doubling rto each time is the least we can get away with.
	 * In KA9Q, Karn uses this for the first few times, and then
	 * goes to quadratic. netBSD doubles, but only goes up to *64,
	 * and clamps at 1 to 64 sec afterwards. Note that 120 sec is
	 * defined in the protocol as the maximum possible RTT. I guess
	 * we'll have to use something other than TCP to talk to the
	 * University of Mars.
	 *
	 * PAWS allows us longer timeouts and large windows, so once
	 * implemented ftp to mars will work nicely. We will have to fix
	 * the 120 second clamps though!
	 */
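	/* (E.g. an RTO of 500 msec doubles to 1, 2, 4, ... sec and is
	 * clamped to TCP_RTO_MAX, 120 sec, from the eighth doubling on.)
	 */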
	icsk->icsk_backoff++;
	icsk->icsk_retransmits++;

out_reset_timer:
	/* If the stream is thin, use linear timeouts. Since 'icsk_backoff'
	 * is used to reset the timer, set it to 0. Recalculate 'icsk_rto'
	 * as well, since it might have been increased while the stream
	 * oscillated between thin and thick; the old value might already be
	 * too high compared to the value set by 'tcp_set_rto' in
	 * tcp_input.c, which resets the rto without backoff. Limit linear
	 * timeouts to TCP_THIN_LINEAR_RETRIES before initiating exponential
	 * backoff behaviour, to avoid continuing to hammer linear-timeout
	 * retransmissions into a black hole.
	 */
	if (sk->sk_state == TCP_ESTABLISHED &&
	    (tp->thin_lto || sysctl_tcp_thin_linear_timeouts) &&
	    tcp_stream_is_thin(tp) &&
	    icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
		icsk->icsk_backoff = 0;
		icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
	} else {
		/* Use normal (exponential) backoff */
		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
	}
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
	if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0, 0))
		__sk_dst_reset(sk);

out:;
}

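/* Common write timer handler: dispatches to the retransmit or zero window
 * probe handler, depending on which event is pending. If the socket is
 * still owned by user context, back off briefly (HZ/20) and retry.
 */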
static void tcp_write_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct inet_connection_sock *icsk = inet_csk(sk);
	int event;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later */
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + (HZ / 20));
		goto out_unlock;
	}

	if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
		goto out;

	if (time_after(icsk->icsk_timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
		goto out;
	}

	event = icsk->icsk_pending;
	icsk->icsk_pending = 0;

	switch (event) {
	case ICSK_TIME_RETRANS:
		tcp_retransmit_timer(sk);
		break;
	case ICSK_TIME_PROBE0:
		tcp_probe_timer(sk);
		break;
	}

out:
	sk_mem_reclaim(sk);
out_unlock:
	bh_unlock_sock(sk);
	sock_put(sk);
}

/*
 *	Timer for listening sockets
 */

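/* Walk the SYN-ACK request queue of a listening socket and retransmit or
 * drop stale requests. TCP_SYNQ_INTERVAL is the pruning period (HZ/5, i.e.
 * 200 msec, in kernels of this vintage).
 */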
static void tcp_synack_timer(struct sock *sk)
{
	inet_csk_reqsk_queue_prune(sk, TCP_SYNQ_INTERVAL,
				   TCP_TIMEOUT_INIT, TCP_RTO_MAX);
}

void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req)
{
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
}
EXPORT_SYMBOL(tcp_syn_ack_timeout);

void tcp_set_keepalive(struct sock *sk, int val)
{
	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		return;

	if (val && !sock_flag(sk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
	else if (!val)
		inet_csk_delete_keepalive_timer(sk);
}


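/* The keepalive timer is shared: for LISTEN sockets it prunes the SYN
 * queue, for orphaned FIN_WAIT2 sockets it enforces the linger2 timeout,
 * and for established connections it sends keepalive probes once the
 * connection has been idle for the configured keepalive time.
 */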
static void tcp_keepalive_timer(unsigned long data)
{
	struct sock *sk = (struct sock *) data;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	u32 elapsed;

	/* Only process if socket is not in use. */
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		inet_csk_reset_keepalive_timer(sk, HZ/20);
		goto out;
	}

	if (sk->sk_state == TCP_LISTEN) {
		tcp_synack_timer(sk);
		goto out;
	}

	if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
		if (tp->linger2 >= 0) {
			const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;

			if (tmo > 0) {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
		tcp_send_active_reset(sk, GFP_ATOMIC);
		goto death;
	}

	if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE)
		goto out;

	elapsed = keepalive_time_when(tp);

	/* It is alive without keepalive 8) */
	if (tp->packets_out || tcp_send_head(sk))
		goto resched;

	elapsed = keepalive_time_elapsed(tp);

	if (elapsed >= keepalive_time_when(tp)) {
		/* If the TCP_USER_TIMEOUT option is enabled, use that
		 * to determine when to time out instead.
		 */
		if ((icsk->icsk_user_timeout != 0 &&
		    elapsed >= icsk->icsk_user_timeout &&
		    icsk->icsk_probes_out > 0) ||
		    (icsk->icsk_user_timeout == 0 &&
		    icsk->icsk_probes_out >= keepalive_probes(tp))) {
			tcp_send_active_reset(sk, GFP_ATOMIC);
			tcp_write_err(sk);
			goto out;
		}
		if (tcp_write_wakeup(sk) <= 0) {
			icsk->icsk_probes_out++;
			elapsed = keepalive_intvl_when(tp);
		} else {
			/* If keepalive was lost due to local congestion,
			 * try harder.
			 */
			elapsed = TCP_RESOURCE_PROBE_INTERVAL;
		}
	} else {
		/* It is tp->rcv_tstamp + keepalive_time_when(tp) */
		elapsed = keepalive_time_when(tp) - elapsed;
	}

	sk_mem_reclaim(sk);

resched:
	inet_csk_reset_keepalive_timer(sk, elapsed);
	goto out;

death:
	tcp_done(sk);

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}