/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/module.h>
#include <linux/gfp.h>
#include <net/tcp.h>

int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
int sysctl_tcp_keepalive_probes __read_mostly = TCP_KEEPALIVE_PROBES;
int sysctl_tcp_keepalive_intvl __read_mostly = TCP_KEEPALIVE_INTVL;
int sysctl_tcp_retries1 __read_mostly = TCP_RETR1;
int sysctl_tcp_retries2 __read_mostly = TCP_RETR2;
int sysctl_tcp_orphan_retries __read_mostly;
int sysctl_tcp_thin_linear_timeouts __read_mostly;

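/* Note (editorial, not from the original file): each knob above is
 * exported through the ctl_table in net/ipv4/sysctl_net_ipv4.c, so e.g.
 * sysctl_tcp_syn_retries appears as /proc/sys/net/ipv4/tcp_syn_retries
 * and can be tuned with "sysctl -w net.ipv4.tcp_syn_retries=6".
 */
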
/* Function to reset tcp_ack related sysctls on resetting master control */
void set_tcp_default(void)
{
	sysctl_tcp_delack_seg = TCP_DELACK_SEG;
}

/* sysctl handler for tcp_ack related master control */
int tcp_proc_delayed_ack_control(struct ctl_table *table, int write,
				 void __user *buffer, size_t *length,
				 loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, length, ppos);

	/* The ret value will be 0 if the input validation is successful
	 * and the values are written to the sysctl table. If not, the stack
	 * will continue to work with the currently configured values.
	 */
	return ret;
}

/* sysctl handler for tcp_ack related master control */
int tcp_use_userconfig_sysctl_handler(struct ctl_table *table, int write,
				      void __user *buffer, size_t *length,
				      loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, length, ppos);

	if (write && ret == 0) {
		if (!sysctl_tcp_use_userconfig)
			set_tcp_default();
	}
	return ret;
}

static void tcp_write_err(struct sock *sk)
{
	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
	sk->sk_error_report(sk);

	tcp_done(sk);
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}

/* Do not allow orphaned sockets to eat all our resources.
 * This is a direct violation of TCP specs, but it is required
 * to prevent DoS attacks. It is called when a retransmission timeout
 * or zero probe timeout occurs on an orphaned socket.
 *
 * Also close if our net namespace is exiting; in that case there is no
 * hope of ever communicating again since all netns interfaces are already
 * down (or about to be down), and we need to release our dst references,
 * which have been moved to the netns loopback interface, so the namespace
 * can finish exiting. This condition is only possible if we are a kernel
 * socket, as those do not hold references to the namespace.
 *
 * The criteria are still not confirmed experimentally and may change.
 * We kill the socket, if:
 * 1. If number of orphaned sockets exceeds an administratively configured
 *    limit.
 * 2. If we have strong memory pressure.
 * 3. If our net namespace is exiting.
 */
static int tcp_out_of_resources(struct sock *sk, bool do_reset)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int shift = 0;

	/* If peer does not open window for long time, or did not transmit
	 * anything for long time, penalize it. */
	if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
		shift++;

	/* If some dubious ICMP arrived, penalize even more. */
	if (sk->sk_err_soft)
		shift++;

	if (tcp_check_oom(sk, shift)) {
		/* Catch exceptional cases, when connection requires reset.
		 * 1. Last segment was sent recently. */
		if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
		    /* 2. Window is closed. */
		    (!tp->snd_wnd && !tp->packets_out))
			do_reset = true;
		if (do_reset)
			tcp_send_active_reset(sk, GFP_ATOMIC);
		tcp_done(sk);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
		return 1;
	}

	if (!check_net(sock_net(sk))) {
		/* Not possible to send reset; just close */
		tcp_done(sk);
		return 1;
	}

	return 0;
}

/* Calculate maximal number of retries on an orphaned socket. */
static int tcp_orphan_retries(struct sock *sk, bool alive)
{
	int retries = sysctl_tcp_orphan_retries; /* May be zero. */

	/* We know from an ICMP that something is wrong. */
	if (sk->sk_err_soft && !alive)
		retries = 0;

	/* However, if socket sent something recently, select some safe
	 * number of retries. 8 corresponds to >100 seconds with minimal
	 * RTO of 200msec. */
	if (retries == 0 && alive)
		retries = 8;
	return retries;
}

static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
{
	struct net *net = sock_net(sk);

	/* Black hole detection */
	if (net->ipv4.sysctl_tcp_mtu_probing) {
		if (!icsk->icsk_mtup.enabled) {
			icsk->icsk_mtup.enabled = 1;
			icsk->icsk_mtup.probe_timestamp = tcp_time_stamp;
			tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		} else {
			struct tcp_sock *tp = tcp_sk(sk);
			int mss;

			mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
			mss = min(net->ipv4.sysctl_tcp_base_mss, mss);
			mss = max(mss, 68 - tp->tcp_header_len);
			mss = max(mss, net->ipv4.sysctl_tcp_min_snd_mss);
			icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
			tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		}
	}
}

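/* Worked example (editorial, not from the original file): assume the MSS
 * for the current search_low MTU is 1400 and sysctl_tcp_base_mss is 1024.
 * One timeout halves 1400 to 700; min() against 1024 leaves 700, the
 * floors (68 minus the header length, and tcp_min_snd_mss) do not bind,
 * and search_low becomes the MTU that carries a 700-byte MSS.  Each
 * further timeout halves again, so the path-MTU search window walks
 * downward until packets get through or a floor is reached.
 */
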
/* This function calculates a "timeout" which is equivalent to the timeout of a
 * TCP connection after "boundary" unsuccessful, exponentially backed-off
 * retransmissions with an initial RTO of TCP_RTO_MIN, or TCP_TIMEOUT_INIT if
 * the syn_set flag is set.
 */
static bool retransmits_timed_out(struct sock *sk,
				  unsigned int boundary,
				  unsigned int timeout,
				  bool syn_set)
{
	unsigned int linear_backoff_thresh, start_ts;
	unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;

	if (!inet_csk(sk)->icsk_retransmits)
		return false;

	start_ts = tcp_sk(sk)->retrans_stamp;
	if (unlikely(!start_ts))
		start_ts = tcp_skb_timestamp(tcp_write_queue_head(sk));

	if (likely(timeout == 0)) {
		linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);

		if (boundary <= linear_backoff_thresh)
			timeout = ((2 << boundary) - 1) * rto_base;
		else
			timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
				(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
	}
	return (tcp_time_stamp - start_ts) >= timeout;
}

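/* Worked example (editorial, not from the original file): with
 * TCP_RTO_MIN = 200 ms and TCP_RTO_MAX = 120 s,
 * linear_backoff_thresh = ilog2(120000 / 200) = ilog2(600) = 9.
 * For boundary = 8 the timeout is ((2 << 8) - 1) * 200 ms = 102.2 s,
 * i.e. the sum of the doubling series 0.2 + 0.4 + ... + 51.2 s; this is
 * the ">100 seconds" cited in tcp_orphan_retries() above.  For
 * boundary > 9 each further retransmission adds a flat TCP_RTO_MAX,
 * since the RTO has reached its clamp.
 */
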
/* A write timeout has occurred. Process the after effects. */
static int tcp_write_timeout(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int retry_until;
	bool do_reset, syn_set = false;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		if (icsk->icsk_retransmits) {
			dst_negative_advice(sk);
			if (tp->syn_fastopen || tp->syn_data)
				tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
			if (tp->syn_data && icsk->icsk_retransmits == 1)
				NET_INC_STATS_BH(sock_net(sk),
						 LINUX_MIB_TCPFASTOPENACTIVEFAIL);
		}
		retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
		syn_set = true;
	} else {
		if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) {
			/* Some middle-boxes may black-hole Fast Open _after_
			 * the handshake. Therefore we conservatively disable
			 * Fast Open on this path on recurring timeouts with
			 * few or zero bytes acked after Fast Open.
			 */
			if (tp->syn_data_acked &&
			    tp->bytes_acked <= tp->rx_opt.mss_clamp) {
				tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
				if (icsk->icsk_retransmits == sysctl_tcp_retries1)
					NET_INC_STATS_BH(sock_net(sk),
							 LINUX_MIB_TCPFASTOPENACTIVEFAIL);
			}
			/* Black hole detection */
			tcp_mtu_probing(icsk, sk);

			dst_negative_advice(sk);
		}

		retry_until = sysctl_tcp_retries2;
		if (sock_flag(sk, SOCK_DEAD)) {
			const bool alive = icsk->icsk_rto < TCP_RTO_MAX;

			retry_until = tcp_orphan_retries(sk, alive);
			do_reset = alive ||
				   !retransmits_timed_out(sk, retry_until, 0, 0);

			if (tcp_out_of_resources(sk, do_reset))
				return 1;
		}
	}

	if (retransmits_timed_out(sk, retry_until,
				  syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
		/* Has it gone just too far? */
		tcp_write_err(sk);
		return 1;
	}
	return 0;
}

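/* Worked example (editorial, not from the original file): with the
 * default sysctl_tcp_retries2 = 15 (TCP_RETR2), boundary 15 lies past
 * linear_backoff_thresh = 9, so retransmits_timed_out() computes
 * ((2 << 9) - 1) * 200 ms + (15 - 9) * 120 s = 204.6 s + 720 s = 924.6 s
 * for an established connection; this is the figure quoted in
 * Documentation/networking/ip-sysctl.txt.
 */
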
void tcp_delack_timer_handler(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk_mem_reclaim_partial(sk);

	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
	    !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		goto out;

	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
		goto out;
	}
	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	if (!skb_queue_empty(&tp->ucopy.prequeue)) {
		struct sk_buff *skb;

		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);

		while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
			sk_backlog_rcv(sk, skb);

		tp->ucopy.memory = 0;
	}

	if (inet_csk_ack_scheduled(sk)) {
		if (!icsk->icsk_ack.pingpong) {
			/* Delayed ACK missed: inflate ATO. */
			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
		} else {
			/* Delayed ACK missed: leave pingpong mode and
			 * deflate ATO.
			 */
			icsk->icsk_ack.pingpong = 0;
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		}
		tcp_send_ack(sk);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
	}

out:
	if (tcp_under_memory_pressure(sk))
		sk_mem_reclaim(sk);
}

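/* Usage note (editorial, not from the original file): userspace can opt
 * out of delayed ACKs temporarily with TCP_QUICKACK, which clears
 * pingpong mode so pending ACKs are sent immediately:
 *
 *	int one = 1;
 *	setsockopt(fd, IPPROTO_TCP, TCP_QUICKACK, &one, sizeof(one));
 *
 * The setting is not sticky; the stack may re-enter pingpong mode as the
 * connection continues.
 */
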
static void tcp_delack_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_delack_timer_handler(sk);
	} else {
		inet_csk(sk)->icsk_ack.blocked = 1;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

static void tcp_probe_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int max_probes;
	u32 start_ts;

	if (tp->packets_out || !tcp_send_head(sk)) {
		icsk->icsk_probes_out = 0;
		return;
	}

	/* RFC 1122 4.2.2.17 requires the sender to stay open indefinitely as
	 * long as the receiver continues to respond probes. We support this by
	 * default and reset icsk_probes_out with incoming ACKs. But if the
	 * socket is orphaned or the user specifies TCP_USER_TIMEOUT, we
	 * kill the socket when the retry count and the time exceed the
	 * corresponding system limit. We also implement similar policy when
	 * we use RTO to probe window in tcp_retransmit_timer().
	 */
	start_ts = tcp_skb_timestamp(tcp_send_head(sk));
	if (!start_ts)
		skb_mstamp_get(&tcp_send_head(sk)->skb_mstamp);
	else if (icsk->icsk_user_timeout &&
		 (s32)(tcp_time_stamp - start_ts) > icsk->icsk_user_timeout)
		goto abort;

	max_probes = sysctl_tcp_retries2;
	if (sock_flag(sk, SOCK_DEAD)) {
		const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;

		max_probes = tcp_orphan_retries(sk, alive);
		if (!alive && icsk->icsk_backoff >= max_probes)
			goto abort;
		if (tcp_out_of_resources(sk, true))
			return;
	}

	if (icsk->icsk_probes_out > max_probes) {
abort:		tcp_write_err(sk);
	} else {
		/* Only send another probe if we didn't close things up. */
		tcp_send_probe0(sk);
	}
}

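/* Usage note (editorial, not from the original file): the
 * icsk_user_timeout consulted above comes from the TCP_USER_TIMEOUT
 * socket option, given in milliseconds, e.g. to abort after roughly 30
 * seconds of unacknowledged data or probes:
 *
 *	unsigned int tmo_ms = 30000;
 *	setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &tmo_ms, sizeof(tmo_ms));
 */
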
/*
 *	Timer for Fast Open socket to retransmit SYNACK. Note that the
 *	sk here is the child socket, not the parent (listener) socket.
 */
static void tcp_fastopen_synack_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int max_retries = icsk->icsk_syn_retries ? :
	    sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
	struct request_sock *req;

	req = tcp_sk(sk)->fastopen_rsk;
	req->rsk_ops->syn_ack_timeout(req);

	if (req->num_timeout >= max_retries) {
		tcp_write_err(sk);
		return;
	}
	/* XXX (TFO) - Unlike regular SYN-ACK retransmit, we ignore error
	 * returned from rtx_syn_ack() to make it more persistent like
	 * regular retransmit because if the child socket has been accepted
	 * it's not good to give up too easily.
	 */
	inet_rtx_syn_ack(sk, req);
	req->num_timeout++;
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
}

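/* Usage note (editorial, not from the original file): a listener opts in
 * to Fast Open with the TCP_FASTOPEN socket option, whose value caps the
 * queue of not-yet-acknowledged TFO requests:
 *
 *	int qlen = 16;
 *	setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));
 *
 * A child created from such a request keeps fastopen_rsk set until its
 * SYN-ACK is acknowledged, which is why tcp_retransmit_timer() below
 * diverts its timeouts to this function.
 */
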
/*
 *	The TCP retransmit timer.
 */
void tcp_retransmit_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (tp->fastopen_rsk) {
		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
			     sk->sk_state != TCP_FIN_WAIT1);
		tcp_fastopen_synack_timer(sk);
		/* Before we receive ACK to our SYN-ACK don't retransmit
		 * anything else (e.g., data or FIN segments).
		 */
		return;
	}
	if (!tp->packets_out)
		goto out;

	WARN_ON(tcp_write_queue_empty(sk));

	tp->tlp_high_seq = 0;

	if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
	    !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
		/* Receiver dastardly shrinks window. Our retransmits
		 * become zero probes, but we should not timeout this
		 * connection. If the socket is an orphan, time it out,
		 * we cannot allow such beasts to hang infinitely.
		 */
		struct inet_sock *inet = inet_sk(sk);
		if (sk->sk_family == AF_INET) {
			net_dbg_ratelimited("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
					    &inet->inet_daddr,
					    ntohs(inet->inet_dport),
					    inet->inet_num,
					    tp->snd_una, tp->snd_nxt);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (sk->sk_family == AF_INET6) {
			net_dbg_ratelimited("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
					    &sk->sk_v6_daddr,
					    ntohs(inet->inet_dport),
					    inet->inet_num,
					    tp->snd_una, tp->snd_nxt);
		}
#endif
		if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
			tcp_write_err(sk);
			goto out;
		}
		tcp_enter_loss(sk);
		tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
		__sk_dst_reset(sk);
		goto out_reset_timer;
	}

	if (tcp_write_timeout(sk))
		goto out;

	if (icsk->icsk_retransmits == 0) {
		int mib_idx;

		if (icsk->icsk_ca_state == TCP_CA_Recovery) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
			else
				mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
		} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
			mib_idx = LINUX_MIB_TCPLOSSFAILURES;
		} else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
			   tp->sacked_out) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKFAILURES;
			else
				mib_idx = LINUX_MIB_TCPRENOFAILURES;
		} else {
			mib_idx = LINUX_MIB_TCPTIMEOUTS;
		}
		NET_INC_STATS_BH(sock_net(sk), mib_idx);
	}

	tcp_enter_loss(sk);

	if (tcp_retransmit_skb(sk, tcp_write_queue_head(sk)) > 0) {
		/* Retransmission failed because of local congestion,
		 * do not backoff.
		 */
		if (!icsk->icsk_retransmits)
			icsk->icsk_retransmits = 1;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL),
					  TCP_RTO_MAX);
		goto out;
	}

	/* Increase the timeout each time we retransmit. Note that
	 * we do not increase the rtt estimate. rto is initialized
	 * from rtt, but increases here. Jacobson (SIGCOMM 88) suggests
	 * that doubling rto each time is the least we can get away with.
	 * In KA9Q, Karn uses this for the first few times, and then
	 * goes to quadratic. netBSD doubles, but only goes up to *64,
	 * and clamps at 1 to 64 sec afterwards. Note that 120 sec is
	 * defined in the protocol as the maximum possible RTT. I guess
	 * we'll have to use something other than TCP to talk to the
	 * University of Mars.
	 *
	 * PAWS allows us longer timeouts and large windows, so once
	 * implemented ftp to mars will work nicely. We will have to fix
	 * the 120 second clamps though!
	 */
	icsk->icsk_backoff++;
	icsk->icsk_retransmits++;

out_reset_timer:
	/* If stream is thin, use linear timeouts. Since 'icsk_backoff' is
	 * used to reset timer, set to 0. Recalculate 'icsk_rto' as this
	 * might be increased if the stream oscillates between thin and thick,
	 * thus the old value might already be too high compared to the value
	 * set by 'tcp_set_rto' in tcp_input.c which resets the rto without
	 * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
	 * exponential backoff behaviour, to avoid continuing to hammer
	 * linear-timeout retransmissions into a black hole.
	 */
	if (sk->sk_state == TCP_ESTABLISHED &&
	    (tp->thin_lto || sysctl_tcp_thin_linear_timeouts) &&
	    tcp_stream_is_thin(tp) &&
	    icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
		icsk->icsk_backoff = 0;
		icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
	} else {
		/* Use normal (exponential) backoff */
		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
	}
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
	if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0, 0))
		__sk_dst_reset(sk);

out:;
}

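/* Usage note (editorial, not from the original file): the thin-stream
 * linear timeouts above are enabled globally with
 * "sysctl -w net.ipv4.tcp_thin_linear_timeouts=1" or per socket with
 * TCP_THIN_LINEAR_TIMEOUTS:
 *
 *	int one = 1;
 *	setsockopt(fd, IPPROTO_TCP, TCP_THIN_LINEAR_TIMEOUTS, &one, sizeof(one));
 *
 * tcp_stream_is_thin() must also hold (roughly, fewer than four packets
 * in flight and past initial slow start), so bulk transfers keep the
 * normal exponential backoff.
 */
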
void tcp_write_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int event;

	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
	    !icsk->icsk_pending)
		goto out;

	if (time_after(icsk->icsk_timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
		goto out;
	}

	event = icsk->icsk_pending;

	switch (event) {
	case ICSK_TIME_EARLY_RETRANS:
		tcp_resume_early_retransmit(sk);
		break;
	case ICSK_TIME_LOSS_PROBE:
		tcp_send_loss_probe(sk);
		break;
	case ICSK_TIME_RETRANS:
		icsk->icsk_pending = 0;
		tcp_retransmit_timer(sk);
		break;
	case ICSK_TIME_PROBE0:
		icsk->icsk_pending = 0;
		tcp_probe_timer(sk);
		break;
	}

out:
	sk_mem_reclaim(sk);
}

static void tcp_write_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_write_timer_handler(sk);
	} else {
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

void tcp_syn_ack_timeout(const struct request_sock *req)
{
	struct net *net = read_pnet(&inet_rsk(req)->ireq_net);

	NET_INC_STATS_BH(net, LINUX_MIB_TCPTIMEOUTS);
}
EXPORT_SYMBOL(tcp_syn_ack_timeout);

void tcp_set_keepalive(struct sock *sk, int val)
{
	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		return;

	if (val && !sock_flag(sk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
	else if (!val)
		inet_csk_delete_keepalive_timer(sk);
}

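/* Usage note (editorial, not from the original file): this path is
 * reached via the SO_KEEPALIVE socket option; the per-socket values read
 * by keepalive_time_when() and friends can be set alongside it:
 *
 *	int one = 1, idle = 60, intvl = 10, cnt = 5;
 *	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &one, sizeof(one));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
 *
 * i.e. first probe after 60 s idle, then every 10 s, giving up after 5
 * unanswered probes.
 */
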
static void tcp_keepalive_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	u32 elapsed;

	/* Only process if socket is not in use. */
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		inet_csk_reset_keepalive_timer(sk, HZ/20);
		goto out;
	}

	if (sk->sk_state == TCP_LISTEN) {
		pr_err("Hmm... keepalive on a LISTEN ???\n");
		goto out;
	}

	if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
		if (tp->linger2 >= 0) {
			const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;

			if (tmo > 0) {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
		tcp_send_active_reset(sk, GFP_ATOMIC);
		goto death;
	}

	if (!sock_flag(sk, SOCK_KEEPOPEN) ||
	    ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
		goto out;

	elapsed = keepalive_time_when(tp);

	/* It is alive without keepalive 8) */
	if (tp->packets_out || tcp_send_head(sk))
		goto resched;

	elapsed = keepalive_time_elapsed(tp);

	if (elapsed >= keepalive_time_when(tp)) {
		/* If the TCP_USER_TIMEOUT option is enabled, use that
		 * to determine when to timeout instead.
		 */
		if ((icsk->icsk_user_timeout != 0 &&
		     elapsed >= icsk->icsk_user_timeout &&
		     icsk->icsk_probes_out > 0) ||
		    (icsk->icsk_user_timeout == 0 &&
		     icsk->icsk_probes_out >= keepalive_probes(tp))) {
			tcp_send_active_reset(sk, GFP_ATOMIC);
			tcp_write_err(sk);
			goto out;
		}
		if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
			icsk->icsk_probes_out++;
			elapsed = keepalive_intvl_when(tp);
		} else {
			/* If keepalive was lost due to local congestion,
			 * try harder.
			 */
			elapsed = TCP_RESOURCE_PROBE_INTERVAL;
		}
	} else {
		/* It is tp->rcv_tstamp + keepalive_time_when(tp) */
		elapsed = keepalive_time_when(tp) - elapsed;
	}

	sk_mem_reclaim(sk);

resched:
	inet_csk_reset_keepalive_timer(sk, elapsed);
	goto out;

death:
	tcp_done(sk);

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

void tcp_init_xmit_timers(struct sock *sk)
{
	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
				  &tcp_keepalive_timer);
}