/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *		Juan Jose Ciarlante:	ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *		Vitaly E. Lavrov :	Transparent proxy revived after year
 *					coma.
 *		Andi Kleen :		Fix new listen.
 *		Andi Kleen :		Fix accept error reporting.
 *		YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *		Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *						a single port at the same time.
 */
#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

static __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}
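
/*
 * Both of these knobs are exported via /proc/sys/net/ipv4 (as
 * tcp_tw_reuse and tcp_low_latency). For illustration only, they are
 * typically toggled from userspace with:
 *
 *	sysctl -w net.ipv4.tcp_tw_reuse=1
 *	sysctl -w net.ipv4.tcp_low_latency=1
 */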
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     sock_owned_by_user(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	sk_rcv_saddr_set(sk, inet->inet_saddr);

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
		tcp_fetch_timewait_stamp(sk, &rt->dst);

	inet->inet_dport = usin->sin_port;
	sk_daddr_set(sk, daddr);

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and do not release the socket
	 * lock; we select a source port, enter ourselves into the hash
	 * tables and complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	inet_set_txhash(sk);

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);

	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
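
/*
 * For reference, everything tcp_v4_connect() does is driven by an
 * ordinary connect() on a SOCK_STREAM socket. A minimal userspace
 * sketch (illustrative only; 192.0.2.1:80 is a documentation address,
 * not anything this file mandates):
 *
 *	#include <arpa/inet.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	int tcp_connect_example(void)
 *	{
 *		struct sockaddr_in sin = { .sin_family = AF_INET,
 *					   .sin_port = htons(80) };
 *		int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *		if (fd < 0)
 *			return -1;
 *		inet_pton(AF_INET, "192.0.2.1", &sin.sin_addr);
 *		// Enters tcp_v4_connect(): route lookup, source port
 *		// selection, ISN generation, then the SYN transmission.
 *		if (connect(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		return fd;
 *	}
 */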
/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);
	u32 mtu = tcp_sk(sk)->mtu_info;

	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);
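
/*
 * The inet->pmtudisc test above reflects the IP_MTU_DISCOVER socket
 * option. A hedged userspace sketch of selecting a PMTU mode ("fd" is
 * assumed to be a TCP socket; the constants come from <netinet/in.h>):
 *
 *	#include <netinet/in.h>
 *	#include <sys/socket.h>
 *
 *	int set_pmtu_mode(int fd, int mode)
 *	{
 *		// mode is e.g. IP_PMTUDISC_DO (always set DF) or
 *		// IP_PMTUDISC_DONT (never set DF, no PMTU discovery).
 *		return setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER,
 *				  &mode, sizeof(mode));
 *	}
 */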
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}

/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq)
{
	struct request_sock *req = inet_reqsk(sk);
	struct net *net = sock_net(sk);

	/* ICMPs are not backlogged, hence we cannot get
	 * an established socket here.
	 */
	WARN_ON(req->sk);

	if (seq != tcp_rsk(req)->snt_isn) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		reqsk_put(req);
	} else {
		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS);
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
	}
}
EXPORT_SYMBOL(tcp_req_err);
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *fastopen;
	__u32 seq, snd_una;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
				       th->dest, iph->saddr, ntohs(th->source),
				       inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq);

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always <576bytes so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff || fastopen)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
					       TCP_TIMEOUT_INIT;
		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto -
			    min(icsk->icsk_rto,
				tcp_time_stamp - tcp_skb_timestamp(skb));

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && fastopen->sk == NULL)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters, even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 *							--ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
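
/*
 * For illustration, the pseudo-header folding that tcp_v4_check() does
 * can be written out in plain C. A minimal sketch (hypothetical helper,
 * not a kernel API): a one's-complement sum over the RFC 793
 * pseudo-header (source, destination, protocol 6, length) followed by
 * the TCP header and payload. Addresses are taken in host byte order
 * here; the kernel helpers operate on __be32 but fold the same way.
 *
 *	#include <stddef.h>
 *	#include <stdint.h>
 *
 *	static uint16_t tcp_checksum(uint32_t saddr, uint32_t daddr,
 *				     const uint8_t *seg, size_t len)
 *	{
 *		uint32_t sum = 0;
 *		size_t i;
 *
 *		// Pseudo-header words.
 *		sum += (saddr >> 16) + (saddr & 0xffff);
 *		sum += (daddr >> 16) + (daddr & 0xffff);
 *		sum += 6 + (uint32_t)len;	// zero pad + proto, length
 *		// Segment, 16 bits at a time; pad an odd tail byte.
 *		for (i = 0; i + 1 < len; i += 2)
 *			sum += ((uint32_t)seg[i] << 8) | seg[i + 1];
 *		if (len & 1)
 *			sum += (uint32_t)seg[len - 1] << 8;
 *		while (sum >> 16)		// fold the carries
 *			sum = (sum & 0xffff) + (sum >> 16);
 *		return (uint16_t)~sum;
 *	}
 */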
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So that we build reply only basing on parameters
 *		arrived with segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we do not lose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(net,
					     &tcp_hashinfo, ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			return;
		rcu_read_lock();
		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto release_sk1;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					     &ip_hdr(skb)->daddr,
					     AF_INET) : NULL;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
	/* When socket is gone, all binding information is lost.
	 * routing might fail in this case. No choice here, if we choose to force
	 * input interface, we will misroute in case of asymmetric route.
	 */
	if (sk)
		arg.bound_dev_if = sk->sk_bound_dev_if;

	arg.tos = ip_hdr(skb)->tos;
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}
/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
			tcp_time_stamp,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}
/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      u16 queue_mapping,
			      struct tcp_fastopen_cookie *foc)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, foc);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

		skb_set_queue_mapping(skb, queue_mapping);
		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	return err;
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}
/*
 *	Return true if a syncookie should be sent
 */
bool tcp_syn_flood_action(struct sock *sk,
			  const struct sk_buff *skb,
			  const char *proto)
{
	const char *msg = "Dropping request";
	bool want_cookie = false;
	struct listen_sock *lopt;

#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies) {
		msg = "Sending cookies";
		want_cookie = true;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
	} else
#endif
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);

	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
	if (!lopt->synflood_warned && sysctl_tcp_syncookies != 2) {
		lopt->synflood_warned = 1;
		pr_info("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
			proto, ntohs(tcp_hdr(skb)->dest), msg);
	}
	return want_cookie;
}
EXPORT_SYMBOL(tcp_syn_flood_action);
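
/*
 * Note on the sysctl_tcp_syncookies values tested above (the documented
 * net.ipv4.tcp_syncookies settings): 0 disables syncookies, 1 sends
 * them only when the SYN backlog overflows, and 2 sends them
 * unconditionally; that is why the flood warning is suppressed for 2.
 */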
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       sock_owned_by_user(sk) ||
				       lockdep_is_held(&sk->sk_lock.slock));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size))
			return key;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);

struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_rsk(req)->ir_rmt_addr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}

/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (!tcp_alloc_md5sig_pool()) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);

int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);
static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}

static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}
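
/*
 * The parser above implements the TCP_MD5SIG socket option. A hedged
 * userspace sketch of installing an RFC 2385 key for a peer ("fd" is a
 * TCP socket; struct tcp_md5sig comes from <netinet/tcp.h> on glibc,
 * otherwise <linux/tcp.h>):
 *
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *
 *	int install_md5_key(int fd, const struct sockaddr_in *peer,
 *			    const void *key, int keylen)
 *	{
 *		struct tcp_md5sig md5;
 *
 *		memset(&md5, 0, sizeof(md5));
 *		memcpy(&md5.tcpm_addr, peer, sizeof(*peer));
 *		md5.tcpm_keylen = keylen;
 *		memcpy(md5.tcpm_key, key, keylen);
 *		// Lands in tcp_v4_parse_md5_keys() via setsockopt().
 *		return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG,
 *				  &md5, sizeof(md5));
 *	}
 */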
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			const struct sock *sk, const struct request_sock *req,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->inet_saddr;
		daddr = inet_sk(sk)->inet_daddr;
	} else if (req) {
		saddr = inet_rsk(req)->ir_loc_addr;
		daddr = inet_rsk(req)->ir_rmt_addr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
static bool __tcp_v4_inbound_md5_hash(struct sock *sk,
				      const struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
					     : "");
		return true;
	}
	return false;
}

static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	bool ret;

	rcu_read_lock();
	ret = __tcp_v4_inbound_md5_hash(sk, skb);
	rcu_read_unlock();

	return ret;
}

#endif

static void tcp_v4_init_req(struct request_sock *req, struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
	ireq->no_srccheck = inet_sk(sk_listener)->transparent;
	ireq->opt = tcp_v4_save_options(skb);
	ireq->ireq_family = AF_INET;
}
static struct dst_entry *tcp_v4_route_req(struct sock *sk, struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);

	if (strict) {
		if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
			*strict = true;
		else
			*strict = false;
	}

	return dst;
}

struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.mss_clamp	=	TCP_MSS_DEFAULT,
#ifdef CONFIG_TCP_MD5SIG
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
#endif
	.init_req	=	tcp_v4_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v4_init_sequence,
#endif
	.route_req	=	tcp_v4_route_req,
	.init_seq	=	tcp_v4_init_sequence,
	.send_synack	=	tcp_v4_send_synack,
	.queue_hash_add =	inet_csk_reqsk_queue_hash_add,
};

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&tcp_request_sock_ops,
				&tcp_request_sock_ipv4_ops, sk, skb);

drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	sk_daddr_set(newsk, ireq->ir_rmt_addr);
	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
	newinet->inet_saddr   = ireq->ir_loc_addr;
	inet_opt	      = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	inet_set_txhash(newsk);
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	__inet_hash_nolisten(newsk, NULL);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
put_and_exit:
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct request_sock *req;
	struct sock *nsk;

	req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr);
	if (req) {
		nsk = tcp_check_req(sk, skb, req, false);
		if (!nsk)
			reqsk_put(req);
		return nsk;
	}

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
				      th->source, iph->daddr, th->dest, inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v4_check(sk, skb);
#endif
	return sk;
}
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, 0) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);

		if (!nsk)
			goto discard;
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(sk, skb);
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);
void tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif);
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = sk->sk_rx_dst;

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}

/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see, why it failed. 8)8)				  --ANK
 *
 */
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return false;

	if (skb->len <= tcp_hdrlen(skb) &&
	    skb_queue_len(&tp->ucopy.prequeue) == 0)
		return false;

	/* Before escaping RCU protected region, we need to take care of skb
	 * dst. Prequeue is only enabled for established sockets.
	 * For such sockets, we might need the skb dst only to set sk->sk_rx_dst
	 * Instead of doing full sk_rx_dst validity here, let's perform
	 * an optimistic check.
	 */
	if (likely(sk->sk_rx_dst))
		skb_dst_drop(skb);
	else
		skb_dst_force(skb);

	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (tp->ucopy.memory > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
			sk_backlog_rcv(sk, skb1);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPPREQUEUEDROPPED);
		}

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_sync_poll(sk_sleep(sk),
					   POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return true;
}
EXPORT_SYMBOL(tcp_prequeue);
int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */

	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
		goto csum_error;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	/* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
	 * barrier() makes sure compiler wont play fool^Waliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
		sizeof(struct inet_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked	 = 0;

	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

#ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible
	 * if:
	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
	 *  o There is an MD5 option and we're not expecting one
	 */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard_and_relse;
#endif

	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	sk_incoming_cpu_update(sk);
	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v4_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2)) {
		inet_twsk_put(inet_twsk(sk));
		goto bad_packet;
	}
	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->saddr, th->source,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
			inet_twsk_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};

void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst) {
		dst_hold(dst);
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	}
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);
const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
EXPORT_SYMBOL(ipv4_specific);

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}
void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Cleanup up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(tp->md5sig_info, rcu);
		tp->md5sig_info = NULL;
	}
#endif

	/* Clean prequeue, it must be empty really */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	BUG_ON(tp->fastopen_rsk != NULL);

	/* If socket is aborted during connect operation */
	tcp_free_fastopen_req(tp);

	sk_sockets_allocated_dec(sk);
	sock_release_memcg(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

/*
 * Get next listener socket follow cur.  If cur is NULL, get first socket
 * starting from bucket given in st->bucket; when st->bucket is zero the
 * very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk	  = sk_nulls_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_nulls_next(sk);
	}
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	spin_unlock_bh(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

static inline bool empty_bucket(const struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
}

/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		spin_unlock_bh(lock);
	}
out:
	return rc;
}

static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			return sk;
	}

	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
	++st->bucket;
	return established_get_first(seq);
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}
static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}

static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}

static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}
static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}

static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}

int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family		= afinfo->family;
	s->last_pos		= 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);
int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start		= tcp_seq_start;
	afinfo->seq_ops.next		= tcp_seq_next;
	afinfo->seq_ops.stop		= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(tcp_proc_unregister);
static void get_openreq4(const struct request_sock *req,
			 struct seq_file *f, int i, kuid_t uid)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->rsk_timer.expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
		i,
		ireq->ir_loc_addr,
		ireq->ir_num,
		ireq->ir_rmt_addr,
		ntohs(ireq->ir_rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f), uid),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		0,
		req);
}
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	if (sk->sk_state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		sk->sk_state == TCP_LISTEN ?
		    (fastopenq ? fastopenq->max_qlen : 0) :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}
static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{
	__be32 dest, src;
	__u16 destp, srcp;
	s32 delta = tw->tw_ttd - inet_tw_time_stamp();

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	seq_setwidth(seq, TMPSZ - 1);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (sk->sk_state == TCP_TIME_WAIT)
			get_timewait4_sock(v, seq, st->num);
		else
			get_tcp4_sock(v, seq, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(v, seq, st->num, st->uid);
		break;
	}
out:
	seq_pad(seq, '\n');
	return 0;
}
static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};

static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
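
/*
 * The seq_file handlers above are what back /proc/net/tcp. A hedged
 * userspace sketch that pulls out the local port and state columns
 * (the field layout matches the seq_printf() format in get_tcp4_sock();
 * the parsing below is illustrative, not a kernel interface):
 *
 *	#include <stdio.h>
 *
 *	int dump_tcp4_states(void)
 *	{
 *		char line[512];
 *		unsigned int local_ip, local_port, state;
 *		FILE *f = fopen("/proc/net/tcp", "r");
 *
 *		if (!f)
 *			return -1;
 *		fgets(line, sizeof(line), f);	// skip the header row
 *		while (fgets(line, sizeof(line), f)) {
 *			if (sscanf(line, "%*d: %8X:%4X %*8X:%*4X %2X",
 *				   &local_ip, &local_port, &state) == 3)
 *				printf("port %u state %#x\n",
 *				       local_port, state);
 *		}
 *		fclose(f);
 *		return 0;
 *	}
 */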
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.init_cgroup		= tcp_init_cgroup,
	.destroy_cgroup		= tcp_destroy_cgroup,
	.proto_cgroup		= tcp_proto_cgroup,
#endif
};
EXPORT_SYMBOL(tcp_prot);
static void __net_exit tcp_sk_exit(struct net *net)
{
	int cpu;

	for_each_possible_cpu(cpu)
		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
	free_percpu(net->ipv4.tcp_sk);
}

static int __net_init tcp_sk_init(struct net *net)
{
	int res, cpu;

	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
	if (!net->ipv4.tcp_sk)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct sock *sk;

		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
					   IPPROTO_TCP, net);
		if (res)
			goto fail;
		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
	}
	net->ipv4.sysctl_tcp_ecn = 2;
	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;

	return 0;

fail:
	tcp_sk_exit(net);

	return res;
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}