1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
7 * Implementation of the Transmission Control Protocol(TCP).
9 * IPv4 specific functions
13 * linux/ipv4/tcp_input.c
14 * linux/ipv4/tcp_output.c
16 * See tcp.c for author information
21 * David S. Miller : New socket lookup architecture.
22 * This code is dedicated to John Dyson.
23 * David S. Miller : Change semantics of established hash,
24 * half is devoted to TIME_WAIT sockets
25 * and the rest go in the other half.
26 * Andi Kleen : Add support for syncookies and fixed
27 * some bugs: ip options weren't passed to
28 * the TCP layer, missed a check for an ACK bit.
30 * Andi Kleen : Implemented fast path mtu discovery.
31 * Fixed many serious bugs in the
32 * request_sock handling and moved
33 * most of it into the af independent code.
34 * Added tail drop and some other bugfixes.
35 * Added new listen semantics.
36 * Mike McLagan : Routing by source
37 * Juan Jose Ciarlante: ip_dynaddr bits
38 * Andi Kleen: various fixes.
39 * Vitaly E. Lavrov : Transparent proxy revived after year
41 * Andi Kleen : Fix new listen.
42 * Andi Kleen : Fix accept error reporting.
43 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
44 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
45 * a single port at the same time.
48 #define pr_fmt(fmt) "TCP: " fmt
50 #include <linux/bottom_half.h>
51 #include <linux/types.h>
52 #include <linux/fcntl.h>
53 #include <linux/module.h>
54 #include <linux/random.h>
55 #include <linux/cache.h>
56 #include <linux/jhash.h>
57 #include <linux/init.h>
58 #include <linux/times.h>
59 #include <linux/slab.h>
61 #include <net/net_namespace.h>
63 #include <net/inet_hashtables.h>
65 #include <net/transp_v6.h>
67 #include <net/inet_common.h>
68 #include <net/timewait_sock.h>
70 #include <net/secure_seq.h>
71 #include <net/busy_poll.h>
73 #include <linux/inet.h>
74 #include <linux/ipv6.h>
75 #include <linux/stddef.h>
76 #include <linux/proc_fs.h>
77 #include <linux/seq_file.h>
78 #include <linux/inetdevice.h>
79 #include <linux/btf_ids.h>
81 #include <crypto/hash.h>
82 #include <linux/scatterlist.h>
84 #include <trace/events/tcp.h>
86 #ifdef CONFIG_TCP_MD5SIG
87 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
88 __be32 daddr, __be32 saddr, const struct tcphdr *th);
91 struct inet_hashinfo tcp_hashinfo;
92 EXPORT_SYMBOL(tcp_hashinfo);
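/* Per-CPU kernel control socket, used by tcp_v4_send_reset() and
 * tcp_v4_send_ack() below to emit replies (RSTs, TIME-WAIT ACKs) that
 * are not associated with a full socket.
 */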
94 static DEFINE_PER_CPU(struct sock *, ipv4_tcp_sk);
96 static u32 tcp_v4_init_seq(const struct sk_buff *skb)
98 return secure_tcp_seq(ip_hdr(skb)->daddr,
101 tcp_hdr(skb)->source);
104 static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
106 return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
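/* Called during connect()-time port selection (via the ->twsk_unique hook
 * from __inet_check_established()): decide whether an existing TIME-WAIT
 * socket for the same four-tuple may be reused for a new outgoing
 * connection. A non-zero return lets the TIME-WAIT bucket be recycled.
 */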
109 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
111 int reuse = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tw_reuse);
112 const struct inet_timewait_sock *tw = inet_twsk(sktw);
113 const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
114 struct tcp_sock *tp = tcp_sk(sk);
117 /* Still does not detect *everything* that goes through
118 * lo, since we require a loopback src or dst address
119 * or direct binding to 'lo' interface.
121 bool loopback = false;
122 if (tw->tw_bound_dev_if == LOOPBACK_IFINDEX)
124 #if IS_ENABLED(CONFIG_IPV6)
125 if (tw->tw_family == AF_INET6) {
126 if (ipv6_addr_loopback(&tw->tw_v6_daddr) ||
127 ipv6_addr_v4mapped_loopback(&tw->tw_v6_daddr) ||
128 ipv6_addr_loopback(&tw->tw_v6_rcv_saddr) ||
129 ipv6_addr_v4mapped_loopback(&tw->tw_v6_rcv_saddr))
134 if (ipv4_is_loopback(tw->tw_daddr) ||
135 ipv4_is_loopback(tw->tw_rcv_saddr))
142 /* With PAWS, it is safe from the viewpoint
143 of data integrity. Even without PAWS it is safe provided sequence
144 spaces do not overlap i.e. at data rates <= 80Mbit/sec.
146 Actually, the idea is close to VJ's one, only timestamp cache is
147 held not per host, but per port pair, and the TW bucket is used as state holder.
150 If TW bucket has been already destroyed we fall back to VJ's scheme
151 and use initial timestamp retrieved from peer table.
153 if (tcptw->tw_ts_recent_stamp &&
154 (!twp || (reuse && time_after32(ktime_get_seconds(),
155 tcptw->tw_ts_recent_stamp)))) {
156 /* In case of repair and re-using TIME-WAIT sockets we still
157 * want to be sure that it is safe as above but honor the
158 * sequence numbers and time stamps set as part of the repair
161 * Without this check re-using a TIME-WAIT socket with TCP
162 * repair would accumulate a -1 on the repair assigned
163 * sequence number. The first time it is reused the sequence
164 * is -1, the second time -2, etc. This fixes that issue
165 * without appearing to create any others.
167 if (likely(!tp->repair)) {
168 u32 seq = tcptw->tw_snd_nxt + 65535 + 2;
172 WRITE_ONCE(tp->write_seq, seq);
173 tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
174 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
182 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
184 static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr,
187 /* This check is replicated from tcp_v4_connect() and intended to
188 * prevent the BPF program called below from accessing bytes that are out
189 * of the bounds specified by the user in addr_len.
191 if (addr_len < sizeof(struct sockaddr_in))
194 sock_owned_by_me(sk);
196 return BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr);
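/* tcp_v4_pre_connect() is the ->pre_connect hook invoked by
 * __inet_stream_connect() before tcp_v4_connect(), giving cgroup BPF
 * programs a chance to inspect or rewrite the destination address.
 */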
199 /* This will initiate an outgoing connection. */
200 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
202 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
203 struct inet_timewait_death_row *tcp_death_row;
204 struct inet_sock *inet = inet_sk(sk);
205 struct tcp_sock *tp = tcp_sk(sk);
206 struct ip_options_rcu *inet_opt;
207 struct net *net = sock_net(sk);
208 __be16 orig_sport, orig_dport;
209 __be32 daddr, nexthop;
214 if (addr_len < sizeof(struct sockaddr_in))
217 if (usin->sin_family != AF_INET)
218 return -EAFNOSUPPORT;
220 nexthop = daddr = usin->sin_addr.s_addr;
221 inet_opt = rcu_dereference_protected(inet->inet_opt,
222 lockdep_sock_is_held(sk));
223 if (inet_opt && inet_opt->opt.srr) {
226 nexthop = inet_opt->opt.faddr;
229 orig_sport = inet->inet_sport;
230 orig_dport = usin->sin_port;
231 fl4 = &inet->cork.fl.u.ip4;
232 rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
233 sk->sk_bound_dev_if, IPPROTO_TCP, orig_sport,
237 if (err == -ENETUNREACH)
238 IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
242 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
247 if (!inet_opt || !inet_opt->opt.srr)
250 tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
252 if (!inet->inet_saddr) {
253 err = inet_bhash2_update_saddr(sk, &fl4->saddr, AF_INET);
259 sk_rcv_saddr_set(sk, inet->inet_saddr);
262 if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
263 /* Reset inherited state */
264 tp->rx_opt.ts_recent = 0;
265 tp->rx_opt.ts_recent_stamp = 0;
266 if (likely(!tp->repair))
267 WRITE_ONCE(tp->write_seq, 0);
270 inet->inet_dport = usin->sin_port;
271 sk_daddr_set(sk, daddr);
273 inet_csk(sk)->icsk_ext_hdr_len = 0;
275 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
277 tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
279 /* Socket identity is still unknown (sport may be zero).
280 * However we set the state to SYN-SENT and, without releasing the socket
281 * lock, select a source port, enter ourselves into the hash tables and
282 * complete initialization after this.
284 tcp_set_state(sk, TCP_SYN_SENT);
285 err = inet_hash_connect(tcp_death_row, sk);
291 rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
292 inet->inet_sport, inet->inet_dport, sk);
298 /* OK, now commit destination to socket. */
299 sk->sk_gso_type = SKB_GSO_TCPV4;
300 sk_setup_caps(sk, &rt->dst);
303 if (likely(!tp->repair)) {
305 WRITE_ONCE(tp->write_seq,
306 secure_tcp_seq(inet->inet_saddr,
310 tp->tsoffset = secure_tcp_ts_off(net, inet->inet_saddr,
314 inet->inet_id = get_random_u16();
316 if (tcp_fastopen_defer_connect(sk, &err))
321 err = tcp_connect(sk);
330 * This unhashes the socket and releases the local port, if necessary.
333 tcp_set_state(sk, TCP_CLOSE);
334 inet_bhash2_reset_saddr(sk);
336 sk->sk_route_caps = 0;
337 inet->inet_dport = 0;
340 EXPORT_SYMBOL(tcp_v4_connect);
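/* Note: tcp_v4_connect() runs with the socket lock held; it is reached
 * from userspace connect() via __inet_stream_connect().
 */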
343 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
344 * It can be called through tcp_release_cb() if socket was owned by user
345 * at the time tcp_v4_err() was called to handle ICMP message.
347 void tcp_v4_mtu_reduced(struct sock *sk)
349 struct inet_sock *inet = inet_sk(sk);
350 struct dst_entry *dst;
353 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
355 mtu = READ_ONCE(tcp_sk(sk)->mtu_info);
356 dst = inet_csk_update_pmtu(sk, mtu);
360 /* Something is about to go wrong... Remember the soft error
361 * in case this connection is not able to recover.
363 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
364 WRITE_ONCE(sk->sk_err_soft, EMSGSIZE);
368 if (inet->pmtudisc != IP_PMTUDISC_DONT &&
369 ip_sk_accept_pmtu(sk) &&
370 inet_csk(sk)->icsk_pmtu_cookie > mtu) {
371 tcp_sync_mss(sk, mtu);
373 /* Resend the TCP packet because it's
374 * clear that the old packet has been
375 * dropped. This is the new "fast" path mtu discovery.
378 tcp_simple_retransmit(sk);
379 } /* else let the usual retransmit timer handle it */
381 EXPORT_SYMBOL(tcp_v4_mtu_reduced);
383 static void do_redirect(struct sk_buff *skb, struct sock *sk)
385 struct dst_entry *dst = __sk_dst_check(sk, 0);
388 dst->ops->redirect(dst, sk, skb);
392 /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
393 void tcp_req_err(struct sock *sk, u32 seq, bool abort)
395 struct request_sock *req = inet_reqsk(sk);
396 struct net *net = sock_net(sk);
398 /* ICMPs are not backlogged, hence we cannot get
399 * an established socket here.
401 if (seq != tcp_rsk(req)->snt_isn) {
402 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
405 * Still in SYN_RECV, just remove it silently.
406 * There is no good way to pass the error to the newly
407 * created socket, and POSIX does not want network
408 * errors returned from accept().
410 inet_csk_reqsk_queue_drop(req->rsk_listener, req);
411 tcp_listendrop(req->rsk_listener);
415 EXPORT_SYMBOL(tcp_req_err);
417 /* TCP-LD (RFC 6069) logic */
418 void tcp_ld_RTO_revert(struct sock *sk, u32 seq)
420 struct inet_connection_sock *icsk = inet_csk(sk);
421 struct tcp_sock *tp = tcp_sk(sk);
426 if (sock_owned_by_user(sk))
429 if (seq != tp->snd_una || !icsk->icsk_retransmits ||
433 skb = tcp_rtx_queue_head(sk);
434 if (WARN_ON_ONCE(!skb))
437 icsk->icsk_backoff--;
438 icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) : TCP_TIMEOUT_INIT;
439 icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
441 tcp_mstamp_refresh(tp);
442 delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
443 remaining = icsk->icsk_rto - usecs_to_jiffies(delta_us);
446 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
447 remaining, TCP_RTO_MAX);
449 /* RTO revert clocked out retransmission.
450 * Will retransmit now.
452 tcp_retransmit_timer(sk);
455 EXPORT_SYMBOL(tcp_ld_RTO_revert);
458 * This routine is called by the ICMP module when it gets some
459 * sort of error condition. If err < 0 then the socket should
460 * be closed and the error returned to the user. If err > 0
461 * it's just the icmp type << 8 | icmp code. After adjustment,
462 * the header points to the first 8 bytes of the tcp header. We need
463 * to find the appropriate port.
465 * The locking strategy used here is very "optimistic". When
466 * someone else accesses the socket the ICMP is just dropped
467 * and for some paths there is no check at all.
468 * A more general error queue to queue errors for later handling
469 * is probably better.
473 int tcp_v4_err(struct sk_buff *skb, u32 info)
475 const struct iphdr *iph = (const struct iphdr *)skb->data;
476 struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
478 struct inet_sock *inet;
479 const int type = icmp_hdr(skb)->type;
480 const int code = icmp_hdr(skb)->code;
482 struct request_sock *fastopen;
485 struct net *net = dev_net(skb->dev);
487 sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
488 iph->daddr, th->dest, iph->saddr,
489 ntohs(th->source), inet_iif(skb), 0);
491 __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
494 if (sk->sk_state == TCP_TIME_WAIT) {
495 inet_twsk_put(inet_twsk(sk));
498 seq = ntohl(th->seq);
499 if (sk->sk_state == TCP_NEW_SYN_RECV) {
500 tcp_req_err(sk, seq, type == ICMP_PARAMETERPROB ||
501 type == ICMP_TIME_EXCEEDED ||
502 (type == ICMP_DEST_UNREACH &&
503 (code == ICMP_NET_UNREACH ||
504 code == ICMP_HOST_UNREACH)));
509 /* If too many ICMPs get dropped on busy
510 * servers this needs to be solved differently.
511 * We do take care of the PMTU discovery (RFC1191) special case:
512 * we can receive locally generated ICMP messages while socket is held.
514 if (sock_owned_by_user(sk)) {
515 if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
516 __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
518 if (sk->sk_state == TCP_CLOSE)
521 if (static_branch_unlikely(&ip4_min_ttl)) {
522 /* min_ttl can be changed concurrently from do_ip_setsockopt() */
523 if (unlikely(iph->ttl < READ_ONCE(inet_sk(sk)->min_ttl))) {
524 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
530 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
531 fastopen = rcu_dereference(tp->fastopen_rsk);
532 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
533 if (sk->sk_state != TCP_LISTEN &&
534 !between(seq, snd_una, tp->snd_nxt)) {
535 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
541 if (!sock_owned_by_user(sk))
542 do_redirect(skb, sk);
544 case ICMP_SOURCE_QUENCH:
545 /* Just silently ignore these. */
547 case ICMP_PARAMETERPROB:
550 case ICMP_DEST_UNREACH:
551 if (code > NR_ICMP_UNREACH)
554 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
555 /* We are not interested in TCP_LISTEN and open_requests
556 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
557 * they should go through unfragmented).
559 if (sk->sk_state == TCP_LISTEN)
562 WRITE_ONCE(tp->mtu_info, info);
563 if (!sock_owned_by_user(sk)) {
564 tcp_v4_mtu_reduced(sk);
566 if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
572 err = icmp_err_convert[code].errno;
573 /* check if this ICMP message allows revert of backoff.
577 (code == ICMP_NET_UNREACH || code == ICMP_HOST_UNREACH))
578 tcp_ld_RTO_revert(sk, seq);
580 case ICMP_TIME_EXCEEDED:
587 switch (sk->sk_state) {
590 /* Only in fast or simultaneous open. If a fast open socket is
591 * already accepted it is treated as a connected one below.
593 if (fastopen && !fastopen->sk)
596 ip_icmp_error(sk, skb, err, th->dest, info, (u8 *)th);
598 if (!sock_owned_by_user(sk)) {
599 WRITE_ONCE(sk->sk_err, err);
605 WRITE_ONCE(sk->sk_err_soft, err);
610 /* If we've already connected we will keep trying
611 * until we time out, or the user gives up.
613 * RFC 1122 4.2.3.9 allows us to treat only PROTO_UNREACH and
614 * PORT_UNREACH as hard errors (well, FRAG_FAILED too,
615 * but it is obsoleted by PMTU discovery).
617 * Note that in the modern internet, where routing is unreliable
618 * and broken firewalls sit in every dark corner, sending random
619 * errors on behalf of their masters, even these two messages have
620 * finally lost their original sense (even Linux sends invalid PORT_UNREACHs).
622 * Now we are in compliance with the RFCs.
627 if (!sock_owned_by_user(sk) && inet->recverr) {
628 WRITE_ONCE(sk->sk_err, err);
630 } else { /* Only an error on timeout */
631 WRITE_ONCE(sk->sk_err_soft, err);
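/* Prepare the TCP checksum for offload (CHECKSUM_PARTIAL): seed th->check
 * with the pseudo-header sum and point csum_start/csum_offset at the TCP
 * checksum field so the NIC (or skb_checksum_help() as a fallback) can
 * finish the sum.
 */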
640 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
642 struct tcphdr *th = tcp_hdr(skb);
644 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
645 skb->csum_start = skb_transport_header(skb) - skb->head;
646 skb->csum_offset = offsetof(struct tcphdr, check);
649 /* This routine computes an IPv4 TCP checksum. */
650 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
652 const struct inet_sock *inet = inet_sk(sk);
654 __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
656 EXPORT_SYMBOL(tcp_v4_send_check);
659 * This routine will send an RST to the other tcp.
661 * Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)?
663 * Answer: if a packet caused an RST, it is not for a socket
664 * existing in our system; if it is matched to a socket,
665 * it is just a duplicate segment or a bug in the other side's TCP.
666 * So we build the reply based only on the parameters
667 * that arrived with the segment.
668 * Exception: precedence violation. We do not implement it in any case.
671 #ifdef CONFIG_TCP_MD5SIG
672 #define OPTION_BYTES TCPOLEN_MD5SIG_ALIGNED
674 #define OPTION_BYTES sizeof(__be32)
677 static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
679 const struct tcphdr *th = tcp_hdr(skb);
682 __be32 opt[OPTION_BYTES / sizeof(__be32)];
684 struct ip_reply_arg arg;
685 #ifdef CONFIG_TCP_MD5SIG
686 struct tcp_md5sig_key *key = NULL;
687 const __u8 *hash_location = NULL;
688 unsigned char newhash[16];
690 struct sock *sk1 = NULL;
692 u64 transmit_time = 0;
697 /* Never send a reset in response to a reset. */
701 /* If sk is not NULL, it means we did a successful lookup and the incoming
702 * route had to be correct. prequeue might have dropped our dst.
704 if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
707 /* Swap the send and the receive. */
708 memset(&rep, 0, sizeof(rep));
709 rep.th.dest = th->source;
710 rep.th.source = th->dest;
711 rep.th.doff = sizeof(struct tcphdr) / 4;
715 rep.th.seq = th->ack_seq;
718 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
719 skb->len - (th->doff << 2));
722 memset(&arg, 0, sizeof(arg));
723 arg.iov[0].iov_base = (unsigned char *)&rep;
724 arg.iov[0].iov_len = sizeof(rep.th);
726 net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
727 #ifdef CONFIG_TCP_MD5SIG
729 hash_location = tcp_parse_md5sig_option(th);
730 if (sk && sk_fullsock(sk)) {
731 const union tcp_md5_addr *addr;
734 /* sdif set, means packet ingressed via a device
735 * in an L3 domain and inet_iif is set to it.
737 l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
738 addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
739 key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
740 } else if (hash_location) {
741 const union tcp_md5_addr *addr;
742 int sdif = tcp_v4_sdif(skb);
743 int dif = inet_iif(skb);
747 * active side is lost. Try to find the listening socket through
748 * the source port, and then find the md5 key through the listening socket.
749 * We do not lose security here:
750 * the incoming packet is checked against the md5 hash using the key we find;
751 * no RST is generated if the md5 hash doesn't match.
753 sk1 = __inet_lookup_listener(net, net->ipv4.tcp_death_row.hashinfo,
754 NULL, 0, ip_hdr(skb)->saddr,
755 th->source, ip_hdr(skb)->daddr,
756 ntohs(th->source), dif, sdif);
757 /* don't send rst if it can't find key */
761 /* sdif set, means packet ingressed via a device
762 * in an L3 domain and dif is set to it.
764 l3index = sdif ? dif : 0;
765 addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
766 key = tcp_md5_do_lookup(sk1, l3index, addr, AF_INET);
771 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
772 if (genhash || memcmp(hash_location, newhash, 16) != 0)
778 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
780 (TCPOPT_MD5SIG << 8) |
782 /* Update length and the length the header thinks exists */
783 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
784 rep.th.doff = arg.iov[0].iov_len / 4;
786 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
787 key, ip_hdr(skb)->saddr,
788 ip_hdr(skb)->daddr, &rep.th);
791 /* Can't co-exist with TCPMD5, hence check rep.opt[0] */
792 if (rep.opt[0] == 0) {
793 __be32 mrst = mptcp_reset_option(skb);
797 arg.iov[0].iov_len += sizeof(mrst);
798 rep.th.doff = arg.iov[0].iov_len / 4;
802 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
803 ip_hdr(skb)->saddr, /* XXX */
804 arg.iov[0].iov_len, IPPROTO_TCP, 0);
805 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
806 arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;
808 /* When the socket is gone, all binding information is lost, so
809 * routing might fail in this case. No choice here: if we choose to force
810 * the input interface, we will misroute in case of an asymmetric route.
813 arg.bound_dev_if = sk->sk_bound_dev_if;
815 trace_tcp_send_reset(sk, skb);
818 BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
819 offsetof(struct inet_timewait_sock, tw_bound_dev_if));
821 arg.tos = ip_hdr(skb)->tos;
822 arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
824 ctl_sk = this_cpu_read(ipv4_tcp_sk);
825 sock_net_set(ctl_sk, net);
827 ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
828 inet_twsk(sk)->tw_mark : sk->sk_mark;
829 ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
830 inet_twsk(sk)->tw_priority : sk->sk_priority;
831 transmit_time = tcp_transmit_time(sk);
832 xfrm_sk_clone_policy(ctl_sk, sk);
833 txhash = (sk->sk_state == TCP_TIME_WAIT) ?
834 inet_twsk(sk)->tw_txhash : sk->sk_txhash;
837 ctl_sk->sk_priority = 0;
839 ip_send_unicast_reply(ctl_sk,
840 skb, &TCP_SKB_CB(skb)->header.h4.opt,
841 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
842 &arg, arg.iov[0].iov_len,
843 transmit_time, txhash);
845 xfrm_sk_free_policy(ctl_sk);
846 sock_net_set(ctl_sk, &init_net);
847 __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
848 __TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
851 #ifdef CONFIG_TCP_MD5SIG
857 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
858 outside socket context, is certainly ugly. What can I do?
861 static void tcp_v4_send_ack(const struct sock *sk,
862 struct sk_buff *skb, u32 seq, u32 ack,
863 u32 win, u32 tsval, u32 tsecr, int oif,
864 struct tcp_md5sig_key *key,
865 int reply_flags, u8 tos, u32 txhash)
867 const struct tcphdr *th = tcp_hdr(skb);
870 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
871 #ifdef CONFIG_TCP_MD5SIG
872 + (TCPOLEN_MD5SIG_ALIGNED >> 2)
876 struct net *net = sock_net(sk);
877 struct ip_reply_arg arg;
881 memset(&rep.th, 0, sizeof(struct tcphdr));
882 memset(&arg, 0, sizeof(arg));
884 arg.iov[0].iov_base = (unsigned char *)&rep;
885 arg.iov[0].iov_len = sizeof(rep.th);
887 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
888 (TCPOPT_TIMESTAMP << 8) |
890 rep.opt[1] = htonl(tsval);
891 rep.opt[2] = htonl(tsecr);
892 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
895 /* Swap the send and the receive. */
896 rep.th.dest = th->source;
897 rep.th.source = th->dest;
898 rep.th.doff = arg.iov[0].iov_len / 4;
899 rep.th.seq = htonl(seq);
900 rep.th.ack_seq = htonl(ack);
902 rep.th.window = htons(win);
904 #ifdef CONFIG_TCP_MD5SIG
906 int offset = (tsecr) ? 3 : 0;
908 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
910 (TCPOPT_MD5SIG << 8) |
912 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
913 rep.th.doff = arg.iov[0].iov_len/4;
915 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
916 key, ip_hdr(skb)->saddr,
917 ip_hdr(skb)->daddr, &rep.th);
920 arg.flags = reply_flags;
921 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
922 ip_hdr(skb)->saddr, /* XXX */
923 arg.iov[0].iov_len, IPPROTO_TCP, 0);
924 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
926 arg.bound_dev_if = oif;
928 arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
930 ctl_sk = this_cpu_read(ipv4_tcp_sk);
931 sock_net_set(ctl_sk, net);
932 ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
933 inet_twsk(sk)->tw_mark : sk->sk_mark;
934 ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
935 inet_twsk(sk)->tw_priority : sk->sk_priority;
936 transmit_time = tcp_transmit_time(sk);
937 ip_send_unicast_reply(ctl_sk,
938 skb, &TCP_SKB_CB(skb)->header.h4.opt,
939 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
940 &arg, arg.iov[0].iov_len,
941 transmit_time, txhash);
943 sock_net_set(ctl_sk, &init_net);
944 __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
948 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
950 struct inet_timewait_sock *tw = inet_twsk(sk);
951 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
953 tcp_v4_send_ack(sk, skb,
954 tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
955 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
956 tcp_time_stamp_raw() + tcptw->tw_ts_offset,
959 tcp_twsk_md5_key(tcptw),
960 tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
968 static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
969 struct request_sock *req)
971 const union tcp_md5_addr *addr;
974 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
975 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
977 u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
981 * The window field (SEG.WND) of every outgoing segment, with the
982 * exception of <SYN> segments, MUST be right-shifted by
983 * Rcv.Wind.Shift bits:
985 addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
986 l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
987 tcp_v4_send_ack(sk, skb, seq,
988 tcp_rsk(req)->rcv_nxt,
989 req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
990 tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
993 tcp_md5_do_lookup(sk, l3index, addr, AF_INET),
994 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
995 ip_hdr(skb)->tos, tcp_rsk(req)->txhash);
999 * Send a SYN-ACK after having received a SYN.
1000 * This still operates on a request_sock only, not on a big
1003 static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
1005 struct request_sock *req,
1006 struct tcp_fastopen_cookie *foc,
1007 enum tcp_synack_type synack_type,
1008 struct sk_buff *syn_skb)
1010 const struct inet_request_sock *ireq = inet_rsk(req);
1013 struct sk_buff *skb;
1016 /* First, grab a route. */
1017 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
1020 skb = tcp_make_synack(sk, dst, req, foc, synack_type, syn_skb);
1023 __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
1025 tos = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos) ?
1026 (tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
1027 (inet_sk(sk)->tos & INET_ECN_MASK) :
1030 if (!INET_ECN_is_capable(tos) &&
1031 tcp_bpf_ca_needs_ecn((struct sock *)req))
1032 tos |= INET_ECN_ECT_0;
1035 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
1037 rcu_dereference(ireq->ireq_opt),
1040 err = net_xmit_eval(err);
1047 * IPv4 request_sock destructor.
1049 static void tcp_v4_reqsk_destructor(struct request_sock *req)
1051 kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
1054 #ifdef CONFIG_TCP_MD5SIG
1056 * RFC2385 MD5 checksumming requires a mapping of
1057 * IP address->MD5 Key.
1058 * We need to maintain these in the sk structure.
1061 DEFINE_STATIC_KEY_DEFERRED_FALSE(tcp_md5_needed, HZ);
1062 EXPORT_SYMBOL(tcp_md5_needed);
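/* tcp_md5_needed is a deferred static key: it is enabled only once the
 * first MD5 key is installed, so sockets that never use TCP-MD5 do not pay
 * for the lookup on the fast path. The HZ argument defers the disable side
 * so rapid add/remove cycles do not thrash branch patching.
 */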
1064 static bool better_md5_match(struct tcp_md5sig_key *old, struct tcp_md5sig_key *new)
1069 /* l3index always overrides non-l3index */
1070 if (old->l3index && new->l3index == 0)
1072 if (old->l3index == 0 && new->l3index)
1075 return old->prefixlen < new->prefixlen;
1078 /* Find the Key structure for an address. */
1079 struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
1080 const union tcp_md5_addr *addr,
1083 const struct tcp_sock *tp = tcp_sk(sk);
1084 struct tcp_md5sig_key *key;
1085 const struct tcp_md5sig_info *md5sig;
1087 struct tcp_md5sig_key *best_match = NULL;
1090 /* caller either holds rcu_read_lock() or socket lock */
1091 md5sig = rcu_dereference_check(tp->md5sig_info,
1092 lockdep_sock_is_held(sk));
1096 hlist_for_each_entry_rcu(key, &md5sig->head, node,
1097 lockdep_sock_is_held(sk)) {
1098 if (key->family != family)
1100 if (key->flags & TCP_MD5SIG_FLAG_IFINDEX && key->l3index != l3index)
1102 if (family == AF_INET) {
1103 mask = inet_make_mask(key->prefixlen);
1104 match = (key->addr.a4.s_addr & mask) ==
1105 (addr->a4.s_addr & mask);
1106 #if IS_ENABLED(CONFIG_IPV6)
1107 } else if (family == AF_INET6) {
1108 match = ipv6_prefix_equal(&key->addr.a6, &addr->a6,
1115 if (match && better_md5_match(best_match, key))
1120 EXPORT_SYMBOL(__tcp_md5_do_lookup);
1122 static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
1123 const union tcp_md5_addr *addr,
1124 int family, u8 prefixlen,
1125 int l3index, u8 flags)
1127 const struct tcp_sock *tp = tcp_sk(sk);
1128 struct tcp_md5sig_key *key;
1129 unsigned int size = sizeof(struct in_addr);
1130 const struct tcp_md5sig_info *md5sig;
1132 /* caller either holds rcu_read_lock() or socket lock */
1133 md5sig = rcu_dereference_check(tp->md5sig_info,
1134 lockdep_sock_is_held(sk));
1137 #if IS_ENABLED(CONFIG_IPV6)
1138 if (family == AF_INET6)
1139 size = sizeof(struct in6_addr);
1141 hlist_for_each_entry_rcu(key, &md5sig->head, node,
1142 lockdep_sock_is_held(sk)) {
1143 if (key->family != family)
1145 if ((key->flags & TCP_MD5SIG_FLAG_IFINDEX) != (flags & TCP_MD5SIG_FLAG_IFINDEX))
1147 if (key->l3index != l3index)
1149 if (!memcmp(&key->addr, addr, size) &&
1150 key->prefixlen == prefixlen)
1156 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1157 const struct sock *addr_sk)
1159 const union tcp_md5_addr *addr;
1162 l3index = l3mdev_master_ifindex_by_index(sock_net(sk),
1163 addr_sk->sk_bound_dev_if);
1164 addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
1165 return tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
1167 EXPORT_SYMBOL(tcp_v4_md5_lookup);
1169 static int tcp_md5sig_info_add(struct sock *sk, gfp_t gfp)
1171 struct tcp_sock *tp = tcp_sk(sk);
1172 struct tcp_md5sig_info *md5sig;
1174 md5sig = kmalloc(sizeof(*md5sig), gfp);
1179 INIT_HLIST_HEAD(&md5sig->head);
1180 rcu_assign_pointer(tp->md5sig_info, md5sig);
1184 /* This can be called on a newly created socket, from other files */
1185 static int __tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1186 int family, u8 prefixlen, int l3index, u8 flags,
1187 const u8 *newkey, u8 newkeylen, gfp_t gfp)
1189 /* Add Key to the list */
1190 struct tcp_md5sig_key *key;
1191 struct tcp_sock *tp = tcp_sk(sk);
1192 struct tcp_md5sig_info *md5sig;
1194 key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index, flags);
1196 /* Pre-existing entry - just update that one.
1197 * Note that the key might be used concurrently.
1198 * data_race() is telling kcsan that we do not care about
1199 * key mismatches, since changing MD5 key on live flows
1200 * can lead to packet drops.
1202 data_race(memcpy(key->key, newkey, newkeylen));
1204 /* Pairs with READ_ONCE() in tcp_md5_hash_key().
1205 * Also note that a reader could catch new key->keylen value
1206 * but old key->key[], this is the reason we use __GFP_ZERO
1207 * at sock_kmalloc() time below these lines.
1209 WRITE_ONCE(key->keylen, newkeylen);
1214 md5sig = rcu_dereference_protected(tp->md5sig_info,
1215 lockdep_sock_is_held(sk));
1217 key = sock_kmalloc(sk, sizeof(*key), gfp | __GFP_ZERO);
1220 if (!tcp_alloc_md5sig_pool()) {
1221 sock_kfree_s(sk, key, sizeof(*key));
1225 memcpy(key->key, newkey, newkeylen);
1226 key->keylen = newkeylen;
1227 key->family = family;
1228 key->prefixlen = prefixlen;
1229 key->l3index = l3index;
1231 memcpy(&key->addr, addr,
1232 (IS_ENABLED(CONFIG_IPV6) && family == AF_INET6) ? sizeof(struct in6_addr) :
1233 sizeof(struct in_addr));
1234 hlist_add_head_rcu(&key->node, &md5sig->head);
1238 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1239 int family, u8 prefixlen, int l3index, u8 flags,
1240 const u8 *newkey, u8 newkeylen)
1242 struct tcp_sock *tp = tcp_sk(sk);
1244 if (!rcu_dereference_protected(tp->md5sig_info, lockdep_sock_is_held(sk))) {
1245 if (tcp_md5sig_info_add(sk, GFP_KERNEL))
1248 if (!static_branch_inc(&tcp_md5_needed.key)) {
1249 struct tcp_md5sig_info *md5sig;
1251 md5sig = rcu_dereference_protected(tp->md5sig_info, lockdep_sock_is_held(sk));
1252 rcu_assign_pointer(tp->md5sig_info, NULL);
1253 kfree_rcu(md5sig, rcu);
1258 return __tcp_md5_do_add(sk, addr, family, prefixlen, l3index, flags,
1259 newkey, newkeylen, GFP_KERNEL);
1261 EXPORT_SYMBOL(tcp_md5_do_add);
1263 int tcp_md5_key_copy(struct sock *sk, const union tcp_md5_addr *addr,
1264 int family, u8 prefixlen, int l3index,
1265 struct tcp_md5sig_key *key)
1267 struct tcp_sock *tp = tcp_sk(sk);
1269 if (!rcu_dereference_protected(tp->md5sig_info, lockdep_sock_is_held(sk))) {
1270 if (tcp_md5sig_info_add(sk, sk_gfp_mask(sk, GFP_ATOMIC)))
1273 if (!static_key_fast_inc_not_disabled(&tcp_md5_needed.key.key)) {
1274 struct tcp_md5sig_info *md5sig;
1276 md5sig = rcu_dereference_protected(tp->md5sig_info, lockdep_sock_is_held(sk));
1277 net_warn_ratelimited("Too many TCP-MD5 keys in the system\n");
1278 rcu_assign_pointer(tp->md5sig_info, NULL);
1279 kfree_rcu(md5sig, rcu);
1284 return __tcp_md5_do_add(sk, addr, family, prefixlen, l3index,
1285 key->flags, key->key, key->keylen,
1286 sk_gfp_mask(sk, GFP_ATOMIC));
1288 EXPORT_SYMBOL(tcp_md5_key_copy);
1290 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
1291 u8 prefixlen, int l3index, u8 flags)
1293 struct tcp_md5sig_key *key;
1295 key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index, flags);
1298 hlist_del_rcu(&key->node);
1299 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1300 kfree_rcu(key, rcu);
1303 EXPORT_SYMBOL(tcp_md5_do_del);
1305 static void tcp_clear_md5_list(struct sock *sk)
1307 struct tcp_sock *tp = tcp_sk(sk);
1308 struct tcp_md5sig_key *key;
1309 struct hlist_node *n;
1310 struct tcp_md5sig_info *md5sig;
1312 md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1314 hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
1315 hlist_del_rcu(&key->node);
1316 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1317 kfree_rcu(key, rcu);
1321 static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
1322 sockptr_t optval, int optlen)
1324 struct tcp_md5sig cmd;
1325 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1326 const union tcp_md5_addr *addr;
1331 if (optlen < sizeof(cmd))
1334 if (copy_from_sockptr(&cmd, optval, sizeof(cmd)))
1337 if (sin->sin_family != AF_INET)
1340 flags = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX;
1342 if (optname == TCP_MD5SIG_EXT &&
1343 cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
1344 prefixlen = cmd.tcpm_prefixlen;
1349 if (optname == TCP_MD5SIG_EXT && cmd.tcpm_ifindex &&
1350 cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
1351 struct net_device *dev;
1354 dev = dev_get_by_index_rcu(sock_net(sk), cmd.tcpm_ifindex);
1355 if (dev && netif_is_l3_master(dev))
1356 l3index = dev->ifindex;
1360 /* It is ok to reference whether the device was set or not outside of rcu;
1361 * right now the device MUST be an L3 master
1363 if (!dev || !l3index)
1367 addr = (union tcp_md5_addr *)&sin->sin_addr.s_addr;
1369 if (!cmd.tcpm_keylen)
1370 return tcp_md5_do_del(sk, addr, AF_INET, prefixlen, l3index, flags);
1372 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1375 return tcp_md5_do_add(sk, addr, AF_INET, prefixlen, l3index, flags,
1376 cmd.tcpm_key, cmd.tcpm_keylen);
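/* For reference, a userspace peer typically installs a matching key with
 * the TCP_MD5SIG socket option; an illustrative sketch (not from this
 * file, variable names are made up):
 *
 *	struct tcp_md5sig md5sig = {};
 *	struct sockaddr_in *peer = (struct sockaddr_in *)&md5sig.tcpm_addr;
 *
 *	peer->sin_family = AF_INET;
 *	peer->sin_addr.s_addr = remote_addr;
 *	md5sig.tcpm_keylen = keylen;
 *	memcpy(md5sig.tcpm_key, key, keylen);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5sig, sizeof(md5sig));
 *
 * which reaches tcp_v4_parse_md5_keys() above via do_tcp_setsockopt().
 */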
1379 static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
1380 __be32 daddr, __be32 saddr,
1381 const struct tcphdr *th, int nbytes)
1383 struct tcp4_pseudohdr *bp;
1384 struct scatterlist sg;
1391 bp->protocol = IPPROTO_TCP;
1392 bp->len = cpu_to_be16(nbytes);
1394 _th = (struct tcphdr *)(bp + 1);
1395 memcpy(_th, th, sizeof(*th));
1398 sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
1399 ahash_request_set_crypt(hp->md5_req, &sg, NULL,
1400 sizeof(*bp) + sizeof(*th));
1401 return crypto_ahash_update(hp->md5_req);
1404 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1405 __be32 daddr, __be32 saddr, const struct tcphdr *th)
1407 struct tcp_md5sig_pool *hp;
1408 struct ahash_request *req;
1410 hp = tcp_get_md5sig_pool();
1412 goto clear_hash_noput;
1415 if (crypto_ahash_init(req))
1417 if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
1419 if (tcp_md5_hash_key(hp, key))
1421 ahash_request_set_crypt(req, NULL, md5_hash, 0);
1422 if (crypto_ahash_final(req))
1425 tcp_put_md5sig_pool();
1429 tcp_put_md5sig_pool();
1431 memset(md5_hash, 0, 16);
1435 int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1436 const struct sock *sk,
1437 const struct sk_buff *skb)
1439 struct tcp_md5sig_pool *hp;
1440 struct ahash_request *req;
1441 const struct tcphdr *th = tcp_hdr(skb);
1442 __be32 saddr, daddr;
1444 if (sk) { /* valid for establish/request sockets */
1445 saddr = sk->sk_rcv_saddr;
1446 daddr = sk->sk_daddr;
1448 const struct iphdr *iph = ip_hdr(skb);
1453 hp = tcp_get_md5sig_pool();
1455 goto clear_hash_noput;
1458 if (crypto_ahash_init(req))
1461 if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
1463 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1465 if (tcp_md5_hash_key(hp, key))
1467 ahash_request_set_crypt(req, NULL, md5_hash, 0);
1468 if (crypto_ahash_final(req))
1471 tcp_put_md5sig_pool();
1475 tcp_put_md5sig_pool();
1477 memset(md5_hash, 0, 16);
1480 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1484 static void tcp_v4_init_req(struct request_sock *req,
1485 const struct sock *sk_listener,
1486 struct sk_buff *skb)
1488 struct inet_request_sock *ireq = inet_rsk(req);
1489 struct net *net = sock_net(sk_listener);
1491 sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1492 sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1493 RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
1496 static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
1497 struct sk_buff *skb,
1499 struct request_sock *req)
1501 tcp_v4_init_req(req, sk, skb);
1503 if (security_inet_conn_request(sk, skb, req))
1506 return inet_csk_route_req(sk, &fl->u.ip4, req);
1509 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1511 .obj_size = sizeof(struct tcp_request_sock),
1512 .rtx_syn_ack = tcp_rtx_synack,
1513 .send_ack = tcp_v4_reqsk_send_ack,
1514 .destructor = tcp_v4_reqsk_destructor,
1515 .send_reset = tcp_v4_send_reset,
1516 .syn_ack_timeout = tcp_syn_ack_timeout,
1519 const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1520 .mss_clamp = TCP_MSS_DEFAULT,
1521 #ifdef CONFIG_TCP_MD5SIG
1522 .req_md5_lookup = tcp_v4_md5_lookup,
1523 .calc_md5_hash = tcp_v4_md5_hash_skb,
1525 #ifdef CONFIG_SYN_COOKIES
1526 .cookie_init_seq = cookie_v4_init_sequence,
1528 .route_req = tcp_v4_route_req,
1529 .init_seq = tcp_v4_init_seq,
1530 .init_ts_off = tcp_v4_init_ts_off,
1531 .send_synack = tcp_v4_send_synack,
1534 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1536 /* Never answer SYNs sent to broadcast or multicast */
1537 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1540 return tcp_conn_request(&tcp_request_sock_ops,
1541 &tcp_request_sock_ipv4_ops, sk, skb);
1547 EXPORT_SYMBOL(tcp_v4_conn_request);
1551 * The three way handshake has completed - we got a valid synack -
1552 * now create the new socket.
1554 struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1555 struct request_sock *req,
1556 struct dst_entry *dst,
1557 struct request_sock *req_unhash,
1560 struct inet_request_sock *ireq;
1561 bool found_dup_sk = false;
1562 struct inet_sock *newinet;
1563 struct tcp_sock *newtp;
1565 #ifdef CONFIG_TCP_MD5SIG
1566 const union tcp_md5_addr *addr;
1567 struct tcp_md5sig_key *key;
1570 struct ip_options_rcu *inet_opt;
1572 if (sk_acceptq_is_full(sk))
1575 newsk = tcp_create_openreq_child(sk, req, skb);
1579 newsk->sk_gso_type = SKB_GSO_TCPV4;
1580 inet_sk_rx_dst_set(newsk, skb);
1582 newtp = tcp_sk(newsk);
1583 newinet = inet_sk(newsk);
1584 ireq = inet_rsk(req);
1585 sk_daddr_set(newsk, ireq->ir_rmt_addr);
1586 sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1587 newsk->sk_bound_dev_if = ireq->ir_iif;
1588 newinet->inet_saddr = ireq->ir_loc_addr;
1589 inet_opt = rcu_dereference(ireq->ireq_opt);
1590 RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
1591 newinet->mc_index = inet_iif(skb);
1592 newinet->mc_ttl = ip_hdr(skb)->ttl;
1593 newinet->rcv_tos = ip_hdr(skb)->tos;
1594 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1596 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1597 newinet->inet_id = get_random_u16();
1599 /* Set ToS of the new socket based upon the value of incoming SYN.
1600 * ECT bits are set later in tcp_init_transfer().
1602 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos))
1603 newinet->tos = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;
1606 dst = inet_csk_route_child_sock(sk, newsk, req);
1610 /* syncookie case : see end of cookie_v4_check() */
1612 sk_setup_caps(newsk, dst);
1614 tcp_ca_openreq_child(newsk, dst);
1616 tcp_sync_mss(newsk, dst_mtu(dst));
1617 newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
1619 tcp_initialize_rcv_mss(newsk);
1621 #ifdef CONFIG_TCP_MD5SIG
1622 l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif);
1623 /* Copy over the MD5 key from the original socket */
1624 addr = (union tcp_md5_addr *)&newinet->inet_daddr;
1625 key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
1627 if (tcp_md5_key_copy(newsk, addr, AF_INET, 32, l3index, key))
1629 sk_gso_disable(newsk);
1633 if (__inet_inherit_port(sk, newsk) < 0)
1635 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),
1637 if (likely(*own_req)) {
1638 tcp_move_syn(newtp, req);
1639 ireq->ireq_opt = NULL;
1641 newinet->inet_opt = NULL;
1643 if (!req_unhash && found_dup_sk) {
1644 /* This code path should be executed only in the
1645 * syncookie case
1647 bh_unlock_sock(newsk);
1655 NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1662 newinet->inet_opt = NULL;
1663 inet_csk_prepare_forced_close(newsk);
1667 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1669 static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
1671 #ifdef CONFIG_SYN_COOKIES
1672 const struct tcphdr *th = tcp_hdr(skb);
1675 sk = cookie_v4_check(sk, skb);
1680 u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
1681 struct tcphdr *th, u32 *cookie)
1684 #ifdef CONFIG_SYN_COOKIES
1685 mss = tcp_get_syncookie_mss(&tcp_request_sock_ops,
1686 &tcp_request_sock_ipv4_ops, sk, th);
1688 *cookie = __cookie_v4_init_sequence(iph, th, &mss);
1689 tcp_synq_overflow(sk);
1695 INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
1697 /* The socket must have its spinlock held when we get
1698 * here, unless it is a TCP_LISTEN socket.
1700 * We have a potential double-lock case here, so even when
1701 * doing backlog processing we use the BH locking scheme.
1702 * This is because we cannot sleep with the original spinlock
1705 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1707 enum skb_drop_reason reason;
1710 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1711 struct dst_entry *dst;
1713 dst = rcu_dereference_protected(sk->sk_rx_dst,
1714 lockdep_sock_is_held(sk));
1716 sock_rps_save_rxhash(sk, skb);
1717 sk_mark_napi_id(sk, skb);
1719 if (sk->sk_rx_dst_ifindex != skb->skb_iif ||
1720 !INDIRECT_CALL_1(dst->ops->check, ipv4_dst_check,
1722 RCU_INIT_POINTER(sk->sk_rx_dst, NULL);
1726 tcp_rcv_established(sk, skb);
1730 reason = SKB_DROP_REASON_NOT_SPECIFIED;
1731 if (tcp_checksum_complete(skb))
1734 if (sk->sk_state == TCP_LISTEN) {
1735 struct sock *nsk = tcp_v4_cookie_check(sk, skb);
1740 if (tcp_child_process(sk, nsk, skb)) {
1747 sock_rps_save_rxhash(sk, skb);
1749 if (tcp_rcv_state_process(sk, skb)) {
1756 tcp_v4_send_reset(rsk, skb);
1758 kfree_skb_reason(skb, reason);
1759 /* Be careful here. If this function gets more complicated and
1760 * gcc suffers from register pressure on the x86, sk (in %ebx)
1761 * might be destroyed here. This current version compiles correctly,
1762 * but you have been warned.
1767 reason = SKB_DROP_REASON_TCP_CSUM;
1768 trace_tcp_bad_csum(skb);
1769 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1770 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1773 EXPORT_SYMBOL(tcp_v4_do_rcv);
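/* Early demux: called from the IP input path before the route lookup to
 * find an established socket, so its cached sk->sk_rx_dst can be reused
 * and a full route lookup avoided in the common case.
 */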
1775 int tcp_v4_early_demux(struct sk_buff *skb)
1777 struct net *net = dev_net(skb->dev);
1778 const struct iphdr *iph;
1779 const struct tcphdr *th;
1782 if (skb->pkt_type != PACKET_HOST)
1785 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1791 if (th->doff < sizeof(struct tcphdr) / 4)
1794 sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
1795 iph->saddr, th->source,
1796 iph->daddr, ntohs(th->dest),
1797 skb->skb_iif, inet_sdif(skb));
1800 skb->destructor = sock_edemux;
1801 if (sk_fullsock(sk)) {
1802 struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst);
1805 dst = dst_check(dst, 0);
1807 sk->sk_rx_dst_ifindex == skb->skb_iif)
1808 skb_dst_set_noref(skb, dst);
1814 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
1815 enum skb_drop_reason *reason)
1817 u32 limit, tail_gso_size, tail_gso_segs;
1818 struct skb_shared_info *shinfo;
1819 const struct tcphdr *th;
1820 struct tcphdr *thtail;
1821 struct sk_buff *tail;
1822 unsigned int hdrlen;
1828 /* In case all data was pulled from skb frags (in __pskb_pull_tail()),
1829 * we can fix skb->truesize to its real value to avoid future drops.
1830 * This is valid because skb is not yet charged to the socket.
1831 * It has been noticed pure SACK packets were sometimes dropped
1832 * (if cooked by drivers without copybreak feature).
1838 if (unlikely(tcp_checksum_complete(skb))) {
1840 trace_tcp_bad_csum(skb);
1841 *reason = SKB_DROP_REASON_TCP_CSUM;
1842 __TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1843 __TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1847 /* Attempt coalescing to last skb in backlog, even if we are above the limits.
1849 * This is okay because skb capacity is limited to MAX_SKB_FRAGS.
1851 th = (const struct tcphdr *)skb->data;
1852 hdrlen = th->doff * 4;
1854 tail = sk->sk_backlog.tail;
1857 thtail = (struct tcphdr *)tail->data;
1859 if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq ||
1860 TCP_SKB_CB(tail)->ip_dsfield != TCP_SKB_CB(skb)->ip_dsfield ||
1861 ((TCP_SKB_CB(tail)->tcp_flags |
1862 TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_SYN | TCPHDR_RST | TCPHDR_URG)) ||
1863 !((TCP_SKB_CB(tail)->tcp_flags &
1864 TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) ||
1865 ((TCP_SKB_CB(tail)->tcp_flags ^
1866 TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
1867 #ifdef CONFIG_TLS_DEVICE
1868 tail->decrypted != skb->decrypted ||
1870 thtail->doff != th->doff ||
1871 memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
1874 __skb_pull(skb, hdrlen);
1876 shinfo = skb_shinfo(skb);
1877 gso_size = shinfo->gso_size ?: skb->len;
1878 gso_segs = shinfo->gso_segs ?: 1;
1880 shinfo = skb_shinfo(tail);
1881 tail_gso_size = shinfo->gso_size ?: (tail->len - hdrlen);
1882 tail_gso_segs = shinfo->gso_segs ?: 1;
1884 if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
1885 TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq;
1887 if (likely(!before(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))) {
1888 TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq;
1889 thtail->window = th->window;
1892 /* We have to update both TCP_SKB_CB(tail)->tcp_flags and
1893 * thtail->fin, so that the fast path in tcp_rcv_established()
1894 * is not entered if we append a packet with a FIN.
1895 * SYN, RST, URG are not present.
1896 * ACK is set on both packets.
1897 * PSH : we do not really care in TCP stack,
1898 * at least for 'GRO' packets.
1900 thtail->fin |= th->fin;
1901 TCP_SKB_CB(tail)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
1903 if (TCP_SKB_CB(skb)->has_rxtstamp) {
1904 TCP_SKB_CB(tail)->has_rxtstamp = true;
1905 tail->tstamp = skb->tstamp;
1906 skb_hwtstamps(tail)->hwtstamp = skb_hwtstamps(skb)->hwtstamp;
1909 /* Not as strict as GRO. We only need to carry mss max value */
1910 shinfo->gso_size = max(gso_size, tail_gso_size);
1911 shinfo->gso_segs = min_t(u32, gso_segs + tail_gso_segs, 0xFFFF);
1913 sk->sk_backlog.len += delta;
1914 __NET_INC_STATS(sock_net(sk),
1915 LINUX_MIB_TCPBACKLOGCOALESCE);
1916 kfree_skb_partial(skb, fragstolen);
1919 __skb_push(skb, hdrlen);
1922 limit = (u32)READ_ONCE(sk->sk_rcvbuf) + (u32)(READ_ONCE(sk->sk_sndbuf) >> 1);
1924 /* Only the socket owner can try to collapse/prune rx queues
1925 * to reduce memory overhead, so add a little headroom here.
1926 * Few socket backlogs are likely to be concurrently non-empty.
1930 if (unlikely(sk_add_backlog(sk, skb, limit))) {
1932 *reason = SKB_DROP_REASON_SOCKET_BACKLOG;
1933 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
1938 EXPORT_SYMBOL(tcp_add_backlog);
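/* Run the attached socket filter (if any) on the skb, while making sure
 * at least the full TCP header survives any trimming done by the filter.
 */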
1940 int tcp_filter(struct sock *sk, struct sk_buff *skb)
1942 struct tcphdr *th = (struct tcphdr *)skb->data;
1944 return sk_filter_trim_cap(sk, skb, th->doff * 4);
1946 EXPORT_SYMBOL(tcp_filter);
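/* tcp_v4_fill_cb() below overlays TCP_SKB_CB() on top of the IP control
 * block; tcp_v4_restore_cb() moves the saved inet_skb_parm back to the
 * front so the skb can be handed back to another socket or to the IP layer.
 */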
1948 static void tcp_v4_restore_cb(struct sk_buff *skb)
1950 memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
1951 sizeof(struct inet_skb_parm));
1954 static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
1955 const struct tcphdr *th)
1957 /* This is tricky: we move IPCB to its correct location inside TCP_SKB_CB().
1958 * barrier() makes sure the compiler won't play fool^Waliasing games.
1960 memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1961 sizeof(struct inet_skb_parm));
1964 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1965 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1966 skb->len - th->doff * 4);
1967 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1968 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1969 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1970 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1971 TCP_SKB_CB(skb)->sacked = 0;
1972 TCP_SKB_CB(skb)->has_rxtstamp =
1973 skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
1980 int tcp_v4_rcv(struct sk_buff *skb)
1982 struct net *net = dev_net(skb->dev);
1983 enum skb_drop_reason drop_reason;
1984 int sdif = inet_sdif(skb);
1985 int dif = inet_iif(skb);
1986 const struct iphdr *iph;
1987 const struct tcphdr *th;
1992 drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
1993 if (skb->pkt_type != PACKET_HOST)
1996 /* Count it even if it's bad */
1997 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
1999 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
2002 th = (const struct tcphdr *)skb->data;
2004 if (unlikely(th->doff < sizeof(struct tcphdr) / 4)) {
2005 drop_reason = SKB_DROP_REASON_PKT_TOO_SMALL;
2008 if (!pskb_may_pull(skb, th->doff * 4))
2011 /* An explanation is required here, I think.
2012 * Packet length and doff are validated by header prediction,
2013 * provided the case of th->doff==0 is eliminated.
2014 * So, we defer the checks. */
2016 if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
2019 th = (const struct tcphdr *)skb->data;
2022 sk = __inet_lookup_skb(net->ipv4.tcp_death_row.hashinfo,
2023 skb, __tcp_hdrlen(th), th->source,
2024 th->dest, sdif, &refcounted);
2029 if (sk->sk_state == TCP_TIME_WAIT)
2032 if (sk->sk_state == TCP_NEW_SYN_RECV) {
2033 struct request_sock *req = inet_reqsk(sk);
2034 bool req_stolen = false;
2037 sk = req->rsk_listener;
2038 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
2039 drop_reason = SKB_DROP_REASON_XFRM_POLICY;
2041 drop_reason = tcp_inbound_md5_hash(sk, skb,
2042 &iph->saddr, &iph->daddr,
2043 AF_INET, dif, sdif);
2044 if (unlikely(drop_reason)) {
2045 sk_drops_add(sk, skb);
2049 if (tcp_checksum_complete(skb)) {
2053 if (unlikely(sk->sk_state != TCP_LISTEN)) {
2054 nsk = reuseport_migrate_sock(sk, req_to_sk(req), skb);
2056 inet_csk_reqsk_queue_drop_and_put(sk, req);
2060 /* reuseport_migrate_sock() has already held one sk_refcnt
2064 /* We own a reference on the listener, increase it again
2065 * as we might lose it too soon.
2071 if (!tcp_filter(sk, skb)) {
2072 th = (const struct tcphdr *)skb->data;
2074 tcp_v4_fill_cb(skb, iph, th);
2075 nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
2077 drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
2082 /* Another cpu got exclusive access to req
2083 * and created a full blown socket.
2084 * Try to feed this packet to this socket
2085 * instead of discarding it.
2087 tcp_v4_restore_cb(skb);
2091 goto discard_and_relse;
2096 tcp_v4_restore_cb(skb);
2097 } else if (tcp_child_process(sk, nsk, skb)) {
2098 tcp_v4_send_reset(nsk, skb);
2099 goto discard_and_relse;
2106 if (static_branch_unlikely(&ip4_min_ttl)) {
2107 /* min_ttl can be changed concurrently from do_ip_setsockopt() */
2108 if (unlikely(iph->ttl < READ_ONCE(inet_sk(sk)->min_ttl))) {
2109 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
2110 drop_reason = SKB_DROP_REASON_TCP_MINTTL;
2111 goto discard_and_relse;
2115 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
2116 drop_reason = SKB_DROP_REASON_XFRM_POLICY;
2117 goto discard_and_relse;
2120 drop_reason = tcp_inbound_md5_hash(sk, skb, &iph->saddr,
2121 &iph->daddr, AF_INET, dif, sdif);
2123 goto discard_and_relse;
2127 if (tcp_filter(sk, skb)) {
2128 drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
2129 goto discard_and_relse;
2131 th = (const struct tcphdr *)skb->data;
2133 tcp_v4_fill_cb(skb, iph, th);
2137 if (sk->sk_state == TCP_LISTEN) {
2138 ret = tcp_v4_do_rcv(sk, skb);
2139 goto put_and_return;
2142 sk_incoming_cpu_update(sk);
2144 bh_lock_sock_nested(sk);
2145 tcp_segs_in(tcp_sk(sk), skb);
2147 if (!sock_owned_by_user(sk)) {
2148 ret = tcp_v4_do_rcv(sk, skb);
2150 if (tcp_add_backlog(sk, skb, &drop_reason))
2151 goto discard_and_relse;
2162 drop_reason = SKB_DROP_REASON_NO_SOCKET;
2163 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
2166 tcp_v4_fill_cb(skb, iph, th);
2168 if (tcp_checksum_complete(skb)) {
2170 drop_reason = SKB_DROP_REASON_TCP_CSUM;
2171 trace_tcp_bad_csum(skb);
2172 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
2174 __TCP_INC_STATS(net, TCP_MIB_INERRS);
2176 tcp_v4_send_reset(NULL, skb);
2180 SKB_DR_OR(drop_reason, NOT_SPECIFIED);
2181 /* Discard frame. */
2182 kfree_skb_reason(skb, drop_reason);
2186 sk_drops_add(sk, skb);
2192 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
2193 drop_reason = SKB_DROP_REASON_XFRM_POLICY;
2194 inet_twsk_put(inet_twsk(sk));
2198 tcp_v4_fill_cb(skb, iph, th);
2200 if (tcp_checksum_complete(skb)) {
2201 inet_twsk_put(inet_twsk(sk));
2204 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
2206 struct sock *sk2 = inet_lookup_listener(net,
2207 net->ipv4.tcp_death_row.hashinfo,
2208 skb, __tcp_hdrlen(th),
2209 iph->saddr, th->source,
2210 iph->daddr, th->dest,
2214 inet_twsk_deschedule_put(inet_twsk(sk));
2216 tcp_v4_restore_cb(skb);
2224 tcp_v4_timewait_ack(sk, skb);
2227 tcp_v4_send_reset(sk, skb);
2228 inet_twsk_deschedule_put(inet_twsk(sk));
2230 case TCP_TW_SUCCESS:;
2235 static struct timewait_sock_ops tcp_timewait_sock_ops = {
2236 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
2237 .twsk_unique = tcp_twsk_unique,
2238 .twsk_destructor= tcp_twsk_destructor,
2241 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
2243 struct dst_entry *dst = skb_dst(skb);
2245 if (dst && dst_hold_safe(dst)) {
2246 rcu_assign_pointer(sk->sk_rx_dst, dst);
2247 sk->sk_rx_dst_ifindex = skb->skb_iif;
2250 EXPORT_SYMBOL(inet_sk_rx_dst_set);
2252 const struct inet_connection_sock_af_ops ipv4_specific = {
2253 .queue_xmit = ip_queue_xmit,
2254 .send_check = tcp_v4_send_check,
2255 .rebuild_header = inet_sk_rebuild_header,
2256 .sk_rx_dst_set = inet_sk_rx_dst_set,
2257 .conn_request = tcp_v4_conn_request,
2258 .syn_recv_sock = tcp_v4_syn_recv_sock,
2259 .net_header_len = sizeof(struct iphdr),
2260 .setsockopt = ip_setsockopt,
2261 .getsockopt = ip_getsockopt,
2262 .addr2sockaddr = inet_csk_addr2sockaddr,
2263 .sockaddr_len = sizeof(struct sockaddr_in),
2264 .mtu_reduced = tcp_v4_mtu_reduced,
2266 EXPORT_SYMBOL(ipv4_specific);
2268 #ifdef CONFIG_TCP_MD5SIG
2269 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
2270 .md5_lookup = tcp_v4_md5_lookup,
2271 .calc_md5_hash = tcp_v4_md5_hash_skb,
2272 .md5_parse = tcp_v4_parse_md5_keys,
2276 /* NOTE: A lot of things are set to zero explicitly by the call to
2277 * sk_alloc(), so they need not be done here.
2279 static int tcp_v4_init_sock(struct sock *sk)
2281 struct inet_connection_sock *icsk = inet_csk(sk);
2285 icsk->icsk_af_ops = &ipv4_specific;
2287 #ifdef CONFIG_TCP_MD5SIG
2288 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
2294 void tcp_v4_destroy_sock(struct sock *sk)
2296 struct tcp_sock *tp = tcp_sk(sk);
2298 trace_tcp_destroy_sock(sk);
2300 tcp_clear_xmit_timers(sk);
2302 tcp_cleanup_congestion_control(sk);
2304 tcp_cleanup_ulp(sk);
2306 /* Clean up the write buffer. */
2307 tcp_write_queue_purge(sk);
2309 /* Check if we want to disable active TFO */
2310 tcp_fastopen_active_disable_ofo_check(sk);
2312 /* Cleans up our, hopefully empty, out_of_order_queue. */
2313 skb_rbtree_purge(&tp->out_of_order_queue);
2315 #ifdef CONFIG_TCP_MD5SIG
2316 /* Clean up the MD5 key list, if any */
2317 if (tp->md5sig_info) {
2318 tcp_clear_md5_list(sk);
2319 kfree_rcu(rcu_dereference_protected(tp->md5sig_info, 1), rcu);
2320 tp->md5sig_info = NULL;
2321 static_branch_slow_dec_deferred(&tcp_md5_needed);
2325 /* Clean up a referenced TCP bind bucket. */
2326 if (inet_csk(sk)->icsk_bind_hash)
2329 BUG_ON(rcu_access_pointer(tp->fastopen_rsk));
2331 /* If the socket was aborted during the connect operation */
2332 tcp_free_fastopen_req(tp);
2333 tcp_fastopen_destroy_cipher(sk);
2334 tcp_saved_syn_free(tp);
2336 sk_sockets_allocated_dec(sk);
2338 EXPORT_SYMBOL(tcp_v4_destroy_sock);
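/* Exported because the IPv6 side is assumed to reuse this teardown path
 * (tcp_v6_destroy_sock() wrapping tcp_v4_destroy_sock()) rather than
 * duplicating the timer, ULP, MD5 and fastopen cleanup above.
 */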
2340 #ifdef CONFIG_PROC_FS
2341 /* Proc filesystem TCP sock list dumping. */
2343 static unsigned short seq_file_family(const struct seq_file *seq);
2345 static bool seq_sk_match(struct seq_file *seq, const struct sock *sk)
2347 unsigned short family = seq_file_family(seq);
2349 /* AF_UNSPEC is used as a match-all */
2350 return ((family == AF_UNSPEC || family == sk->sk_family) &&
2351 net_eq(sock_net(sk), seq_file_net(seq)));
2354 /* Find a non-empty bucket (starting from st->bucket)
2355 * and return the first sk from it.
2357 static void *listening_get_first(struct seq_file *seq)
2359 struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2360 struct tcp_iter_state *st = seq->private;
2363 for (; st->bucket <= hinfo->lhash2_mask; st->bucket++) {
2364 struct inet_listen_hashbucket *ilb2;
2365 struct hlist_nulls_node *node;
2368 ilb2 = &hinfo->lhash2[st->bucket];
2369 if (hlist_nulls_empty(&ilb2->nulls_head))
2372 spin_lock(&ilb2->lock);
2373 sk_nulls_for_each(sk, node, &ilb2->nulls_head) {
2374 if (seq_sk_match(seq, sk))
2377 spin_unlock(&ilb2->lock);
2383 /* Find the next sk of "cur" within the same bucket (i.e. st->bucket).
2384 * If "cur" is the last one in the st->bucket,
2385 * call listening_get_first() to return the first sk of the next non-empty bucket.
2388 static void *listening_get_next(struct seq_file *seq, void *cur)
2390 struct tcp_iter_state *st = seq->private;
2391 struct inet_listen_hashbucket *ilb2;
2392 struct hlist_nulls_node *node;
2393 struct inet_hashinfo *hinfo;
2394 struct sock *sk = cur;
2399 sk = sk_nulls_next(sk);
2400 sk_nulls_for_each_from(sk, node) {
2401 if (seq_sk_match(seq, sk))
2405 hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2406 ilb2 = &hinfo->lhash2[st->bucket];
2407 spin_unlock(&ilb2->lock);
2409 return listening_get_first(seq);
2412 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2414 struct tcp_iter_state *st = seq->private;
2419 rc = listening_get_first(seq);
2421 while (rc && *pos) {
2422 rc = listening_get_next(seq, rc);
2428 static inline bool empty_bucket(struct inet_hashinfo *hinfo,
2429 const struct tcp_iter_state *st)
2431 return hlist_nulls_empty(&hinfo->ehash[st->bucket].chain);
2435 * Get first established socket starting from bucket given in st->bucket.
2436 * If st->bucket is zero, the very first socket in the hash is returned.
2438 static void *established_get_first(struct seq_file *seq)
2440 struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2441 struct tcp_iter_state *st = seq->private;
2444 for (; st->bucket <= hinfo->ehash_mask; ++st->bucket) {
2446 struct hlist_nulls_node *node;
2447 spinlock_t *lock = inet_ehash_lockp(hinfo, st->bucket);
2449 /* Lockless fast path for the common case of empty buckets */
2450 if (empty_bucket(hinfo, st))
2454 sk_nulls_for_each(sk, node, &hinfo->ehash[st->bucket].chain) {
2455 if (seq_sk_match(seq, sk))
2458 spin_unlock_bh(lock);
2464 static void *established_get_next(struct seq_file *seq, void *cur)
2466 struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2467 struct tcp_iter_state *st = seq->private;
2468 struct hlist_nulls_node *node;
2469 struct sock *sk = cur;
2474 sk = sk_nulls_next(sk);
2476 sk_nulls_for_each_from(sk, node) {
2477 if (seq_sk_match(seq, sk))
2481 spin_unlock_bh(inet_ehash_lockp(hinfo, st->bucket));
2483 return established_get_first(seq);
2486 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2488 struct tcp_iter_state *st = seq->private;
2492 rc = established_get_first(seq);
2495 rc = established_get_next(seq, rc);
2501 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2504 struct tcp_iter_state *st = seq->private;
2506 st->state = TCP_SEQ_STATE_LISTENING;
2507 rc = listening_get_idx(seq, &pos);
2510 st->state = TCP_SEQ_STATE_ESTABLISHED;
2511 rc = established_get_idx(seq, pos);
2517 static void *tcp_seek_last_pos(struct seq_file *seq)
2519 struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2520 struct tcp_iter_state *st = seq->private;
2521 int bucket = st->bucket;
2522 int offset = st->offset;
2523 int orig_num = st->num;
2526 switch (st->state) {
2527 case TCP_SEQ_STATE_LISTENING:
2528 if (st->bucket > hinfo->lhash2_mask)
2530 rc = listening_get_first(seq);
2531 while (offset-- && rc && bucket == st->bucket)
2532 rc = listening_get_next(seq, rc);
2536 st->state = TCP_SEQ_STATE_ESTABLISHED;
2538 case TCP_SEQ_STATE_ESTABLISHED:
2539 if (st->bucket > hinfo->ehash_mask)
2541 rc = established_get_first(seq);
2542 while (offset-- && rc && bucket == st->bucket)
2543 rc = established_get_next(seq, rc);
2551 void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2553 struct tcp_iter_state *st = seq->private;
2556 if (*pos && *pos == st->last_pos) {
2557 rc = tcp_seek_last_pos(seq);
2562 st->state = TCP_SEQ_STATE_LISTENING;
2566 rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2569 st->last_pos = *pos;
2572 EXPORT_SYMBOL(tcp_seq_start);
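/* Simplified sketch of the seq_file contract these handlers implement
 * (standard seq_file semantics assumed):
 *
 *	p = start(seq, &pos);		// takes the relevant bucket lock
 *	while (p) {
 *		show(seq, p);
 *		p = next(seq, p, &pos);	// may hop buckets, re-locking
 *	}
 *	stop(seq, p);			// drops whatever lock is still held
 *
 * st->last_pos lets a re-entered start() resume from st->bucket/st->offset
 * via tcp_seek_last_pos() instead of rewalking from the first bucket.
 */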
2574 void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2576 struct tcp_iter_state *st = seq->private;
2579 if (v == SEQ_START_TOKEN) {
2580 rc = tcp_get_idx(seq, 0);
2584 switch (st->state) {
2585 case TCP_SEQ_STATE_LISTENING:
2586 rc = listening_get_next(seq, v);
2588 st->state = TCP_SEQ_STATE_ESTABLISHED;
2591 rc = established_get_first(seq);
2594 case TCP_SEQ_STATE_ESTABLISHED:
2595 rc = established_get_next(seq, v);
2600 st->last_pos = *pos;
2603 EXPORT_SYMBOL(tcp_seq_next);
2605 void tcp_seq_stop(struct seq_file *seq, void *v)
2607 struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2608 struct tcp_iter_state *st = seq->private;
2610 switch (st->state) {
2611 case TCP_SEQ_STATE_LISTENING:
2612 if (v != SEQ_START_TOKEN)
2613 spin_unlock(&hinfo->lhash2[st->bucket].lock);
2615 case TCP_SEQ_STATE_ESTABLISHED:
2617 spin_unlock_bh(inet_ehash_lockp(hinfo, st->bucket));
2621 EXPORT_SYMBOL(tcp_seq_stop);
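/* Locking note: listening buckets are taken with plain spin_lock()
 * (the listening hash is assumed to be modified from process context
 * only), while established buckets use the _bh variants; stop() above
 * must release exactly what start()/next() left held.
 */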
2623 static void get_openreq4(const struct request_sock *req,
2624 struct seq_file *f, int i)
2626 const struct inet_request_sock *ireq = inet_rsk(req);
2627 long delta = req->rsk_timer.expires - jiffies;
2629 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2630 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2635 ntohs(ireq->ir_rmt_port),
2637 0, 0, /* could print option size, but that is af dependent. */
2638 1, /* timers active (only the expire timer) */
2639 jiffies_delta_to_clock_t(delta),
2641 from_kuid_munged(seq_user_ns(f),
2642 sock_i_uid(req->rsk_listener)),
2643 0, /* non standard timer */
2644 0, /* open_requests have no inode */
2649 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2652 unsigned long timer_expires;
2653 const struct tcp_sock *tp = tcp_sk(sk);
2654 const struct inet_connection_sock *icsk = inet_csk(sk);
2655 const struct inet_sock *inet = inet_sk(sk);
2656 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
2657 __be32 dest = inet->inet_daddr;
2658 __be32 src = inet->inet_rcv_saddr;
2659 __u16 destp = ntohs(inet->inet_dport);
2660 __u16 srcp = ntohs(inet->inet_sport);
2664 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2665 icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
2666 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2668 timer_expires = icsk->icsk_timeout;
2669 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2671 timer_expires = icsk->icsk_timeout;
2672 } else if (timer_pending(&sk->sk_timer)) {
2674 timer_expires = sk->sk_timer.expires;
2677 timer_expires = jiffies;
2680 state = inet_sk_state_load(sk);
2681 if (state == TCP_LISTEN)
2682 rx_queue = READ_ONCE(sk->sk_ack_backlog);
2684 /* Because we don't lock the socket,
2685 * we might find a transient negative value.
2687 rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
2688 READ_ONCE(tp->copied_seq), 0);
2690 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2691 "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2692 i, src, srcp, dest, destp, state,
2693 READ_ONCE(tp->write_seq) - tp->snd_una,
2696 jiffies_delta_to_clock_t(timer_expires - jiffies),
2697 icsk->icsk_retransmits,
2698 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2699 icsk->icsk_probes_out,
2701 refcount_read(&sk->sk_refcnt), sk,
2702 jiffies_to_clock_t(icsk->icsk_rto),
2703 jiffies_to_clock_t(icsk->icsk_ack.ato),
2704 (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sk),
2706 state == TCP_LISTEN ?
2707 fastopenq->max_qlen :
2708 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2711 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2712 struct seq_file *f, int i)
2714 long delta = tw->tw_timer.expires - jiffies;
2718 dest = tw->tw_daddr;
2719 src = tw->tw_rcv_saddr;
2720 destp = ntohs(tw->tw_dport);
2721 srcp = ntohs(tw->tw_sport);
2723 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2724 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2725 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2726 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2727 refcount_read(&tw->tw_refcnt), tw);
2732 static int tcp4_seq_show(struct seq_file *seq, void *v)
2734 struct tcp_iter_state *st;
2735 struct sock *sk = v;
2737 seq_setwidth(seq, TMPSZ - 1);
2738 if (v == SEQ_START_TOKEN) {
2739 seq_puts(seq, " sl local_address rem_address st tx_queue "
2740 "rx_queue tr tm->when retrnsmt uid timeout "
2746 if (sk->sk_state == TCP_TIME_WAIT)
2747 get_timewait4_sock(v, seq, st->num);
2748 else if (sk->sk_state == TCP_NEW_SYN_RECV)
2749 get_openreq4(v, seq, st->num);
2751 get_tcp4_sock(v, seq, st->num);
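/* Illustrative /proc/net/tcp entry as formatted by get_tcp4_sock()
 * above (values made up):
 *
 *   0: 0100007F:0016 00000000:0000 0A 00000000:00000000 00:00000000 00000000     0        0 12345 1 0000000000000000 100 0 0 10 0
 *
 * 0100007F:0016 is 127.0.0.1 port 22 (the address is the raw __be32
 * printed with %08X, so its byte order is host-dependent; the port is
 * hex), state 0A is TCP_LISTEN, and the remaining columns follow the
 * header emitted for SEQ_START_TOKEN.
 */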
2757 #ifdef CONFIG_BPF_SYSCALL
2758 struct bpf_tcp_iter_state {
2759 struct tcp_iter_state state;
2760 unsigned int cur_sk;
2761 unsigned int end_sk;
2762 unsigned int max_sk;
2763 struct sock **batch;
2764 bool st_bucket_done;
2767 struct bpf_iter__tcp {
2768 __bpf_md_ptr(struct bpf_iter_meta *, meta);
2769 __bpf_md_ptr(struct sock_common *, sk_common);
2770 uid_t uid __aligned(8);
2773 static int tcp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
2774 struct sock_common *sk_common, uid_t uid)
2776 struct bpf_iter__tcp ctx;
2778 meta->seq_num--; /* skip SEQ_START_TOKEN */
2780 ctx.sk_common = sk_common;
2782 return bpf_iter_run_prog(prog, &ctx);
2785 static void bpf_iter_tcp_put_batch(struct bpf_tcp_iter_state *iter)
2787 while (iter->cur_sk < iter->end_sk)
2788 sock_gen_put(iter->batch[iter->cur_sk++]);
2791 static int bpf_iter_tcp_realloc_batch(struct bpf_tcp_iter_state *iter,
2792 unsigned int new_batch_sz)
2794 struct sock **new_batch;
2796 new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz,
2797 GFP_USER | __GFP_NOWARN);
2801 bpf_iter_tcp_put_batch(iter);
2802 kvfree(iter->batch);
2803 iter->batch = new_batch;
2804 iter->max_sk = new_batch_sz;
2809 static unsigned int bpf_iter_tcp_listening_batch(struct seq_file *seq,
2810 struct sock *start_sk)
2812 struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2813 struct bpf_tcp_iter_state *iter = seq->private;
2814 struct tcp_iter_state *st = &iter->state;
2815 struct hlist_nulls_node *node;
2816 unsigned int expected = 1;
2819 sock_hold(start_sk);
2820 iter->batch[iter->end_sk++] = start_sk;
2822 sk = sk_nulls_next(start_sk);
2823 sk_nulls_for_each_from(sk, node) {
2824 if (seq_sk_match(seq, sk)) {
2825 if (iter->end_sk < iter->max_sk) {
2827 iter->batch[iter->end_sk++] = sk;
2832 spin_unlock(&hinfo->lhash2[st->bucket].lock);
2837 static unsigned int bpf_iter_tcp_established_batch(struct seq_file *seq,
2838 struct sock *start_sk)
2840 struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2841 struct bpf_tcp_iter_state *iter = seq->private;
2842 struct tcp_iter_state *st = &iter->state;
2843 struct hlist_nulls_node *node;
2844 unsigned int expected = 1;
2847 sock_hold(start_sk);
2848 iter->batch[iter->end_sk++] = start_sk;
2850 sk = sk_nulls_next(start_sk);
2851 sk_nulls_for_each_from(sk, node) {
2852 if (seq_sk_match(seq, sk)) {
2853 if (iter->end_sk < iter->max_sk) {
2855 iter->batch[iter->end_sk++] = sk;
2860 spin_unlock_bh(inet_ehash_lockp(hinfo, st->bucket));
2865 static struct sock *bpf_iter_tcp_batch(struct seq_file *seq)
2867 struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2868 struct bpf_tcp_iter_state *iter = seq->private;
2869 struct tcp_iter_state *st = &iter->state;
2870 unsigned int expected;
2871 bool resized = false;
2874 /* The st->bucket is done. Advance directly to the next
2875 * bucket instead of having tcp_seek_last_pos() skip entries
2876 * one by one in the current bucket only to find out that
2877 * it has to advance to the next bucket anyway.
2879 if (iter->st_bucket_done) {
2882 if (st->state == TCP_SEQ_STATE_LISTENING &&
2883 st->bucket > hinfo->lhash2_mask) {
2884 st->state = TCP_SEQ_STATE_ESTABLISHED;
2890 /* Get a new batch */
2893 iter->st_bucket_done = false;
2895 sk = tcp_seek_last_pos(seq);
2897 return NULL; /* Done */
2899 if (st->state == TCP_SEQ_STATE_LISTENING)
2900 expected = bpf_iter_tcp_listening_batch(seq, sk);
2902 expected = bpf_iter_tcp_established_batch(seq, sk);
2904 if (iter->end_sk == expected) {
2905 iter->st_bucket_done = true;
2909 if (!resized && !bpf_iter_tcp_realloc_batch(iter, expected * 3 / 2)) {
2917 static void *bpf_iter_tcp_seq_start(struct seq_file *seq, loff_t *pos)
2919 /* bpf iter does not support lseek, so it always
2920 * continues from where it was stop()-ped.
2923 return bpf_iter_tcp_batch(seq);
2925 return SEQ_START_TOKEN;
2928 static void *bpf_iter_tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2930 struct bpf_tcp_iter_state *iter = seq->private;
2931 struct tcp_iter_state *st = &iter->state;
2934 /* Whenever seq_next() is called, the iter->cur_sk is
2935 * done with seq_show(), so advance to the next sk in the batch.
2938 if (iter->cur_sk < iter->end_sk) {
2939 /* Keeping st->num consistent in tcp_iter_state.
2940 * bpf_iter_tcp does not use st->num.
2941 * meta.seq_num is used instead.
2944 /* Move st->offset to the next sk in the bucket such that
2945 * the future start() will resume at st->offset in
2946 * st->bucket. See tcp_seek_last_pos().
2949 sock_gen_put(iter->batch[iter->cur_sk++]);
2952 if (iter->cur_sk < iter->end_sk)
2953 sk = iter->batch[iter->cur_sk];
2955 sk = bpf_iter_tcp_batch(seq);
2958 /* Keeping st->last_pos consistent in tcp_iter_state.
2959 * bpf iter does not do lseek, so st->last_pos always equals *pos.
2961 st->last_pos = *pos;
2965 static int bpf_iter_tcp_seq_show(struct seq_file *seq, void *v)
2967 struct bpf_iter_meta meta;
2968 struct bpf_prog *prog;
2969 struct sock *sk = v;
2973 if (v == SEQ_START_TOKEN)
2976 if (sk_fullsock(sk))
2979 if (unlikely(sk_unhashed(sk))) {
2984 if (sk->sk_state == TCP_TIME_WAIT) {
2986 } else if (sk->sk_state == TCP_NEW_SYN_RECV) {
2987 const struct request_sock *req = v;
2989 uid = from_kuid_munged(seq_user_ns(seq),
2990 sock_i_uid(req->rsk_listener));
2992 uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
2996 prog = bpf_iter_get_info(&meta, false);
2997 ret = tcp_prog_seq_show(prog, &meta, v, uid);
3000 if (sk_fullsock(sk))
3006 static void bpf_iter_tcp_seq_stop(struct seq_file *seq, void *v)
3008 struct bpf_tcp_iter_state *iter = seq->private;
3009 struct bpf_iter_meta meta;
3010 struct bpf_prog *prog;
3014 prog = bpf_iter_get_info(&meta, true);
3016 (void)tcp_prog_seq_show(prog, &meta, v, 0);
3019 if (iter->cur_sk < iter->end_sk) {
3020 bpf_iter_tcp_put_batch(iter);
3021 iter->st_bucket_done = false;
3025 static const struct seq_operations bpf_iter_tcp_seq_ops = {
3026 .show = bpf_iter_tcp_seq_show,
3027 .start = bpf_iter_tcp_seq_start,
3028 .next = bpf_iter_tcp_seq_next,
3029 .stop = bpf_iter_tcp_seq_stop,
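/* Simplified view of the batching scheme implemented by the ops above:
 * start()/next() fill iter->batch from one bucket at a time (taking a
 * reference on every matching sk, then dropping the bucket lock), so
 * that show() can run the bpf prog on iter->batch[iter->cur_sk] with no
 * bucket lock held, which is what allows helpers such as the setsockopt
 * proto exposed below to be used from the iterator; stop() releases
 * whatever part of the batch was not consumed.
 */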
3032 static unsigned short seq_file_family(const struct seq_file *seq)
3034 const struct tcp_seq_afinfo *afinfo;
3036 #ifdef CONFIG_BPF_SYSCALL
3037 /* Iterated from bpf_iter. Let the bpf prog filter instead. */
3038 if (seq->op == &bpf_iter_tcp_seq_ops)
3042 /* Iterated from proc fs */
3043 afinfo = pde_data(file_inode(seq->file));
3044 return afinfo->family;
3047 static const struct seq_operations tcp4_seq_ops = {
3048 .show = tcp4_seq_show,
3049 .start = tcp_seq_start,
3050 .next = tcp_seq_next,
3051 .stop = tcp_seq_stop,
3054 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
3058 static int __net_init tcp4_proc_init_net(struct net *net)
3060 if (!proc_create_net_data("tcp", 0444, net->proc_net, &tcp4_seq_ops,
3061 sizeof(struct tcp_iter_state), &tcp4_seq_afinfo))
3066 static void __net_exit tcp4_proc_exit_net(struct net *net)
3068 remove_proc_entry("tcp", net->proc_net);
3071 static struct pernet_operations tcp4_net_ops = {
3072 .init = tcp4_proc_init_net,
3073 .exit = tcp4_proc_exit_net,
3076 int __init tcp4_proc_init(void)
3078 return register_pernet_subsys(&tcp4_net_ops);
3081 void tcp4_proc_exit(void)
3083 unregister_pernet_subsys(&tcp4_net_ops);
3085 #endif /* CONFIG_PROC_FS */
3087 /* @wake is one when sk_stream_write_space() calls us.
3088 * This sends EPOLLOUT only if notsent_bytes is below half the limit.
3089 * This mimics the strategy used in sock_def_write_space().
3091 bool tcp_stream_memory_free(const struct sock *sk, int wake)
3093 const struct tcp_sock *tp = tcp_sk(sk);
3094 u32 notsent_bytes = READ_ONCE(tp->write_seq) -
3095 READ_ONCE(tp->snd_nxt);
3097 return (notsent_bytes << wake) < tcp_notsent_lowat(tp);
3099 EXPORT_SYMBOL(tcp_stream_memory_free);
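/* Worked example of the check above with made-up numbers: if
 * tcp_notsent_lowat(tp) is 131072 (128 KB) and wake == 1, then
 *
 *	(notsent_bytes << 1) < 131072  <=>  notsent_bytes < 65536
 *
 * so EPOLLOUT is only signalled from sk_stream_write_space() once less
 * than half of the not-sent budget remains queued.
 */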
3101 struct proto tcp_prot = {
3103 .owner = THIS_MODULE,
3105 .pre_connect = tcp_v4_pre_connect,
3106 .connect = tcp_v4_connect,
3107 .disconnect = tcp_disconnect,
3108 .accept = inet_csk_accept,
3110 .init = tcp_v4_init_sock,
3111 .destroy = tcp_v4_destroy_sock,
3112 .shutdown = tcp_shutdown,
3113 .setsockopt = tcp_setsockopt,
3114 .getsockopt = tcp_getsockopt,
3115 .bpf_bypass_getsockopt = tcp_bpf_bypass_getsockopt,
3116 .keepalive = tcp_set_keepalive,
3117 .recvmsg = tcp_recvmsg,
3118 .sendmsg = tcp_sendmsg,
3119 .splice_eof = tcp_splice_eof,
3120 .backlog_rcv = tcp_v4_do_rcv,
3121 .release_cb = tcp_release_cb,
3123 .unhash = inet_unhash,
3124 .get_port = inet_csk_get_port,
3125 .put_port = inet_put_port,
3126 #ifdef CONFIG_BPF_SYSCALL
3127 .psock_update_sk_prot = tcp_bpf_update_proto,
3129 .enter_memory_pressure = tcp_enter_memory_pressure,
3130 .leave_memory_pressure = tcp_leave_memory_pressure,
3131 .stream_memory_free = tcp_stream_memory_free,
3132 .sockets_allocated = &tcp_sockets_allocated,
3133 .orphan_count = &tcp_orphan_count,
3135 .memory_allocated = &tcp_memory_allocated,
3136 .per_cpu_fw_alloc = &tcp_memory_per_cpu_fw_alloc,
3138 .memory_pressure = &tcp_memory_pressure,
3139 .sysctl_mem = sysctl_tcp_mem,
3140 .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem),
3141 .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_tcp_rmem),
3142 .max_header = MAX_TCP_HEADER,
3143 .obj_size = sizeof(struct tcp_sock),
3144 .slab_flags = SLAB_TYPESAFE_BY_RCU,
3145 .twsk_prot = &tcp_timewait_sock_ops,
3146 .rsk_prot = &tcp_request_sock_ops,
3148 .no_autobind = true,
3149 .diag_destroy = tcp_abort,
3151 EXPORT_SYMBOL(tcp_prot);
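/* tcp_prot is not used directly here; it is assumed to be registered by
 * the inet core (af_inet.c) roughly as:
 *
 *	proto_register(&tcp_prot, 1);
 *	// plus an inet_protosw entry mapping SOCK_STREAM/IPPROTO_TCP
 *	// to { .prot = &tcp_prot, .ops = &inet_stream_ops }
 *
 * so that socket(AF_INET, SOCK_STREAM, 0) resolves to these callbacks.
 */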
3153 static void __net_exit tcp_sk_exit(struct net *net)
3155 if (net->ipv4.tcp_congestion_control)
3156 bpf_module_put(net->ipv4.tcp_congestion_control,
3157 net->ipv4.tcp_congestion_control->owner);
3160 static void __net_init tcp_set_hashinfo(struct net *net)
3162 struct inet_hashinfo *hinfo;
3163 unsigned int ehash_entries;
3164 struct net *old_net;
3166 if (net_eq(net, &init_net))
3169 old_net = current->nsproxy->net_ns;
3170 ehash_entries = READ_ONCE(old_net->ipv4.sysctl_tcp_child_ehash_entries);
3174 ehash_entries = roundup_pow_of_two(ehash_entries);
3175 hinfo = inet_pernet_hashinfo_alloc(&tcp_hashinfo, ehash_entries);
3177 pr_warn("Failed to allocate TCP ehash (entries: %u) "
3178 "for a netns, fallback to the global one\n",
3181 hinfo = &tcp_hashinfo;
3182 ehash_entries = tcp_hashinfo.ehash_mask + 1;
3185 net->ipv4.tcp_death_row.hashinfo = hinfo;
3186 net->ipv4.tcp_death_row.sysctl_max_tw_buckets = ehash_entries / 2;
3187 net->ipv4.sysctl_max_syn_backlog = max(128U, ehash_entries / 128);
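/* Example of the derived defaults (illustrative): a child netns created
 * with sysctl_tcp_child_ehash_entries = 8192 gets
 *
 *	sysctl_max_tw_buckets  = 8192 / 2             = 4096
 *	sysctl_max_syn_backlog = max(128, 8192 / 128) = 128
 *
 * while a netns without that sysctl set (including init_net) falls back
 * to the global tcp_hashinfo and derives both values from its boot-time
 * size instead.
 */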
3190 static int __net_init tcp_sk_init(struct net *net)
3192 net->ipv4.sysctl_tcp_ecn = 2;
3193 net->ipv4.sysctl_tcp_ecn_fallback = 1;
3195 net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
3196 net->ipv4.sysctl_tcp_min_snd_mss = TCP_MIN_SND_MSS;
3197 net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
3198 net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
3199 net->ipv4.sysctl_tcp_mtu_probe_floor = TCP_MIN_SND_MSS;
3201 net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
3202 net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
3203 net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;
3205 net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
3206 net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
3207 net->ipv4.sysctl_tcp_syncookies = 1;
3208 net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
3209 net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
3210 net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
3211 net->ipv4.sysctl_tcp_orphan_retries = 0;
3212 net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
3213 net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
3214 net->ipv4.sysctl_tcp_tw_reuse = 2;
3215 net->ipv4.sysctl_tcp_no_ssthresh_metrics_save = 1;
3217 refcount_set(&net->ipv4.tcp_death_row.tw_refcount, 1);
3218 tcp_set_hashinfo(net);
3220 net->ipv4.sysctl_tcp_sack = 1;
3221 net->ipv4.sysctl_tcp_window_scaling = 1;
3222 net->ipv4.sysctl_tcp_timestamps = 1;
3223 net->ipv4.sysctl_tcp_early_retrans = 3;
3224 net->ipv4.sysctl_tcp_recovery = TCP_RACK_LOSS_DETECTION;
3225 net->ipv4.sysctl_tcp_slow_start_after_idle = 1; /* By default, RFC2861 behavior. */
3226 net->ipv4.sysctl_tcp_retrans_collapse = 1;
3227 net->ipv4.sysctl_tcp_max_reordering = 300;
3228 net->ipv4.sysctl_tcp_dsack = 1;
3229 net->ipv4.sysctl_tcp_app_win = 31;
3230 net->ipv4.sysctl_tcp_adv_win_scale = 1;
3231 net->ipv4.sysctl_tcp_frto = 2;
3232 net->ipv4.sysctl_tcp_moderate_rcvbuf = 1;
3233 /* This limits the percentage of the congestion window which we
3234 * will allow a single TSO frame to consume. Building TSO frames
3235 * which are too large can cause TCP streams to be bursty.
3237 net->ipv4.sysctl_tcp_tso_win_divisor = 3;
3238 /* Default TSQ limit of 16 TSO segments */
3239 net->ipv4.sysctl_tcp_limit_output_bytes = 16 * 65536;
3241 /* RFC 5961 challenge ACK rate limiting, per net-ns, disabled by default. */
3242 net->ipv4.sysctl_tcp_challenge_ack_limit = INT_MAX;
3244 net->ipv4.sysctl_tcp_min_tso_segs = 2;
3245 net->ipv4.sysctl_tcp_tso_rtt_log = 9; /* 2^9 = 512 usec */
3246 net->ipv4.sysctl_tcp_min_rtt_wlen = 300;
3247 net->ipv4.sysctl_tcp_autocorking = 1;
3248 net->ipv4.sysctl_tcp_invalid_ratelimit = HZ/2;
3249 net->ipv4.sysctl_tcp_pacing_ss_ratio = 200;
3250 net->ipv4.sysctl_tcp_pacing_ca_ratio = 120;
3251 if (net != &init_net) {
3252 memcpy(net->ipv4.sysctl_tcp_rmem,
3253 init_net.ipv4.sysctl_tcp_rmem,
3254 sizeof(init_net.ipv4.sysctl_tcp_rmem));
3255 memcpy(net->ipv4.sysctl_tcp_wmem,
3256 init_net.ipv4.sysctl_tcp_wmem,
3257 sizeof(init_net.ipv4.sysctl_tcp_wmem));
3259 net->ipv4.sysctl_tcp_comp_sack_delay_ns = NSEC_PER_MSEC;
3260 net->ipv4.sysctl_tcp_comp_sack_slack_ns = 100 * NSEC_PER_USEC;
3261 net->ipv4.sysctl_tcp_comp_sack_nr = 44;
3262 net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
3263 net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 0;
3264 atomic_set(&net->ipv4.tfo_active_disable_times, 0);
3266 /* Set default values for PLB */
3267 net->ipv4.sysctl_tcp_plb_enabled = 0; /* Disabled by default */
3268 net->ipv4.sysctl_tcp_plb_idle_rehash_rounds = 3;
3269 net->ipv4.sysctl_tcp_plb_rehash_rounds = 12;
3270 net->ipv4.sysctl_tcp_plb_suspend_rto_sec = 60;
3271 /* Default congestion threshold for PLB to mark a round is 50% */
3272 net->ipv4.sysctl_tcp_plb_cong_thresh = (1 << TCP_PLB_SCALE) / 2;
3274 /* Reno is always built in */
3275 if (!net_eq(net, &init_net) &&
3276 bpf_try_module_get(init_net.ipv4.tcp_congestion_control,
3277 init_net.ipv4.tcp_congestion_control->owner))
3278 net->ipv4.tcp_congestion_control = init_net.ipv4.tcp_congestion_control;
3280 net->ipv4.tcp_congestion_control = &tcp_reno;
3282 net->ipv4.sysctl_tcp_syn_linear_timeouts = 4;
3283 net->ipv4.sysctl_tcp_shrink_window = 0;
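/* Most of the fields initialised above are only the per-netns defaults
 * behind the usual sysctls, e.g. (names as in ip-sysctl.rst):
 *
 *	net->ipv4.sysctl_tcp_syncookies     ->  /proc/sys/net/ipv4/tcp_syncookies
 *	net->ipv4.sysctl_tcp_notsent_lowat  ->  /proc/sys/net/ipv4/tcp_notsent_lowat
 *
 * so an admin can still override them after the netns is created.
 */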
3288 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
3292 tcp_twsk_purge(net_exit_list, AF_INET);
3294 list_for_each_entry(net, net_exit_list, exit_list) {
3295 inet_pernet_hashinfo_free(net->ipv4.tcp_death_row.hashinfo);
3296 WARN_ON_ONCE(!refcount_dec_and_test(&net->ipv4.tcp_death_row.tw_refcount));
3297 tcp_fastopen_ctx_destroy(net);
3301 static struct pernet_operations __net_initdata tcp_sk_ops = {
3302 .init = tcp_sk_init,
3303 .exit = tcp_sk_exit,
3304 .exit_batch = tcp_sk_exit_batch,
3307 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3308 DEFINE_BPF_ITER_FUNC(tcp, struct bpf_iter_meta *meta,
3309 struct sock_common *sk_common, uid_t uid)
3311 #define INIT_BATCH_SZ 16
3313 static int bpf_iter_init_tcp(void *priv_data, struct bpf_iter_aux_info *aux)
3315 struct bpf_tcp_iter_state *iter = priv_data;
3318 err = bpf_iter_init_seq_net(priv_data, aux);
3322 err = bpf_iter_tcp_realloc_batch(iter, INIT_BATCH_SZ);
3324 bpf_iter_fini_seq_net(priv_data);
3331 static void bpf_iter_fini_tcp(void *priv_data)
3333 struct bpf_tcp_iter_state *iter = priv_data;
3335 bpf_iter_fini_seq_net(priv_data);
3336 kvfree(iter->batch);
3339 static const struct bpf_iter_seq_info tcp_seq_info = {
3340 .seq_ops = &bpf_iter_tcp_seq_ops,
3341 .init_seq_private = bpf_iter_init_tcp,
3342 .fini_seq_private = bpf_iter_fini_tcp,
3343 .seq_priv_size = sizeof(struct bpf_tcp_iter_state),
3346 static const struct bpf_func_proto *
3347 bpf_iter_tcp_get_func_proto(enum bpf_func_id func_id,
3348 const struct bpf_prog *prog)
3351 case BPF_FUNC_setsockopt:
3352 return &bpf_sk_setsockopt_proto;
3353 case BPF_FUNC_getsockopt:
3354 return &bpf_sk_getsockopt_proto;
3360 static struct bpf_iter_reg tcp_reg_info = {
3362 .ctx_arg_info_size = 1,
3364 { offsetof(struct bpf_iter__tcp, sk_common),
3365 PTR_TO_BTF_ID_OR_NULL | PTR_TRUSTED },
3367 .get_func_proto = bpf_iter_tcp_get_func_proto,
3368 .seq_info = &tcp_seq_info,
3371 static void __init bpf_iter_register(void)
3373 tcp_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON];
3374 if (bpf_iter_reg_target(&tcp_reg_info))
3375 pr_warn("Warning: could not register bpf iterator tcp\n");
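/* Illustrative userspace consumption of this iterator (libbpf-style
 * sketch, not part of the kernel build):
 *
 *	link = bpf_program__attach_iter(prog, NULL);	// prog is SEC("iter/tcp")
 *	fd = bpf_iter_create(bpf_link__fd(link));
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		;	// text produced via bpf_seq_printf() in the prog
 */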
3380 void __init tcp_v4_init(void)
3384 for_each_possible_cpu(cpu) {
3387 res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
3388 IPPROTO_TCP, &init_net);
3390 panic("Failed to create the TCP control socket.\n");
3391 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
3393 /* Please enforce IP_DF and IPID==0 for RST and
3394 * ACK sent in SYN-RECV and TIME-WAIT state.
3396 inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;
3398 per_cpu(ipv4_tcp_sk, cpu) = sk;
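/* One control socket per CPU is assumed so that reply paths such as
 * tcp_v4_send_reset() (used above for bogus segments) can transmit from
 * softirq context without contending on a shared socket; the
 * write-queue flag and IP_PMTUDISC_DO setting above apply to those
 * generated replies.
 */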
3400 if (register_pernet_subsys(&tcp_sk_ops))
3401 panic("Failed to create the TCP control socket.\n");
3403 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3404 bpf_iter_register();