OSDN Git Service

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
[tomoyo/tomoyo-test1.git] / net / ipv4 / tcp_ipv4.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * INET         An implementation of the TCP/IP protocol suite for the LINUX
4  *              operating system.  INET is implemented using the  BSD Socket
5  *              interface as the means of communication with the user level.
6  *
7  *              Implementation of the Transmission Control Protocol(TCP).
8  *
9  *              IPv4 specific functions
10  *
11  *              code split from:
12  *              linux/ipv4/tcp.c
13  *              linux/ipv4/tcp_input.c
14  *              linux/ipv4/tcp_output.c
15  *
16  *              See tcp.c for author information
17  */
18
19 /*
20  * Changes:
21  *              David S. Miller :       New socket lookup architecture.
22  *                                      This code is dedicated to John Dyson.
23  *              David S. Miller :       Change semantics of established hash,
24  *                                      half is devoted to TIME_WAIT sockets
25  *                                      and the rest go in the other half.
26  *              Andi Kleen :            Add support for syncookies and fixed
27  *                                      some bugs: ip options weren't passed to
28  *                                      the TCP layer, missed a check for an
29  *                                      ACK bit.
30  *              Andi Kleen :            Implemented fast path mtu discovery.
31  *                                      Fixed many serious bugs in the
32  *                                      request_sock handling and moved
33  *                                      most of it into the af independent code.
34  *                                      Added tail drop and some other bugfixes.
35  *                                      Added new listen semantics.
36  *              Mike McLagan    :       Routing by source
37  *      Juan Jose Ciarlante:            ip_dynaddr bits
38  *              Andi Kleen:             various fixes.
39  *      Vitaly E. Lavrov        :       Transparent proxy revived after year
40  *                                      coma.
41  *      Andi Kleen              :       Fix new listen.
42  *      Andi Kleen              :       Fix accept error reporting.
43  *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
44  *      Alexey Kuznetsov                allow both IPv4 and IPv6 sockets to bind
45  *                                      a single port at the same time.
46  */
47
48 #define pr_fmt(fmt) "TCP: " fmt
49
50 #include <linux/bottom_half.h>
51 #include <linux/types.h>
52 #include <linux/fcntl.h>
53 #include <linux/module.h>
54 #include <linux/random.h>
55 #include <linux/cache.h>
56 #include <linux/jhash.h>
57 #include <linux/init.h>
58 #include <linux/times.h>
59 #include <linux/slab.h>
60
61 #include <net/net_namespace.h>
62 #include <net/icmp.h>
63 #include <net/inet_hashtables.h>
64 #include <net/tcp.h>
65 #include <net/transp_v6.h>
66 #include <net/ipv6.h>
67 #include <net/inet_common.h>
68 #include <net/timewait_sock.h>
69 #include <net/xfrm.h>
70 #include <net/secure_seq.h>
71 #include <net/busy_poll.h>
72
73 #include <linux/inet.h>
74 #include <linux/ipv6.h>
75 #include <linux/stddef.h>
76 #include <linux/proc_fs.h>
77 #include <linux/seq_file.h>
78 #include <linux/inetdevice.h>
79 #include <linux/btf_ids.h>
80
81 #include <crypto/hash.h>
82 #include <linux/scatterlist.h>
83
84 #include <trace/events/tcp.h>
85
86 #ifdef CONFIG_TCP_MD5SIG
87 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
88                                __be32 daddr, __be32 saddr, const struct tcphdr *th);
89 #endif
90
91 struct inet_hashinfo tcp_hashinfo;
92 EXPORT_SYMBOL(tcp_hashinfo);
93
94 static DEFINE_PER_CPU(struct sock *, ipv4_tcp_sk);
95
96 static u32 tcp_v4_init_seq(const struct sk_buff *skb)
97 {
98         return secure_tcp_seq(ip_hdr(skb)->daddr,
99                               ip_hdr(skb)->saddr,
100                               tcp_hdr(skb)->dest,
101                               tcp_hdr(skb)->source);
102 }
103
104 static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
105 {
106         return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
107 }
108
109 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
110 {
111         int reuse = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tw_reuse);
112         const struct inet_timewait_sock *tw = inet_twsk(sktw);
113         const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
114         struct tcp_sock *tp = tcp_sk(sk);
115
116         if (reuse == 2) {
117                 /* Still does not detect *everything* that goes through
118                  * lo, since we require a loopback src or dst address
119                  * or direct binding to 'lo' interface.
120                  */
121                 bool loopback = false;
122                 if (tw->tw_bound_dev_if == LOOPBACK_IFINDEX)
123                         loopback = true;
124 #if IS_ENABLED(CONFIG_IPV6)
125                 if (tw->tw_family == AF_INET6) {
126                         if (ipv6_addr_loopback(&tw->tw_v6_daddr) ||
127                             ipv6_addr_v4mapped_loopback(&tw->tw_v6_daddr) ||
128                             ipv6_addr_loopback(&tw->tw_v6_rcv_saddr) ||
129                             ipv6_addr_v4mapped_loopback(&tw->tw_v6_rcv_saddr))
130                                 loopback = true;
131                 } else
132 #endif
133                 {
134                         if (ipv4_is_loopback(tw->tw_daddr) ||
135                             ipv4_is_loopback(tw->tw_rcv_saddr))
136                                 loopback = true;
137                 }
138                 if (!loopback)
139                         reuse = 0;
140         }
141
142         /* With PAWS, it is safe from the viewpoint
143            of data integrity. Even without PAWS it is safe provided sequence
144            spaces do not overlap i.e. at data rates <= 80Mbit/sec.
145
146            Actually, the idea is close to VJ's one, only timestamp cache is
147            held not per host, but per port pair and TW bucket is used as state
148            holder.
149
150            If TW bucket has been already destroyed we fall back to VJ's scheme
151            and use initial timestamp retrieved from peer table.
152          */
153         if (tcptw->tw_ts_recent_stamp &&
154             (!twp || (reuse && time_after32(ktime_get_seconds(),
155                                             tcptw->tw_ts_recent_stamp)))) {
156                 /* In case of repair and re-using TIME-WAIT sockets we still
157                  * want to be sure that it is safe as above but honor the
158                  * sequence numbers and time stamps set as part of the repair
159                  * process.
160                  *
161                  * Without this check re-using a TIME-WAIT socket with TCP
162                  * repair would accumulate a -1 on the repair assigned
163                  * sequence number. The first time it is reused the sequence
164                  * is -1, the second time -2, etc. This fixes that issue
165                  * without appearing to create any others.
166                  */
167                 if (likely(!tp->repair)) {
168                         u32 seq = tcptw->tw_snd_nxt + 65535 + 2;
169
170                         if (!seq)
171                                 seq = 1;
172                         WRITE_ONCE(tp->write_seq, seq);
173                         tp->rx_opt.ts_recent       = tcptw->tw_ts_recent;
174                         tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
175                 }
176                 sock_hold(sktw);
177                 return 1;
178         }
179
180         return 0;
181 }
182 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
183
184 static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr,
185                               int addr_len)
186 {
187         /* This check is replicated from tcp_v4_connect() and intended to
188          * prevent BPF program called below from accessing bytes that are out
189          * of the bound specified by user in addr_len.
190          */
191         if (addr_len < sizeof(struct sockaddr_in))
192                 return -EINVAL;
193
194         sock_owned_by_me(sk);
195
196         return BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr);
197 }
198
199 /* This will initiate an outgoing connection. */
200 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
201 {
202         struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
203         struct inet_timewait_death_row *tcp_death_row;
204         struct inet_sock *inet = inet_sk(sk);
205         struct tcp_sock *tp = tcp_sk(sk);
206         struct ip_options_rcu *inet_opt;
207         struct net *net = sock_net(sk);
208         __be16 orig_sport, orig_dport;
209         __be32 daddr, nexthop;
210         struct flowi4 *fl4;
211         struct rtable *rt;
212         int err;
213
214         if (addr_len < sizeof(struct sockaddr_in))
215                 return -EINVAL;
216
217         if (usin->sin_family != AF_INET)
218                 return -EAFNOSUPPORT;
219
220         nexthop = daddr = usin->sin_addr.s_addr;
221         inet_opt = rcu_dereference_protected(inet->inet_opt,
222                                              lockdep_sock_is_held(sk));
223         if (inet_opt && inet_opt->opt.srr) {
224                 if (!daddr)
225                         return -EINVAL;
226                 nexthop = inet_opt->opt.faddr;
227         }
228
229         orig_sport = inet->inet_sport;
230         orig_dport = usin->sin_port;
231         fl4 = &inet->cork.fl.u.ip4;
232         rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
233                               sk->sk_bound_dev_if, IPPROTO_TCP, orig_sport,
234                               orig_dport, sk);
235         if (IS_ERR(rt)) {
236                 err = PTR_ERR(rt);
237                 if (err == -ENETUNREACH)
238                         IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
239                 return err;
240         }
241
242         if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
243                 ip_rt_put(rt);
244                 return -ENETUNREACH;
245         }
246
247         if (!inet_opt || !inet_opt->opt.srr)
248                 daddr = fl4->daddr;
249
250         tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
251
252         if (!inet->inet_saddr) {
253                 err = inet_bhash2_update_saddr(sk,  &fl4->saddr, AF_INET);
254                 if (err) {
255                         ip_rt_put(rt);
256                         return err;
257                 }
258         } else {
259                 sk_rcv_saddr_set(sk, inet->inet_saddr);
260         }
261
262         if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
263                 /* Reset inherited state */
264                 tp->rx_opt.ts_recent       = 0;
265                 tp->rx_opt.ts_recent_stamp = 0;
266                 if (likely(!tp->repair))
267                         WRITE_ONCE(tp->write_seq, 0);
268         }
269
270         inet->inet_dport = usin->sin_port;
271         sk_daddr_set(sk, daddr);
272
273         inet_csk(sk)->icsk_ext_hdr_len = 0;
274         if (inet_opt)
275                 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
276
277         tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
278
279         /* Socket identity is still unknown (sport may be zero).
280          * However we set state to SYN-SENT and not releasing socket
281          * lock select source port, enter ourselves into the hash tables and
282          * complete initialization after this.
283          */
284         tcp_set_state(sk, TCP_SYN_SENT);
285         err = inet_hash_connect(tcp_death_row, sk);
286         if (err)
287                 goto failure;
288
289         sk_set_txhash(sk);
290
291         rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
292                                inet->inet_sport, inet->inet_dport, sk);
293         if (IS_ERR(rt)) {
294                 err = PTR_ERR(rt);
295                 rt = NULL;
296                 goto failure;
297         }
298         /* OK, now commit destination to socket.  */
299         sk->sk_gso_type = SKB_GSO_TCPV4;
300         sk_setup_caps(sk, &rt->dst);
301         rt = NULL;
302
303         if (likely(!tp->repair)) {
304                 if (!tp->write_seq)
305                         WRITE_ONCE(tp->write_seq,
306                                    secure_tcp_seq(inet->inet_saddr,
307                                                   inet->inet_daddr,
308                                                   inet->inet_sport,
309                                                   usin->sin_port));
310                 tp->tsoffset = secure_tcp_ts_off(net, inet->inet_saddr,
311                                                  inet->inet_daddr);
312         }
313
314         inet->inet_id = get_random_u16();
315
316         if (tcp_fastopen_defer_connect(sk, &err))
317                 return err;
318         if (err)
319                 goto failure;
320
321         err = tcp_connect(sk);
322
323         if (err)
324                 goto failure;
325
326         return 0;
327
328 failure:
329         /*
330          * This unhashes the socket and releases the local port,
331          * if necessary.
332          */
333         tcp_set_state(sk, TCP_CLOSE);
334         inet_bhash2_reset_saddr(sk);
335         ip_rt_put(rt);
336         sk->sk_route_caps = 0;
337         inet->inet_dport = 0;
338         return err;
339 }
340 EXPORT_SYMBOL(tcp_v4_connect);
341
342 /*
343  * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
344  * It can be called through tcp_release_cb() if socket was owned by user
345  * at the time tcp_v4_err() was called to handle ICMP message.
346  */
347 void tcp_v4_mtu_reduced(struct sock *sk)
348 {
349         struct inet_sock *inet = inet_sk(sk);
350         struct dst_entry *dst;
351         u32 mtu;
352
353         if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
354                 return;
355         mtu = READ_ONCE(tcp_sk(sk)->mtu_info);
356         dst = inet_csk_update_pmtu(sk, mtu);
357         if (!dst)
358                 return;
359
360         /* Something is about to be wrong... Remember soft error
361          * for the case, if this connection will not able to recover.
362          */
363         if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
364                 WRITE_ONCE(sk->sk_err_soft, EMSGSIZE);
365
366         mtu = dst_mtu(dst);
367
368         if (inet->pmtudisc != IP_PMTUDISC_DONT &&
369             ip_sk_accept_pmtu(sk) &&
370             inet_csk(sk)->icsk_pmtu_cookie > mtu) {
371                 tcp_sync_mss(sk, mtu);
372
373                 /* Resend the TCP packet because it's
374                  * clear that the old packet has been
375                  * dropped. This is the new "fast" path mtu
376                  * discovery.
377                  */
378                 tcp_simple_retransmit(sk);
379         } /* else let the usual retransmit timer handle it */
380 }
381 EXPORT_SYMBOL(tcp_v4_mtu_reduced);
382
383 static void do_redirect(struct sk_buff *skb, struct sock *sk)
384 {
385         struct dst_entry *dst = __sk_dst_check(sk, 0);
386
387         if (dst)
388                 dst->ops->redirect(dst, sk, skb);
389 }
390
391
392 /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
393 void tcp_req_err(struct sock *sk, u32 seq, bool abort)
394 {
395         struct request_sock *req = inet_reqsk(sk);
396         struct net *net = sock_net(sk);
397
398         /* ICMPs are not backlogged, hence we cannot get
399          * an established socket here.
400          */
401         if (seq != tcp_rsk(req)->snt_isn) {
402                 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
403         } else if (abort) {
404                 /*
405                  * Still in SYN_RECV, just remove it silently.
406                  * There is no good way to pass the error to the newly
407                  * created socket, and POSIX does not want network
408                  * errors returned from accept().
409                  */
410                 inet_csk_reqsk_queue_drop(req->rsk_listener, req);
411                 tcp_listendrop(req->rsk_listener);
412         }
413         reqsk_put(req);
414 }
415 EXPORT_SYMBOL(tcp_req_err);
416
417 /* TCP-LD (RFC 6069) logic */
418 void tcp_ld_RTO_revert(struct sock *sk, u32 seq)
419 {
420         struct inet_connection_sock *icsk = inet_csk(sk);
421         struct tcp_sock *tp = tcp_sk(sk);
422         struct sk_buff *skb;
423         s32 remaining;
424         u32 delta_us;
425
426         if (sock_owned_by_user(sk))
427                 return;
428
429         if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
430             !icsk->icsk_backoff)
431                 return;
432
433         skb = tcp_rtx_queue_head(sk);
434         if (WARN_ON_ONCE(!skb))
435                 return;
436
437         icsk->icsk_backoff--;
438         icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) : TCP_TIMEOUT_INIT;
439         icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
440
441         tcp_mstamp_refresh(tp);
442         delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
443         remaining = icsk->icsk_rto - usecs_to_jiffies(delta_us);
444
445         if (remaining > 0) {
446                 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
447                                           remaining, TCP_RTO_MAX);
448         } else {
449                 /* RTO revert clocked out retransmission.
450                  * Will retransmit now.
451                  */
452                 tcp_retransmit_timer(sk);
453         }
454 }
455 EXPORT_SYMBOL(tcp_ld_RTO_revert);
456
457 /*
458  * This routine is called by the ICMP module when it gets some
459  * sort of error condition.  If err < 0 then the socket should
460  * be closed and the error returned to the user.  If err > 0
461  * it's just the icmp type << 8 | icmp code.  After adjustment
462  * header points to the first 8 bytes of the tcp header.  We need
463  * to find the appropriate port.
464  *
465  * The locking strategy used here is very "optimistic". When
466  * someone else accesses the socket the ICMP is just dropped
467  * and for some paths there is no check at all.
468  * A more general error queue to queue errors for later handling
469  * is probably better.
470  *
471  */
472
473 int tcp_v4_err(struct sk_buff *skb, u32 info)
474 {
475         const struct iphdr *iph = (const struct iphdr *)skb->data;
476         struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
477         struct tcp_sock *tp;
478         struct inet_sock *inet;
479         const int type = icmp_hdr(skb)->type;
480         const int code = icmp_hdr(skb)->code;
481         struct sock *sk;
482         struct request_sock *fastopen;
483         u32 seq, snd_una;
484         int err;
485         struct net *net = dev_net(skb->dev);
486
487         sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
488                                        iph->daddr, th->dest, iph->saddr,
489                                        ntohs(th->source), inet_iif(skb), 0);
490         if (!sk) {
491                 __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
492                 return -ENOENT;
493         }
494         if (sk->sk_state == TCP_TIME_WAIT) {
495                 inet_twsk_put(inet_twsk(sk));
496                 return 0;
497         }
498         seq = ntohl(th->seq);
499         if (sk->sk_state == TCP_NEW_SYN_RECV) {
500                 tcp_req_err(sk, seq, type == ICMP_PARAMETERPROB ||
501                                      type == ICMP_TIME_EXCEEDED ||
502                                      (type == ICMP_DEST_UNREACH &&
503                                       (code == ICMP_NET_UNREACH ||
504                                        code == ICMP_HOST_UNREACH)));
505                 return 0;
506         }
507
508         bh_lock_sock(sk);
509         /* If too many ICMPs get dropped on busy
510          * servers this needs to be solved differently.
511          * We do take care of PMTU discovery (RFC1191) special case :
512          * we can receive locally generated ICMP messages while socket is held.
513          */
514         if (sock_owned_by_user(sk)) {
515                 if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
516                         __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
517         }
518         if (sk->sk_state == TCP_CLOSE)
519                 goto out;
520
521         if (static_branch_unlikely(&ip4_min_ttl)) {
522                 /* min_ttl can be changed concurrently from do_ip_setsockopt() */
523                 if (unlikely(iph->ttl < READ_ONCE(inet_sk(sk)->min_ttl))) {
524                         __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
525                         goto out;
526                 }
527         }
528
529         tp = tcp_sk(sk);
530         /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
531         fastopen = rcu_dereference(tp->fastopen_rsk);
532         snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
533         if (sk->sk_state != TCP_LISTEN &&
534             !between(seq, snd_una, tp->snd_nxt)) {
535                 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
536                 goto out;
537         }
538
539         switch (type) {
540         case ICMP_REDIRECT:
541                 if (!sock_owned_by_user(sk))
542                         do_redirect(skb, sk);
543                 goto out;
544         case ICMP_SOURCE_QUENCH:
545                 /* Just silently ignore these. */
546                 goto out;
547         case ICMP_PARAMETERPROB:
548                 err = EPROTO;
549                 break;
550         case ICMP_DEST_UNREACH:
551                 if (code > NR_ICMP_UNREACH)
552                         goto out;
553
554                 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
555                         /* We are not interested in TCP_LISTEN and open_requests
556                          * (SYN-ACKs send out by Linux are always <576bytes so
557                          * they should go through unfragmented).
558                          */
559                         if (sk->sk_state == TCP_LISTEN)
560                                 goto out;
561
562                         WRITE_ONCE(tp->mtu_info, info);
563                         if (!sock_owned_by_user(sk)) {
564                                 tcp_v4_mtu_reduced(sk);
565                         } else {
566                                 if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
567                                         sock_hold(sk);
568                         }
569                         goto out;
570                 }
571
572                 err = icmp_err_convert[code].errno;
573                 /* check if this ICMP message allows revert of backoff.
574                  * (see RFC 6069)
575                  */
576                 if (!fastopen &&
577                     (code == ICMP_NET_UNREACH || code == ICMP_HOST_UNREACH))
578                         tcp_ld_RTO_revert(sk, seq);
579                 break;
580         case ICMP_TIME_EXCEEDED:
581                 err = EHOSTUNREACH;
582                 break;
583         default:
584                 goto out;
585         }
586
587         switch (sk->sk_state) {
588         case TCP_SYN_SENT:
589         case TCP_SYN_RECV:
590                 /* Only in fast or simultaneous open. If a fast open socket is
591                  * already accepted it is treated as a connected one below.
592                  */
593                 if (fastopen && !fastopen->sk)
594                         break;
595
596                 ip_icmp_error(sk, skb, err, th->dest, info, (u8 *)th);
597
598                 if (!sock_owned_by_user(sk)) {
599                         WRITE_ONCE(sk->sk_err, err);
600
601                         sk_error_report(sk);
602
603                         tcp_done(sk);
604                 } else {
605                         WRITE_ONCE(sk->sk_err_soft, err);
606                 }
607                 goto out;
608         }
609
610         /* If we've already connected we will keep trying
611          * until we time out, or the user gives up.
612          *
613          * rfc1122 4.2.3.9 allows to consider as hard errors
614          * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
615          * but it is obsoleted by pmtu discovery).
616          *
617          * Note, that in modern internet, where routing is unreliable
618          * and in each dark corner broken firewalls sit, sending random
619          * errors ordered by their masters even this two messages finally lose
620          * their original sense (even Linux sends invalid PORT_UNREACHs)
621          *
622          * Now we are in compliance with RFCs.
623          *                                                      --ANK (980905)
624          */
625
626         inet = inet_sk(sk);
627         if (!sock_owned_by_user(sk) && inet->recverr) {
628                 WRITE_ONCE(sk->sk_err, err);
629                 sk_error_report(sk);
630         } else  { /* Only an error on timeout */
631                 WRITE_ONCE(sk->sk_err_soft, err);
632         }
633
634 out:
635         bh_unlock_sock(sk);
636         sock_put(sk);
637         return 0;
638 }
639
640 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
641 {
642         struct tcphdr *th = tcp_hdr(skb);
643
644         th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
645         skb->csum_start = skb_transport_header(skb) - skb->head;
646         skb->csum_offset = offsetof(struct tcphdr, check);
647 }
648
649 /* This routine computes an IPv4 TCP checksum. */
650 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
651 {
652         const struct inet_sock *inet = inet_sk(sk);
653
654         __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
655 }
656 EXPORT_SYMBOL(tcp_v4_send_check);
657
658 /*
659  *      This routine will send an RST to the other tcp.
660  *
661  *      Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
662  *                    for reset.
663  *      Answer: if a packet caused RST, it is not for a socket
664  *              existing in our system, if it is matched to a socket,
665  *              it is just duplicate segment or bug in other side's TCP.
666  *              So that we build reply only basing on parameters
667  *              arrived with segment.
668  *      Exception: precedence violation. We do not implement it in any case.
669  */
670
671 #ifdef CONFIG_TCP_MD5SIG
672 #define OPTION_BYTES TCPOLEN_MD5SIG_ALIGNED
673 #else
674 #define OPTION_BYTES sizeof(__be32)
675 #endif
676
677 static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
678 {
679         const struct tcphdr *th = tcp_hdr(skb);
680         struct {
681                 struct tcphdr th;
682                 __be32 opt[OPTION_BYTES / sizeof(__be32)];
683         } rep;
684         struct ip_reply_arg arg;
685 #ifdef CONFIG_TCP_MD5SIG
686         struct tcp_md5sig_key *key = NULL;
687         const __u8 *hash_location = NULL;
688         unsigned char newhash[16];
689         int genhash;
690         struct sock *sk1 = NULL;
691 #endif
692         u64 transmit_time = 0;
693         struct sock *ctl_sk;
694         struct net *net;
695         u32 txhash = 0;
696
697         /* Never send a reset in response to a reset. */
698         if (th->rst)
699                 return;
700
701         /* If sk not NULL, it means we did a successful lookup and incoming
702          * route had to be correct. prequeue might have dropped our dst.
703          */
704         if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
705                 return;
706
707         /* Swap the send and the receive. */
708         memset(&rep, 0, sizeof(rep));
709         rep.th.dest   = th->source;
710         rep.th.source = th->dest;
711         rep.th.doff   = sizeof(struct tcphdr) / 4;
712         rep.th.rst    = 1;
713
714         if (th->ack) {
715                 rep.th.seq = th->ack_seq;
716         } else {
717                 rep.th.ack = 1;
718                 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
719                                        skb->len - (th->doff << 2));
720         }
721
722         memset(&arg, 0, sizeof(arg));
723         arg.iov[0].iov_base = (unsigned char *)&rep;
724         arg.iov[0].iov_len  = sizeof(rep.th);
725
726         net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
727 #ifdef CONFIG_TCP_MD5SIG
728         rcu_read_lock();
729         hash_location = tcp_parse_md5sig_option(th);
730         if (sk && sk_fullsock(sk)) {
731                 const union tcp_md5_addr *addr;
732                 int l3index;
733
734                 /* sdif set, means packet ingressed via a device
735                  * in an L3 domain and inet_iif is set to it.
736                  */
737                 l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
738                 addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
739                 key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
740         } else if (hash_location) {
741                 const union tcp_md5_addr *addr;
742                 int sdif = tcp_v4_sdif(skb);
743                 int dif = inet_iif(skb);
744                 int l3index;
745
746                 /*
747                  * active side is lost. Try to find listening socket through
748                  * source port, and then find md5 key through listening socket.
749                  * we are not loose security here:
750                  * Incoming packet is checked with md5 hash with finding key,
751                  * no RST generated if md5 hash doesn't match.
752                  */
753                 sk1 = __inet_lookup_listener(net, net->ipv4.tcp_death_row.hashinfo,
754                                              NULL, 0, ip_hdr(skb)->saddr,
755                                              th->source, ip_hdr(skb)->daddr,
756                                              ntohs(th->source), dif, sdif);
757                 /* don't send rst if it can't find key */
758                 if (!sk1)
759                         goto out;
760
761                 /* sdif set, means packet ingressed via a device
762                  * in an L3 domain and dif is set to it.
763                  */
764                 l3index = sdif ? dif : 0;
765                 addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
766                 key = tcp_md5_do_lookup(sk1, l3index, addr, AF_INET);
767                 if (!key)
768                         goto out;
769
770
771                 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
772                 if (genhash || memcmp(hash_location, newhash, 16) != 0)
773                         goto out;
774
775         }
776
777         if (key) {
778                 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
779                                    (TCPOPT_NOP << 16) |
780                                    (TCPOPT_MD5SIG << 8) |
781                                    TCPOLEN_MD5SIG);
782                 /* Update length and the length the header thinks exists */
783                 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
784                 rep.th.doff = arg.iov[0].iov_len / 4;
785
786                 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
787                                      key, ip_hdr(skb)->saddr,
788                                      ip_hdr(skb)->daddr, &rep.th);
789         }
790 #endif
791         /* Can't co-exist with TCPMD5, hence check rep.opt[0] */
792         if (rep.opt[0] == 0) {
793                 __be32 mrst = mptcp_reset_option(skb);
794
795                 if (mrst) {
796                         rep.opt[0] = mrst;
797                         arg.iov[0].iov_len += sizeof(mrst);
798                         rep.th.doff = arg.iov[0].iov_len / 4;
799                 }
800         }
801
802         arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
803                                       ip_hdr(skb)->saddr, /* XXX */
804                                       arg.iov[0].iov_len, IPPROTO_TCP, 0);
805         arg.csumoffset = offsetof(struct tcphdr, check) / 2;
806         arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;
807
808         /* When socket is gone, all binding information is lost.
809          * routing might fail in this case. No choice here, if we choose to force
810          * input interface, we will misroute in case of asymmetric route.
811          */
812         if (sk) {
813                 arg.bound_dev_if = sk->sk_bound_dev_if;
814                 if (sk_fullsock(sk))
815                         trace_tcp_send_reset(sk, skb);
816         }
817
818         BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
819                      offsetof(struct inet_timewait_sock, tw_bound_dev_if));
820
821         arg.tos = ip_hdr(skb)->tos;
822         arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
823         local_bh_disable();
824         ctl_sk = this_cpu_read(ipv4_tcp_sk);
825         sock_net_set(ctl_sk, net);
826         if (sk) {
827                 ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
828                                    inet_twsk(sk)->tw_mark : sk->sk_mark;
829                 ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
830                                    inet_twsk(sk)->tw_priority : sk->sk_priority;
831                 transmit_time = tcp_transmit_time(sk);
832                 xfrm_sk_clone_policy(ctl_sk, sk);
833                 txhash = (sk->sk_state == TCP_TIME_WAIT) ?
834                          inet_twsk(sk)->tw_txhash : sk->sk_txhash;
835         } else {
836                 ctl_sk->sk_mark = 0;
837                 ctl_sk->sk_priority = 0;
838         }
839         ip_send_unicast_reply(ctl_sk,
840                               skb, &TCP_SKB_CB(skb)->header.h4.opt,
841                               ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
842                               &arg, arg.iov[0].iov_len,
843                               transmit_time, txhash);
844
845         xfrm_sk_free_policy(ctl_sk);
846         sock_net_set(ctl_sk, &init_net);
847         __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
848         __TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
849         local_bh_enable();
850
851 #ifdef CONFIG_TCP_MD5SIG
852 out:
853         rcu_read_unlock();
854 #endif
855 }
856
857 /* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
858    outside socket context is ugly, certainly. What can I do?
859  */
860
861 static void tcp_v4_send_ack(const struct sock *sk,
862                             struct sk_buff *skb, u32 seq, u32 ack,
863                             u32 win, u32 tsval, u32 tsecr, int oif,
864                             struct tcp_md5sig_key *key,
865                             int reply_flags, u8 tos, u32 txhash)
866 {
867         const struct tcphdr *th = tcp_hdr(skb);
868         struct {
869                 struct tcphdr th;
870                 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
871 #ifdef CONFIG_TCP_MD5SIG
872                            + (TCPOLEN_MD5SIG_ALIGNED >> 2)
873 #endif
874                         ];
875         } rep;
876         struct net *net = sock_net(sk);
877         struct ip_reply_arg arg;
878         struct sock *ctl_sk;
879         u64 transmit_time;
880
881         memset(&rep.th, 0, sizeof(struct tcphdr));
882         memset(&arg, 0, sizeof(arg));
883
884         arg.iov[0].iov_base = (unsigned char *)&rep;
885         arg.iov[0].iov_len  = sizeof(rep.th);
886         if (tsecr) {
887                 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
888                                    (TCPOPT_TIMESTAMP << 8) |
889                                    TCPOLEN_TIMESTAMP);
890                 rep.opt[1] = htonl(tsval);
891                 rep.opt[2] = htonl(tsecr);
892                 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
893         }
894
895         /* Swap the send and the receive. */
896         rep.th.dest    = th->source;
897         rep.th.source  = th->dest;
898         rep.th.doff    = arg.iov[0].iov_len / 4;
899         rep.th.seq     = htonl(seq);
900         rep.th.ack_seq = htonl(ack);
901         rep.th.ack     = 1;
902         rep.th.window  = htons(win);
903
904 #ifdef CONFIG_TCP_MD5SIG
905         if (key) {
906                 int offset = (tsecr) ? 3 : 0;
907
908                 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
909                                           (TCPOPT_NOP << 16) |
910                                           (TCPOPT_MD5SIG << 8) |
911                                           TCPOLEN_MD5SIG);
912                 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
913                 rep.th.doff = arg.iov[0].iov_len/4;
914
915                 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
916                                     key, ip_hdr(skb)->saddr,
917                                     ip_hdr(skb)->daddr, &rep.th);
918         }
919 #endif
920         arg.flags = reply_flags;
921         arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
922                                       ip_hdr(skb)->saddr, /* XXX */
923                                       arg.iov[0].iov_len, IPPROTO_TCP, 0);
924         arg.csumoffset = offsetof(struct tcphdr, check) / 2;
925         if (oif)
926                 arg.bound_dev_if = oif;
927         arg.tos = tos;
928         arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
929         local_bh_disable();
930         ctl_sk = this_cpu_read(ipv4_tcp_sk);
931         sock_net_set(ctl_sk, net);
932         ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
933                            inet_twsk(sk)->tw_mark : sk->sk_mark;
934         ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
935                            inet_twsk(sk)->tw_priority : sk->sk_priority;
936         transmit_time = tcp_transmit_time(sk);
937         ip_send_unicast_reply(ctl_sk,
938                               skb, &TCP_SKB_CB(skb)->header.h4.opt,
939                               ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
940                               &arg, arg.iov[0].iov_len,
941                               transmit_time, txhash);
942
943         sock_net_set(ctl_sk, &init_net);
944         __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
945         local_bh_enable();
946 }
947
948 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
949 {
950         struct inet_timewait_sock *tw = inet_twsk(sk);
951         struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
952
953         tcp_v4_send_ack(sk, skb,
954                         tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
955                         tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
956                         tcp_time_stamp_raw() + tcptw->tw_ts_offset,
957                         tcptw->tw_ts_recent,
958                         tw->tw_bound_dev_if,
959                         tcp_twsk_md5_key(tcptw),
960                         tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
961                         tw->tw_tos,
962                         tw->tw_txhash
963                         );
964
965         inet_twsk_put(tw);
966 }
967
968 static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
969                                   struct request_sock *req)
970 {
971         const union tcp_md5_addr *addr;
972         int l3index;
973
974         /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
975          * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
976          */
977         u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
978                                              tcp_sk(sk)->snd_nxt;
979
980         /* RFC 7323 2.3
981          * The window field (SEG.WND) of every outgoing segment, with the
982          * exception of <SYN> segments, MUST be right-shifted by
983          * Rcv.Wind.Shift bits:
984          */
985         addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
986         l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
987         tcp_v4_send_ack(sk, skb, seq,
988                         tcp_rsk(req)->rcv_nxt,
989                         req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
990                         tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
991                         req->ts_recent,
992                         0,
993                         tcp_md5_do_lookup(sk, l3index, addr, AF_INET),
994                         inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
995                         ip_hdr(skb)->tos, tcp_rsk(req)->txhash);
996 }
997
998 /*
999  *      Send a SYN-ACK after having received a SYN.
1000  *      This still operates on a request_sock only, not on a big
1001  *      socket.
1002  */
1003 static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
1004                               struct flowi *fl,
1005                               struct request_sock *req,
1006                               struct tcp_fastopen_cookie *foc,
1007                               enum tcp_synack_type synack_type,
1008                               struct sk_buff *syn_skb)
1009 {
1010         const struct inet_request_sock *ireq = inet_rsk(req);
1011         struct flowi4 fl4;
1012         int err = -1;
1013         struct sk_buff *skb;
1014         u8 tos;
1015
1016         /* First, grab a route. */
1017         if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
1018                 return -1;
1019
1020         skb = tcp_make_synack(sk, dst, req, foc, synack_type, syn_skb);
1021
1022         if (skb) {
1023                 __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
1024
1025                 tos = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos) ?
1026                                 (tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
1027                                 (inet_sk(sk)->tos & INET_ECN_MASK) :
1028                                 inet_sk(sk)->tos;
1029
1030                 if (!INET_ECN_is_capable(tos) &&
1031                     tcp_bpf_ca_needs_ecn((struct sock *)req))
1032                         tos |= INET_ECN_ECT_0;
1033
1034                 rcu_read_lock();
1035                 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
1036                                             ireq->ir_rmt_addr,
1037                                             rcu_dereference(ireq->ireq_opt),
1038                                             tos);
1039                 rcu_read_unlock();
1040                 err = net_xmit_eval(err);
1041         }
1042
1043         return err;
1044 }
1045
1046 /*
1047  *      IPv4 request_sock destructor.
1048  */
1049 static void tcp_v4_reqsk_destructor(struct request_sock *req)
1050 {
1051         kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
1052 }
1053
1054 #ifdef CONFIG_TCP_MD5SIG
1055 /*
1056  * RFC2385 MD5 checksumming requires a mapping of
1057  * IP address->MD5 Key.
1058  * We need to maintain these in the sk structure.
1059  */
1060
1061 DEFINE_STATIC_KEY_DEFERRED_FALSE(tcp_md5_needed, HZ);
1062 EXPORT_SYMBOL(tcp_md5_needed);
1063
1064 static bool better_md5_match(struct tcp_md5sig_key *old, struct tcp_md5sig_key *new)
1065 {
1066         if (!old)
1067                 return true;
1068
1069         /* l3index always overrides non-l3index */
1070         if (old->l3index && new->l3index == 0)
1071                 return false;
1072         if (old->l3index == 0 && new->l3index)
1073                 return true;
1074
1075         return old->prefixlen < new->prefixlen;
1076 }
1077
1078 /* Find the Key structure for an address.  */
1079 struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
1080                                            const union tcp_md5_addr *addr,
1081                                            int family)
1082 {
1083         const struct tcp_sock *tp = tcp_sk(sk);
1084         struct tcp_md5sig_key *key;
1085         const struct tcp_md5sig_info *md5sig;
1086         __be32 mask;
1087         struct tcp_md5sig_key *best_match = NULL;
1088         bool match;
1089
1090         /* caller either holds rcu_read_lock() or socket lock */
1091         md5sig = rcu_dereference_check(tp->md5sig_info,
1092                                        lockdep_sock_is_held(sk));
1093         if (!md5sig)
1094                 return NULL;
1095
1096         hlist_for_each_entry_rcu(key, &md5sig->head, node,
1097                                  lockdep_sock_is_held(sk)) {
1098                 if (key->family != family)
1099                         continue;
1100                 if (key->flags & TCP_MD5SIG_FLAG_IFINDEX && key->l3index != l3index)
1101                         continue;
1102                 if (family == AF_INET) {
1103                         mask = inet_make_mask(key->prefixlen);
1104                         match = (key->addr.a4.s_addr & mask) ==
1105                                 (addr->a4.s_addr & mask);
1106 #if IS_ENABLED(CONFIG_IPV6)
1107                 } else if (family == AF_INET6) {
1108                         match = ipv6_prefix_equal(&key->addr.a6, &addr->a6,
1109                                                   key->prefixlen);
1110 #endif
1111                 } else {
1112                         match = false;
1113                 }
1114
1115                 if (match && better_md5_match(best_match, key))
1116                         best_match = key;
1117         }
1118         return best_match;
1119 }
1120 EXPORT_SYMBOL(__tcp_md5_do_lookup);
1121
1122 static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
1123                                                       const union tcp_md5_addr *addr,
1124                                                       int family, u8 prefixlen,
1125                                                       int l3index, u8 flags)
1126 {
1127         const struct tcp_sock *tp = tcp_sk(sk);
1128         struct tcp_md5sig_key *key;
1129         unsigned int size = sizeof(struct in_addr);
1130         const struct tcp_md5sig_info *md5sig;
1131
1132         /* caller either holds rcu_read_lock() or socket lock */
1133         md5sig = rcu_dereference_check(tp->md5sig_info,
1134                                        lockdep_sock_is_held(sk));
1135         if (!md5sig)
1136                 return NULL;
1137 #if IS_ENABLED(CONFIG_IPV6)
1138         if (family == AF_INET6)
1139                 size = sizeof(struct in6_addr);
1140 #endif
1141         hlist_for_each_entry_rcu(key, &md5sig->head, node,
1142                                  lockdep_sock_is_held(sk)) {
1143                 if (key->family != family)
1144                         continue;
1145                 if ((key->flags & TCP_MD5SIG_FLAG_IFINDEX) != (flags & TCP_MD5SIG_FLAG_IFINDEX))
1146                         continue;
1147                 if (key->l3index != l3index)
1148                         continue;
1149                 if (!memcmp(&key->addr, addr, size) &&
1150                     key->prefixlen == prefixlen)
1151                         return key;
1152         }
1153         return NULL;
1154 }
1155
1156 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1157                                          const struct sock *addr_sk)
1158 {
1159         const union tcp_md5_addr *addr;
1160         int l3index;
1161
1162         l3index = l3mdev_master_ifindex_by_index(sock_net(sk),
1163                                                  addr_sk->sk_bound_dev_if);
1164         addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
1165         return tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
1166 }
1167 EXPORT_SYMBOL(tcp_v4_md5_lookup);
1168
1169 static int tcp_md5sig_info_add(struct sock *sk, gfp_t gfp)
1170 {
1171         struct tcp_sock *tp = tcp_sk(sk);
1172         struct tcp_md5sig_info *md5sig;
1173
1174         md5sig = kmalloc(sizeof(*md5sig), gfp);
1175         if (!md5sig)
1176                 return -ENOMEM;
1177
1178         sk_gso_disable(sk);
1179         INIT_HLIST_HEAD(&md5sig->head);
1180         rcu_assign_pointer(tp->md5sig_info, md5sig);
1181         return 0;
1182 }
1183
1184 /* This can be called on a newly created socket, from other files */
1185 static int __tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1186                             int family, u8 prefixlen, int l3index, u8 flags,
1187                             const u8 *newkey, u8 newkeylen, gfp_t gfp)
1188 {
1189         /* Add Key to the list */
1190         struct tcp_md5sig_key *key;
1191         struct tcp_sock *tp = tcp_sk(sk);
1192         struct tcp_md5sig_info *md5sig;
1193
1194         key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index, flags);
1195         if (key) {
1196                 /* Pre-existing entry - just update that one.
1197                  * Note that the key might be used concurrently.
1198                  * data_race() is telling kcsan that we do not care of
1199                  * key mismatches, since changing MD5 key on live flows
1200                  * can lead to packet drops.
1201                  */
1202                 data_race(memcpy(key->key, newkey, newkeylen));
1203
1204                 /* Pairs with READ_ONCE() in tcp_md5_hash_key().
1205                  * Also note that a reader could catch new key->keylen value
1206                  * but old key->key[], this is the reason we use __GFP_ZERO
1207                  * at sock_kmalloc() time below these lines.
1208                  */
1209                 WRITE_ONCE(key->keylen, newkeylen);
1210
1211                 return 0;
1212         }
1213
1214         md5sig = rcu_dereference_protected(tp->md5sig_info,
1215                                            lockdep_sock_is_held(sk));
1216
1217         key = sock_kmalloc(sk, sizeof(*key), gfp | __GFP_ZERO);
1218         if (!key)
1219                 return -ENOMEM;
1220         if (!tcp_alloc_md5sig_pool()) {
1221                 sock_kfree_s(sk, key, sizeof(*key));
1222                 return -ENOMEM;
1223         }
1224
1225         memcpy(key->key, newkey, newkeylen);
1226         key->keylen = newkeylen;
1227         key->family = family;
1228         key->prefixlen = prefixlen;
1229         key->l3index = l3index;
1230         key->flags = flags;
1231         memcpy(&key->addr, addr,
1232                (IS_ENABLED(CONFIG_IPV6) && family == AF_INET6) ? sizeof(struct in6_addr) :
1233                                                                  sizeof(struct in_addr));
1234         hlist_add_head_rcu(&key->node, &md5sig->head);
1235         return 0;
1236 }
1237
1238 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1239                    int family, u8 prefixlen, int l3index, u8 flags,
1240                    const u8 *newkey, u8 newkeylen)
1241 {
1242         struct tcp_sock *tp = tcp_sk(sk);
1243
1244         if (!rcu_dereference_protected(tp->md5sig_info, lockdep_sock_is_held(sk))) {
1245                 if (tcp_md5sig_info_add(sk, GFP_KERNEL))
1246                         return -ENOMEM;
1247
1248                 if (!static_branch_inc(&tcp_md5_needed.key)) {
1249                         struct tcp_md5sig_info *md5sig;
1250
1251                         md5sig = rcu_dereference_protected(tp->md5sig_info, lockdep_sock_is_held(sk));
1252                         rcu_assign_pointer(tp->md5sig_info, NULL);
1253                         kfree_rcu(md5sig, rcu);
1254                         return -EUSERS;
1255                 }
1256         }
1257
1258         return __tcp_md5_do_add(sk, addr, family, prefixlen, l3index, flags,
1259                                 newkey, newkeylen, GFP_KERNEL);
1260 }
1261 EXPORT_SYMBOL(tcp_md5_do_add);
1262
1263 int tcp_md5_key_copy(struct sock *sk, const union tcp_md5_addr *addr,
1264                      int family, u8 prefixlen, int l3index,
1265                      struct tcp_md5sig_key *key)
1266 {
1267         struct tcp_sock *tp = tcp_sk(sk);
1268
1269         if (!rcu_dereference_protected(tp->md5sig_info, lockdep_sock_is_held(sk))) {
1270                 if (tcp_md5sig_info_add(sk, sk_gfp_mask(sk, GFP_ATOMIC)))
1271                         return -ENOMEM;
1272
1273                 if (!static_key_fast_inc_not_disabled(&tcp_md5_needed.key.key)) {
1274                         struct tcp_md5sig_info *md5sig;
1275
1276                         md5sig = rcu_dereference_protected(tp->md5sig_info, lockdep_sock_is_held(sk));
1277                         net_warn_ratelimited("Too many TCP-MD5 keys in the system\n");
1278                         rcu_assign_pointer(tp->md5sig_info, NULL);
1279                         kfree_rcu(md5sig, rcu);
1280                         return -EUSERS;
1281                 }
1282         }
1283
1284         return __tcp_md5_do_add(sk, addr, family, prefixlen, l3index,
1285                                 key->flags, key->key, key->keylen,
1286                                 sk_gfp_mask(sk, GFP_ATOMIC));
1287 }
1288 EXPORT_SYMBOL(tcp_md5_key_copy);
1289
1290 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
1291                    u8 prefixlen, int l3index, u8 flags)
1292 {
1293         struct tcp_md5sig_key *key;
1294
1295         key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index, flags);
1296         if (!key)
1297                 return -ENOENT;
1298         hlist_del_rcu(&key->node);
1299         atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1300         kfree_rcu(key, rcu);
1301         return 0;
1302 }
1303 EXPORT_SYMBOL(tcp_md5_do_del);
1304
1305 static void tcp_clear_md5_list(struct sock *sk)
1306 {
1307         struct tcp_sock *tp = tcp_sk(sk);
1308         struct tcp_md5sig_key *key;
1309         struct hlist_node *n;
1310         struct tcp_md5sig_info *md5sig;
1311
1312         md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1313
1314         hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
1315                 hlist_del_rcu(&key->node);
1316                 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1317                 kfree_rcu(key, rcu);
1318         }
1319 }
1320
1321 static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
1322                                  sockptr_t optval, int optlen)
1323 {
1324         struct tcp_md5sig cmd;
1325         struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1326         const union tcp_md5_addr *addr;
1327         u8 prefixlen = 32;
1328         int l3index = 0;
1329         u8 flags;
1330
1331         if (optlen < sizeof(cmd))
1332                 return -EINVAL;
1333
1334         if (copy_from_sockptr(&cmd, optval, sizeof(cmd)))
1335                 return -EFAULT;
1336
1337         if (sin->sin_family != AF_INET)
1338                 return -EINVAL;
1339
1340         flags = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX;
1341
1342         if (optname == TCP_MD5SIG_EXT &&
1343             cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
1344                 prefixlen = cmd.tcpm_prefixlen;
1345                 if (prefixlen > 32)
1346                         return -EINVAL;
1347         }
1348
1349         if (optname == TCP_MD5SIG_EXT && cmd.tcpm_ifindex &&
1350             cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
1351                 struct net_device *dev;
1352
1353                 rcu_read_lock();
1354                 dev = dev_get_by_index_rcu(sock_net(sk), cmd.tcpm_ifindex);
1355                 if (dev && netif_is_l3_master(dev))
1356                         l3index = dev->ifindex;
1357
1358                 rcu_read_unlock();
1359
1360                 /* ok to reference set/not set outside of rcu;
1361                  * right now device MUST be an L3 master
1362                  */
1363                 if (!dev || !l3index)
1364                         return -EINVAL;
1365         }
1366
1367         addr = (union tcp_md5_addr *)&sin->sin_addr.s_addr;
1368
1369         if (!cmd.tcpm_keylen)
1370                 return tcp_md5_do_del(sk, addr, AF_INET, prefixlen, l3index, flags);
1371
1372         if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1373                 return -EINVAL;
1374
1375         return tcp_md5_do_add(sk, addr, AF_INET, prefixlen, l3index, flags,
1376                               cmd.tcpm_key, cmd.tcpm_keylen);
1377 }
1378
1379 static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
1380                                    __be32 daddr, __be32 saddr,
1381                                    const struct tcphdr *th, int nbytes)
1382 {
1383         struct tcp4_pseudohdr *bp;
1384         struct scatterlist sg;
1385         struct tcphdr *_th;
1386
1387         bp = hp->scratch;
1388         bp->saddr = saddr;
1389         bp->daddr = daddr;
1390         bp->pad = 0;
1391         bp->protocol = IPPROTO_TCP;
1392         bp->len = cpu_to_be16(nbytes);
1393
1394         _th = (struct tcphdr *)(bp + 1);
1395         memcpy(_th, th, sizeof(*th));
1396         _th->check = 0;
1397
1398         sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
1399         ahash_request_set_crypt(hp->md5_req, &sg, NULL,
1400                                 sizeof(*bp) + sizeof(*th));
1401         return crypto_ahash_update(hp->md5_req);
1402 }
1403
1404 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1405                                __be32 daddr, __be32 saddr, const struct tcphdr *th)
1406 {
1407         struct tcp_md5sig_pool *hp;
1408         struct ahash_request *req;
1409
1410         hp = tcp_get_md5sig_pool();
1411         if (!hp)
1412                 goto clear_hash_noput;
1413         req = hp->md5_req;
1414
1415         if (crypto_ahash_init(req))
1416                 goto clear_hash;
1417         if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
1418                 goto clear_hash;
1419         if (tcp_md5_hash_key(hp, key))
1420                 goto clear_hash;
1421         ahash_request_set_crypt(req, NULL, md5_hash, 0);
1422         if (crypto_ahash_final(req))
1423                 goto clear_hash;
1424
1425         tcp_put_md5sig_pool();
1426         return 0;
1427
1428 clear_hash:
1429         tcp_put_md5sig_pool();
1430 clear_hash_noput:
1431         memset(md5_hash, 0, 16);
1432         return 1;
1433 }
1434
1435 int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1436                         const struct sock *sk,
1437                         const struct sk_buff *skb)
1438 {
1439         struct tcp_md5sig_pool *hp;
1440         struct ahash_request *req;
1441         const struct tcphdr *th = tcp_hdr(skb);
1442         __be32 saddr, daddr;
1443
1444         if (sk) { /* valid for establish/request sockets */
1445                 saddr = sk->sk_rcv_saddr;
1446                 daddr = sk->sk_daddr;
1447         } else {
1448                 const struct iphdr *iph = ip_hdr(skb);
1449                 saddr = iph->saddr;
1450                 daddr = iph->daddr;
1451         }
1452
1453         hp = tcp_get_md5sig_pool();
1454         if (!hp)
1455                 goto clear_hash_noput;
1456         req = hp->md5_req;
1457
1458         if (crypto_ahash_init(req))
1459                 goto clear_hash;
1460
1461         if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
1462                 goto clear_hash;
1463         if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1464                 goto clear_hash;
1465         if (tcp_md5_hash_key(hp, key))
1466                 goto clear_hash;
1467         ahash_request_set_crypt(req, NULL, md5_hash, 0);
1468         if (crypto_ahash_final(req))
1469                 goto clear_hash;
1470
1471         tcp_put_md5sig_pool();
1472         return 0;
1473
1474 clear_hash:
1475         tcp_put_md5sig_pool();
1476 clear_hash_noput:
1477         memset(md5_hash, 0, 16);
1478         return 1;
1479 }
1480 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1481
1482 #endif
1483
1484 static void tcp_v4_init_req(struct request_sock *req,
1485                             const struct sock *sk_listener,
1486                             struct sk_buff *skb)
1487 {
1488         struct inet_request_sock *ireq = inet_rsk(req);
1489         struct net *net = sock_net(sk_listener);
1490
1491         sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1492         sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1493         RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
1494 }
1495
1496 static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
1497                                           struct sk_buff *skb,
1498                                           struct flowi *fl,
1499                                           struct request_sock *req)
1500 {
1501         tcp_v4_init_req(req, sk, skb);
1502
1503         if (security_inet_conn_request(sk, skb, req))
1504                 return NULL;
1505
1506         return inet_csk_route_req(sk, &fl->u.ip4, req);
1507 }
1508
1509 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1510         .family         =       PF_INET,
1511         .obj_size       =       sizeof(struct tcp_request_sock),
1512         .rtx_syn_ack    =       tcp_rtx_synack,
1513         .send_ack       =       tcp_v4_reqsk_send_ack,
1514         .destructor     =       tcp_v4_reqsk_destructor,
1515         .send_reset     =       tcp_v4_send_reset,
1516         .syn_ack_timeout =      tcp_syn_ack_timeout,
1517 };
1518
1519 const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1520         .mss_clamp      =       TCP_MSS_DEFAULT,
1521 #ifdef CONFIG_TCP_MD5SIG
1522         .req_md5_lookup =       tcp_v4_md5_lookup,
1523         .calc_md5_hash  =       tcp_v4_md5_hash_skb,
1524 #endif
1525 #ifdef CONFIG_SYN_COOKIES
1526         .cookie_init_seq =      cookie_v4_init_sequence,
1527 #endif
1528         .route_req      =       tcp_v4_route_req,
1529         .init_seq       =       tcp_v4_init_seq,
1530         .init_ts_off    =       tcp_v4_init_ts_off,
1531         .send_synack    =       tcp_v4_send_synack,
1532 };
1533
1534 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1535 {
1536         /* Never answer to SYNs send to broadcast or multicast */
1537         if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1538                 goto drop;
1539
1540         return tcp_conn_request(&tcp_request_sock_ops,
1541                                 &tcp_request_sock_ipv4_ops, sk, skb);
1542
1543 drop:
1544         tcp_listendrop(sk);
1545         return 0;
1546 }
1547 EXPORT_SYMBOL(tcp_v4_conn_request);
1548
1549
1550 /*
1551  * The three way handshake has completed - we got a valid synack -
1552  * now create the new socket.
1553  */
1554 struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1555                                   struct request_sock *req,
1556                                   struct dst_entry *dst,
1557                                   struct request_sock *req_unhash,
1558                                   bool *own_req)
1559 {
1560         struct inet_request_sock *ireq;
1561         bool found_dup_sk = false;
1562         struct inet_sock *newinet;
1563         struct tcp_sock *newtp;
1564         struct sock *newsk;
1565 #ifdef CONFIG_TCP_MD5SIG
1566         const union tcp_md5_addr *addr;
1567         struct tcp_md5sig_key *key;
1568         int l3index;
1569 #endif
1570         struct ip_options_rcu *inet_opt;
1571
1572         if (sk_acceptq_is_full(sk))
1573                 goto exit_overflow;
1574
1575         newsk = tcp_create_openreq_child(sk, req, skb);
1576         if (!newsk)
1577                 goto exit_nonewsk;
1578
1579         newsk->sk_gso_type = SKB_GSO_TCPV4;
1580         inet_sk_rx_dst_set(newsk, skb);
1581
1582         newtp                 = tcp_sk(newsk);
1583         newinet               = inet_sk(newsk);
1584         ireq                  = inet_rsk(req);
1585         sk_daddr_set(newsk, ireq->ir_rmt_addr);
1586         sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1587         newsk->sk_bound_dev_if = ireq->ir_iif;
1588         newinet->inet_saddr   = ireq->ir_loc_addr;
1589         inet_opt              = rcu_dereference(ireq->ireq_opt);
1590         RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
1591         newinet->mc_index     = inet_iif(skb);
1592         newinet->mc_ttl       = ip_hdr(skb)->ttl;
1593         newinet->rcv_tos      = ip_hdr(skb)->tos;
1594         inet_csk(newsk)->icsk_ext_hdr_len = 0;
1595         if (inet_opt)
1596                 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1597         newinet->inet_id = get_random_u16();
1598
1599         /* Set ToS of the new socket based upon the value of incoming SYN.
1600          * ECT bits are set later in tcp_init_transfer().
1601          */
1602         if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos))
1603                 newinet->tos = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;
1604
1605         if (!dst) {
1606                 dst = inet_csk_route_child_sock(sk, newsk, req);
1607                 if (!dst)
1608                         goto put_and_exit;
1609         } else {
1610                 /* syncookie case : see end of cookie_v4_check() */
1611         }
1612         sk_setup_caps(newsk, dst);
1613
1614         tcp_ca_openreq_child(newsk, dst);
1615
1616         tcp_sync_mss(newsk, dst_mtu(dst));
1617         newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
1618
1619         tcp_initialize_rcv_mss(newsk);
1620
1621 #ifdef CONFIG_TCP_MD5SIG
1622         l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif);
1623         /* Copy over the MD5 key from the original socket */
1624         addr = (union tcp_md5_addr *)&newinet->inet_daddr;
1625         key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
1626         if (key) {
1627                 if (tcp_md5_key_copy(newsk, addr, AF_INET, 32, l3index, key))
1628                         goto put_and_exit;
1629                 sk_gso_disable(newsk);
1630         }
1631 #endif
1632
1633         if (__inet_inherit_port(sk, newsk) < 0)
1634                 goto put_and_exit;
1635         *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),
1636                                        &found_dup_sk);
1637         if (likely(*own_req)) {
1638                 tcp_move_syn(newtp, req);
1639                 ireq->ireq_opt = NULL;
1640         } else {
1641                 newinet->inet_opt = NULL;
1642
1643                 if (!req_unhash && found_dup_sk) {
1644                         /* This code path should only be executed in the
1645                          * syncookie case only
1646                          */
1647                         bh_unlock_sock(newsk);
1648                         sock_put(newsk);
1649                         newsk = NULL;
1650                 }
1651         }
1652         return newsk;
1653
1654 exit_overflow:
1655         NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1656 exit_nonewsk:
1657         dst_release(dst);
1658 exit:
1659         tcp_listendrop(sk);
1660         return NULL;
1661 put_and_exit:
1662         newinet->inet_opt = NULL;
1663         inet_csk_prepare_forced_close(newsk);
1664         tcp_done(newsk);
1665         goto exit;
1666 }
1667 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1668
1669 static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
1670 {
1671 #ifdef CONFIG_SYN_COOKIES
1672         const struct tcphdr *th = tcp_hdr(skb);
1673
1674         if (!th->syn)
1675                 sk = cookie_v4_check(sk, skb);
1676 #endif
1677         return sk;
1678 }
1679
1680 u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
1681                          struct tcphdr *th, u32 *cookie)
1682 {
1683         u16 mss = 0;
1684 #ifdef CONFIG_SYN_COOKIES
1685         mss = tcp_get_syncookie_mss(&tcp_request_sock_ops,
1686                                     &tcp_request_sock_ipv4_ops, sk, th);
1687         if (mss) {
1688                 *cookie = __cookie_v4_init_sequence(iph, th, &mss);
1689                 tcp_synq_overflow(sk);
1690         }
1691 #endif
1692         return mss;
1693 }
1694
1695 INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
1696                                                            u32));
1697 /* The socket must have it's spinlock held when we get
1698  * here, unless it is a TCP_LISTEN socket.
1699  *
1700  * We have a potential double-lock case here, so even when
1701  * doing backlog processing we use the BH locking scheme.
1702  * This is because we cannot sleep with the original spinlock
1703  * held.
1704  */
1705 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1706 {
1707         enum skb_drop_reason reason;
1708         struct sock *rsk;
1709
1710         if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1711                 struct dst_entry *dst;
1712
1713                 dst = rcu_dereference_protected(sk->sk_rx_dst,
1714                                                 lockdep_sock_is_held(sk));
1715
1716                 sock_rps_save_rxhash(sk, skb);
1717                 sk_mark_napi_id(sk, skb);
1718                 if (dst) {
1719                         if (sk->sk_rx_dst_ifindex != skb->skb_iif ||
1720                             !INDIRECT_CALL_1(dst->ops->check, ipv4_dst_check,
1721                                              dst, 0)) {
1722                                 RCU_INIT_POINTER(sk->sk_rx_dst, NULL);
1723                                 dst_release(dst);
1724                         }
1725                 }
1726                 tcp_rcv_established(sk, skb);
1727                 return 0;
1728         }
1729
1730         reason = SKB_DROP_REASON_NOT_SPECIFIED;
1731         if (tcp_checksum_complete(skb))
1732                 goto csum_err;
1733
1734         if (sk->sk_state == TCP_LISTEN) {
1735                 struct sock *nsk = tcp_v4_cookie_check(sk, skb);
1736
1737                 if (!nsk)
1738                         goto discard;
1739                 if (nsk != sk) {
1740                         if (tcp_child_process(sk, nsk, skb)) {
1741                                 rsk = nsk;
1742                                 goto reset;
1743                         }
1744                         return 0;
1745                 }
1746         } else
1747                 sock_rps_save_rxhash(sk, skb);
1748
1749         if (tcp_rcv_state_process(sk, skb)) {
1750                 rsk = sk;
1751                 goto reset;
1752         }
1753         return 0;
1754
1755 reset:
1756         tcp_v4_send_reset(rsk, skb);
1757 discard:
1758         kfree_skb_reason(skb, reason);
1759         /* Be careful here. If this function gets more complicated and
1760          * gcc suffers from register pressure on the x86, sk (in %ebx)
1761          * might be destroyed here. This current version compiles correctly,
1762          * but you have been warned.
1763          */
1764         return 0;
1765
1766 csum_err:
1767         reason = SKB_DROP_REASON_TCP_CSUM;
1768         trace_tcp_bad_csum(skb);
1769         TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1770         TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1771         goto discard;
1772 }
1773 EXPORT_SYMBOL(tcp_v4_do_rcv);
1774
1775 int tcp_v4_early_demux(struct sk_buff *skb)
1776 {
1777         struct net *net = dev_net(skb->dev);
1778         const struct iphdr *iph;
1779         const struct tcphdr *th;
1780         struct sock *sk;
1781
1782         if (skb->pkt_type != PACKET_HOST)
1783                 return 0;
1784
1785         if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1786                 return 0;
1787
1788         iph = ip_hdr(skb);
1789         th = tcp_hdr(skb);
1790
1791         if (th->doff < sizeof(struct tcphdr) / 4)
1792                 return 0;
1793
1794         sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
1795                                        iph->saddr, th->source,
1796                                        iph->daddr, ntohs(th->dest),
1797                                        skb->skb_iif, inet_sdif(skb));
1798         if (sk) {
1799                 skb->sk = sk;
1800                 skb->destructor = sock_edemux;
1801                 if (sk_fullsock(sk)) {
1802                         struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst);
1803
1804                         if (dst)
1805                                 dst = dst_check(dst, 0);
1806                         if (dst &&
1807                             sk->sk_rx_dst_ifindex == skb->skb_iif)
1808                                 skb_dst_set_noref(skb, dst);
1809                 }
1810         }
1811         return 0;
1812 }
1813
1814 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
1815                      enum skb_drop_reason *reason)
1816 {
1817         u32 limit, tail_gso_size, tail_gso_segs;
1818         struct skb_shared_info *shinfo;
1819         const struct tcphdr *th;
1820         struct tcphdr *thtail;
1821         struct sk_buff *tail;
1822         unsigned int hdrlen;
1823         bool fragstolen;
1824         u32 gso_segs;
1825         u32 gso_size;
1826         int delta;
1827
1828         /* In case all data was pulled from skb frags (in __pskb_pull_tail()),
1829          * we can fix skb->truesize to its real value to avoid future drops.
1830          * This is valid because skb is not yet charged to the socket.
1831          * It has been noticed pure SACK packets were sometimes dropped
1832          * (if cooked by drivers without copybreak feature).
1833          */
1834         skb_condense(skb);
1835
1836         skb_dst_drop(skb);
1837
1838         if (unlikely(tcp_checksum_complete(skb))) {
1839                 bh_unlock_sock(sk);
1840                 trace_tcp_bad_csum(skb);
1841                 *reason = SKB_DROP_REASON_TCP_CSUM;
1842                 __TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1843                 __TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1844                 return true;
1845         }
1846
1847         /* Attempt coalescing to last skb in backlog, even if we are
1848          * above the limits.
1849          * This is okay because skb capacity is limited to MAX_SKB_FRAGS.
1850          */
1851         th = (const struct tcphdr *)skb->data;
1852         hdrlen = th->doff * 4;
1853
1854         tail = sk->sk_backlog.tail;
1855         if (!tail)
1856                 goto no_coalesce;
1857         thtail = (struct tcphdr *)tail->data;
1858
1859         if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq ||
1860             TCP_SKB_CB(tail)->ip_dsfield != TCP_SKB_CB(skb)->ip_dsfield ||
1861             ((TCP_SKB_CB(tail)->tcp_flags |
1862               TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_SYN | TCPHDR_RST | TCPHDR_URG)) ||
1863             !((TCP_SKB_CB(tail)->tcp_flags &
1864               TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) ||
1865             ((TCP_SKB_CB(tail)->tcp_flags ^
1866               TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
1867 #ifdef CONFIG_TLS_DEVICE
1868             tail->decrypted != skb->decrypted ||
1869 #endif
1870             thtail->doff != th->doff ||
1871             memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
1872                 goto no_coalesce;
1873
1874         __skb_pull(skb, hdrlen);
1875
1876         shinfo = skb_shinfo(skb);
1877         gso_size = shinfo->gso_size ?: skb->len;
1878         gso_segs = shinfo->gso_segs ?: 1;
1879
1880         shinfo = skb_shinfo(tail);
1881         tail_gso_size = shinfo->gso_size ?: (tail->len - hdrlen);
1882         tail_gso_segs = shinfo->gso_segs ?: 1;
1883
1884         if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
1885                 TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq;
1886
1887                 if (likely(!before(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))) {
1888                         TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq;
1889                         thtail->window = th->window;
1890                 }
1891
1892                 /* We have to update both TCP_SKB_CB(tail)->tcp_flags and
1893                  * thtail->fin, so that the fast path in tcp_rcv_established()
1894                  * is not entered if we append a packet with a FIN.
1895                  * SYN, RST, URG are not present.
1896                  * ACK is set on both packets.
1897                  * PSH : we do not really care in TCP stack,
1898                  *       at least for 'GRO' packets.
1899                  */
1900                 thtail->fin |= th->fin;
1901                 TCP_SKB_CB(tail)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
1902
1903                 if (TCP_SKB_CB(skb)->has_rxtstamp) {
1904                         TCP_SKB_CB(tail)->has_rxtstamp = true;
1905                         tail->tstamp = skb->tstamp;
1906                         skb_hwtstamps(tail)->hwtstamp = skb_hwtstamps(skb)->hwtstamp;
1907                 }
1908
1909                 /* Not as strict as GRO. We only need to carry mss max value */
1910                 shinfo->gso_size = max(gso_size, tail_gso_size);
1911                 shinfo->gso_segs = min_t(u32, gso_segs + tail_gso_segs, 0xFFFF);
1912
1913                 sk->sk_backlog.len += delta;
1914                 __NET_INC_STATS(sock_net(sk),
1915                                 LINUX_MIB_TCPBACKLOGCOALESCE);
1916                 kfree_skb_partial(skb, fragstolen);
1917                 return false;
1918         }
1919         __skb_push(skb, hdrlen);
1920
1921 no_coalesce:
1922         limit = (u32)READ_ONCE(sk->sk_rcvbuf) + (u32)(READ_ONCE(sk->sk_sndbuf) >> 1);
1923
1924         /* Only socket owner can try to collapse/prune rx queues
1925          * to reduce memory overhead, so add a little headroom here.
1926          * Few sockets backlog are possibly concurrently non empty.
1927          */
1928         limit += 64 * 1024;
1929
1930         if (unlikely(sk_add_backlog(sk, skb, limit))) {
1931                 bh_unlock_sock(sk);
1932                 *reason = SKB_DROP_REASON_SOCKET_BACKLOG;
1933                 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
1934                 return true;
1935         }
1936         return false;
1937 }
1938 EXPORT_SYMBOL(tcp_add_backlog);
1939
1940 int tcp_filter(struct sock *sk, struct sk_buff *skb)
1941 {
1942         struct tcphdr *th = (struct tcphdr *)skb->data;
1943
1944         return sk_filter_trim_cap(sk, skb, th->doff * 4);
1945 }
1946 EXPORT_SYMBOL(tcp_filter);
1947
1948 static void tcp_v4_restore_cb(struct sk_buff *skb)
1949 {
1950         memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
1951                 sizeof(struct inet_skb_parm));
1952 }
1953
1954 static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
1955                            const struct tcphdr *th)
1956 {
1957         /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
1958          * barrier() makes sure compiler wont play fool^Waliasing games.
1959          */
1960         memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1961                 sizeof(struct inet_skb_parm));
1962         barrier();
1963
1964         TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1965         TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1966                                     skb->len - th->doff * 4);
1967         TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1968         TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1969         TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1970         TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1971         TCP_SKB_CB(skb)->sacked  = 0;
1972         TCP_SKB_CB(skb)->has_rxtstamp =
1973                         skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
1974 }
1975
1976 /*
1977  *      From tcp_input.c
1978  */
1979
1980 int tcp_v4_rcv(struct sk_buff *skb)
1981 {
1982         struct net *net = dev_net(skb->dev);
1983         enum skb_drop_reason drop_reason;
1984         int sdif = inet_sdif(skb);
1985         int dif = inet_iif(skb);
1986         const struct iphdr *iph;
1987         const struct tcphdr *th;
1988         bool refcounted;
1989         struct sock *sk;
1990         int ret;
1991
1992         drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
1993         if (skb->pkt_type != PACKET_HOST)
1994                 goto discard_it;
1995
1996         /* Count it even if it's bad */
1997         __TCP_INC_STATS(net, TCP_MIB_INSEGS);
1998
1999         if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
2000                 goto discard_it;
2001
2002         th = (const struct tcphdr *)skb->data;
2003
2004         if (unlikely(th->doff < sizeof(struct tcphdr) / 4)) {
2005                 drop_reason = SKB_DROP_REASON_PKT_TOO_SMALL;
2006                 goto bad_packet;
2007         }
2008         if (!pskb_may_pull(skb, th->doff * 4))
2009                 goto discard_it;
2010
2011         /* An explanation is required here, I think.
2012          * Packet length and doff are validated by header prediction,
2013          * provided case of th->doff==0 is eliminated.
2014          * So, we defer the checks. */
2015
2016         if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
2017                 goto csum_error;
2018
2019         th = (const struct tcphdr *)skb->data;
2020         iph = ip_hdr(skb);
2021 lookup:
2022         sk = __inet_lookup_skb(net->ipv4.tcp_death_row.hashinfo,
2023                                skb, __tcp_hdrlen(th), th->source,
2024                                th->dest, sdif, &refcounted);
2025         if (!sk)
2026                 goto no_tcp_socket;
2027
2028 process:
2029         if (sk->sk_state == TCP_TIME_WAIT)
2030                 goto do_time_wait;
2031
2032         if (sk->sk_state == TCP_NEW_SYN_RECV) {
2033                 struct request_sock *req = inet_reqsk(sk);
2034                 bool req_stolen = false;
2035                 struct sock *nsk;
2036
2037                 sk = req->rsk_listener;
2038                 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
2039                         drop_reason = SKB_DROP_REASON_XFRM_POLICY;
2040                 else
2041                         drop_reason = tcp_inbound_md5_hash(sk, skb,
2042                                                    &iph->saddr, &iph->daddr,
2043                                                    AF_INET, dif, sdif);
2044                 if (unlikely(drop_reason)) {
2045                         sk_drops_add(sk, skb);
2046                         reqsk_put(req);
2047                         goto discard_it;
2048                 }
2049                 if (tcp_checksum_complete(skb)) {
2050                         reqsk_put(req);
2051                         goto csum_error;
2052                 }
2053                 if (unlikely(sk->sk_state != TCP_LISTEN)) {
2054                         nsk = reuseport_migrate_sock(sk, req_to_sk(req), skb);
2055                         if (!nsk) {
2056                                 inet_csk_reqsk_queue_drop_and_put(sk, req);
2057                                 goto lookup;
2058                         }
2059                         sk = nsk;
2060                         /* reuseport_migrate_sock() has already held one sk_refcnt
2061                          * before returning.
2062                          */
2063                 } else {
2064                         /* We own a reference on the listener, increase it again
2065                          * as we might lose it too soon.
2066                          */
2067                         sock_hold(sk);
2068                 }
2069                 refcounted = true;
2070                 nsk = NULL;
2071                 if (!tcp_filter(sk, skb)) {
2072                         th = (const struct tcphdr *)skb->data;
2073                         iph = ip_hdr(skb);
2074                         tcp_v4_fill_cb(skb, iph, th);
2075                         nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
2076                 } else {
2077                         drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
2078                 }
2079                 if (!nsk) {
2080                         reqsk_put(req);
2081                         if (req_stolen) {
2082                                 /* Another cpu got exclusive access to req
2083                                  * and created a full blown socket.
2084                                  * Try to feed this packet to this socket
2085                                  * instead of discarding it.
2086                                  */
2087                                 tcp_v4_restore_cb(skb);
2088                                 sock_put(sk);
2089                                 goto lookup;
2090                         }
2091                         goto discard_and_relse;
2092                 }
2093                 nf_reset_ct(skb);
2094                 if (nsk == sk) {
2095                         reqsk_put(req);
2096                         tcp_v4_restore_cb(skb);
2097                 } else if (tcp_child_process(sk, nsk, skb)) {
2098                         tcp_v4_send_reset(nsk, skb);
2099                         goto discard_and_relse;
2100                 } else {
2101                         sock_put(sk);
2102                         return 0;
2103                 }
2104         }
2105
2106         if (static_branch_unlikely(&ip4_min_ttl)) {
2107                 /* min_ttl can be changed concurrently from do_ip_setsockopt() */
2108                 if (unlikely(iph->ttl < READ_ONCE(inet_sk(sk)->min_ttl))) {
2109                         __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
2110                         drop_reason = SKB_DROP_REASON_TCP_MINTTL;
2111                         goto discard_and_relse;
2112                 }
2113         }
2114
2115         if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
2116                 drop_reason = SKB_DROP_REASON_XFRM_POLICY;
2117                 goto discard_and_relse;
2118         }
2119
2120         drop_reason = tcp_inbound_md5_hash(sk, skb, &iph->saddr,
2121                                            &iph->daddr, AF_INET, dif, sdif);
2122         if (drop_reason)
2123                 goto discard_and_relse;
2124
2125         nf_reset_ct(skb);
2126
2127         if (tcp_filter(sk, skb)) {
2128                 drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
2129                 goto discard_and_relse;
2130         }
2131         th = (const struct tcphdr *)skb->data;
2132         iph = ip_hdr(skb);
2133         tcp_v4_fill_cb(skb, iph, th);
2134
2135         skb->dev = NULL;
2136
2137         if (sk->sk_state == TCP_LISTEN) {
2138                 ret = tcp_v4_do_rcv(sk, skb);
2139                 goto put_and_return;
2140         }
2141
2142         sk_incoming_cpu_update(sk);
2143
2144         bh_lock_sock_nested(sk);
2145         tcp_segs_in(tcp_sk(sk), skb);
2146         ret = 0;
2147         if (!sock_owned_by_user(sk)) {
2148                 ret = tcp_v4_do_rcv(sk, skb);
2149         } else {
2150                 if (tcp_add_backlog(sk, skb, &drop_reason))
2151                         goto discard_and_relse;
2152         }
2153         bh_unlock_sock(sk);
2154
2155 put_and_return:
2156         if (refcounted)
2157                 sock_put(sk);
2158
2159         return ret;
2160
2161 no_tcp_socket:
2162         drop_reason = SKB_DROP_REASON_NO_SOCKET;
2163         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
2164                 goto discard_it;
2165
2166         tcp_v4_fill_cb(skb, iph, th);
2167
2168         if (tcp_checksum_complete(skb)) {
2169 csum_error:
2170                 drop_reason = SKB_DROP_REASON_TCP_CSUM;
2171                 trace_tcp_bad_csum(skb);
2172                 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
2173 bad_packet:
2174                 __TCP_INC_STATS(net, TCP_MIB_INERRS);
2175         } else {
2176                 tcp_v4_send_reset(NULL, skb);
2177         }
2178
2179 discard_it:
2180         SKB_DR_OR(drop_reason, NOT_SPECIFIED);
2181         /* Discard frame. */
2182         kfree_skb_reason(skb, drop_reason);
2183         return 0;
2184
2185 discard_and_relse:
2186         sk_drops_add(sk, skb);
2187         if (refcounted)
2188                 sock_put(sk);
2189         goto discard_it;
2190
2191 do_time_wait:
2192         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
2193                 drop_reason = SKB_DROP_REASON_XFRM_POLICY;
2194                 inet_twsk_put(inet_twsk(sk));
2195                 goto discard_it;
2196         }
2197
2198         tcp_v4_fill_cb(skb, iph, th);
2199
2200         if (tcp_checksum_complete(skb)) {
2201                 inet_twsk_put(inet_twsk(sk));
2202                 goto csum_error;
2203         }
2204         switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
2205         case TCP_TW_SYN: {
2206                 struct sock *sk2 = inet_lookup_listener(net,
2207                                                         net->ipv4.tcp_death_row.hashinfo,
2208                                                         skb, __tcp_hdrlen(th),
2209                                                         iph->saddr, th->source,
2210                                                         iph->daddr, th->dest,
2211                                                         inet_iif(skb),
2212                                                         sdif);
2213                 if (sk2) {
2214                         inet_twsk_deschedule_put(inet_twsk(sk));
2215                         sk = sk2;
2216                         tcp_v4_restore_cb(skb);
2217                         refcounted = false;
2218                         goto process;
2219                 }
2220         }
2221                 /* to ACK */
2222                 fallthrough;
2223         case TCP_TW_ACK:
2224                 tcp_v4_timewait_ack(sk, skb);
2225                 break;
2226         case TCP_TW_RST:
2227                 tcp_v4_send_reset(sk, skb);
2228                 inet_twsk_deschedule_put(inet_twsk(sk));
2229                 goto discard_it;
2230         case TCP_TW_SUCCESS:;
2231         }
2232         goto discard_it;
2233 }
2234
2235 static struct timewait_sock_ops tcp_timewait_sock_ops = {
2236         .twsk_obj_size  = sizeof(struct tcp_timewait_sock),
2237         .twsk_unique    = tcp_twsk_unique,
2238         .twsk_destructor= tcp_twsk_destructor,
2239 };
2240
2241 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
2242 {
2243         struct dst_entry *dst = skb_dst(skb);
2244
2245         if (dst && dst_hold_safe(dst)) {
2246                 rcu_assign_pointer(sk->sk_rx_dst, dst);
2247                 sk->sk_rx_dst_ifindex = skb->skb_iif;
2248         }
2249 }
2250 EXPORT_SYMBOL(inet_sk_rx_dst_set);
2251
2252 const struct inet_connection_sock_af_ops ipv4_specific = {
2253         .queue_xmit        = ip_queue_xmit,
2254         .send_check        = tcp_v4_send_check,
2255         .rebuild_header    = inet_sk_rebuild_header,
2256         .sk_rx_dst_set     = inet_sk_rx_dst_set,
2257         .conn_request      = tcp_v4_conn_request,
2258         .syn_recv_sock     = tcp_v4_syn_recv_sock,
2259         .net_header_len    = sizeof(struct iphdr),
2260         .setsockopt        = ip_setsockopt,
2261         .getsockopt        = ip_getsockopt,
2262         .addr2sockaddr     = inet_csk_addr2sockaddr,
2263         .sockaddr_len      = sizeof(struct sockaddr_in),
2264         .mtu_reduced       = tcp_v4_mtu_reduced,
2265 };
2266 EXPORT_SYMBOL(ipv4_specific);
2267
2268 #ifdef CONFIG_TCP_MD5SIG
2269 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
2270         .md5_lookup             = tcp_v4_md5_lookup,
2271         .calc_md5_hash          = tcp_v4_md5_hash_skb,
2272         .md5_parse              = tcp_v4_parse_md5_keys,
2273 };
2274 #endif
2275
2276 /* NOTE: A lot of things set to zero explicitly by call to
2277  *       sk_alloc() so need not be done here.
2278  */
2279 static int tcp_v4_init_sock(struct sock *sk)
2280 {
2281         struct inet_connection_sock *icsk = inet_csk(sk);
2282
2283         tcp_init_sock(sk);
2284
2285         icsk->icsk_af_ops = &ipv4_specific;
2286
2287 #ifdef CONFIG_TCP_MD5SIG
2288         tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
2289 #endif
2290
2291         return 0;
2292 }
2293
2294 void tcp_v4_destroy_sock(struct sock *sk)
2295 {
2296         struct tcp_sock *tp = tcp_sk(sk);
2297
2298         trace_tcp_destroy_sock(sk);
2299
2300         tcp_clear_xmit_timers(sk);
2301
2302         tcp_cleanup_congestion_control(sk);
2303
2304         tcp_cleanup_ulp(sk);
2305
2306         /* Cleanup up the write buffer. */
2307         tcp_write_queue_purge(sk);
2308
2309         /* Check if we want to disable active TFO */
2310         tcp_fastopen_active_disable_ofo_check(sk);
2311
2312         /* Cleans up our, hopefully empty, out_of_order_queue. */
2313         skb_rbtree_purge(&tp->out_of_order_queue);
2314
2315 #ifdef CONFIG_TCP_MD5SIG
2316         /* Clean up the MD5 key list, if any */
2317         if (tp->md5sig_info) {
2318                 tcp_clear_md5_list(sk);
2319                 kfree_rcu(rcu_dereference_protected(tp->md5sig_info, 1), rcu);
2320                 tp->md5sig_info = NULL;
2321                 static_branch_slow_dec_deferred(&tcp_md5_needed);
2322         }
2323 #endif
2324
2325         /* Clean up a referenced TCP bind bucket. */
2326         if (inet_csk(sk)->icsk_bind_hash)
2327                 inet_put_port(sk);
2328
2329         BUG_ON(rcu_access_pointer(tp->fastopen_rsk));
2330
2331         /* If socket is aborted during connect operation */
2332         tcp_free_fastopen_req(tp);
2333         tcp_fastopen_destroy_cipher(sk);
2334         tcp_saved_syn_free(tp);
2335
2336         sk_sockets_allocated_dec(sk);
2337 }
2338 EXPORT_SYMBOL(tcp_v4_destroy_sock);
2339
2340 #ifdef CONFIG_PROC_FS
2341 /* Proc filesystem TCP sock list dumping. */
2342
2343 static unsigned short seq_file_family(const struct seq_file *seq);
2344
2345 static bool seq_sk_match(struct seq_file *seq, const struct sock *sk)
2346 {
2347         unsigned short family = seq_file_family(seq);
2348
2349         /* AF_UNSPEC is used as a match all */
2350         return ((family == AF_UNSPEC || family == sk->sk_family) &&
2351                 net_eq(sock_net(sk), seq_file_net(seq)));
2352 }
2353
2354 /* Find a non empty bucket (starting from st->bucket)
2355  * and return the first sk from it.
2356  */
2357 static void *listening_get_first(struct seq_file *seq)
2358 {
2359         struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2360         struct tcp_iter_state *st = seq->private;
2361
2362         st->offset = 0;
2363         for (; st->bucket <= hinfo->lhash2_mask; st->bucket++) {
2364                 struct inet_listen_hashbucket *ilb2;
2365                 struct hlist_nulls_node *node;
2366                 struct sock *sk;
2367
2368                 ilb2 = &hinfo->lhash2[st->bucket];
2369                 if (hlist_nulls_empty(&ilb2->nulls_head))
2370                         continue;
2371
2372                 spin_lock(&ilb2->lock);
2373                 sk_nulls_for_each(sk, node, &ilb2->nulls_head) {
2374                         if (seq_sk_match(seq, sk))
2375                                 return sk;
2376                 }
2377                 spin_unlock(&ilb2->lock);
2378         }
2379
2380         return NULL;
2381 }
2382
2383 /* Find the next sk of "cur" within the same bucket (i.e. st->bucket).
2384  * If "cur" is the last one in the st->bucket,
2385  * call listening_get_first() to return the first sk of the next
2386  * non empty bucket.
2387  */
2388 static void *listening_get_next(struct seq_file *seq, void *cur)
2389 {
2390         struct tcp_iter_state *st = seq->private;
2391         struct inet_listen_hashbucket *ilb2;
2392         struct hlist_nulls_node *node;
2393         struct inet_hashinfo *hinfo;
2394         struct sock *sk = cur;
2395
2396         ++st->num;
2397         ++st->offset;
2398
2399         sk = sk_nulls_next(sk);
2400         sk_nulls_for_each_from(sk, node) {
2401                 if (seq_sk_match(seq, sk))
2402                         return sk;
2403         }
2404
2405         hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2406         ilb2 = &hinfo->lhash2[st->bucket];
2407         spin_unlock(&ilb2->lock);
2408         ++st->bucket;
2409         return listening_get_first(seq);
2410 }
2411
2412 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2413 {
2414         struct tcp_iter_state *st = seq->private;
2415         void *rc;
2416
2417         st->bucket = 0;
2418         st->offset = 0;
2419         rc = listening_get_first(seq);
2420
2421         while (rc && *pos) {
2422                 rc = listening_get_next(seq, rc);
2423                 --*pos;
2424         }
2425         return rc;
2426 }
2427
2428 static inline bool empty_bucket(struct inet_hashinfo *hinfo,
2429                                 const struct tcp_iter_state *st)
2430 {
2431         return hlist_nulls_empty(&hinfo->ehash[st->bucket].chain);
2432 }
2433
2434 /*
2435  * Get first established socket starting from bucket given in st->bucket.
2436  * If st->bucket is zero, the very first socket in the hash is returned.
2437  */
2438 static void *established_get_first(struct seq_file *seq)
2439 {
2440         struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2441         struct tcp_iter_state *st = seq->private;
2442
2443         st->offset = 0;
2444         for (; st->bucket <= hinfo->ehash_mask; ++st->bucket) {
2445                 struct sock *sk;
2446                 struct hlist_nulls_node *node;
2447                 spinlock_t *lock = inet_ehash_lockp(hinfo, st->bucket);
2448
2449                 /* Lockless fast path for the common case of empty buckets */
2450                 if (empty_bucket(hinfo, st))
2451                         continue;
2452
2453                 spin_lock_bh(lock);
2454                 sk_nulls_for_each(sk, node, &hinfo->ehash[st->bucket].chain) {
2455                         if (seq_sk_match(seq, sk))
2456                                 return sk;
2457                 }
2458                 spin_unlock_bh(lock);
2459         }
2460
2461         return NULL;
2462 }
2463
2464 static void *established_get_next(struct seq_file *seq, void *cur)
2465 {
2466         struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2467         struct tcp_iter_state *st = seq->private;
2468         struct hlist_nulls_node *node;
2469         struct sock *sk = cur;
2470
2471         ++st->num;
2472         ++st->offset;
2473
2474         sk = sk_nulls_next(sk);
2475
2476         sk_nulls_for_each_from(sk, node) {
2477                 if (seq_sk_match(seq, sk))
2478                         return sk;
2479         }
2480
2481         spin_unlock_bh(inet_ehash_lockp(hinfo, st->bucket));
2482         ++st->bucket;
2483         return established_get_first(seq);
2484 }
2485
2486 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2487 {
2488         struct tcp_iter_state *st = seq->private;
2489         void *rc;
2490
2491         st->bucket = 0;
2492         rc = established_get_first(seq);
2493
2494         while (rc && pos) {
2495                 rc = established_get_next(seq, rc);
2496                 --pos;
2497         }
2498         return rc;
2499 }
2500
2501 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2502 {
2503         void *rc;
2504         struct tcp_iter_state *st = seq->private;
2505
2506         st->state = TCP_SEQ_STATE_LISTENING;
2507         rc        = listening_get_idx(seq, &pos);
2508
2509         if (!rc) {
2510                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2511                 rc        = established_get_idx(seq, pos);
2512         }
2513
2514         return rc;
2515 }
2516
2517 static void *tcp_seek_last_pos(struct seq_file *seq)
2518 {
2519         struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2520         struct tcp_iter_state *st = seq->private;
2521         int bucket = st->bucket;
2522         int offset = st->offset;
2523         int orig_num = st->num;
2524         void *rc = NULL;
2525
2526         switch (st->state) {
2527         case TCP_SEQ_STATE_LISTENING:
2528                 if (st->bucket > hinfo->lhash2_mask)
2529                         break;
2530                 rc = listening_get_first(seq);
2531                 while (offset-- && rc && bucket == st->bucket)
2532                         rc = listening_get_next(seq, rc);
2533                 if (rc)
2534                         break;
2535                 st->bucket = 0;
2536                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2537                 fallthrough;
2538         case TCP_SEQ_STATE_ESTABLISHED:
2539                 if (st->bucket > hinfo->ehash_mask)
2540                         break;
2541                 rc = established_get_first(seq);
2542                 while (offset-- && rc && bucket == st->bucket)
2543                         rc = established_get_next(seq, rc);
2544         }
2545
2546         st->num = orig_num;
2547
2548         return rc;
2549 }
2550
2551 void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2552 {
2553         struct tcp_iter_state *st = seq->private;
2554         void *rc;
2555
2556         if (*pos && *pos == st->last_pos) {
2557                 rc = tcp_seek_last_pos(seq);
2558                 if (rc)
2559                         goto out;
2560         }
2561
2562         st->state = TCP_SEQ_STATE_LISTENING;
2563         st->num = 0;
2564         st->bucket = 0;
2565         st->offset = 0;
2566         rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2567
2568 out:
2569         st->last_pos = *pos;
2570         return rc;
2571 }
2572 EXPORT_SYMBOL(tcp_seq_start);
2573
2574 void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2575 {
2576         struct tcp_iter_state *st = seq->private;
2577         void *rc = NULL;
2578
2579         if (v == SEQ_START_TOKEN) {
2580                 rc = tcp_get_idx(seq, 0);
2581                 goto out;
2582         }
2583
2584         switch (st->state) {
2585         case TCP_SEQ_STATE_LISTENING:
2586                 rc = listening_get_next(seq, v);
2587                 if (!rc) {
2588                         st->state = TCP_SEQ_STATE_ESTABLISHED;
2589                         st->bucket = 0;
2590                         st->offset = 0;
2591                         rc        = established_get_first(seq);
2592                 }
2593                 break;
2594         case TCP_SEQ_STATE_ESTABLISHED:
2595                 rc = established_get_next(seq, v);
2596                 break;
2597         }
2598 out:
2599         ++*pos;
2600         st->last_pos = *pos;
2601         return rc;
2602 }
2603 EXPORT_SYMBOL(tcp_seq_next);
2604
2605 void tcp_seq_stop(struct seq_file *seq, void *v)
2606 {
2607         struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2608         struct tcp_iter_state *st = seq->private;
2609
2610         switch (st->state) {
2611         case TCP_SEQ_STATE_LISTENING:
2612                 if (v != SEQ_START_TOKEN)
2613                         spin_unlock(&hinfo->lhash2[st->bucket].lock);
2614                 break;
2615         case TCP_SEQ_STATE_ESTABLISHED:
2616                 if (v)
2617                         spin_unlock_bh(inet_ehash_lockp(hinfo, st->bucket));
2618                 break;
2619         }
2620 }
2621 EXPORT_SYMBOL(tcp_seq_stop);
2622
2623 static void get_openreq4(const struct request_sock *req,
2624                          struct seq_file *f, int i)
2625 {
2626         const struct inet_request_sock *ireq = inet_rsk(req);
2627         long delta = req->rsk_timer.expires - jiffies;
2628
2629         seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2630                 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2631                 i,
2632                 ireq->ir_loc_addr,
2633                 ireq->ir_num,
2634                 ireq->ir_rmt_addr,
2635                 ntohs(ireq->ir_rmt_port),
2636                 TCP_SYN_RECV,
2637                 0, 0, /* could print option size, but that is af dependent. */
2638                 1,    /* timers active (only the expire timer) */
2639                 jiffies_delta_to_clock_t(delta),
2640                 req->num_timeout,
2641                 from_kuid_munged(seq_user_ns(f),
2642                                  sock_i_uid(req->rsk_listener)),
2643                 0,  /* non standard timer */
2644                 0, /* open_requests have no inode */
2645                 0,
2646                 req);
2647 }
2648
2649 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2650 {
2651         int timer_active;
2652         unsigned long timer_expires;
2653         const struct tcp_sock *tp = tcp_sk(sk);
2654         const struct inet_connection_sock *icsk = inet_csk(sk);
2655         const struct inet_sock *inet = inet_sk(sk);
2656         const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
2657         __be32 dest = inet->inet_daddr;
2658         __be32 src = inet->inet_rcv_saddr;
2659         __u16 destp = ntohs(inet->inet_dport);
2660         __u16 srcp = ntohs(inet->inet_sport);
2661         int rx_queue;
2662         int state;
2663
2664         if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2665             icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
2666             icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2667                 timer_active    = 1;
2668                 timer_expires   = icsk->icsk_timeout;
2669         } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2670                 timer_active    = 4;
2671                 timer_expires   = icsk->icsk_timeout;
2672         } else if (timer_pending(&sk->sk_timer)) {
2673                 timer_active    = 2;
2674                 timer_expires   = sk->sk_timer.expires;
2675         } else {
2676                 timer_active    = 0;
2677                 timer_expires = jiffies;
2678         }
2679
2680         state = inet_sk_state_load(sk);
2681         if (state == TCP_LISTEN)
2682                 rx_queue = READ_ONCE(sk->sk_ack_backlog);
2683         else
2684                 /* Because we don't lock the socket,
2685                  * we might find a transient negative value.
2686                  */
2687                 rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
2688                                       READ_ONCE(tp->copied_seq), 0);
2689
2690         seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2691                         "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2692                 i, src, srcp, dest, destp, state,
2693                 READ_ONCE(tp->write_seq) - tp->snd_una,
2694                 rx_queue,
2695                 timer_active,
2696                 jiffies_delta_to_clock_t(timer_expires - jiffies),
2697                 icsk->icsk_retransmits,
2698                 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2699                 icsk->icsk_probes_out,
2700                 sock_i_ino(sk),
2701                 refcount_read(&sk->sk_refcnt), sk,
2702                 jiffies_to_clock_t(icsk->icsk_rto),
2703                 jiffies_to_clock_t(icsk->icsk_ack.ato),
2704                 (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sk),
2705                 tcp_snd_cwnd(tp),
2706                 state == TCP_LISTEN ?
2707                     fastopenq->max_qlen :
2708                     (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2709 }
2710
2711 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2712                                struct seq_file *f, int i)
2713 {
2714         long delta = tw->tw_timer.expires - jiffies;
2715         __be32 dest, src;
2716         __u16 destp, srcp;
2717
2718         dest  = tw->tw_daddr;
2719         src   = tw->tw_rcv_saddr;
2720         destp = ntohs(tw->tw_dport);
2721         srcp  = ntohs(tw->tw_sport);
2722
2723         seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2724                 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2725                 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2726                 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2727                 refcount_read(&tw->tw_refcnt), tw);
2728 }
2729
2730 #define TMPSZ 150
2731
2732 static int tcp4_seq_show(struct seq_file *seq, void *v)
2733 {
2734         struct tcp_iter_state *st;
2735         struct sock *sk = v;
2736
2737         seq_setwidth(seq, TMPSZ - 1);
2738         if (v == SEQ_START_TOKEN) {
2739                 seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
2740                            "rx_queue tr tm->when retrnsmt   uid  timeout "
2741                            "inode");
2742                 goto out;
2743         }
2744         st = seq->private;
2745
2746         if (sk->sk_state == TCP_TIME_WAIT)
2747                 get_timewait4_sock(v, seq, st->num);
2748         else if (sk->sk_state == TCP_NEW_SYN_RECV)
2749                 get_openreq4(v, seq, st->num);
2750         else
2751                 get_tcp4_sock(v, seq, st->num);
2752 out:
2753         seq_pad(seq, '\n');
2754         return 0;
2755 }
2756
2757 #ifdef CONFIG_BPF_SYSCALL
2758 struct bpf_tcp_iter_state {
2759         struct tcp_iter_state state;
2760         unsigned int cur_sk;
2761         unsigned int end_sk;
2762         unsigned int max_sk;
2763         struct sock **batch;
2764         bool st_bucket_done;
2765 };
2766
2767 struct bpf_iter__tcp {
2768         __bpf_md_ptr(struct bpf_iter_meta *, meta);
2769         __bpf_md_ptr(struct sock_common *, sk_common);
2770         uid_t uid __aligned(8);
2771 };
2772
2773 static int tcp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
2774                              struct sock_common *sk_common, uid_t uid)
2775 {
2776         struct bpf_iter__tcp ctx;
2777
2778         meta->seq_num--;  /* skip SEQ_START_TOKEN */
2779         ctx.meta = meta;
2780         ctx.sk_common = sk_common;
2781         ctx.uid = uid;
2782         return bpf_iter_run_prog(prog, &ctx);
2783 }
2784
2785 static void bpf_iter_tcp_put_batch(struct bpf_tcp_iter_state *iter)
2786 {
2787         while (iter->cur_sk < iter->end_sk)
2788                 sock_gen_put(iter->batch[iter->cur_sk++]);
2789 }
2790
2791 static int bpf_iter_tcp_realloc_batch(struct bpf_tcp_iter_state *iter,
2792                                       unsigned int new_batch_sz)
2793 {
2794         struct sock **new_batch;
2795
2796         new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz,
2797                              GFP_USER | __GFP_NOWARN);
2798         if (!new_batch)
2799                 return -ENOMEM;
2800
2801         bpf_iter_tcp_put_batch(iter);
2802         kvfree(iter->batch);
2803         iter->batch = new_batch;
2804         iter->max_sk = new_batch_sz;
2805
2806         return 0;
2807 }
2808
2809 static unsigned int bpf_iter_tcp_listening_batch(struct seq_file *seq,
2810                                                  struct sock *start_sk)
2811 {
2812         struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2813         struct bpf_tcp_iter_state *iter = seq->private;
2814         struct tcp_iter_state *st = &iter->state;
2815         struct hlist_nulls_node *node;
2816         unsigned int expected = 1;
2817         struct sock *sk;
2818
2819         sock_hold(start_sk);
2820         iter->batch[iter->end_sk++] = start_sk;
2821
2822         sk = sk_nulls_next(start_sk);
2823         sk_nulls_for_each_from(sk, node) {
2824                 if (seq_sk_match(seq, sk)) {
2825                         if (iter->end_sk < iter->max_sk) {
2826                                 sock_hold(sk);
2827                                 iter->batch[iter->end_sk++] = sk;
2828                         }
2829                         expected++;
2830                 }
2831         }
2832         spin_unlock(&hinfo->lhash2[st->bucket].lock);
2833
2834         return expected;
2835 }
2836
2837 static unsigned int bpf_iter_tcp_established_batch(struct seq_file *seq,
2838                                                    struct sock *start_sk)
2839 {
2840         struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2841         struct bpf_tcp_iter_state *iter = seq->private;
2842         struct tcp_iter_state *st = &iter->state;
2843         struct hlist_nulls_node *node;
2844         unsigned int expected = 1;
2845         struct sock *sk;
2846
2847         sock_hold(start_sk);
2848         iter->batch[iter->end_sk++] = start_sk;
2849
2850         sk = sk_nulls_next(start_sk);
2851         sk_nulls_for_each_from(sk, node) {
2852                 if (seq_sk_match(seq, sk)) {
2853                         if (iter->end_sk < iter->max_sk) {
2854                                 sock_hold(sk);
2855                                 iter->batch[iter->end_sk++] = sk;
2856                         }
2857                         expected++;
2858                 }
2859         }
2860         spin_unlock_bh(inet_ehash_lockp(hinfo, st->bucket));
2861
2862         return expected;
2863 }
2864
2865 static struct sock *bpf_iter_tcp_batch(struct seq_file *seq)
2866 {
2867         struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2868         struct bpf_tcp_iter_state *iter = seq->private;
2869         struct tcp_iter_state *st = &iter->state;
2870         unsigned int expected;
2871         bool resized = false;
2872         struct sock *sk;
2873
2874         /* The st->bucket is done.  Directly advance to the next
2875          * bucket instead of having the tcp_seek_last_pos() to skip
2876          * one by one in the current bucket and eventually find out
2877          * it has to advance to the next bucket.
2878          */
2879         if (iter->st_bucket_done) {
2880                 st->offset = 0;
2881                 st->bucket++;
2882                 if (st->state == TCP_SEQ_STATE_LISTENING &&
2883                     st->bucket > hinfo->lhash2_mask) {
2884                         st->state = TCP_SEQ_STATE_ESTABLISHED;
2885                         st->bucket = 0;
2886                 }
2887         }
2888
2889 again:
2890         /* Get a new batch */
2891         iter->cur_sk = 0;
2892         iter->end_sk = 0;
2893         iter->st_bucket_done = false;
2894
2895         sk = tcp_seek_last_pos(seq);
2896         if (!sk)
2897                 return NULL; /* Done */
2898
2899         if (st->state == TCP_SEQ_STATE_LISTENING)
2900                 expected = bpf_iter_tcp_listening_batch(seq, sk);
2901         else
2902                 expected = bpf_iter_tcp_established_batch(seq, sk);
2903
2904         if (iter->end_sk == expected) {
2905                 iter->st_bucket_done = true;
2906                 return sk;
2907         }
2908
2909         if (!resized && !bpf_iter_tcp_realloc_batch(iter, expected * 3 / 2)) {
2910                 resized = true;
2911                 goto again;
2912         }
2913
2914         return sk;
2915 }
2916
2917 static void *bpf_iter_tcp_seq_start(struct seq_file *seq, loff_t *pos)
2918 {
2919         /* bpf iter does not support lseek, so it always
2920          * continue from where it was stop()-ped.
2921          */
2922         if (*pos)
2923                 return bpf_iter_tcp_batch(seq);
2924
2925         return SEQ_START_TOKEN;
2926 }
2927
2928 static void *bpf_iter_tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2929 {
2930         struct bpf_tcp_iter_state *iter = seq->private;
2931         struct tcp_iter_state *st = &iter->state;
2932         struct sock *sk;
2933
2934         /* Whenever seq_next() is called, the iter->cur_sk is
2935          * done with seq_show(), so advance to the next sk in
2936          * the batch.
2937          */
2938         if (iter->cur_sk < iter->end_sk) {
2939                 /* Keeping st->num consistent in tcp_iter_state.
2940                  * bpf_iter_tcp does not use st->num.
2941                  * meta.seq_num is used instead.
2942                  */
2943                 st->num++;
2944                 /* Move st->offset to the next sk in the bucket such that
2945                  * the future start() will resume at st->offset in
2946                  * st->bucket.  See tcp_seek_last_pos().
2947                  */
2948                 st->offset++;
2949                 sock_gen_put(iter->batch[iter->cur_sk++]);
2950         }
2951
2952         if (iter->cur_sk < iter->end_sk)
2953                 sk = iter->batch[iter->cur_sk];
2954         else
2955                 sk = bpf_iter_tcp_batch(seq);
2956
2957         ++*pos;
2958         /* Keeping st->last_pos consistent in tcp_iter_state.
2959          * bpf iter does not do lseek, so st->last_pos always equals to *pos.
2960          */
2961         st->last_pos = *pos;
2962         return sk;
2963 }
2964
2965 static int bpf_iter_tcp_seq_show(struct seq_file *seq, void *v)
2966 {
2967         struct bpf_iter_meta meta;
2968         struct bpf_prog *prog;
2969         struct sock *sk = v;
2970         uid_t uid;
2971         int ret;
2972
2973         if (v == SEQ_START_TOKEN)
2974                 return 0;
2975
2976         if (sk_fullsock(sk))
2977                 lock_sock(sk);
2978
2979         if (unlikely(sk_unhashed(sk))) {
2980                 ret = SEQ_SKIP;
2981                 goto unlock;
2982         }
2983
2984         if (sk->sk_state == TCP_TIME_WAIT) {
2985                 uid = 0;
2986         } else if (sk->sk_state == TCP_NEW_SYN_RECV) {
2987                 const struct request_sock *req = v;
2988
2989                 uid = from_kuid_munged(seq_user_ns(seq),
2990                                        sock_i_uid(req->rsk_listener));
2991         } else {
2992                 uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
2993         }
2994
2995         meta.seq = seq;
2996         prog = bpf_iter_get_info(&meta, false);
2997         ret = tcp_prog_seq_show(prog, &meta, v, uid);
2998
2999 unlock:
3000         if (sk_fullsock(sk))
3001                 release_sock(sk);
3002         return ret;
3003
3004 }
3005
3006 static void bpf_iter_tcp_seq_stop(struct seq_file *seq, void *v)
3007 {
3008         struct bpf_tcp_iter_state *iter = seq->private;
3009         struct bpf_iter_meta meta;
3010         struct bpf_prog *prog;
3011
3012         if (!v) {
3013                 meta.seq = seq;
3014                 prog = bpf_iter_get_info(&meta, true);
3015                 if (prog)
3016                         (void)tcp_prog_seq_show(prog, &meta, v, 0);
3017         }
3018
3019         if (iter->cur_sk < iter->end_sk) {
3020                 bpf_iter_tcp_put_batch(iter);
3021                 iter->st_bucket_done = false;
3022         }
3023 }
3024
3025 static const struct seq_operations bpf_iter_tcp_seq_ops = {
3026         .show           = bpf_iter_tcp_seq_show,
3027         .start          = bpf_iter_tcp_seq_start,
3028         .next           = bpf_iter_tcp_seq_next,
3029         .stop           = bpf_iter_tcp_seq_stop,
3030 };
3031 #endif
3032 static unsigned short seq_file_family(const struct seq_file *seq)
3033 {
3034         const struct tcp_seq_afinfo *afinfo;
3035
3036 #ifdef CONFIG_BPF_SYSCALL
3037         /* Iterated from bpf_iter.  Let the bpf prog to filter instead. */
3038         if (seq->op == &bpf_iter_tcp_seq_ops)
3039                 return AF_UNSPEC;
3040 #endif
3041
3042         /* Iterated from proc fs */
3043         afinfo = pde_data(file_inode(seq->file));
3044         return afinfo->family;
3045 }
3046
3047 static const struct seq_operations tcp4_seq_ops = {
3048         .show           = tcp4_seq_show,
3049         .start          = tcp_seq_start,
3050         .next           = tcp_seq_next,
3051         .stop           = tcp_seq_stop,
3052 };
3053
3054 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
3055         .family         = AF_INET,
3056 };
3057
3058 static int __net_init tcp4_proc_init_net(struct net *net)
3059 {
3060         if (!proc_create_net_data("tcp", 0444, net->proc_net, &tcp4_seq_ops,
3061                         sizeof(struct tcp_iter_state), &tcp4_seq_afinfo))
3062                 return -ENOMEM;
3063         return 0;
3064 }
3065
3066 static void __net_exit tcp4_proc_exit_net(struct net *net)
3067 {
3068         remove_proc_entry("tcp", net->proc_net);
3069 }
3070
3071 static struct pernet_operations tcp4_net_ops = {
3072         .init = tcp4_proc_init_net,
3073         .exit = tcp4_proc_exit_net,
3074 };
3075
3076 int __init tcp4_proc_init(void)
3077 {
3078         return register_pernet_subsys(&tcp4_net_ops);
3079 }
3080
3081 void tcp4_proc_exit(void)
3082 {
3083         unregister_pernet_subsys(&tcp4_net_ops);
3084 }
3085 #endif /* CONFIG_PROC_FS */
3086
3087 /* @wake is one when sk_stream_write_space() calls us.
3088  * This sends EPOLLOUT only if notsent_bytes is half the limit.
3089  * This mimics the strategy used in sock_def_write_space().
3090  */
3091 bool tcp_stream_memory_free(const struct sock *sk, int wake)
3092 {
3093         const struct tcp_sock *tp = tcp_sk(sk);
3094         u32 notsent_bytes = READ_ONCE(tp->write_seq) -
3095                             READ_ONCE(tp->snd_nxt);
3096
3097         return (notsent_bytes << wake) < tcp_notsent_lowat(tp);
3098 }
3099 EXPORT_SYMBOL(tcp_stream_memory_free);
3100
3101 struct proto tcp_prot = {
3102         .name                   = "TCP",
3103         .owner                  = THIS_MODULE,
3104         .close                  = tcp_close,
3105         .pre_connect            = tcp_v4_pre_connect,
3106         .connect                = tcp_v4_connect,
3107         .disconnect             = tcp_disconnect,
3108         .accept                 = inet_csk_accept,
3109         .ioctl                  = tcp_ioctl,
3110         .init                   = tcp_v4_init_sock,
3111         .destroy                = tcp_v4_destroy_sock,
3112         .shutdown               = tcp_shutdown,
3113         .setsockopt             = tcp_setsockopt,
3114         .getsockopt             = tcp_getsockopt,
3115         .bpf_bypass_getsockopt  = tcp_bpf_bypass_getsockopt,
3116         .keepalive              = tcp_set_keepalive,
3117         .recvmsg                = tcp_recvmsg,
3118         .sendmsg                = tcp_sendmsg,
3119         .splice_eof             = tcp_splice_eof,
3120         .backlog_rcv            = tcp_v4_do_rcv,
3121         .release_cb             = tcp_release_cb,
3122         .hash                   = inet_hash,
3123         .unhash                 = inet_unhash,
3124         .get_port               = inet_csk_get_port,
3125         .put_port               = inet_put_port,
3126 #ifdef CONFIG_BPF_SYSCALL
3127         .psock_update_sk_prot   = tcp_bpf_update_proto,
3128 #endif
3129         .enter_memory_pressure  = tcp_enter_memory_pressure,
3130         .leave_memory_pressure  = tcp_leave_memory_pressure,
3131         .stream_memory_free     = tcp_stream_memory_free,
3132         .sockets_allocated      = &tcp_sockets_allocated,
3133         .orphan_count           = &tcp_orphan_count,
3134
3135         .memory_allocated       = &tcp_memory_allocated,
3136         .per_cpu_fw_alloc       = &tcp_memory_per_cpu_fw_alloc,
3137
3138         .memory_pressure        = &tcp_memory_pressure,
3139         .sysctl_mem             = sysctl_tcp_mem,
3140         .sysctl_wmem_offset     = offsetof(struct net, ipv4.sysctl_tcp_wmem),
3141         .sysctl_rmem_offset     = offsetof(struct net, ipv4.sysctl_tcp_rmem),
3142         .max_header             = MAX_TCP_HEADER,
3143         .obj_size               = sizeof(struct tcp_sock),
3144         .slab_flags             = SLAB_TYPESAFE_BY_RCU,
3145         .twsk_prot              = &tcp_timewait_sock_ops,
3146         .rsk_prot               = &tcp_request_sock_ops,
3147         .h.hashinfo             = NULL,
3148         .no_autobind            = true,
3149         .diag_destroy           = tcp_abort,
3150 };
3151 EXPORT_SYMBOL(tcp_prot);
3152
3153 static void __net_exit tcp_sk_exit(struct net *net)
3154 {
3155         if (net->ipv4.tcp_congestion_control)
3156                 bpf_module_put(net->ipv4.tcp_congestion_control,
3157                                net->ipv4.tcp_congestion_control->owner);
3158 }
3159
3160 static void __net_init tcp_set_hashinfo(struct net *net)
3161 {
3162         struct inet_hashinfo *hinfo;
3163         unsigned int ehash_entries;
3164         struct net *old_net;
3165
3166         if (net_eq(net, &init_net))
3167                 goto fallback;
3168
3169         old_net = current->nsproxy->net_ns;
3170         ehash_entries = READ_ONCE(old_net->ipv4.sysctl_tcp_child_ehash_entries);
3171         if (!ehash_entries)
3172                 goto fallback;
3173
3174         ehash_entries = roundup_pow_of_two(ehash_entries);
3175         hinfo = inet_pernet_hashinfo_alloc(&tcp_hashinfo, ehash_entries);
3176         if (!hinfo) {
3177                 pr_warn("Failed to allocate TCP ehash (entries: %u) "
3178                         "for a netns, fallback to the global one\n",
3179                         ehash_entries);
3180 fallback:
3181                 hinfo = &tcp_hashinfo;
3182                 ehash_entries = tcp_hashinfo.ehash_mask + 1;
3183         }
3184
3185         net->ipv4.tcp_death_row.hashinfo = hinfo;
3186         net->ipv4.tcp_death_row.sysctl_max_tw_buckets = ehash_entries / 2;
3187         net->ipv4.sysctl_max_syn_backlog = max(128U, ehash_entries / 128);
3188 }
3189
3190 static int __net_init tcp_sk_init(struct net *net)
3191 {
3192         net->ipv4.sysctl_tcp_ecn = 2;
3193         net->ipv4.sysctl_tcp_ecn_fallback = 1;
3194
3195         net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
3196         net->ipv4.sysctl_tcp_min_snd_mss = TCP_MIN_SND_MSS;
3197         net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
3198         net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
3199         net->ipv4.sysctl_tcp_mtu_probe_floor = TCP_MIN_SND_MSS;
3200
3201         net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
3202         net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
3203         net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;
3204
3205         net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
3206         net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
3207         net->ipv4.sysctl_tcp_syncookies = 1;
3208         net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
3209         net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
3210         net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
3211         net->ipv4.sysctl_tcp_orphan_retries = 0;
3212         net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
3213         net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
3214         net->ipv4.sysctl_tcp_tw_reuse = 2;
3215         net->ipv4.sysctl_tcp_no_ssthresh_metrics_save = 1;
3216
3217         refcount_set(&net->ipv4.tcp_death_row.tw_refcount, 1);
3218         tcp_set_hashinfo(net);
3219
3220         net->ipv4.sysctl_tcp_sack = 1;
3221         net->ipv4.sysctl_tcp_window_scaling = 1;
3222         net->ipv4.sysctl_tcp_timestamps = 1;
3223         net->ipv4.sysctl_tcp_early_retrans = 3;
3224         net->ipv4.sysctl_tcp_recovery = TCP_RACK_LOSS_DETECTION;
3225         net->ipv4.sysctl_tcp_slow_start_after_idle = 1; /* By default, RFC2861 behavior.  */
3226         net->ipv4.sysctl_tcp_retrans_collapse = 1;
3227         net->ipv4.sysctl_tcp_max_reordering = 300;
3228         net->ipv4.sysctl_tcp_dsack = 1;
3229         net->ipv4.sysctl_tcp_app_win = 31;
3230         net->ipv4.sysctl_tcp_adv_win_scale = 1;
3231         net->ipv4.sysctl_tcp_frto = 2;
3232         net->ipv4.sysctl_tcp_moderate_rcvbuf = 1;
3233         /* This limits the percentage of the congestion window which we
3234          * will allow a single TSO frame to consume.  Building TSO frames
3235          * which are too large can cause TCP streams to be bursty.
3236          */
3237         net->ipv4.sysctl_tcp_tso_win_divisor = 3;
3238         /* Default TSQ limit of 16 TSO segments */
3239         net->ipv4.sysctl_tcp_limit_output_bytes = 16 * 65536;
3240
3241         /* rfc5961 challenge ack rate limiting, per net-ns, disabled by default. */
3242         net->ipv4.sysctl_tcp_challenge_ack_limit = INT_MAX;
3243
3244         net->ipv4.sysctl_tcp_min_tso_segs = 2;
3245         net->ipv4.sysctl_tcp_tso_rtt_log = 9;  /* 2^9 = 512 usec */
3246         net->ipv4.sysctl_tcp_min_rtt_wlen = 300;
3247         net->ipv4.sysctl_tcp_autocorking = 1;
3248         net->ipv4.sysctl_tcp_invalid_ratelimit = HZ/2;
3249         net->ipv4.sysctl_tcp_pacing_ss_ratio = 200;
3250         net->ipv4.sysctl_tcp_pacing_ca_ratio = 120;
3251         if (net != &init_net) {
3252                 memcpy(net->ipv4.sysctl_tcp_rmem,
3253                        init_net.ipv4.sysctl_tcp_rmem,
3254                        sizeof(init_net.ipv4.sysctl_tcp_rmem));
3255                 memcpy(net->ipv4.sysctl_tcp_wmem,
3256                        init_net.ipv4.sysctl_tcp_wmem,
3257                        sizeof(init_net.ipv4.sysctl_tcp_wmem));
3258         }
3259         net->ipv4.sysctl_tcp_comp_sack_delay_ns = NSEC_PER_MSEC;
3260         net->ipv4.sysctl_tcp_comp_sack_slack_ns = 100 * NSEC_PER_USEC;
3261         net->ipv4.sysctl_tcp_comp_sack_nr = 44;
3262         net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
3263         net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 0;
3264         atomic_set(&net->ipv4.tfo_active_disable_times, 0);
3265
3266         /* Set default values for PLB */
3267         net->ipv4.sysctl_tcp_plb_enabled = 0; /* Disabled by default */
3268         net->ipv4.sysctl_tcp_plb_idle_rehash_rounds = 3;
3269         net->ipv4.sysctl_tcp_plb_rehash_rounds = 12;
3270         net->ipv4.sysctl_tcp_plb_suspend_rto_sec = 60;
3271         /* Default congestion threshold for PLB to mark a round is 50% */
3272         net->ipv4.sysctl_tcp_plb_cong_thresh = (1 << TCP_PLB_SCALE) / 2;
3273
3274         /* Reno is always built in */
3275         if (!net_eq(net, &init_net) &&
3276             bpf_try_module_get(init_net.ipv4.tcp_congestion_control,
3277                                init_net.ipv4.tcp_congestion_control->owner))
3278                 net->ipv4.tcp_congestion_control = init_net.ipv4.tcp_congestion_control;
3279         else
3280                 net->ipv4.tcp_congestion_control = &tcp_reno;
3281
3282         net->ipv4.sysctl_tcp_syn_linear_timeouts = 4;
3283         net->ipv4.sysctl_tcp_shrink_window = 0;
3284
3285         return 0;
3286 }
3287
3288 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
3289 {
3290         struct net *net;
3291
3292         tcp_twsk_purge(net_exit_list, AF_INET);
3293
3294         list_for_each_entry(net, net_exit_list, exit_list) {
3295                 inet_pernet_hashinfo_free(net->ipv4.tcp_death_row.hashinfo);
3296                 WARN_ON_ONCE(!refcount_dec_and_test(&net->ipv4.tcp_death_row.tw_refcount));
3297                 tcp_fastopen_ctx_destroy(net);
3298         }
3299 }
3300
3301 static struct pernet_operations __net_initdata tcp_sk_ops = {
3302        .init       = tcp_sk_init,
3303        .exit       = tcp_sk_exit,
3304        .exit_batch = tcp_sk_exit_batch,
3305 };
3306
3307 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3308 DEFINE_BPF_ITER_FUNC(tcp, struct bpf_iter_meta *meta,
3309                      struct sock_common *sk_common, uid_t uid)
3310
3311 #define INIT_BATCH_SZ 16
3312
3313 static int bpf_iter_init_tcp(void *priv_data, struct bpf_iter_aux_info *aux)
3314 {
3315         struct bpf_tcp_iter_state *iter = priv_data;
3316         int err;
3317
3318         err = bpf_iter_init_seq_net(priv_data, aux);
3319         if (err)
3320                 return err;
3321
3322         err = bpf_iter_tcp_realloc_batch(iter, INIT_BATCH_SZ);
3323         if (err) {
3324                 bpf_iter_fini_seq_net(priv_data);
3325                 return err;
3326         }
3327
3328         return 0;
3329 }
3330
3331 static void bpf_iter_fini_tcp(void *priv_data)
3332 {
3333         struct bpf_tcp_iter_state *iter = priv_data;
3334
3335         bpf_iter_fini_seq_net(priv_data);
3336         kvfree(iter->batch);
3337 }
3338
3339 static const struct bpf_iter_seq_info tcp_seq_info = {
3340         .seq_ops                = &bpf_iter_tcp_seq_ops,
3341         .init_seq_private       = bpf_iter_init_tcp,
3342         .fini_seq_private       = bpf_iter_fini_tcp,
3343         .seq_priv_size          = sizeof(struct bpf_tcp_iter_state),
3344 };
3345
3346 static const struct bpf_func_proto *
3347 bpf_iter_tcp_get_func_proto(enum bpf_func_id func_id,
3348                             const struct bpf_prog *prog)
3349 {
3350         switch (func_id) {
3351         case BPF_FUNC_setsockopt:
3352                 return &bpf_sk_setsockopt_proto;
3353         case BPF_FUNC_getsockopt:
3354                 return &bpf_sk_getsockopt_proto;
3355         default:
3356                 return NULL;
3357         }
3358 }
3359
3360 static struct bpf_iter_reg tcp_reg_info = {
3361         .target                 = "tcp",
3362         .ctx_arg_info_size      = 1,
3363         .ctx_arg_info           = {
3364                 { offsetof(struct bpf_iter__tcp, sk_common),
3365                   PTR_TO_BTF_ID_OR_NULL | PTR_TRUSTED },
3366         },
3367         .get_func_proto         = bpf_iter_tcp_get_func_proto,
3368         .seq_info               = &tcp_seq_info,
3369 };
3370
3371 static void __init bpf_iter_register(void)
3372 {
3373         tcp_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON];
3374         if (bpf_iter_reg_target(&tcp_reg_info))
3375                 pr_warn("Warning: could not register bpf iterator tcp\n");
3376 }
3377
3378 #endif
3379
3380 void __init tcp_v4_init(void)
3381 {
3382         int cpu, res;
3383
3384         for_each_possible_cpu(cpu) {
3385                 struct sock *sk;
3386
3387                 res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
3388                                            IPPROTO_TCP, &init_net);
3389                 if (res)
3390                         panic("Failed to create the TCP control socket.\n");
3391                 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
3392
3393                 /* Please enforce IP_DF and IPID==0 for RST and
3394                  * ACK sent in SYN-RECV and TIME-WAIT state.
3395                  */
3396                 inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;
3397
3398                 per_cpu(ipv4_tcp_sk, cpu) = sk;
3399         }
3400         if (register_pernet_subsys(&tcp_sk_ops))
3401                 panic("Failed to create the TCP control socket.\n");
3402
3403 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3404         bpf_iter_register();
3405 #endif
3406 }