net/ipv4/tcp_ipv4.c
1 /*
2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
3  *              operating system.  INET is implemented using the  BSD Socket
4  *              interface as the means of communication with the user level.
5  *
6  *              Implementation of the Transmission Control Protocol(TCP).
7  *
8  *              IPv4 specific functions
9  *
10  *
11  *              code split from:
12  *              linux/ipv4/tcp.c
13  *              linux/ipv4/tcp_input.c
14  *              linux/ipv4/tcp_output.c
15  *
16  *              See tcp.c for author information
17  *
18  *      This program is free software; you can redistribute it and/or
19  *      modify it under the terms of the GNU General Public License
20  *      as published by the Free Software Foundation; either version
21  *      2 of the License, or (at your option) any later version.
22  */
23
24 /*
25  * Changes:
26  *              David S. Miller :       New socket lookup architecture.
27  *                                      This code is dedicated to John Dyson.
28  *              David S. Miller :       Change semantics of established hash,
29  *                                      half is devoted to TIME_WAIT sockets
30  *                                      and the rest go in the other half.
31  *              Andi Kleen :            Add support for syncookies and fixed
32  *                                      some bugs: ip options weren't passed to
33  *                                      the TCP layer, missed a check for an
34  *                                      ACK bit.
35  *              Andi Kleen :            Implemented fast path mtu discovery.
36  *                                      Fixed many serious bugs in the
37  *                                      request_sock handling and moved
38  *                                      most of it into the af independent code.
39  *                                      Added tail drop and some other bugfixes.
40  *                                      Added new listen semantics.
41  *              Mike McLagan    :       Routing by source
42  *      Juan Jose Ciarlante:            ip_dynaddr bits
43  *              Andi Kleen:             various fixes.
44  *      Vitaly E. Lavrov        :       Transparent proxy revived after year
45  *                                      coma.
46  *      Andi Kleen              :       Fix new listen.
47  *      Andi Kleen              :       Fix accept error reporting.
48  *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
49  *      Alexey Kuznetsov                allows both IPv4 and IPv6 sockets to bind
50  *                                      a single port at the same time.
51  */
52
53 #define pr_fmt(fmt) "TCP: " fmt
54
55 #include <linux/bottom_half.h>
56 #include <linux/types.h>
57 #include <linux/fcntl.h>
58 #include <linux/module.h>
59 #include <linux/random.h>
60 #include <linux/cache.h>
61 #include <linux/jhash.h>
62 #include <linux/init.h>
63 #include <linux/times.h>
64 #include <linux/slab.h>
65
66 #include <net/net_namespace.h>
67 #include <net/icmp.h>
68 #include <net/inet_hashtables.h>
69 #include <net/tcp.h>
70 #include <net/transp_v6.h>
71 #include <net/ipv6.h>
72 #include <net/inet_common.h>
73 #include <net/timewait_sock.h>
74 #include <net/xfrm.h>
75 #include <net/secure_seq.h>
76 #include <net/tcp_memcontrol.h>
77 #include <net/busy_poll.h>
78
79 #include <linux/inet.h>
80 #include <linux/ipv6.h>
81 #include <linux/stddef.h>
82 #include <linux/proc_fs.h>
83 #include <linux/seq_file.h>
84
85 #include <linux/crypto.h>
86 #include <linux/scatterlist.h>
87
88 int sysctl_tcp_tw_reuse __read_mostly;
89 int sysctl_tcp_low_latency __read_mostly;
90 EXPORT_SYMBOL(sysctl_tcp_low_latency);
91
92 #ifdef CONFIG_TCP_MD5SIG
93 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
94                                __be32 daddr, __be32 saddr, const struct tcphdr *th);
95 #endif
96
97 struct inet_hashinfo tcp_hashinfo;
98 EXPORT_SYMBOL(tcp_hashinfo);
99
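/* Derive the initial sequence number for a connection from the
 * source/destination addresses and ports of the incoming segment.
 */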
100 static  __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
101 {
102         return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
103                                           ip_hdr(skb)->saddr,
104                                           tcp_hdr(skb)->dest,
105                                           tcp_hdr(skb)->source);
106 }
107
108 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
109 {
110         const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
111         struct tcp_sock *tp = tcp_sk(sk);
112
113         /* With PAWS, it is safe from the viewpoint
114            of data integrity. Even without PAWS it is safe provided sequence
115            spaces do not overlap i.e. at data rates <= 80Mbit/sec.
116
117            Actually, the idea is close to VJ's: the timestamp cache is
118            held not per host but per port pair, and the TW bucket is used
119            as the state holder.
120
121            If the TW bucket has already been destroyed, we fall back to VJ's
122            scheme and use the initial timestamp retrieved from the peer table.
123          */
124         if (tcptw->tw_ts_recent_stamp &&
125             (twp == NULL || (sysctl_tcp_tw_reuse &&
126                              get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
127                 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
128                 if (tp->write_seq == 0)
129                         tp->write_seq = 1;
130                 tp->rx_opt.ts_recent       = tcptw->tw_ts_recent;
131                 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
132                 sock_hold(sktw);
133                 return 1;
134         }
135
136         return 0;
137 }
138 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
139
140 /* This will initiate an outgoing connection. */
141 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
142 {
143         struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
144         struct inet_sock *inet = inet_sk(sk);
145         struct tcp_sock *tp = tcp_sk(sk);
146         __be16 orig_sport, orig_dport;
147         __be32 daddr, nexthop;
148         struct flowi4 *fl4;
149         struct rtable *rt;
150         int err;
151         struct ip_options_rcu *inet_opt;
152
153         if (addr_len < sizeof(struct sockaddr_in))
154                 return -EINVAL;
155
156         if (usin->sin_family != AF_INET)
157                 return -EAFNOSUPPORT;
158
159         nexthop = daddr = usin->sin_addr.s_addr;
160         inet_opt = rcu_dereference_protected(inet->inet_opt,
161                                              sock_owned_by_user(sk));
162         if (inet_opt && inet_opt->opt.srr) {
163                 if (!daddr)
164                         return -EINVAL;
165                 nexthop = inet_opt->opt.faddr;
166         }
167
168         orig_sport = inet->inet_sport;
169         orig_dport = usin->sin_port;
170         fl4 = &inet->cork.fl.u.ip4;
171         rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
172                               RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
173                               IPPROTO_TCP,
174                               orig_sport, orig_dport, sk);
175         if (IS_ERR(rt)) {
176                 err = PTR_ERR(rt);
177                 if (err == -ENETUNREACH)
178                         IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
179                 return err;
180         }
181
182         if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
183                 ip_rt_put(rt);
184                 return -ENETUNREACH;
185         }
186
187         if (!inet_opt || !inet_opt->opt.srr)
188                 daddr = fl4->daddr;
189
190         if (!inet->inet_saddr)
191                 inet->inet_saddr = fl4->saddr;
192         sk_rcv_saddr_set(sk, inet->inet_saddr);
193
194         if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
195                 /* Reset inherited state */
196                 tp->rx_opt.ts_recent       = 0;
197                 tp->rx_opt.ts_recent_stamp = 0;
198                 if (likely(!tp->repair))
199                         tp->write_seq      = 0;
200         }
201
202         if (tcp_death_row.sysctl_tw_recycle &&
203             !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
204                 tcp_fetch_timewait_stamp(sk, &rt->dst);
205
206         inet->inet_dport = usin->sin_port;
207         sk_daddr_set(sk, daddr);
208
209         inet_csk(sk)->icsk_ext_hdr_len = 0;
210         if (inet_opt)
211                 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
212
213         tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
214
215         /* Socket identity is still unknown (sport may be zero).
216          * However we set state to SYN-SENT and, without releasing the socket
217          * lock, select a source port, enter ourselves into the hash tables and
218          * complete initialization after this.
219          */
220         tcp_set_state(sk, TCP_SYN_SENT);
221         err = inet_hash_connect(&tcp_death_row, sk);
222         if (err)
223                 goto failure;
224
225         inet_set_txhash(sk);
226
227         rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
228                                inet->inet_sport, inet->inet_dport, sk);
229         if (IS_ERR(rt)) {
230                 err = PTR_ERR(rt);
231                 rt = NULL;
232                 goto failure;
233         }
234         /* OK, now commit destination to socket.  */
235         sk->sk_gso_type = SKB_GSO_TCPV4;
236         sk_setup_caps(sk, &rt->dst);
237
238         if (!tp->write_seq && likely(!tp->repair))
239                 tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
240                                                            inet->inet_daddr,
241                                                            inet->inet_sport,
242                                                            usin->sin_port);
243
244         inet->inet_id = tp->write_seq ^ jiffies;
245
246         err = tcp_connect(sk);
247
248         rt = NULL;
249         if (err)
250                 goto failure;
251
252         return 0;
253
254 failure:
255         /*
256          * This unhashes the socket and releases the local port,
257          * if necessary.
258          */
259         tcp_set_state(sk, TCP_CLOSE);
260         ip_rt_put(rt);
261         sk->sk_route_caps = 0;
262         inet->inet_dport = 0;
263         return err;
264 }
265 EXPORT_SYMBOL(tcp_v4_connect);
266
267 /*
268  * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
269  * It can be called through tcp_release_cb() if socket was owned by user
270  * at the time tcp_v4_err() was called to handle ICMP message.
271  */
272 void tcp_v4_mtu_reduced(struct sock *sk)
273 {
274         struct dst_entry *dst;
275         struct inet_sock *inet = inet_sk(sk);
276         u32 mtu = tcp_sk(sk)->mtu_info;
277
278         dst = inet_csk_update_pmtu(sk, mtu);
279         if (!dst)
280                 return;
281
282         /* Something is about to go wrong... Remember the soft error
283          * in case this connection is not able to recover.
284          */
285         if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
286                 sk->sk_err_soft = EMSGSIZE;
287
288         mtu = dst_mtu(dst);
289
290         if (inet->pmtudisc != IP_PMTUDISC_DONT &&
291             ip_sk_accept_pmtu(sk) &&
292             inet_csk(sk)->icsk_pmtu_cookie > mtu) {
293                 tcp_sync_mss(sk, mtu);
294
295                 /* Resend the TCP packet because it's
296                  * clear that the old packet has been
297                  * dropped. This is the new "fast" path mtu
298                  * discovery.
299                  */
300                 tcp_simple_retransmit(sk);
301         } /* else let the usual retransmit timer handle it */
302 }
303 EXPORT_SYMBOL(tcp_v4_mtu_reduced);
304
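/* Handle an ICMP redirect by passing it to the cached route's redirect handler. */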
305 static void do_redirect(struct sk_buff *skb, struct sock *sk)
306 {
307         struct dst_entry *dst = __sk_dst_check(sk, 0);
308
309         if (dst)
310                 dst->ops->redirect(dst, sk, skb);
311 }
312
313
314 /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
315 void tcp_req_err(struct sock *sk, u32 seq)
316 {
317         struct request_sock *req = inet_reqsk(sk);
318         struct net *net = sock_net(sk);
319
320         /* ICMPs are not backlogged, hence we cannot get
321          * an established socket here.
322          */
323         WARN_ON(req->sk);
324
325         if (seq != tcp_rsk(req)->snt_isn) {
326                 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
327                 reqsk_put(req);
328         } else {
329                 /*
330                  * Still in SYN_RECV, just remove it silently.
331                  * There is no good way to pass the error to the newly
332                  * created socket, and POSIX does not want network
333                  * errors returned from accept().
334                  */
335                 NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS);
336                 inet_csk_reqsk_queue_drop(req->rsk_listener, req);
337         }
338 }
339 EXPORT_SYMBOL(tcp_req_err);
340
341 /*
342  * This routine is called by the ICMP module when it gets some
343  * sort of error condition.  If err < 0 then the socket should
344  * be closed and the error returned to the user.  If err > 0
345  * it's just the icmp type << 8 | icmp code.  After adjustment
346  * header points to the first 8 bytes of the tcp header.  We need
347  * to find the appropriate port.
348  *
349  * The locking strategy used here is very "optimistic". When
350  * someone else accesses the socket the ICMP is just dropped
351  * and for some paths there is no check at all.
352  * A more general error queue to queue errors for later handling
353  * is probably better.
354  *
355  */
356
357 void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
358 {
359         const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
360         struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
361         struct inet_connection_sock *icsk;
362         struct tcp_sock *tp;
363         struct inet_sock *inet;
364         const int type = icmp_hdr(icmp_skb)->type;
365         const int code = icmp_hdr(icmp_skb)->code;
366         struct sock *sk;
367         struct sk_buff *skb;
368         struct request_sock *fastopen;
369         __u32 seq, snd_una;
370         __u32 remaining;
371         int err;
372         struct net *net = dev_net(icmp_skb->dev);
373
374         sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
375                                        th->dest, iph->saddr, ntohs(th->source),
376                                        inet_iif(icmp_skb));
377         if (!sk) {
378                 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
379                 return;
380         }
381         if (sk->sk_state == TCP_TIME_WAIT) {
382                 inet_twsk_put(inet_twsk(sk));
383                 return;
384         }
385         seq = ntohl(th->seq);
386         if (sk->sk_state == TCP_NEW_SYN_RECV)
387                 return tcp_req_err(sk, seq);
388
389         bh_lock_sock(sk);
390         /* If too many ICMPs get dropped on busy
391          * servers this needs to be solved differently.
392          * We do take care of PMTU discovery (RFC1191) special case :
393          * we can receive locally generated ICMP messages while socket is held.
394          */
395         if (sock_owned_by_user(sk)) {
396                 if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
397                         NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
398         }
399         if (sk->sk_state == TCP_CLOSE)
400                 goto out;
401
402         if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
403                 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
404                 goto out;
405         }
406
407         icsk = inet_csk(sk);
408         tp = tcp_sk(sk);
409         /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
410         fastopen = tp->fastopen_rsk;
411         snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
412         if (sk->sk_state != TCP_LISTEN &&
413             !between(seq, snd_una, tp->snd_nxt)) {
414                 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
415                 goto out;
416         }
417
418         switch (type) {
419         case ICMP_REDIRECT:
420                 do_redirect(icmp_skb, sk);
421                 goto out;
422         case ICMP_SOURCE_QUENCH:
423                 /* Just silently ignore these. */
424                 goto out;
425         case ICMP_PARAMETERPROB:
426                 err = EPROTO;
427                 break;
428         case ICMP_DEST_UNREACH:
429                 if (code > NR_ICMP_UNREACH)
430                         goto out;
431
432                 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
433                         /* We are not interested in TCP_LISTEN and open_requests
434                          * (SYN-ACKs sent out by Linux are always < 576 bytes, so
435                          * they should go through unfragmented).
436                          */
437                         if (sk->sk_state == TCP_LISTEN)
438                                 goto out;
439
440                         tp->mtu_info = info;
441                         if (!sock_owned_by_user(sk)) {
442                                 tcp_v4_mtu_reduced(sk);
443                         } else {
444                                 if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
445                                         sock_hold(sk);
446                         }
447                         goto out;
448                 }
449
450                 err = icmp_err_convert[code].errno;
451                 /* check if icmp_skb allows revert of backoff
452                  * (see draft-zimmermann-tcp-lcd) */
453                 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
454                         break;
455                 if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
456                     !icsk->icsk_backoff || fastopen)
457                         break;
458
459                 if (sock_owned_by_user(sk))
460                         break;
461
462                 icsk->icsk_backoff--;
463                 icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
464                                                TCP_TIMEOUT_INIT;
465                 icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
466
467                 skb = tcp_write_queue_head(sk);
468                 BUG_ON(!skb);
469
470                 remaining = icsk->icsk_rto -
471                             min(icsk->icsk_rto,
472                                 tcp_time_stamp - tcp_skb_timestamp(skb));
473
474                 if (remaining) {
475                         inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
476                                                   remaining, TCP_RTO_MAX);
477                 } else {
478                         /* RTO revert clocked out retransmission.
479                          * Will retransmit now */
480                         tcp_retransmit_timer(sk);
481                 }
482
483                 break;
484         case ICMP_TIME_EXCEEDED:
485                 err = EHOSTUNREACH;
486                 break;
487         default:
488                 goto out;
489         }
490
491         switch (sk->sk_state) {
492         case TCP_SYN_SENT:
493         case TCP_SYN_RECV:
494                 /* Only in fast or simultaneous open. If a fast open socket is
495                  * already accepted it is treated as a connected one below.
496                  */
497                 if (fastopen && fastopen->sk == NULL)
498                         break;
499
500                 if (!sock_owned_by_user(sk)) {
501                         sk->sk_err = err;
502
503                         sk->sk_error_report(sk);
504
505                         tcp_done(sk);
506                 } else {
507                         sk->sk_err_soft = err;
508                 }
509                 goto out;
510         }
511
512         /* If we've already connected we will keep trying
513          * until we time out, or the user gives up.
514          *
515          * rfc1122 4.2.3.9 allows us to consider only PROTO_UNREACH and
516          * PORT_UNREACH as hard errors (well, FRAG_FAILED too,
517          * but it is obsoleted by pmtu discovery).
518          *
519          * Note that in the modern internet, where routing is unreliable
520          * and broken firewalls sit in every dark corner sending random
521          * errors ordered by their masters, even these two messages finally
522          * lose their original sense (even Linux sends invalid PORT_UNREACHs)
523          *
524          * Now we are in compliance with RFCs.
525          *                                                      --ANK (980905)
526          */
527
528         inet = inet_sk(sk);
529         if (!sock_owned_by_user(sk) && inet->recverr) {
530                 sk->sk_err = err;
531                 sk->sk_error_report(sk);
532         } else  { /* Only an error on timeout */
533                 sk->sk_err_soft = err;
534         }
535
536 out:
537         bh_unlock_sock(sk);
538         sock_put(sk);
539 }
540
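/* Fill in the TCP checksum.  With CHECKSUM_PARTIAL only the pseudo-header
 * sum is stored and the device (or a later software fallback) completes it;
 * otherwise the full checksum is computed in software here.
 */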
541 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
542 {
543         struct tcphdr *th = tcp_hdr(skb);
544
545         if (skb->ip_summed == CHECKSUM_PARTIAL) {
546                 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
547                 skb->csum_start = skb_transport_header(skb) - skb->head;
548                 skb->csum_offset = offsetof(struct tcphdr, check);
549         } else {
550                 th->check = tcp_v4_check(skb->len, saddr, daddr,
551                                          csum_partial(th,
552                                                       th->doff << 2,
553                                                       skb->csum));
554         }
555 }
556
557 /* This routine computes an IPv4 TCP checksum. */
558 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
559 {
560         const struct inet_sock *inet = inet_sk(sk);
561
562         __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
563 }
564 EXPORT_SYMBOL(tcp_v4_send_check);
565
566 /*
567  *      This routine will send an RST to the other tcp.
568  *
569  *      Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
570  *                    for the reset?
571  *      Answer: if a packet caused an RST, it is not for a socket
572  *              existing in our system; if it is matched to a socket,
573  *              it is just a duplicate segment or a bug in the other side's TCP.
574  *              So we build the reply based only on parameters that
575  *              arrived with the segment.
576  *      Exception: precedence violation. We do not implement it in any case.
577  */
578
579 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
580 {
581         const struct tcphdr *th = tcp_hdr(skb);
582         struct {
583                 struct tcphdr th;
584 #ifdef CONFIG_TCP_MD5SIG
585                 __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
586 #endif
587         } rep;
588         struct ip_reply_arg arg;
589 #ifdef CONFIG_TCP_MD5SIG
590         struct tcp_md5sig_key *key;
591         const __u8 *hash_location = NULL;
592         unsigned char newhash[16];
593         int genhash;
594         struct sock *sk1 = NULL;
595 #endif
596         struct net *net;
597
598         /* Never send a reset in response to a reset. */
599         if (th->rst)
600                 return;
601
602         /* If sk is not NULL, it means we did a successful lookup and the incoming
603          * route had to be correct. prequeue might have dropped our dst.
604          */
605         if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
606                 return;
607
608         /* Swap the send and the receive. */
609         memset(&rep, 0, sizeof(rep));
610         rep.th.dest   = th->source;
611         rep.th.source = th->dest;
612         rep.th.doff   = sizeof(struct tcphdr) / 4;
613         rep.th.rst    = 1;
614
615         if (th->ack) {
616                 rep.th.seq = th->ack_seq;
617         } else {
618                 rep.th.ack = 1;
619                 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
620                                        skb->len - (th->doff << 2));
621         }
622
623         memset(&arg, 0, sizeof(arg));
624         arg.iov[0].iov_base = (unsigned char *)&rep;
625         arg.iov[0].iov_len  = sizeof(rep.th);
626
627         net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
628 #ifdef CONFIG_TCP_MD5SIG
629         hash_location = tcp_parse_md5sig_option(th);
630         if (!sk && hash_location) {
631                 /*
632                  * The active side is lost. Try to find the listening socket
633                  * through the source port, and then find the md5 key through
634                  * the listening socket. We do not lose security here:
635                  * the incoming packet is checked against the md5 hash of the key
636                  * we find; no RST is generated if the md5 hash doesn't match.
637                  */
638                 sk1 = __inet_lookup_listener(net,
639                                              &tcp_hashinfo, ip_hdr(skb)->saddr,
640                                              th->source, ip_hdr(skb)->daddr,
641                                              ntohs(th->source), inet_iif(skb));
642                 /* don't send rst if it can't find key */
643                 if (!sk1)
644                         return;
645                 rcu_read_lock();
646                 key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
647                                         &ip_hdr(skb)->saddr, AF_INET);
648                 if (!key)
649                         goto release_sk1;
650
651                 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
652                 if (genhash || memcmp(hash_location, newhash, 16) != 0)
653                         goto release_sk1;
654         } else {
655                 key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
656                                              &ip_hdr(skb)->saddr,
657                                              AF_INET) : NULL;
658         }
659
660         if (key) {
661                 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
662                                    (TCPOPT_NOP << 16) |
663                                    (TCPOPT_MD5SIG << 8) |
664                                    TCPOLEN_MD5SIG);
665                 /* Update length and the length the header thinks exists */
666                 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
667                 rep.th.doff = arg.iov[0].iov_len / 4;
668
669                 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
670                                      key, ip_hdr(skb)->saddr,
671                                      ip_hdr(skb)->daddr, &rep.th);
672         }
673 #endif
674         arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
675                                       ip_hdr(skb)->saddr, /* XXX */
676                                       arg.iov[0].iov_len, IPPROTO_TCP, 0);
677         arg.csumoffset = offsetof(struct tcphdr, check) / 2;
678         arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
679         /* When the socket is gone, all binding information is lost and
680          * routing might fail in this case. No choice here: if we choose to force
681          * the input interface, we will misroute in case of an asymmetric route.
682          */
683         if (sk)
684                 arg.bound_dev_if = sk->sk_bound_dev_if;
685
686         arg.tos = ip_hdr(skb)->tos;
687         ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
688                               skb, &TCP_SKB_CB(skb)->header.h4.opt,
689                               ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
690                               &arg, arg.iov[0].iov_len);
691
692         TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
693         TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
694
695 #ifdef CONFIG_TCP_MD5SIG
696 release_sk1:
697         if (sk1) {
698                 rcu_read_unlock();
699                 sock_put(sk1);
700         }
701 #endif
702 }
703
704 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
705    outside socket context, is certainly ugly. What can I do?
706  */
707
708 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
709                             u32 win, u32 tsval, u32 tsecr, int oif,
710                             struct tcp_md5sig_key *key,
711                             int reply_flags, u8 tos)
712 {
713         const struct tcphdr *th = tcp_hdr(skb);
714         struct {
715                 struct tcphdr th;
716                 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
717 #ifdef CONFIG_TCP_MD5SIG
718                            + (TCPOLEN_MD5SIG_ALIGNED >> 2)
719 #endif
720                         ];
721         } rep;
722         struct ip_reply_arg arg;
723         struct net *net = dev_net(skb_dst(skb)->dev);
724
725         memset(&rep.th, 0, sizeof(struct tcphdr));
726         memset(&arg, 0, sizeof(arg));
727
728         arg.iov[0].iov_base = (unsigned char *)&rep;
729         arg.iov[0].iov_len  = sizeof(rep.th);
730         if (tsecr) {
731                 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
732                                    (TCPOPT_TIMESTAMP << 8) |
733                                    TCPOLEN_TIMESTAMP);
734                 rep.opt[1] = htonl(tsval);
735                 rep.opt[2] = htonl(tsecr);
736                 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
737         }
738
739         /* Swap the send and the receive. */
740         rep.th.dest    = th->source;
741         rep.th.source  = th->dest;
742         rep.th.doff    = arg.iov[0].iov_len / 4;
743         rep.th.seq     = htonl(seq);
744         rep.th.ack_seq = htonl(ack);
745         rep.th.ack     = 1;
746         rep.th.window  = htons(win);
747
748 #ifdef CONFIG_TCP_MD5SIG
749         if (key) {
750                 int offset = (tsecr) ? 3 : 0;
751
752                 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
753                                           (TCPOPT_NOP << 16) |
754                                           (TCPOPT_MD5SIG << 8) |
755                                           TCPOLEN_MD5SIG);
756                 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
757                 rep.th.doff = arg.iov[0].iov_len/4;
758
759                 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
760                                     key, ip_hdr(skb)->saddr,
761                                     ip_hdr(skb)->daddr, &rep.th);
762         }
763 #endif
764         arg.flags = reply_flags;
765         arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
766                                       ip_hdr(skb)->saddr, /* XXX */
767                                       arg.iov[0].iov_len, IPPROTO_TCP, 0);
768         arg.csumoffset = offsetof(struct tcphdr, check) / 2;
769         if (oif)
770                 arg.bound_dev_if = oif;
771         arg.tos = tos;
772         ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
773                               skb, &TCP_SKB_CB(skb)->header.h4.opt,
774                               ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
775                               &arg, arg.iov[0].iov_len);
776
777         TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
778 }
779
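/* Send an ACK on behalf of a TIME-WAIT socket, using the sequence and
 * timestamp state preserved in the timewait bucket.
 */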
780 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
781 {
782         struct inet_timewait_sock *tw = inet_twsk(sk);
783         struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
784
785         tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
786                         tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
787                         tcp_time_stamp + tcptw->tw_ts_offset,
788                         tcptw->tw_ts_recent,
789                         tw->tw_bound_dev_if,
790                         tcp_twsk_md5_key(tcptw),
791                         tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
792                         tw->tw_tos
793                         );
794
795         inet_twsk_put(tw);
796 }
797
798 static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
799                                   struct request_sock *req)
800 {
801         /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
802          * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
803          */
804         tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
805                         tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
806                         tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
807                         tcp_time_stamp,
808                         req->ts_recent,
809                         0,
810                         tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
811                                           AF_INET),
812                         inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
813                         ip_hdr(skb)->tos);
814 }
815
816 /*
817  *      Send a SYN-ACK after having received a SYN.
818  *      This still operates on a request_sock only, not on a big
819  *      socket.
820  */
821 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
822                               struct flowi *fl,
823                               struct request_sock *req,
824                               u16 queue_mapping,
825                               struct tcp_fastopen_cookie *foc)
826 {
827         const struct inet_request_sock *ireq = inet_rsk(req);
828         struct flowi4 fl4;
829         int err = -1;
830         struct sk_buff *skb;
831
832         /* First, grab a route. */
833         if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
834                 return -1;
835
836         skb = tcp_make_synack(sk, dst, req, foc);
837
838         if (skb) {
839                 __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
840
841                 skb_set_queue_mapping(skb, queue_mapping);
842                 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
843                                             ireq->ir_rmt_addr,
844                                             ireq->opt);
845                 err = net_xmit_eval(err);
846         }
847
848         return err;
849 }
850
851 /*
852  *      IPv4 request_sock destructor.
853  */
854 static void tcp_v4_reqsk_destructor(struct request_sock *req)
855 {
856         kfree(inet_rsk(req)->opt);
857 }
858
859 /*
860  * Return true if a syncookie should be sent
861  */
862 bool tcp_syn_flood_action(struct sock *sk,
863                          const struct sk_buff *skb,
864                          const char *proto)
865 {
866         const char *msg = "Dropping request";
867         bool want_cookie = false;
868         struct listen_sock *lopt;
869
870 #ifdef CONFIG_SYN_COOKIES
871         if (sysctl_tcp_syncookies) {
872                 msg = "Sending cookies";
873                 want_cookie = true;
874                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
875         } else
876 #endif
877                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
878
879         lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
880         if (!lopt->synflood_warned && sysctl_tcp_syncookies != 2) {
881                 lopt->synflood_warned = 1;
882                 pr_info("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
883                         proto, ntohs(tcp_hdr(skb)->dest), msg);
884         }
885         return want_cookie;
886 }
887 EXPORT_SYMBOL(tcp_syn_flood_action);
888
889 #ifdef CONFIG_TCP_MD5SIG
890 /*
891  * RFC2385 MD5 checksumming requires a mapping of
892  * IP address->MD5 Key.
893  * We need to maintain these in the sk structure.
894  */
895
896 /* Find the Key structure for an address.  */
897 struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
898                                          const union tcp_md5_addr *addr,
899                                          int family)
900 {
901         struct tcp_sock *tp = tcp_sk(sk);
902         struct tcp_md5sig_key *key;
903         unsigned int size = sizeof(struct in_addr);
904         struct tcp_md5sig_info *md5sig;
905
906         /* caller either holds rcu_read_lock() or socket lock */
907         md5sig = rcu_dereference_check(tp->md5sig_info,
908                                        sock_owned_by_user(sk) ||
909                                        lockdep_is_held(&sk->sk_lock.slock));
910         if (!md5sig)
911                 return NULL;
912 #if IS_ENABLED(CONFIG_IPV6)
913         if (family == AF_INET6)
914                 size = sizeof(struct in6_addr);
915 #endif
916         hlist_for_each_entry_rcu(key, &md5sig->head, node) {
917                 if (key->family != family)
918                         continue;
919                 if (!memcmp(&key->addr, addr, size))
920                         return key;
921         }
922         return NULL;
923 }
924 EXPORT_SYMBOL(tcp_md5_do_lookup);
925
926 struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
927                                          struct sock *addr_sk)
928 {
929         union tcp_md5_addr *addr;
930
931         addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
932         return tcp_md5_do_lookup(sk, addr, AF_INET);
933 }
934 EXPORT_SYMBOL(tcp_v4_md5_lookup);
935
936 static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
937                                                       struct request_sock *req)
938 {
939         union tcp_md5_addr *addr;
940
941         addr = (union tcp_md5_addr *)&inet_rsk(req)->ir_rmt_addr;
942         return tcp_md5_do_lookup(sk, addr, AF_INET);
943 }
944
945 /* This can be called on a newly created socket, from other files */
946 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
947                    int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
948 {
949         /* Add Key to the list */
950         struct tcp_md5sig_key *key;
951         struct tcp_sock *tp = tcp_sk(sk);
952         struct tcp_md5sig_info *md5sig;
953
954         key = tcp_md5_do_lookup(sk, addr, family);
955         if (key) {
956                 /* Pre-existing entry - just update that one. */
957                 memcpy(key->key, newkey, newkeylen);
958                 key->keylen = newkeylen;
959                 return 0;
960         }
961
962         md5sig = rcu_dereference_protected(tp->md5sig_info,
963                                            sock_owned_by_user(sk));
964         if (!md5sig) {
965                 md5sig = kmalloc(sizeof(*md5sig), gfp);
966                 if (!md5sig)
967                         return -ENOMEM;
968
969                 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
970                 INIT_HLIST_HEAD(&md5sig->head);
971                 rcu_assign_pointer(tp->md5sig_info, md5sig);
972         }
973
974         key = sock_kmalloc(sk, sizeof(*key), gfp);
975         if (!key)
976                 return -ENOMEM;
977         if (!tcp_alloc_md5sig_pool()) {
978                 sock_kfree_s(sk, key, sizeof(*key));
979                 return -ENOMEM;
980         }
981
982         memcpy(key->key, newkey, newkeylen);
983         key->keylen = newkeylen;
984         key->family = family;
985         memcpy(&key->addr, addr,
986                (family == AF_INET6) ? sizeof(struct in6_addr) :
987                                       sizeof(struct in_addr));
988         hlist_add_head_rcu(&key->node, &md5sig->head);
989         return 0;
990 }
991 EXPORT_SYMBOL(tcp_md5_do_add);
992
993 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
994 {
995         struct tcp_md5sig_key *key;
996
997         key = tcp_md5_do_lookup(sk, addr, family);
998         if (!key)
999                 return -ENOENT;
1000         hlist_del_rcu(&key->node);
1001         atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1002         kfree_rcu(key, rcu);
1003         return 0;
1004 }
1005 EXPORT_SYMBOL(tcp_md5_do_del);
1006
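/* Remove and free every MD5 key attached to this socket. */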
1007 static void tcp_clear_md5_list(struct sock *sk)
1008 {
1009         struct tcp_sock *tp = tcp_sk(sk);
1010         struct tcp_md5sig_key *key;
1011         struct hlist_node *n;
1012         struct tcp_md5sig_info *md5sig;
1013
1014         md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1015
1016         hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
1017                 hlist_del_rcu(&key->node);
1018                 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1019                 kfree_rcu(key, rcu);
1020         }
1021 }
1022
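/* setsockopt(TCP_MD5SIG) handler: a zero key length deletes the key for
 * the given peer address, otherwise the key is added or updated.
 */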
1023 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1024                                  int optlen)
1025 {
1026         struct tcp_md5sig cmd;
1027         struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1028
1029         if (optlen < sizeof(cmd))
1030                 return -EINVAL;
1031
1032         if (copy_from_user(&cmd, optval, sizeof(cmd)))
1033                 return -EFAULT;
1034
1035         if (sin->sin_family != AF_INET)
1036                 return -EINVAL;
1037
1038         if (!cmd.tcpm_keylen)
1039                 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1040                                       AF_INET);
1041
1042         if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1043                 return -EINVAL;
1044
1045         return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1046                               AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1047                               GFP_KERNEL);
1048 }
1049
1050 static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1051                                         __be32 daddr, __be32 saddr, int nbytes)
1052 {
1053         struct tcp4_pseudohdr *bp;
1054         struct scatterlist sg;
1055
1056         bp = &hp->md5_blk.ip4;
1057
1058         /*
1059          * 1. the TCP pseudo-header (in the order: source IP address,
1060          * destination IP address, zero-padded protocol number, and
1061          * segment length)
1062          */
1063         bp->saddr = saddr;
1064         bp->daddr = daddr;
1065         bp->pad = 0;
1066         bp->protocol = IPPROTO_TCP;
1067         bp->len = cpu_to_be16(nbytes);
1068
1069         sg_init_one(&sg, bp, sizeof(*bp));
1070         return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
1071 }
1072
1073 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1074                                __be32 daddr, __be32 saddr, const struct tcphdr *th)
1075 {
1076         struct tcp_md5sig_pool *hp;
1077         struct hash_desc *desc;
1078
1079         hp = tcp_get_md5sig_pool();
1080         if (!hp)
1081                 goto clear_hash_noput;
1082         desc = &hp->md5_desc;
1083
1084         if (crypto_hash_init(desc))
1085                 goto clear_hash;
1086         if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1087                 goto clear_hash;
1088         if (tcp_md5_hash_header(hp, th))
1089                 goto clear_hash;
1090         if (tcp_md5_hash_key(hp, key))
1091                 goto clear_hash;
1092         if (crypto_hash_final(desc, md5_hash))
1093                 goto clear_hash;
1094
1095         tcp_put_md5sig_pool();
1096         return 0;
1097
1098 clear_hash:
1099         tcp_put_md5sig_pool();
1100 clear_hash_noput:
1101         memset(md5_hash, 0, 16);
1102         return 1;
1103 }
1104
1105 int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
1106                         const struct sock *sk, const struct request_sock *req,
1107                         const struct sk_buff *skb)
1108 {
1109         struct tcp_md5sig_pool *hp;
1110         struct hash_desc *desc;
1111         const struct tcphdr *th = tcp_hdr(skb);
1112         __be32 saddr, daddr;
1113
1114         if (sk) {
1115                 saddr = inet_sk(sk)->inet_saddr;
1116                 daddr = inet_sk(sk)->inet_daddr;
1117         } else if (req) {
1118                 saddr = inet_rsk(req)->ir_loc_addr;
1119                 daddr = inet_rsk(req)->ir_rmt_addr;
1120         } else {
1121                 const struct iphdr *iph = ip_hdr(skb);
1122                 saddr = iph->saddr;
1123                 daddr = iph->daddr;
1124         }
1125
1126         hp = tcp_get_md5sig_pool();
1127         if (!hp)
1128                 goto clear_hash_noput;
1129         desc = &hp->md5_desc;
1130
1131         if (crypto_hash_init(desc))
1132                 goto clear_hash;
1133
1134         if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1135                 goto clear_hash;
1136         if (tcp_md5_hash_header(hp, th))
1137                 goto clear_hash;
1138         if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1139                 goto clear_hash;
1140         if (tcp_md5_hash_key(hp, key))
1141                 goto clear_hash;
1142         if (crypto_hash_final(desc, md5_hash))
1143                 goto clear_hash;
1144
1145         tcp_put_md5sig_pool();
1146         return 0;
1147
1148 clear_hash:
1149         tcp_put_md5sig_pool();
1150 clear_hash_noput:
1151         memset(md5_hash, 0, 16);
1152         return 1;
1153 }
1154 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1155
1156 static bool __tcp_v4_inbound_md5_hash(struct sock *sk,
1157                                       const struct sk_buff *skb)
1158 {
1159         /*
1160          * This gets called for each TCP segment that arrives
1161          * so we want to be efficient.
1162          * We have 3 drop cases:
1163          * o No MD5 hash and one expected.
1164          * o MD5 hash and we're not expecting one.
1165          * o MD5 hash and it's wrong.
1166          */
1167         const __u8 *hash_location = NULL;
1168         struct tcp_md5sig_key *hash_expected;
1169         const struct iphdr *iph = ip_hdr(skb);
1170         const struct tcphdr *th = tcp_hdr(skb);
1171         int genhash;
1172         unsigned char newhash[16];
1173
1174         hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1175                                           AF_INET);
1176         hash_location = tcp_parse_md5sig_option(th);
1177
1178         /* We've parsed the options - do we have a hash? */
1179         if (!hash_expected && !hash_location)
1180                 return false;
1181
1182         if (hash_expected && !hash_location) {
1183                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1184                 return true;
1185         }
1186
1187         if (!hash_expected && hash_location) {
1188                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1189                 return true;
1190         }
1191
1192         /* Okay, so this is hash_expected and hash_location -
1193          * so we need to calculate the checksum.
1194          */
1195         genhash = tcp_v4_md5_hash_skb(newhash,
1196                                       hash_expected,
1197                                       NULL, NULL, skb);
1198
1199         if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1200                 net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1201                                      &iph->saddr, ntohs(th->source),
1202                                      &iph->daddr, ntohs(th->dest),
1203                                      genhash ? " tcp_v4_calc_md5_hash failed"
1204                                      : "");
1205                 return true;
1206         }
1207         return false;
1208 }
1209
1210 static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
1211 {
1212         bool ret;
1213
1214         rcu_read_lock();
1215         ret = __tcp_v4_inbound_md5_hash(sk, skb);
1216         rcu_read_unlock();
1217
1218         return ret;
1219 }
1220
1221 #endif
1222
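/* Initialise the IPv4-specific fields of a new request sock from the
 * incoming SYN and the listening socket.
 */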
1223 static void tcp_v4_init_req(struct request_sock *req, struct sock *sk_listener,
1224                             struct sk_buff *skb)
1225 {
1226         struct inet_request_sock *ireq = inet_rsk(req);
1227
1228         sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1229         sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1230         ireq->no_srccheck = inet_sk(sk_listener)->transparent;
1231         ireq->opt = tcp_v4_save_options(skb);
1232         ireq->ireq_family = AF_INET;
1233 }
1234
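/* Route the SYN-ACK for a request sock; *strict reports whether the
 * routed destination still matches the peer address in the request.
 */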
1235 static struct dst_entry *tcp_v4_route_req(struct sock *sk, struct flowi *fl,
1236                                           const struct request_sock *req,
1237                                           bool *strict)
1238 {
1239         struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);
1240
1241         if (strict) {
1242                 if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
1243                         *strict = true;
1244                 else
1245                         *strict = false;
1246         }
1247
1248         return dst;
1249 }
1250
1251 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1252         .family         =       PF_INET,
1253         .obj_size       =       sizeof(struct tcp_request_sock),
1254         .rtx_syn_ack    =       tcp_rtx_synack,
1255         .send_ack       =       tcp_v4_reqsk_send_ack,
1256         .destructor     =       tcp_v4_reqsk_destructor,
1257         .send_reset     =       tcp_v4_send_reset,
1258         .syn_ack_timeout =      tcp_syn_ack_timeout,
1259 };
1260
1261 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1262         .mss_clamp      =       TCP_MSS_DEFAULT,
1263 #ifdef CONFIG_TCP_MD5SIG
1264         .md5_lookup     =       tcp_v4_reqsk_md5_lookup,
1265         .calc_md5_hash  =       tcp_v4_md5_hash_skb,
1266 #endif
1267         .init_req       =       tcp_v4_init_req,
1268 #ifdef CONFIG_SYN_COOKIES
1269         .cookie_init_seq =      cookie_v4_init_sequence,
1270 #endif
1271         .route_req      =       tcp_v4_route_req,
1272         .init_seq       =       tcp_v4_init_sequence,
1273         .send_synack    =       tcp_v4_send_synack,
1274         .queue_hash_add =       inet_csk_reqsk_queue_hash_add,
1275 };
1276
1277 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1278 {
1279         /* Never answer SYNs sent to broadcast or multicast */
1280         if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1281                 goto drop;
1282
1283         return tcp_conn_request(&tcp_request_sock_ops,
1284                                 &tcp_request_sock_ipv4_ops, sk, skb);
1285
1286 drop:
1287         NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1288         return 0;
1289 }
1290 EXPORT_SYMBOL(tcp_v4_conn_request);
1291
1292
1293 /*
1294  * The three way handshake has completed - we got a valid synack -
1295  * now create the new socket.
1296  */
1297 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1298                                   struct request_sock *req,
1299                                   struct dst_entry *dst)
1300 {
1301         struct inet_request_sock *ireq;
1302         struct inet_sock *newinet;
1303         struct tcp_sock *newtp;
1304         struct sock *newsk;
1305 #ifdef CONFIG_TCP_MD5SIG
1306         struct tcp_md5sig_key *key;
1307 #endif
1308         struct ip_options_rcu *inet_opt;
1309
1310         if (sk_acceptq_is_full(sk))
1311                 goto exit_overflow;
1312
1313         newsk = tcp_create_openreq_child(sk, req, skb);
1314         if (!newsk)
1315                 goto exit_nonewsk;
1316
1317         newsk->sk_gso_type = SKB_GSO_TCPV4;
1318         inet_sk_rx_dst_set(newsk, skb);
1319
1320         newtp                 = tcp_sk(newsk);
1321         newinet               = inet_sk(newsk);
1322         ireq                  = inet_rsk(req);
1323         sk_daddr_set(newsk, ireq->ir_rmt_addr);
1324         sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1325         newinet->inet_saddr           = ireq->ir_loc_addr;
1326         inet_opt              = ireq->opt;
1327         rcu_assign_pointer(newinet->inet_opt, inet_opt);
1328         ireq->opt             = NULL;
1329         newinet->mc_index     = inet_iif(skb);
1330         newinet->mc_ttl       = ip_hdr(skb)->ttl;
1331         newinet->rcv_tos      = ip_hdr(skb)->tos;
1332         inet_csk(newsk)->icsk_ext_hdr_len = 0;
1333         inet_set_txhash(newsk);
1334         if (inet_opt)
1335                 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1336         newinet->inet_id = newtp->write_seq ^ jiffies;
1337
1338         if (!dst) {
1339                 dst = inet_csk_route_child_sock(sk, newsk, req);
1340                 if (!dst)
1341                         goto put_and_exit;
1342         } else {
1343                 /* syncookie case : see end of cookie_v4_check() */
1344         }
1345         sk_setup_caps(newsk, dst);
1346
1347         tcp_ca_openreq_child(newsk, dst);
1348
1349         tcp_sync_mss(newsk, dst_mtu(dst));
1350         newtp->advmss = dst_metric_advmss(dst);
1351         if (tcp_sk(sk)->rx_opt.user_mss &&
1352             tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1353                 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1354
1355         tcp_initialize_rcv_mss(newsk);
1356
1357 #ifdef CONFIG_TCP_MD5SIG
1358         /* Copy over the MD5 key from the original socket */
1359         key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1360                                 AF_INET);
1361         if (key != NULL) {
1362                 /*
1363                  * We're using one, so create a matching key
1364                  * on the newsk structure. If we fail to get
1365                  * memory, then we end up not copying the key
1366                  * across. Shucks.
1367                  */
1368                 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1369                                AF_INET, key->key, key->keylen, GFP_ATOMIC);
1370                 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1371         }
1372 #endif
1373
1374         if (__inet_inherit_port(sk, newsk) < 0)
1375                 goto put_and_exit;
1376         __inet_hash_nolisten(newsk, NULL);
1377
1378         return newsk;
1379
1380 exit_overflow:
1381         NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1382 exit_nonewsk:
1383         dst_release(dst);
1384 exit:
1385         NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1386         return NULL;
1387 put_and_exit:
1388         inet_csk_prepare_forced_close(newsk);
1389         tcp_done(newsk);
1390         goto exit;
1391 }
1392 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1393
1394 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1395 {
1396         const struct tcphdr *th = tcp_hdr(skb);
1397         const struct iphdr *iph = ip_hdr(skb);
1398         struct request_sock *req;
1399         struct sock *nsk;
1400
1401         req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr);
1402         if (req) {
1403                 nsk = tcp_check_req(sk, skb, req, false);
1404                 reqsk_put(req);
1405                 return nsk;
1406         }
1407
1408         nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
1409                         th->source, iph->daddr, th->dest, inet_iif(skb));
1410
1411         if (nsk) {
1412                 if (nsk->sk_state != TCP_TIME_WAIT) {
1413                         bh_lock_sock(nsk);
1414                         return nsk;
1415                 }
1416                 inet_twsk_put(inet_twsk(nsk));
1417                 return NULL;
1418         }
1419
1420 #ifdef CONFIG_SYN_COOKIES
1421         if (!th->syn)
1422                 sk = cookie_v4_check(sk, skb);
1423 #endif
1424         return sk;
1425 }
1426
1427 /* The socket must have its spinlock held when we get
1428  * here.
1429  *
1430  * We have a potential double-lock case here, so even when
1431  * doing backlog processing we use the BH locking scheme.
1432  * This is because we cannot sleep with the original spinlock
1433  * held.
1434  */
1435 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1436 {
1437         struct sock *rsk;
1438
1439         if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1440                 struct dst_entry *dst = sk->sk_rx_dst;
1441
1442                 sock_rps_save_rxhash(sk, skb);
1443                 sk_mark_napi_id(sk, skb);
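                /* Revalidate the cached input route: drop it if the segment
                 * arrived on a different interface or if dst->ops->check()
                 * reports the entry as stale, so that a fresh one can be
                 * installed later (see inet_sk_rx_dst_set() below).
                 */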
1444                 if (dst) {
1445                         if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1446                             dst->ops->check(dst, 0) == NULL) {
1447                                 dst_release(dst);
1448                                 sk->sk_rx_dst = NULL;
1449                         }
1450                 }
1451                 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1452                 return 0;
1453         }
1454
1455         if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1456                 goto csum_err;
1457
1458         if (sk->sk_state == TCP_LISTEN) {
1459                 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1460                 if (!nsk)
1461                         goto discard;
1462
1463                 if (nsk != sk) {
1464                         sock_rps_save_rxhash(nsk, skb);
1465                         sk_mark_napi_id(sk, skb);
1466                         if (tcp_child_process(sk, nsk, skb)) {
1467                                 rsk = nsk;
1468                                 goto reset;
1469                         }
1470                         return 0;
1471                 }
1472         } else
1473                 sock_rps_save_rxhash(sk, skb);
1474
1475         if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
1476                 rsk = sk;
1477                 goto reset;
1478         }
1479         return 0;
1480
1481 reset:
1482         tcp_v4_send_reset(rsk, skb);
1483 discard:
1484         kfree_skb(skb);
1485         /* Be careful here. If this function gets more complicated and
1486          * gcc suffers from register pressure on the x86, sk (in %ebx)
1487          * might be destroyed here. This current version compiles correctly,
1488          * but you have been warned.
1489          */
1490         return 0;
1491
1492 csum_err:
1493         TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1494         TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1495         goto discard;
1496 }
1497 EXPORT_SYMBOL(tcp_v4_do_rcv);
1498
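/* Early demux: called from the IPv4 receive path before the routing decision.
 * A lookup in the established hash lets us attach the socket to the skb right
 * away and, for full sockets, reuse the cached rx dst when it is still valid
 * and the input interface matches, which avoids a separate route lookup in
 * the common case.
 */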
1499 void tcp_v4_early_demux(struct sk_buff *skb)
1500 {
1501         const struct iphdr *iph;
1502         const struct tcphdr *th;
1503         struct sock *sk;
1504
1505         if (skb->pkt_type != PACKET_HOST)
1506                 return;
1507
1508         if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1509                 return;
1510
1511         iph = ip_hdr(skb);
1512         th = tcp_hdr(skb);
1513
1514         if (th->doff < sizeof(struct tcphdr) / 4)
1515                 return;
1516
1517         sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1518                                        iph->saddr, th->source,
1519                                        iph->daddr, ntohs(th->dest),
1520                                        skb->skb_iif);
1521         if (sk) {
1522                 skb->sk = sk;
1523                 skb->destructor = sock_edemux;
1524                 if (sk_fullsock(sk)) {
1525                         struct dst_entry *dst = sk->sk_rx_dst;
1526
1527                         if (dst)
1528                                 dst = dst_check(dst, 0);
1529                         if (dst &&
1530                             inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1531                                 skb_dst_set_noref(skb, dst);
1532                 }
1533         }
1534 }
1535
1536 /* Packet is added to VJ-style prequeue for processing in process
1537  * context, if a reader task is waiting. Apparently, this exciting
1538  * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1539  * failed somewhere. Latency? Burstiness? Well, at least now we will
1540  * see why it failed. 8)8)                               --ANK
1541  *
1542  */
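/* In practice the behaviour below is: segments are queued to
 * tp->ucopy.prequeue while a reader has registered itself via tp->ucopy.task
 * (typically from tcp_recvmsg()).  If the queued memory exceeds sk_rcvbuf the
 * whole prequeue is flushed through sk_backlog_rcv() and counted as
 * TCPPrequeueDropped; otherwise the first queued segment wakes the reader
 * and, unless an ACK is already scheduled, arms a delayed-ACK timer at 3/4 of
 * the minimum RTO, presumably so an ACK still goes out if the reader is slow.
 */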
1543 bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1544 {
1545         struct tcp_sock *tp = tcp_sk(sk);
1546
1547         if (sysctl_tcp_low_latency || !tp->ucopy.task)
1548                 return false;
1549
1550         if (skb->len <= tcp_hdrlen(skb) &&
1551             skb_queue_len(&tp->ucopy.prequeue) == 0)
1552                 return false;
1553
1554         /* Before escaping RCU protected region, we need to take care of skb
1555          * dst. Prequeue is only enabled for established sockets.
1556          * For such sockets, we might need the skb dst only to set sk->sk_rx_dst.
1557          * Instead of doing a full sk_rx_dst validity check here, let's perform
1558          * an optimistic check.
1559          */
1560         if (likely(sk->sk_rx_dst))
1561                 skb_dst_drop(skb);
1562         else
1563                 skb_dst_force(skb);
1564
1565         __skb_queue_tail(&tp->ucopy.prequeue, skb);
1566         tp->ucopy.memory += skb->truesize;
1567         if (tp->ucopy.memory > sk->sk_rcvbuf) {
1568                 struct sk_buff *skb1;
1569
1570                 BUG_ON(sock_owned_by_user(sk));
1571
1572                 while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
1573                         sk_backlog_rcv(sk, skb1);
1574                         NET_INC_STATS_BH(sock_net(sk),
1575                                          LINUX_MIB_TCPPREQUEUEDROPPED);
1576                 }
1577
1578                 tp->ucopy.memory = 0;
1579         } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1580                 wake_up_interruptible_sync_poll(sk_sleep(sk),
1581                                            POLLIN | POLLRDNORM | POLLRDBAND);
1582                 if (!inet_csk_ack_scheduled(sk))
1583                         inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1584                                                   (3 * tcp_rto_min(sk)) / 4,
1585                                                   TCP_RTO_MAX);
1586         }
1587         return true;
1588 }
1589 EXPORT_SYMBOL(tcp_prequeue);
1590
1591 /*
1592  *      From tcp_input.c
1593  */
1594
1595 int tcp_v4_rcv(struct sk_buff *skb)
1596 {
1597         const struct iphdr *iph;
1598         const struct tcphdr *th;
1599         struct sock *sk;
1600         int ret;
1601         struct net *net = dev_net(skb->dev);
1602
1603         if (skb->pkt_type != PACKET_HOST)
1604                 goto discard_it;
1605
1606         /* Count it even if it's bad */
1607         TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1608
1609         if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1610                 goto discard_it;
1611
1612         th = tcp_hdr(skb);
1613
1614         if (th->doff < sizeof(struct tcphdr) / 4)
1615                 goto bad_packet;
1616         if (!pskb_may_pull(skb, th->doff * 4))
1617                 goto discard_it;
1618
1619         /* An explanation is required here, I think.
1620          * Packet length and doff are validated by header prediction,
1621          * provided the case of th->doff == 0 is eliminated.
1622          * So, we defer the checks. */
1623
1624         if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1625                 goto csum_error;
1626
1627         th = tcp_hdr(skb);
1628         iph = ip_hdr(skb);
1629         /* This is tricky: we move the IPCB to its correct location inside TCP_SKB_CB();
1630          * barrier() makes sure the compiler won't play aliasing games.
1631          */
1632         memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1633                 sizeof(struct inet_skb_parm));
1634         barrier();
1635
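        /* Stash the parsed header fields in the skb control block.  Note that
         * end_seq counts the SYN and FIN flags as well as the payload, since
         * each of those flags consumes one sequence number.
         */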
1636         TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1637         TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1638                                     skb->len - th->doff * 4);
1639         TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1640         TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1641         TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1642         TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1643         TCP_SKB_CB(skb)->sacked  = 0;
1644
1645         sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1646         if (!sk)
1647                 goto no_tcp_socket;
1648
1649 process:
1650         if (sk->sk_state == TCP_TIME_WAIT)
1651                 goto do_time_wait;
1652
1653         if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1654                 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1655                 goto discard_and_relse;
1656         }
1657
1658         if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1659                 goto discard_and_relse;
1660
1661 #ifdef CONFIG_TCP_MD5SIG
1662         /*
1663          * We really want to reject the packet as early as possible
1664          * if:
1665          *  o We're expecting an MD5'd packet and there is no MD5 TCP option
1666          *  o There is an MD5 option and we're not expecting one
1667          */
1668         if (tcp_v4_inbound_md5_hash(sk, skb))
1669                 goto discard_and_relse;
1670 #endif
1671
1672         nf_reset(skb);
1673
1674         if (sk_filter(sk, skb))
1675                 goto discard_and_relse;
1676
1677         sk_incoming_cpu_update(sk);
1678         skb->dev = NULL;
1679
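        /* Three possible outcomes from here: if the socket is not owned by a
         * user context we process the segment directly (or park it on the
         * prequeue for the reader); otherwise it goes to the backlog, which
         * is bounded by sk_rcvbuf + sk_sndbuf and drops anything beyond that.
         */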
1680         bh_lock_sock_nested(sk);
1681         ret = 0;
1682         if (!sock_owned_by_user(sk)) {
1683                 if (!tcp_prequeue(sk, skb))
1684                         ret = tcp_v4_do_rcv(sk, skb);
1685         } else if (unlikely(sk_add_backlog(sk, skb,
1686                                            sk->sk_rcvbuf + sk->sk_sndbuf))) {
1687                 bh_unlock_sock(sk);
1688                 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1689                 goto discard_and_relse;
1690         }
1691         bh_unlock_sock(sk);
1692
1693         sock_put(sk);
1694
1695         return ret;
1696
1697 no_tcp_socket:
1698         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1699                 goto discard_it;
1700
1701         if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1702 csum_error:
1703                 TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1704 bad_packet:
1705                 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1706         } else {
1707                 tcp_v4_send_reset(NULL, skb);
1708         }
1709
1710 discard_it:
1711         /* Discard frame. */
1712         kfree_skb(skb);
1713         return 0;
1714
1715 discard_and_relse:
1716         sock_put(sk);
1717         goto discard_it;
1718
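        /* TIME_WAIT handling: after the usual policy, length and checksum
         * checks, tcp_timewait_state_process() decides the fate of the
         * segment.  A new SYN that is acceptable to a current listener makes
         * us deschedule the timewait socket and restart processing with that
         * listener; otherwise we may ACK, send a reset via the no_tcp_socket
         * path, or simply drop the segment.
         */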
1719 do_time_wait:
1720         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1721                 inet_twsk_put(inet_twsk(sk));
1722                 goto discard_it;
1723         }
1724
1725         if (skb->len < (th->doff << 2)) {
1726                 inet_twsk_put(inet_twsk(sk));
1727                 goto bad_packet;
1728         }
1729         if (tcp_checksum_complete(skb)) {
1730                 inet_twsk_put(inet_twsk(sk));
1731                 goto csum_error;
1732         }
1733         switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1734         case TCP_TW_SYN: {
1735                 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1736                                                         &tcp_hashinfo,
1737                                                         iph->saddr, th->source,
1738                                                         iph->daddr, th->dest,
1739                                                         inet_iif(skb));
1740                 if (sk2) {
1741                         inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
1742                         inet_twsk_put(inet_twsk(sk));
1743                         sk = sk2;
1744                         goto process;
1745                 }
1746                 /* Fall through to ACK */
1747         }
1748         case TCP_TW_ACK:
1749                 tcp_v4_timewait_ack(sk, skb);
1750                 break;
1751         case TCP_TW_RST:
1752                 goto no_tcp_socket;
1753         case TCP_TW_SUCCESS:;
1754         }
1755         goto discard_it;
1756 }
1757
1758 static struct timewait_sock_ops tcp_timewait_sock_ops = {
1759         .twsk_obj_size  = sizeof(struct tcp_timewait_sock),
1760         .twsk_unique    = tcp_twsk_unique,
1761         .twsk_destructor = tcp_twsk_destructor,
1762 };
1763
1764 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1765 {
1766         struct dst_entry *dst = skb_dst(skb);
1767
1768         if (dst) {
1769                 dst_hold(dst);
1770                 sk->sk_rx_dst = dst;
1771                 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1772         }
1773 }
1774 EXPORT_SYMBOL(inet_sk_rx_dst_set);
1775
1776 const struct inet_connection_sock_af_ops ipv4_specific = {
1777         .queue_xmit        = ip_queue_xmit,
1778         .send_check        = tcp_v4_send_check,
1779         .rebuild_header    = inet_sk_rebuild_header,
1780         .sk_rx_dst_set     = inet_sk_rx_dst_set,
1781         .conn_request      = tcp_v4_conn_request,
1782         .syn_recv_sock     = tcp_v4_syn_recv_sock,
1783         .net_header_len    = sizeof(struct iphdr),
1784         .setsockopt        = ip_setsockopt,
1785         .getsockopt        = ip_getsockopt,
1786         .addr2sockaddr     = inet_csk_addr2sockaddr,
1787         .sockaddr_len      = sizeof(struct sockaddr_in),
1788         .bind_conflict     = inet_csk_bind_conflict,
1789 #ifdef CONFIG_COMPAT
1790         .compat_setsockopt = compat_ip_setsockopt,
1791         .compat_getsockopt = compat_ip_getsockopt,
1792 #endif
1793         .mtu_reduced       = tcp_v4_mtu_reduced,
1794 };
1795 EXPORT_SYMBOL(ipv4_specific);
1796
1797 #ifdef CONFIG_TCP_MD5SIG
1798 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1799         .md5_lookup             = tcp_v4_md5_lookup,
1800         .calc_md5_hash          = tcp_v4_md5_hash_skb,
1801         .md5_parse              = tcp_v4_parse_md5_keys,
1802 };
1803 #endif
1804
1805 /* NOTE: A lot of things are set to zero explicitly by the call to
1806  *       sk_alloc(), so they need not be done here.
1807  */
1808 static int tcp_v4_init_sock(struct sock *sk)
1809 {
1810         struct inet_connection_sock *icsk = inet_csk(sk);
1811
1812         tcp_init_sock(sk);
1813
1814         icsk->icsk_af_ops = &ipv4_specific;
1815
1816 #ifdef CONFIG_TCP_MD5SIG
1817         tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
1818 #endif
1819
1820         return 0;
1821 }
1822
1823 void tcp_v4_destroy_sock(struct sock *sk)
1824 {
1825         struct tcp_sock *tp = tcp_sk(sk);
1826
1827         tcp_clear_xmit_timers(sk);
1828
1829         tcp_cleanup_congestion_control(sk);
1830
1831         /* Clean up the write buffer. */
1832         tcp_write_queue_purge(sk);
1833
1834         /* Cleans up our, hopefully empty, out_of_order_queue. */
1835         __skb_queue_purge(&tp->out_of_order_queue);
1836
1837 #ifdef CONFIG_TCP_MD5SIG
1838         /* Clean up the MD5 key list, if any */
1839         if (tp->md5sig_info) {
1840                 tcp_clear_md5_list(sk);
1841                 kfree_rcu(tp->md5sig_info, rcu);
1842                 tp->md5sig_info = NULL;
1843         }
1844 #endif
1845
1846         /* Clean the prequeue; it really should be empty by now */
1847         __skb_queue_purge(&tp->ucopy.prequeue);
1848
1849         /* Clean up a referenced TCP bind bucket. */
1850         if (inet_csk(sk)->icsk_bind_hash)
1851                 inet_put_port(sk);
1852
1853         BUG_ON(tp->fastopen_rsk != NULL);
1854
1855         /* If the socket was aborted during a connect operation */
1856         tcp_free_fastopen_req(tp);
1857
1858         sk_sockets_allocated_dec(sk);
1859         sock_release_memcg(sk);
1860 }
1861 EXPORT_SYMBOL(tcp_v4_destroy_sock);
1862
1863 #ifdef CONFIG_PROC_FS
1864 /* Proc filesystem TCP sock list dumping. */
1865
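/* The iterator below walks the listening hash first; while on a listener it
 * also walks that listener's SYN queue (the TCP_SEQ_STATE_OPENREQ entries),
 * and only then moves on to the established hash, which also contains the
 * TIME_WAIT sockets shown by get_timewait4_sock().
 */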
1866 /*
1867  * Get the next listener socket following cur.  If cur is NULL, get the first socket
1868  * starting from bucket given in st->bucket; when st->bucket is zero the
1869  * very first socket in the hash table is returned.
1870  */
1871 static void *listening_get_next(struct seq_file *seq, void *cur)
1872 {
1873         struct inet_connection_sock *icsk;
1874         struct hlist_nulls_node *node;
1875         struct sock *sk = cur;
1876         struct inet_listen_hashbucket *ilb;
1877         struct tcp_iter_state *st = seq->private;
1878         struct net *net = seq_file_net(seq);
1879
1880         if (!sk) {
1881                 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1882                 spin_lock_bh(&ilb->lock);
1883                 sk = sk_nulls_head(&ilb->head);
1884                 st->offset = 0;
1885                 goto get_sk;
1886         }
1887         ilb = &tcp_hashinfo.listening_hash[st->bucket];
1888         ++st->num;
1889         ++st->offset;
1890
1891         if (st->state == TCP_SEQ_STATE_OPENREQ) {
1892                 struct request_sock *req = cur;
1893
1894                 icsk = inet_csk(st->syn_wait_sk);
1895                 req = req->dl_next;
1896                 while (1) {
1897                         while (req) {
1898                                 if (req->rsk_ops->family == st->family) {
1899                                         cur = req;
1900                                         goto out;
1901                                 }
1902                                 req = req->dl_next;
1903                         }
1904                         if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
1905                                 break;
1906 get_req:
1907                         req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
1908                 }
1909                 sk        = sk_nulls_next(st->syn_wait_sk);
1910                 st->state = TCP_SEQ_STATE_LISTENING;
1911                 spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1912         } else {
1913                 icsk = inet_csk(sk);
1914                 spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1915                 if (reqsk_queue_len(&icsk->icsk_accept_queue))
1916                         goto start_req;
1917                 spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1918                 sk = sk_nulls_next(sk);
1919         }
1920 get_sk:
1921         sk_nulls_for_each_from(sk, node) {
1922                 if (!net_eq(sock_net(sk), net))
1923                         continue;
1924                 if (sk->sk_family == st->family) {
1925                         cur = sk;
1926                         goto out;
1927                 }
1928                 icsk = inet_csk(sk);
1929                 spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1930                 if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
1931 start_req:
1932                         st->uid         = sock_i_uid(sk);
1933                         st->syn_wait_sk = sk;
1934                         st->state       = TCP_SEQ_STATE_OPENREQ;
1935                         st->sbucket     = 0;
1936                         goto get_req;
1937                 }
1938                 spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1939         }
1940         spin_unlock_bh(&ilb->lock);
1941         st->offset = 0;
1942         if (++st->bucket < INET_LHTABLE_SIZE) {
1943                 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1944                 spin_lock_bh(&ilb->lock);
1945                 sk = sk_nulls_head(&ilb->head);
1946                 goto get_sk;
1947         }
1948         cur = NULL;
1949 out:
1950         return cur;
1951 }
1952
1953 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1954 {
1955         struct tcp_iter_state *st = seq->private;
1956         void *rc;
1957
1958         st->bucket = 0;
1959         st->offset = 0;
1960         rc = listening_get_next(seq, NULL);
1961
1962         while (rc && *pos) {
1963                 rc = listening_get_next(seq, rc);
1964                 --*pos;
1965         }
1966         return rc;
1967 }
1968
1969 static inline bool empty_bucket(const struct tcp_iter_state *st)
1970 {
1971         return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
1972 }
1973
1974 /*
1975  * Get first established socket starting from bucket given in st->bucket.
1976  * If st->bucket is zero, the very first socket in the hash is returned.
1977  */
1978 static void *established_get_first(struct seq_file *seq)
1979 {
1980         struct tcp_iter_state *st = seq->private;
1981         struct net *net = seq_file_net(seq);
1982         void *rc = NULL;
1983
1984         st->offset = 0;
1985         for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
1986                 struct sock *sk;
1987                 struct hlist_nulls_node *node;
1988                 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1989
1990                 /* Lockless fast path for the common case of empty buckets */
1991                 if (empty_bucket(st))
1992                         continue;
1993
1994                 spin_lock_bh(lock);
1995                 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
1996                         if (sk->sk_family != st->family ||
1997                             !net_eq(sock_net(sk), net)) {
1998                                 continue;
1999                         }
2000                         rc = sk;
2001                         goto out;
2002                 }
2003                 spin_unlock_bh(lock);
2004         }
2005 out:
2006         return rc;
2007 }
2008
2009 static void *established_get_next(struct seq_file *seq, void *cur)
2010 {
2011         struct sock *sk = cur;
2012         struct hlist_nulls_node *node;
2013         struct tcp_iter_state *st = seq->private;
2014         struct net *net = seq_file_net(seq);
2015
2016         ++st->num;
2017         ++st->offset;
2018
2019         sk = sk_nulls_next(sk);
2020
2021         sk_nulls_for_each_from(sk, node) {
2022                 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
2023                         return sk;
2024         }
2025
2026         spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2027         ++st->bucket;
2028         return established_get_first(seq);
2029 }
2030
2031 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2032 {
2033         struct tcp_iter_state *st = seq->private;
2034         void *rc;
2035
2036         st->bucket = 0;
2037         rc = established_get_first(seq);
2038
2039         while (rc && pos) {
2040                 rc = established_get_next(seq, rc);
2041                 --pos;
2042         }
2043         return rc;
2044 }
2045
2046 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2047 {
2048         void *rc;
2049         struct tcp_iter_state *st = seq->private;
2050
2051         st->state = TCP_SEQ_STATE_LISTENING;
2052         rc        = listening_get_idx(seq, &pos);
2053
2054         if (!rc) {
2055                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2056                 rc        = established_get_idx(seq, pos);
2057         }
2058
2059         return rc;
2060 }
2061
2062 static void *tcp_seek_last_pos(struct seq_file *seq)
2063 {
2064         struct tcp_iter_state *st = seq->private;
2065         int offset = st->offset;
2066         int orig_num = st->num;
2067         void *rc = NULL;
2068
2069         switch (st->state) {
2070         case TCP_SEQ_STATE_OPENREQ:
2071         case TCP_SEQ_STATE_LISTENING:
2072                 if (st->bucket >= INET_LHTABLE_SIZE)
2073                         break;
2074                 st->state = TCP_SEQ_STATE_LISTENING;
2075                 rc = listening_get_next(seq, NULL);
2076                 while (offset-- && rc)
2077                         rc = listening_get_next(seq, rc);
2078                 if (rc)
2079                         break;
2080                 st->bucket = 0;
2081                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2082                 /* Fallthrough */
2083         case TCP_SEQ_STATE_ESTABLISHED:
2084                 if (st->bucket > tcp_hashinfo.ehash_mask)
2085                         break;
2086                 rc = established_get_first(seq);
2087                 while (offset-- && rc)
2088                         rc = established_get_next(seq, rc);
2089         }
2090
2091         st->num = orig_num;
2092
2093         return rc;
2094 }
2095
2096 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2097 {
2098         struct tcp_iter_state *st = seq->private;
2099         void *rc;
2100
2101         if (*pos && *pos == st->last_pos) {
2102                 rc = tcp_seek_last_pos(seq);
2103                 if (rc)
2104                         goto out;
2105         }
2106
2107         st->state = TCP_SEQ_STATE_LISTENING;
2108         st->num = 0;
2109         st->bucket = 0;
2110         st->offset = 0;
2111         rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2112
2113 out:
2114         st->last_pos = *pos;
2115         return rc;
2116 }
2117
2118 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2119 {
2120         struct tcp_iter_state *st = seq->private;
2121         void *rc = NULL;
2122
2123         if (v == SEQ_START_TOKEN) {
2124                 rc = tcp_get_idx(seq, 0);
2125                 goto out;
2126         }
2127
2128         switch (st->state) {
2129         case TCP_SEQ_STATE_OPENREQ:
2130         case TCP_SEQ_STATE_LISTENING:
2131                 rc = listening_get_next(seq, v);
2132                 if (!rc) {
2133                         st->state = TCP_SEQ_STATE_ESTABLISHED;
2134                         st->bucket = 0;
2135                         st->offset = 0;
2136                         rc        = established_get_first(seq);
2137                 }
2138                 break;
2139         case TCP_SEQ_STATE_ESTABLISHED:
2140                 rc = established_get_next(seq, v);
2141                 break;
2142         }
2143 out:
2144         ++*pos;
2145         st->last_pos = *pos;
2146         return rc;
2147 }
2148
2149 static void tcp_seq_stop(struct seq_file *seq, void *v)
2150 {
2151         struct tcp_iter_state *st = seq->private;
2152
2153         switch (st->state) {
2154         case TCP_SEQ_STATE_OPENREQ:
2155                 if (v) {
2156                         struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2157                         spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2158                 }
2159         case TCP_SEQ_STATE_LISTENING:
2160                 if (v != SEQ_START_TOKEN)
2161                         spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2162                 break;
2163         case TCP_SEQ_STATE_ESTABLISHED:
2164                 if (v)
2165                         spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2166                 break;
2167         }
2168 }
2169
2170 int tcp_seq_open(struct inode *inode, struct file *file)
2171 {
2172         struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
2173         struct tcp_iter_state *s;
2174         int err;
2175
2176         err = seq_open_net(inode, file, &afinfo->seq_ops,
2177                           sizeof(struct tcp_iter_state));
2178         if (err < 0)
2179                 return err;
2180
2181         s = ((struct seq_file *)file->private_data)->private;
2182         s->family               = afinfo->family;
2183         s->last_pos             = 0;
2184         return 0;
2185 }
2186 EXPORT_SYMBOL(tcp_seq_open);
2187
2188 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2189 {
2190         int rc = 0;
2191         struct proc_dir_entry *p;
2192
2193         afinfo->seq_ops.start           = tcp_seq_start;
2194         afinfo->seq_ops.next            = tcp_seq_next;
2195         afinfo->seq_ops.stop            = tcp_seq_stop;
2196
2197         p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2198                              afinfo->seq_fops, afinfo);
2199         if (!p)
2200                 rc = -ENOMEM;
2201         return rc;
2202 }
2203 EXPORT_SYMBOL(tcp_proc_register);
2204
2205 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2206 {
2207         remove_proc_entry(afinfo->name, net->proc_net);
2208 }
2209 EXPORT_SYMBOL(tcp_proc_unregister);
2210
2211 static void get_openreq4(const struct request_sock *req,
2212                          struct seq_file *f, int i, kuid_t uid)
2213 {
2214         const struct inet_request_sock *ireq = inet_rsk(req);
2215         long delta = req->rsk_timer.expires - jiffies;
2216
2217         seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2218                 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2219                 i,
2220                 ireq->ir_loc_addr,
2221                 ireq->ir_num,
2222                 ireq->ir_rmt_addr,
2223                 ntohs(ireq->ir_rmt_port),
2224                 TCP_SYN_RECV,
2225                 0, 0, /* could print option size, but that is af dependent. */
2226                 1,    /* timers active (only the expire timer) */
2227                 jiffies_delta_to_clock_t(delta),
2228                 req->num_timeout,
2229                 from_kuid_munged(seq_user_ns(f), uid),
2230                 0,  /* non standard timer */
2231                 0, /* open_requests have no inode */
2232                 0,
2233                 req);
2234 }
2235
2236 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2237 {
2238         int timer_active;
2239         unsigned long timer_expires;
2240         const struct tcp_sock *tp = tcp_sk(sk);
2241         const struct inet_connection_sock *icsk = inet_csk(sk);
2242         const struct inet_sock *inet = inet_sk(sk);
2243         struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
2244         __be32 dest = inet->inet_daddr;
2245         __be32 src = inet->inet_rcv_saddr;
2246         __u16 destp = ntohs(inet->inet_dport);
2247         __u16 srcp = ntohs(inet->inet_sport);
2248         int rx_queue;
2249
2250         if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2251             icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
2252             icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2253                 timer_active    = 1;
2254                 timer_expires   = icsk->icsk_timeout;
2255         } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2256                 timer_active    = 4;
2257                 timer_expires   = icsk->icsk_timeout;
2258         } else if (timer_pending(&sk->sk_timer)) {
2259                 timer_active    = 2;
2260                 timer_expires   = sk->sk_timer.expires;
2261         } else {
2262                 timer_active    = 0;
2263                 timer_expires = jiffies;
2264         }
2265
2266         if (sk->sk_state == TCP_LISTEN)
2267                 rx_queue = sk->sk_ack_backlog;
2268         else
2269                 /*
2270                  * Because we don't lock the socket, we might find a transient negative value.
2271                  */
2272                 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2273
2274         seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2275                         "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2276                 i, src, srcp, dest, destp, sk->sk_state,
2277                 tp->write_seq - tp->snd_una,
2278                 rx_queue,
2279                 timer_active,
2280                 jiffies_delta_to_clock_t(timer_expires - jiffies),
2281                 icsk->icsk_retransmits,
2282                 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2283                 icsk->icsk_probes_out,
2284                 sock_i_ino(sk),
2285                 atomic_read(&sk->sk_refcnt), sk,
2286                 jiffies_to_clock_t(icsk->icsk_rto),
2287                 jiffies_to_clock_t(icsk->icsk_ack.ato),
2288                 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2289                 tp->snd_cwnd,
2290                 sk->sk_state == TCP_LISTEN ?
2291                     (fastopenq ? fastopenq->max_qlen : 0) :
2292                     (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2293 }
2294
2295 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2296                                struct seq_file *f, int i)
2297 {
2298         __be32 dest, src;
2299         __u16 destp, srcp;
2300         s32 delta = tw->tw_ttd - inet_tw_time_stamp();
2301
2302         dest  = tw->tw_daddr;
2303         src   = tw->tw_rcv_saddr;
2304         destp = ntohs(tw->tw_dport);
2305         srcp  = ntohs(tw->tw_sport);
2306
2307         seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2308                 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2309                 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2310                 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2311                 atomic_read(&tw->tw_refcnt), tw);
2312 }
2313
2314 #define TMPSZ 150
2315
2316 static int tcp4_seq_show(struct seq_file *seq, void *v)
2317 {
2318         struct tcp_iter_state *st;
2319         struct sock *sk = v;
2320
2321         seq_setwidth(seq, TMPSZ - 1);
2322         if (v == SEQ_START_TOKEN) {
2323                 seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
2324                            "rx_queue tr tm->when retrnsmt   uid  timeout "
2325                            "inode");
2326                 goto out;
2327         }
2328         st = seq->private;
2329
2330         switch (st->state) {
2331         case TCP_SEQ_STATE_LISTENING:
2332         case TCP_SEQ_STATE_ESTABLISHED:
2333                 if (sk->sk_state == TCP_TIME_WAIT)
2334                         get_timewait4_sock(v, seq, st->num);
2335                 else
2336                         get_tcp4_sock(v, seq, st->num);
2337                 break;
2338         case TCP_SEQ_STATE_OPENREQ:
2339                 get_openreq4(v, seq, st->num, st->uid);
2340                 break;
2341         }
2342 out:
2343         seq_pad(seq, '\n');
2344         return 0;
2345 }
2346
2347 static const struct file_operations tcp_afinfo_seq_fops = {
2348         .owner   = THIS_MODULE,
2349         .open    = tcp_seq_open,
2350         .read    = seq_read,
2351         .llseek  = seq_lseek,
2352         .release = seq_release_net
2353 };
2354
2355 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2356         .name           = "tcp",
2357         .family         = AF_INET,
2358         .seq_fops       = &tcp_afinfo_seq_fops,
2359         .seq_ops        = {
2360                 .show           = tcp4_seq_show,
2361         },
2362 };
2363
2364 static int __net_init tcp4_proc_init_net(struct net *net)
2365 {
2366         return tcp_proc_register(net, &tcp4_seq_afinfo);
2367 }
2368
2369 static void __net_exit tcp4_proc_exit_net(struct net *net)
2370 {
2371         tcp_proc_unregister(net, &tcp4_seq_afinfo);
2372 }
2373
2374 static struct pernet_operations tcp4_net_ops = {
2375         .init = tcp4_proc_init_net,
2376         .exit = tcp4_proc_exit_net,
2377 };
2378
2379 int __init tcp4_proc_init(void)
2380 {
2381         return register_pernet_subsys(&tcp4_net_ops);
2382 }
2383
2384 void tcp4_proc_exit(void)
2385 {
2386         unregister_pernet_subsys(&tcp4_net_ops);
2387 }
2388 #endif /* CONFIG_PROC_FS */
2389
2390 struct proto tcp_prot = {
2391         .name                   = "TCP",
2392         .owner                  = THIS_MODULE,
2393         .close                  = tcp_close,
2394         .connect                = tcp_v4_connect,
2395         .disconnect             = tcp_disconnect,
2396         .accept                 = inet_csk_accept,
2397         .ioctl                  = tcp_ioctl,
2398         .init                   = tcp_v4_init_sock,
2399         .destroy                = tcp_v4_destroy_sock,
2400         .shutdown               = tcp_shutdown,
2401         .setsockopt             = tcp_setsockopt,
2402         .getsockopt             = tcp_getsockopt,
2403         .recvmsg                = tcp_recvmsg,
2404         .sendmsg                = tcp_sendmsg,
2405         .sendpage               = tcp_sendpage,
2406         .backlog_rcv            = tcp_v4_do_rcv,
2407         .release_cb             = tcp_release_cb,
2408         .hash                   = inet_hash,
2409         .unhash                 = inet_unhash,
2410         .get_port               = inet_csk_get_port,
2411         .enter_memory_pressure  = tcp_enter_memory_pressure,
2412         .stream_memory_free     = tcp_stream_memory_free,
2413         .sockets_allocated      = &tcp_sockets_allocated,
2414         .orphan_count           = &tcp_orphan_count,
2415         .memory_allocated       = &tcp_memory_allocated,
2416         .memory_pressure        = &tcp_memory_pressure,
2417         .sysctl_mem             = sysctl_tcp_mem,
2418         .sysctl_wmem            = sysctl_tcp_wmem,
2419         .sysctl_rmem            = sysctl_tcp_rmem,
2420         .max_header             = MAX_TCP_HEADER,
2421         .obj_size               = sizeof(struct tcp_sock),
2422         .slab_flags             = SLAB_DESTROY_BY_RCU,
2423         .twsk_prot              = &tcp_timewait_sock_ops,
2424         .rsk_prot               = &tcp_request_sock_ops,
2425         .h.hashinfo             = &tcp_hashinfo,
2426         .no_autobind            = true,
2427 #ifdef CONFIG_COMPAT
2428         .compat_setsockopt      = compat_tcp_setsockopt,
2429         .compat_getsockopt      = compat_tcp_getsockopt,
2430 #endif
2431 #ifdef CONFIG_MEMCG_KMEM
2432         .init_cgroup            = tcp_init_cgroup,
2433         .destroy_cgroup         = tcp_destroy_cgroup,
2434         .proto_cgroup           = tcp_proto_cgroup,
2435 #endif
2436 };
2437 EXPORT_SYMBOL(tcp_prot);
2438
2439 static void __net_exit tcp_sk_exit(struct net *net)
2440 {
2441         int cpu;
2442
2443         for_each_possible_cpu(cpu)
2444                 inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2445         free_percpu(net->ipv4.tcp_sk);
2446 }
2447
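/* Per-netns init: one kernel-internal control socket is created for every
 * possible CPU; these are presumably the sockets used to emit RSTs and ACKs
 * on behalf of connections we do not own (see tcp_v4_send_reset() above).
 * The per-netns TCP sysctl defaults are seeded here as well.
 */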
2448 static int __net_init tcp_sk_init(struct net *net)
2449 {
2450         int res, cpu;
2451
2452         net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2453         if (!net->ipv4.tcp_sk)
2454                 return -ENOMEM;
2455
2456         for_each_possible_cpu(cpu) {
2457                 struct sock *sk;
2458
2459                 res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2460                                            IPPROTO_TCP, net);
2461                 if (res)
2462                         goto fail;
2463                 *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2464         }
2465         net->ipv4.sysctl_tcp_ecn = 2;
2466         net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
2467         net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
2468         net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
2469         return 0;
2470
2471 fail:
2472         tcp_sk_exit(net);
2473
2474         return res;
2475 }
2476
2477 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2478 {
2479         inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2480 }
2481
2482 static struct pernet_operations __net_initdata tcp_sk_ops = {
2483        .init       = tcp_sk_init,
2484        .exit       = tcp_sk_exit,
2485        .exit_batch = tcp_sk_exit_batch,
2486 };
2487
2488 void __init tcp_v4_init(void)
2489 {
2490         inet_hashinfo_init(&tcp_hashinfo);
2491         if (register_pernet_subsys(&tcp_sk_ops))
2492                 panic("Failed to create the TCP control socket.\n");
2493 }