
tcp: add tcp_conn_request
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b5c2375..97e48d6 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5877,3 +5877,151 @@ discard:
        return 0;
 }
 EXPORT_SYMBOL(tcp_rcv_state_process);
+
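+/* Rate-limited debug message printed when a passive open request is
+ * dropped; shared by the IPv4 and IPv6 request paths.
+ */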
+static inline void pr_drop_req(struct request_sock *req, __u16 port, int family)
+{
+       struct inet_request_sock *ireq = inet_rsk(req);
+
+       if (family == AF_INET)
+               LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
+                              &ireq->ir_rmt_addr, port);
+       else
+               LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI6/%u\n"),
+                              &ireq->ir_v6_rmt_addr, port);
+}
+
+int tcp_conn_request(struct request_sock_ops *rsk_ops,
+                    const struct tcp_request_sock_ops *af_ops,
+                    struct sock *sk, struct sk_buff *skb)
+{
+       struct tcp_options_received tmp_opt;
+       struct request_sock *req;
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct dst_entry *dst = NULL;
+       __u32 isn = TCP_SKB_CB(skb)->when;
+       bool want_cookie = false, fastopen;
+       struct flowi fl;
+       struct tcp_fastopen_cookie foc = { .len = -1 };
+       int err;
+
+       /* TW buckets are converted to open requests without
+        * limitation: they conserve resources and the peer is
+        * evidently a real one.
+        */
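+       /* Note: sysctl_tcp_syncookies == 2 means "always answer with a
+        * syncookie", not only when the SYN queue is full.
+        */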
+       if ((sysctl_tcp_syncookies == 2 ||
+            inet_csk_reqsk_queue_is_full(sk)) && !isn) {
+               want_cookie = tcp_syn_flood_action(sk, skb, rsk_ops->slab_name);
+               if (!want_cookie)
+                       goto drop;
+       }
+
+       /* Accept backlog is full. If we have already queued enough
+        * warm entries in the SYN queue, drop the request: that is
+        * better than clogging the SYN queue with open requests whose
+        * timeouts grow exponentially.
+        */
+       if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
+               goto drop;
+       }
+
+       req = inet_reqsk_alloc(rsk_ops);
+       if (!req)
+               goto drop;
+
+       tcp_rsk(req)->af_specific = af_ops;
+
+       tcp_clear_options(&tmp_opt);
+       tmp_opt.mss_clamp = af_ops->mss_clamp;
+       tmp_opt.user_mss  = tp->rx_opt.user_mss;
+       tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
+
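+       /* A syncookie can encode the peer's MSS in the sequence number,
+        * but the remaining options survive only through the timestamp,
+        * so without a timestamp all parsed options are discarded.
+        */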
+       if (want_cookie && !tmp_opt.saw_tstamp)
+               tcp_clear_options(&tmp_opt);
+
+       tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
+       tcp_openreq_init(req, &tmp_opt, skb, sk);
+
+       af_ops->init_req(req, sk, skb);
+
+       if (security_inet_conn_request(sk, skb, req))
+               goto drop_and_free;
+
+       if (!want_cookie || tmp_opt.tstamp_ok)
+               TCP_ECN_create_request(req, skb, sock_net(sk));
+
+       if (want_cookie) {
+               isn = cookie_init_sequence(af_ops, sk, skb, &req->mss);
+               req->cookie_ts = tmp_opt.tstamp_ok;
+       } else if (!isn) {
+               /* VJ's idea: we save the last timestamp seen from the
+                * destination in the peer table when entering TIME-WAIT
+                * state, and check against it before accepting a new
+                * connection request.
+                *
+                * If "isn" is not zero, this request hit a live
+                * time-wait bucket, so all the necessary checks were
+                * already made when the time-wait state was processed.
+                */
+               if (tmp_opt.saw_tstamp && tcp_death_row.sysctl_tw_recycle) {
+                       bool strict;
+
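+                       /* route_req() reports via 'strict' whether the
+                        * looked-up route is specific to this peer, i.e.
+                        * whether the tw_recycle timestamp check below
+                        * can be trusted.
+                        */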
+                       dst = af_ops->route_req(sk, &fl, req, &strict);
+                       if (dst && strict &&
+                           !tcp_peer_is_proven(req, dst, true)) {
+                               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
+                               goto drop_and_release;
+                       }
+               }
+               /* Kill the following clause if you dislike this heuristic. */
+               else if (!sysctl_tcp_syncookies &&
+                        (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
+                         (sysctl_max_syn_backlog >> 2)) &&
+                        !tcp_peer_is_proven(req, dst, false)) {
+                       /* Without syncookies, the last quarter of the
+                        * backlog is reserved for destinations proven
+                        * to be alive, which means that during a SYN
+                        * flood we keep communicating only with peers
+                        * we already remembered before the flood began.
+                        */
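+                       /* e.g. with sysctl_max_syn_backlog = 256, SYNs
+                        * from unproven peers are dropped once fewer
+                        * than 64 queue slots remain.
+                        */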
+                       pr_drop_req(req, ntohs(tcp_hdr(skb)->source),
+                                   rsk_ops->family);
+                       goto drop_and_release;
+               }
+
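+               /* Normal case: derive a secure ISN from the flow's
+                * addresses and ports (IPv4 uses
+                * secure_tcp_sequence_number() underneath).
+                */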
+               isn = af_ops->init_seq(skb);
+       }
+       if (!dst) {
+               dst = af_ops->route_req(sk, &fl, req, NULL);
+               if (!dst)
+                       goto drop_and_free;
+       }
+
+       tcp_rsk(req)->snt_isn = isn;
+       tcp_openreq_init_rwin(req, sk, dst);
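+       /* A Fast Open child, if any, is created and queued by
+        * tcp_try_fastopen(); otherwise the request is either hashed
+        * into the SYN queue with the initial retransmission timeout,
+        * or freed again in the syncookie case, which keeps no state.
+        */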
+       fastopen = !want_cookie &&
+                  tcp_try_fastopen(sk, skb, req, &foc, dst);
+       err = af_ops->send_synack(sk, dst, &fl, req,
+                                 skb_get_queue_mapping(skb), &foc);
+       if (!fastopen) {
+               if (err || want_cookie)
+                       goto drop_and_free;
+
+               tcp_rsk(req)->listener = NULL;
+               af_ops->queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
+       }
+
+       return 0;
+
+drop_and_release:
+       dst_release(dst);
+drop_and_free:
+       reqsk_free(req);
+drop:
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+       return 0;
+}
+EXPORT_SYMBOL(tcp_conn_request);
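
For reference, a minimal sketch of how an address family is expected to
call the new helper once its callbacks are collected in a struct
tcp_request_sock_ops. The wrapper and ops-table names below follow the
companion IPv4 patch and are assumptions here, not part of this diff:

	int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
	{
		/* Never answer SYNs sent to broadcast/multicast addresses. */
		if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
			goto drop;

		return tcp_conn_request(&tcp_request_sock_ops,
					&tcp_request_sock_ipv4_ops, sk, skb);

	drop:
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
		return 0;
	}

All protocol-specific work (routing, ISN generation, SYNACK transmission,
SYN-queue hashing) is reached through af_ops, so the IPv4 and IPv6
request paths can share the tcp_conn_request() body above.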