
inet: remove icsk_ack.blocked
author    Eric Dumazet <edumazet@google.com>
          Wed, 30 Sep 2020 12:54:56 +0000 (05:54 -0700)
committer David S. Miller <davem@davemloft.net>
          Wed, 30 Sep 2020 21:21:30 +0000 (14:21 -0700)
TCP has been using it to work around the possibility of tcp_delack_timer()
finding the socket owned by the user.

After commit 6f458dfb4092 ("tcp: improve latencies of timer triggered events")
we added TCP_DELACK_TIMER_DEFERRED atomic bit for more immediate recovery,
so we can get rid of icsk_ack.blocked.

This frees space that the following patch will reuse.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
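
For context, a minimal sketch of the deferral path that makes icsk_ack.blocked
redundant, condensed from the net/ipv4/tcp_timer.c hunk below plus the
surrounding upstream code (stats accounting is trimmed; treat it as an
illustration, not the exact upstream function):

	static void tcp_delack_timer_sketch(struct sock *sk)
	{
		bh_lock_sock(sk);
		if (!sock_owned_by_user(sk)) {
			/* Socket is free: process the delayed ACK right away. */
			tcp_delack_timer_handler(sk);
		} else {
			/* Socket is owned by user context.  Instead of setting
			 * icsk_ack.blocked, record the pending work with an atomic
			 * bit; tcp_release_cb() consumes it as soon as the owner
			 * calls release_sock(), so no extra state byte is needed.
			 */
			if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
					      &sk->sk_tsq_flags))
				sock_hold(sk);	/* keep sk alive until the deferred work runs */
		}
		bh_unlock_sock(sk);
		sock_put(sk);
	}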
include/net/inet_connection_sock.h
net/dccp/timer.c
net/ipv4/inet_connection_sock.c
net/ipv4/tcp.c
net/ipv4/tcp_output.c
net/ipv4/tcp_timer.c

diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index dc763ca..79875f9 100644
@@ -110,7 +110,7 @@ struct inet_connection_sock {
                __u8              pending;       /* ACK is pending                         */
                __u8              quick;         /* Scheduled number of quick acks         */
                __u8              pingpong;      /* The session is interactive             */
-               __u8              blocked;       /* Delayed ACK was blocked by socket lock */
+               /* one byte hole. */
                __u32             ato;           /* Predicted tick of soft clock           */
                unsigned long     timeout;       /* Currently scheduled timeout            */
                __u32             lrcvtime;      /* timestamp of last received data packet */
@@ -198,7 +198,7 @@ static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what)
                sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
 #endif
        } else if (what == ICSK_TIME_DACK) {
-               icsk->icsk_ack.blocked = icsk->icsk_ack.pending = 0;
+               icsk->icsk_ack.pending = 0;
 #ifdef INET_CSK_CLEAR_TIMERS
                sk_stop_timer(sk, &icsk->icsk_delack_timer);
 #endif
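
For reference, the resulting icsk_ack layout (a sketch; field names and types
are copied from the struct hunk above, trailing members omitted).  pending,
quick and pingpong are one byte each, so dropping blocked leaves a one-byte
alignment hole ahead of the 32-bit ato field; that hole is the space the
follow-up patch can reuse without growing struct inet_connection_sock:

	struct {
		__u8              pending;       /* ACK is pending                         */
		__u8              quick;         /* Scheduled number of quick acks         */
		__u8              pingpong;      /* The session is interactive             */
		/* one byte hole, freed by this patch */
		__u32             ato;           /* Predicted tick of soft clock           */
		unsigned long     timeout;       /* Currently scheduled timeout            */
		__u32             lrcvtime;      /* timestamp of last received data packet */
		/* ... remaining members unchanged ... */
	} icsk_ack;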
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index 927c796..a934d29 100644
@@ -176,7 +176,6 @@ static void dccp_delack_timer(struct timer_list *t)
        bh_lock_sock(sk);
        if (sock_owned_by_user(sk)) {
                /* Try again later. */
-               icsk->icsk_ack.blocked = 1;
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
                sk_reset_timer(sk, &icsk->icsk_delack_timer,
                               jiffies + TCP_DELACK_MIN);
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index b457dd2..4148f5f 100644
@@ -564,7 +564,7 @@ void inet_csk_clear_xmit_timers(struct sock *sk)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
 
-       icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;
+       icsk->icsk_pending = icsk->icsk_ack.pending = 0;
 
        sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
        sk_stop_timer(sk, &icsk->icsk_delack_timer);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 2a8bfa8..ed28055 100644
@@ -1538,10 +1538,8 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
 
        if (inet_csk_ack_scheduled(sk)) {
                const struct inet_connection_sock *icsk = inet_csk(sk);
-                  /* Delayed ACKs frequently hit locked sockets during bulk
-                   * receive. */
-               if (icsk->icsk_ack.blocked ||
-                   /* Once-per-two-segments ACK was not sent by tcp_input.c */
+
+               if (/* Once-per-two-segments ACK was not sent by tcp_input.c */
                    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
                    /*
                     * If this read emptied read buffer, we send ACK, if
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 386978d..6bd4e38 100644
@@ -3911,11 +3911,8 @@ void tcp_send_delayed_ack(struct sock *sk)
 
        /* Use new timeout only if there wasn't a older one earlier. */
        if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
-               /* If delack timer was blocked or is about to expire,
-                * send ACK now.
-                */
-               if (icsk->icsk_ack.blocked ||
-                   time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
+               /* If delack timer is about to expire, send ACK now. */
+               if (time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
                        tcp_send_ack(sk);
                        return;
                }
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 0c08c42..6c62b9e 100644
@@ -331,7 +331,6 @@ static void tcp_delack_timer(struct timer_list *t)
        if (!sock_owned_by_user(sk)) {
                tcp_delack_timer_handler(sk);
        } else {
-               icsk->icsk_ack.blocked = 1;
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
                /* deleguate our work to tcp_release_cb() */
                if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags))
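
For completeness, a simplified sketch of the consumer side: release_sock()
invokes tcp_release_cb(), which atomically clears the deferred bits and
performs the work the timer had to skip.  Only the delayed-ACK branch is
shown; the real upstream function handles several other deferred events in
the same way, so read this as an illustration rather than the exact code:

	static void tcp_release_cb_sketch(struct sock *sk)
	{
		unsigned long flags, nflags;

		/* Atomically fetch and clear the deferred-event bit. */
		do {
			flags = sk->sk_tsq_flags;
			if (!(flags & TCPF_DELACK_TIMER_DEFERRED))
				return;
			nflags = flags & ~TCPF_DELACK_TIMER_DEFERRED;
		} while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags);

		tcp_delack_timer_handler(sk);	/* send the delayed ACK now */
		__sock_put(sk);			/* drop the reference taken by the timer */
	}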