OSDN Git Service

tcp: fix cwnd undo in Reno and HTCP congestion controls
authorYuchung Cheng <ycheng@google.com>
Fri, 4 Aug 2017 03:38:51 +0000 (20:38 -0700)
committerDavid S. Miller <davem@davemloft.net>
Mon, 7 Aug 2017 04:25:10 +0000 (21:25 -0700)
Using ssthresh to revert cwnd is less reliable when ssthresh is
bounded to 2 packets. This patch uses an existing variable in TCP
"prior_cwnd" that snapshots the cwnd right before entering fast
recovery and RTO recovery in Reno.  This fixes the issue discussed
in netdev thread: "A buggy behavior for Linux TCP Reno and HTCP"
https://www.spinics.net/lists/netdev/msg444955.html

Suggested-by: Neal Cardwell <ncardwell@google.com>
Reported-by: Wei Sun <unlcsewsun@gmail.com>
Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/linux/tcp.h
net/ipv4/tcp_cong.c
net/ipv4/tcp_htcp.c
net/ipv4/tcp_input.c

index d7389ea..267164a 100644 (file)
@@ -258,7 +258,7 @@ struct tcp_sock {
        u32     snd_cwnd_clamp; /* Do not allow snd_cwnd to grow above this */
        u32     snd_cwnd_used;
        u32     snd_cwnd_stamp;
-       u32     prior_cwnd;     /* Congestion window at start of Recovery. */
+       u32     prior_cwnd;     /* cwnd right before starting loss recovery */
        u32     prr_delivered;  /* Number of newly delivered packets to
                                 * receiver in Recovery. */
        u32     prr_out;        /* Total number of pkts sent during Recovery. */
index fde983f..c2b1744 100644 (file)
@@ -456,7 +456,7 @@ u32 tcp_reno_undo_cwnd(struct sock *sk)
 {
        const struct tcp_sock *tp = tcp_sk(sk);
 
-       return max(tp->snd_cwnd, tp->snd_ssthresh << 1);
+       return max(tp->snd_cwnd, tp->prior_cwnd);
 }
 EXPORT_SYMBOL_GPL(tcp_reno_undo_cwnd);
 
index 3eb78cd..082d479 100644 (file)
@@ -66,7 +66,6 @@ static inline void htcp_reset(struct htcp *ca)
 
 static u32 htcp_cwnd_undo(struct sock *sk)
 {
-       const struct tcp_sock *tp = tcp_sk(sk);
        struct htcp *ca = inet_csk_ca(sk);
 
        if (ca->undo_last_cong) {
@@ -76,7 +75,7 @@ static u32 htcp_cwnd_undo(struct sock *sk)
                ca->undo_last_cong = 0;
        }
 
-       return max(tp->snd_cwnd, (tp->snd_ssthresh << 7) / ca->beta);
+       return tcp_reno_undo_cwnd(sk);
 }
 
 static inline void measure_rtt(struct sock *sk, u32 srtt)
index 99cdf4c..842ed75 100644 (file)
@@ -1950,6 +1950,7 @@ void tcp_enter_loss(struct sock *sk)
            !after(tp->high_seq, tp->snd_una) ||
            (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
                tp->prior_ssthresh = tcp_current_ssthresh(sk);
+               tp->prior_cwnd = tp->snd_cwnd;
                tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
                tcp_ca_event(sk, CA_EVENT_LOSS);
                tcp_init_undo(tp);