
mptcp: use fast lock for subflows when possible
author    Paolo Abeni <pabeni@redhat.com>
          Mon, 21 Jun 2021 22:54:34 +0000 (15:54 -0700)
committer David S. Miller <davem@davemloft.net>
          Tue, 22 Jun 2021 16:57:45 +0000 (09:57 -0700)
There are a bunch of call sites where the ssk socket
lock is acquired with the full-blown version even though they are
eligible for the fast variant. Let's move to the latter.

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
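
Below is a minimal sketch of the conversion applied throughout this patch,
shown on a single pair of hypothetical helpers (the helper names are
illustrative only; lock_sock(), release_sock(), lock_sock_fast(),
unlock_sock_fast() and tcp_send_ack() are the in-kernel APIs used in the
diff that follows):

#include <net/sock.h>
#include <net/tcp.h>

/* Before: full-blown socket lock; may sleep and processes the socket
 * backlog on release.
 */
static void ack_with_full_lock(struct sock *ssk)
{
	lock_sock(ssk);
	tcp_send_ack(ssk);
	release_sock(ssk);
}

/* After: fast lock. lock_sock_fast() returns true when it had to fall
 * back to the slow path (socket currently owned by the user); that flag
 * must be handed back to unlock_sock_fast() so the lock is released the
 * matching way.
 */
static void ack_with_fast_lock(struct sock *ssk)
{
	bool slow = lock_sock_fast(ssk);

	tcp_send_ack(ssk);
	unlock_sock_fast(ssk, slow);
}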
net/mptcp/pm_netlink.c
net/mptcp/protocol.c

diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index 0972259..d4732a4 100644
@@ -540,6 +540,7 @@ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
        subflow = list_first_entry_or_null(&msk->conn_list, typeof(*subflow), node);
        if (subflow) {
                struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+               bool slow;
 
                spin_unlock_bh(&msk->pm.lock);
                pr_debug("send ack for %s%s%s",
@@ -547,9 +548,9 @@ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
                         mptcp_pm_should_add_signal_ipv6(msk) ? " [ipv6]" : "",
                         mptcp_pm_should_add_signal_port(msk) ? " [port]" : "");
 
-               lock_sock(ssk);
+               slow = lock_sock_fast(ssk);
                tcp_send_ack(ssk);
-               release_sock(ssk);
+               unlock_sock_fast(ssk, slow);
                spin_lock_bh(&msk->pm.lock);
        }
 }
@@ -566,6 +567,7 @@ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
                struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
                struct sock *sk = (struct sock *)msk;
                struct mptcp_addr_info local;
+               bool slow;
 
                local_address((struct sock_common *)ssk, &local);
                if (!addresses_equal(&local, addr, addr->port))
@@ -578,9 +580,9 @@ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
 
                spin_unlock_bh(&msk->pm.lock);
                pr_debug("send ack for mp_prio");
-               lock_sock(ssk);
+               slow = lock_sock_fast(ssk);
                tcp_send_ack(ssk);
-               release_sock(ssk);
+               unlock_sock_fast(ssk, slow);
                spin_lock_bh(&msk->pm.lock);
 
                return 0;
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 77c90d6..c47ce07 100644
@@ -433,23 +433,25 @@ static void mptcp_send_ack(struct mptcp_sock *msk)
 
        mptcp_for_each_subflow(msk, subflow) {
                struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+               bool slow;
 
-               lock_sock(ssk);
+               slow = lock_sock_fast(ssk);
                if (tcp_can_send_ack(ssk))
                        tcp_send_ack(ssk);
-               release_sock(ssk);
+               unlock_sock_fast(ssk, slow);
        }
 }
 
 static bool mptcp_subflow_cleanup_rbuf(struct sock *ssk)
 {
+       bool slow;
        int ret;
 
-       lock_sock(ssk);
+       slow = lock_sock_fast(ssk);
        ret = tcp_can_send_ack(ssk);
        if (ret)
                tcp_cleanup_rbuf(ssk, 1);
-       release_sock(ssk);
+       unlock_sock_fast(ssk, slow);
        return ret;
 }
 
@@ -2252,13 +2254,14 @@ static void mptcp_check_fastclose(struct mptcp_sock *msk)
 
        list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
                struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
+               bool slow;
 
-               lock_sock(tcp_sk);
+               slow = lock_sock_fast(tcp_sk);
                if (tcp_sk->sk_state != TCP_CLOSE) {
                        tcp_send_active_reset(tcp_sk, GFP_ATOMIC);
                        tcp_set_state(tcp_sk, TCP_CLOSE);
                }
-               release_sock(tcp_sk);
+               unlock_sock_fast(tcp_sk, slow);
        }
 
        inet_sk_state_store(sk, TCP_CLOSE);