OSDN Git Service

udp: introduce __sk_mem_schedule() usage
authorJason Xing <kernelxing@tencent.com>
Wed, 8 Mar 2023 02:11:53 +0000 (10:11 +0800)
committerJakub Kicinski <kuba@kernel.org>
Fri, 10 Mar 2023 07:32:12 +0000 (23:32 -0800)
Keep the accounting schema consistent across different protocols
with __sk_mem_schedule(). Besides, it adjusts a little bit on how
to calculate forward allocated memory compared to before. After
applied this patch, we could avoid receive path scheduling extra
amount of memory.

Link: https://lore.kernel.org/lkml/20230221110344.82818-1-kerneljasonxing@gmail.com/
Signed-off-by: Jason Xing <kernelxing@tencent.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Link: https://lore.kernel.org/r/20230308021153.99777-1-kerneljasonxing@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
net/ipv4/udp.c

index c605d17..dc8feb5 100644 (file)
@@ -1531,10 +1531,21 @@ static void busylock_release(spinlock_t *busy)
                spin_unlock(busy);
 }
 
+static int udp_rmem_schedule(struct sock *sk, int size)
+{
+       int delta;
+
+       delta = size - sk->sk_forward_alloc;
+       if (delta > 0 && !__sk_mem_schedule(sk, delta, SK_MEM_RECV))
+               return -ENOBUFS;
+
+       return 0;
+}
+
 int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
 {
        struct sk_buff_head *list = &sk->sk_receive_queue;
-       int rmem, delta, amt, err = -ENOMEM;
+       int rmem, err = -ENOMEM;
        spinlock_t *busy = NULL;
        int size;
 
@@ -1567,16 +1578,10 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
                goto uncharge_drop;
 
        spin_lock(&list->lock);
-       if (size >= sk->sk_forward_alloc) {
-               amt = sk_mem_pages(size);
-               delta = amt << PAGE_SHIFT;
-               if (!__sk_mem_raise_allocated(sk, delta, amt, SK_MEM_RECV)) {
-                       err = -ENOBUFS;
-                       spin_unlock(&list->lock);
-                       goto uncharge_drop;
-               }
-
-               sk->sk_forward_alloc += delta;
+       err = udp_rmem_schedule(sk, size);
+       if (err) {
+               spin_unlock(&list->lock);
+               goto uncharge_drop;
        }
 
        sk->sk_forward_alloc -= size;