OSDN Git Service

bpf: introduce frags support to bpf_prog_test_run_xdp()
authorLorenzo Bianconi <lorenzo@kernel.org>
Fri, 21 Jan 2022 10:09:58 +0000 (11:09 +0100)
committerAlexei Starovoitov <ast@kernel.org>
Fri, 21 Jan 2022 22:14:02 +0000 (14:14 -0800)
Introduce the capability to allocate xdp frags in the
bpf_prog_test_run_xdp() routine. This is a preliminary patch to
introduce the selftests for the new xdp frags eBPF helpers.

Acked-by: Toke Høiland-Jørgensen <toke@redhat.com>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Link: https://lore.kernel.org/r/b7c0e425a9287f00f601c4fc0de54738ec6ceeea.1642758637.git.lorenzo@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
net/bpf/test_run.c

index 67f7c7d..394dd48 100644 (file)
@@ -876,16 +876,16 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
                          union bpf_attr __user *uattr)
 {
        u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-       u32 headroom = XDP_PACKET_HEADROOM;
        u32 size = kattr->test.data_size_in;
+       u32 headroom = XDP_PACKET_HEADROOM;
+       u32 retval, duration, max_data_sz;
        u32 repeat = kattr->test.repeat;
        struct netdev_rx_queue *rxqueue;
+       struct skb_shared_info *sinfo;
        struct xdp_buff xdp = {};
-       u32 retval, duration;
+       int i, ret = -EINVAL;
        struct xdp_md *ctx;
-       u32 max_data_sz;
        void *data;
-       int ret = -EINVAL;
 
        if (prog->expected_attach_type == BPF_XDP_DEVMAP ||
            prog->expected_attach_type == BPF_XDP_CPUMAP)
@@ -905,27 +905,60 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
                headroom -= ctx->data;
        }
 
-       /* XDP have extra tailroom as (most) drivers use full page */
        max_data_sz = 4096 - headroom - tailroom;
+       size = min_t(u32, size, max_data_sz);
 
-       data = bpf_test_init(kattr, kattr->test.data_size_in,
-                            max_data_sz, headroom, tailroom);
+       data = bpf_test_init(kattr, size, max_data_sz, headroom, tailroom);
        if (IS_ERR(data)) {
                ret = PTR_ERR(data);
                goto free_ctx;
        }
 
        rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
-       xdp_init_buff(&xdp, headroom + max_data_sz + tailroom,
-                     &rxqueue->xdp_rxq);
+       rxqueue->xdp_rxq.frag_size = headroom + max_data_sz + tailroom;
+       xdp_init_buff(&xdp, rxqueue->xdp_rxq.frag_size, &rxqueue->xdp_rxq);
        xdp_prepare_buff(&xdp, data, headroom, size, true);
+       sinfo = xdp_get_shared_info_from_buff(&xdp);
 
        ret = xdp_convert_md_to_buff(ctx, &xdp);
        if (ret)
                goto free_data;
 
+       if (unlikely(kattr->test.data_size_in > size)) {
+               void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
+
+               while (size < kattr->test.data_size_in) {
+                       struct page *page;
+                       skb_frag_t *frag;
+                       int data_len;
+
+                       page = alloc_page(GFP_KERNEL);
+                       if (!page) {
+                               ret = -ENOMEM;
+                               goto out;
+                       }
+
+                       frag = &sinfo->frags[sinfo->nr_frags++];
+                       __skb_frag_set_page(frag, page);
+
+                       data_len = min_t(int, kattr->test.data_size_in - size,
+                                        PAGE_SIZE);
+                       skb_frag_size_set(frag, data_len);
+
+                       if (copy_from_user(page_address(page), data_in + size,
+                                          data_len)) {
+                               ret = -EFAULT;
+                               goto out;
+                       }
+                       sinfo->xdp_frags_size += data_len;
+                       size += data_len;
+               }
+               xdp_buff_set_frags_flag(&xdp);
+       }
+
        if (repeat > 1)
                bpf_prog_change_xdp(NULL, prog);
+
        ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
        /* We convert the xdp_buff back to an xdp_md before checking the return
         * code so the reference count of any held netdevice will be decremented
@@ -935,10 +968,7 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
        if (ret)
                goto out;
 
-       if (xdp.data_meta != data + headroom ||
-           xdp.data_end != xdp.data_meta + size)
-               size = xdp.data_end - xdp.data_meta;
-
+       size = xdp.data_end - xdp.data_meta + sinfo->xdp_frags_size;
        ret = bpf_test_finish(kattr, uattr, xdp.data_meta, size, retval,
                              duration);
        if (!ret)
@@ -949,6 +979,8 @@ out:
        if (repeat > 1)
                bpf_prog_change_xdp(prog, NULL);
 free_data:
+       for (i = 0; i < sinfo->nr_frags; i++)
+               __free_page(skb_frag_page(&sinfo->frags[i]));
        kfree(data);
 free_ctx:
        kfree(ctx);