xsk: proper fill queue descriptor validation
author    Björn Töpel <bjorn.topel@intel.com>      Mon, 4 Jun 2018 11:57:11 +0000 (13:57 +0200)
committer Daniel Borkmann <daniel@iogearbox.net>   Mon, 4 Jun 2018 15:21:02 +0000 (17:21 +0200)
Previously, the fill queue descriptor was not copied to kernel space
prior to validating it, making it possible for userland to change the
descriptor after the kernel had validated it. The descriptor is now
read into kernel memory with READ_ONCE() and validated there, so the
value that is checked is the value that gets used.

Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
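
The bug class is a time-of-check/time-of-use (TOCTOU) race: the fill
queue ring is mmapped into userspace, so validating a descriptor in
place leaves a window in which userland can rewrite the slot between
the kernel's check and its use. Below is a minimal sketch of the racy
and the fixed pattern; id_is_valid() and NUM_FRAMES are hypothetical
stand-ins for xskq_is_valid_id() and the real umem bounds.

#include <linux/compiler.h>	/* READ_ONCE() */
#include <linux/types.h>

#define NUM_FRAMES 1024			/* hypothetical umem size */

static bool id_is_valid(u32 id)		/* stand-in for xskq_is_valid_id() */
{
	return id < NUM_FRAMES;
}

/* Racy: the check reads the user-mapped slot, and the caller will
 * dereference the returned pointer again later -- userspace can change
 * the slot in between, so the value used may never have been validated.
 */
static u32 *peek_id_racy(u32 *slot)
{
	return id_is_valid(*slot) ? slot : NULL;
}

/* Fixed: snapshot the slot exactly once with READ_ONCE() into kernel
 * memory, then check and use only that private copy.
 */
static bool peek_id_safe(u32 *slot, u32 *id)
{
	*id = READ_ONCE(*slot);
	return id_is_valid(*id);
}
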
net/xdp/xsk.c
net/xdp/xsk_queue.h

diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index cce0e4f..43554eb 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -41,20 +41,19 @@ bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
 
 static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
 {
-       u32 *id, len = xdp->data_end - xdp->data;
+       u32 id, len = xdp->data_end - xdp->data;
        void *buffer;
-       int err = 0;
+       int err;
 
        if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
                return -EINVAL;
 
-       id = xskq_peek_id(xs->umem->fq);
-       if (!id)
+       if (!xskq_peek_id(xs->umem->fq, &id))
                return -ENOSPC;
 
-       buffer = xdp_umem_get_data_with_headroom(xs->umem, *id);
+       buffer = xdp_umem_get_data_with_headroom(xs->umem, id);
        memcpy(buffer, xdp->data, len);
-       err = xskq_produce_batch_desc(xs->rx, *id, len,
+       err = xskq_produce_batch_desc(xs->rx, id, len,
                                      xs->umem->frame_headroom);
        if (!err)
                xskq_discard_id(xs->umem->fq);
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index cb8e5be..b5924e7 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -85,14 +85,15 @@ static inline bool xskq_is_valid_id(struct xsk_queue *q, u32 idx)
        return true;
 }
 
-static inline u32 *xskq_validate_id(struct xsk_queue *q)
+static inline u32 *xskq_validate_id(struct xsk_queue *q, u32 *id)
 {
        while (q->cons_tail != q->cons_head) {
                struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
                unsigned int idx = q->cons_tail & q->ring_mask;
 
-               if (xskq_is_valid_id(q, ring->desc[idx]))
-                       return &ring->desc[idx];
+               *id = READ_ONCE(ring->desc[idx]);
+               if (xskq_is_valid_id(q, *id))
+                       return id;
 
                q->cons_tail++;
        }
@@ -100,28 +101,22 @@ static inline u32 *xskq_validate_id(struct xsk_queue *q)
        return NULL;
 }
 
-static inline u32 *xskq_peek_id(struct xsk_queue *q)
+static inline u32 *xskq_peek_id(struct xsk_queue *q, u32 *id)
 {
-       struct xdp_umem_ring *ring;
-
        if (q->cons_tail == q->cons_head) {
                WRITE_ONCE(q->ring->consumer, q->cons_tail);
                q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);
 
                /* Order consumer and data */
                smp_rmb();
-
-               return xskq_validate_id(q);
        }
 
-       ring = (struct xdp_umem_ring *)q->ring;
-       return &ring->desc[q->cons_tail & q->ring_mask];
+       return xskq_validate_id(q, id);
 }
 
 static inline void xskq_discard_id(struct xsk_queue *q)
 {
        q->cons_tail++;
-       (void)xskq_validate_id(q);
 }
 
 static inline int xskq_produce_id(struct xsk_queue *q, u32 id)
@@ -174,11 +169,9 @@ static inline struct xdp_desc *xskq_validate_desc(struct xsk_queue *q,
                struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
                unsigned int idx = q->cons_tail & q->ring_mask;
 
-               if (xskq_is_valid_desc(q, &ring->desc[idx])) {
-                       if (desc)
-                               *desc = ring->desc[idx];
+               *desc = READ_ONCE(ring->desc[idx]);
+               if (xskq_is_valid_desc(q, desc))
                        return desc;
-               }
 
                q->cons_tail++;
        }
@@ -189,27 +182,20 @@ static inline struct xdp_desc *xskq_validate_desc(struct xsk_queue *q,
 static inline struct xdp_desc *xskq_peek_desc(struct xsk_queue *q,
                                              struct xdp_desc *desc)
 {
-       struct xdp_rxtx_ring *ring;
-
        if (q->cons_tail == q->cons_head) {
                WRITE_ONCE(q->ring->consumer, q->cons_tail);
                q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);
 
                /* Order consumer and data */
                smp_rmb();
-
-               return xskq_validate_desc(q, desc);
        }
 
-       ring = (struct xdp_rxtx_ring *)q->ring;
-       *desc = ring->desc[q->cons_tail & q->ring_mask];
-       return desc;
+       return xskq_validate_desc(q, desc);
 }
 
 static inline void xskq_discard_desc(struct xsk_queue *q)
 {
        q->cons_tail++;
-       (void)xskq_validate_desc(q, NULL);
 }
 
 static inline int xskq_produce_batch_desc(struct xsk_queue *q,
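
With every peek now copying and validating a private descriptor,
xskq_discard_id() and xskq_discard_desc() shrink to a bare cons_tail
increment; the speculative revalidation of the next entry is gone, and
a consumer follows a plain peek/use/discard sequence. A simplified
sketch of that pattern, modeled on __xsk_rcv from the first hunk
(xsk_rx_one is a made-up wrapper name; driver context elided):

static int xsk_rx_one(struct xdp_sock *xs, void *data, u32 len)
{
	u32 id;
	int err;

	/* copies the next fill-queue id into 'id' and validates the copy */
	if (!xskq_peek_id(xs->umem->fq, &id))
		return -ENOSPC;

	memcpy(xdp_umem_get_data_with_headroom(xs->umem, id), data, len);

	err = xskq_produce_batch_desc(xs->rx, id, len,
				      xs->umem->frame_headroom);
	if (!err)
		xskq_discard_id(xs->umem->fq);	/* just cons_tail++ now */

	return err;
}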