net: remove unsafe skb_insert()
author    Eric Dumazet <edumazet@google.com>
          Sun, 25 Nov 2018 16:26:23 +0000 (08:26 -0800)
committer David S. Miller <davem@davemloft.net>
          Sun, 25 Nov 2018 18:36:19 +0000 (10:36 -0800)

I do not see how one can effectively use skb_insert() without holding
some kind of lock. Otherwise, other CPUs could have changed the list
right before we get a chance to acquire list->lock.
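For reference, a minimal sketch (not from this patch) of the pattern callers
actually need: the walk that finds the insertion point and the link operation
must sit under a single acquisition of list->lock, which skb_insert() taking
the lock internally cannot provide. The helper name skb_insert_sorted() and
the match callback are hypothetical.

#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Hypothetical helper, not from this patch: insert @newsk in front of the
 * first queued skb that @match accepts, or at the tail if none does.
 * The walk and __skb_insert() share one lock acquisition, so no other CPU
 * can reorder the list between the lookup and the insert.
 */
static void skb_insert_sorted(struct sk_buff_head *list,
                              struct sk_buff *newsk,
                              bool (*match)(const struct sk_buff *skb))
{
        struct sk_buff *pos;
        unsigned long flags;

        spin_lock_irqsave(&list->lock, flags);
        skb_queue_walk(list, pos) {
                if (match(pos))
                        break;
        }
        /* If the walk reached the end, pos aliases the list head and the
         * new skb is linked at the tail.
         */
        __skb_insert(newsk, pos->prev, pos, list);
        spin_unlock_irqrestore(&list->lock, flags);
}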

The only existing user is in drivers/infiniband/hw/nes/nes_mgt.c, and it
probably meant to use __skb_insert(), since nesqp->pau_list appears to be
protected by nesqp->pau_lock. It also looks like nesqp->pau_lock could be
removed entirely, since nesqp->pau_list.lock could be used instead.
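A hedged sketch of that suggested cleanup (explicitly not part of this patch):
queue_fpdus() could take the queue's own lock instead of nesqp->pau_lock,
keeping the walk and the insert atomic. The sketch reuses the driver's locals
visible in the hunk below (skb, tmpskb, cb, seqnum) plus an assumed
unsigned long flags already declared by the function.

        spin_lock_irqsave(&nesqp->pau_list.lock, flags);

        /* Queue skb by sequence number */
        if (skb_queue_len(&nesqp->pau_list) == 0) {
                __skb_queue_head(&nesqp->pau_list, skb);
        } else {
                skb_queue_walk(&nesqp->pau_list, tmpskb) {
                        cb = (struct nes_rskb_cb *)&tmpskb->cb[0];
                        if (before(seqnum, cb->seqnum))
                                break;
                }
                __skb_insert(skb, tmpskb->prev, tmpskb, &nesqp->pau_list);
        }

        spin_unlock_irqrestore(&nesqp->pau_list.lock, flags);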

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Faisal Latif <faisal.latif@intel.com>
Cc: Doug Ledford <dledford@redhat.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: linux-rdma <linux-rdma@vger.kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/infiniband/hw/nes/nes_mgt.c
include/linux/skbuff.h
net/core/skbuff.c

diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
index fc0c191..cc4dce5 100644
@@ -551,14 +551,14 @@ static void queue_fpdus(struct sk_buff *skb, struct nes_vnic *nesvnic, struct ne
 
        /* Queue skb by sequence number */
        if (skb_queue_len(&nesqp->pau_list) == 0) {
-               skb_queue_head(&nesqp->pau_list, skb);
+               __skb_queue_head(&nesqp->pau_list, skb);
        } else {
                skb_queue_walk(&nesqp->pau_list, tmpskb) {
                        cb = (struct nes_rskb_cb *)&tmpskb->cb[0];
                        if (before(seqnum, cb->seqnum))
                                break;
                }
-               skb_insert(tmpskb, skb, &nesqp->pau_list);
+               __skb_insert(skb, tmpskb->prev, tmpskb, &nesqp->pau_list);
        }
        if (nesqp->pau_state == PAU_READY)
                process_it = true;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index f17a745..73902ac 100644
@@ -1749,8 +1749,6 @@ static inline void skb_queue_head_init_class(struct sk_buff_head *list,
  *     The "__skb_xxxx()" functions are the non-atomic ones that
  *     can only be called with interrupts disabled.
  */
-void skb_insert(struct sk_buff *old, struct sk_buff *newsk,
-               struct sk_buff_head *list);
 static inline void __skb_insert(struct sk_buff *newsk,
                                struct sk_buff *prev, struct sk_buff *next,
                                struct sk_buff_head *list)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 9a8a72c..02cd7ae 100644
@@ -2990,28 +2990,6 @@ void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head
 }
 EXPORT_SYMBOL(skb_append);
 
-/**
- *     skb_insert      -       insert a buffer
- *     @old: buffer to insert before
- *     @newsk: buffer to insert
- *     @list: list to use
- *
- *     Place a packet before a given packet in a list. The list locks are
- *     taken and this function is atomic with respect to other list locked
- *     calls.
- *
- *     A buffer cannot be placed on two lists at the same time.
- */
-void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&list->lock, flags);
-       __skb_insert(newsk, old->prev, old, list);
-       spin_unlock_irqrestore(&list->lock, flags);
-}
-EXPORT_SYMBOL(skb_insert);
-
 static inline void skb_split_inside_header(struct sk_buff *skb,
                                           struct sk_buff* skb1,
                                           const u32 len, const int pos)