struct sk_buff *skb2, *nskb, *pskb = NULL;
netdev_features_t esp_features = features;
struct xfrm_offload *xo = xfrm_offload(skb);
+ struct net_device *dev = skb->dev;
struct sec_path *sp;
	if (!xo)
		return skb;

if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
return skb;
+ /* This skb was already validated on the master dev */
+ if ((x->xso.dev != dev) && (x->xso.slave_dev == dev))
+ return skb;
+
	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	err = !skb_queue_empty(&sd->xfrm_backlog);
	local_irq_restore(flags);

	if (err) {
		*again = true;
		return skb;
	}
- if (skb_is_gso(skb)) {
- struct net_device *dev = skb->dev;
-
- if (unlikely(x->xso.dev != dev)) {
- struct sk_buff *segs;
+ if (skb_is_gso(skb) && unlikely(x->xso.dev != dev)) {
+ struct sk_buff *segs;
- /* Packet got rerouted, fixup features and segment it. */
- esp_features = esp_features & ~(NETIF_F_HW_ESP
- | NETIF_F_GSO_ESP);
+ /* Packet got rerouted, fixup features and segment it. */
+ esp_features = esp_features & ~(NETIF_F_HW_ESP | NETIF_F_GSO_ESP);
- segs = skb_gso_segment(skb, esp_features);
- if (IS_ERR(segs)) {
- kfree_skb(skb);
- atomic_long_inc(&dev->tx_dropped);
- return NULL;
- } else {
- consume_skb(skb);
- skb = segs;
- }
+ segs = skb_gso_segment(skb, esp_features);
+ if (IS_ERR(segs)) {
+ kfree_skb(skb);
+ atomic_long_inc(&dev->tx_dropped);
+ return NULL;
+ } else {
+ consume_skb(skb);
+ skb = segs;
}
}
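
Note: the new early return above only fires once something has populated x->xso.slave_dev for the offloaded state. As a point of reference, below is a minimal sketch of how a master device's xdo_dev_state_add() callback could record the lower device that actually owns the crypto offload; the names example_master_xdo_dev_state_add and example_pick_offload_slave are illustrative assumptions and not part of this patch.

#include <linux/netdevice.h>
#include <net/xfrm.h>

/* Assumed helper: choose the lower device that will perform the crypto. */
static struct net_device *example_pick_offload_slave(struct net_device *master);

/* Illustrative sketch only: record the lower device on the offload state so
 * that validate_xmit_xfrm() can skip re-validating an skb that re-enters the
 * xmit path on the slave after being validated on the master.
 */
static int example_master_xdo_dev_state_add(struct xfrm_state *xs)
{
	/* xso.dev was set to the master device by xfrm_dev_state_add() */
	struct net_device *master = xs->xso.dev;
	struct net_device *slave = example_pick_offload_slave(master);

	if (!slave || !slave->xfrmdev_ops ||
	    !slave->xfrmdev_ops->xdo_dev_state_add)
		return -EOPNOTSUPP;

	/* Consumed by the (x->xso.slave_dev == dev) check added above. */
	xs->xso.slave_dev = slave;
	return slave->xfrmdev_ops->xdo_dev_state_add(xs);
}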