OSDN Git Service

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
authorDavid S. Miller <davem@davemloft.net>
Wed, 6 Aug 2014 01:46:26 +0000 (18:46 -0700)
committerDavid S. Miller <davem@davemloft.net>
Wed, 6 Aug 2014 01:46:26 +0000 (18:46 -0700)
Conflicts:
drivers/net/Makefile
net/ipv6/sysctl_net_ipv6.c

Two ipv6_table_template[] additions overlap, so the index
of the ipv6_table[x] assignments needed to be adjusted.

In the drivers/net/Makefile case, we've gotten rid of the
garbage whereby we had to list every single USB networking
driver in the top-level Makefile; now there is just one
"USB_NETWORKING" option that guards everything.

Signed-off-by: David S. Miller <davem@davemloft.net>
27 files changed:
drivers/net/bonding/bond_sysfs_slave.c
drivers/net/ethernet/allwinner/sun4i-emac.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/brocade/bna/bnad.c
drivers/net/ethernet/brocade/bna/bnad_ethtool.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/macvlan.c
drivers/net/phy/mdio_bus.c
drivers/net/usb/cdc_subset.c
drivers/net/usb/usbnet.c
drivers/net/xen-netfront.c
include/linux/usb/usbnet.h
include/net/ip_tunnels.h
lib/iovec.c
net/batman-adv/fragmentation.c
net/bridge/br_fdb.c
net/core/skbuff.c
net/ipv4/ip_tunnel.c
net/ipv4/tcp_vegas.c
net/ipv4/tcp_veno.c
net/ipv6/sysctl_net_ipv6.c
net/netfilter/ipvs/ip_vs_xmit.c
net/netfilter/nf_tables_api.c
net/netfilter/xt_LED.c
net/sctp/output.c

index 198677f..5cd532c 100644 (file)
@@ -125,7 +125,7 @@ int bond_sysfs_slave_add(struct slave *slave)
        for (a = slave_attrs; *a; ++a) {
                err = sysfs_create_file(&slave->kobj, &((*a)->attr));
                if (err) {
-                       kobject_del(&slave->kobj);
+                       kobject_put(&slave->kobj);
                        return err;
                }
        }
@@ -140,5 +140,5 @@ void bond_sysfs_slave_del(struct slave *slave)
        for (a = slave_attrs; *a; ++a)
                sysfs_remove_file(&slave->kobj, &((*a)->attr));
 
-       kobject_del(&slave->kobj);
+       kobject_put(&slave->kobj);
 }
index d81e716..29b9f08 100644 (file)
@@ -633,8 +633,10 @@ static void emac_rx(struct net_device *dev)
                }
 
                /* Move data from EMAC */
-               skb = dev_alloc_skb(rxlen + 4);
-               if (good_packet && skb) {
+               if (good_packet) {
+                       skb = netdev_alloc_skb(dev, rxlen + 4);
+                       if (!skb)
+                               continue;
                        skb_reserve(skb, 2);
                        rdptr = (u8 *) skb_put(skb, rxlen - 4);
 
index 8afa579..a3dd5dc 100644 (file)
@@ -7830,17 +7830,18 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
 
 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
 
-/* Use GSO to workaround a rare TSO bug that may be triggered when the
- * TSO header is greater than 80 bytes.
+/* Use GSO to workaround all TSO packets that meet HW bug conditions
+ * indicated in tg3_tx_frag_set()
  */
-static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
+static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
+                      struct netdev_queue *txq, struct sk_buff *skb)
 {
        struct sk_buff *segs, *nskb;
        u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
 
        /* Estimate the number of fragments in the worst case */
-       if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
-               netif_stop_queue(tp->dev);
+       if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
+               netif_tx_stop_queue(txq);
 
                /* netif_tx_stop_queue() must be done before checking
                 * checking tx index in tg3_tx_avail() below, because in
@@ -7848,13 +7849,14 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
                 * netif_tx_queue_stopped().
                 */
                smp_mb();
-               if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
+               if (tg3_tx_avail(tnapi) <= frag_cnt_est)
                        return NETDEV_TX_BUSY;
 
-               netif_wake_queue(tp->dev);
+               netif_tx_wake_queue(txq);
        }
 
-       segs = skb_gso_segment(skb, tp->dev->features & ~(NETIF_F_TSO | NETIF_F_TSO6));
+       segs = skb_gso_segment(skb, tp->dev->features &
+                                   ~(NETIF_F_TSO | NETIF_F_TSO6));
        if (IS_ERR(segs) || !segs)
                goto tg3_tso_bug_end;
 
@@ -7930,7 +7932,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
                if (!skb_is_gso_v6(skb)) {
                        if (unlikely((ETH_HLEN + hdr_len) > 80) &&
                            tg3_flag(tp, TSO_BUG))
-                               return tg3_tso_bug(tp, skb);
+                               return tg3_tso_bug(tp, tnapi, txq, skb);
 
                        ip_csum = iph->check;
                        ip_tot_len = iph->tot_len;
@@ -8061,7 +8063,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
                                iph->tot_len = ip_tot_len;
                        }
                        tcph->check = tcp_csum;
-                       return tg3_tso_bug(tp, skb);
+                       return tg3_tso_bug(tp, tnapi, txq, skb);
                }
 
                /* If the workaround fails due to memory/mapping
index 3a77f9e..556aab7 100644 (file)
@@ -600,9 +600,9 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
        prefetch(bnad->netdev);
 
        cq = ccb->sw_q;
-       cmpl = &cq[ccb->producer_index];
 
        while (packets < budget) {
+               cmpl = &cq[ccb->producer_index];
                if (!cmpl->valid)
                        break;
                /* The 'valid' field is set by the adapter, only after writing
index 882cad7..d26adac 100644 (file)
@@ -997,10 +997,8 @@ bnad_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
        unsigned long flags = 0;
        int ret = 0;
 
-       /* Check if the flash read request is valid */
-       if (eeprom->magic != (bnad->pcidev->vendor |
-                            (bnad->pcidev->device << 16)))
-               return -EFAULT;
+       /* Fill the magic value */
+       eeprom->magic = bnad->pcidev->vendor | (bnad->pcidev->device << 16);
 
        /* Query the flash partition based on the offset */
        flash_part = bnad_get_flash_partition_by_offset(bnad,
index 304e247..ffbae29 100644 (file)
@@ -136,7 +136,7 @@ int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
        rsp = qlcnic_poll_rsp(adapter);
 
        if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
-               dev_err(&pdev->dev, "card response timeout.\n");
+               dev_err(&pdev->dev, "command timeout, response = 0x%x\n", rsp);
                cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
        } else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
                cmd->rsp.arg[0] = QLCRD32(adapter, QLCNIC_CDRP_ARG(1), &err);
index 1b7f3db..141f116 100644 (file)
@@ -1290,17 +1290,25 @@ static u64 *qlcnic_fill_stats(u64 *data, void *stats, int type)
 
 void qlcnic_update_stats(struct qlcnic_adapter *adapter)
 {
+       struct qlcnic_tx_queue_stats tx_stats;
        struct qlcnic_host_tx_ring *tx_ring;
        int ring;
 
+       memset(&tx_stats, 0, sizeof(tx_stats));
        for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
                tx_ring = &adapter->tx_ring[ring];
-               adapter->stats.xmit_on += tx_ring->tx_stats.xmit_on;
-               adapter->stats.xmit_off += tx_ring->tx_stats.xmit_off;
-               adapter->stats.xmitcalled += tx_ring->tx_stats.xmit_called;
-               adapter->stats.xmitfinished += tx_ring->tx_stats.xmit_finished;
-               adapter->stats.txbytes += tx_ring->tx_stats.tx_bytes;
+               tx_stats.xmit_on += tx_ring->tx_stats.xmit_on;
+               tx_stats.xmit_off += tx_ring->tx_stats.xmit_off;
+               tx_stats.xmit_called += tx_ring->tx_stats.xmit_called;
+               tx_stats.xmit_finished += tx_ring->tx_stats.xmit_finished;
+               tx_stats.tx_bytes += tx_ring->tx_stats.tx_bytes;
        }
+
+       adapter->stats.xmit_on = tx_stats.xmit_on;
+       adapter->stats.xmit_off = tx_stats.xmit_off;
+       adapter->stats.xmitcalled = tx_stats.xmit_called;
+       adapter->stats.xmitfinished = tx_stats.xmit_finished;
+       adapter->stats.txbytes = tx_stats.tx_bytes;
 }
 
 static u64 *qlcnic_fill_tx_queue_stats(u64 *data, void *stats)
index 0fdbcc8..59846da 100644 (file)
@@ -2324,14 +2324,14 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
        if (err)
                return err;
 
+       qlcnic_dcb_init_dcbnl_ops(adapter->dcb);
+
        err = register_netdev(netdev);
        if (err) {
                dev_err(&pdev->dev, "failed to register net device\n");
                return err;
        }
 
-       qlcnic_dcb_init_dcbnl_ops(adapter->dcb);
-
        return 0;
 }
 
@@ -2624,13 +2624,13 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (err)
                goto err_out_disable_mbx_intr;
 
+       if (adapter->portnum == 0)
+               qlcnic_set_drv_version(adapter);
+
        err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
        if (err)
                goto err_out_disable_mbx_intr;
 
-       if (adapter->portnum == 0)
-               qlcnic_set_drv_version(adapter);
-
        pci_set_drvdata(pdev, adapter);
 
        if (qlcnic_82xx_check(adapter))
index 958df38..ef8a5c2 100644 (file)
@@ -646,6 +646,7 @@ static int macvlan_init(struct net_device *dev)
                                  (lowerdev->state & MACVLAN_STATE_MASK);
        dev->features           = lowerdev->features & MACVLAN_FEATURES;
        dev->features           |= ALWAYS_ON_FEATURES;
+       dev->vlan_features      = lowerdev->vlan_features & MACVLAN_FEATURES;
        dev->gso_max_size       = lowerdev->gso_max_size;
        dev->iflink             = lowerdev->ifindex;
        dev->hard_header_len    = lowerdev->hard_header_len;
index 203651e..4eaadcf 100644 (file)
@@ -255,7 +255,6 @@ int mdiobus_register(struct mii_bus *bus)
 
        bus->dev.parent = bus->parent;
        bus->dev.class = &mdio_bus_class;
-       bus->dev.driver = bus->parent->driver;
        bus->dev.groups = NULL;
        dev_set_name(&bus->dev, "%s", bus->id);
 
index 91f0919..6ea98cf 100644 (file)
@@ -85,14 +85,28 @@ static int always_connected (struct usbnet *dev)
  *
  *-------------------------------------------------------------------------*/
 
+static void m5632_recover(struct usbnet *dev)
+{
+       struct usb_device       *udev = dev->udev;
+       struct usb_interface    *intf = dev->intf;
+       int r;
+
+       r = usb_lock_device_for_reset(udev, intf);
+       if (r < 0)
+               return;
+
+       usb_reset_device(udev);
+       usb_unlock_device(udev);
+}
+
 static const struct driver_info        ali_m5632_info = {
        .description =  "ALi M5632",
        .flags       = FLAG_POINTTOPOINT,
+       .recover     = m5632_recover,
 };
 
 #endif
 
-\f
 #ifdef CONFIG_USB_AN2720
 #define        HAVE_HARDWARE
 
@@ -326,12 +340,23 @@ static const struct usb_device_id products [] = {
 MODULE_DEVICE_TABLE(usb, products);
 
 /*-------------------------------------------------------------------------*/
+static int dummy_prereset(struct usb_interface *intf)
+{
+        return 0;
+}
+
+static int dummy_postreset(struct usb_interface *intf)
+{
+        return 0;
+}
 
 static struct usb_driver cdc_subset_driver = {
        .name =         "cdc_subset",
        .probe =        usbnet_probe,
        .suspend =      usbnet_suspend,
        .resume =       usbnet_resume,
+       .pre_reset =    dummy_prereset,
+       .post_reset =   dummy_postreset,
        .disconnect =   usbnet_disconnect,
        .id_table =     products,
        .disable_hub_initiated_lpm = 1,
index f9e96c4..5173821 100644 (file)
@@ -1218,8 +1218,12 @@ void usbnet_tx_timeout (struct net_device *net)
 
        unlink_urbs (dev, &dev->txq);
        tasklet_schedule (&dev->bh);
-
-       // FIXME: device recovery -- reset?
+       /* this needs to be handled individually because the generic layer
+        * doesn't know what is sufficient and could not restore private
+        * information if a remedy of an unconditional reset were used.
+        */
+       if (dev->driver_info->recover)
+               (dev->driver_info->recover)(dev);
 }
 EXPORT_SYMBOL_GPL(usbnet_tx_timeout);
 
index 055222b..28204bc 100644 (file)
@@ -1196,22 +1196,6 @@ static void xennet_release_rx_bufs(struct netfront_queue *queue)
        spin_unlock_bh(&queue->rx_lock);
 }
 
-static void xennet_uninit(struct net_device *dev)
-{
-       struct netfront_info *np = netdev_priv(dev);
-       unsigned int num_queues = dev->real_num_tx_queues;
-       struct netfront_queue *queue;
-       unsigned int i;
-
-       for (i = 0; i < num_queues; ++i) {
-               queue = &np->queues[i];
-               xennet_release_tx_bufs(queue);
-               xennet_release_rx_bufs(queue);
-               gnttab_free_grant_references(queue->gref_tx_head);
-               gnttab_free_grant_references(queue->gref_rx_head);
-       }
-}
-
 static netdev_features_t xennet_fix_features(struct net_device *dev,
        netdev_features_t features)
 {
@@ -1313,7 +1297,6 @@ static void xennet_poll_controller(struct net_device *dev)
 
 static const struct net_device_ops xennet_netdev_ops = {
        .ndo_open            = xennet_open,
-       .ndo_uninit          = xennet_uninit,
        .ndo_stop            = xennet_close,
        .ndo_start_xmit      = xennet_start_xmit,
        .ndo_change_mtu      = xennet_change_mtu,
@@ -1455,6 +1438,11 @@ static void xennet_disconnect_backend(struct netfront_info *info)
 
                napi_synchronize(&queue->napi);
 
+               xennet_release_tx_bufs(queue);
+               xennet_release_rx_bufs(queue);
+               gnttab_free_grant_references(queue->gref_tx_head);
+               gnttab_free_grant_references(queue->gref_rx_head);
+
                /* End access and free the pages */
                xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
                xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
@@ -1827,8 +1815,8 @@ static int xennet_create_queues(struct netfront_info *info,
 
                ret = xennet_init_queue(queue);
                if (ret < 0) {
-                       dev_warn(&info->netdev->dev, "only created %d queues\n",
-                                num_queues);
+                       dev_warn(&info->netdev->dev,
+                                "only created %d queues\n", i);
                        num_queues = i;
                        break;
                }
@@ -2001,7 +1989,7 @@ abort_transaction_no_dev_fatal:
        info->queues = NULL;
        rtnl_lock();
        netif_set_real_num_tx_queues(info->netdev, 0);
-       rtnl_lock();
+       rtnl_unlock();
  out:
        return err;
 }
@@ -2010,10 +1998,7 @@ static int xennet_connect(struct net_device *dev)
 {
        struct netfront_info *np = netdev_priv(dev);
        unsigned int num_queues = 0;
-       int i, requeue_idx, err;
-       struct sk_buff *skb;
-       grant_ref_t ref;
-       struct xen_netif_rx_request *req;
+       int err;
        unsigned int feature_rx_copy;
        unsigned int j = 0;
        struct netfront_queue *queue = NULL;
@@ -2040,47 +2025,8 @@ static int xennet_connect(struct net_device *dev)
        netdev_update_features(dev);
        rtnl_unlock();
 
-       /* By now, the queue structures have been set up */
-       for (j = 0; j < num_queues; ++j) {
-               queue = &np->queues[j];
-
-               /* Step 1: Discard all pending TX packet fragments. */
-               spin_lock_irq(&queue->tx_lock);
-               xennet_release_tx_bufs(queue);
-               spin_unlock_irq(&queue->tx_lock);
-
-               /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
-               spin_lock_bh(&queue->rx_lock);
-
-               for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
-                       skb_frag_t *frag;
-                       const struct page *page;
-                       if (!queue->rx_skbs[i])
-                               continue;
-
-                       skb = queue->rx_skbs[requeue_idx] = xennet_get_rx_skb(queue, i);
-                       ref = queue->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(queue, i);
-                       req = RING_GET_REQUEST(&queue->rx, requeue_idx);
-
-                       frag = &skb_shinfo(skb)->frags[0];
-                       page = skb_frag_page(frag);
-                       gnttab_grant_foreign_access_ref(
-                               ref, queue->info->xbdev->otherend_id,
-                               pfn_to_mfn(page_to_pfn(page)),
-                               0);
-                       req->gref = ref;
-                       req->id   = requeue_idx;
-
-                       requeue_idx++;
-               }
-
-               queue->rx.req_prod_pvt = requeue_idx;
-
-               spin_unlock_bh(&queue->rx_lock);
-       }
-
        /*
-        * Step 3: All public and private state should now be sane.  Get
+        * All public and private state should now be sane.  Get
         * ready to start sending and receiving packets and give the driver
         * domain a kick because we've probably just requeued some
         * packets.
index 0662e98..26088fe 100644 (file)
@@ -148,6 +148,9 @@ struct driver_info {
        struct sk_buff  *(*tx_fixup)(struct usbnet *dev,
                                struct sk_buff *skb, gfp_t flags);
 
+       /* recover from timeout */
+       void    (*recover)(struct usbnet *dev);
+
        /* early initialization code, can sleep. This is for minidrivers
         * having 'subminidrivers' that need to do extra initialization
         * right after minidriver have initialized hardware. */
index a4daf9e..8dd8cab 100644 (file)
@@ -40,6 +40,7 @@ struct ip_tunnel_prl_entry {
 
 struct ip_tunnel_dst {
        struct dst_entry __rcu          *dst;
+       __be32                           saddr;
 };
 
 struct ip_tunnel {
index 7a7c2da..df3abd1 100644 (file)
@@ -85,6 +85,10 @@ EXPORT_SYMBOL(memcpy_toiovecend);
 int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
                        int offset, int len)
 {
+       /* No data? Done! */
+       if (len == 0)
+               return 0;
+
        /* Skip over the finished iovecs */
        while (offset >= iov->iov_len) {
                offset -= iov->iov_len;
index f14e54a..022d18a 100644 (file)
@@ -128,6 +128,7 @@ static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
 {
        struct batadv_frag_table_entry *chain;
        struct batadv_frag_list_entry *frag_entry_new = NULL, *frag_entry_curr;
+       struct batadv_frag_list_entry *frag_entry_last = NULL;
        struct batadv_frag_packet *frag_packet;
        uint8_t bucket;
        uint16_t seqno, hdr_size = sizeof(struct batadv_frag_packet);
@@ -180,11 +181,14 @@ static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
                        ret = true;
                        goto out;
                }
+
+               /* store current entry because it could be the last in list */
+               frag_entry_last = frag_entry_curr;
        }
 
-       /* Reached the end of the list, so insert after 'frag_entry_curr'. */
-       if (likely(frag_entry_curr)) {
-               hlist_add_after(&frag_entry_curr->list, &frag_entry_new->list);
+       /* Reached the end of the list, so insert after 'frag_entry_last'. */
+       if (likely(frag_entry_last)) {
+               hlist_add_after(&frag_entry_last->list, &frag_entry_new->list);
                chain->size += skb->len - hdr_size;
                chain->timestamp = jiffies;
                ret = true;
index 0bb9d8b..6f6c95c 100644 (file)
@@ -629,7 +629,7 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
        if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
                goto nla_put_failure;
 
-       if (nla_put(skb, NDA_VLAN, sizeof(u16), &fdb->vlan_id))
+       if (fdb->vlan_id && nla_put(skb, NDA_VLAN, sizeof(u16), &fdb->vlan_id))
                goto nla_put_failure;
 
        return nlmsg_end(skb, nlh);
index 3dec029..224506a 100644 (file)
@@ -2976,9 +2976,9 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
                tail = nskb;
 
                __copy_skb_header(nskb, head_skb);
-               nskb->mac_len = head_skb->mac_len;
 
                skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
+               skb_reset_mac_len(nskb);
 
                skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
                                                 nskb->data - tnl_hlen,
index dd8c8c7..afed1aa 100644 (file)
@@ -69,23 +69,25 @@ static unsigned int ip_tunnel_hash(__be32 key, __be32 remote)
 }
 
 static void __tunnel_dst_set(struct ip_tunnel_dst *idst,
-                            struct dst_entry *dst)
+                            struct dst_entry *dst, __be32 saddr)
 {
        struct dst_entry *old_dst;
 
        dst_clone(dst);
        old_dst = xchg((__force struct dst_entry **)&idst->dst, dst);
        dst_release(old_dst);
+       idst->saddr = saddr;
 }
 
-static void tunnel_dst_set(struct ip_tunnel *t, struct dst_entry *dst)
+static void tunnel_dst_set(struct ip_tunnel *t,
+                          struct dst_entry *dst, __be32 saddr)
 {
-       __tunnel_dst_set(this_cpu_ptr(t->dst_cache), dst);
+       __tunnel_dst_set(this_cpu_ptr(t->dst_cache), dst, saddr);
 }
 
 static void tunnel_dst_reset(struct ip_tunnel *t)
 {
-       tunnel_dst_set(t, NULL);
+       tunnel_dst_set(t, NULL, 0);
 }
 
 void ip_tunnel_dst_reset_all(struct ip_tunnel *t)
@@ -93,20 +95,25 @@ void ip_tunnel_dst_reset_all(struct ip_tunnel *t)
        int i;
 
        for_each_possible_cpu(i)
-               __tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL);
+               __tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL, 0);
 }
 EXPORT_SYMBOL(ip_tunnel_dst_reset_all);
 
-static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie)
+static struct rtable *tunnel_rtable_get(struct ip_tunnel *t,
+                                       u32 cookie, __be32 *saddr)
 {
+       struct ip_tunnel_dst *idst;
        struct dst_entry *dst;
 
        rcu_read_lock();
-       dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst);
+       idst = this_cpu_ptr(t->dst_cache);
+       dst = rcu_dereference(idst->dst);
        if (dst && !atomic_inc_not_zero(&dst->__refcnt))
                dst = NULL;
        if (dst) {
-               if (dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
+               if (!dst->obsolete || dst->ops->check(dst, cookie)) {
+                       *saddr = idst->saddr;
+               } else {
                        tunnel_dst_reset(t);
                        dst_release(dst);
                        dst = NULL;
@@ -367,7 +374,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
 
                if (!IS_ERR(rt)) {
                        tdev = rt->dst.dev;
-                       tunnel_dst_set(tunnel, &rt->dst);
+                       tunnel_dst_set(tunnel, &rt->dst, fl4.saddr);
                        ip_rt_put(rt);
                }
                if (dev->type != ARPHRD_ETHER)
@@ -610,7 +617,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
        init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
                         tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link);
 
-       rt = connected ? tunnel_rtable_get(tunnel, 0) : NULL;
+       rt = connected ? tunnel_rtable_get(tunnel, 0, &fl4.saddr) : NULL;
 
        if (!rt) {
                rt = ip_route_output_key(tunnel->net, &fl4);
@@ -620,7 +627,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
                        goto tx_error;
                }
                if (connected)
-                       tunnel_dst_set(tunnel, &rt->dst);
+                       tunnel_dst_set(tunnel, &rt->dst, fl4.saddr);
        }
 
        if (rt->dst.dev == dev) {
index 9a5e05f..b40ad89 100644 (file)
@@ -218,7 +218,8 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
                         * This is:
                         *     (actual rate in segments) * baseRTT
                         */
-                       target_cwnd = tp->snd_cwnd * vegas->baseRTT / rtt;
+                       target_cwnd = (u64)tp->snd_cwnd * vegas->baseRTT;
+                       do_div(target_cwnd, rtt);
 
                        /* Calculate the difference between the window we had,
                         * and the window we would like to have. This quantity
index 27b9825..8276977 100644 (file)
@@ -144,7 +144,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 
                rtt = veno->minrtt;
 
-               target_cwnd = (tp->snd_cwnd * veno->basertt);
+               target_cwnd = (u64)tp->snd_cwnd * veno->basertt;
                target_cwnd <<= V_PARAM_SHIFT;
                do_div(target_cwnd, rtt);
 
index 5bf7b61..0c56c93 100644 (file)
@@ -82,6 +82,7 @@ static int __net_init ipv6_sysctl_net_init(struct net *net)
        ipv6_table[1].data = &net->ipv6.sysctl.anycast_src_echo_reply;
        ipv6_table[2].data = &net->ipv6.sysctl.flowlabel_consistency;
        ipv6_table[3].data = &net->ipv6.sysctl.auto_flowlabels;
+       ipv6_table[4].data = &net->ipv6.sysctl.fwmark_reflect;
 
        ipv6_route_table = ipv6_route_sysctl_init(net);
        if (!ipv6_route_table)
index 73ba1cc..6f70bdd 100644 (file)
@@ -967,8 +967,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
        iph->nexthdr            =       IPPROTO_IPV6;
        iph->payload_len        =       old_iph->payload_len;
        be16_add_cpu(&iph->payload_len, sizeof(*old_iph));
-       iph->priority           =       old_iph->priority;
        memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl));
+       ipv6_change_dsfield(iph, 0, ipv6_get_dsfield(old_iph));
        iph->daddr = cp->daddr.in6;
        iph->saddr = saddr;
        iph->hop_limit          =       old_iph->hop_limit;
index 93692d6..b8035c2 100644 (file)
@@ -3144,6 +3144,7 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
        if (set->flags & NFT_SET_MAP)
                nft_data_uninit(&elem.data, set->dtype);
 
+       return 0;
 err2:
        nft_data_uninit(&elem.key, desc.type);
 err1:
index f14bcf2..3ba31c1 100644 (file)
@@ -50,11 +50,14 @@ struct xt_led_info_internal {
        struct timer_list timer;
 };
 
+#define XT_LED_BLINK_DELAY 50 /* ms */
+
 static unsigned int
 led_tg(struct sk_buff *skb, const struct xt_action_param *par)
 {
        const struct xt_led_info *ledinfo = par->targinfo;
        struct xt_led_info_internal *ledinternal = ledinfo->internal_data;
+       unsigned long led_delay = XT_LED_BLINK_DELAY;
 
        /*
         * If "always blink" is enabled, and there's still some time until the
@@ -62,9 +65,10 @@ led_tg(struct sk_buff *skb, const struct xt_action_param *par)
         */
        if ((ledinfo->delay > 0) && ledinfo->always_blink &&
            timer_pending(&ledinternal->timer))
-               led_trigger_event(&ledinternal->netfilter_led_trigger, LED_OFF);
-
-       led_trigger_event(&ledinternal->netfilter_led_trigger, LED_FULL);
+               led_trigger_blink_oneshot(&ledinternal->netfilter_led_trigger,
+                                         &led_delay, &led_delay, 1);
+       else
+               led_trigger_event(&ledinternal->netfilter_led_trigger, LED_FULL);
 
        /* If there's a positive delay, start/update the timer */
        if (ledinfo->delay > 0) {
index 1eedba5..42dffd4 100644 (file)
@@ -599,7 +599,7 @@ out:
        return err;
 no_route:
        kfree_skb(nskb);
-       IP_INC_STATS_BH(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
+       IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
 
        /* FIXME: Returning the 'err' will effect all the associations
         * associated with a socket, although only one of the paths of the