
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
author     Linus Torvalds <torvalds@linux-foundation.org>
           Wed, 24 Jun 2015 23:49:49 +0000 (16:49 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Wed, 24 Jun 2015 23:49:49 +0000 (16:49 -0700)
Pull networking updates from David Miller:

 1) Add TX fast path in mac80211, from Johannes Berg.

 2) Add TSO/GRO support to ibmveth, from Thomas Falcon.

 3) Move away from cached routes in ipv6, just like ipv4, from Martin
    KaFai Lau.

 4) Lots of new rhashtable tests, from Thomas Graf.

 5) Run ingress qdisc lockless, from Alexei Starovoitov.

 6) Allow servers to fetch TCP packet headers for SYN packets of new
    connections, for fingerprinting.  From Eric Dumazet.  (A userspace
    sketch of the new socket options follows after this list.)

 7) Add mode parameter to pktgen, for testing receive.  From Alexei
    Starovoitov.

 8) Cache access optimizations via simplifications of build_skb(), from
    Alexander Duyck.

 9) Move page frag allocator under mm/, also from Alexander.

10) Add xmit_more support to hv_netvsc, from KY Srinivasan.

11) Add a counter guard in case we try to perform endless reclassify
    loops in the packet scheduler.

12) Extend flow dissector to be programmable and use it in the new
    "Flower" classifier.  From Jiri Pirko.

13) AF_PACKET fanout rollover fixes, performance improvements, and new
    statistics.  From Willem de Bruijn.

14) Add netdev driver for GENEVE tunnels, from John W. Linville.

15) Add ingress netfilter hooks and filtering, from Pablo Neira Ayuso.

16) Fix handling of epoll edge triggers in TCP, from Eric Dumazet.

17) Add an ECN retry fallback for the initial TCP handshake, from Daniel
    Borkmann.

18) Add tail call support to BPF, from Alexei Starovoitov.  (See the BPF
    sketch after this list.)

19) Add several pktgen helper scripts, from Jesper Dangaard Brouer.

20) Add zerocopy support to AF_UNIX, from Hannes Frederic Sowa.

21) Favor even port numbers for allocation to connect() requests, and
    odd port numbers for bind(0), in an effort to help avoid
    ip_local_port_range exhaustion.  From Eric Dumazet.

22) Add Cavium ThunderX driver, from Sunil Goutham.

23) Allow BPF programs to access skb_iif and dev->ifindex SKB metadata,
    from Alexei Starovoitov.  (Also covered in the BPF sketch below.)

24) Add support for T6 chips in cxgb4vf driver, from Hariprasad Shenai.

25) Double TCP Small Queues default to 256K to accommodate situations
    like the XEN driver and wireless aggregation.  From Wei Liu.

26) Add more entropy inputs to flow dissector, from Tom Herbert.

27) Add CDG congestion control algorithm to TCP, from Kenneth Klette
    Jonassen.

28) Convert ipset over to RCU locking, from Jozsef Kadlecsik.

29) Track and act upon link status of ipv4 route nexthops, from Andy
    Gospodarek.
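
For item 6 above, the facility is exposed as a pair of TCP socket options.
The snippet below is a minimal userspace sketch, assuming the
TCP_SAVE_SYN/TCP_SAVED_SYN constants from this series are available in the
installed linux/tcp.h; error handling is trimmed for brevity.

    #include <stdio.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/tcp.h>           /* TCP_SAVE_SYN / TCP_SAVED_SYN */

    /* Ask the kernel to keep the SYN headers of connections accepted on
     * 'listen_fd', then read them back after accept(). */
    static void dump_syn_headers(int listen_fd)
    {
            int one = 1;
            char syn[512];              /* IP + TCP headers of the SYN */
            socklen_t len = sizeof(syn);
            int conn_fd;

            /* must be enabled on the listener before the SYN arrives */
            setsockopt(listen_fd, IPPROTO_TCP, TCP_SAVE_SYN, &one, sizeof(one));

            conn_fd = accept(listen_fd, NULL, NULL);
            if (conn_fd < 0)
                    return;

            /* the saved headers can be read once per accepted socket */
            if (getsockopt(conn_fd, IPPROTO_TCP, TCP_SAVED_SYN, syn, &len) == 0)
                    printf("saved %u bytes of SYN headers\n", (unsigned)len);
    }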
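
For items 18 and 23 above, a classifier-style BPF program can read the new
__sk_buff fields and chain into another program with bpf_tail_call().  A
rough sketch follows, loosely in the style of the in-tree samples/bpf
programs; the SEC()/bpf_map_def/bpf_tail_call definitions are assumed to
come from the samples' bpf_helpers.h and are not spelled out here.

    #include <uapi/linux/bpf.h>
    #include "bpf_helpers.h"   /* SEC(), bpf_map_def, helper stubs (samples/bpf) */

    /* program array used as the tail-call jump table (item 18) */
    struct bpf_map_def SEC("maps") jmp_table = {
            .type        = BPF_MAP_TYPE_PROG_ARRAY,
            .key_size    = sizeof(__u32),
            .value_size  = sizeof(__u32),
            .max_entries = 4,
    };

    SEC("classifier")
    int entry_prog(struct __sk_buff *skb)
    {
            /* item 23: ifindex metadata is now visible to bpf programs */
            if (skb->ifindex != skb->ingress_ifindex)
                    /* item 18: jump to the program in slot 0, if one is loaded */
                    bpf_tail_call(skb, &jmp_table, 0);

            /* reached only if the tail call is not taken */
            return 0;
    }

    char _license[] SEC("license") = "GPL";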

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1670 commits)
  bridge: vlan: flush the dynamically learned entries on port vlan delete
  bridge: multicast: add a comment to br_port_state_selection about blocking state
  net: inet_diag: export IPV6_V6ONLY sockopt
  stmmac: troubleshoot unexpected bits in des0 & des1
  net: ipv4 sysctl option to ignore routes when nexthop link is down
  net: track link-status of ipv4 nexthops
  net: switchdev: ignore unsupported bridge flags
  net: Cavium: Fix MAC address setting in shutdown state
  drivers: net: xgene: fix for ACPI support without ACPI
  ip: report the original address of ICMP messages
  net/mlx5e: Prefetch skb data on RX
  net/mlx5e: Pop cq outside mlx5e_get_cqe
  net/mlx5e: Remove mlx5e_cq.sqrq back-pointer
  net/mlx5e: Remove extra spaces
  net/mlx5e: Avoid TX CQE generation if more xmit packets expected
  net/mlx5e: Avoid redundant dev_kfree_skb() upon NOP completion
  net/mlx5e: Remove re-assignment of wq type in mlx5e_enable_rq()
  net/mlx5e: Use skb_shinfo(skb)->gso_segs rather than counting them
  net/mlx5e: Static mapping of netdev priv resources to/from netdev TX queues
  net/mlx4_en: Use HW counters for rx/tx bytes/packets in PF device
  ...

37 files changed:
MAINTAINERS
crypto/af_alg.c
drivers/infiniband/hw/cxgb4/provider.c
drivers/infiniband/hw/mlx4/mad.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/mlx4_ib.h
drivers/infiniband/hw/mlx5/cq.c
drivers/infiniband/hw/mlx5/mad.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/net/ethernet/amd/xgbe/xgbe-main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/sfc/efx.c
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/rndis_filter.c
drivers/scsi/cxgbi/libcxgbi.c
drivers/target/iscsi/iscsi_target.c
drivers/target/target_core_file.c
drivers/target/target_core_pr.c
drivers/target/target_core_transport.c
drivers/target/target_core_user.c
drivers/vhost/scsi.c
include/linux/mlx4/device.h
include/linux/mlx5/driver.h
include/linux/pci_ids.h
net/bridge/netfilter/ebtables.c
net/core/pktgen.c
net/key/af_key.c
net/rds/af_rds.c
net/rds/ib.h
net/rds/rds.h
net/sched/sch_api.c

diff --cc MAINTAINERS
Simple merge
diff --cc crypto/af_alg.c
Simple merge
@@@ -827,14 -817,12 +819,12 @@@ static void edit_counter(struct mlx4_co
  }
  
  static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 -                      struct ib_wc *in_wc, struct ib_grh *in_grh,
 -                      struct ib_mad *in_mad, struct ib_mad *out_mad)
 +                      const struct ib_wc *in_wc, const struct ib_grh *in_grh,
 +                      const struct ib_mad *in_mad, struct ib_mad *out_mad)
  {
-       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_counter counter_stats;
        struct mlx4_ib_dev *dev = to_mdev(ibdev);
        int err;
-       u32 inmod = dev->counters[port_num - 1] & 0xffff;
-       u8 mode;
  
        if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
                return -EINVAL;
  }
  
  int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 -                      struct ib_wc *in_wc, struct ib_grh *in_grh,
 -                      struct ib_mad *in_mad, struct ib_mad *out_mad)
 +                      const struct ib_wc *in_wc, const struct ib_grh *in_grh,
 +                      const struct ib_mad_hdr *in, size_t in_mad_size,
 +                      struct ib_mad_hdr *out, size_t *out_mad_size,
 +                      u16 *out_mad_pkey_index)
  {
+       struct mlx4_ib_dev *dev = to_mdev(ibdev);
 +      const struct ib_mad *in_mad = (const struct ib_mad *)in;
 +      struct ib_mad *out_mad = (struct ib_mad *)out;
 +
 +      BUG_ON(in_mad_size != sizeof(*in_mad) ||
 +             *out_mad_size != sizeof(*out_mad));
 +
        switch (rdma_port_get_link_layer(ibdev, port_num)) {
        case IB_LINK_LAYER_INFINIBAND:
-               return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
-                                     in_grh, in_mad, out_mad);
+               if (!mlx4_is_slave(dev->dev))
+                       return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
+                                             in_grh, in_mad, out_mad);
        case IB_LINK_LAYER_ETHERNET:
                return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
                                          in_grh, in_mad, out_mad);
@@@ -2158,42 -2080,15 +2137,38 @@@ static void mlx4_ib_free_eqs(struct mlx
                return;
  
        /* Reset the advertised EQ number */
-       ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
+       ibdev->ib_dev.num_comp_vectors = 0;
  
-       /* Free only the added eqs */
-       for (i = 0; i < ibdev->eq_added; i++) {
-               /* Don't free legacy eqs if used */
-               if (ibdev->eq_table[i] <= dev->caps.num_comp_vectors)
-                       continue;
+       for (i = 0; i < total_eqs; i++)
                mlx4_release_eq(dev, ibdev->eq_table[i]);
-       }
  
        kfree(ibdev->eq_table);
+       ibdev->eq_table = NULL;
  }
  
 +static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
 +                             struct ib_port_immutable *immutable)
 +{
 +      struct ib_port_attr attr;
 +      int err;
 +
 +      err = mlx4_ib_query_port(ibdev, port_num, &attr);
 +      if (err)
 +              return err;
 +
 +      immutable->pkey_tbl_len = attr.pkey_tbl_len;
 +      immutable->gid_tbl_len = attr.gid_tbl_len;
 +
 +      if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND)
 +              immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
 +      else
 +              immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
 +
 +      immutable->max_mad_size = IB_MGMT_MAD_SIZE;
 +
 +      return 0;
 +}
 +
  static void *mlx4_ib_add(struct mlx4_dev *dev)
  {
        struct mlx4_ib_dev *ibdev;
Simple merge
Simple merge
Simple merge
@@@ -62,36 -63,168 +63,172 @@@ static char mlx5_version[] 
        DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
        DRIVER_VERSION " (" DRIVER_RELDATE ")\n";
  
+ static enum rdma_link_layer
+ mlx5_ib_port_link_layer(struct ib_device *device)
+ {
+       struct mlx5_ib_dev *dev = to_mdev(device);
+       switch (MLX5_CAP_GEN(dev->mdev, port_type)) {
+       case MLX5_CAP_PORT_TYPE_IB:
+               return IB_LINK_LAYER_INFINIBAND;
+       case MLX5_CAP_PORT_TYPE_ETH:
+               return IB_LINK_LAYER_ETHERNET;
+       default:
+               return IB_LINK_LAYER_UNSPECIFIED;
+       }
+ }
+ static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
+ {
+       return !dev->mdev->issi;
+ }
+ enum {
+       MLX5_VPORT_ACCESS_METHOD_MAD,
+       MLX5_VPORT_ACCESS_METHOD_HCA,
+       MLX5_VPORT_ACCESS_METHOD_NIC,
+ };
+ static int mlx5_get_vport_access_method(struct ib_device *ibdev)
+ {
+       if (mlx5_use_mad_ifc(to_mdev(ibdev)))
+               return MLX5_VPORT_ACCESS_METHOD_MAD;
+       if (mlx5_ib_port_link_layer(ibdev) ==
+           IB_LINK_LAYER_ETHERNET)
+               return MLX5_VPORT_ACCESS_METHOD_NIC;
+       return MLX5_VPORT_ACCESS_METHOD_HCA;
+ }
+ static int mlx5_query_system_image_guid(struct ib_device *ibdev,
+                                       __be64 *sys_image_guid)
+ {
+       struct mlx5_ib_dev *dev = to_mdev(ibdev);
+       struct mlx5_core_dev *mdev = dev->mdev;
+       u64 tmp;
+       int err;
+       switch (mlx5_get_vport_access_method(ibdev)) {
+       case MLX5_VPORT_ACCESS_METHOD_MAD:
+               return mlx5_query_mad_ifc_system_image_guid(ibdev,
+                                                           sys_image_guid);
+       case MLX5_VPORT_ACCESS_METHOD_HCA:
+               err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
+               if (!err)
+                       *sys_image_guid = cpu_to_be64(tmp);
+               return err;
+       default:
+               return -EINVAL;
+       }
+ }
+ static int mlx5_query_max_pkeys(struct ib_device *ibdev,
+                               u16 *max_pkeys)
+ {
+       struct mlx5_ib_dev *dev = to_mdev(ibdev);
+       struct mlx5_core_dev *mdev = dev->mdev;
+       switch (mlx5_get_vport_access_method(ibdev)) {
+       case MLX5_VPORT_ACCESS_METHOD_MAD:
+               return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);
+       case MLX5_VPORT_ACCESS_METHOD_HCA:
+       case MLX5_VPORT_ACCESS_METHOD_NIC:
+               *max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
+                                               pkey_table_size));
+               return 0;
+       default:
+               return -EINVAL;
+       }
+ }
+ static int mlx5_query_vendor_id(struct ib_device *ibdev,
+                               u32 *vendor_id)
+ {
+       struct mlx5_ib_dev *dev = to_mdev(ibdev);
+       switch (mlx5_get_vport_access_method(ibdev)) {
+       case MLX5_VPORT_ACCESS_METHOD_MAD:
+               return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);
+       case MLX5_VPORT_ACCESS_METHOD_HCA:
+       case MLX5_VPORT_ACCESS_METHOD_NIC:
+               return mlx5_core_query_vendor_id(dev->mdev, vendor_id);
+       default:
+               return -EINVAL;
+       }
+ }
+ static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
+                               __be64 *node_guid)
+ {
+       u64 tmp;
+       int err;
+       switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
+       case MLX5_VPORT_ACCESS_METHOD_MAD:
+               return mlx5_query_mad_ifc_node_guid(dev, node_guid);
+       case MLX5_VPORT_ACCESS_METHOD_HCA:
+               err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
+               if (!err)
+                       *node_guid = cpu_to_be64(tmp);
+               return err;
+       default:
+               return -EINVAL;
+       }
+ }
+ struct mlx5_reg_node_desc {
+       u8      desc[64];
+ };
+ static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
+ {
+       struct mlx5_reg_node_desc in;
+       if (mlx5_use_mad_ifc(dev))
+               return mlx5_query_mad_ifc_node_desc(dev, node_desc);
+       memset(&in, 0, sizeof(in));
+       return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
+                                   sizeof(struct mlx5_reg_node_desc),
+                                   MLX5_REG_NODE_DESC, 0, 0);
+ }
  static int mlx5_ib_query_device(struct ib_device *ibdev,
 -                              struct ib_device_attr *props)
 +                              struct ib_device_attr *props,
 +                              struct ib_udata *uhw)
  {
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
-       struct ib_smp *in_mad  = NULL;
-       struct ib_smp *out_mad = NULL;
-       struct mlx5_general_caps *gen;
+       struct mlx5_core_dev *mdev = dev->mdev;
        int err = -ENOMEM;
        int max_rq_sg;
        int max_sq_sg;
-       u64 flags;
  
-       gen = &dev->mdev->caps.gen;
-       in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
-       out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
-       if (!in_mad || !out_mad)
-               goto out;
-       init_query_mad(in_mad);
-       in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
 +      if (uhw->inlen || uhw->outlen)
 +              return -EINVAL;
 +
+       memset(props, 0, sizeof(*props));
+       err = mlx5_query_system_image_guid(ibdev,
+                                          &props->sys_image_guid);
+       if (err)
+               return err;
  
-       err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad, out_mad);
+       err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys);
        if (err)
-               goto out;
+               return err;
  
-       memset(props, 0, sizeof(*props));
+       err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
+       if (err)
+               return err;
  
        props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
                (fw_rev_min(dev->mdev) << 16) |
@@@ -911,12 -1067,9 +1071,10 @@@ static int get_port_caps(struct mlx5_ib
  {
        struct ib_device_attr *dprops = NULL;
        struct ib_port_attr *pprops = NULL;
-       struct mlx5_general_caps *gen;
        int err = -ENOMEM;
        int port;
 +      struct ib_udata uhw = {.inlen = 0, .outlen = 0};
  
-       gen = &dev->mdev->caps.gen;
        pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
        if (!pprops)
                goto out;
@@@ -1311,11 -1473,10 +1499,11 @@@ static void *mlx5_ib_add(struct mlx5_co
        dev->ib_dev.alloc_fast_reg_page_list = mlx5_ib_alloc_fast_reg_page_list;
        dev->ib_dev.free_fast_reg_page_list  = mlx5_ib_free_fast_reg_page_list;
        dev->ib_dev.check_mr_status     = mlx5_ib_check_mr_status;
 +      dev->ib_dev.get_port_immutable  = mlx5_port_immutable;
  
-       mlx5_ib_internal_query_odp_caps(dev);
+       mlx5_ib_internal_fill_odp_caps(dev);
  
-       if (mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_XRC) {
+       if (MLX5_CAP_GEN(mdev, xrc)) {
                dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
                dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
                dev->ib_dev.uverbs_cmd_mask |=
Simple merge
@@@ -220,8 -287,36 +264,33 @@@ static int xgbe_of_support(struct xgbe_
        }
        pdata->ptpclk_rate = clk_get_rate(pdata->ptpclk);
  
 -      /* Retrieve the device cache coherency value */
 -      pdata->coherent = of_dma_is_coherent(dev->of_node);
 -
        return 0;
  }
+ static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
+ {
+       struct device *dev = pdata->dev;
+       struct device_node *phy_node;
+       struct platform_device *phy_pdev;
+       phy_node = of_parse_phandle(dev->of_node, "phy-handle", 0);
+       if (phy_node) {
+               /* Old style device tree:
+                *   The XGBE and PHY resources are separate
+                */
+               phy_pdev = of_find_device_by_node(phy_node);
+               of_node_put(phy_node);
+       } else {
+               /* New style device tree:
+                *   The XGBE and PHY resources are grouped together with
+                *   the PHY resources listed last
+                */
+               get_device(dev);
+               phy_pdev = pdata->pdev;
+       }
+       return phy_pdev;
+ }
  #else   /* CONFIG_OF */
  static int xgbe_of_support(struct xgbe_prv_data *pdata)
  {
@@@ -1182,10 -1293,9 +1293,10 @@@ int t4_prep_fw(struct adapter *adap, st
  int t4_prep_adapter(struct adapter *adapter);
  
  enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
- int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
+ int t4_bar2_sge_qregs(struct adapter *adapter,
                      unsigned int qid,
                      enum t4_bar2_qtype qtype,
 +                    int user,
                      u64 *pbar2_qoffset,
                      unsigned int *pbar2_qid);
  
@@@ -2352,8 -2356,8 +2358,8 @@@ static void process_db_drop(struct work
                unsigned int bar2_qid;
                int ret;
  
-               ret = cxgb4_t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
-                                             0, &bar2_qoffset, &bar2_qid);
+               ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
 -                                      &bar2_qoffset, &bar2_qid);
++                                      0, &bar2_qoffset, &bar2_qid);
                if (ret)
                        dev_err(adap->pdev_dev, "doorbell drop recovery: "
                                "qid=%d, pidx_inc=%d\n", qid, pidx_inc);
@@@ -2429,8 -2401,8 +2401,8 @@@ static void __iomem *bar2_address(struc
        u64 bar2_qoffset;
        int ret;
  
-       ret = cxgb4_t4_bar2_sge_qregs(adapter, qid, qtype, 0,
-                                     &bar2_qoffset, pbar2_qid);
 -      ret = t4_bar2_sge_qregs(adapter, qid, qtype,
++      ret = t4_bar2_sge_qregs(adapter, qid, qtype, 0,
+                               &bar2_qoffset, pbar2_qid);
        if (ret)
                return NULL;
  
@@@ -5123,10 -6122,9 +6123,10 @@@ int t4_prep_adapter(struct adapter *ada
   *    Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
   *    then these "Inferred Queue ID" register may not be used.
   */
- int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
+ int t4_bar2_sge_qregs(struct adapter *adapter,
                      unsigned int qid,
                      enum t4_bar2_qtype qtype,
 +                    int user,
                      u64 *pbar2_qoffset,
                      unsigned int *pbar2_qid)
  {
Simple merge
Simple merge
Simple merge
Simple merge
  #include <linux/crypto.h>
  #include <linux/completion.h>
  #include <linux/module.h>
+ #include <linux/vmalloc.h>
  #include <linux/idr.h>
  #include <asm/unaligned.h>
 -#include <scsi/scsi_device.h>
 +#include <scsi/scsi_proto.h>
  #include <scsi/iscsi_proto.h>
  #include <scsi/scsi_tcq.h>
  #include <target/target_core_base.h>
  #include <linux/slab.h>
  #include <linux/spinlock.h>
  #include <linux/module.h>
+ #include <linux/vmalloc.h>
  #include <linux/falloc.h>
 -#include <scsi/scsi.h>
 -#include <scsi/scsi_host.h>
 +#include <scsi/scsi_proto.h>
  #include <asm/unaligned.h>
  
  #include <target/target_core_base.h>
  #include <linux/slab.h>
  #include <linux/spinlock.h>
  #include <linux/list.h>
+ #include <linux/vmalloc.h>
  #include <linux/file.h>
 -#include <scsi/scsi.h>
 -#include <scsi/scsi_cmnd.h>
 +#include <scsi/scsi_proto.h>
  #include <asm/unaligned.h>
  
  #include <target/target_core_base.h>
  #include <linux/spinlock.h>
  #include <linux/module.h>
  #include <linux/idr.h>
 +#include <linux/kernel.h>
  #include <linux/timer.h>
  #include <linux/parser.h>
 -#include <scsi/scsi.h>
 -#include <scsi/scsi_host.h>
+ #include <linux/vmalloc.h>
  #include <linux/uio_driver.h>
  #include <net/genetlink.h>
 +#include <scsi/scsi_common.h>
 +#include <scsi/scsi_proto.h>
  #include <target/target_core_base.h>
  #include <target/target_core_fabric.h>
  #include <target/target_core_backend.h>
  #include <linux/compat.h>
  #include <linux/eventfd.h>
  #include <linux/fs.h>
+ #include <linux/vmalloc.h>
  #include <linux/miscdevice.h>
  #include <asm/unaligned.h>
 -#include <scsi/scsi.h>
 +#include <scsi/scsi_common.h>
 +#include <scsi/scsi_proto.h>
  #include <target/target_core_base.h>
  #include <target/target_core_fabric.h>
  #include <target/target_core_fabric_configfs.h>
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
diff --cc net/rds/ib.h
Simple merge
diff --cc net/rds/rds.h
Simple merge
Simple merge