igbvf: Fix code comments and whitespace
author     Jeff Kirsher <jeffrey.t.kirsher@intel.com>
           Wed, 21 Jan 2015 09:57:50 +0000 (09:57 +0000)
committer  Jeff Kirsher <jeffrey.t.kirsher@intel.com>
           Tue, 24 Feb 2015 01:11:54 +0000 (17:11 -0800)

Fix the code comments to align with the drivers/net/ commenting style.
Also fix other checkpatch errors, such as using tabs where possible and
properly wrapping lines to conform to the 80 character limit (unless the
line contains a string).

Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
drivers/net/ethernet/intel/igbvf/defines.h
drivers/net/ethernet/intel/igbvf/ethtool.c
drivers/net/ethernet/intel/igbvf/igbvf.h
drivers/net/ethernet/intel/igbvf/mbx.c
drivers/net/ethernet/intel/igbvf/mbx.h
drivers/net/ethernet/intel/igbvf/netdev.c
drivers/net/ethernet/intel/igbvf/regs.h
drivers/net/ethernet/intel/igbvf/vf.c
drivers/net/ethernet/intel/igbvf/vf.h
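
For reference, a minimal sketch (not taken from any of the files listed above)
of the drivers/net/ comment style this patch converts to: the comment text
starts on the opening "/*" line rather than on a blank second line.

	/*
	 * Old style: the opening line is left blank and the
	 * text starts on the second line.
	 */

	/* drivers/net/ style: the text starts on the opening line,
	 * continuation lines stay aligned, and the closing marker
	 * remains on its own line.
	 */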

drivers/net/ethernet/intel/igbvf/defines.h
index d9fa999..ae3f283 100644
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 #define _E1000_DEFINES_H_
 
 /* Number of Transmit and Receive Descriptors must be a multiple of 8 */
-#define REQ_TX_DESCRIPTOR_MULTIPLE  8
-#define REQ_RX_DESCRIPTOR_MULTIPLE  8
+#define REQ_TX_DESCRIPTOR_MULTIPLE     8
+#define REQ_RX_DESCRIPTOR_MULTIPLE     8
 
 /* IVAR valid bit */
-#define E1000_IVAR_VALID        0x80
+#define E1000_IVAR_VALID       0x80
 
 /* Receive Descriptor bit definitions */
-#define E1000_RXD_STAT_DD       0x01    /* Descriptor Done */
-#define E1000_RXD_STAT_EOP      0x02    /* End of Packet */
-#define E1000_RXD_STAT_IXSM     0x04    /* Ignore checksum */
-#define E1000_RXD_STAT_VP       0x08    /* IEEE VLAN Packet */
-#define E1000_RXD_STAT_UDPCS    0x10    /* UDP xsum calculated */
-#define E1000_RXD_STAT_TCPCS    0x20    /* TCP xsum calculated */
-#define E1000_RXD_STAT_IPCS     0x40    /* IP xsum calculated */
-#define E1000_RXD_ERR_SE        0x02    /* Symbol Error */
-#define E1000_RXD_SPC_VLAN_MASK 0x0FFF  /* VLAN ID is in lower 12 bits */
-
-#define E1000_RXDEXT_STATERR_LB    0x00040000
-#define E1000_RXDEXT_STATERR_CE    0x01000000
-#define E1000_RXDEXT_STATERR_SE    0x02000000
-#define E1000_RXDEXT_STATERR_SEQ   0x04000000
-#define E1000_RXDEXT_STATERR_CXE   0x10000000
-#define E1000_RXDEXT_STATERR_TCPE  0x20000000
-#define E1000_RXDEXT_STATERR_IPE   0x40000000
-#define E1000_RXDEXT_STATERR_RXE   0x80000000
-
+#define E1000_RXD_STAT_DD      0x01    /* Descriptor Done */
+#define E1000_RXD_STAT_EOP     0x02    /* End of Packet */
+#define E1000_RXD_STAT_IXSM    0x04    /* Ignore checksum */
+#define E1000_RXD_STAT_VP      0x08    /* IEEE VLAN Packet */
+#define E1000_RXD_STAT_UDPCS   0x10    /* UDP xsum calculated */
+#define E1000_RXD_STAT_TCPCS   0x20    /* TCP xsum calculated */
+#define E1000_RXD_STAT_IPCS    0x40    /* IP xsum calculated */
+#define E1000_RXD_ERR_SE       0x02    /* Symbol Error */
+#define E1000_RXD_SPC_VLAN_MASK        0x0FFF  /* VLAN ID is in lower 12 bits */
+
+#define E1000_RXDEXT_STATERR_LB        0x00040000
+#define E1000_RXDEXT_STATERR_CE        0x01000000
+#define E1000_RXDEXT_STATERR_SE        0x02000000
+#define E1000_RXDEXT_STATERR_SEQ       0x04000000
+#define E1000_RXDEXT_STATERR_CXE       0x10000000
+#define E1000_RXDEXT_STATERR_TCPE      0x20000000
+#define E1000_RXDEXT_STATERR_IPE       0x40000000
+#define E1000_RXDEXT_STATERR_RXE       0x80000000
 
 /* Same mask, but for extended and packet split descriptors */
 #define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
-    E1000_RXDEXT_STATERR_CE  |            \
-    E1000_RXDEXT_STATERR_SE  |            \
-    E1000_RXDEXT_STATERR_SEQ |            \
-    E1000_RXDEXT_STATERR_CXE |            \
-    E1000_RXDEXT_STATERR_RXE)
+       E1000_RXDEXT_STATERR_CE  | \
+       E1000_RXDEXT_STATERR_SE  | \
+       E1000_RXDEXT_STATERR_SEQ | \
+       E1000_RXDEXT_STATERR_CXE | \
+       E1000_RXDEXT_STATERR_RXE)
 
 /* Device Control */
-#define E1000_CTRL_RST      0x04000000  /* Global reset */
+#define E1000_CTRL_RST         0x04000000  /* Global reset */
 
 /* Device Status */
-#define E1000_STATUS_FD         0x00000001      /* Full duplex.0=half,1=full */
-#define E1000_STATUS_LU         0x00000002      /* Link up.0=no,1=link */
-#define E1000_STATUS_TXOFF      0x00000010      /* transmission paused */
-#define E1000_STATUS_SPEED_10   0x00000000      /* Speed 10Mb/s */
-#define E1000_STATUS_SPEED_100  0x00000040      /* Speed 100Mb/s */
-#define E1000_STATUS_SPEED_1000 0x00000080      /* Speed 1000Mb/s */
-
-#define SPEED_10    10
-#define SPEED_100   100
-#define SPEED_1000  1000
-#define HALF_DUPLEX 1
-#define FULL_DUPLEX 2
+#define E1000_STATUS_FD                0x00000001      /* Full duplex.0=half,1=full */
+#define E1000_STATUS_LU                0x00000002      /* Link up.0=no,1=link */
+#define E1000_STATUS_TXOFF     0x00000010      /* transmission paused */
+#define E1000_STATUS_SPEED_10  0x00000000      /* Speed 10Mb/s */
+#define E1000_STATUS_SPEED_100 0x00000040      /* Speed 100Mb/s */
+#define E1000_STATUS_SPEED_1000        0x00000080      /* Speed 1000Mb/s */
+
+#define SPEED_10       10
+#define SPEED_100      100
+#define SPEED_1000     1000
+#define HALF_DUPLEX    1
+#define FULL_DUPLEX    2
 
 /* Transmit Descriptor bit definitions */
-#define E1000_TXD_POPTS_IXSM 0x01       /* Insert IP checksum */
-#define E1000_TXD_POPTS_TXSM 0x02       /* Insert TCP/UDP checksum */
-#define E1000_TXD_CMD_DEXT   0x20000000 /* Descriptor extension (0 = legacy) */
-#define E1000_TXD_STAT_DD    0x00000001 /* Descriptor Done */
+#define E1000_TXD_POPTS_IXSM   0x01       /* Insert IP checksum */
+#define E1000_TXD_POPTS_TXSM   0x02       /* Insert TCP/UDP checksum */
+#define E1000_TXD_CMD_DEXT     0x20000000 /* Desc extension (0 = legacy) */
+#define E1000_TXD_STAT_DD      0x00000001 /* Desc Done */
 
-#define MAX_JUMBO_FRAME_SIZE    0x3F00
+#define MAX_JUMBO_FRAME_SIZE   0x3F00
 
 /* 802.1q VLAN Packet Size */
-#define VLAN_TAG_SIZE              4    /* 802.3ac tag (not DMA'd) */
+#define VLAN_TAG_SIZE          4    /* 802.3ac tag (not DMA'd) */
 
 /* Error Codes */
-#define E1000_SUCCESS      0
-#define E1000_ERR_CONFIG   3
-#define E1000_ERR_MAC_INIT 5
-#define E1000_ERR_MBX      15
+#define E1000_SUCCESS          0
+#define E1000_ERR_CONFIG       3
+#define E1000_ERR_MAC_INIT     5
+#define E1000_ERR_MBX          15
 
 /* SRRCTL bit definitions */
-#define E1000_SRRCTL_BSIZEPKT_SHIFT                     10 /* Shift _right_ */
-#define E1000_SRRCTL_BSIZEHDRSIZE_MASK                  0x00000F00
-#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT                 2  /* Shift _left_ */
-#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF                0x02000000
-#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS          0x0A000000
-#define E1000_SRRCTL_DESCTYPE_MASK                      0x0E000000
-#define E1000_SRRCTL_DROP_EN                            0x80000000
+#define E1000_SRRCTL_BSIZEPKT_SHIFT            10 /* Shift _right_ */
+#define E1000_SRRCTL_BSIZEHDRSIZE_MASK         0x00000F00
+#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT                2  /* Shift _left_ */
+#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF       0x02000000
+#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
+#define E1000_SRRCTL_DESCTYPE_MASK             0x0E000000
+#define E1000_SRRCTL_DROP_EN                   0x80000000
 
-#define E1000_SRRCTL_BSIZEPKT_MASK      0x0000007F
-#define E1000_SRRCTL_BSIZEHDR_MASK      0x00003F00
+#define E1000_SRRCTL_BSIZEPKT_MASK     0x0000007F
+#define E1000_SRRCTL_BSIZEHDR_MASK     0x00003F00
 
 /* Additional Descriptor Control definitions */
-#define E1000_TXDCTL_QUEUE_ENABLE  0x02000000 /* Enable specific Tx Queue */
-#define E1000_RXDCTL_QUEUE_ENABLE  0x02000000 /* Enable specific Rx Queue */
+#define E1000_TXDCTL_QUEUE_ENABLE      0x02000000 /* Enable specific Tx Que */
+#define E1000_RXDCTL_QUEUE_ENABLE      0x02000000 /* Enable specific Rx Que */
 
 /* Direct Cache Access (DCA) definitions */
-#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define E1000_DCA_TXCTRL_TX_WB_RO_EN   (1 << 11) /* Tx Desc writeback RO bit */
 
-#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
+#define E1000_VF_INIT_TIMEOUT  200 /* Number of retries to clear RSTI */
 
 #endif /* _E1000_DEFINES_H_ */

drivers/net/ethernet/intel/igbvf/ethtool.c
index 2178f87..91a1190 100644
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
@@ -36,7 +35,6 @@
 #include "igbvf.h"
 #include <linux/if_vlan.h>
 
-
 struct igbvf_stats {
        char stat_string[ETH_GSTRING_LEN];
        int sizeof_stat;
@@ -74,7 +72,7 @@ static const char igbvf_gstrings_test[][ETH_GSTRING_LEN] = {
 #define IGBVF_TEST_LEN ARRAY_SIZE(igbvf_gstrings_test)
 
 static int igbvf_get_settings(struct net_device *netdev,
-                              struct ethtool_cmd *ecmd)
+                             struct ethtool_cmd *ecmd)
 {
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
@@ -111,18 +109,18 @@ static int igbvf_get_settings(struct net_device *netdev,
 }
 
 static int igbvf_set_settings(struct net_device *netdev,
-                              struct ethtool_cmd *ecmd)
+                             struct ethtool_cmd *ecmd)
 {
        return -EOPNOTSUPP;
 }
 
 static void igbvf_get_pauseparam(struct net_device *netdev,
-                                 struct ethtool_pauseparam *pause)
+                                struct ethtool_pauseparam *pause)
 {
 }
 
 static int igbvf_set_pauseparam(struct net_device *netdev,
-                                struct ethtool_pauseparam *pause)
+                               struct ethtool_pauseparam *pause)
 {
        return -EOPNOTSUPP;
 }
@@ -130,12 +128,14 @@ static int igbvf_set_pauseparam(struct net_device *netdev,
 static u32 igbvf_get_msglevel(struct net_device *netdev)
 {
        struct igbvf_adapter *adapter = netdev_priv(netdev);
+
        return adapter->msg_enable;
 }
 
 static void igbvf_set_msglevel(struct net_device *netdev, u32 data)
 {
        struct igbvf_adapter *adapter = netdev_priv(netdev);
+
        adapter->msg_enable = data;
 }
 
@@ -146,7 +146,7 @@ static int igbvf_get_regs_len(struct net_device *netdev)
 }
 
 static void igbvf_get_regs(struct net_device *netdev,
-                           struct ethtool_regs *regs, void *p)
+                          struct ethtool_regs *regs, void *p)
 {
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
@@ -175,19 +175,19 @@ static int igbvf_get_eeprom_len(struct net_device *netdev)
 }
 
 static int igbvf_get_eeprom(struct net_device *netdev,
-                            struct ethtool_eeprom *eeprom, u8 *bytes)
+                           struct ethtool_eeprom *eeprom, u8 *bytes)
 {
        return -EOPNOTSUPP;
 }
 
 static int igbvf_set_eeprom(struct net_device *netdev,
-                            struct ethtool_eeprom *eeprom, u8 *bytes)
+                           struct ethtool_eeprom *eeprom, u8 *bytes)
 {
        return -EOPNOTSUPP;
 }
 
 static void igbvf_get_drvinfo(struct net_device *netdev,
-                              struct ethtool_drvinfo *drvinfo)
+                             struct ethtool_drvinfo *drvinfo)
 {
        struct igbvf_adapter *adapter = netdev_priv(netdev);
 
@@ -201,7 +201,7 @@ static void igbvf_get_drvinfo(struct net_device *netdev,
 }
 
 static void igbvf_get_ringparam(struct net_device *netdev,
-                                struct ethtool_ringparam *ring)
+                               struct ethtool_ringparam *ring)
 {
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        struct igbvf_ring *tx_ring = adapter->tx_ring;
@@ -214,7 +214,7 @@ static void igbvf_get_ringparam(struct net_device *netdev,
 }
 
 static int igbvf_set_ringparam(struct net_device *netdev,
-                               struct ethtool_ringparam *ring)
+                              struct ethtool_ringparam *ring)
 {
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        struct igbvf_ring *temp_ring;
@@ -255,10 +255,9 @@ static int igbvf_set_ringparam(struct net_device *netdev,
 
        igbvf_down(adapter);
 
-       /*
-        * We can't just free everything and then setup again,
+       /* We can't just free everything and then setup again,
         * because the ISRs in MSI-X mode get passed pointers
-        * to the tx and rx ring structs.
+        * to the Tx and Rx ring structs.
         */
        if (new_tx_count != adapter->tx_ring->count) {
                memcpy(temp_ring, adapter->tx_ring, sizeof(struct igbvf_ring));
@@ -283,7 +282,7 @@ static int igbvf_set_ringparam(struct net_device *netdev,
 
                igbvf_free_rx_resources(adapter->rx_ring);
 
-               memcpy(adapter->rx_ring, temp_ring,sizeof(struct igbvf_ring));
+               memcpy(adapter->rx_ring, temp_ring, sizeof(struct igbvf_ring));
        }
 err_setup:
        igbvf_up(adapter);
@@ -307,14 +306,13 @@ static int igbvf_link_test(struct igbvf_adapter *adapter, u64 *data)
 }
 
 static void igbvf_diag_test(struct net_device *netdev,
-                            struct ethtool_test *eth_test, u64 *data)
+                           struct ethtool_test *eth_test, u64 *data)
 {
        struct igbvf_adapter *adapter = netdev_priv(netdev);
 
        set_bit(__IGBVF_TESTING, &adapter->state);
 
-       /*
-        * Link test performed before hardware reset so autoneg doesn't
+       /* Link test performed before hardware reset so autoneg doesn't
         * interfere with test result
         */
        if (igbvf_link_test(adapter, &data[0]))
@@ -325,20 +323,20 @@ static void igbvf_diag_test(struct net_device *netdev,
 }
 
 static void igbvf_get_wol(struct net_device *netdev,
-                          struct ethtool_wolinfo *wol)
+                         struct ethtool_wolinfo *wol)
 {
        wol->supported = 0;
        wol->wolopts = 0;
 }
 
 static int igbvf_set_wol(struct net_device *netdev,
-                         struct ethtool_wolinfo *wol)
+                        struct ethtool_wolinfo *wol)
 {
        return -EOPNOTSUPP;
 }
 
 static int igbvf_get_coalesce(struct net_device *netdev,
-                              struct ethtool_coalesce *ec)
+                             struct ethtool_coalesce *ec)
 {
        struct igbvf_adapter *adapter = netdev_priv(netdev);
 
@@ -351,13 +349,13 @@ static int igbvf_get_coalesce(struct net_device *netdev,
 }
 
 static int igbvf_set_coalesce(struct net_device *netdev,
-                              struct ethtool_coalesce *ec)
+                             struct ethtool_coalesce *ec)
 {
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
 
        if ((ec->rx_coalesce_usecs >= IGBVF_MIN_ITR_USECS) &&
-            (ec->rx_coalesce_usecs <= IGBVF_MAX_ITR_USECS)) {
+           (ec->rx_coalesce_usecs <= IGBVF_MAX_ITR_USECS)) {
                adapter->current_itr = ec->rx_coalesce_usecs << 2;
                adapter->requested_itr = 1000000000 /
                                        (adapter->current_itr * 256);
@@ -366,8 +364,7 @@ static int igbvf_set_coalesce(struct net_device *netdev,
                adapter->current_itr = IGBVF_START_ITR;
                adapter->requested_itr = ec->rx_coalesce_usecs;
        } else if (ec->rx_coalesce_usecs == 0) {
-               /*
-                * The user's desire is to turn off interrupt throttling
+               /* The user's desire is to turn off interrupt throttling
                 * altogether, but due to HW limitations, we can't do that.
                 * Instead we set a very small value in EITR, which would
                 * allow ~967k interrupts per second, but allow the adapter's
@@ -376,8 +373,9 @@ static int igbvf_set_coalesce(struct net_device *netdev,
                adapter->current_itr = 4;
                adapter->requested_itr = 1000000000 /
                                        (adapter->current_itr * 256);
-       } else
+       } else {
                return -EINVAL;
+       }
 
        writel(adapter->current_itr,
               hw->hw_addr + adapter->rx_ring->itr_register);
@@ -388,15 +386,15 @@ static int igbvf_set_coalesce(struct net_device *netdev,
 static int igbvf_nway_reset(struct net_device *netdev)
 {
        struct igbvf_adapter *adapter = netdev_priv(netdev);
+
        if (netif_running(netdev))
                igbvf_reinit_locked(adapter);
        return 0;
 }
 
-
 static void igbvf_get_ethtool_stats(struct net_device *netdev,
-                                    struct ethtool_stats *stats,
-                                    u64 *data)
+                                   struct ethtool_stats *stats,
+                                   u64 *data)
 {
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        int i;
@@ -404,19 +402,18 @@ static void igbvf_get_ethtool_stats(struct net_device *netdev,
        igbvf_update_stats(adapter);
        for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) {
                char *p = (char *)adapter +
-                         igbvf_gstrings_stats[i].stat_offset;
+                         igbvf_gstrings_stats[i].stat_offset;
                char *b = (char *)adapter +
-                         igbvf_gstrings_stats[i].base_stat_offset;
+                         igbvf_gstrings_stats[i].base_stat_offset;
                data[i] = ((igbvf_gstrings_stats[i].sizeof_stat ==
-                           sizeof(u64)) ? (*(u64 *)p - *(u64 *)b) :
-                           (*(u32 *)p - *(u32 *)b));
+                           sizeof(u64)) ? (*(u64 *)p - *(u64 *)b) :
+                           (*(u32 *)p - *(u32 *)b));
        }
-
 }
 
 static int igbvf_get_sset_count(struct net_device *dev, int stringset)
 {
-       switch(stringset) {
+       switch (stringset) {
        case ETH_SS_TEST:
                return IGBVF_TEST_LEN;
        case ETH_SS_STATS:
@@ -427,7 +424,7 @@ static int igbvf_get_sset_count(struct net_device *dev, int stringset)
 }
 
 static void igbvf_get_strings(struct net_device *netdev, u32 stringset,
-                              u8 *data)
+                             u8 *data)
 {
        u8 *p = data;
        int i;

drivers/net/ethernet/intel/igbvf/igbvf.h
index 7d6a25c..f166baa 100644
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
@@ -43,10 +42,10 @@ struct igbvf_info;
 struct igbvf_adapter;
 
 /* Interrupt defines */
-#define IGBVF_START_ITR                    488 /* ~8000 ints/sec */
-#define IGBVF_4K_ITR                       980
-#define IGBVF_20K_ITR                      196
-#define IGBVF_70K_ITR                       56
+#define IGBVF_START_ITR                488 /* ~8000 ints/sec */
+#define IGBVF_4K_ITR           980
+#define IGBVF_20K_ITR          196
+#define IGBVF_70K_ITR          56
 
 enum latency_range {
        lowest_latency = 0,
@@ -55,56 +54,55 @@ enum latency_range {
        latency_invalid = 255
 };
 
-
 /* Interrupt modes, as used by the IntMode parameter */
-#define IGBVF_INT_MODE_LEGACY           0
-#define IGBVF_INT_MODE_MSI              1
-#define IGBVF_INT_MODE_MSIX             2
+#define IGBVF_INT_MODE_LEGACY  0
+#define IGBVF_INT_MODE_MSI     1
+#define IGBVF_INT_MODE_MSIX    2
 
 /* Tx/Rx descriptor defines */
-#define IGBVF_DEFAULT_TXD               256
-#define IGBVF_MAX_TXD                   4096
-#define IGBVF_MIN_TXD                   80
+#define IGBVF_DEFAULT_TXD      256
+#define IGBVF_MAX_TXD          4096
+#define IGBVF_MIN_TXD          80
 
-#define IGBVF_DEFAULT_RXD               256
-#define IGBVF_MAX_RXD                   4096
-#define IGBVF_MIN_RXD                   80
+#define IGBVF_DEFAULT_RXD      256
+#define IGBVF_MAX_RXD          4096
+#define IGBVF_MIN_RXD          80
 
-#define IGBVF_MIN_ITR_USECS             10 /* 100000 irq/sec */
-#define IGBVF_MAX_ITR_USECS             10000 /* 100    irq/sec */
+#define IGBVF_MIN_ITR_USECS    10 /* 100000 irq/sec */
+#define IGBVF_MAX_ITR_USECS    10000 /* 100    irq/sec */
 
 /* RX descriptor control thresholds.
  * PTHRESH - MAC will consider prefetch if it has fewer than this number of
- *           descriptors available in its onboard memory.
- *           Setting this to 0 disables RX descriptor prefetch.
+ *        descriptors available in its onboard memory.
+ *        Setting this to 0 disables RX descriptor prefetch.
  * HTHRESH - MAC will only prefetch if there are at least this many descriptors
- *           available in host memory.
- *           If PTHRESH is 0, this should also be 0.
+ *        available in host memory.
+ *        If PTHRESH is 0, this should also be 0.
  * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
- *           descriptors until either it has this many to write back, or the
- *           ITR timer expires.
+ *        descriptors until either it has this many to write back, or the
+ *        ITR timer expires.
  */
-#define IGBVF_RX_PTHRESH                16
-#define IGBVF_RX_HTHRESH                8
-#define IGBVF_RX_WTHRESH                1
+#define IGBVF_RX_PTHRESH       16
+#define IGBVF_RX_HTHRESH       8
+#define IGBVF_RX_WTHRESH       1
 
 /* this is the size past which hardware will drop packets when setting LPE=0 */
-#define MAXIMUM_ETHERNET_VLAN_SIZE      1522
+#define MAXIMUM_ETHERNET_VLAN_SIZE     1522
 
-#define IGBVF_FC_PAUSE_TIME             0x0680 /* 858 usec */
+#define IGBVF_FC_PAUSE_TIME    0x0680 /* 858 usec */
 
 /* How many Tx Descriptors do we need to call netif_wake_queue ? */
-#define IGBVF_TX_QUEUE_WAKE             32
+#define IGBVF_TX_QUEUE_WAKE    32
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
-#define IGBVF_RX_BUFFER_WRITE           16 /* Must be power of 2 */
+#define IGBVF_RX_BUFFER_WRITE  16 /* Must be power of 2 */
 
-#define AUTO_ALL_MODES                  0
-#define IGBVF_EEPROM_APME               0x0400
+#define AUTO_ALL_MODES         0
+#define IGBVF_EEPROM_APME      0x0400
 
-#define IGBVF_MNG_VLAN_NONE             (-1)
+#define IGBVF_MNG_VLAN_NONE    (-1)
 
 /* Number of packet split data buffers (not including the header buffer) */
-#define PS_PAGE_BUFFERS                 (MAX_PS_BUFFERS - 1)
+#define PS_PAGE_BUFFERS                (MAX_PS_BUFFERS - 1)
 
 enum igbvf_boards {
        board_vf,
@@ -116,8 +114,7 @@ struct igbvf_queue_stats {
        u64 bytes;
 };
 
-/*
- * wrappers around a pointer to a socket buffer,
+/* wrappers around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer
  */
 struct igbvf_buffer {
@@ -148,10 +145,10 @@ union igbvf_desc {
 
 struct igbvf_ring {
        struct igbvf_adapter *adapter;  /* backlink */
-       union igbvf_desc *desc;         /* pointer to ring memory  */
-       dma_addr_t dma;                 /* phys address of ring    */
-       unsigned int size;              /* length of ring in bytes */
-       unsigned int count;             /* number of desc. in ring */
+       union igbvf_desc *desc; /* pointer to ring memory  */
+       dma_addr_t dma;         /* phys address of ring    */
+       unsigned int size;      /* length of ring in bytes */
+       unsigned int count;     /* number of desc. in ring */
 
        u16 next_to_use;
        u16 next_to_clean;
@@ -202,9 +199,7 @@ struct igbvf_adapter {
        u32 requested_itr; /* ints/sec or adaptive */
        u32 current_itr; /* Actual ITR register value, not ints/sec */
 
-       /*
-        * Tx
-        */
+       /* Tx */
        struct igbvf_ring *tx_ring /* One per active queue */
        ____cacheline_aligned_in_smp;
 
@@ -226,9 +221,7 @@ struct igbvf_adapter {
        u32 tx_fifo_size;
        u32 tx_dma_failed;
 
-       /*
-        * Rx
-        */
+       /* Rx */
        struct igbvf_ring *rx_ring;
 
        u32 rx_int_delay;
@@ -249,7 +242,7 @@ struct igbvf_adapter {
        struct net_device *netdev;
        struct pci_dev *pdev;
        struct net_device_stats net_stats;
-       spinlock_t stats_lock;      /* prevent concurrent stats updates */
+       spinlock_t stats_lock; /* prevent concurrent stats updates */
 
        /* structs defined in e1000_hw.h */
        struct e1000_hw hw;
@@ -286,16 +279,16 @@ struct igbvf_adapter {
 };
 
 struct igbvf_info {
-       enum e1000_mac_type     mac;
-       unsigned int            flags;
-       u32                     pba;
-       void                    (*init_ops)(struct e1000_hw *);
-       s32                     (*get_variants)(struct igbvf_adapter *);
+       enum e1000_mac_type     mac;
+       unsigned int            flags;
+       u32                     pba;
+       void                    (*init_ops)(struct e1000_hw *);
+       s32                     (*get_variants)(struct igbvf_adapter *);
 };
 
 /* hardware capability, feature, and workaround flags */
-#define IGBVF_FLAG_RX_CSUM_DISABLED             (1 << 0)
-#define IGBVF_FLAG_RX_LB_VLAN_BSWAP            (1 << 1)
+#define IGBVF_FLAG_RX_CSUM_DISABLED    (1 << 0)
+#define IGBVF_FLAG_RX_LB_VLAN_BSWAP    (1 << 1)
 #define IGBVF_RX_DESC_ADV(R, i)     \
        (&((((R).desc))[i].rx_desc))
 #define IGBVF_TX_DESC_ADV(R, i)     \

drivers/net/ethernet/intel/igbvf/mbx.c
index b4b65bc..7b6cb4c 100644
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
@@ -54,10 +53,10 @@ out:
 }
 
 /**
- *  e1000_poll_for_ack - Wait for message acknowledgement
+ *  e1000_poll_for_ack - Wait for message acknowledgment
  *  @hw: pointer to the HW structure
  *
- *  returns SUCCESS if it successfully received a message acknowledgement
+ *  returns SUCCESS if it successfully received a message acknowledgment
  **/
 static s32 e1000_poll_for_ack(struct e1000_hw *hw)
 {
@@ -218,7 +217,7 @@ static s32 e1000_check_for_rst_vf(struct e1000_hw *hw)
        s32 ret_val = -E1000_ERR_MBX;
 
        if (!e1000_check_for_bit_vf(hw, (E1000_V2PMAILBOX_RSTD |
-                                        E1000_V2PMAILBOX_RSTI))) {
+                                        E1000_V2PMAILBOX_RSTI))) {
                ret_val = E1000_SUCCESS;
                hw->mbx.stats.rsts++;
        }
@@ -239,7 +238,7 @@ static s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw)
        /* Take ownership of the buffer */
        ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_VFU);
 
-       /* reserve mailbox for vf use */
+       /* reserve mailbox for VF use */
        if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU)
                ret_val = E1000_SUCCESS;
 
@@ -283,7 +282,7 @@ out_no_write:
 }
 
 /**
- *  e1000_read_mbx_vf - Reads a message from the inbox intended for vf
+ *  e1000_read_mbx_vf - Reads a message from the inbox intended for VF
  *  @hw: pointer to the HW structure
  *  @msg: The message buffer
  *  @size: Length of buffer
@@ -315,17 +314,18 @@ out_no_read:
 }
 
 /**
- *  e1000_init_mbx_params_vf - set initial values for vf mailbox
+ *  e1000_init_mbx_params_vf - set initial values for VF mailbox
  *  @hw: pointer to the HW structure
  *
- *  Initializes the hw->mbx struct to correct values for vf mailbox
+ *  Initializes the hw->mbx struct to correct values for VF mailbox
  */
 s32 e1000_init_mbx_params_vf(struct e1000_hw *hw)
 {
        struct e1000_mbx_info *mbx = &hw->mbx;
 
        /* start mailbox as timed out and let the reset_hw call set the timeout
-        * value to being communications */
+        * value to being communications
+        */
        mbx->timeout = 0;
        mbx->usec_delay = E1000_VF_MBX_INIT_DELAY;
 
@@ -347,4 +347,3 @@ s32 e1000_init_mbx_params_vf(struct e1000_hw *hw)
 
        return E1000_SUCCESS;
 }
-

drivers/net/ethernet/intel/igbvf/mbx.h
index 24370bc..f800bf8 100644
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 
 #include "vf.h"
 
-#define E1000_V2PMAILBOX_REQ   0x00000001 /* Request for PF Ready bit */
-#define E1000_V2PMAILBOX_ACK   0x00000002 /* Ack PF message received */
-#define E1000_V2PMAILBOX_VFU   0x00000004 /* VF owns the mailbox buffer */
-#define E1000_V2PMAILBOX_PFU   0x00000008 /* PF owns the mailbox buffer */
-#define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
-#define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
-#define E1000_V2PMAILBOX_RSTI  0x00000040 /* PF has reset indication */
-#define E1000_V2PMAILBOX_RSTD  0x00000080 /* PF has indicated reset done */
+#define E1000_V2PMAILBOX_REQ   0x00000001 /* Request for PF Ready bit */
+#define E1000_V2PMAILBOX_ACK   0x00000002 /* Ack PF message received */
+#define E1000_V2PMAILBOX_VFU   0x00000004 /* VF owns the mailbox buffer */
+#define E1000_V2PMAILBOX_PFU   0x00000008 /* PF owns the mailbox buffer */
+#define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
+#define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
+#define E1000_V2PMAILBOX_RSTI  0x00000040 /* PF has reset indication */
+#define E1000_V2PMAILBOX_RSTD  0x00000080 /* PF has indicated reset done */
 #define E1000_V2PMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
 
-#define E1000_VFMAILBOX_SIZE   16 /* 16 32 bit words - 64 bytes */
+#define E1000_VFMAILBOX_SIZE   16 /* 16 32 bit words - 64 bytes */
 
 /* If it's a E1000_VF_* msg then it originates in the VF and is sent to the
  * PF.  The reverse is true if it is E1000_PF_*.
  * Message ACK's are the value or'd with 0xF0000000
  */
-#define E1000_VT_MSGTYPE_ACK      0x80000000  /* Messages below or'd with
-                                               * this are the ACK */
-#define E1000_VT_MSGTYPE_NACK     0x40000000  /* Messages below or'd with
-                                               * this are the NACK */
-#define E1000_VT_MSGTYPE_CTS      0x20000000  /* Indicates that VF is still
-                                                 clear to send requests */
+/* Messages below or'd with this are the ACK */
+#define E1000_VT_MSGTYPE_ACK   0x80000000
+/* Messages below or'd with this are the NACK */
+#define E1000_VT_MSGTYPE_NACK  0x40000000
+/* Indicates that VF is still clear to send requests */
+#define E1000_VT_MSGTYPE_CTS   0x20000000
 
 /* We have a total wait time of 1s for vf mailbox posted messages */
-#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* retry count for mailbox timeout */
-#define E1000_VF_MBX_INIT_DELAY   500  /* usec delay between retries */
+#define E1000_VF_MBX_INIT_TIMEOUT      2000 /* retry count for mbx timeout */
+#define E1000_VF_MBX_INIT_DELAY                500  /* usec delay between retries */
 
-#define E1000_VT_MSGINFO_SHIFT    16
+#define E1000_VT_MSGINFO_SHIFT 16
 /* bits 23:16 are used for exra info for certain messages */
-#define E1000_VT_MSGINFO_MASK     (0xFF << E1000_VT_MSGINFO_SHIFT)
+#define E1000_VT_MSGINFO_MASK  (0xFF << E1000_VT_MSGINFO_SHIFT)
 
-#define E1000_VF_RESET            0x01 /* VF requests reset */
-#define E1000_VF_SET_MAC_ADDR     0x02 /* VF requests PF to set MAC addr */
-#define E1000_VF_SET_MULTICAST    0x03 /* VF requests PF to set MC addr */
-#define E1000_VF_SET_VLAN         0x04 /* VF requests PF to set VLAN */
-#define E1000_VF_SET_LPE          0x05 /* VF requests PF to set VMOLR.LPE */
+#define E1000_VF_RESET         0x01 /* VF requests reset */
+#define E1000_VF_SET_MAC_ADDR  0x02 /* VF requests PF to set MAC addr */
+#define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define E1000_VF_SET_VLAN      0x04 /* VF requests PF to set VLAN */
+#define E1000_VF_SET_LPE       0x05 /* VF requests PF to set VMOLR.LPE */
 
-#define E1000_PF_CONTROL_MSG      0x0100 /* PF control message */
+#define E1000_PF_CONTROL_MSG   0x0100 /* PF control message */
 
 void e1000_init_mbx_ops_generic(struct e1000_hw *hw);
 s32 e1000_init_mbx_params_vf(struct e1000_hw *);

drivers/net/ethernet/intel/igbvf/netdev.c
index ebf9d4a..55f1404 100644
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
@@ -66,26 +65,27 @@ static void igbvf_set_interrupt_capability(struct igbvf_adapter *);
 static void igbvf_reset_interrupt_capability(struct igbvf_adapter *);
 
 static struct igbvf_info igbvf_vf_info = {
-       .mac                    = e1000_vfadapt,
-       .flags                  = 0,
-       .pba                    = 10,
-       .init_ops               = e1000_init_function_pointers_vf,
+       .mac            = e1000_vfadapt,
+       .flags          = 0,
+       .pba            = 10,
+       .init_ops       = e1000_init_function_pointers_vf,
 };
 
 static struct igbvf_info igbvf_i350_vf_info = {
-       .mac                    = e1000_vfadapt_i350,
-       .flags                  = 0,
-       .pba                    = 10,
-       .init_ops               = e1000_init_function_pointers_vf,
+       .mac            = e1000_vfadapt_i350,
+       .flags          = 0,
+       .pba            = 10,
+       .init_ops       = e1000_init_function_pointers_vf,
 };
 
 static const struct igbvf_info *igbvf_info_tbl[] = {
-       [board_vf]              = &igbvf_vf_info,
-       [board_i350_vf]         = &igbvf_i350_vf_info,
+       [board_vf]      = &igbvf_vf_info,
+       [board_i350_vf] = &igbvf_i350_vf_info,
 };
 
 /**
  * igbvf_desc_unused - calculate if we have unused descriptors
+ * @rx_ring: address of receive ring structure
  **/
 static int igbvf_desc_unused(struct igbvf_ring *ring)
 {
@@ -103,9 +103,9 @@ static int igbvf_desc_unused(struct igbvf_ring *ring)
  * @skb: pointer to sk_buff to be indicated to stack
  **/
 static void igbvf_receive_skb(struct igbvf_adapter *adapter,
-                              struct net_device *netdev,
-                              struct sk_buff *skb,
-                              u32 status, u16 vlan)
+                             struct net_device *netdev,
+                             struct sk_buff *skb,
+                             u32 status, u16 vlan)
 {
        u16 vid;
 
@@ -123,7 +123,7 @@ static void igbvf_receive_skb(struct igbvf_adapter *adapter,
 }
 
 static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
-                                         u32 status_err, struct sk_buff *skb)
+                                        u32 status_err, struct sk_buff *skb)
 {
        skb_checksum_none_assert(skb);
 
@@ -153,7 +153,7 @@ static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
  * @cleaned_count: number of buffers to repopulate
  **/
 static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
-                                   int cleaned_count)
+                                  int cleaned_count)
 {
        struct igbvf_adapter *adapter = rx_ring->adapter;
        struct net_device *netdev = adapter->netdev;
@@ -188,8 +188,8 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
                        }
                        buffer_info->page_dma =
                                dma_map_page(&pdev->dev, buffer_info->page,
-                                            buffer_info->page_offset,
-                                            PAGE_SIZE / 2,
+                                            buffer_info->page_offset,
+                                            PAGE_SIZE / 2,
                                             DMA_FROM_DEVICE);
                        if (dma_mapping_error(&pdev->dev,
                                              buffer_info->page_dma)) {
@@ -209,7 +209,7 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
 
                        buffer_info->skb = skb;
                        buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
-                                                         bufsz,
+                                                         bufsz,
                                                          DMA_FROM_DEVICE);
                        if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
                                dev_kfree_skb(buffer_info->skb);
@@ -219,14 +219,14 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
                        }
                }
                /* Refresh the desc even if buffer_addrs didn't change because
-                * each write-back erases this info. */
+                * each write-back erases this info.
+                */
                if (adapter->rx_ps_hdr_size) {
                        rx_desc->read.pkt_addr =
                             cpu_to_le64(buffer_info->page_dma);
                        rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
                } else {
-                       rx_desc->read.pkt_addr =
-                            cpu_to_le64(buffer_info->dma);
+                       rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
                        rx_desc->read.hdr_addr = 0;
                }
 
@@ -247,7 +247,8 @@ no_buffers:
                /* Force memory writes to complete before letting h/w
                 * know there are new descriptors to fetch.  (Only
                 * applicable for weak-ordered memory model archs,
-                * such as IA-64). */
+                * such as IA-64).
+               */
                wmb();
                writel(i, adapter->hw.hw_addr + rx_ring->tail);
        }
@@ -261,7 +262,7 @@ no_buffers:
  * is no guarantee that everything was cleaned
  **/
 static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
-                               int *work_done, int work_to_do)
+                              int *work_done, int work_to_do)
 {
        struct igbvf_ring *rx_ring = adapter->rx_ring;
        struct net_device *netdev = adapter->netdev;
@@ -292,8 +293,9 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
                 * that case, it fills the header buffer and spills the rest
                 * into the page.
                 */
-               hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) &
-                 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
+               hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info)
+                      & E1000_RXDADV_HDRBUFLEN_MASK) >>
+                      E1000_RXDADV_HDRBUFLEN_SHIFT;
                if (hlen > adapter->rx_ps_hdr_size)
                        hlen = adapter->rx_ps_hdr_size;
 
@@ -306,7 +308,7 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
                buffer_info->skb = NULL;
                if (!adapter->rx_ps_hdr_size) {
                        dma_unmap_single(&pdev->dev, buffer_info->dma,
-                                        adapter->rx_buffer_len,
+                                        adapter->rx_buffer_len,
                                         DMA_FROM_DEVICE);
                        buffer_info->dma = 0;
                        skb_put(skb, length);
@@ -315,21 +317,21 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
 
                if (!skb_shinfo(skb)->nr_frags) {
                        dma_unmap_single(&pdev->dev, buffer_info->dma,
-                                        adapter->rx_ps_hdr_size,
+                                        adapter->rx_ps_hdr_size,
                                         DMA_FROM_DEVICE);
                        skb_put(skb, hlen);
                }
 
                if (length) {
                        dma_unmap_page(&pdev->dev, buffer_info->page_dma,
-                                      PAGE_SIZE / 2,
+                                      PAGE_SIZE / 2,
                                       DMA_FROM_DEVICE);
                        buffer_info->page_dma = 0;
 
                        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-                                          buffer_info->page,
-                                          buffer_info->page_offset,
-                                          length);
+                                          buffer_info->page,
+                                          buffer_info->page_offset,
+                                          length);
 
                        if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
                            (page_count(buffer_info->page) != 1))
@@ -370,7 +372,7 @@ send_up:
                skb->protocol = eth_type_trans(skb, netdev);
 
                igbvf_receive_skb(adapter, netdev, skb, staterr,
-                                 rx_desc->wb.upper.vlan);
+                                 rx_desc->wb.upper.vlan);
 
 next_desc:
                rx_desc->wb.upper.status_error = 0;
@@ -402,7 +404,7 @@ next_desc:
 }
 
 static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
-                            struct igbvf_buffer *buffer_info)
+                           struct igbvf_buffer *buffer_info)
 {
        if (buffer_info->dma) {
                if (buffer_info->mapped_as_page)
@@ -431,7 +433,7 @@ static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
  * Return 0 on success, negative on failure
  **/
 int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
-                             struct igbvf_ring *tx_ring)
+                            struct igbvf_ring *tx_ring)
 {
        struct pci_dev *pdev = adapter->pdev;
        int size;
@@ -458,7 +460,7 @@ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
 err:
        vfree(tx_ring->buffer_info);
        dev_err(&adapter->pdev->dev,
-               "Unable to allocate memory for the transmit descriptor ring\n");
+               "Unable to allocate memory for the transmit descriptor ring\n");
        return -ENOMEM;
 }
 
@@ -501,7 +503,7 @@ err:
        vfree(rx_ring->buffer_info);
        rx_ring->buffer_info = NULL;
        dev_err(&adapter->pdev->dev,
-               "Unable to allocate memory for the receive descriptor ring\n");
+               "Unable to allocate memory for the receive descriptor ring\n");
        return -ENOMEM;
 }
 
@@ -578,13 +580,13 @@ static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
        for (i = 0; i < rx_ring->count; i++) {
                buffer_info = &rx_ring->buffer_info[i];
                if (buffer_info->dma) {
-                       if (adapter->rx_ps_hdr_size){
+                       if (adapter->rx_ps_hdr_size) {
                                dma_unmap_single(&pdev->dev, buffer_info->dma,
-                                                adapter->rx_ps_hdr_size,
+                                                adapter->rx_ps_hdr_size,
                                                 DMA_FROM_DEVICE);
                        } else {
                                dma_unmap_single(&pdev->dev, buffer_info->dma,
-                                                adapter->rx_buffer_len,
+                                                adapter->rx_buffer_len,
                                                 DMA_FROM_DEVICE);
                        }
                        buffer_info->dma = 0;
@@ -599,7 +601,7 @@ static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
                        if (buffer_info->page_dma)
                                dma_unmap_page(&pdev->dev,
                                               buffer_info->page_dma,
-                                              PAGE_SIZE / 2,
+                                              PAGE_SIZE / 2,
                                               DMA_FROM_DEVICE);
                        put_page(buffer_info->page);
                        buffer_info->page = NULL;
@@ -638,7 +640,7 @@ void igbvf_free_rx_resources(struct igbvf_ring *rx_ring)
        rx_ring->buffer_info = NULL;
 
        dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
-                         rx_ring->dma);
+                         rx_ring->dma);
        rx_ring->desc = NULL;
 }
 
@@ -649,13 +651,12 @@ void igbvf_free_rx_resources(struct igbvf_ring *rx_ring)
  * @packets: the number of packets during this measurement interval
  * @bytes: the number of bytes during this measurement interval
  *
- *      Stores a new ITR value based on packets and byte
- *      counts during the last interrupt.  The advantage of per interrupt
- *      computation is faster updates and more accurate ITR for the current
- *      traffic pattern.  Constants in this function were computed
- *      based on theoretical maximum wire speed and thresholds were set based
- *      on testing data as well as attempting to minimize response time
- *      while increasing bulk throughput.
+ * Stores a new ITR value based on packets and byte counts during the last
+ * interrupt.  The advantage of per interrupt computation is faster updates
+ * and more accurate ITR for the current traffic pattern.  Constants in this
+ * function were computed based on theoretical maximum wire speed and thresholds
+ * were set based on testing data as well as attempting to minimize response
+ * time while increasing bulk throughput.
  **/
 static enum latency_range igbvf_update_itr(struct igbvf_adapter *adapter,
                                           enum latency_range itr_setting,
@@ -744,17 +745,15 @@ static void igbvf_set_itr(struct igbvf_adapter *adapter)
 
        new_itr = igbvf_range_to_itr(adapter->tx_ring->itr_range);
 
-
        if (new_itr != adapter->tx_ring->itr_val) {
                u32 current_itr = adapter->tx_ring->itr_val;
-               /*
-                * this attempts to bias the interrupt rate towards Bulk
+               /* this attempts to bias the interrupt rate towards Bulk
                 * by adding intermediate steps when interrupt rate is
                 * increasing
                 */
                new_itr = new_itr > current_itr ?
-                            min(current_itr + (new_itr >> 2), new_itr) :
-                            new_itr;
+                         min(current_itr + (new_itr >> 2), new_itr) :
+                         new_itr;
                adapter->tx_ring->itr_val = new_itr;
 
                adapter->tx_ring->set_itr = 1;
@@ -772,9 +771,10 @@ static void igbvf_set_itr(struct igbvf_adapter *adapter)
 
        if (new_itr != adapter->rx_ring->itr_val) {
                u32 current_itr = adapter->rx_ring->itr_val;
+
                new_itr = new_itr > current_itr ?
-                            min(current_itr + (new_itr >> 2), new_itr) :
-                            new_itr;
+                         min(current_itr + (new_itr >> 2), new_itr) :
+                         new_itr;
                adapter->rx_ring->itr_val = new_itr;
 
                adapter->rx_ring->set_itr = 1;
@@ -829,7 +829,7 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
                                segs = skb_shinfo(skb)->gso_segs ?: 1;
                                /* multiply data chunks by size of headers */
                                bytecount = ((segs - 1) * skb_headlen(skb)) +
-                                           skb->len;
+                                           skb->len;
                                total_packets += segs;
                                total_bytes += bytecount;
                        }
@@ -849,9 +849,8 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
 
        tx_ring->next_to_clean = i;
 
-       if (unlikely(count &&
-                    netif_carrier_ok(netdev) &&
-                    igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) {
+       if (unlikely(count && netif_carrier_ok(netdev) &&
+           igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
@@ -902,8 +901,9 @@ static irqreturn_t igbvf_intr_msix_tx(int irq, void *data)
        adapter->total_tx_bytes = 0;
        adapter->total_tx_packets = 0;
 
-       /* auto mask will automatically reenable the interrupt when we write
-        * EICS */
+       /* auto mask will automatically re-enable the interrupt when we write
+        * EICS
+        */
        if (!igbvf_clean_tx_irq(tx_ring))
                /* Ring was not completely cleaned, so fire another interrupt */
                ew32(EICS, tx_ring->eims_value);
@@ -941,15 +941,16 @@ static irqreturn_t igbvf_intr_msix_rx(int irq, void *data)
 #define IGBVF_NO_QUEUE -1
 
 static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
-                                int tx_queue, int msix_vector)
+                               int tx_queue, int msix_vector)
 {
        struct e1000_hw *hw = &adapter->hw;
        u32 ivar, index;
 
        /* 82576 uses a table-based method for assigning vectors.
-          Each queue has a single entry in the table to which we write
-          a vector number along with a "valid" bit.  Sadly, the layout
-          of the table is somewhat counterintuitive. */
+        * Each queue has a single entry in the table to which we write
+        * a vector number along with a "valid" bit.  Sadly, the layout
+        * of the table is somewhat counterintuitive.
+        */
        if (rx_queue > IGBVF_NO_QUEUE) {
                index = (rx_queue >> 1);
                ivar = array_er32(IVAR0, index);
@@ -984,6 +985,7 @@ static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
 
 /**
  * igbvf_configure_msix - Configure MSI-X hardware
+ * @adapter: board private structure
  *
  * igbvf_configure_msix sets up the hardware to properly
  * generate MSI-X interrupts.
@@ -1027,6 +1029,7 @@ static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter)
 
 /**
  * igbvf_set_interrupt_capability - set MSI or MSI-X if supported
+ * @adapter: board private structure
  *
  * Attempt to configure interrupts using the best available
  * capabilities of the hardware and kernel.
@@ -1036,27 +1039,28 @@ static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter)
        int err = -ENOMEM;
        int i;
 
-       /* we allocate 3 vectors, 1 for tx, 1 for rx, one for pf messages */
+       /* we allocate 3 vectors, 1 for Tx, 1 for Rx, one for PF messages */
        adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry),
-                                       GFP_KERNEL);
+                                       GFP_KERNEL);
        if (adapter->msix_entries) {
                for (i = 0; i < 3; i++)
                        adapter->msix_entries[i].entry = i;
 
                err = pci_enable_msix_range(adapter->pdev,
-                                           adapter->msix_entries, 3, 3);
+                                           adapter->msix_entries, 3, 3);
        }
 
        if (err < 0) {
                /* MSI-X failed */
                dev_err(&adapter->pdev->dev,
-                       "Failed to initialize MSI-X interrupts.\n");
+                       "Failed to initialize MSI-X interrupts.\n");
                igbvf_reset_interrupt_capability(adapter);
        }
 }
 
 /**
  * igbvf_request_msix - Initialize MSI-X interrupts
+ * @adapter: board private structure
  *
  * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the
  * kernel.
@@ -1075,8 +1079,8 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
        }
 
        err = request_irq(adapter->msix_entries[vector].vector,
-                         igbvf_intr_msix_tx, 0, adapter->tx_ring->name,
-                         netdev);
+                         igbvf_intr_msix_tx, 0, adapter->tx_ring->name,
+                         netdev);
        if (err)
                goto out;
 
@@ -1085,8 +1089,8 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
        vector++;
 
        err = request_irq(adapter->msix_entries[vector].vector,
-                         igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
-                         netdev);
+                         igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
+                         netdev);
        if (err)
                goto out;
 
@@ -1095,7 +1099,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
        vector++;
 
        err = request_irq(adapter->msix_entries[vector].vector,
-                         igbvf_msix_other, 0, netdev->name, netdev);
+                         igbvf_msix_other, 0, netdev->name, netdev);
        if (err)
                goto out;
 
@@ -1130,6 +1134,7 @@ static int igbvf_alloc_queues(struct igbvf_adapter *adapter)
 
 /**
  * igbvf_request_irq - initialize interrupts
+ * @adapter: board private structure
  *
  * Attempts to configure interrupts using the best available
  * capabilities of the hardware and kernel.
@@ -1146,7 +1151,7 @@ static int igbvf_request_irq(struct igbvf_adapter *adapter)
                return err;
 
        dev_err(&adapter->pdev->dev,
-               "Unable to allocate interrupt, Error: %d\n", err);
+               "Unable to allocate interrupt, Error: %d\n", err);
 
        return err;
 }
@@ -1164,6 +1169,7 @@ static void igbvf_free_irq(struct igbvf_adapter *adapter)
 
 /**
  * igbvf_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
  **/
 static void igbvf_irq_disable(struct igbvf_adapter *adapter)
 {
@@ -1177,6 +1183,7 @@ static void igbvf_irq_disable(struct igbvf_adapter *adapter)
 
 /**
  * igbvf_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
  **/
 static void igbvf_irq_enable(struct igbvf_adapter *adapter)
 {
@@ -1252,7 +1259,7 @@ static int igbvf_vlan_rx_kill_vid(struct net_device *netdev,
 
        if (hw->mac.ops.set_vfta(hw, vid, false)) {
                dev_err(&adapter->pdev->dev,
-                       "Failed to remove vlan id %d\n", vid);
+                       "Failed to remove vlan id %d\n", vid);
                return -EINVAL;
        }
        clear_bit(vid, adapter->active_vlans);
@@ -1298,7 +1305,7 @@ static void igbvf_configure_tx(struct igbvf_adapter *adapter)
 
        /* Turn off Relaxed Ordering on head write-backs.  The writebacks
         * MUST be delivered in order or it will completely screw up
-        * our bookeeping.
+        * our bookkeeping.
         */
        dca_txctrl = er32(DCA_TXCTRL(0));
        dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
@@ -1325,15 +1332,15 @@ static void igbvf_setup_srrctl(struct igbvf_adapter *adapter)
        u32 srrctl = 0;
 
        srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK |
-                   E1000_SRRCTL_BSIZEHDR_MASK |
-                   E1000_SRRCTL_BSIZEPKT_MASK);
+                   E1000_SRRCTL_BSIZEHDR_MASK |
+                   E1000_SRRCTL_BSIZEPKT_MASK);
 
        /* Enable queue drop to avoid head of line blocking */
        srrctl |= E1000_SRRCTL_DROP_EN;
 
        /* Setup buffer sizes */
        srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >>
-                 E1000_SRRCTL_BSIZEPKT_SHIFT;
+                 E1000_SRRCTL_BSIZEPKT_SHIFT;
 
        if (adapter->rx_buffer_len < 2048) {
                adapter->rx_ps_hdr_size = 0;
@@ -1341,7 +1348,7 @@ static void igbvf_setup_srrctl(struct igbvf_adapter *adapter)
        } else {
                adapter->rx_ps_hdr_size = 128;
                srrctl |= adapter->rx_ps_hdr_size <<
-                         E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
+                         E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
                srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
        }
 
@@ -1369,8 +1376,7 @@ static void igbvf_configure_rx(struct igbvf_adapter *adapter)
 
        rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc);
 
-       /*
-        * Setup the HW Rx Head and Tail Descriptor Pointers and
+       /* Setup the HW Rx Head and Tail Descriptor Pointers and
         * the Base and Length of the Rx Descriptor Ring
         */
        rdba = rx_ring->dma;
@@ -1441,10 +1447,11 @@ static void igbvf_configure(struct igbvf_adapter *adapter)
        igbvf_setup_srrctl(adapter);
        igbvf_configure_rx(adapter);
        igbvf_alloc_rx_buffers(adapter->rx_ring,
-                              igbvf_desc_unused(adapter->rx_ring));
+                              igbvf_desc_unused(adapter->rx_ring));
 }
 
 /* igbvf_reset - bring the hardware into a known good state
+ * @adapter: private board structure
  *
  * This function boots the hardware and enables some settings that
  * require a configuration cycle of the hardware - those cannot be
@@ -1494,7 +1501,6 @@ int igbvf_up(struct igbvf_adapter *adapter)
        hw->mac.get_link_status = 1;
        mod_timer(&adapter->watchdog_timer, jiffies + 1);
 
-
        return 0;
 }
 
@@ -1504,8 +1510,7 @@ void igbvf_down(struct igbvf_adapter *adapter)
        struct e1000_hw *hw = &adapter->hw;
        u32 rxdctl, txdctl;
 
-       /*
-        * signal that we're down so the interrupt handler does not
+       /* signal that we're down so the interrupt handler does not
         * reschedule our watchdog timer
         */
        set_bit(__IGBVF_DOWN, &adapter->state);
@@ -1662,8 +1667,7 @@ static int igbvf_open(struct net_device *netdev)
        if (err)
                goto err_setup_rx;
 
-       /*
-        * before we allocate an interrupt, we must be ready to handle it.
+       /* before we allocate an interrupt, we must be ready to handle it.
         * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
         * as soon as we call pci_request_irq, so we have to setup our
         * clean_rx handler before we do so.
@@ -1725,6 +1729,7 @@ static int igbvf_close(struct net_device *netdev)
 
        return 0;
 }
+
 /**
  * igbvf_set_mac - Change the Ethernet Address of the NIC
  * @netdev: network interface device structure
@@ -1753,15 +1758,15 @@ static int igbvf_set_mac(struct net_device *netdev, void *p)
        return 0;
 }
 
-#define UPDATE_VF_COUNTER(reg, name)                                    \
-       {                                                               \
-               u32 current_counter = er32(reg);                        \
-               if (current_counter < adapter->stats.last_##name)       \
-                       adapter->stats.name += 0x100000000LL;           \
-               adapter->stats.last_##name = current_counter;           \
-               adapter->stats.name &= 0xFFFFFFFF00000000LL;            \
-               adapter->stats.name |= current_counter;                 \
-       }
+#define UPDATE_VF_COUNTER(reg, name) \
+{ \
+       u32 current_counter = er32(reg); \
+       if (current_counter < adapter->stats.last_##name) \
+               adapter->stats.name += 0x100000000LL; \
+       adapter->stats.last_##name = current_counter; \
+       adapter->stats.name &= 0xFFFFFFFF00000000LL; \
+       adapter->stats.name |= current_counter; \
+}
 
 /**
  * igbvf_update_stats - Update the board statistics counters
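The reflowed UPDATE_VF_COUNTER macro above stitches a 64-bit software statistic together from a 32-bit hardware register that is free-running and can wrap between reads. A minimal user-space sketch of the same arithmetic (an editor's illustration only, not part of this patch; the function name and sample values are invented for the example):

	#include <stdint.h>
	#include <stdio.h>

	/* Same rollover handling as UPDATE_VF_COUNTER, written as a plain
	 * function: bump the high word when the raw 32-bit value wraps,
	 * then splice the fresh low word back in.
	 */
	static void update_counter(uint64_t *stat, uint32_t *last, uint32_t current_counter)
	{
		if (current_counter < *last)	/* 32-bit register wrapped */
			*stat += 0x100000000ULL;
		*last = current_counter;
		*stat &= 0xFFFFFFFF00000000ULL;	/* keep the accumulated high word */
		*stat |= current_counter;	/* take the fresh low word */
	}

	int main(void)
	{
		uint64_t gprc = 0;
		uint32_t last = 0;

		update_counter(&gprc, &last, 0xFFFFFFF0);	/* near the 32-bit limit */
		update_counter(&gprc, &last, 0x00000010);	/* wrapped: high word advances */
		printf("%llu\n", (unsigned long long)gprc);	/* prints 4294967312 (0x100000010) */
		return 0;
	}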
@@ -1772,8 +1777,7 @@ void igbvf_update_stats(struct igbvf_adapter *adapter)
        struct e1000_hw *hw = &adapter->hw;
        struct pci_dev *pdev = adapter->pdev;
 
-       /*
-        * Prevent stats update while adapter is being reset, link is down
+       /* Prevent stats update while adapter is being reset, link is down
         * or if the pci connection is down.
         */
        if (adapter->link_speed == 0)
@@ -1832,7 +1836,7 @@ static bool igbvf_has_link(struct igbvf_adapter *adapter)
  **/
 static void igbvf_watchdog(unsigned long data)
 {
-       struct igbvf_adapter *adapter = (struct igbvf_adapter *) data;
+       struct igbvf_adapter *adapter = (struct igbvf_adapter *)data;
 
        /* Do the rest outside of interrupt context */
        schedule_work(&adapter->watchdog_task);
@@ -1841,8 +1845,8 @@ static void igbvf_watchdog(unsigned long data)
 static void igbvf_watchdog_task(struct work_struct *work)
 {
        struct igbvf_adapter *adapter = container_of(work,
-                                                    struct igbvf_adapter,
-                                                    watchdog_task);
+                                                    struct igbvf_adapter,
+                                                    watchdog_task);
        struct net_device *netdev = adapter->netdev;
        struct e1000_mac_info *mac = &adapter->hw.mac;
        struct igbvf_ring *tx_ring = adapter->tx_ring;
@@ -1855,8 +1859,8 @@ static void igbvf_watchdog_task(struct work_struct *work)
        if (link) {
                if (!netif_carrier_ok(netdev)) {
                        mac->ops.get_link_up_info(&adapter->hw,
-                                                 &adapter->link_speed,
-                                                 &adapter->link_duplex);
+                                                 &adapter->link_speed,
+                                                 &adapter->link_duplex);
                        igbvf_print_link_info(adapter);
 
                        netif_carrier_on(netdev);
@@ -1876,10 +1880,9 @@ static void igbvf_watchdog_task(struct work_struct *work)
                igbvf_update_stats(adapter);
        } else {
                tx_pending = (igbvf_desc_unused(tx_ring) + 1 <
-                             tx_ring->count);
+                             tx_ring->count);
                if (tx_pending) {
-                       /*
-                        * We've lost link, so the controller stops DMA,
+                       /* We've lost link, so the controller stops DMA,
                         * but we've got queued Tx work that's never going
                         * to get done, so reset controller to flush Tx.
                         * (Do the reset outside of interrupt context).
@@ -1898,15 +1901,15 @@ static void igbvf_watchdog_task(struct work_struct *work)
                          round_jiffies(jiffies + (2 * HZ)));
 }
 
-#define IGBVF_TX_FLAGS_CSUM             0x00000001
-#define IGBVF_TX_FLAGS_VLAN             0x00000002
-#define IGBVF_TX_FLAGS_TSO              0x00000004
-#define IGBVF_TX_FLAGS_IPV4             0x00000008
-#define IGBVF_TX_FLAGS_VLAN_MASK        0xffff0000
-#define IGBVF_TX_FLAGS_VLAN_SHIFT       16
+#define IGBVF_TX_FLAGS_CSUM            0x00000001
+#define IGBVF_TX_FLAGS_VLAN            0x00000002
+#define IGBVF_TX_FLAGS_TSO             0x00000004
+#define IGBVF_TX_FLAGS_IPV4            0x00000008
+#define IGBVF_TX_FLAGS_VLAN_MASK       0xffff0000
+#define IGBVF_TX_FLAGS_VLAN_SHIFT      16
 
 static int igbvf_tso(struct igbvf_adapter *adapter,
-                     struct igbvf_ring *tx_ring,
+                    struct igbvf_ring *tx_ring,
                     struct sk_buff *skb, u32 tx_flags, u8 *hdr_len,
                     __be16 protocol)
 {
@@ -1930,17 +1933,18 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
 
        if (protocol == htons(ETH_P_IP)) {
                struct iphdr *iph = ip_hdr(skb);
+
                iph->tot_len = 0;
                iph->check = 0;
                tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
-                                                        iph->daddr, 0,
-                                                        IPPROTO_TCP,
-                                                        0);
+                                                        iph->daddr, 0,
+                                                        IPPROTO_TCP,
+                                                        0);
        } else if (skb_is_gso_v6(skb)) {
                ipv6_hdr(skb)->payload_len = 0;
                tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-                                                      &ipv6_hdr(skb)->daddr,
-                                                      0, IPPROTO_TCP, 0);
+                                                      &ipv6_hdr(skb)->daddr,
+                                                      0, IPPROTO_TCP, 0);
        }
 
        i = tx_ring->next_to_use;
@@ -1984,7 +1988,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
 }
 
 static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
-                                 struct igbvf_ring *tx_ring,
+                                struct igbvf_ring *tx_ring,
                                 struct sk_buff *skb, u32 tx_flags,
                                 __be16 protocol)
 {
@@ -2005,8 +2009,7 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
                info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        info |= (skb_transport_header(skb) -
-                                skb_network_header(skb));
-
+                                skb_network_header(skb));
 
                context_desc->vlan_macip_lens = cpu_to_le32(info);
 
@@ -2055,6 +2058,10 @@ static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
 
        netif_stop_queue(netdev);
 
+       /* Herbert's original patch had:
+        *  smp_mb__after_netif_stop_queue();
+        * but since that doesn't exist yet, just open code it.
+        */
        smp_mb();
 
        /* We need to check again just in case room has been made available */
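The comment added here is the only hint at why the bare smp_mb() is needed: the queue must be visibly stopped before the free-descriptor count is re-read, otherwise a concurrent Tx clean-up could free descriptors without waking the queue. Schematically, the stop/re-check/wake pattern looks like this (an editor's sketch of the pattern, not the exact body of igbvf_maybe_stop_tx):

	netif_stop_queue(netdev);

	/* make the stopped state visible before re-reading the ring, so the
	 * clean-up path either sees the stop and wakes us, or we see the room
	 * it just freed and wake the queue ourselves
	 */
	smp_mb();

	if (igbvf_desc_unused(adapter->tx_ring) < size)
		return -EBUSY;

	netif_wake_queue(netdev);
	return 0;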
@@ -2067,11 +2074,11 @@ static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
        return 0;
 }
 
-#define IGBVF_MAX_TXD_PWR       16
-#define IGBVF_MAX_DATA_PER_TXD  (1 << IGBVF_MAX_TXD_PWR)
+#define IGBVF_MAX_TXD_PWR      16
+#define IGBVF_MAX_DATA_PER_TXD (1 << IGBVF_MAX_TXD_PWR)
 
 static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
-                                   struct igbvf_ring *tx_ring,
+                                  struct igbvf_ring *tx_ring,
                                   struct sk_buff *skb)
 {
        struct igbvf_buffer *buffer_info;
@@ -2093,7 +2100,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
        if (dma_mapping_error(&pdev->dev, buffer_info->dma))
                goto dma_error;
 
-
        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
                const struct skb_frag_struct *frag;
 
@@ -2111,7 +2117,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
                buffer_info->time_stamp = jiffies;
                buffer_info->mapped_as_page = true;
                buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len,
-                                               DMA_TO_DEVICE);
+                                                   DMA_TO_DEVICE);
                if (dma_mapping_error(&pdev->dev, buffer_info->dma))
                        goto dma_error;
        }
@@ -2133,7 +2139,7 @@ dma_error:
 
        /* clear timestamp and dma mappings for remaining portion of packet */
        while (count--) {
-               if (i==0)
+               if (i == 0)
                        i += tx_ring->count;
                i--;
                buffer_info = &tx_ring->buffer_info[i];
@@ -2144,10 +2150,10 @@ dma_error:
 }
 
 static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
-                                      struct igbvf_ring *tx_ring,
+                                     struct igbvf_ring *tx_ring,
                                      int tx_flags, int count,
                                      unsigned int first, u32 paylen,
-                                      u8 hdr_len)
+                                     u8 hdr_len)
 {
        union e1000_adv_tx_desc *tx_desc = NULL;
        struct igbvf_buffer *buffer_info;
@@ -2155,7 +2161,7 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
        unsigned int i;
 
        cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
-                       E1000_ADVTXD_DCMD_DEXT);
+                       E1000_ADVTXD_DCMD_DEXT);
 
        if (tx_flags & IGBVF_TX_FLAGS_VLAN)
                cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
@@ -2182,7 +2188,7 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
                tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
                tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
                tx_desc->read.cmd_type_len =
-                        cpu_to_le32(cmd_type_len | buffer_info->length);
+                        cpu_to_le32(cmd_type_len | buffer_info->length);
                tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
                i++;
                if (i == tx_ring->count)
@@ -2193,14 +2199,16 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
-        * such as IA-64). */
+        * such as IA-64).
+        */
        wmb();
 
        tx_ring->buffer_info[first].next_to_watch = tx_desc;
        tx_ring->next_to_use = i;
        writel(i, adapter->hw.hw_addr + tx_ring->tail);
        /* we need this if more than one processor can write to our tail
-        * at a time, it syncronizes IO on IA64/Altix systems */
+        * at a time, it synchronizes IO on IA64/Altix systems
+        */
        mmiowb();
 }
 
@@ -2225,11 +2233,10 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
                return NETDEV_TX_OK;
        }
 
-       /*
-        * need: count + 4 desc gap to keep tail from touching
-         *       + 2 desc gap to keep tail from touching head,
-         *       + 1 desc for skb->data,
-         *       + 1 desc for context descriptor,
+       /* need: count + 4 desc gap to keep tail from touching
+        *       + 2 desc gap to keep tail from touching head,
+        *       + 1 desc for skb->data,
+        *       + 1 desc for context descriptor,
         * head, otherwise try next time
         */
        if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) {
@@ -2258,11 +2265,10 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
        if (tso)
                tx_flags |= IGBVF_TX_FLAGS_TSO;
        else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags, protocol) &&
-                (skb->ip_summed == CHECKSUM_PARTIAL))
+                (skb->ip_summed == CHECKSUM_PARTIAL))
                tx_flags |= IGBVF_TX_FLAGS_CSUM;
 
-       /*
-        * count reflects descriptors mapped, if 0 then mapping error
+       /* count reflects descriptors mapped, if 0 then mapping error
         * has occurred and we need to rewind the descriptor queue
         */
        count = igbvf_tx_map_adv(adapter, tx_ring, skb);
@@ -2313,6 +2319,7 @@ static void igbvf_tx_timeout(struct net_device *netdev)
 static void igbvf_reset_task(struct work_struct *work)
 {
        struct igbvf_adapter *adapter;
+
        adapter = container_of(work, struct igbvf_adapter, reset_task);
 
        igbvf_reinit_locked(adapter);
@@ -2356,14 +2363,13 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
        }
 
        while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
-               msleep(1);
+               usleep_range(1000, 2000);
        /* igbvf_down has a dependency on max_frame_size */
        adapter->max_frame_size = max_frame;
        if (netif_running(netdev))
                igbvf_down(adapter);
 
-       /*
-        * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+       /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
         * means we reserve 2 more, this pushes us to allocate from the next
         * larger slab size.
         * i.e. RXBUFFER_2048 --> size-4096 slab
@@ -2382,15 +2388,14 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
                adapter->rx_buffer_len = PAGE_SIZE / 2;
 #endif
 
-
        /* adjust allocation if LPE protects us, and we aren't using SBP */
        if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
-            (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
+           (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
                adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN +
-                                        ETH_FCS_LEN;
+                                        ETH_FCS_LEN;
 
        dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
-                netdev->mtu, new_mtu);
+                netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
 
        if (netif_running(netdev))
@@ -2477,8 +2482,7 @@ static void igbvf_shutdown(struct pci_dev *pdev)
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling 'interrupt' - used by things like netconsole to send skbs
+/* Polling 'interrupt' - used by things like netconsole to send skbs
  * without having to re-enable interrupts. It's not called while
  * the interrupt routine is executing.
  */
@@ -2503,7 +2507,7 @@ static void igbvf_netpoll(struct net_device *netdev)
  * this device has been detected.
  */
 static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev,
-                                                pci_channel_state_t state)
+                                               pci_channel_state_t state)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igbvf_adapter *adapter = netdev_priv(netdev);
@@ -2583,7 +2587,7 @@ static void igbvf_print_device_info(struct igbvf_adapter *adapter)
 }
 
 static int igbvf_set_features(struct net_device *netdev,
-       netdev_features_t features)
+                             netdev_features_t features)
 {
        struct igbvf_adapter *adapter = netdev_priv(netdev);
 
@@ -2596,21 +2600,21 @@ static int igbvf_set_features(struct net_device *netdev,
 }
 
 static const struct net_device_ops igbvf_netdev_ops = {
-       .ndo_open                       = igbvf_open,
-       .ndo_stop                       = igbvf_close,
-       .ndo_start_xmit                 = igbvf_xmit_frame,
-       .ndo_get_stats                  = igbvf_get_stats,
-       .ndo_set_rx_mode                = igbvf_set_multi,
-       .ndo_set_mac_address            = igbvf_set_mac,
-       .ndo_change_mtu                 = igbvf_change_mtu,
-       .ndo_do_ioctl                   = igbvf_ioctl,
-       .ndo_tx_timeout                 = igbvf_tx_timeout,
-       .ndo_vlan_rx_add_vid            = igbvf_vlan_rx_add_vid,
-       .ndo_vlan_rx_kill_vid           = igbvf_vlan_rx_kill_vid,
+       .ndo_open               = igbvf_open,
+       .ndo_stop               = igbvf_close,
+       .ndo_start_xmit         = igbvf_xmit_frame,
+       .ndo_get_stats          = igbvf_get_stats,
+       .ndo_set_rx_mode        = igbvf_set_multi,
+       .ndo_set_mac_address    = igbvf_set_mac,
+       .ndo_change_mtu         = igbvf_change_mtu,
+       .ndo_do_ioctl           = igbvf_ioctl,
+       .ndo_tx_timeout         = igbvf_tx_timeout,
+       .ndo_vlan_rx_add_vid    = igbvf_vlan_rx_add_vid,
+       .ndo_vlan_rx_kill_vid   = igbvf_vlan_rx_kill_vid,
 #ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller            = igbvf_netpoll,
+       .ndo_poll_controller    = igbvf_netpoll,
 #endif
-       .ndo_set_features               = igbvf_set_features,
+       .ndo_set_features       = igbvf_set_features,
 };
 
 /**
@@ -2645,8 +2649,8 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        } else {
                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
-                       dev_err(&pdev->dev, "No usable DMA "
-                               "configuration, aborting\n");
+                       dev_err(&pdev->dev,
+                               "No usable DMA configuration, aborting\n");
                        goto err_dma;
                }
        }
@@ -2686,7 +2690,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        err = -EIO;
        adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0),
-                                     pci_resource_len(pdev, 0));
+                                     pci_resource_len(pdev, 0));
 
        if (!adapter->hw.hw_addr)
                goto err_ioremap;
@@ -2712,16 +2716,16 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        adapter->bd_number = cards_found++;
 
        netdev->hw_features = NETIF_F_SG |
-                          NETIF_F_IP_CSUM |
+                          NETIF_F_IP_CSUM |
                           NETIF_F_IPV6_CSUM |
                           NETIF_F_TSO |
                           NETIF_F_TSO6 |
                           NETIF_F_RXCSUM;
 
        netdev->features = netdev->hw_features |
-                          NETIF_F_HW_VLAN_CTAG_TX |
-                          NETIF_F_HW_VLAN_CTAG_RX |
-                          NETIF_F_HW_VLAN_CTAG_FILTER;
+                          NETIF_F_HW_VLAN_CTAG_TX |
+                          NETIF_F_HW_VLAN_CTAG_RX |
+                          NETIF_F_HW_VLAN_CTAG_FILTER;
 
        if (pci_using_dac)
                netdev->features |= NETIF_F_HIGHDMA;
@@ -2742,7 +2746,8 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                if (err)
                        dev_info(&pdev->dev, "Error reading MAC address.\n");
                else if (is_zero_ether_addr(adapter->hw.mac.addr))
-                       dev_info(&pdev->dev, "MAC address not assigned by administrator.\n");
+                       dev_info(&pdev->dev,
+                                "MAC address not assigned by administrator.\n");
                memcpy(netdev->dev_addr, adapter->hw.mac.addr,
                       netdev->addr_len);
        }
@@ -2751,11 +2756,11 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                dev_info(&pdev->dev, "Assigning random MAC address.\n");
                eth_hw_addr_random(netdev);
                memcpy(adapter->hw.mac.addr, netdev->dev_addr,
-                       netdev->addr_len);
+                      netdev->addr_len);
        }
 
        setup_timer(&adapter->watchdog_timer, &igbvf_watchdog,
-                   (unsigned long) adapter);
+                   (unsigned long)adapter);
 
        INIT_WORK(&adapter->reset_task, igbvf_reset_task);
        INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task);
@@ -2818,8 +2823,7 @@ static void igbvf_remove(struct pci_dev *pdev)
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
 
-       /*
-        * The watchdog timer may be rescheduled, so explicitly
+       /* The watchdog timer may be rescheduled, so explicitly
         * disable it from being rescheduled.
         */
        set_bit(__IGBVF_DOWN, &adapter->state);
@@ -2832,9 +2836,8 @@ static void igbvf_remove(struct pci_dev *pdev)
 
        igbvf_reset_interrupt_capability(adapter);
 
-       /*
-        * it is important to delete the napi struct prior to freeing the
-        * rx ring so that you do not end up with null pointer refs
+       /* it is important to delete the NAPI struct prior to freeing the
+        * Rx ring so that you do not end up with null pointer refs
         */
        netif_napi_del(&adapter->rx_ring->napi);
        kfree(adapter->tx_ring);
@@ -2866,17 +2869,17 @@ MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl);
 
 /* PCI Device API Driver */
 static struct pci_driver igbvf_driver = {
-       .name     = igbvf_driver_name,
-       .id_table = igbvf_pci_tbl,
-       .probe    = igbvf_probe,
-       .remove   = igbvf_remove,
+       .name           = igbvf_driver_name,
+       .id_table       = igbvf_pci_tbl,
+       .probe          = igbvf_probe,
+       .remove         = igbvf_remove,
 #ifdef CONFIG_PM
        /* Power Management Hooks */
-       .suspend  = igbvf_suspend,
-       .resume   = igbvf_resume,
+       .suspend        = igbvf_suspend,
+       .resume         = igbvf_resume,
 #endif
-       .shutdown = igbvf_shutdown,
-       .err_handler = &igbvf_err_handler
+       .shutdown       = igbvf_shutdown,
+       .err_handler    = &igbvf_err_handler
 };
 
 /**
@@ -2888,6 +2891,7 @@ static struct pci_driver igbvf_driver = {
 static int __init igbvf_init_module(void)
 {
        int ret;
+
        pr_info("%s - version %s\n", igbvf_driver_string, igbvf_driver_version);
        pr_info("%s\n", igbvf_copyright);
 
@@ -2909,7 +2913,6 @@ static void __exit igbvf_exit_module(void)
 }
 module_exit(igbvf_exit_module);
 
-
 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
 MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver");
 MODULE_LICENSE("GPL");
index 7dc6341..86a7c12 100644
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 #ifndef _E1000_REGS_H_
 #define _E1000_REGS_H_
 
-#define E1000_CTRL      0x00000 /* Device Control - RW */
-#define E1000_STATUS    0x00008 /* Device Status - RO */
-#define E1000_ITR       0x000C4 /* Interrupt Throttling Rate - RW */
-#define E1000_EICR      0x01580 /* Ext. Interrupt Cause Read - R/clr */
-#define E1000_EITR(_n)  (0x01680 + (0x4 * (_n)))
-#define E1000_EICS      0x01520 /* Ext. Interrupt Cause Set - W0 */
-#define E1000_EIMS      0x01524 /* Ext. Interrupt Mask Set/Read - RW */
-#define E1000_EIMC      0x01528 /* Ext. Interrupt Mask Clear - WO */
-#define E1000_EIAC      0x0152C /* Ext. Interrupt Auto Clear - RW */
-#define E1000_EIAM      0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */
-#define E1000_IVAR0     0x01700 /* Interrupt Vector Allocation (array) - RW */
-#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
-/*
- * Convenience macros
+#define E1000_CTRL     0x00000 /* Device Control - RW */
+#define E1000_STATUS   0x00008 /* Device Status - RO */
+#define E1000_ITR      0x000C4 /* Interrupt Throttling Rate - RW */
+#define E1000_EICR     0x01580 /* Ext. Interrupt Cause Read - R/clr */
+#define E1000_EITR(_n) (0x01680 + (0x4 * (_n)))
+#define E1000_EICS     0x01520 /* Ext. Interrupt Cause Set - W0 */
+#define E1000_EIMS     0x01524 /* Ext. Interrupt Mask Set/Read - RW */
+#define E1000_EIMC     0x01528 /* Ext. Interrupt Mask Clear - WO */
+#define E1000_EIAC     0x0152C /* Ext. Interrupt Auto Clear - RW */
+#define E1000_EIAM     0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */
+#define E1000_IVAR0    0x01700 /* Interrupt Vector Allocation (array) - RW */
+#define E1000_IVAR_MISC        0x01740 /* IVAR for "other" causes - RW */
+
+/* Convenience macros
  *
  * Note: "_n" is the queue number of the register to be written to.
  *
  * Example usage:
  * E1000_RDBAL_REG(current_rx_queue)
  */
-#define E1000_RDBAL(_n)      ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \
-                                         (0x0C000 + ((_n) * 0x40)))
-#define E1000_RDBAH(_n)      ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \
-                                         (0x0C004 + ((_n) * 0x40)))
-#define E1000_RDLEN(_n)      ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \
-                                         (0x0C008 + ((_n) * 0x40)))
-#define E1000_SRRCTL(_n)     ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \
-                                         (0x0C00C + ((_n) * 0x40)))
-#define E1000_RDH(_n)        ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \
-                                         (0x0C010 + ((_n) * 0x40)))
-#define E1000_RDT(_n)        ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \
-                                         (0x0C018 + ((_n) * 0x40)))
-#define E1000_RXDCTL(_n)     ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \
-                                         (0x0C028 + ((_n) * 0x40)))
-#define E1000_TDBAL(_n)      ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \
-                                         (0x0E000 + ((_n) * 0x40)))
-#define E1000_TDBAH(_n)      ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \
-                                         (0x0E004 + ((_n) * 0x40)))
-#define E1000_TDLEN(_n)      ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \
-                                         (0x0E008 + ((_n) * 0x40)))
-#define E1000_TDH(_n)        ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \
-                                         (0x0E010 + ((_n) * 0x40)))
-#define E1000_TDT(_n)        ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \
-                                         (0x0E018 + ((_n) * 0x40)))
-#define E1000_TXDCTL(_n)     ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \
-                                         (0x0E028 + ((_n) * 0x40)))
-#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8))
-#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8))
-#define E1000_RAL(_i)  (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
-                                       (0x054E0 + ((_i - 16) * 8)))
-#define E1000_RAH(_i)  (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
-                                       (0x054E4 + ((_i - 16) * 8)))
+#define E1000_RDBAL(_n)        ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \
+                        (0x0C000 + ((_n) * 0x40)))
+#define E1000_RDBAH(_n)        ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \
+                        (0x0C004 + ((_n) * 0x40)))
+#define E1000_RDLEN(_n)        ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \
+                        (0x0C008 + ((_n) * 0x40)))
+#define E1000_SRRCTL(_n)       ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \
+                                (0x0C00C + ((_n) * 0x40)))
+#define E1000_RDH(_n)  ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \
+                        (0x0C010 + ((_n) * 0x40)))
+#define E1000_RDT(_n)  ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \
+                        (0x0C018 + ((_n) * 0x40)))
+#define E1000_RXDCTL(_n)       ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \
+                                (0x0C028 + ((_n) * 0x40)))
+#define E1000_TDBAL(_n)        ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \
+                        (0x0E000 + ((_n) * 0x40)))
+#define E1000_TDBAH(_n)        ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \
+                        (0x0E004 + ((_n) * 0x40)))
+#define E1000_TDLEN(_n)        ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \
+                        (0x0E008 + ((_n) * 0x40)))
+#define E1000_TDH(_n)  ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \
+                        (0x0E010 + ((_n) * 0x40)))
+#define E1000_TDT(_n)  ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \
+                        (0x0E018 + ((_n) * 0x40)))
+#define E1000_TXDCTL(_n)       ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \
+                                (0x0E028 + ((_n) * 0x40)))
+#define E1000_DCA_TXCTRL(_n)   (0x03814 + (_n << 8))
+#define E1000_DCA_RXCTRL(_n)   (0x02814 + (_n << 8))
+#define E1000_RAL(_i)  (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
+                        (0x054E0 + ((_i - 16) * 8)))
+#define E1000_RAH(_i)  (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
+                        (0x054E4 + ((_i - 16) * 8)))
 
 /* Statistics registers */
-#define E1000_VFGPRC    0x00F10
-#define E1000_VFGORC    0x00F18
-#define E1000_VFMPRC    0x00F3C
-#define E1000_VFGPTC    0x00F14
-#define E1000_VFGOTC    0x00F34
-#define E1000_VFGOTLBC  0x00F50
-#define E1000_VFGPTLBC  0x00F44
-#define E1000_VFGORLBC  0x00F48
-#define E1000_VFGPRLBC  0x00F40
+#define E1000_VFGPRC   0x00F10
+#define E1000_VFGORC   0x00F18
+#define E1000_VFMPRC   0x00F3C
+#define E1000_VFGPTC   0x00F14
+#define E1000_VFGOTC   0x00F34
+#define E1000_VFGOTLBC 0x00F50
+#define E1000_VFGPTLBC 0x00F44
+#define E1000_VFGORLBC 0x00F48
+#define E1000_VFGPRLBC 0x00F40
 
 /* These act per VF so an array friendly macro is used */
-#define E1000_V2PMAILBOX(_n)   (0x00C40 + (4 * (_n)))
-#define E1000_VMBMEM(_n)       (0x00800 + (64 * (_n)))
+#define E1000_V2PMAILBOX(_n)   (0x00C40 + (4 * (_n)))
+#define E1000_VMBMEM(_n)       (0x00800 + (64 * (_n)))
 
 /* Define macros for handling registers */
-#define er32(reg) readl(hw->hw_addr + E1000_##reg)
-#define ew32(reg, val) writel((val), hw->hw_addr +  E1000_##reg)
+#define er32(reg)      readl(hw->hw_addr + E1000_##reg)
+#define ew32(reg, val) writel((val), hw->hw_addr +  E1000_##reg)
 #define array_er32(reg, offset) \
        readl(hw->hw_addr + E1000_##reg + (offset << 2))
 #define array_ew32(reg, offset, val) \
        writel((val), hw->hw_addr +  E1000_##reg + (offset << 2))
-#define e1e_flush() er32(STATUS)
+#define e1e_flush()    er32(STATUS)
 
 #endif
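The retabbed convenience macros encode one simple rule: queues 0-3 sit in the legacy block with a 0x100-byte stride, queues 4 and up in the extended block with a 0x40-byte stride. A small stand-alone program (an editor's illustration that only reproduces the E1000_RDT() definition shown above) makes the arithmetic easy to verify:

	#include <stdio.h>

	/* Receive Descriptor Tail offset for queue _n, exactly as defined above */
	#define E1000_RDT(_n)	((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \
				 (0x0C018 + ((_n) * 0x40)))

	int main(void)
	{
		printf("RDT(0) = 0x%05X\n", E1000_RDT(0));	/* 0x02818 */
		printf("RDT(3) = 0x%05X\n", E1000_RDT(3));	/* 0x02B18 */
		printf("RDT(5) = 0x%05X\n", E1000_RDT(5));	/* 0x0C158 */
		return 0;
	}

The same two-stride pattern holds for the other RD*/TD* macros.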
index 955ad8c..a13baa9 100644
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 
 *******************************************************************************/
 
-
 #include "vf.h"
 
 static s32 e1000_check_for_link_vf(struct e1000_hw *hw);
 static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed,
-                                     u16 *duplex);
+                                    u16 *duplex);
 static s32 e1000_init_hw_vf(struct e1000_hw *hw);
 static s32 e1000_reset_hw_vf(struct e1000_hw *hw);
 
 static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u8 *,
-                                         u32, u32, u32);
+                                        u32, u32, u32);
 static void e1000_rar_set_vf(struct e1000_hw *, u8 *, u32);
 static s32 e1000_read_mac_addr_vf(struct e1000_hw *);
 static s32 e1000_set_vfta_vf(struct e1000_hw *, u16, bool);
@@ -94,7 +92,7 @@ void e1000_init_function_pointers_vf(struct e1000_hw *hw)
  *  the status register's data which is often stale and inaccurate.
  **/
 static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed,
-                                     u16 *duplex)
+                                    u16 *duplex)
 {
        s32 status;
 
@@ -130,7 +128,7 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw)
        u8 *addr = (u8 *)(&msgbuf[1]);
        u32 ctrl;
 
-       /* assert vf queue/interrupt reset */
+       /* assert VF queue/interrupt reset */
        ctrl = er32(CTRL);
        ew32(CTRL, ctrl | E1000_CTRL_RST);
 
@@ -144,7 +142,7 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw)
                /* mailbox timeout can now become active */
                mbx->timeout = E1000_VF_MBX_INIT_TIMEOUT;
 
-               /* notify pf of vf reset completion */
+               /* notify PF of VF reset completion */
                msgbuf[0] = E1000_VF_RESET;
                mbx->ops.write_posted(hw, msgbuf, 1);
 
@@ -153,7 +151,8 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw)
                /* set our "perm_addr" based on info provided by PF */
                ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
                if (!ret_val) {
-                       if (msgbuf[0] == (E1000_VF_RESET | E1000_VT_MSGTYPE_ACK))
+                       if (msgbuf[0] == (E1000_VF_RESET |
+                                         E1000_VT_MSGTYPE_ACK))
                                memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
                        else
                                ret_val = -E1000_ERR_MAC_INIT;
@@ -194,15 +193,14 @@ static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr)
        /* Register count multiplied by bits per register */
        hash_mask = (hw->mac.mta_reg_count * 32) - 1;
 
-       /*
-        * The bit_shift is the number of left-shifts
+       /* The bit_shift is the number of left-shifts
         * where 0xFF would still fall within the hash mask.
         */
        while (hash_mask >> bit_shift != 0xFF)
                bit_shift++;
 
        hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
-                                 (((u16) mc_addr[5]) << bit_shift)));
+                                 (((u16)mc_addr[5]) << bit_shift)));
 
        return hash_value;
 }
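The rewrapped hash above takes a window of bits that straddles the last two bytes of the multicast address; the window position follows from the MTA size. A stand-alone rendering of the computation (an editor's worked example; the 128-register MTA and the zero-initialised bit_shift are assumptions about the parts of the function not shown in this hunk):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t hash_mc_addr(const uint8_t *mc_addr, uint32_t mta_reg_count)
	{
		uint32_t hash_mask = (mta_reg_count * 32) - 1;	/* 128 * 32 - 1 = 0xFFF */
		uint32_t bit_shift = 0;

		while (hash_mask >> bit_shift != 0xFF)		/* leaves bit_shift = 4 */
			bit_shift++;

		return hash_mask & ((mc_addr[4] >> (8 - bit_shift)) |
				    ((uint16_t)mc_addr[5] << bit_shift));
	}

	int main(void)
	{
		const uint8_t mdns[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };

		/* 0xFFF & ((0x00 >> 4) | (0xfb << 4)) = 0xFB0 */
		printf("hash = 0x%03X\n", (unsigned)hash_mc_addr(mdns, 128));
		return 0;
	}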
@@ -221,8 +219,8 @@ static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr)
  *  unless there are workarounds that change this.
  **/
 static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
-                                  u8 *mc_addr_list, u32 mc_addr_count,
-                                  u32 rar_used_count, u32 rar_count)
+                                        u8 *mc_addr_list, u32 mc_addr_count,
+                                        u32 rar_used_count, u32 rar_count)
 {
        struct e1000_mbx_info *mbx = &hw->mbx;
        u32 msgbuf[E1000_VFMAILBOX_SIZE];
@@ -305,7 +303,7 @@ void e1000_rlpml_set_vf(struct e1000_hw *hw, u16 max_size)
  *  @addr: pointer to the receive address
  *  @index: receive address array register
  **/
-static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index)
+static void e1000_rar_set_vf(struct e1000_hw *hw, u8 *addr, u32 index)
 {
        struct e1000_mbx_info *mbx = &hw->mbx;
        u32 msgbuf[3];
@@ -354,8 +352,7 @@ static s32 e1000_check_for_link_vf(struct e1000_hw *hw)
        s32 ret_val = E1000_SUCCESS;
        u32 in_msg = 0;
 
-       /*
-        * We only want to run this if there has been a rst asserted.
+       /* We only want to run this if there has been a rst asserted.
         * in this case that could mean a link change, device reset,
         * or a virtual function reset
         */
@@ -367,31 +364,33 @@ static s32 e1000_check_for_link_vf(struct e1000_hw *hw)
        if (!mac->get_link_status)
                goto out;
 
-       /* if link status is down no point in checking to see if pf is up */
+       /* if link status is down no point in checking to see if PF is up */
        if (!(er32(STATUS) & E1000_STATUS_LU))
                goto out;
 
        /* if the read failed it could just be a mailbox collision, best wait
-        * until we are called again and don't report an error */
+        * until we are called again and don't report an error
+        */
        if (mbx->ops.read(hw, &in_msg, 1))
                goto out;
 
        /* if incoming message isn't clear to send we are waiting on response */
        if (!(in_msg & E1000_VT_MSGTYPE_CTS)) {
-               /* message is not CTS and is NACK we must have lost CTS status */
+               /* msg is not CTS and is NACK we must have lost CTS status */
                if (in_msg & E1000_VT_MSGTYPE_NACK)
                        ret_val = -E1000_ERR_MAC_INIT;
                goto out;
        }
 
-       /* the pf is talking, if we timed out in the past we reinit */
+       /* the PF is talking, if we timed out in the past we reinit */
        if (!mbx->timeout) {
                ret_val = -E1000_ERR_MAC_INIT;
                goto out;
        }
 
        /* if we passed all the tests above then the link is up and we no
-        * longer need to check for link */
+        * longer need to check for link
+        */
        mac->get_link_status = false;
 
 out:
index 57db3c6..0f1eca6 100644
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 
 struct e1000_hw;
 
-#define E1000_DEV_ID_82576_VF                 0x10CA
-#define E1000_DEV_ID_I350_VF                  0x1520
-#define E1000_REVISION_0 0
-#define E1000_REVISION_1 1
-#define E1000_REVISION_2 2
-#define E1000_REVISION_3 3
-#define E1000_REVISION_4 4
+#define E1000_DEV_ID_82576_VF          0x10CA
+#define E1000_DEV_ID_I350_VF           0x1520
+#define E1000_REVISION_0       0
+#define E1000_REVISION_1       1
+#define E1000_REVISION_2       2
+#define E1000_REVISION_3       3
+#define E1000_REVISION_4       4
 
-#define E1000_FUNC_0     0
-#define E1000_FUNC_1     1
+#define E1000_FUNC_0   0
+#define E1000_FUNC_1   1
 
-/*
- * Receive Address Register Count
+/* Receive Address Register Count
  * Number of high/low register pairs in the RAR.  The RAR (Receive Address
  * Registers) holds the directed and multicast addresses that we monitor.
  * These entries are also used for MAC-based filtering.
  */
-#define E1000_RAR_ENTRIES_VF      1
+#define E1000_RAR_ENTRIES_VF   1
 
 /* Receive Descriptor - Advanced */
 union e1000_adv_rx_desc {
        struct {
-               u64 pkt_addr;             /* Packet buffer address */
-               u64 hdr_addr;             /* Header buffer address */
+               u64 pkt_addr; /* Packet buffer address */
+               u64 hdr_addr; /* Header buffer address */
        } read;
        struct {
                struct {
@@ -69,53 +67,53 @@ union e1000_adv_rx_desc {
                                u32 data;
                                struct {
                                        u16 pkt_info; /* RSS/Packet type */
-                                       u16 hdr_info; /* Split Header,
-                                                      * hdr buffer length */
+                                       /* Split Header, hdr buffer length */
+                                       u16 hdr_info;
                                } hs_rss;
                        } lo_dword;
                        union {
-                               u32 rss;          /* RSS Hash */
+                               u32 rss; /* RSS Hash */
                                struct {
-                                       u16 ip_id;    /* IP id */
-                                       u16 csum;     /* Packet Checksum */
+                                       u16 ip_id; /* IP id */
+                                       u16 csum;  /* Packet Checksum */
                                } csum_ip;
                        } hi_dword;
                } lower;
                struct {
-                       u32 status_error;     /* ext status/error */
-                       u16 length;           /* Packet length */
-                       u16 vlan;             /* VLAN tag */
+                       u32 status_error; /* ext status/error */
+                       u16 length; /* Packet length */
+                       u16 vlan;   /* VLAN tag */
                } upper;
        } wb;  /* writeback */
 };
 
-#define E1000_RXDADV_HDRBUFLEN_MASK      0x7FE0
-#define E1000_RXDADV_HDRBUFLEN_SHIFT     5
+#define E1000_RXDADV_HDRBUFLEN_MASK    0x7FE0
+#define E1000_RXDADV_HDRBUFLEN_SHIFT   5
 
 /* Transmit Descriptor - Advanced */
 union e1000_adv_tx_desc {
        struct {
-               u64 buffer_addr;    /* Address of descriptor's data buf */
+               u64 buffer_addr; /* Address of descriptor's data buf */
                u32 cmd_type_len;
                u32 olinfo_status;
        } read;
        struct {
-               u64 rsvd;       /* Reserved */
+               u64 rsvd; /* Reserved */
                u32 nxtseq_seed;
                u32 status;
        } wb;
 };
 
 /* Adv Transmit Descriptor Config Masks */
-#define E1000_ADVTXD_DTYP_CTXT    0x00200000 /* Advanced Context Descriptor */
-#define E1000_ADVTXD_DTYP_DATA    0x00300000 /* Advanced Data Descriptor */
-#define E1000_ADVTXD_DCMD_EOP     0x01000000 /* End of Packet */
-#define E1000_ADVTXD_DCMD_IFCS    0x02000000 /* Insert FCS (Ethernet CRC) */
-#define E1000_ADVTXD_DCMD_RS      0x08000000 /* Report Status */
-#define E1000_ADVTXD_DCMD_DEXT    0x20000000 /* Descriptor extension (1=Adv) */
-#define E1000_ADVTXD_DCMD_VLE     0x40000000 /* VLAN pkt enable */
-#define E1000_ADVTXD_DCMD_TSE     0x80000000 /* TCP Seg enable */
-#define E1000_ADVTXD_PAYLEN_SHIFT    14 /* Adv desc PAYLEN shift */
+#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
+#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
+#define E1000_ADVTXD_DCMD_EOP  0x01000000 /* End of Packet */
+#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_ADVTXD_DCMD_RS   0x08000000 /* Report Status */
+#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */
+#define E1000_ADVTXD_DCMD_VLE  0x40000000 /* VLAN pkt enable */
+#define E1000_ADVTXD_DCMD_TSE  0x80000000 /* TCP Seg enable */
+#define E1000_ADVTXD_PAYLEN_SHIFT      14 /* Adv desc PAYLEN shift */
 
 /* Context descriptors */
 struct e1000_adv_tx_context_desc {
@@ -125,11 +123,11 @@ struct e1000_adv_tx_context_desc {
        u32 mss_l4len_idx;
 };
 
-#define E1000_ADVTXD_MACLEN_SHIFT    9  /* Adv ctxt desc mac len shift */
-#define E1000_ADVTXD_TUCMD_IPV4    0x00000400  /* IP Packet Type: 1=IPv4 */
-#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800  /* L4 Packet TYPE of TCP */
-#define E1000_ADVTXD_L4LEN_SHIFT     8  /* Adv ctxt L4LEN shift */
-#define E1000_ADVTXD_MSS_SHIFT      16  /* Adv ctxt MSS shift */
+#define E1000_ADVTXD_MACLEN_SHIFT      9  /* Adv ctxt desc mac len shift */
+#define E1000_ADVTXD_TUCMD_IPV4                0x00000400 /* IP Packet Type: 1=IPv4 */
+#define E1000_ADVTXD_TUCMD_L4T_TCP     0x00000800 /* L4 Packet TYPE of TCP */
+#define E1000_ADVTXD_L4LEN_SHIFT       8  /* Adv ctxt L4LEN shift */
+#define E1000_ADVTXD_MSS_SHIFT         16 /* Adv ctxt MSS shift */
 
 enum e1000_mac_type {
        e1000_undefined = 0,
@@ -262,5 +260,4 @@ struct e1000_hw {
 void e1000_rlpml_set_vf(struct e1000_hw *, u16);
 void e1000_init_function_pointers_vf(struct e1000_hw *hw);
 
-
 #endif /* _E1000_VF_H_ */