ixgb: remove ndo_poll_controller
drivers/net/ethernet/intel/ixgb/ixgb_main.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2008 Intel Corporation. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/prefetch.h>
#include "ixgb.h"

char ixgb_driver_name[] = "ixgb";
static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";

#define DRIVERNAPI "-NAPI"
#define DRV_VERSION "1.0.135-k2" DRIVERNAPI
const char ixgb_driver_version[] = DRV_VERSION;
static const char ixgb_copyright[] = "Copyright (c) 1999-2008 Intel Corporation.";

#define IXGB_CB_LENGTH 256
static unsigned int copybreak __read_mostly = IXGB_CB_LENGTH;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
        "Maximum size of packet that is copied to a new buffer on receive");
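/* Illustrative usage, not part of the driver: copybreak is an ordinary
 * module parameter, so it can be set at load time, e.g.
 *
 *     modprobe ixgb copybreak=128
 *
 * or adjusted at runtime through sysfs (mode 0644 above):
 *
 *     echo 128 > /sys/module/ixgb/parameters/copybreak
 */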

/* ixgb_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ixgb_pci_tbl[] = {
        {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX,
         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_CX4,
         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_SR,
         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_LR,
         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},

        /* required last entry */
        {0,}
};

MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);
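/* For illustration: MODULE_DEVICE_TABLE() exports the table above as
 * modalias strings of the form
 *
 *     pci:v00008086d<device>sv*sd*bc*sc*i*
 *
 * (vendor 0x8086; device IDs defined in ixgb_ids.h), which is what lets
 * udev/modprobe autoload this driver when a matching NIC appears.
 */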

/* Local Function Prototypes */
static int ixgb_init_module(void);
static void ixgb_exit_module(void);
static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void ixgb_remove(struct pci_dev *pdev);
static int ixgb_sw_init(struct ixgb_adapter *adapter);
static int ixgb_open(struct net_device *netdev);
static int ixgb_close(struct net_device *netdev);
static void ixgb_configure_tx(struct ixgb_adapter *adapter);
static void ixgb_configure_rx(struct ixgb_adapter *adapter);
static void ixgb_setup_rctl(struct ixgb_adapter *adapter);
static void ixgb_clean_tx_ring(struct ixgb_adapter *adapter);
static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter);
static void ixgb_set_multi(struct net_device *netdev);
static void ixgb_watchdog(struct timer_list *t);
static netdev_tx_t ixgb_xmit_frame(struct sk_buff *skb,
                                   struct net_device *netdev);
static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
static int ixgb_set_mac(struct net_device *netdev, void *p);
static irqreturn_t ixgb_intr(int irq, void *data);
static bool ixgb_clean_tx_irq(struct ixgb_adapter *adapter);

static int ixgb_clean(struct napi_struct *, int);
static bool ixgb_clean_rx_irq(struct ixgb_adapter *, int *, int);
static void ixgb_alloc_rx_buffers(struct ixgb_adapter *, int);

static void ixgb_tx_timeout(struct net_device *dev);
static void ixgb_tx_timeout_task(struct work_struct *work);

static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter);
static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter);
static int ixgb_vlan_rx_add_vid(struct net_device *netdev,
                                __be16 proto, u16 vid);
static int ixgb_vlan_rx_kill_vid(struct net_device *netdev,
                                 __be16 proto, u16 vid);
static void ixgb_restore_vlan(struct ixgb_adapter *adapter);

static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
                                               enum pci_channel_state state);
static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev);
static void ixgb_io_resume(struct pci_dev *pdev);

static const struct pci_error_handlers ixgb_err_handler = {
        .error_detected = ixgb_io_error_detected,
        .slot_reset = ixgb_io_slot_reset,
        .resume = ixgb_io_resume,
};

static struct pci_driver ixgb_driver = {
        .name     = ixgb_driver_name,
        .id_table = ixgb_pci_tbl,
        .probe    = ixgb_probe,
        .remove   = ixgb_remove,
        .err_handler = &ixgb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
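/* Illustrative usage, not part of the driver: debug is passed through
 * netif_msg_init() below, so e.g.
 *
 *     modprobe ixgb debug=16
 *
 * sets all sixteen message-type bits ("16=all" above), while the default
 * of -1 keeps DEFAULT_MSG_ENABLE (driver, probe and link messages).
 */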

/**
 * ixgb_init_module - Driver Registration Routine
 *
 * ixgb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/

static int __init
ixgb_init_module(void)
{
        pr_info("%s - version %s\n", ixgb_driver_string, ixgb_driver_version);
        pr_info("%s\n", ixgb_copyright);

        return pci_register_driver(&ixgb_driver);
}

module_init(ixgb_init_module);

/**
 * ixgb_exit_module - Driver Exit Cleanup Routine
 *
 * ixgb_exit_module is called just before the driver is removed
 * from memory.
 **/

static void __exit
ixgb_exit_module(void)
{
        pci_unregister_driver(&ixgb_driver);
}

module_exit(ixgb_exit_module);

/**
 * ixgb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/

static void
ixgb_irq_disable(struct ixgb_adapter *adapter)
{
        IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
        IXGB_WRITE_FLUSH(&adapter->hw);
        synchronize_irq(adapter->pdev->irq);
}

/**
 * ixgb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/

static void
ixgb_irq_enable(struct ixgb_adapter *adapter)
{
        u32 val = IXGB_INT_RXT0 | IXGB_INT_RXDMT0 |
                  IXGB_INT_TXDW | IXGB_INT_LSC;
        if (adapter->hw.subsystem_vendor_id == PCI_VENDOR_ID_SUN)
                val |= IXGB_INT_GPI0;
        IXGB_WRITE_REG(&adapter->hw, IMS, val);
        IXGB_WRITE_FLUSH(&adapter->hw);
}

int
ixgb_up(struct ixgb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int err, irq_flags = IRQF_SHARED;
        int max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
        struct ixgb_hw *hw = &adapter->hw;

        /* hardware has been reset, we need to reload some things */

        ixgb_rar_set(hw, netdev->dev_addr, 0);
        ixgb_set_multi(netdev);

        ixgb_restore_vlan(adapter);

        ixgb_configure_tx(adapter);
        ixgb_setup_rctl(adapter);
        ixgb_configure_rx(adapter);
        ixgb_alloc_rx_buffers(adapter, IXGB_DESC_UNUSED(&adapter->rx_ring));

        /* disable interrupts and get the hardware into a known state */
        IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);

        /* only enable MSI if bus is in PCI-X mode */
        if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_PCIX_MODE) {
                err = pci_enable_msi(adapter->pdev);
                if (!err) {
                        adapter->have_msi = true;
                        irq_flags = 0;
                }
                /* proceed to try to request regular interrupt */
        }

        err = request_irq(adapter->pdev->irq, ixgb_intr, irq_flags,
                          netdev->name, netdev);
        if (err) {
                if (adapter->have_msi)
                        pci_disable_msi(adapter->pdev);
                netif_err(adapter, probe, adapter->netdev,
                          "Unable to allocate interrupt Error: %d\n", err);
                return err;
        }

        if ((hw->max_frame_size != max_frame) ||
                (hw->max_frame_size !=
                (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) {

                hw->max_frame_size = max_frame;

                IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);

                if (hw->max_frame_size >
                   IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
                        u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);

                        if (!(ctrl0 & IXGB_CTRL0_JFE)) {
                                ctrl0 |= IXGB_CTRL0_JFE;
                                IXGB_WRITE_REG(hw, CTRL0, ctrl0);
                        }
                }
        }

        clear_bit(__IXGB_DOWN, &adapter->flags);

        napi_enable(&adapter->napi);
        ixgb_irq_enable(adapter);

        netif_wake_queue(netdev);

        mod_timer(&adapter->watchdog_timer, jiffies);

        return 0;
}

void
ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog)
{
        struct net_device *netdev = adapter->netdev;

        /* prevent the interrupt handler from restarting watchdog */
        set_bit(__IXGB_DOWN, &adapter->flags);

        netif_carrier_off(netdev);

        napi_disable(&adapter->napi);
        /* waiting for NAPI to complete can re-enable interrupts */
        ixgb_irq_disable(adapter);
        free_irq(adapter->pdev->irq, netdev);

        if (adapter->have_msi)
                pci_disable_msi(adapter->pdev);

        if (kill_watchdog)
                del_timer_sync(&adapter->watchdog_timer);

        adapter->link_speed = 0;
        adapter->link_duplex = 0;
        netif_stop_queue(netdev);

        ixgb_reset(adapter);
        ixgb_clean_tx_ring(adapter);
        ixgb_clean_rx_ring(adapter);
}

void
ixgb_reset(struct ixgb_adapter *adapter)
{
        struct ixgb_hw *hw = &adapter->hw;

        ixgb_adapter_stop(hw);
        if (!ixgb_init_hw(hw))
                netif_err(adapter, probe, adapter->netdev, "ixgb_init_hw failed\n");

        /* restore frame size information */
        IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
        if (hw->max_frame_size >
            IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
                u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);
                if (!(ctrl0 & IXGB_CTRL0_JFE)) {
                        ctrl0 |= IXGB_CTRL0_JFE;
                        IXGB_WRITE_REG(hw, CTRL0, ctrl0);
                }
        }
}

static netdev_features_t
ixgb_fix_features(struct net_device *netdev, netdev_features_t features)
{
        /*
         * Tx VLAN insertion does not work per HW design when Rx stripping is
         * disabled.
         */
        if (!(features & NETIF_F_HW_VLAN_CTAG_RX))
                features &= ~NETIF_F_HW_VLAN_CTAG_TX;

        return features;
}

static int
ixgb_set_features(struct net_device *netdev, netdev_features_t features)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);
        netdev_features_t changed = features ^ netdev->features;

        if (!(changed & (NETIF_F_RXCSUM|NETIF_F_HW_VLAN_CTAG_RX)))
                return 0;

        adapter->rx_csum = !!(features & NETIF_F_RXCSUM);

        if (netif_running(netdev)) {
                ixgb_down(adapter, true);
                ixgb_up(adapter);
                ixgb_set_speed_duplex(netdev);
        } else
                ixgb_reset(adapter);

        return 0;
}


static const struct net_device_ops ixgb_netdev_ops = {
        .ndo_open               = ixgb_open,
        .ndo_stop               = ixgb_close,
        .ndo_start_xmit         = ixgb_xmit_frame,
        .ndo_set_rx_mode        = ixgb_set_multi,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = ixgb_set_mac,
        .ndo_change_mtu         = ixgb_change_mtu,
        .ndo_tx_timeout         = ixgb_tx_timeout,
        .ndo_vlan_rx_add_vid    = ixgb_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = ixgb_vlan_rx_kill_vid,
        .ndo_fix_features       = ixgb_fix_features,
        .ndo_set_features       = ixgb_set_features,
};

/**
 * ixgb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/

static int
ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct net_device *netdev = NULL;
        struct ixgb_adapter *adapter;
        static int cards_found = 0;
        int pci_using_dac;
        int i;
        int err;

        err = pci_enable_device(pdev);
        if (err)
                return err;

        pci_using_dac = 0;
        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (!err) {
                pci_using_dac = 1;
        } else {
                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
                        pr_err("No usable DMA configuration, aborting\n");
                        goto err_dma_mask;
                }
        }

        err = pci_request_regions(pdev, ixgb_driver_name);
        if (err)
                goto err_request_regions;

        pci_set_master(pdev);

        netdev = alloc_etherdev(sizeof(struct ixgb_adapter));
        if (!netdev) {
                err = -ENOMEM;
                goto err_alloc_etherdev;
        }

        SET_NETDEV_DEV(netdev, &pdev->dev);

        pci_set_drvdata(pdev, netdev);
        adapter = netdev_priv(netdev);
        adapter->netdev = netdev;
        adapter->pdev = pdev;
        adapter->hw.back = adapter;
        adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

        adapter->hw.hw_addr = pci_ioremap_bar(pdev, BAR_0);
        if (!adapter->hw.hw_addr) {
                err = -EIO;
                goto err_ioremap;
        }

        for (i = BAR_1; i <= BAR_5; i++) {
                if (pci_resource_len(pdev, i) == 0)
                        continue;
                if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
                        adapter->hw.io_base = pci_resource_start(pdev, i);
                        break;
                }
        }

        netdev->netdev_ops = &ixgb_netdev_ops;
        ixgb_set_ethtool_ops(netdev);
        netdev->watchdog_timeo = 5 * HZ;
        netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64);

        strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

        adapter->bd_number = cards_found;
        adapter->link_speed = 0;
        adapter->link_duplex = 0;

        /* setup the private structure */

        err = ixgb_sw_init(adapter);
        if (err)
                goto err_sw_init;

        netdev->hw_features = NETIF_F_SG |
                           NETIF_F_TSO |
                           NETIF_F_HW_CSUM |
                           NETIF_F_HW_VLAN_CTAG_TX |
                           NETIF_F_HW_VLAN_CTAG_RX;
        netdev->features = netdev->hw_features |
                           NETIF_F_HW_VLAN_CTAG_FILTER;
        netdev->hw_features |= NETIF_F_RXCSUM;

        if (pci_using_dac) {
                netdev->features |= NETIF_F_HIGHDMA;
                netdev->vlan_features |= NETIF_F_HIGHDMA;
        }

        /* MTU range: 68 - 16114 */
        netdev->min_mtu = ETH_MIN_MTU;
        netdev->max_mtu = IXGB_MAX_JUMBO_FRAME_SIZE - ETH_HLEN;
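        /* For reference (values from ixgb_hw.h / if_ether.h):
         * IXGB_MAX_JUMBO_FRAME_SIZE is 0x3F00 = 16128 and ETH_HLEN is 14,
         * so max_mtu = 16128 - 14 = 16114, matching the range noted above.
         */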

        /* make sure the EEPROM is good */

        if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
                netif_err(adapter, probe, adapter->netdev,
                          "The EEPROM Checksum Is Not Valid\n");
                err = -EIO;
                goto err_eeprom;
        }

        ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);

        if (!is_valid_ether_addr(netdev->dev_addr)) {
                netif_err(adapter, probe, adapter->netdev, "Invalid MAC Address\n");
                err = -EIO;
                goto err_eeprom;
        }

        adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw);

        timer_setup(&adapter->watchdog_timer, ixgb_watchdog, 0);

        INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);

        strcpy(netdev->name, "eth%d");
        err = register_netdev(netdev);
        if (err)
                goto err_register;

        /* carrier off reporting is important to ethtool even BEFORE open */
        netif_carrier_off(netdev);

        netif_info(adapter, probe, adapter->netdev,
                   "Intel(R) PRO/10GbE Network Connection\n");
        ixgb_check_options(adapter);
        /* reset the hardware with the new settings */

        ixgb_reset(adapter);

        cards_found++;
        return 0;

err_register:
err_sw_init:
err_eeprom:
        iounmap(adapter->hw.hw_addr);
err_ioremap:
        free_netdev(netdev);
err_alloc_etherdev:
        pci_release_regions(pdev);
err_request_regions:
err_dma_mask:
        pci_disable_device(pdev);
        return err;
}
/**
 * ixgb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/

static void
ixgb_remove(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgb_adapter *adapter = netdev_priv(netdev);

        cancel_work_sync(&adapter->tx_timeout_task);

        unregister_netdev(netdev);

        iounmap(adapter->hw.hw_addr);
        pci_release_regions(pdev);

        free_netdev(netdev);
        pci_disable_device(pdev);
}

/**
 * ixgb_sw_init - Initialize general software structures (struct ixgb_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/

static int
ixgb_sw_init(struct ixgb_adapter *adapter)
{
        struct ixgb_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;

        /* PCI config space info */

        hw->vendor_id = pdev->vendor;
        hw->device_id = pdev->device;
        hw->subsystem_vendor_id = pdev->subsystem_vendor;
        hw->subsystem_id = pdev->subsystem_device;

        hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
        adapter->rx_buffer_len = hw->max_frame_size + 8; /* + 8 for errata */
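        /* Worked example, for illustration: with the default 1500-byte MTU,
         * max_frame_size = 1500 + 14 (ENET_HEADER_SIZE) + 4 (ENET_FCS_LENGTH)
         * = 1518, so rx_buffer_len becomes 1526 with the errata pad.
         */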

        if ((hw->device_id == IXGB_DEVICE_ID_82597EX) ||
            (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4) ||
            (hw->device_id == IXGB_DEVICE_ID_82597EX_LR) ||
            (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
                hw->mac_type = ixgb_82597;
        else {
                /* should never have loaded on this device */
                netif_err(adapter, probe, adapter->netdev, "unsupported device id\n");
        }

        /* enable flow control to be programmed */
        hw->fc.send_xon = 1;

        set_bit(__IXGB_DOWN, &adapter->flags);
        return 0;
}

/**
 * ixgb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/

static int
ixgb_open(struct net_device *netdev)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);
        int err;

        /* allocate transmit descriptors */
        err = ixgb_setup_tx_resources(adapter);
        if (err)
                goto err_setup_tx;

        netif_carrier_off(netdev);

        /* allocate receive descriptors */

        err = ixgb_setup_rx_resources(adapter);
        if (err)
                goto err_setup_rx;

        err = ixgb_up(adapter);
        if (err)
                goto err_up;

        netif_start_queue(netdev);

        return 0;

err_up:
        ixgb_free_rx_resources(adapter);
err_setup_rx:
        ixgb_free_tx_resources(adapter);
err_setup_tx:
        ixgb_reset(adapter);

        return err;
}

/**
 * ixgb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/

static int
ixgb_close(struct net_device *netdev)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);

        ixgb_down(adapter, true);

        ixgb_free_tx_resources(adapter);
        ixgb_free_rx_resources(adapter);

        return 0;
}

/**
 * ixgb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/

int
ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
{
        struct ixgb_desc_ring *txdr = &adapter->tx_ring;
        struct pci_dev *pdev = adapter->pdev;
        int size;

        size = sizeof(struct ixgb_buffer) * txdr->count;
        txdr->buffer_info = vzalloc(size);
        if (!txdr->buffer_info)
                return -ENOMEM;

        /* round up to nearest 4K */

        txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
        txdr->size = ALIGN(txdr->size, 4096);
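        /* For illustration: each ixgb_tx_desc is 16 bytes, so the default
         * 256-entry ring (IXGB_DEFAULT_TXD) needs exactly 256 * 16 = 4096
         * bytes and ALIGN() is a no-op; an 80-entry ring (1280 bytes)
         * would be rounded up to 4096.
         */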

        txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
                                         GFP_KERNEL);
        if (!txdr->desc) {
                vfree(txdr->buffer_info);
                return -ENOMEM;
        }

        txdr->next_to_use = 0;
        txdr->next_to_clean = 0;

        return 0;
}

/**
 * ixgb_configure_tx - Configure 82597 Transmit Unit after Reset.
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/

static void
ixgb_configure_tx(struct ixgb_adapter *adapter)
{
        u64 tdba = adapter->tx_ring.dma;
        u32 tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc);
        u32 tctl;
        struct ixgb_hw *hw = &adapter->hw;

        /* Setup the Base and Length of the Tx Descriptor Ring
         * tx_ring.dma can be either a 32 or 64 bit value
         */

        IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
        IXGB_WRITE_REG(hw, TDBAH, (tdba >> 32));

        IXGB_WRITE_REG(hw, TDLEN, tdlen);

        /* Setup the HW Tx Head and Tail descriptor pointers */

        IXGB_WRITE_REG(hw, TDH, 0);
        IXGB_WRITE_REG(hw, TDT, 0);

        /* don't set up txdctl, it induces performance problems if configured
         * incorrectly */
        /* Set the Tx Interrupt Delay register */

        IXGB_WRITE_REG(hw, TIDV, adapter->tx_int_delay);

        /* Program the Transmit Control Register */

        tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
        IXGB_WRITE_REG(hw, TCTL, tctl);

        /* Setup Transmit Descriptor Settings for this adapter */
        adapter->tx_cmd_type =
                IXGB_TX_DESC_TYPE |
                (adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0);
}

/**
 * ixgb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/

int
ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
{
        struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
        struct pci_dev *pdev = adapter->pdev;
        int size;

        size = sizeof(struct ixgb_buffer) * rxdr->count;
        rxdr->buffer_info = vzalloc(size);
        if (!rxdr->buffer_info)
                return -ENOMEM;

        /* Round up to nearest 4K */

        rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
        rxdr->size = ALIGN(rxdr->size, 4096);

        rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
                                         GFP_KERNEL);

        if (!rxdr->desc) {
                vfree(rxdr->buffer_info);
                return -ENOMEM;
        }

        rxdr->next_to_clean = 0;
        rxdr->next_to_use = 0;

        return 0;
}

/**
 * ixgb_setup_rctl - configure the receive control register
 * @adapter: Board private structure
 **/

static void
ixgb_setup_rctl(struct ixgb_adapter *adapter)
{
        u32 rctl;

        rctl = IXGB_READ_REG(&adapter->hw, RCTL);

        rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);

        rctl |=
                IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 |
                IXGB_RCTL_RXEN | IXGB_RCTL_CFF |
                (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);

        rctl |= IXGB_RCTL_SECRC;

        if (adapter->rx_buffer_len <= IXGB_RXBUFFER_2048)
                rctl |= IXGB_RCTL_BSIZE_2048;
        else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_4096)
                rctl |= IXGB_RCTL_BSIZE_4096;
        else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_8192)
                rctl |= IXGB_RCTL_BSIZE_8192;
        else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_16384)
                rctl |= IXGB_RCTL_BSIZE_16384;
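        /* For illustration: a 1500-byte MTU gives rx_buffer_len = 1526
         * (see ixgb_sw_init above), selecting BSIZE_2048; a 9000-byte
         * jumbo MTU gives 9026 and falls through to BSIZE_16384.
         */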

        IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
}

/**
 * ixgb_configure_rx - Configure 82597 Receive Unit after Reset.
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/

static void
ixgb_configure_rx(struct ixgb_adapter *adapter)
{
        u64 rdba = adapter->rx_ring.dma;
        u32 rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc);
        struct ixgb_hw *hw = &adapter->hw;
        u32 rctl;
        u32 rxcsum;

        /* make sure receives are disabled while setting up the descriptors */

        rctl = IXGB_READ_REG(hw, RCTL);
        IXGB_WRITE_REG(hw, RCTL, rctl & ~IXGB_RCTL_RXEN);

        /* set the Receive Delay Timer Register */

        IXGB_WRITE_REG(hw, RDTR, adapter->rx_int_delay);

        /* Setup the Base and Length of the Rx Descriptor Ring */

        IXGB_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
        IXGB_WRITE_REG(hw, RDBAH, (rdba >> 32));

        IXGB_WRITE_REG(hw, RDLEN, rdlen);

        /* Setup the HW Rx Head and Tail Descriptor Pointers */
        IXGB_WRITE_REG(hw, RDH, 0);
        IXGB_WRITE_REG(hw, RDT, 0);

        /* due to the hardware errata with RXDCTL, we are unable to use any of
         * the performance enhancing features of it without causing other
         * subtle bugs, some of the bugs could include receive length
         * corruption at high data rates (WTHRESH > 0) and/or receive
         * descriptor ring irregularities (particularly in hardware cache) */
        IXGB_WRITE_REG(hw, RXDCTL, 0);

        /* Enable Receive Checksum Offload for TCP and UDP */
        if (adapter->rx_csum) {
                rxcsum = IXGB_READ_REG(hw, RXCSUM);
                rxcsum |= IXGB_RXCSUM_TUOFL;
                IXGB_WRITE_REG(hw, RXCSUM, rxcsum);
        }

        /* Enable Receives */

        IXGB_WRITE_REG(hw, RCTL, rctl);
}

/**
 * ixgb_free_tx_resources - Free Tx Resources
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/

void
ixgb_free_tx_resources(struct ixgb_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;

        ixgb_clean_tx_ring(adapter);

        vfree(adapter->tx_ring.buffer_info);
        adapter->tx_ring.buffer_info = NULL;

        dma_free_coherent(&pdev->dev, adapter->tx_ring.size,
                          adapter->tx_ring.desc, adapter->tx_ring.dma);

        adapter->tx_ring.desc = NULL;
}

static void
ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
                                struct ixgb_buffer *buffer_info)
{
        if (buffer_info->dma) {
                if (buffer_info->mapped_as_page)
                        dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
                                       buffer_info->length, DMA_TO_DEVICE);
                else
                        dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
                                         buffer_info->length, DMA_TO_DEVICE);
                buffer_info->dma = 0;
        }

        if (buffer_info->skb) {
                dev_kfree_skb_any(buffer_info->skb);
                buffer_info->skb = NULL;
        }
        buffer_info->time_stamp = 0;
        /* these fields must always be initialized in tx
         * buffer_info->length = 0;
         * buffer_info->next_to_watch = 0; */
}

/**
 * ixgb_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 **/

static void
ixgb_clean_tx_ring(struct ixgb_adapter *adapter)
{
        struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
        struct ixgb_buffer *buffer_info;
        unsigned long size;
        unsigned int i;

        /* Free all the Tx ring sk_buffs */

        for (i = 0; i < tx_ring->count; i++) {
                buffer_info = &tx_ring->buffer_info[i];
                ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
        }

        size = sizeof(struct ixgb_buffer) * tx_ring->count;
        memset(tx_ring->buffer_info, 0, size);

        /* Zero out the descriptor ring */

        memset(tx_ring->desc, 0, tx_ring->size);

        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;

        IXGB_WRITE_REG(&adapter->hw, TDH, 0);
        IXGB_WRITE_REG(&adapter->hw, TDT, 0);
}

/**
 * ixgb_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/

void
ixgb_free_rx_resources(struct ixgb_adapter *adapter)
{
        struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
        struct pci_dev *pdev = adapter->pdev;

        ixgb_clean_rx_ring(adapter);

        vfree(rx_ring->buffer_info);
        rx_ring->buffer_info = NULL;

        dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
                          rx_ring->dma);

        rx_ring->desc = NULL;
}

/**
 * ixgb_clean_rx_ring - Free Rx Buffers
 * @adapter: board private structure
 **/

static void
ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
{
        struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
        struct ixgb_buffer *buffer_info;
        struct pci_dev *pdev = adapter->pdev;
        unsigned long size;
        unsigned int i;

        /* Free all the Rx ring sk_buffs */

        for (i = 0; i < rx_ring->count; i++) {
                buffer_info = &rx_ring->buffer_info[i];
                if (buffer_info->dma) {
                        dma_unmap_single(&pdev->dev,
                                         buffer_info->dma,
                                         buffer_info->length,
                                         DMA_FROM_DEVICE);
                        buffer_info->dma = 0;
                        buffer_info->length = 0;
                }

                if (buffer_info->skb) {
                        dev_kfree_skb(buffer_info->skb);
                        buffer_info->skb = NULL;
                }
        }

        size = sizeof(struct ixgb_buffer) * rx_ring->count;
        memset(rx_ring->buffer_info, 0, size);

        /* Zero out the descriptor ring */

        memset(rx_ring->desc, 0, rx_ring->size);

        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;

        IXGB_WRITE_REG(&adapter->hw, RDH, 0);
        IXGB_WRITE_REG(&adapter->hw, RDT, 0);
}

/**
 * ixgb_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/

static int
ixgb_set_mac(struct net_device *netdev, void *p)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        ixgb_rar_set(&adapter->hw, addr->sa_data, 0);

        return 0;
}

/**
 * ixgb_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/

static void
ixgb_set_multi(struct net_device *netdev)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);
        struct ixgb_hw *hw = &adapter->hw;
        struct netdev_hw_addr *ha;
        u32 rctl;

        /* Check for Promiscuous and All Multicast modes */

        rctl = IXGB_READ_REG(hw, RCTL);

        if (netdev->flags & IFF_PROMISC) {
                rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
                /* disable VLAN filtering */
                rctl &= ~IXGB_RCTL_CFIEN;
                rctl &= ~IXGB_RCTL_VFE;
        } else {
                if (netdev->flags & IFF_ALLMULTI) {
                        rctl |= IXGB_RCTL_MPE;
                        rctl &= ~IXGB_RCTL_UPE;
                } else {
                        rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
                }
                /* enable VLAN filtering */
                rctl |= IXGB_RCTL_VFE;
                rctl &= ~IXGB_RCTL_CFIEN;
        }

        if (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
                rctl |= IXGB_RCTL_MPE;
                IXGB_WRITE_REG(hw, RCTL, rctl);
        } else {
                u8 *mta = kmalloc_array(ETH_ALEN,
                                        IXGB_MAX_NUM_MULTICAST_ADDRESSES,
                                        GFP_ATOMIC);
                u8 *addr;
                if (!mta)
                        goto alloc_failed;

                IXGB_WRITE_REG(hw, RCTL, rctl);

                addr = mta;
                netdev_for_each_mc_addr(ha, netdev) {
                        memcpy(addr, ha->addr, ETH_ALEN);
                        addr += ETH_ALEN;
                }

                ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0);
                kfree(mta);
        }

alloc_failed:
        if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
                ixgb_vlan_strip_enable(adapter);
        else
                ixgb_vlan_strip_disable(adapter);

}

/**
 * ixgb_watchdog - Timer Call-back
 * @t: pointer to timer_list containing our private info pointer
 **/

static void
ixgb_watchdog(struct timer_list *t)
{
        struct ixgb_adapter *adapter = from_timer(adapter, t, watchdog_timer);
        struct net_device *netdev = adapter->netdev;
        struct ixgb_desc_ring *txdr = &adapter->tx_ring;

        ixgb_check_for_link(&adapter->hw);

        if (ixgb_check_for_bad_link(&adapter->hw)) {
                /* force the reset path */
                netif_stop_queue(netdev);
        }

        if (adapter->hw.link_up) {
                if (!netif_carrier_ok(netdev)) {
                        netdev_info(netdev,
                                    "NIC Link is Up 10 Gbps Full Duplex, Flow Control: %s\n",
                                    (adapter->hw.fc.type == ixgb_fc_full) ?
                                    "RX/TX" :
                                    (adapter->hw.fc.type == ixgb_fc_rx_pause) ?
                                     "RX" :
                                    (adapter->hw.fc.type == ixgb_fc_tx_pause) ?
                                    "TX" : "None");
                        adapter->link_speed = 10000;
                        adapter->link_duplex = FULL_DUPLEX;
                        netif_carrier_on(netdev);
                }
        } else {
                if (netif_carrier_ok(netdev)) {
                        adapter->link_speed = 0;
                        adapter->link_duplex = 0;
                        netdev_info(netdev, "NIC Link is Down\n");
                        netif_carrier_off(netdev);
                }
        }

        ixgb_update_stats(adapter);

        if (!netif_carrier_ok(netdev)) {
                if (IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) {
                        /* We've lost link, so the controller stops DMA,
                         * but we've got queued Tx work that's never going
                         * to get done, so reset controller to flush Tx.
                         * (Do the reset outside of interrupt context). */
                        schedule_work(&adapter->tx_timeout_task);
                        /* return immediately since reset is imminent */
                        return;
                }
        }

        /* Force detection of hung controller every watchdog period */
        adapter->detect_tx_hung = true;

        /* generate an interrupt to force clean up of any stragglers */
        IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW);

        /* Reset the timer */
        mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}

#define IXGB_TX_FLAGS_CSUM              0x00000001
#define IXGB_TX_FLAGS_VLAN              0x00000002
#define IXGB_TX_FLAGS_TSO               0x00000004

static int
ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
        struct ixgb_context_desc *context_desc;
        unsigned int i;
        u8 ipcss, ipcso, tucss, tucso, hdr_len;
        u16 ipcse, tucse, mss;

        if (likely(skb_is_gso(skb))) {
                struct ixgb_buffer *buffer_info;
                struct iphdr *iph;
                int err;

                err = skb_cow_head(skb, 0);
                if (err < 0)
                        return err;

                hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
                mss = skb_shinfo(skb)->gso_size;
                iph = ip_hdr(skb);
                iph->tot_len = 0;
                iph->check = 0;
                tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                         iph->daddr, 0,
                                                         IPPROTO_TCP, 0);
                ipcss = skb_network_offset(skb);
                ipcso = (void *)&(iph->check) - (void *)skb->data;
                ipcse = skb_transport_offset(skb) - 1;
                tucss = skb_transport_offset(skb);
                tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
                tucse = 0;
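                /* Worked example, for illustration: for an untagged
                 * IPv4/TCP frame, skb_network_offset() is 14, so
                 * ipcss = 14 and ipcso = 24 (the checksum field sits 10
                 * bytes into the IP header); with a 20-byte IP header,
                 * transport starts at 34, so ipcse = 33, tucss = 34 and
                 * tucso = 50 (TCP checksum at offset 16 in its header).
                 */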

                i = adapter->tx_ring.next_to_use;
                context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
                buffer_info = &adapter->tx_ring.buffer_info[i];
                WARN_ON(buffer_info->dma != 0);

                context_desc->ipcss = ipcss;
                context_desc->ipcso = ipcso;
                context_desc->ipcse = cpu_to_le16(ipcse);
                context_desc->tucss = tucss;
                context_desc->tucso = tucso;
                context_desc->tucse = cpu_to_le16(tucse);
                context_desc->mss = cpu_to_le16(mss);
                context_desc->hdr_len = hdr_len;
                context_desc->status = 0;
                context_desc->cmd_type_len = cpu_to_le32(
                                                  IXGB_CONTEXT_DESC_TYPE
                                                | IXGB_CONTEXT_DESC_CMD_TSE
                                                | IXGB_CONTEXT_DESC_CMD_IP
                                                | IXGB_CONTEXT_DESC_CMD_TCP
                                                | IXGB_CONTEXT_DESC_CMD_IDE
                                                | (skb->len - (hdr_len)));


                if (++i == adapter->tx_ring.count) i = 0;
                adapter->tx_ring.next_to_use = i;

                return 1;
        }

        return 0;
}

static bool
ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
        struct ixgb_context_desc *context_desc;
        unsigned int i;
        u8 css, cso;

        if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
                struct ixgb_buffer *buffer_info;
                css = skb_checksum_start_offset(skb);
                cso = css + skb->csum_offset;

                i = adapter->tx_ring.next_to_use;
                context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
                buffer_info = &adapter->tx_ring.buffer_info[i];
                WARN_ON(buffer_info->dma != 0);

                context_desc->tucss = css;
                context_desc->tucso = cso;
                context_desc->tucse = 0;
                /* zero out any previously existing data in one instruction */
                *(u32 *)&(context_desc->ipcss) = 0;
                context_desc->status = 0;
                context_desc->hdr_len = 0;
                context_desc->mss = 0;
                context_desc->cmd_type_len =
                        cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
                                    | IXGB_TX_DESC_CMD_IDE);

                if (++i == adapter->tx_ring.count) i = 0;
                adapter->tx_ring.next_to_use = i;

                return true;
        }

        return false;
}

#define IXGB_MAX_TXD_PWR        14
#define IXGB_MAX_DATA_PER_TXD   (1<<IXGB_MAX_TXD_PWR)

static int
ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
            unsigned int first)
{
        struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
        struct pci_dev *pdev = adapter->pdev;
        struct ixgb_buffer *buffer_info;
        int len = skb_headlen(skb);
        unsigned int offset = 0, size, count = 0, i;
        unsigned int mss = skb_shinfo(skb)->gso_size;
        unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
        unsigned int f;

        i = tx_ring->next_to_use;

        while (len) {
                buffer_info = &tx_ring->buffer_info[i];
                size = min(len, IXGB_MAX_DATA_PER_TXD);
                /* Workaround for premature desc write-backs
                 * in TSO mode.  Append 4-byte sentinel desc */
                if (unlikely(mss && !nr_frags && size == len && size > 8))
                        size -= 4;

                buffer_info->length = size;
                WARN_ON(buffer_info->dma != 0);
                buffer_info->time_stamp = jiffies;
                buffer_info->mapped_as_page = false;
                buffer_info->dma = dma_map_single(&pdev->dev,
                                                  skb->data + offset,
                                                  size, DMA_TO_DEVICE);
                if (dma_mapping_error(&pdev->dev, buffer_info->dma))
                        goto dma_error;
                buffer_info->next_to_watch = 0;

                len -= size;
                offset += size;
                count++;
                if (len) {
                        i++;
                        if (i == tx_ring->count)
                                i = 0;
                }
        }

        for (f = 0; f < nr_frags; f++) {
                const struct skb_frag_struct *frag;

                frag = &skb_shinfo(skb)->frags[f];
                len = skb_frag_size(frag);
                offset = 0;

                while (len) {
                        i++;
                        if (i == tx_ring->count)
                                i = 0;

                        buffer_info = &tx_ring->buffer_info[i];
                        size = min(len, IXGB_MAX_DATA_PER_TXD);

                        /* Workaround for premature desc write-backs
                         * in TSO mode.  Append 4-byte sentinel desc */
                        if (unlikely(mss && (f == (nr_frags - 1))
                                     && size == len && size > 8))
                                size -= 4;

                        buffer_info->length = size;
                        buffer_info->time_stamp = jiffies;
                        buffer_info->mapped_as_page = true;
                        buffer_info->dma =
                                skb_frag_dma_map(&pdev->dev, frag, offset, size,
                                                 DMA_TO_DEVICE);
                        if (dma_mapping_error(&pdev->dev, buffer_info->dma))
                                goto dma_error;
                        buffer_info->next_to_watch = 0;

                        len -= size;
                        offset += size;
                        count++;
                }
        }
        tx_ring->buffer_info[i].skb = skb;
        tx_ring->buffer_info[first].next_to_watch = i;

        return count;

dma_error:
        dev_err(&pdev->dev, "TX DMA map failed\n");
        buffer_info->dma = 0;
        if (count)
                count--;

        while (count--) {
                if (i == 0)
                        i += tx_ring->count;
                i--;
                buffer_info = &tx_ring->buffer_info[i];
                ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
        }

        return 0;
}

static void
ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id, int tx_flags)
{
        struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
        struct ixgb_tx_desc *tx_desc = NULL;
        struct ixgb_buffer *buffer_info;
        u32 cmd_type_len = adapter->tx_cmd_type;
        u8 status = 0;
        u8 popts = 0;
        unsigned int i;

        if (tx_flags & IXGB_TX_FLAGS_TSO) {
                cmd_type_len |= IXGB_TX_DESC_CMD_TSE;
                popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM);
        }

        if (tx_flags & IXGB_TX_FLAGS_CSUM)
                popts |= IXGB_TX_DESC_POPTS_TXSM;

        if (tx_flags & IXGB_TX_FLAGS_VLAN)
                cmd_type_len |= IXGB_TX_DESC_CMD_VLE;

        i = tx_ring->next_to_use;

        while (count--) {
                buffer_info = &tx_ring->buffer_info[i];
                tx_desc = IXGB_TX_DESC(*tx_ring, i);
                tx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
                tx_desc->cmd_type_len =
                        cpu_to_le32(cmd_type_len | buffer_info->length);
                tx_desc->status = status;
                tx_desc->popts = popts;
                tx_desc->vlan = cpu_to_le16(vlan_id);

                if (++i == tx_ring->count) i = 0;
        }

        tx_desc->cmd_type_len |=
                cpu_to_le32(IXGB_TX_DESC_CMD_EOP | IXGB_TX_DESC_CMD_RS);

        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
         * such as IA-64). */
        wmb();

        tx_ring->next_to_use = i;
        IXGB_WRITE_REG(&adapter->hw, TDT, i);
}

static int __ixgb_maybe_stop_tx(struct net_device *netdev, int size)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);
        struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;

        netif_stop_queue(netdev);
        /* Herbert's original patch had:
         *  smp_mb__after_netif_stop_queue();
         * but since that doesn't exist yet, just open code it. */
        smp_mb();

        /* We need to check again in a case another CPU has just
         * made room available. */
        if (likely(IXGB_DESC_UNUSED(tx_ring) < size))
                return -EBUSY;

        /* A reprieve! */
        netif_start_queue(netdev);
        ++adapter->restart_queue;
        return 0;
}

static int ixgb_maybe_stop_tx(struct net_device *netdev,
                              struct ixgb_desc_ring *tx_ring, int size)
{
        if (likely(IXGB_DESC_UNUSED(tx_ring) >= size))
                return 0;
        return __ixgb_maybe_stop_tx(netdev, size);
}

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
                         (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) /* skb->data */ + \
        MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 /* for context */ \
        + 1 /* one more needed for sentinel TSO workaround */
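/* Worked example, for illustration: IXGB_MAX_DATA_PER_TXD = 1 << 14 =
 * 16384, so TXD_USE_COUNT(16384) = 1 and, with 4 KiB pages,
 * TXD_USE_COUNT(PAGE_SIZE) = 1.  Assuming MAX_SKB_FRAGS = 17 on such a
 * system, DESC_NEEDED = 1 + 17 * 1 + 1 + 1 = 20 descriptors.
 */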
1478
1479 static netdev_tx_t
1480 ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1481 {
1482         struct ixgb_adapter *adapter = netdev_priv(netdev);
1483         unsigned int first;
1484         unsigned int tx_flags = 0;
1485         int vlan_id = 0;
1486         int count = 0;
1487         int tso;
1488
1489         if (test_bit(__IXGB_DOWN, &adapter->flags)) {
1490                 dev_kfree_skb_any(skb);
1491                 return NETDEV_TX_OK;
1492         }
1493
1494         if (skb->len <= 0) {
1495                 dev_kfree_skb_any(skb);
1496                 return NETDEV_TX_OK;
1497         }
1498
1499         if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring,
1500                      DESC_NEEDED)))
1501                 return NETDEV_TX_BUSY;
1502
1503         if (skb_vlan_tag_present(skb)) {
1504                 tx_flags |= IXGB_TX_FLAGS_VLAN;
1505                 vlan_id = skb_vlan_tag_get(skb);
1506         }
1507
1508         first = adapter->tx_ring.next_to_use;
1509
1510         tso = ixgb_tso(adapter, skb);
1511         if (tso < 0) {
1512                 dev_kfree_skb_any(skb);
1513                 return NETDEV_TX_OK;
1514         }
1515
1516         if (likely(tso))
1517                 tx_flags |= IXGB_TX_FLAGS_TSO;
1518         else if (ixgb_tx_csum(adapter, skb))
1519                 tx_flags |= IXGB_TX_FLAGS_CSUM;
1520
1521         count = ixgb_tx_map(adapter, skb, first);
1522
1523         if (count) {
1524                 ixgb_tx_queue(adapter, count, vlan_id, tx_flags);
1525                 /* Make sure there is space in the ring for the next send. */
1526                 ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);
1527
1528         } else {
1529                 dev_kfree_skb_any(skb);
1530                 adapter->tx_ring.buffer_info[first].time_stamp = 0;
1531                 adapter->tx_ring.next_to_use = first;
1532         }
1533
1534         return NETDEV_TX_OK;
1535 }
1536
1537 /**
1538  * ixgb_tx_timeout - Respond to a Tx Hang
1539  * @netdev: network interface device structure
1540  **/
1541
1542 static void
1543 ixgb_tx_timeout(struct net_device *netdev)
1544 {
1545         struct ixgb_adapter *adapter = netdev_priv(netdev);
1546
1547         /* Do the reset outside of interrupt context */
1548         schedule_work(&adapter->tx_timeout_task);
1549 }
1550
1551 static void
1552 ixgb_tx_timeout_task(struct work_struct *work)
1553 {
1554         struct ixgb_adapter *adapter =
1555                 container_of(work, struct ixgb_adapter, tx_timeout_task);
1556
1557         adapter->tx_timeout_count++;
1558         ixgb_down(adapter, true);
1559         ixgb_up(adapter);
1560 }
1561
1562 /**
1563  * ixgb_change_mtu - Change the Maximum Transfer Unit
1564  * @netdev: network interface device structure
1565  * @new_mtu: new value for maximum frame size
1566  *
1567  * Returns 0 on success, negative on failure
1568  **/
1569
1570 static int
1571 ixgb_change_mtu(struct net_device *netdev, int new_mtu)
1572 {
1573         struct ixgb_adapter *adapter = netdev_priv(netdev);
1574         int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
1575
1576         if (netif_running(netdev))
1577                 ixgb_down(adapter, true);
1578
1579         adapter->rx_buffer_len = max_frame + 8; /* + 8 for errata */
1580
1581         netdev->mtu = new_mtu;
1582
1583         if (netif_running(netdev))
1584                 ixgb_up(adapter);
1585
1586         return 0;
1587 }
1588
1589 /**
1590  * ixgb_update_stats - Update the board statistics counters.
1591  * @adapter: board private structure
1592  **/
1593
1594 void
1595 ixgb_update_stats(struct ixgb_adapter *adapter)
1596 {
1597         struct net_device *netdev = adapter->netdev;
1598         struct pci_dev *pdev = adapter->pdev;
1599
1600         /* Prevent stats update while adapter is being reset */
1601         if (pci_channel_offline(pdev))
1602                 return;
1603
1604         if ((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
1605            (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
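                /* The MAC keeps its 64-bit counters as 32-bit low/high
                 * register pairs (e.g. MPRCL/MPRCH); read both halves and
                 * recombine before adjusting the multicast count below.
                 */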
1606                 u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
1607                 u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
1608                 u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
1609                 u64 bcast = ((u64)bcast_h << 32) | bcast_l;
1610
1611                 multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);
1612                 /* fix up multicast stats by removing broadcasts */
1613                 if (multi >= bcast)
1614                         multi -= bcast;
1615
1616                 adapter->stats.mprcl += (multi & 0xFFFFFFFF);
1617                 adapter->stats.mprch += (multi >> 32);
1618                 adapter->stats.bprcl += bcast_l;
1619                 adapter->stats.bprch += bcast_h;
1620         } else {
1621                 adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
1622                 adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
1623                 adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
1624                 adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
1625         }
1626         adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
1627         adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
1628         adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
1629         adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
1630         adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
1631         adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
1632         adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
1633         adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
1634         adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
1635         adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
1636         adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
1637         adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
1638         adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
1639         adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
1640         adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
1641         adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
1642         adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
1643         adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
1644         adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
1645         adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
1646         adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
1647         adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
1648         adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
1649         adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
1650         adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
1651         adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
1652         adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
1653         adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
1654         adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
1655         adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
1656         adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
1657         adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
1658         adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
1659         adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
1660         adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
1661         adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
1662         adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
1663         adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
1664         adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
1665         adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
1666         adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
1667         adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
1668         adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
1669         adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
1670         adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
1671         adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
1672         adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
1673         adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
1674         adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
1675         adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
1676         adapter->stats.mcftc += IXGB_READ_REG(&adapter->hw, MCFTC);
1677         adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
1678         adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
1679         adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
1680         adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
1681         adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);
1682
1683         /* Fill out the OS statistics structure */
1684
1685         netdev->stats.rx_packets = adapter->stats.gprcl;
1686         netdev->stats.tx_packets = adapter->stats.gptcl;
1687         netdev->stats.rx_bytes = adapter->stats.gorcl;
1688         netdev->stats.tx_bytes = adapter->stats.gotcl;
1689         netdev->stats.multicast = adapter->stats.mprcl;
1690         netdev->stats.collisions = 0;
1691
1692         /* ignore RLEC as it reports errors for padded (< 64 bytes) frames
1693          * with a length in the type/len field */
1694         netdev->stats.rx_errors =
1695             /* adapter->stats.rnbc + */ adapter->stats.crcerrs +
1696             adapter->stats.ruc +
1697             adapter->stats.roc /*+ adapter->stats.rlec */  +
1698             adapter->stats.icbc +
1699             adapter->stats.ecbc + adapter->stats.mpc;
1700
1701         /* see above
1702          * netdev->stats.rx_length_errors = adapter->stats.rlec;
1703          */
1704
1705         netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
1706         netdev->stats.rx_fifo_errors = adapter->stats.mpc;
1707         netdev->stats.rx_missed_errors = adapter->stats.mpc;
1708         netdev->stats.rx_over_errors = adapter->stats.mpc;
1709
1710         netdev->stats.tx_errors = 0;
1711         netdev->stats.rx_frame_errors = 0;
1712         netdev->stats.tx_aborted_errors = 0;
1713         netdev->stats.tx_carrier_errors = 0;
1714         netdev->stats.tx_fifo_errors = 0;
1715         netdev->stats.tx_heartbeat_errors = 0;
1716         netdev->stats.tx_window_errors = 0;
1717 }
1718
1719 #define IXGB_MAX_INTR 10
1720 /**
1721  * ixgb_intr - Interrupt Handler
1722  * @irq: interrupt number
1723  * @data: pointer to a network interface device structure
1724  **/
1725
1726 static irqreturn_t
1727 ixgb_intr(int irq, void *data)
1728 {
1729         struct net_device *netdev = data;
1730         struct ixgb_adapter *adapter = netdev_priv(netdev);
1731         struct ixgb_hw *hw = &adapter->hw;
1732         u32 icr = IXGB_READ_REG(hw, ICR);
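        /* Reading ICR both reports and clears the pending interrupt causes
         * on Intel MACs of this generation, so icr == 0 means the interrupt
         * was raised by another device sharing the line.
         */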
1733
1734         if (unlikely(!icr))
1735                 return IRQ_NONE;  /* Not our interrupt */
1736
1737         if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)))
1738                 if (!test_bit(__IXGB_DOWN, &adapter->flags))
1739                         mod_timer(&adapter->watchdog_timer, jiffies);
1740
1741         if (napi_schedule_prep(&adapter->napi)) {
1743                 /* Disable interrupts and register for poll. The flush
1744                  * of the posted write is intentionally left out.
1745                  */
1747                 IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
1748                 __napi_schedule(&adapter->napi);
1749         }
1750         return IRQ_HANDLED;
1751 }
1752
1753 /**
1754  * ixgb_clean - NAPI Rx polling callback
1755  * @napi: NAPI structure embedded in the board private structure
 * @budget: maximum number of Rx packets to process in this poll
1756  **/
1757
1758 static int
1759 ixgb_clean(struct napi_struct *napi, int budget)
1760 {
1761         struct ixgb_adapter *adapter = container_of(napi, struct ixgb_adapter, napi);
1762         int work_done = 0;
1763
1764         ixgb_clean_tx_irq(adapter);
1765         ixgb_clean_rx_irq(adapter, &work_done, budget);
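        /* Note that Tx completions are not charged against the budget; only
         * Rx packets processed by ixgb_clean_rx_irq() advance work_done.
         */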
1766
1767         /* If budget not fully consumed, exit the polling mode */
1768         if (work_done < budget) {
1769                 napi_complete_done(napi, work_done);
1770                 if (!test_bit(__IXGB_DOWN, &adapter->flags))
1771                         ixgb_irq_enable(adapter);
1772         }
1773
1774         return work_done;
1775 }
1776
1777 /**
1778  * ixgb_clean_tx_irq - Reclaim resources after transmit completes
1779  * @adapter: board private structure
1780  **/
1781
1782 static bool
1783 ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
1784 {
1785         struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
1786         struct net_device *netdev = adapter->netdev;
1787         struct ixgb_tx_desc *tx_desc, *eop_desc;
1788         struct ixgb_buffer *buffer_info;
1789         unsigned int i, eop;
1790         bool cleaned = false;
1791
1792         i = tx_ring->next_to_clean;
1793         eop = tx_ring->buffer_info[i].next_to_watch;
1794         eop_desc = IXGB_TX_DESC(*tx_ring, eop);
1795
1796         while (eop_desc->status & IXGB_TX_DESC_STATUS_DD) {
1797
1798                 rmb(); /* read buffer_info after eop_desc */
1799                 for (cleaned = false; !cleaned; ) {
1800                         tx_desc = IXGB_TX_DESC(*tx_ring, i);
1801                         buffer_info = &tx_ring->buffer_info[i];
1802
1803                         if (tx_desc->popts &
1804                            (IXGB_TX_DESC_POPTS_TXSM |
1805                             IXGB_TX_DESC_POPTS_IXSM))
1806                                 adapter->hw_csum_tx_good++;
1807
1808                         ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
1809
1810                         *(u32 *)&(tx_desc->status) = 0;
1811
1812                         cleaned = (i == eop);
1813                         if (++i == tx_ring->count)
                                 i = 0;
1814                 }
1815
1816                 eop = tx_ring->buffer_info[i].next_to_watch;
1817                 eop_desc = IXGB_TX_DESC(*tx_ring, eop);
1818         }
1819
1820         tx_ring->next_to_clean = i;
1821
1822         if (unlikely(cleaned && netif_carrier_ok(netdev) &&
1823                      IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED)) {
1824                 /* Make sure that anybody stopping the queue after this
1825                  * sees the new next_to_clean. */
1826                 smp_mb();
1827
1828                 if (netif_queue_stopped(netdev) &&
1829                     !(test_bit(__IXGB_DOWN, &adapter->flags))) {
1830                         netif_wake_queue(netdev);
1831                         ++adapter->restart_queue;
1832                 }
1833         }
1834
1835         if (adapter->detect_tx_hung) {
1836                 /* Detect a transmit hang in hardware; this serializes the
1837                  * check with the clearing of time_stamp and movement of i. */
1838                 adapter->detect_tx_hung = false;
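                /* Declare a hang only if the oldest pending buffer has been
                 * waiting for more than a second (time_stamp + HZ) and the
                 * transmitter is not merely paused by link flow control
                 * (STATUS.TXOFF clear).
                 */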
1839                 if (tx_ring->buffer_info[eop].time_stamp &&
1840                    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ) &&
1841                    !(IXGB_READ_REG(&adapter->hw, STATUS) &
1842                      IXGB_STATUS_TXOFF)) {
1843                         /* detected Tx unit hang */
1844                         netif_err(adapter, drv, adapter->netdev,
1845                                   "Detected Tx Unit Hang\n"
1846                                   "  TDH                  <%x>\n"
1847                                   "  TDT                  <%x>\n"
1848                                   "  next_to_use          <%x>\n"
1849                                   "  next_to_clean        <%x>\n"
1850                                   "buffer_info[next_to_clean]\n"
1851                                   "  time_stamp           <%lx>\n"
1852                                   "  next_to_watch        <%x>\n"
1853                                   "  jiffies              <%lx>\n"
1854                                   "  next_to_watch.status <%x>\n",
1855                                   IXGB_READ_REG(&adapter->hw, TDH),
1856                                   IXGB_READ_REG(&adapter->hw, TDT),
1857                                   tx_ring->next_to_use,
1858                                   tx_ring->next_to_clean,
1859                                   tx_ring->buffer_info[eop].time_stamp,
1860                                   eop,
1861                                   jiffies,
1862                                   eop_desc->status);
1863                         netif_stop_queue(netdev);
1864                 }
1865         }
1866
1867         return cleaned;
1868 }
1869
1870 /**
1871  * ixgb_rx_checksum - Receive Checksum Offload for 82597.
1872  * @adapter: board private structure
1873  * @rx_desc: receive descriptor
1874  * @skb: socket buffer with received data
1875  **/
1876
1877 static void
1878 ixgb_rx_checksum(struct ixgb_adapter *adapter,
1879                  struct ixgb_rx_desc *rx_desc,
1880                  struct sk_buff *skb)
1881 {
1882         /* If the Ignore Checksum bit is set, or the TCP checksum was
1883          * never calculated by the hardware, leave the skb unverified.
1884          */
1885         if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
1886            (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
1887                 skb_checksum_none_assert(skb);
1888                 return;
1889         }
1890
1891         /* At this point we know the hardware did the TCP checksum */
1892         /* now look at the TCP checksum error bit */
1893         if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
1894                 /* let the stack verify checksum errors */
1895                 skb_checksum_none_assert(skb);
1896                 adapter->hw_csum_rx_error++;
1897         } else {
1898                 /* TCP checksum is good */
1899                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1900                 adapter->hw_csum_rx_good++;
1901         }
1902 }
1903
1904 /*
1905  * this should improve performance for small packets with large amounts
1906  * of reassembly being done in the stack
1907  */
1908 static void ixgb_check_copybreak(struct napi_struct *napi,
1909                                  struct ixgb_buffer *buffer_info,
1910                                  u32 length, struct sk_buff **skb)
1911 {
1912         struct sk_buff *new_skb;
1913
1914         if (length > copybreak)
1915                 return;
1916
1917         new_skb = napi_alloc_skb(napi, length);
1918         if (!new_skb)
1919                 return;
1920
1921         skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
1922                                        (*skb)->data - NET_IP_ALIGN,
1923                                        length + NET_IP_ALIGN);
1924                 /* keep the original skb in buffer_info so it can be recycled */
1925         buffer_info->skb = *skb;
1926         *skb = new_skb;
1927 }
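/* With the default copybreak of 256 (IXGB_CB_LENGTH, tunable via the module
 * parameter), a 64-byte TCP ACK is copied into a right-sized skb and the
 * original full-size receive buffer is recycled through buffer_info->skb,
 * while a 1500-byte frame is passed up unchanged.
 */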
1928
1929 /**
1930  * ixgb_clean_rx_irq - Send received data up the network stack
1931  * @adapter: board private structure
 * @work_done: incremented once for each Rx packet processed
 * @work_to_do: NAPI budget, i.e. the maximum number of packets to process
1932  **/
1933
1934 static bool
1935 ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
1936 {
1937         struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
1938         struct net_device *netdev = adapter->netdev;
1939         struct pci_dev *pdev = adapter->pdev;
1940         struct ixgb_rx_desc *rx_desc, *next_rxd;
1941         struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
1942         u32 length;
1943         unsigned int i, j;
1944         int cleaned_count = 0;
1945         bool cleaned = false;
1946
1947         i = rx_ring->next_to_clean;
1948         rx_desc = IXGB_RX_DESC(*rx_ring, i);
1949         buffer_info = &rx_ring->buffer_info[i];
1950
1951         while (rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
1952                 struct sk_buff *skb;
1953                 u8 status;
1954
1955                 if (*work_done >= work_to_do)
1956                         break;
1957
1958                 (*work_done)++;
1959                 rmb();  /* read descriptor and rx_buffer_info after status DD */
1960                 status = rx_desc->status;
1961                 skb = buffer_info->skb;
1962                 buffer_info->skb = NULL;
1963
1964                 prefetch(skb->data - NET_IP_ALIGN);
1965
1966                 if (++i == rx_ring->count)
1967                         i = 0;
1968                 next_rxd = IXGB_RX_DESC(*rx_ring, i);
1969                 prefetch(next_rxd);
1970
1971                 j = i + 1;
1972                 if (j == rx_ring->count)
1973                         j = 0;
1974                 next2_buffer = &rx_ring->buffer_info[j];
1975                 prefetch(next2_buffer);
1976
1977                 next_buffer = &rx_ring->buffer_info[i];
1978
1979                 cleaned = true;
1980                 cleaned_count++;
1981
1982                 dma_unmap_single(&pdev->dev,
1983                                  buffer_info->dma,
1984                                  buffer_info->length,
1985                                  DMA_FROM_DEVICE);
1986                 buffer_info->dma = 0;
1987
1988                 length = le16_to_cpu(rx_desc->length);
1989                 rx_desc->length = 0;
1990
1991                 if (unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) {
1992
1993                         /* All receives must fit into a single buffer */
1994
1995                         pr_debug("Receive packet consumed multiple buffers length<%x>\n",
1996                                  length);
1997
1998                         dev_kfree_skb_irq(skb);
1999                         goto rxdesc_done;
2000                 }
2001
2002                 if (unlikely(rx_desc->errors &
2003                     (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE |
2004                      IXGB_RX_DESC_ERRORS_P | IXGB_RX_DESC_ERRORS_RXE))) {
2005                         dev_kfree_skb_irq(skb);
2006                         goto rxdesc_done;
2007                 }
2008
2009                 ixgb_check_copybreak(&adapter->napi, buffer_info, length, &skb);
2010
2011                 /* Good Receive */
2012                 skb_put(skb, length);
2013
2014                 /* Receive Checksum Offload */
2015                 ixgb_rx_checksum(adapter, rx_desc, skb);
2016
2017                 skb->protocol = eth_type_trans(skb, netdev);
2018                 if (status & IXGB_RX_DESC_STATUS_VP)
2019                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2020                                        le16_to_cpu(rx_desc->special));
2021
2022                 netif_receive_skb(skb);
2023
2024 rxdesc_done:
2025                 /* clean up descriptor, might be written over by hw */
2026                 rx_desc->status = 0;
2027
2028                 /* return some buffers to hardware, one at a time is too slow */
2029                 if (unlikely(cleaned_count >= IXGB_RX_BUFFER_WRITE)) {
2030                         ixgb_alloc_rx_buffers(adapter, cleaned_count);
2031                         cleaned_count = 0;
2032                 }
2033
2034                 /* use prefetched values */
2035                 rx_desc = next_rxd;
2036                 buffer_info = next_buffer;
2037         }
2038
2039         rx_ring->next_to_clean = i;
2040
2041         cleaned_count = IXGB_DESC_UNUSED(rx_ring);
2042         if (cleaned_count)
2043                 ixgb_alloc_rx_buffers(adapter, cleaned_count);
2044
2045         return cleaned;
2046 }
2047
2048 /**
2049  * ixgb_alloc_rx_buffers - Replace used receive buffers
2050  * @adapter: address of board private structure
 * @cleaned_count: how many receive buffers to try to replenish
2051  **/
2052
2053 static void
2054 ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
2055 {
2056         struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
2057         struct net_device *netdev = adapter->netdev;
2058         struct pci_dev *pdev = adapter->pdev;
2059         struct ixgb_rx_desc *rx_desc;
2060         struct ixgb_buffer *buffer_info;
2061         struct sk_buff *skb;
2062         unsigned int i;
2063         long cleancount;
2064
2065         i = rx_ring->next_to_use;
2066         buffer_info = &rx_ring->buffer_info[i];
2067         cleancount = IXGB_DESC_UNUSED(rx_ring);
2068
2070         /* leave three descriptors unused */
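        /* (At least in part because a completely full ring would look the
         * same as an empty one -- head == tail in both cases -- keeping a
         * small gap lets hardware and software tell the two apart.)
         */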
2071         while (--cleancount > 2 && cleaned_count--) {
2072                 /* recycle! it's good for you */
2073                 skb = buffer_info->skb;
2074                 if (skb) {
2075                         skb_trim(skb, 0);
2076                         goto map_skb;
2077                 }
2078
2079                 skb = netdev_alloc_skb_ip_align(netdev, adapter->rx_buffer_len);
2080                 if (unlikely(!skb)) {
2081                         /* Better luck next round */
2082                         adapter->alloc_rx_buff_failed++;
2083                         break;
2084                 }
2085
2086                 buffer_info->skb = skb;
2087                 buffer_info->length = adapter->rx_buffer_len;
2088 map_skb:
2089                 buffer_info->dma = dma_map_single(&pdev->dev,
2090                                                   skb->data,
2091                                                   adapter->rx_buffer_len,
2092                                                   DMA_FROM_DEVICE);
2093                 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
2094                         adapter->alloc_rx_buff_failed++;
2095                         break;
2096                 }
2097
2098                 rx_desc = IXGB_RX_DESC(*rx_ring, i);
2099                 rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
2100                 /* Guarantee the DD bit is not set before the hardware gets
2101                  * the descriptor; this is the rest of the workaround for
2102                  * the hardware double-writeback issue. */
2103                 rx_desc->status = 0;
2104
2106                 if (++i == rx_ring->count)
2107                         i = 0;
2108                 buffer_info = &rx_ring->buffer_info[i];
2109         }
2110
2111         if (likely(rx_ring->next_to_use != i)) {
2112                 rx_ring->next_to_use = i;
2113                 if (unlikely(i-- == 0))
2114                         i = (rx_ring->count - 1);
2115
2116                 /* Force memory writes to complete before letting h/w
2117                  * know there are new descriptors to fetch.  (Only
2118                  * applicable for weak-ordered memory model archs, such
2119                  * as IA-64). */
2120                 wmb();
2121                 IXGB_WRITE_REG(&adapter->hw, RDT, i);
2122         }
2123 }
2124
2125 static void
2126 ixgb_vlan_strip_enable(struct ixgb_adapter *adapter)
2127 {
2128         u32 ctrl;
2129
2130         /* enable VLAN tag insert/strip */
2131         ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2132         ctrl |= IXGB_CTRL0_VME;
2133         IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
2134 }
2135
2136 static void
2137 ixgb_vlan_strip_disable(struct ixgb_adapter *adapter)
2138 {
2139         u32 ctrl;
2140
2141         /* disable VLAN tag insert/strip */
2142         ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2143         ctrl &= ~IXGB_CTRL0_VME;
2144         IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
2145 }
2146
2147 static int
2148 ixgb_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
2149 {
2150         struct ixgb_adapter *adapter = netdev_priv(netdev);
2151         u32 vfta, index;
2152
2153         /* add VID to filter table */
2154
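        /* The 4096-bit VLAN filter table is an array of 128 32-bit
         * registers: VID bits 11:5 select the register and bits 4:0 the bit
         * within it. For example, VID 100 -> index 3, bit 4.
         */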
2155         index = (vid >> 5) & 0x7F;
2156         vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2157         vfta |= (1 << (vid & 0x1F));
2158         ixgb_write_vfta(&adapter->hw, index, vfta);
2159         set_bit(vid, adapter->active_vlans);
2160
2161         return 0;
2162 }
2163
2164 static int
2165 ixgb_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
2166 {
2167         struct ixgb_adapter *adapter = netdev_priv(netdev);
2168         u32 vfta, index;
2169
2170         /* remove VID from filter table */
2171
2172         index = (vid >> 5) & 0x7F;
2173         vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2174         vfta &= ~(1 << (vid & 0x1F));
2175         ixgb_write_vfta(&adapter->hw, index, vfta);
2176         clear_bit(vid, adapter->active_vlans);
2177
2178         return 0;
2179 }
2180
2181 static void
2182 ixgb_restore_vlan(struct ixgb_adapter *adapter)
2183 {
2184         u16 vid;
2185
2186         for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2187                 ixgb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
2188 }
2189
2190 /**
2191  * ixgb_io_error_detected - called when PCI error is detected
2192  * @pdev:    pointer to pci device with error
2193  * @state:   pci channel state after error
2194  *
2195  * This callback is called by the PCI subsystem whenever
2196  * a PCI bus error is detected.
2197  */
2198 static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
2199                                                enum pci_channel_state state)
2200 {
2201         struct net_device *netdev = pci_get_drvdata(pdev);
2202         struct ixgb_adapter *adapter = netdev_priv(netdev);
2203
2204         netif_device_detach(netdev);
2205
2206         if (state == pci_channel_io_perm_failure)
2207                 return PCI_ERS_RESULT_DISCONNECT;
2208
2209         if (netif_running(netdev))
2210                 ixgb_down(adapter, true);
2211
2212         pci_disable_device(pdev);
2213
2214         /* Request a slot reset. */
2215         return PCI_ERS_RESULT_NEED_RESET;
2216 }
2217
2218 /**
2219  * ixgb_io_slot_reset - called after the pci bus has been reset.
2220  * @pdev: pointer to pci device with error
2221  *
2222  * This callback is called after the PCI bus has been reset.
2223  * Basically, this tries to restart the card from scratch.
2224  * This is a shortened version of the device probe/discovery code;
2225  * it resembles the first half of the ixgb_probe() routine.
2226  */
2227 static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
2228 {
2229         struct net_device *netdev = pci_get_drvdata(pdev);
2230         struct ixgb_adapter *adapter = netdev_priv(netdev);
2231
2232         if (pci_enable_device(pdev)) {
2233                 netif_err(adapter, probe, adapter->netdev,
2234                           "Cannot re-enable PCI device after reset\n");
2235                 return PCI_ERS_RESULT_DISCONNECT;
2236         }
2237
2238         /* Perform card reset only on one instance of the card */
2239         if (PCI_FUNC(pdev->devfn) != 0)
2240                 return PCI_ERS_RESULT_RECOVERED;
2241
2242         pci_set_master(pdev);
2243
2244         netif_carrier_off(netdev);
2245         netif_stop_queue(netdev);
2246         ixgb_reset(adapter);
2247
2248         /* Make sure the EEPROM is good */
2249         if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
2250                 netif_err(adapter, probe, adapter->netdev,
2251                           "After reset, the EEPROM checksum is not valid\n");
2252                 return PCI_ERS_RESULT_DISCONNECT;
2253         }
2254         ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
2255         memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
2256
2257         if (!is_valid_ether_addr(netdev->perm_addr)) {
2258                 netif_err(adapter, probe, adapter->netdev,
2259                           "After reset, invalid MAC address\n");
2260                 return PCI_ERS_RESULT_DISCONNECT;
2261         }
2262
2263         return PCI_ERS_RESULT_RECOVERED;
2264 }
2265
2266 /**
2267  * ixgb_io_resume - called when it is OK to resume normal operations
2268  * @pdev: pointer to pci device with error
2269  *
2270  * The error recovery driver tells us that it is OK to resume
2271  * normal operation. Implementation resembles the second half
2272  * of the ixgb_probe() routine.
2273  */
2274 static void ixgb_io_resume(struct pci_dev *pdev)
2275 {
2276         struct net_device *netdev = pci_get_drvdata(pdev);
2277         struct ixgb_adapter *adapter = netdev_priv(netdev);
2278
2279         pci_set_master(pdev);
2280
2281         if (netif_running(netdev)) {
2282                 if (ixgb_up(adapter)) {
2283                         pr_err("can't bring device back up after reset\n");
2284                         return;
2285                 }
2286         }
2287
2288         netif_device_attach(netdev);
2289         mod_timer(&adapter->watchdog_timer, jiffies);
2290 }
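/* These three callbacks are normally wired into the driver's
 * struct pci_driver via a struct pci_error_handlers table. A minimal
 * sketch of that glue (the real table lives elsewhere in this file;
 * the name below is illustrative):
 *
 *	static const struct pci_error_handlers example_err_handler = {
 *		.error_detected = ixgb_io_error_detected,
 *		.slot_reset     = ixgb_io_slot_reset,
 *		.resume         = ixgb_io_resume,
 *	};
 */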
2291
2292 /* ixgb_main.c */