OSDN Git Service

[ZLIB]: Move bnx2 driver gzip unpacker into zlib.
[sagit-ice-cold/kernel_xiaomi_msm8998.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2007 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
55 #define FW_BUF_SIZE             0x8000
56
57 #define DRV_MODULE_NAME         "bnx2"
58 #define PFX DRV_MODULE_NAME     ": "
59 #define DRV_MODULE_VERSION      "1.6.5"
60 #define DRV_MODULE_RELDATE      "September 20, 2007"
61
62 #define RUN_AT(x) (jiffies + (x))
63
64 /* Time in jiffies before concluding the transmitter is hung. */
65 #define TX_TIMEOUT  (5*HZ)
66
67 static const char version[] __devinitdata =
68         "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
69
70 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
71 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
72 MODULE_LICENSE("GPL");
73 MODULE_VERSION(DRV_MODULE_VERSION);
74
75 static int disable_msi = 0;
76
77 module_param(disable_msi, int, 0);
78 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
/* Board identifiers; used as the driver_data index into board_info[]
 * and bnx2_pci_tbl[] below. */
typedef enum {
        BCM5706 = 0,
        NC370T,
        NC370I,
        BCM5706S,
        NC370F,
        BCM5708,
        BCM5708S,
        BCM5709,
        BCM5709S,
} board_t;
91
92 /* indexed by board_t, above */
/* Human-readable board names, indexed by board_t above; the order of
 * these entries must stay in sync with the board_t enum. */
static const struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM5706 1000Base-T" },
        { "HP NC370T Multifunction Gigabit Server Adapter" },
        { "HP NC370i Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
        { "HP NC370F Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5709 1000Base-T" },
        { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
        };
106
/* PCI ID match table.  The HP OEM entries (matched by subsystem vendor
 * and device ID) are listed before the PCI_ANY_ID wildcard entries for
 * the same chips, so the more specific match wins. */
static struct pci_device_id bnx2_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
        { 0, }
};
128
/* NVRAM device table.  Each entry describes one supported flash/EEPROM
 * part: strapping value, config/write command words, buffered vs.
 * non-buffered flags, and geometry (page bits/size, address mask,
 * total size) plus a name for logging.
 * NOTE(review): presumably matched against the chip's NVRAM strapping
 * at probe time — confirm against the nvram init code elsewhere in
 * this file. */
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
        /* Slow EEPROM */
        {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - slow"},
        /* Expansion entry 0001 */
        {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0001"},
        /* Saifun SA25F010 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
         "Non-buffered flash (128kB)"},
        /* Saifun SA25F020 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
         "Non-buffered flash (256kB)"},
        /* Expansion entry 0100 */
        {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0100"},
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
        {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
         "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
        /* Entry 0110: ST M45PE20 (non-buffered flash)*/
        {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
         "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
        /* Saifun SA25F005 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
         "Non-buffered flash (64kB)"},
        /* Fast EEPROM */
        {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - fast"},
        /* Expansion entry 1001 */
        {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1001"},
        /* Expansion entry 1010 */
        {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1010"},
        /* ATMEL AT45DB011B (buffered flash) */
        {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
         "Buffered flash (128kB)"},
        /* Expansion entry 1100 */
        {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1100"},
        /* Expansion entry 1101 */
        {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1101"},
        /* Ateml Expansion entry 1110 */
        {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1110 (Atmel)"},
        /* ATMEL AT45DB021B (buffered flash) */
        {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
         "Buffered flash (256kB)"},
};
217
/* The 5709 has a single fixed NVRAM configuration, so it uses this
 * dedicated spec instead of the strap-matched flash_table above. */
static struct flash_spec flash_5709 = {
        .flags          = BNX2_NV_BUFFERED,
        .page_bits      = BCM5709_FLASH_PAGE_BITS,
        .page_size      = BCM5709_FLASH_PAGE_SIZE,
        .addr_mask      = BCM5709_FLASH_BYTE_ADDR_MASK,
        .total_size     = BUFFERED_FLASH_TOTAL_SIZE*2,
        .name           = "5709 Buffered flash (256kB)",
};
226
227 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
228
229 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
230 {
231         u32 diff;
232
233         smp_mb();
234
235         /* The ring uses 256 indices for 255 entries, one of them
236          * needs to be skipped.
237          */
238         diff = bp->tx_prod - bp->tx_cons;
239         if (unlikely(diff >= TX_DESC_CNT)) {
240                 diff &= 0xffff;
241                 if (diff == TX_DESC_CNT)
242                         diff = MAX_TX_DESC_CNT;
243         }
244         return (bp->tx_ring_size - diff);
245 }
246
247 static u32
248 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
249 {
250         u32 val;
251
252         spin_lock_bh(&bp->indirect_lock);
253         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
254         val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
255         spin_unlock_bh(&bp->indirect_lock);
256         return val;
257 }
258
/* Indirectly write @val to a device register through the PCI config
 * window; indirect_lock serializes use of the shared window registers. */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
        spin_lock_bh(&bp->indirect_lock);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
        spin_unlock_bh(&bp->indirect_lock);
}
267
268 static void
269 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
270 {
271         offset += cid_addr;
272         spin_lock_bh(&bp->indirect_lock);
273         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
274                 int i;
275
276                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
277                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
278                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
279                 for (i = 0; i < 5; i++) {
280                         u32 val;
281                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
282                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
283                                 break;
284                         udelay(5);
285                 }
286         } else {
287                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
288                 REG_WR(bp, BNX2_CTX_DATA, val);
289         }
290         spin_unlock_bh(&bp->indirect_lock);
291 }
292
/* Read PHY register @reg over the MDIO bus into @*val.
 *
 * If the MAC is auto-polling the PHY, auto-poll is turned off around
 * the manual transaction and restored afterwards, with 40us settle
 * delays after each mode change.
 *
 * Returns 0 on success, or -EBUSY (with *val zeroed) if the MDIO
 * transaction does not complete within ~500us.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* Disable auto-polling so it cannot race with the
                 * manual MDIO command below. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                /* Read back; presumably flushes the posted write. */
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Compose the MDIO read command: PHY address in bits 25:21,
         * register in bits 20:16, plus read opcode and start/busy. */
        val1 = (bp->phy_addr << 21) | (reg << 16) |
                BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
                BNX2_EMAC_MDIO_COMM_START_BUSY;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll for completion, up to 50 * 10us. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);

                        /* Re-read and keep only the data field. */
                        val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                        val1 &= BNX2_EMAC_MDIO_COMM_DATA;

                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
                *val = 0x0;
                ret = -EBUSY;
        }
        else {
                *val = val1;
                ret = 0;
        }

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* Restore auto-polling. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
349
/* Write @val to PHY register @reg over the MDIO bus.
 *
 * Mirrors bnx2_read_phy(): auto-polling is suspended around the manual
 * transaction and restored afterwards.  Returns 0 on success or -EBUSY
 * if the transaction does not complete within ~500us.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* Disable auto-polling during the manual access. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Compose the MDIO write command: address, register, data,
         * write opcode, and start/busy. */
        val1 = (bp->phy_addr << 21) | (reg << 16) | val |
                BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
                BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll for completion, up to 50 * 10us. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
                ret = -EBUSY;
        else
                ret = 0;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* Restore auto-polling. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
398
/* Mask the chip's interrupt; the read-back presumably flushes the
 * posted write before the caller proceeds. */
static void
bnx2_disable_int(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
406
/* Unmask the chip's interrupt and ack up to the last seen status index.
 * The first write acks with the interrupt still masked, the second
 * unmasks; finally COAL_NOW forces the host coalescing block to fire
 * so no already-pending event is lost. */
static void
bnx2_enable_int(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

        REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
419
/* Disable interrupts and wait for any in-flight handler to finish.
 * intr_sem is incremented first so that bnx2_netif_start() will not
 * re-enable interrupts until the matching decrement. */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
        atomic_inc(&bp->intr_sem);
        bnx2_disable_int(bp);
        synchronize_irq(bp->pdev->irq);
}
427
/* Quiesce the interface: synchronously disable interrupts, stop NAPI
 * polling and the TX queue.  Paired with bnx2_netif_start(). */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
        bnx2_disable_int_sync(bp);
        if (netif_running(bp->dev)) {
                napi_disable(&bp->napi);
                netif_tx_disable(bp->dev);
                bp->dev->trans_start = jiffies; /* prevent tx timeout */
        }
}
438
439 static void
440 bnx2_netif_start(struct bnx2 *bp)
441 {
442         if (atomic_dec_and_test(&bp->intr_sem)) {
443                 if (netif_running(bp->dev)) {
444                         netif_wake_queue(bp->dev);
445                         napi_enable(&bp->napi);
446                         bnx2_enable_int(bp);
447                 }
448         }
449 }
450
/* Release all DMA and host memory owned by the device: 5709 context
 * pages, the combined status/statistics block, and the TX/RX
 * descriptor and buffer rings.
 *
 * Safe to call on a partially-allocated bp (used as the error path of
 * bnx2_alloc_mem): every pointer is checked before freeing and NULLed
 * afterwards, making the function idempotent. */
static void
bnx2_free_mem(struct bnx2 *bp)
{
        int i;

        for (i = 0; i < bp->ctx_pages; i++) {
                if (bp->ctx_blk[i]) {
                        pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
                                            bp->ctx_blk[i],
                                            bp->ctx_blk_mapping[i]);
                        bp->ctx_blk[i] = NULL;
                }
        }
        if (bp->status_blk) {
                pci_free_consistent(bp->pdev, bp->status_stats_size,
                                    bp->status_blk, bp->status_blk_mapping);
                bp->status_blk = NULL;
                /* stats_blk points into the status_blk allocation, so
                 * only the pointer is cleared here. */
                bp->stats_blk = NULL;
        }
        if (bp->tx_desc_ring) {
                pci_free_consistent(bp->pdev,
                                    sizeof(struct tx_bd) * TX_DESC_CNT,
                                    bp->tx_desc_ring, bp->tx_desc_mapping);
                bp->tx_desc_ring = NULL;
        }
        /* kfree/vfree accept NULL, so no checks are needed here. */
        kfree(bp->tx_buf_ring);
        bp->tx_buf_ring = NULL;
        for (i = 0; i < bp->rx_max_ring; i++) {
                if (bp->rx_desc_ring[i])
                        pci_free_consistent(bp->pdev,
                                            sizeof(struct rx_bd) * RX_DESC_CNT,
                                            bp->rx_desc_ring[i],
                                            bp->rx_desc_mapping[i]);
                bp->rx_desc_ring[i] = NULL;
        }
        vfree(bp->rx_buf_ring);
        bp->rx_buf_ring = NULL;
}
489
/* Allocate all DMA and host memory the device needs: TX buffer/descriptor
 * rings, RX buffer/descriptor rings, a combined status+statistics DMA
 * block, and (5709 only) on-host context pages.
 *
 * Returns 0 on success or -ENOMEM; on any failure everything already
 * allocated is released via bnx2_free_mem() (goto-cleanup pattern).
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
        int i, status_blk_size;

        bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
                                  GFP_KERNEL);
        if (bp->tx_buf_ring == NULL)
                return -ENOMEM;

        bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
                                                sizeof(struct tx_bd) *
                                                TX_DESC_CNT,
                                                &bp->tx_desc_mapping);
        if (bp->tx_desc_ring == NULL)
                goto alloc_mem_err;

        /* RX software ring can be large (rx_max_ring pages' worth), so
         * it uses vmalloc rather than kmalloc. */
        bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
                                  bp->rx_max_ring);
        if (bp->rx_buf_ring == NULL)
                goto alloc_mem_err;

        memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
                                   bp->rx_max_ring);

        for (i = 0; i < bp->rx_max_ring; i++) {
                bp->rx_desc_ring[i] =
                        pci_alloc_consistent(bp->pdev,
                                             sizeof(struct rx_bd) * RX_DESC_CNT,
                                             &bp->rx_desc_mapping[i]);
                if (bp->rx_desc_ring[i] == NULL)
                        goto alloc_mem_err;

        }

        /* Combine status and statistics blocks into one allocation. */
        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);

        bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
                                              &bp->status_blk_mapping);
        if (bp->status_blk == NULL)
                goto alloc_mem_err;

        memset(bp->status_blk, 0, bp->status_stats_size);

        /* stats_blk lives immediately after the (cache-aligned) status
         * block within the same DMA allocation. */
        bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
                                  status_blk_size);

        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                /* 0x2000 bytes of context, split into BCM_PAGE_SIZE
                 * chunks (at least one page). */
                bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
                if (bp->ctx_pages == 0)
                        bp->ctx_pages = 1;
                for (i = 0; i < bp->ctx_pages; i++) {
                        bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
                                                BCM_PAGE_SIZE,
                                                &bp->ctx_blk_mapping[i]);
                        if (bp->ctx_blk[i] == NULL)
                                goto alloc_mem_err;
                }
        }
        return 0;

alloc_mem_err:
        bnx2_free_mem(bp);
        return -ENOMEM;
}
560
/* Report the current link state (speed/duplex/autoneg) to the bootcode
 * firmware via the BNX2_LINK_STATUS shared-memory word.  Skipped when
 * the PHY is managed remotely (REMOTE_PHY_CAP_FLAG). */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
        u32 fw_link_status = 0;

        if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
                return;

        if (bp->link_up) {
                u32 bmsr;

                switch (bp->line_speed) {
                case SPEED_10:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_10HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_10FULL;
                        break;
                case SPEED_100:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_100HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_100FULL;
                        break;
                case SPEED_1000:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_1000HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_1000FULL;
                        break;
                case SPEED_2500:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_2500HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_2500FULL;
                        break;
                }

                fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

                if (bp->autoneg) {
                        fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

                        /* BMSR is read twice; presumably the status bits
                         * are latched and need a double read to refresh. */
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

                        if (!(bmsr & BMSR_ANEGCOMPLETE) ||
                            bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
                                fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
                        else
                                fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
                }
        }
        else
                fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

        REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
619
620 static char *
621 bnx2_xceiver_str(struct bnx2 *bp)
622 {
623         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
624                 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
625                  "Copper"));
626 }
627
/* Log the link state, update the carrier flag on the netdevice, and
 * forward the state to the firmware via bnx2_report_fw_link().
 * The message is built from several printk calls, so the exact strings
 * must be kept intact. */
static void
bnx2_report_link(struct bnx2 *bp)
{
        if (bp->link_up) {
                netif_carrier_on(bp->dev);
                printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
                       bnx2_xceiver_str(bp));

                printk("%d Mbps ", bp->line_speed);

                if (bp->duplex == DUPLEX_FULL)
                        printk("full duplex");
                else
                        printk("half duplex");

                if (bp->flow_ctrl) {
                        if (bp->flow_ctrl & FLOW_CTRL_RX) {
                                printk(", receive ");
                                if (bp->flow_ctrl & FLOW_CTRL_TX)
                                        printk("& transmit ");
                        }
                        else {
                                printk(", transmit ");
                        }
                        printk("flow control ON");
                }
                printk("\n");
        }
        else {
                netif_carrier_off(bp->dev);
                printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
                       bnx2_xceiver_str(bp));
        }

        bnx2_report_fw_link(bp);
}
664
/* Resolve the negotiated pause (flow-control) configuration into
 * bp->flow_ctrl.
 *
 * If autoneg of speed+flow-control is not fully enabled, the requested
 * setting is applied directly (full duplex only).  Otherwise the
 * advertisement registers of both link partners are compared per the
 * pause-resolution rules of IEEE 802.3 Table 28B-3.  The 5708 SerDes
 * exposes the resolved result directly in a status register instead. */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
        u32 local_adv, remote_adv;

        bp->flow_ctrl = 0;
        if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
                (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

                if (bp->duplex == DUPLEX_FULL) {
                        bp->flow_ctrl = bp->req_flow_ctrl;
                }
                return;
        }

        /* Pause is only defined for full duplex. */
        if (bp->duplex != DUPLEX_FULL) {
                return;
        }

        if ((bp->phy_flags & PHY_SERDES_FLAG) &&
            (CHIP_NUM(bp) == CHIP_NUM_5708)) {
                u32 val;

                /* The 5708 SerDes reports the resolved pause state
                 * directly; no need to apply the resolution table. */
                bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
                if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_TX;
                if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_RX;
                return;
        }

        bnx2_read_phy(bp, bp->mii_adv, &local_adv);
        bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

        if (bp->phy_flags & PHY_SERDES_FLAG) {
                /* Translate the 1000Base-X pause bits into the common
                 * ADVERTISE_PAUSE_* encoding so the resolution logic
                 * below works for both copper and SerDes. */
                u32 new_local_adv = 0;
                u32 new_remote_adv = 0;

                if (local_adv & ADVERTISE_1000XPAUSE)
                        new_local_adv |= ADVERTISE_PAUSE_CAP;
                if (local_adv & ADVERTISE_1000XPSE_ASYM)
                        new_local_adv |= ADVERTISE_PAUSE_ASYM;
                if (remote_adv & ADVERTISE_1000XPAUSE)
                        new_remote_adv |= ADVERTISE_PAUSE_CAP;
                if (remote_adv & ADVERTISE_1000XPSE_ASYM)
                        new_remote_adv |= ADVERTISE_PAUSE_ASYM;

                local_adv = new_local_adv;
                remote_adv = new_remote_adv;
        }

        /* See Table 28B-3 of 802.3ab-1999 spec. */
        if (local_adv & ADVERTISE_PAUSE_CAP) {
                if(local_adv & ADVERTISE_PAUSE_ASYM) {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                        else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
                                bp->flow_ctrl = FLOW_CTRL_RX;
                        }
                }
                else {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                }
        }
        else if (local_adv & ADVERTISE_PAUSE_ASYM) {
                if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
                        (remote_adv & ADVERTISE_PAUSE_ASYM)) {

                        bp->flow_ctrl = FLOW_CTRL_TX;
                }
        }
}
740
/* Record link-up state for the 5709 SerDes PHY: read the GP status
 * block to determine negotiated speed and duplex, or use the forced
 * settings when speed autoneg is off.  Always returns 0. */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
        u32 val, speed;

        bp->link_up = 1;

        /* Select the GP status block, read the AN status word, then
         * restore the default (combo IEEE) block. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
        bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

        if ((bp->autoneg & AUTONEG_SPEED) == 0) {
                /* Forced speed/duplex: use the requested values. */
                bp->line_speed = bp->req_line_speed;
                bp->duplex = bp->req_duplex;
                return 0;
        }
        speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
        switch (speed) {
                case MII_BNX2_GP_TOP_AN_SPEED_10:
                        bp->line_speed = SPEED_10;
                        break;
                case MII_BNX2_GP_TOP_AN_SPEED_100:
                        bp->line_speed = SPEED_100;
                        break;
                case MII_BNX2_GP_TOP_AN_SPEED_1G:
                case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
                        bp->line_speed = SPEED_1000;
                        break;
                case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
                        bp->line_speed = SPEED_2500;
                        break;
        }
        if (val & MII_BNX2_GP_TOP_AN_FD)
                bp->duplex = DUPLEX_FULL;
        else
                bp->duplex = DUPLEX_HALF;
        return 0;
}
779
780 static int
781 bnx2_5708s_linkup(struct bnx2 *bp)
782 {
783         u32 val;
784
785         bp->link_up = 1;
786         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
787         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
788                 case BCM5708S_1000X_STAT1_SPEED_10:
789                         bp->line_speed = SPEED_10;
790                         break;
791                 case BCM5708S_1000X_STAT1_SPEED_100:
792                         bp->line_speed = SPEED_100;
793                         break;
794                 case BCM5708S_1000X_STAT1_SPEED_1G:
795                         bp->line_speed = SPEED_1000;
796                         break;
797                 case BCM5708S_1000X_STAT1_SPEED_2G5:
798                         bp->line_speed = SPEED_2500;
799                         break;
800         }
801         if (val & BCM5708S_1000X_STAT1_FD)
802                 bp->duplex = DUPLEX_FULL;
803         else
804                 bp->duplex = DUPLEX_HALF;
805
806         return 0;
807 }
808
809 static int
810 bnx2_5706s_linkup(struct bnx2 *bp)
811 {
812         u32 bmcr, local_adv, remote_adv, common;
813
814         bp->link_up = 1;
815         bp->line_speed = SPEED_1000;
816
817         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
818         if (bmcr & BMCR_FULLDPLX) {
819                 bp->duplex = DUPLEX_FULL;
820         }
821         else {
822                 bp->duplex = DUPLEX_HALF;
823         }
824
825         if (!(bmcr & BMCR_ANENABLE)) {
826                 return 0;
827         }
828
829         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
830         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
831
832         common = local_adv & remote_adv;
833         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
834
835                 if (common & ADVERTISE_1000XFULL) {
836                         bp->duplex = DUPLEX_FULL;
837                 }
838                 else {
839                         bp->duplex = DUPLEX_HALF;
840                 }
841         }
842
843         return 0;
844 }
845
846 static int
847 bnx2_copper_linkup(struct bnx2 *bp)
848 {
849         u32 bmcr;
850
851         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
852         if (bmcr & BMCR_ANENABLE) {
853                 u32 local_adv, remote_adv, common;
854
855                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
856                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
857
858                 common = local_adv & (remote_adv >> 2);
859                 if (common & ADVERTISE_1000FULL) {
860                         bp->line_speed = SPEED_1000;
861                         bp->duplex = DUPLEX_FULL;
862                 }
863                 else if (common & ADVERTISE_1000HALF) {
864                         bp->line_speed = SPEED_1000;
865                         bp->duplex = DUPLEX_HALF;
866                 }
867                 else {
868                         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
869                         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
870
871                         common = local_adv & remote_adv;
872                         if (common & ADVERTISE_100FULL) {
873                                 bp->line_speed = SPEED_100;
874                                 bp->duplex = DUPLEX_FULL;
875                         }
876                         else if (common & ADVERTISE_100HALF) {
877                                 bp->line_speed = SPEED_100;
878                                 bp->duplex = DUPLEX_HALF;
879                         }
880                         else if (common & ADVERTISE_10FULL) {
881                                 bp->line_speed = SPEED_10;
882                                 bp->duplex = DUPLEX_FULL;
883                         }
884                         else if (common & ADVERTISE_10HALF) {
885                                 bp->line_speed = SPEED_10;
886                                 bp->duplex = DUPLEX_HALF;
887                         }
888                         else {
889                                 bp->line_speed = 0;
890                                 bp->link_up = 0;
891                         }
892                 }
893         }
894         else {
895                 if (bmcr & BMCR_SPEED100) {
896                         bp->line_speed = SPEED_100;
897                 }
898                 else {
899                         bp->line_speed = SPEED_10;
900                 }
901                 if (bmcr & BMCR_FULLDPLX) {
902                         bp->duplex = DUPLEX_FULL;
903                 }
904                 else {
905                         bp->duplex = DUPLEX_HALF;
906                 }
907         }
908
909         return 0;
910 }
911
/* Program the EMAC for the currently resolved link state: port mode
 * (MII/GMII/2.5G), duplex, and rx/tx flow control, then acknowledge
 * the MAC link-change interrupt.  Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Default inter-packet gap settings; the 1G half-duplex corner
	 * case gets a larger value just below.
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* 5706 has no dedicated 10M mode; it uses
				 * plain MII (fall through below).
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* Link down: leave the port in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
978
979 static void
980 bnx2_enable_bmsr1(struct bnx2 *bp)
981 {
982         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
983             (CHIP_NUM(bp) == CHIP_NUM_5709))
984                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
985                                MII_BNX2_BLK_ADDR_GP_STATUS);
986 }
987
988 static void
989 bnx2_disable_bmsr1(struct bnx2 *bp)
990 {
991         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
992             (CHIP_NUM(bp) == CHIP_NUM_5709))
993                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
994                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
995 }
996
997 static int
998 bnx2_test_and_enable_2g5(struct bnx2 *bp)
999 {
1000         u32 up1;
1001         int ret = 1;
1002
1003         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1004                 return 0;
1005
1006         if (bp->autoneg & AUTONEG_SPEED)
1007                 bp->advertising |= ADVERTISED_2500baseX_Full;
1008
1009         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1010                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1011
1012         bnx2_read_phy(bp, bp->mii_up1, &up1);
1013         if (!(up1 & BCM5708S_UP1_2G5)) {
1014                 up1 |= BCM5708S_UP1_2G5;
1015                 bnx2_write_phy(bp, bp->mii_up1, up1);
1016                 ret = 0;
1017         }
1018
1019         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1020                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1021                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1022
1023         return ret;
1024 }
1025
1026 static int
1027 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1028 {
1029         u32 up1;
1030         int ret = 0;
1031
1032         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1033                 return 0;
1034
1035         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1036                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1037
1038         bnx2_read_phy(bp, bp->mii_up1, &up1);
1039         if (up1 & BCM5708S_UP1_2G5) {
1040                 up1 &= ~BCM5708S_UP1_2G5;
1041                 bnx2_write_phy(bp, bp->mii_up1, up1);
1042                 ret = 1;
1043         }
1044
1045         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1046                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1047                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1048
1049         return ret;
1050 }
1051
/* Force the SerDes link to 2.5 Gbps (no autonegotiation).
 *
 * 5709: sets the FORCE and FORCE_2_5G bits in the SERDES_DIG MISC1
 * register (selected via the block-address register), then re-reads
 * BMCR from the default block.
 * 5708: sets the vendor-specific FORCE_2500 bit in BMCR.
 *
 * NOTE(review): if the chip is neither 5709 nor 5708, bmcr is written
 * back uninitialized at the bottom.  This looks unreachable because
 * the PHY_2_5G_CAPABLE_FLAG guard should exclude other chips --
 * confirm before relying on it.
 */
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	}

	/* Forcing a speed requires autoneg off; honor requested duplex. */
	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1086
/* Undo a forced 2.5 Gbps link and re-enable autonegotiation.
 *
 * 5709: clears the FORCE bit in the SERDES_DIG MISC1 register, then
 * re-reads BMCR from the default block.
 * 5708: clears the vendor-specific FORCE_2500 bit in BMCR.
 *
 * NOTE(review): as in bnx2_enable_forced_2g5(), bmcr would be used
 * uninitialized on a chip that is neither 5709 nor 5708; presumed
 * unreachable via the PHY_2_5G_CAPABLE_FLAG guard -- confirm.
 */
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	}

	/* Restart autoneg at 1G now that the force bits are cleared. */
	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1117
/* Re-derive the link state from the PHY status and program the MAC.
 *
 * bmsr1 is read twice because the MII link-status bit is latched-low;
 * the first read clears a stale link-lost indication.  On 5706 SerDes
 * the EMAC link status overrides BMSR (NOTE(review): presumably a
 * workaround for unreliable 5706S BMSR reporting -- confirm).
 * Returns 0.  Caller holds bp->phy_lock.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback the link is considered up unconditionally. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* A firmware-managed PHY reports link through events instead. */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Chip-specific link-up handler resolves speed/duplex. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link lost: drop any forced 2.5G mode so autoneg can
		 * renegotiate from scratch.
		 */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	/* Report only actual link transitions. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1183
1184 static int
1185 bnx2_reset_phy(struct bnx2 *bp)
1186 {
1187         int i;
1188         u32 reg;
1189
1190         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1191
1192 #define PHY_RESET_MAX_WAIT 100
1193         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1194                 udelay(10);
1195
1196                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1197                 if (!(reg & BMCR_RESET)) {
1198                         udelay(20);
1199                         break;
1200                 }
1201         }
1202         if (i == PHY_RESET_MAX_WAIT) {
1203                 return -EBUSY;
1204         }
1205         return 0;
1206 }
1207
1208 static u32
1209 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1210 {
1211         u32 adv = 0;
1212
1213         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1214                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1215
1216                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1217                         adv = ADVERTISE_1000XPAUSE;
1218                 }
1219                 else {
1220                         adv = ADVERTISE_PAUSE_CAP;
1221                 }
1222         }
1223         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1224                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1225                         adv = ADVERTISE_1000XPSE_ASYM;
1226                 }
1227                 else {
1228                         adv = ADVERTISE_PAUSE_ASYM;
1229                 }
1230         }
1231         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1232                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1233                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1234                 }
1235                 else {
1236                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1237                 }
1238         }
1239         return adv;
1240 }
1241
1242 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1243
1244 static int
1245 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1246 {
1247         u32 speed_arg = 0, pause_adv;
1248
1249         pause_adv = bnx2_phy_get_pause_adv(bp);
1250
1251         if (bp->autoneg & AUTONEG_SPEED) {
1252                 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1253                 if (bp->advertising & ADVERTISED_10baseT_Half)
1254                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1255                 if (bp->advertising & ADVERTISED_10baseT_Full)
1256                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1257                 if (bp->advertising & ADVERTISED_100baseT_Half)
1258                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1259                 if (bp->advertising & ADVERTISED_100baseT_Full)
1260                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1261                 if (bp->advertising & ADVERTISED_1000baseT_Full)
1262                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1263                 if (bp->advertising & ADVERTISED_2500baseX_Full)
1264                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1265         } else {
1266                 if (bp->req_line_speed == SPEED_2500)
1267                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1268                 else if (bp->req_line_speed == SPEED_1000)
1269                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1270                 else if (bp->req_line_speed == SPEED_100) {
1271                         if (bp->req_duplex == DUPLEX_FULL)
1272                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1273                         else
1274                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1275                 } else if (bp->req_line_speed == SPEED_10) {
1276                         if (bp->req_duplex == DUPLEX_FULL)
1277                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1278                         else
1279                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1280                 }
1281         }
1282
1283         if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1284                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1285         if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM))
1286                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1287
1288         if (port == PORT_TP)
1289                 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1290                              BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1291
1292         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1293
1294         spin_unlock_bh(&bp->phy_lock);
1295         bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1296         spin_lock_bh(&bp->phy_lock);
1297
1298         return 0;
1299 }
1300
/* Configure the SerDes PHY from bp->autoneg and the requested
 * speed/duplex.  Firmware-managed PHYs are delegated to
 * bnx2_setup_remote_phy().  Returns 0.
 *
 * Caller holds bp->phy_lock; the lock is dropped briefly when a link
 * bounce is forced during renegotiation.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced-speed path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Align 2.5G advertisement with the forced speed; if it
		 * changed, the link must be bounced below.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed: just re-resolve flow control
			 * and reprogram the MAC.
			 */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autonegotiation path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1415
/* Ethtool advertisement mask for every fibre speed this PHY supports;
 * 2.5G is included only on 2.5G-capable parts.  Expects a local `bp`.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

/* Ethtool advertisement mask for all copper speeds (10/100/1000). */
#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII advertisement bits for all 10/100 modes plus CSMA selector. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII 1000BASE-T control bits for both gigabit duplex modes. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1430
1431 static void
1432 bnx2_set_default_remote_link(struct bnx2 *bp)
1433 {
1434         u32 link;
1435
1436         if (bp->phy_port == PORT_TP)
1437                 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
1438         else
1439                 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
1440
1441         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1442                 bp->req_line_speed = 0;
1443                 bp->autoneg |= AUTONEG_SPEED;
1444                 bp->advertising = ADVERTISED_Autoneg;
1445                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1446                         bp->advertising |= ADVERTISED_10baseT_Half;
1447                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1448                         bp->advertising |= ADVERTISED_10baseT_Full;
1449                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1450                         bp->advertising |= ADVERTISED_100baseT_Half;
1451                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1452                         bp->advertising |= ADVERTISED_100baseT_Full;
1453                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1454                         bp->advertising |= ADVERTISED_1000baseT_Full;
1455                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1456                         bp->advertising |= ADVERTISED_2500baseX_Full;
1457         } else {
1458                 bp->autoneg = 0;
1459                 bp->advertising = 0;
1460                 bp->req_duplex = DUPLEX_FULL;
1461                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1462                         bp->req_line_speed = SPEED_10;
1463                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1464                                 bp->req_duplex = DUPLEX_HALF;
1465                 }
1466                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1467                         bp->req_line_speed = SPEED_100;
1468                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1469                                 bp->req_duplex = DUPLEX_HALF;
1470                 }
1471                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1472                         bp->req_line_speed = SPEED_1000;
1473                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1474                         bp->req_line_speed = SPEED_2500;
1475         }
1476 }
1477
1478 static void
1479 bnx2_set_default_link(struct bnx2 *bp)
1480 {
1481         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1482                 return bnx2_set_default_remote_link(bp);
1483
1484         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1485         bp->req_line_speed = 0;
1486         if (bp->phy_flags & PHY_SERDES_FLAG) {
1487                 u32 reg;
1488
1489                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1490
1491                 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1492                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1493                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1494                         bp->autoneg = 0;
1495                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1496                         bp->req_duplex = DUPLEX_FULL;
1497                 }
1498         } else
1499                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1500 }
1501
1502 static void
1503 bnx2_send_heart_beat(struct bnx2 *bp)
1504 {
1505         u32 msg;
1506         u32 addr;
1507
1508         spin_lock(&bp->indirect_lock);
1509         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1510         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1511         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1512         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1513         spin_unlock(&bp->indirect_lock);
1514 }
1515
/* Handle a link-status event from the management firmware (remote
 * PHY): decode speed/duplex/flow-control/port from the shared-memory
 * LINK_STATUS word, update driver state under phy_lock, and reprogram
 * the MAC.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);

	/* Firmware piggybacks a heartbeat request on the status word. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each xxHALF case sets half duplex and deliberately
		 * falls through to the shared speed assignment.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		spin_lock(&bp->phy_lock);
		bp->flow_ctrl = 0;
		/* Flow control: use the user's forced setting unless
		 * both speed and flow-control autoneg are enabled, in
		 * which case take the firmware-negotiated result.
		 */
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* Re-seed the link defaults if the media type changed. */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

		spin_unlock(&bp->phy_lock);
	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
1594
1595 static int
1596 bnx2_set_remote_link(struct bnx2 *bp)
1597 {
1598         u32 evt_code;
1599
1600         evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1601         switch (evt_code) {
1602                 case BNX2_FW_EVT_CODE_LINK_EVENT:
1603                         bnx2_remote_phy_event(bp);
1604                         break;
1605                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1606                 default:
1607                         bnx2_send_heart_beat(bp);
1608                         break;
1609         }
1610         return 0;
1611 }
1612
/* Program the copper PHY from the settings in *bp.
 *
 * Autoneg path: rebuild the 10/100 (MII_ADVERTISE) and 1000BASE-T
 * (MII_CTRL1000) advertisement registers from bp->advertising plus the
 * pause bits, and restart autonegotiation only if something changed or
 * autoneg was not already enabled.
 *
 * Forced path: write BMCR for the requested speed/duplex, forcing the
 * link down first (via BMCR_LOOPBACK) if it is currently up.
 *
 * Called with bp->phy_lock held; the lock is dropped briefly around the
 * 50 ms link-down delay below.  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Current advertisement, masked down to the speed and
		 * pause bits this function manages.
		 */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Translate ethtool advertising flags into MII bits. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Only kick off a new autoneg cycle if the advertisement
		 * changed or autoneg was previously disabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex: build the target BMCR value. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR latches link-down; read twice for current state. */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
1709
1710 static int
1711 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1712 {
1713         if (bp->loopback == MAC_LOOPBACK)
1714                 return 0;
1715
1716         if (bp->phy_flags & PHY_SERDES_FLAG) {
1717                 return (bnx2_setup_serdes_phy(bp, port));
1718         }
1719         else {
1720                 return (bnx2_setup_copper_phy(bp));
1721         }
1722 }
1723
/* One-time init of the 5709 SerDes PHY.
 *
 * The 5709 SerDes exposes the standard MII registers at an offset of
 * 0x10 and uses banked ("block address") register access; this routine
 * first points bp->mii_* at the shifted locations, then walks through
 * the register blocks to select fiber mode, set 2.5G capability per
 * bp->phy_flags, and enable next-page/CL73 BAM autoneg features.
 * Always returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
	u32 val;

	/* 5709 SerDes MII registers live at base + 0x10. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the autoneg MMD through the AER block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Force fiber mode instead of auto-detect. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only when the hardware supports it. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable Broadcom autoneg message paging (BAM) / teton 2. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	/* Clause 73 BAM enables. */
	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the IEEE combo block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
1772
/* One-time init of the 5708 SerDes PHY: reset, select fiber mode with
 * auto-detect, enable PLL early detect, optionally enable 2.5G, and
 * apply board/revision specific TX amplitude tweaks from shared-memory
 * hardware config.  Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Enable 2.5G advertisement when the hardware is capable. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Board-specific TX control value from NVRAM-backed shmem config;
	 * applied only on backplane designs.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1830
1831 static int
1832 bnx2_init_5706s_phy(struct bnx2 *bp)
1833 {
1834         bnx2_reset_phy(bp);
1835
1836         bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1837
1838         if (CHIP_NUM(bp) == CHIP_NUM_5706)
1839                 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
1840
1841         if (bp->dev->mtu > 1500) {
1842                 u32 val;
1843
1844                 /* Set extended packet length bit */
1845                 bnx2_write_phy(bp, 0x18, 0x7);
1846                 bnx2_read_phy(bp, 0x18, &val);
1847                 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1848
1849                 bnx2_write_phy(bp, 0x1c, 0x6c00);
1850                 bnx2_read_phy(bp, 0x1c, &val);
1851                 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1852         }
1853         else {
1854                 u32 val;
1855
1856                 bnx2_write_phy(bp, 0x18, 0x7);
1857                 bnx2_read_phy(bp, 0x18, &val);
1858                 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1859
1860                 bnx2_write_phy(bp, 0x1c, 0x6c00);
1861                 bnx2_read_phy(bp, 0x1c, &val);
1862                 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1863         }
1864
1865         return 0;
1866 }
1867
/* One-time init of the copper PHY: reset, apply revision-specific DSP
 * workarounds per bp->phy_flags, set or clear the extended packet
 * length bits depending on MTU, and enable ethernet@wirespeed.
 * Always returns 0.  Register 0x18/0x17/0x15 accesses use the PHY's
 * shadow/DSP indirection; the exact values are vendor magic.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	/* CRC workaround sequence (DSP writes) for affected PHYs. */
	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Disable early DAC (clear bit 8 of the DSP expand register). */
	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Standard MTU: clear the extended length bits again. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1918
1919
1920 static int
1921 bnx2_init_phy(struct bnx2 *bp)
1922 {
1923         u32 val;
1924         int rc = 0;
1925
1926         bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1927         bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1928
1929         bp->mii_bmcr = MII_BMCR;
1930         bp->mii_bmsr = MII_BMSR;
1931         bp->mii_bmsr1 = MII_BMSR;
1932         bp->mii_adv = MII_ADVERTISE;
1933         bp->mii_lpa = MII_LPA;
1934
1935         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1936
1937         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1938                 goto setup_phy;
1939
1940         bnx2_read_phy(bp, MII_PHYSID1, &val);
1941         bp->phy_id = val << 16;
1942         bnx2_read_phy(bp, MII_PHYSID2, &val);
1943         bp->phy_id |= val & 0xffff;
1944
1945         if (bp->phy_flags & PHY_SERDES_FLAG) {
1946                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1947                         rc = bnx2_init_5706s_phy(bp);
1948                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1949                         rc = bnx2_init_5708s_phy(bp);
1950                 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1951                         rc = bnx2_init_5709s_phy(bp);
1952         }
1953         else {
1954                 rc = bnx2_init_copper_phy(bp);
1955         }
1956
1957 setup_phy:
1958         if (!rc)
1959                 rc = bnx2_setup_phy(bp, bp->phy_port);
1960
1961         return rc;
1962 }
1963
1964 static int
1965 bnx2_set_mac_loopback(struct bnx2 *bp)
1966 {
1967         u32 mac_mode;
1968
1969         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1970         mac_mode &= ~BNX2_EMAC_MODE_PORT;
1971         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1972         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1973         bp->link_up = 1;
1974         return 0;
1975 }
1976
1977 static int bnx2_test_link(struct bnx2 *);
1978
1979 static int
1980 bnx2_set_phy_loopback(struct bnx2 *bp)
1981 {
1982         u32 mac_mode;
1983         int rc, i;
1984
1985         spin_lock_bh(&bp->phy_lock);
1986         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
1987                             BMCR_SPEED1000);
1988         spin_unlock_bh(&bp->phy_lock);
1989         if (rc)
1990                 return rc;
1991
1992         for (i = 0; i < 10; i++) {
1993                 if (bnx2_test_link(bp) == 0)
1994                         break;
1995                 msleep(100);
1996         }
1997
1998         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1999         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2000                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2001                       BNX2_EMAC_MODE_25G_MODE);
2002
2003         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2004         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2005         bp->link_up = 1;
2006         return 0;
2007 }
2008
/* Send a driver-to-firmware message through the shared-memory mailbox
 * and wait for the firmware to acknowledge it.
 *
 * @msg_data: message code/data bits (a sequence number is OR-ed in).
 * @silent:   suppress the timeout printk when non-zero.
 *
 * Returns 0 on success, -EBUSY if the firmware never echoed the
 * sequence number back, -EIO if it acked with a non-OK status.
 * WAIT0 messages are treated as fire-and-forget: 0 is returned even
 * if no ack was observed.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	/* Tag the message with a fresh sequence number. */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		/* Firmware echoes the sequence number in the ACK field. */
		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2051
/* Initialize the 5709 context memory: kick off the hardware context
 * memory init, then program the host page table with the DMA address
 * of each context block page, polling for each write request to
 * complete.  Returns 0 on success or -EBUSY on a polling timeout.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* Enable the context engine and start memory init; the page
	 * size is encoded relative to 256 bytes (BCM_PAGE_BITS - 8).
	 */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* MEM_INIT self-clears when done; poll up to 10 x 2 us. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Low 32 bits of the page DMA address + valid bit. */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		/* High 32 bits of the page DMA address. */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		/* Issue the page-table write for entry i. */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* WRITE_REQ self-clears on completion; poll 10 x 5 us. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2094
/* Zero out the on-chip context memory for all 96 connection ids on
 * pre-5709 chips, writing through the context window registers.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* 5706 A0 maps virtual cids to physical cids
			 * non-linearly: cids with bit 3 set are relocated
			 * into the 0x60+ range (hardware erratum handling
			 * — NOTE(review): exact rationale not visible here).
			 */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* Each context spans CTX_SIZE bytes in PHY_CTX_SIZE pages. */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				CTX_WR(bp, 0x00, offset, 0);

			/* Map the virtual cid to the physical page. */
			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
		}
	}
}
2140
/* Work around bad RX buffer memory blocks: drain the chip's mbuf
 * allocator, remembering only the good buffers (bit 9 clear), then
 * free just those back.  The bad blocks stay allocated and are thus
 * never handed out again.  Returns 0 or -ENOMEM.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Free-register format: value in both halves plus the
		 * low bit (hardware encoding — see RBUF_FW_BUF_FREE).
		 */
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2191
2192 static void
2193 bnx2_set_mac_addr(struct bnx2 *bp)
2194 {
2195         u32 val;
2196         u8 *mac_addr = bp->dev->dev_addr;
2197
2198         val = (mac_addr[0] << 8) | mac_addr[1];
2199
2200         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2201
2202         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2203                 (mac_addr[4] << 8) | mac_addr[5];
2204
2205         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2206 }
2207
/* Allocate and DMA-map a new RX skb for ring slot @index, filling in
 * the corresponding rx_bd with the mapping address and advancing the
 * producer byte-sequence counter.  Returns 0 or -ENOMEM.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align the data buffer to a BNX2_RX_ALIGN boundary. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	/* Remember skb and mapping for completion/teardown. */
	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Split the 64-bit DMA address across the descriptor halves. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2238
2239 static int
2240 bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
2241 {
2242         struct status_block *sblk = bp->status_blk;
2243         u32 new_link_state, old_link_state;
2244         int is_set = 1;
2245
2246         new_link_state = sblk->status_attn_bits & event;
2247         old_link_state = sblk->status_attn_bits_ack & event;
2248         if (new_link_state != old_link_state) {
2249                 if (new_link_state)
2250                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2251                 else
2252                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2253         } else
2254                 is_set = 0;
2255
2256         return is_set;
2257 }
2258
/* Handle PHY-related attentions: re-evaluate link state under the PHY
 * lock on a link attention, and service firmware/remote-PHY events on
 * a timer-abort attention.
 */
static void
bnx2_phy_int(struct bnx2 *bp)
{
	if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
		spin_lock(&bp->phy_lock);
		bnx2_set_link(bp);
		spin_unlock(&bp->phy_lock);
	}
	if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

}
2271
2272 static void
2273 bnx2_tx_int(struct bnx2 *bp)
2274 {
2275         struct status_block *sblk = bp->status_blk;
2276         u16 hw_cons, sw_cons, sw_ring_cons;
2277         int tx_free_bd = 0;
2278
2279         hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
2280         if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2281                 hw_cons++;
2282         }
2283         sw_cons = bp->tx_cons;
2284
2285         while (sw_cons != hw_cons) {
2286                 struct sw_bd *tx_buf;
2287                 struct sk_buff *skb;
2288                 int i, last;
2289
2290                 sw_ring_cons = TX_RING_IDX(sw_cons);
2291
2292                 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
2293                 skb = tx_buf->skb;
2294
2295                 /* partial BD completions possible with TSO packets */
2296                 if (skb_is_gso(skb)) {
2297                         u16 last_idx, last_ring_idx;
2298
2299                         last_idx = sw_cons +
2300                                 skb_shinfo(skb)->nr_frags + 1;
2301                         last_ring_idx = sw_ring_cons +
2302                                 skb_shinfo(skb)->nr_frags + 1;
2303                         if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2304                                 last_idx++;
2305                         }
2306                         if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2307                                 break;
2308                         }
2309                 }
2310
2311                 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2312                         skb_headlen(skb), PCI_DMA_TODEVICE);
2313
2314                 tx_buf->skb = NULL;
2315                 last = skb_shinfo(skb)->nr_frags;
2316
2317                 for (i = 0; i < last; i++) {
2318                         sw_cons = NEXT_TX_BD(sw_cons);
2319
2320                         pci_unmap_page(bp->pdev,
2321                                 pci_unmap_addr(
2322                                         &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2323                                         mapping),
2324                                 skb_shinfo(skb)->frags[i].size,
2325                                 PCI_DMA_TODEVICE);
2326                 }
2327
2328                 sw_cons = NEXT_TX_BD(sw_cons);
2329
2330                 tx_free_bd += last + 1;
2331
2332                 dev_kfree_skb(skb);
2333
2334                 hw_cons = bp->hw_tx_cons =
2335                         sblk->status_tx_quick_consumer_index0;
2336
2337                 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2338                         hw_cons++;
2339                 }
2340         }
2341
2342         bp->tx_cons = sw_cons;
2343         /* Need to make the tx_cons update visible to bnx2_start_xmit()
2344          * before checking for netif_queue_stopped().  Without the
2345          * memory barrier, there is a small possibility that bnx2_start_xmit()
2346          * will miss it and cause the queue to be stopped forever.
2347          */
2348         smp_mb();
2349
2350         if (unlikely(netif_queue_stopped(bp->dev)) &&
2351                      (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
2352                 netif_tx_lock(bp->dev);
2353                 if ((netif_queue_stopped(bp->dev)) &&
2354                     (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
2355                         netif_wake_queue(bp->dev);
2356                 netif_tx_unlock(bp->dev);
2357         }
2358 }
2359
/* Recycle an RX skb from consumer slot @cons to producer slot @prod
 * instead of allocating a fresh buffer (used for error frames and for
 * small packets that were copied out).  Syncs the buffer back to the
 * device and moves the DMA mapping and descriptor address over to the
 * producer entry when the slots differ.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Give the (possibly CPU-synced) header region back to the
	 * device before the buffer is reposted.
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: mapping and descriptor are already in place. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	/* Copy the DMA address into the producer descriptor. */
	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2389
2390 static int
2391 bnx2_rx_int(struct bnx2 *bp, int budget)
2392 {
2393         struct status_block *sblk = bp->status_blk;
2394         u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2395         struct l2_fhdr *rx_hdr;
2396         int rx_pkt = 0;
2397
2398         hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
2399         if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
2400                 hw_cons++;
2401         }
2402         sw_cons = bp->rx_cons;
2403         sw_prod = bp->rx_prod;
2404
2405         /* Memory barrier necessary as speculative reads of the rx
2406          * buffer can be ahead of the index in the status block
2407          */
2408         rmb();
2409         while (sw_cons != hw_cons) {
2410                 unsigned int len;
2411                 u32 status;
2412                 struct sw_bd *rx_buf;
2413                 struct sk_buff *skb;
2414                 dma_addr_t dma_addr;
2415
2416                 sw_ring_cons = RX_RING_IDX(sw_cons);
2417                 sw_ring_prod = RX_RING_IDX(sw_prod);
2418
2419                 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2420                 skb = rx_buf->skb;
2421
2422                 rx_buf->skb = NULL;
2423
2424                 dma_addr = pci_unmap_addr(rx_buf, mapping);
2425
2426                 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2427                         bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2428
2429                 rx_hdr = (struct l2_fhdr *) skb->data;
2430                 len = rx_hdr->l2_fhdr_pkt_len - 4;
2431
2432                 if ((status = rx_hdr->l2_fhdr_status) &
2433                         (L2_FHDR_ERRORS_BAD_CRC |
2434                         L2_FHDR_ERRORS_PHY_DECODE |
2435                         L2_FHDR_ERRORS_ALIGNMENT |
2436                         L2_FHDR_ERRORS_TOO_SHORT |
2437                         L2_FHDR_ERRORS_GIANT_FRAME)) {
2438
2439                         goto reuse_rx;
2440                 }
2441
2442                 /* Since we don't have a jumbo ring, copy small packets
2443                  * if mtu > 1500
2444                  */
2445                 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
2446                         struct sk_buff *new_skb;
2447
2448                         new_skb = netdev_alloc_skb(bp->dev, len + 2);
2449                         if (new_skb == NULL)
2450                                 goto reuse_rx;
2451
2452                         /* aligned copy */
2453                         skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2454                                       new_skb->data, len + 2);
2455                         skb_reserve(new_skb, 2);
2456                         skb_put(new_skb, len);
2457
2458                         bnx2_reuse_rx_skb(bp, skb,
2459                                 sw_ring_cons, sw_ring_prod);
2460
2461                         skb = new_skb;
2462                 }
2463                 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
2464                         pci_unmap_single(bp->pdev, dma_addr,
2465                                 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
2466
2467                         skb_reserve(skb, bp->rx_offset);
2468                         skb_put(skb, len);
2469                 }
2470                 else {
2471 reuse_rx:
2472                         bnx2_reuse_rx_skb(bp, skb,
2473                                 sw_ring_cons, sw_ring_prod);
2474                         goto next_rx;
2475                 }
2476
2477                 skb->protocol = eth_type_trans(skb, bp->dev);
2478
2479                 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2480                         (ntohs(skb->protocol) != 0x8100)) {
2481
2482                         dev_kfree_skb(skb);
2483                         goto next_rx;
2484
2485                 }
2486
2487                 skb->ip_summed = CHECKSUM_NONE;
2488                 if (bp->rx_csum &&
2489                         (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2490                         L2_FHDR_STATUS_UDP_DATAGRAM))) {
2491
2492                         if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2493                                               L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2494                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
2495                 }
2496
2497 #ifdef BCM_VLAN
2498                 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
2499                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2500                                 rx_hdr->l2_fhdr_vlan_tag);
2501                 }
2502                 else
2503 #endif
2504                         netif_receive_skb(skb);
2505
2506                 bp->dev->last_rx = jiffies;
2507                 rx_pkt++;
2508
2509 next_rx:
2510                 sw_cons = NEXT_RX_BD(sw_cons);
2511                 sw_prod = NEXT_RX_BD(sw_prod);
2512
2513                 if ((rx_pkt == budget))
2514                         break;
2515
2516                 /* Refresh hw_cons to see if there is new work */
2517                 if (sw_cons == hw_cons) {
2518                         hw_cons = bp->hw_rx_cons =
2519                                 sblk->status_rx_quick_consumer_index0;
2520                         if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
2521                                 hw_cons++;
2522                         rmb();
2523                 }
2524         }
2525         bp->rx_cons = sw_cons;
2526         bp->rx_prod = sw_prod;
2527
2528         REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2529
2530         REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
2531
2532         mmiowb();
2533
2534         return rx_pkt;
2535
2536 }
2537
2538 /* MSI ISR - The only difference between this and the INTx ISR
2539  * is that the MSI interrupt is always serviced.
2540  */
/* MSI interrupt handler: MSI is never shared, so the interrupt is
 * always ours and is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);
	/* Acknowledge the interrupt; the MASK_INT bit keeps further
	 * interrupts masked until NAPI polling re-enables them.
	 */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev, &bp->napi);

	return IRQ_HANDLED;
}
2560
2561 static irqreturn_t
2562 bnx2_msi_1shot(int irq, void *dev_instance)
2563 {
2564         struct net_device *dev = dev_instance;
2565         struct bnx2 *bp = netdev_priv(dev);
2566
2567         prefetch(bp->status_blk);
2568
2569         /* Return here if interrupt is disabled. */
2570         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2571                 return IRQ_HANDLED;
2572
2573         netif_rx_schedule(dev, &bp->napi);
2574
2575         return IRQ_HANDLED;
2576 }
2577
/* INTx (and shared-line) interrupt handler.  Unlike the MSI handlers
 * it must first decide whether this device actually raised the
 * interrupt before acknowledging it.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct status_block *sblk = bp->status_blk;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Acknowledge and mask further interrupts until NAPI is done. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the status index we have seen so the next interrupt
	 * can be distinguished from a stale one, then schedule NAPI.
	 */
	if (netif_rx_schedule_prep(dev, &bp->napi)) {
		bp->last_status_idx = sblk->status_idx;
		__netif_rx_schedule(dev, &bp->napi);
	}

	return IRQ_HANDLED;
}
2616
/* Attention-bit events the driver services: link-state changes and
 * firmware timer aborts.
 */
#define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
				 STATUS_ATTN_BITS_TIMER_ABORT)
2619
2620 static inline int
2621 bnx2_has_work(struct bnx2 *bp)
2622 {
2623         struct status_block *sblk = bp->status_blk;
2624
2625         if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2626             (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2627                 return 1;
2628
2629         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2630             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2631                 return 1;
2632
2633         return 0;
2634 }
2635
/* NAPI poll handler: service attention (link) events, reap TX
 * completions, and receive up to @budget packets.  When all work is
 * done, completes NAPI and re-enables interrupts.  Returns the number
 * of RX packets processed.
 */
static int
bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2 *bp = container_of(napi, struct bnx2, napi);
	struct net_device *dev = bp->dev;
	struct status_block *sblk = bp->status_blk;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
	int work_done = 0;

	/* Raised-but-unacked attention bits indicate a link event. */
	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons)
		work_done = bnx2_rx_int(bp, budget);

	/* Snapshot the status index before the final has-work check so
	 * an event landing in between still raises a fresh interrupt.
	 */
	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev, napi);
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* INTx: write the index with MASK_INT first, then
		 * without it to unmask the interrupt.
		 */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
	}

	return work_done;
}
2688
2689 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2690  * from set_multicast.
2691  */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with the promiscuous and
	 * keep-VLAN-tag bits cleared; they are re-added below as needed.
	 */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags only when no VLAN group is registered and ASF
	 * management firmware is not enabled.
	 */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every hash filter bit. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Hash each address into one bit of the
			 * multicast hash filter: the low CRC byte
			 * selects register (top 3 bits) and bit
			 * position (low 5 bits).
			 */
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only touch the EMAC RX mode register when it changed. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Program the sort-user register: clear, set, then enable. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
2763
2764 static void
2765 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2766         u32 rv2p_proc)
2767 {
2768         int i;
2769         u32 val;
2770
2771
2772         for (i = 0; i < rv2p_code_len; i += 8) {
2773                 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2774                 rv2p_code++;
2775                 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2776                 rv2p_code++;
2777
2778                 if (rv2p_proc == RV2P_PROC1) {
2779                         val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2780                         REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2781                 }
2782                 else {
2783                         val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2784                         REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2785                 }
2786         }
2787
2788         /* Reset the processor, un-stall is done later. */
2789         if (rv2p_proc == RV2P_PROC1) {
2790                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2791         }
2792         else {
2793                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2794         }
2795 }
2796
2797 static int
2798 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2799 {
2800         u32 offset;
2801         u32 val;
2802         int rc;
2803
2804         /* Halt the CPU. */
2805         val = REG_RD_IND(bp, cpu_reg->mode);
2806         val |= cpu_reg->mode_value_halt;
2807         REG_WR_IND(bp, cpu_reg->mode, val);
2808         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2809
2810         /* Load the Text area. */
2811         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2812         if (fw->gz_text) {
2813                 u32 *text;
2814                 int j;
2815
2816                 text = vmalloc(FW_BUF_SIZE);
2817                 if (!text)
2818                         return -ENOMEM;
2819                 rc = zlib_inflate_blob(text, FW_BUF_SIZE, fw->gz_text, fw->gz_text_len);
2820                 if (rc < 0) {
2821                         vfree(text);
2822                         return rc;
2823                 }
2824                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2825                         REG_WR_IND(bp, offset, cpu_to_le32(text[j]));
2826                 }
2827                 vfree(text);
2828         }
2829
2830         /* Load the Data area. */
2831         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2832         if (fw->data) {
2833                 int j;
2834
2835                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2836                         REG_WR_IND(bp, offset, fw->data[j]);
2837                 }
2838         }
2839
2840         /* Load the SBSS area. */
2841         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2842         if (fw->sbss) {
2843                 int j;
2844
2845                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2846                         REG_WR_IND(bp, offset, fw->sbss[j]);
2847                 }
2848         }
2849
2850         /* Load the BSS area. */
2851         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2852         if (fw->bss) {
2853                 int j;
2854
2855                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2856                         REG_WR_IND(bp, offset, fw->bss[j]);
2857                 }
2858         }
2859
2860         /* Load the Read-Only area. */
2861         offset = cpu_reg->spad_base +
2862                 (fw->rodata_addr - cpu_reg->mips_view_base);
2863         if (fw->rodata) {
2864                 int j;
2865
2866                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2867                         REG_WR_IND(bp, offset, fw->rodata[j]);
2868                 }
2869         }
2870
2871         /* Clear the pre-fetch instruction. */
2872         REG_WR_IND(bp, cpu_reg->inst, 0);
2873         REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2874
2875         /* Start the CPU. */
2876         val = REG_RD_IND(bp, cpu_reg->mode);
2877         val &= ~cpu_reg->mode_value_halt;
2878         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2879         REG_WR_IND(bp, cpu_reg->mode, val);
2880
2881         return 0;
2882 }
2883
/* Decompress and load firmware into both RV2P processors and each of
 * the on-chip CPUs (RXP, TXP, TPAT, COM, and on 5709 also CP).
 * Returns 0 on success or a negative errno on failure.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc;
	void *text;

	/* Initialize the RV2P processor. */
	text = vmalloc(FW_BUF_SIZE);
	if (!text)
		return -ENOMEM;
	/* zlib_inflate_blob returns the decompressed length (>= 0) or a
	 * negative error; the length is passed on to load_rv2p_fw.
	 */
	rc = zlib_inflate_blob(text, FW_BUF_SIZE, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1));
	if (rc < 0) {
		vfree(text);
		goto init_cpu_err;
	}
	load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);

	rc = zlib_inflate_blob(text, FW_BUF_SIZE, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2));
	if (rc < 0) {
		vfree(text);
		goto init_cpu_err;
	}
	load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
	vfree(text);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	/* 5709 and 5706/5708 chips use different firmware images. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor (5709 only). */
	cpu_reg.mode = BNX2_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		fw = &bnx2_cp_fw_09;

		rc = load_cpu_fw(bp, &cpu_reg, fw);
		if (rc)
			goto init_cpu_err;
	}
	/* On non-5709 chips rc still holds the COM load result (0). */
init_cpu_err:
	return rc;
}
3027
/* Transition the chip between PCI power states.  D0 wakes the device
 * (with the required delay out of D3hot); D3hot optionally arms
 * Wake-on-LAN before dropping power.  Returns 0 on success or -EINVAL
 * for an unsupported target state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power-state field and any pending PME. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Acknowledge any wake-up packets and disable the
		 * magic-packet receive mode used while suspended.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily renegotiate the link at 10/100
			 * for low-power operation, restoring the
			 * original autoneg settings afterwards.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the management firmware we are suspending. */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		/* Program D3hot (3) into the PMCSR power-state field;
		 * 5706 A0/A1 only enter D3hot when WoL is armed.
		 */
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3154
3155 static int
3156 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3157 {
3158         u32 val;
3159         int j;
3160
3161         /* Request access to the flash interface. */
3162         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3163         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3164                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3165                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3166                         break;
3167
3168                 udelay(5);
3169         }
3170
3171         if (j >= NVRAM_TIMEOUT_COUNT)
3172                 return -EBUSY;
3173
3174         return 0;
3175 }
3176
3177 static int
3178 bnx2_release_nvram_lock(struct bnx2 *bp)
3179 {
3180         int j;
3181         u32 val;
3182
3183         /* Relinquish nvram interface. */
3184         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3185
3186         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3187                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3188                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3189                         break;
3190
3191                 udelay(5);
3192         }
3193
3194         if (j >= NVRAM_TIMEOUT_COUNT)
3195                 return -EBUSY;
3196
3197         return 0;
3198 }
3199
3200
3201 static int
3202 bnx2_enable_nvram_write(struct bnx2 *bp)
3203 {
3204         u32 val;
3205
3206         val = REG_RD(bp, BNX2_MISC_CFG);
3207         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3208
3209         if (bp->flash_info->flags & BNX2_NV_WREN) {
3210                 int j;
3211
3212                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3213                 REG_WR(bp, BNX2_NVM_COMMAND,
3214                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3215
3216                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3217                         udelay(5);
3218
3219                         val = REG_RD(bp, BNX2_NVM_COMMAND);
3220                         if (val & BNX2_NVM_COMMAND_DONE)
3221                                 break;
3222                 }
3223
3224                 if (j >= NVRAM_TIMEOUT_COUNT)
3225                         return -EBUSY;
3226         }
3227         return 0;
3228 }
3229
3230 static void
3231 bnx2_disable_nvram_write(struct bnx2 *bp)
3232 {
3233         u32 val;
3234
3235         val = REG_RD(bp, BNX2_MISC_CFG);
3236         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3237 }
3238
3239
3240 static void
3241 bnx2_enable_nvram_access(struct bnx2 *bp)
3242 {
3243         u32 val;
3244
3245         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3246         /* Enable both bits, even on read. */
3247         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3248                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3249 }
3250
3251 static void
3252 bnx2_disable_nvram_access(struct bnx2 *bp)
3253 {
3254         u32 val;
3255
3256         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3257         /* Disable both bits, even after read. */
3258         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3259                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3260                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3261 }
3262
/* Erase the NVRAM page containing @offset.  Buffered flash parts need
 * no explicit erase.  Returns 0 on success or -EBUSY on timeout.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3302
/* Read one 32-bit word of NVRAM at @offset into @ret_val, converting
 * from big-endian NVRAM order to CPU order.  @cmd_flags carries extra
 * BNX2_NVM_COMMAND bits from the caller.  Returns 0 on success or
 * -EBUSY on timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
		           bp->flash_info->page_bits) +
		          (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			val = REG_RD(bp, BNX2_NVM_READ);

			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3348
3349
3350 static int
3351 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3352 {
3353         u32 cmd, val32;
3354         int j;
3355
3356         /* Build the command word. */
3357         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3358
3359         /* Calculate an offset of a buffered flash, not needed for 5709. */
3360         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3361                 offset = ((offset / bp->flash_info->page_size) <<
3362                           bp->flash_info->page_bits) +
3363                          (offset % bp->flash_info->page_size);
3364         }
3365
3366         /* Need to clear DONE bit separately. */
3367         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3368
3369         memcpy(&val32, val, 4);
3370         val32 = cpu_to_be32(val32);
3371
3372         /* Write the data. */
3373         REG_WR(bp, BNX2_NVM_WRITE, val32);
3374
3375         /* Address of the NVRAM to write to. */
3376         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3377
3378         /* Issue the write command. */
3379         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3380
3381         /* Wait for completion. */
3382         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3383                 udelay(5);
3384
3385                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3386                         break;
3387         }
3388         if (j >= NVRAM_TIMEOUT_COUNT)
3389                 return -EBUSY;
3390
3391         return 0;
3392 }
3393
/* Identify the attached flash/EEPROM device and record its descriptor
 * in bp->flash_info, then determine the usable flash size.
 *
 * The 5709 always uses the fixed flash_5709 descriptor.  Older chips
 * are matched against flash_table[] using the strapping bits reported
 * in NVM_CFG1; if the flash interface has not been reconfigured yet,
 * the matching entry's config registers are also programmed.
 *
 * Returns 0 on success, -ENODEV when no table entry matches, or an
 * error propagated from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match on the backup-strap field of config1. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field to compare against. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Either loop falling through means no table entry matched. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size advertised by firmware in shared memory; fall
	 * back to the descriptor's total size when none is reported. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3476
3477 static int
3478 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3479                 int buf_size)
3480 {
3481         int rc = 0;
3482         u32 cmd_flags, offset32, len32, extra;
3483
3484         if (buf_size == 0)
3485                 return 0;
3486
3487         /* Request access to the flash interface. */
3488         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3489                 return rc;
3490
3491         /* Enable access to flash interface */
3492         bnx2_enable_nvram_access(bp);
3493
3494         len32 = buf_size;
3495         offset32 = offset;
3496         extra = 0;
3497
3498         cmd_flags = 0;
3499
3500         if (offset32 & 3) {
3501                 u8 buf[4];
3502                 u32 pre_len;
3503
3504                 offset32 &= ~3;
3505                 pre_len = 4 - (offset & 3);
3506
3507                 if (pre_len >= len32) {
3508                         pre_len = len32;
3509                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3510                                     BNX2_NVM_COMMAND_LAST;
3511                 }
3512                 else {
3513                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3514                 }
3515
3516                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3517
3518                 if (rc)
3519                         return rc;
3520
3521                 memcpy(ret_buf, buf + (offset & 3), pre_len);
3522
3523                 offset32 += 4;
3524                 ret_buf += pre_len;
3525                 len32 -= pre_len;
3526         }
3527         if (len32 & 3) {
3528                 extra = 4 - (len32 & 3);
3529                 len32 = (len32 + 4) & ~3;
3530         }
3531
3532         if (len32 == 4) {
3533                 u8 buf[4];
3534
3535                 if (cmd_flags)
3536                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3537                 else
3538                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3539                                     BNX2_NVM_COMMAND_LAST;
3540
3541                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3542
3543                 memcpy(ret_buf, buf, 4 - extra);
3544         }
3545         else if (len32 > 0) {
3546                 u8 buf[4];
3547
3548                 /* Read the first word. */
3549                 if (cmd_flags)
3550                         cmd_flags = 0;
3551                 else
3552                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3553
3554                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3555
3556                 /* Advance to the next dword. */
3557                 offset32 += 4;
3558                 ret_buf += 4;
3559                 len32 -= 4;
3560
3561                 while (len32 > 4 && rc == 0) {
3562                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3563
3564                         /* Advance to the next dword. */
3565                         offset32 += 4;
3566                         ret_buf += 4;
3567                         len32 -= 4;
3568                 }
3569
3570                 if (rc)
3571                         return rc;
3572
3573                 cmd_flags = BNX2_NVM_COMMAND_LAST;
3574                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3575
3576                 memcpy(ret_buf, buf, 4 - extra);
3577         }
3578
3579         /* Disable access to flash interface */
3580         bnx2_disable_nvram_access(bp);
3581
3582         bnx2_release_nvram_lock(bp);
3583
3584         return rc;
3585 }
3586
/* Write @buf_size bytes from @data_buf to NVRAM at @offset.
 *
 * Arbitrary alignment is handled by reading the dwords surrounding the
 * target range and merging the caller's data into an aligned scratch
 * copy (align_buf).  For non-buffered flash parts, each affected page
 * is read back into flash_buffer, erased, and rewritten with the new
 * data merged in; buffered parts are written directly.
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): error paths inside the per-page loop jump to
 * nvram_write_end while the NVRAM lock acquired in that iteration is
 * still held and access is still enabled — the lock appears to be
 * leaked on failure; confirm against bnx2_acquire_nvram_lock() /
 * bnx2_release_nvram_lock() semantics.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	if ((align_start = (offset32 & 3))) {
		/* Unaligned start: fetch the dword we will partially
		 * overwrite so its untouched bytes are preserved. */
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	if (len32 & 3) {
		/* Unaligned end: same for the trailing dword. */
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	if (align_start || align_end) {
		/* Build an aligned copy: preserved head/tail bytes plus
		 * the caller's data in the middle. */
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		/* Page read-back scratch buffer; 264 presumably covers
		 * the largest non-buffered page size in flash_table —
		 * verify against the table entries. */
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	/* Process one flash page per iteration. */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	/* kfree(NULL) is a no-op, so both buffers are safe to free
	 * unconditionally. */
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
3766
3767 static void
3768 bnx2_init_remote_phy(struct bnx2 *bp)
3769 {
3770         u32 val;
3771
3772         bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
3773         if (!(bp->phy_flags & PHY_SERDES_FLAG))
3774                 return;
3775
3776         val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
3777         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
3778                 return;
3779
3780         if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
3781                 if (netif_running(bp->dev)) {
3782                         val = BNX2_DRV_ACK_CAP_SIGNATURE |
3783                               BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
3784                         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
3785                                    val);
3786                 }
3787                 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
3788
3789                 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
3790                 if (val & BNX2_LINK_STATUS_SERDES_LINK)
3791                         bp->phy_port = PORT_FIBRE;
3792                 else
3793                         bp->phy_port = PORT_TP;
3794         }
3795 }
3796
/* Soft-reset the chip and wait for firmware to come back up.
 *
 * @reset_code is the BNX2_DRV_MSG_CODE_* reason OR'ed into the WAIT0
 * and WAIT1 firmware handshakes.  Returns 0 on success, -EBUSY if the
 * reset never completes, -ENODEV if byte swapping is misconfigured
 * after reset, or an error from the firmware handshake.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* The 5709 resets through the MISC command register;
		 * the CORE_RST_REQ path below is not used. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		/* Reprogram window-enable and swap settings through PCI
		 * config space after the reset. */
		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	/* Re-probe remote-PHY state under the PHY lock; the firmware
	 * capability words may have changed across the reset. */
	spin_lock_bh(&bp->phy_lock);
	bnx2_init_remote_phy(bp);
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	return rc;
}
3896
/* Bring the chip up after reset: program DMA configuration, start the
 * internal CPUs, detect NVRAM, set the MAC address and MTU, configure
 * host-coalescing parameters and the status/statistics block DMA
 * addresses, then complete the WAIT2 firmware handshake and enable the
 * chip.  Returns 0 on success or a negative errno.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	/* NOTE(review): magic DMA config bits (0x2 << 20, 1 << 11) —
	 * field meanings are not visible here; see the register layout
	 * in bnx2.h. */
	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		/* Clear the relaxed-ordering bit in the PCI-X command
		 * register. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	/* 5709 A0/A1 need the halt-disable workaround bit. */
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the transmit backoff from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host-coalescing thresholds; each register packs the interrupt
	 * value in the high 16 bits and the base value in the low 16. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & ONE_SHOT_MSI_FLAG)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Final firmware handshake before enabling the chip. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4074
4075 static void
4076 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4077 {
4078         u32 val, offset0, offset1, offset2, offset3;
4079
4080         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4081                 offset0 = BNX2_L2CTX_TYPE_XI;
4082                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4083                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4084                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4085         } else {
4086                 offset0 = BNX2_L2CTX_TYPE;
4087                 offset1 = BNX2_L2CTX_CMD_TYPE;
4088                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4089                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4090         }
4091         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4092         CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4093
4094         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4095         CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4096
4097         val = (u64) bp->tx_desc_mapping >> 32;
4098         CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4099
4100         val = (u64) bp->tx_desc_mapping & 0xffffffff;
4101         CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4102 }
4103
4104 static void
4105 bnx2_init_tx_ring(struct bnx2 *bp)
4106 {
4107         struct tx_bd *txbd;
4108         u32 cid;
4109
4110         bp->tx_wake_thresh = bp->tx_ring_size / 2;
4111
4112         txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4113
4114         txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4115         txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4116
4117         bp->tx_prod = 0;
4118         bp->tx_cons = 0;
4119         bp->hw_tx_cons = 0;
4120         bp->tx_prod_bseq = 0;
4121
4122         cid = TX_CID;
4123         bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4124         bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4125
4126         bnx2_init_tx_context(bp, cid);
4127 }
4128
/* Initialize the RX BD ring(s): size the receive buffers for the
 * current MTU, chain the ring pages into a circle, program the RX
 * context, populate the ring with skbs, and tell the chip the initial
 * producer index.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;

	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;

	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		/* The last BD of each page chains to the next page;
		 * the final page chains back to page 0.
		 */
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
				       0xffffffff;
	}

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	/* RX BD chain base address, high then low 32 bits. */
	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Fill the ring with receive buffers; stop early on allocation
	 * failure and run with whatever was allocated.
	 */
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Publish the producer index and byte sequence to the chip. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
4188
4189 static void
4190 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4191 {
4192         u32 num_rings, max;
4193
4194         bp->rx_ring_size = size;
4195         num_rings = 1;
4196         while (size > MAX_RX_DESC_CNT) {
4197                 size -= MAX_RX_DESC_CNT;
4198                 num_rings++;
4199         }
4200         /* round to next power of 2 */
4201         max = MAX_RX_RINGS;
4202         while ((max & num_rings) == 0)
4203                 max >>= 1;
4204
4205         if (num_rings != max)
4206                 max <<= 1;
4207
4208         bp->rx_max_ring = max;
4209         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4210 }
4211
/* Unmap and free every skb still held in the TX ring.  A transmitted
 * skb occupies one slot for its linear data plus one slot per page
 * fragment, so the loop advances by the whole span at once.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		/* Empty slot; move to the next one. */
		if (skb == NULL) {
			i++;
			continue;
		}

		/* Unmap the linear (head) portion of the skb. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		/* Unmap each page fragment in the following slots. */
		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(skb);
		/* Skip past the head slot and all fragment slots. */
		i += j + 1;
	}

}
4248
/* Unmap and free every skb still posted in the RX ring. */
static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < bp->rx_max_ring_idx; i++) {
		struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL)
			continue;

		pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

		rx_buf->skb = NULL;

		dev_kfree_skb(skb);
	}
}
4272
/* Release all skbs held by both the TX and RX rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
4279
/* Reset the chip and reinitialize it along with the TX/RX rings.
 * The skbs are freed even when the reset itself fails, so the rings
 * never hold stale buffers.  Returns 0 or a negative errno.
 */
static int
bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
{
	int rc;

	rc = bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	if (rc)
		return rc;

	if ((rc = bnx2_init_chip(bp)) != 0)
		return rc;

	bnx2_init_tx_ring(bp);
	bnx2_init_rx_ring(bp);
	return 0;
}
4297
/* Full NIC initialization: reset the chip/rings, then bring up the PHY
 * and (re)establish the link state under the phy lock.
 */
static int
bnx2_init_nic(struct bnx2 *bp)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp);
	bnx2_set_link(bp);
	spin_unlock_bh(&bp->phy_lock);
	return 0;
}
4312
/* Ethtool register self-test.  For each table entry, write 0 then
 * all-ones and verify that the read/write bits (rw_mask) follow the
 * write while the read-only bits (ro_mask) keep their original value.
 * Entries flagged BNX2_FL_NOT_5709 are skipped on the 5709.
 * Returns 0 on success, -ENODEV on the first failing register.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* Columns: offset, flags, writable-bit mask, read-only-bit mask.
	 * Terminated by offset == 0xffff.
	 */
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709	1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write zeros: writable bits must read back 0 and
		 * read-only bits must be unchanged.
		 */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all ones: writable bits must read back 1 and
		 * read-only bits must still be unchanged.
		 */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the original register value before failing. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
4483
4484 static int
4485 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4486 {
4487         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4488                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4489         int i;
4490
4491         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4492                 u32 offset;
4493
4494                 for (offset = 0; offset < size; offset += 4) {
4495
4496                         REG_WR_IND(bp, start + offset, test_pattern[i]);
4497
4498                         if (REG_RD_IND(bp, start + offset) !=
4499                                 test_pattern[i]) {
4500                                 return -ENODEV;
4501                         }
4502                 }
4503         }
4504         return 0;
4505 }
4506
4507 static int
4508 bnx2_test_memory(struct bnx2 *bp)
4509 {
4510         int ret = 0;
4511         int i;
4512         static struct mem_entry {
4513                 u32   offset;
4514                 u32   len;
4515         } mem_tbl_5706[] = {
4516                 { 0x60000,  0x4000 },
4517                 { 0xa0000,  0x3000 },
4518                 { 0xe0000,  0x4000 },
4519                 { 0x120000, 0x4000 },
4520                 { 0x1a0000, 0x4000 },
4521                 { 0x160000, 0x4000 },
4522                 { 0xffffffff, 0    },
4523         },
4524         mem_tbl_5709[] = {
4525                 { 0x60000,  0x4000 },
4526                 { 0xa0000,  0x3000 },
4527                 { 0xe0000,  0x4000 },
4528                 { 0x120000, 0x4000 },
4529                 { 0x1a0000, 0x4000 },
4530                 { 0xffffffff, 0    },
4531         };
4532         struct mem_entry *mem_tbl;
4533
4534         if (CHIP_NUM(bp) == CHIP_NUM_5709)
4535                 mem_tbl = mem_tbl_5709;
4536         else
4537                 mem_tbl = mem_tbl_5706;
4538
4539         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4540                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4541                         mem_tbl[i].len)) != 0) {
4542                         return ret;
4543                 }
4544         }
4545
4546         return ret;
4547 }
4548
4549 #define BNX2_MAC_LOOPBACK       0
4550 #define BNX2_PHY_LOOPBACK       1
4551
/* Run one loopback iteration: transmit a single self-addressed test
 * frame in MAC or PHY loopback mode and verify it is received intact.
 * Returns 0 on success, -EINVAL for an unknown mode, -ENODEV on any
 * TX/RX or data-integrity failure.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build a max-size test frame: our own MAC as destination,
	 * zeroed source/type, then a known byte pattern as payload.
	 */
	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a status block update so rx_start_idx is current. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

	num_pkts = 0;

	/* Hand the frame to the chip as a single-BD packet. */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	/* Give the frame time to loop back, then force another status
	 * block update before checking the consumer indices.
	 */
	udelay(100);

	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The chip must have consumed the TX BD... */
	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
		goto loopback_test_done;
	}

	/* ...and produced exactly one RX completion. */
	rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The l2_fhdr written by the chip precedes the frame data. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Any receive error flagged by the chip fails the test. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length check: received length minus 4-byte CRC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload pattern byte-for-byte. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
4670
4671 #define BNX2_MAC_LOOPBACK_FAILED        1
4672 #define BNX2_PHY_LOOPBACK_FAILED        2
4673 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
4674                                          BNX2_PHY_LOOPBACK_FAILED)
4675
4676 static int
4677 bnx2_test_loopback(struct bnx2 *bp)
4678 {
4679         int rc = 0;
4680
4681         if (!netif_running(bp->dev))
4682                 return BNX2_LOOPBACK_FAILED;
4683
4684         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4685         spin_lock_bh(&bp->phy_lock);
4686         bnx2_init_phy(bp);
4687         spin_unlock_bh(&bp->phy_lock);
4688         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4689                 rc |= BNX2_MAC_LOOPBACK_FAILED;
4690         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4691                 rc |= BNX2_PHY_LOOPBACK_FAILED;
4692         return rc;
4693 }
4694
4695 #define NVRAM_SIZE 0x200
4696 #define CRC32_RESIDUAL 0xdebb20e3
4697
/* Ethtool NVRAM self-test: check the magic value at offset 0, then
 * verify the CRC32 of the two 0x100-byte halves of the manufacturing
 * region at offset 0x100.  Each half includes its stored CRC, so a
 * good block yields the constant CRC32 residual.
 * Returns 0 on success, -ENODEV on a bad magic or checksum, or the
 * error from bnx2_nvram_read().
 */
static int
bnx2_test_nvram(struct bnx2 *bp)
{
	u32 buf[NVRAM_SIZE / 4];
	u8 *data = (u8 *) buf;
	int rc = 0;
	u32 magic, csum;

	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
		goto test_nvram_done;

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
		goto test_nvram_done;

	/* First half: CRC over data + stored CRC must equal residual. */
	csum = ether_crc_le(0x100, data);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	/* Second half, same check. */
	csum = ether_crc_le(0x100, data + 0x100);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
	}

test_nvram_done:
	return rc;
}
4732
/* Ethtool link self-test: returns 0 when link is up, -ENODEV otherwise.
 */
static int
bnx2_test_link(struct bnx2 *bp)
{
	u32 bmsr;

	spin_lock_bh(&bp->phy_lock);
	bnx2_enable_bmsr1(bp);
	/* BMSR latches link-down events; read twice so the second read
	 * reflects the current link state.
	 */
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (bmsr & BMSR_LSTATUS) {
		return 0;
	}
	return -ENODEV;
}
4750
4751 static int
4752 bnx2_test_intr(struct bnx2 *bp)
4753 {
4754         int i;
4755         u16 status_idx;
4756
4757         if (!netif_running(bp->dev))
4758                 return -ENODEV;
4759
4760         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4761
4762         /* This register is not touched during run-time. */
4763         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4764         REG_RD(bp, BNX2_HC_COMMAND);
4765
4766         for (i = 0; i < 10; i++) {
4767                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4768                         status_idx) {
4769
4770                         break;
4771                 }
4772
4773                 msleep_interruptible(10);
4774         }
4775         if (i < 10)
4776                 return 0;
4777
4778         return -ENODEV;
4779 }
4780
/* Periodic 5706 SerDes handler: implements parallel detection.  If
 * autoneg has not brought the link up but a signal is present with no
 * autoneg CONFIG words from the partner, force 1000/full; once forced,
 * switch back to autoneg as soon as CONFIG words appear again.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Shadow/expansion register reads; phy2 is read
			 * twice so the second read is current.
			 */
			bnx2_write_phy(bp, 0x1c, 0x7c00);
			bnx2_read_phy(bp, 0x1c, &phy1);

			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);

			if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
				!(phy2 & 0x20)) {	/* no CONFIG */

				/* Link partner is not autonegotiating:
				 * force 1000 Mbps full duplex.
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
		u32 phy2;

		/* Link came up via parallel detect; if the partner now
		 * sends CONFIG words, re-enable autonegotiation.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		}
	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4835
/* Periodic 5708 SerDes handler: when autoneg fails to bring the link
 * up, alternate between forced 2.5G and autoneg to find a partner.
 * No-op when a remote PHY manages the link or 2.5G is not supported.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg made no progress; try forced 2.5G
			 * with a shorter retry interval.
			 */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode failed too; go back to autoneg
			 * and give it a couple of timer ticks.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4868
/* Periodic driver timer: sends the firmware heartbeat, refreshes the
 * firmware RX drop counter, applies the 5708 stats workaround, and
 * runs the chip-specific SerDes state machine.  Re-arms itself.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	/* Interrupts are disabled (e.g. during reset); just re-arm. */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	bnx2_send_heart_beat(bp);

	bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
4899
4900 static int
4901 bnx2_request_irq(struct bnx2 *bp)
4902 {
4903         struct net_device *dev = bp->dev;
4904         int rc = 0;
4905
4906         if (bp->flags & USING_MSI_FLAG) {
4907                 irq_handler_t   fn = bnx2_msi;
4908
4909                 if (bp->flags & ONE_SHOT_MSI_FLAG)
4910                         fn = bnx2_msi_1shot;
4911
4912                 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
4913         } else
4914                 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4915                                  IRQF_SHARED, dev->name, dev);
4916         return rc;
4917 }
4918
4919 static void
4920 bnx2_free_irq(struct bnx2 *bp)
4921 {
4922         struct net_device *dev = bp->dev;
4923
4924         if (bp->flags & USING_MSI_FLAG) {
4925                 free_irq(bp->pdev->irq, dev);
4926                 pci_disable_msi(bp->pdev);
4927                 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
4928         } else
4929                 free_irq(bp->pdev->irq, dev);
4930 }
4931
/* Called with rtnl_lock.  Bring the interface up: allocate ring
 * memory, enable MSI when available, initialize the NIC, verify MSI
 * delivery (falling back to INTx on failure), and start the TX queue.
 * Returns 0 or a negative errno with everything rolled back.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	napi_enable(&bp->napi);

	/* Prefer MSI where supported; the 5709 can use one-shot MSI. */
	if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bp->flags |= ONE_SHOT_MSI_FLAG;
		}
	}
	rc = bnx2_request_irq(bp);

	if (rc) {
		napi_disable(&bp->napi);
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		napi_disable(&bp->napi);
		bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* bnx2_free_irq() cleared the MSI flags, so the
			 * NIC is re-initialized and the IRQ re-requested
			 * in INTx mode.
			 */
			rc = bnx2_init_nic(bp);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				napi_disable(&bp->napi);
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
5018
static void
bnx2_reset_task(struct work_struct *work)
{
        struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

        /* The device may have been closed between scheduling this work
         * item and its execution; nothing to do in that case.
         */
        if (!netif_running(bp->dev))
                return;

        /* in_reset_task lets bnx2_close() poll for this handler to finish
         * instead of flushing the workqueue (which could deadlock on
         * rtnl_lock -- see the comment in bnx2_close()).
         */
        bp->in_reset_task = 1;
        bnx2_netif_stop(bp);

        /* NOTE(review): the return value of bnx2_init_nic() is ignored;
         * a failed re-init would leave the interface stopped -- confirm
         * whether that is acceptable for this reset path.
         */
        bnx2_init_nic(bp);

        /* Keep interrupt processing held off until the restart completes. */
        atomic_set(&bp->intr_sem, 1);
        bnx2_netif_start(bp);
        bp->in_reset_task = 0;
}
5036
static void
bnx2_tx_timeout(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* net_device watchdog hook: defer the chip reset to process
         * context (bnx2_reset_task) rather than resetting from here.
         */
        /* This allows the netif to be shutdown gracefully before resetting */
        schedule_work(&bp->reset_task);
}
5045
#ifdef BCM_VLAN
/* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* Quiesce the device while the VLAN group pointer is swapped so
         * the RX path never sees a half-updated configuration.
         */
        bnx2_netif_stop(bp);

        bp->vlgrp = vlgrp;
        /* Re-run RX mode setup -- presumably it consumes bp->vlgrp to
         * program VLAN handling (bnx2_set_rx_mode not visible here).
         */
        bnx2_set_rx_mode(dev);

        bnx2_netif_start(bp);
}
#endif
5061
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        dma_addr_t mapping;
        struct tx_bd *txbd;
        struct sw_bd *tx_buf;
        u32 len, vlan_tag_flags, last_frag, mss;
        u16 prod, ring_prod;
        int i;

        /* The queue should have been stopped before the ring filled up;
         * needing more descriptors than are free here is a driver bug.
         * One BD is used for the linear part plus one per fragment.
         */
        if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
                netif_stop_queue(dev);
                printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
                        dev->name);

                return NETDEV_TX_BUSY;
        }
        len = skb_headlen(skb);
        prod = bp->tx_prod;
        ring_prod = TX_RING_IDX(prod);

        /* Build the per-packet flags word: checksum offload, VLAN tag
         * (in the upper 16 bits) and, below, LSO parameters.
         */
        vlan_tag_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
        }

        if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
                vlan_tag_flags |=
                        (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
        }
        if ((mss = skb_shinfo(skb)->gso_size)) {
                u32 tcp_opt_len, ip_tcp_len;
                struct iphdr *iph;

                vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

                tcp_opt_len = tcp_optlen(skb);

                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
                        /* IPv6 LSO: encode the distance between the end of
                         * a plain IPv6 header and the TCP header (i.e. the
                         * size of any extension headers) into the BD flag
                         * and mss fields, in 8-byte units.
                         */
                        u32 tcp_off = skb_transport_offset(skb) -
                                      sizeof(struct ipv6hdr) - ETH_HLEN;

                        vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
                                          TX_BD_FLAGS_SW_FLAGS;
                        if (likely(tcp_off == 0))
                                vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
                        else {
                                tcp_off >>= 3;
                                vlan_tag_flags |= ((tcp_off & 0x3) <<
                                                   TX_BD_FLAGS_TCP6_OFF0_SHL) |
                                                  ((tcp_off & 0x10) <<
                                                   TX_BD_FLAGS_TCP6_OFF4_SHL);
                                mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
                        }
                } else {
                        /* IPv4 LSO: the headers are rewritten below, so a
                         * cloned header area must be privatized first.
                         */
                        if (skb_header_cloned(skb) &&
                            pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                                dev_kfree_skb(skb);
                                return NETDEV_TX_OK;
                        }

                        ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

                        /* Pre-compute per-segment tot_len and the TCP
                         * pseudo-header checksum for the hardware.
                         */
                        iph = ip_hdr(skb);
                        iph->check = 0;
                        iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                                 iph->daddr, 0,
                                                                 IPPROTO_TCP,
                                                                 0);
                        /* Header-length adjustment (in 32-bit words) for IP
                         * and TCP options goes into bits 15:8 of the flags.
                         */
                        if (tcp_opt_len || (iph->ihl > 5)) {
                                vlan_tag_flags |= ((iph->ihl - 5) +
                                                   (tcp_opt_len >> 2)) << 8;
                        }
                }
        } else
                mss = 0;

        /* Map the linear part of the skb for DMA.
         * NOTE(review): the mapping result is not checked for errors --
         * typical for drivers of this era, but worth confirming.
         */
        mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

        tx_buf = &bp->tx_buf_ring[ring_prod];
        tx_buf->skb = skb;
        pci_unmap_addr_set(tx_buf, mapping, mapping);

        /* First BD carries the START flag and the per-packet flags word. */
        txbd = &bp->tx_desc_ring[ring_prod];

        txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
        txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
        txbd->tx_bd_mss_nbytes = len | (mss << 16);
        txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

        last_frag = skb_shinfo(skb)->nr_frags;

        /* One additional BD per page fragment. */
        for (i = 0; i < last_frag; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                prod = NEXT_TX_BD(prod);
                ring_prod = TX_RING_IDX(prod);
                txbd = &bp->tx_desc_ring[ring_prod];

                len = frag->size;
                mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
                        len, PCI_DMA_TODEVICE);
                pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
                                mapping, mapping);

                txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
                txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
                txbd->tx_bd_mss_nbytes = len | (mss << 16);
                txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

        }
        /* Last BD of the packet gets the END flag. */
        txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

        prod = NEXT_TX_BD(prod);
        bp->tx_prod_bseq += skb->len;

        /* Ring the doorbell: publish the new producer index and byte count. */
        REG_WR16(bp, bp->tx_bidx_addr, prod);
        REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

        mmiowb();

        bp->tx_prod = prod;
        dev->trans_start = jiffies;

        /* Stop the queue when nearly full; re-check afterwards because
         * bnx2_tx_int() may have freed descriptors concurrently.
         */
        if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
                netif_stop_queue(dev);
                if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
                        netif_wake_queue(dev);
        }

        return NETDEV_TX_OK;
}
5200
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 reset_code;

        /* Calling flush_scheduled_work() may deadlock because
         * linkwatch_event() may be on the workqueue and it will try to get
         * the rtnl_lock which we are holding.
         */
        while (bp->in_reset_task)
                msleep(1);

        /* Teardown order matters: silence interrupts and NAPI before the
         * timer, then reset the chip before freeing IRQ and buffers.
         */
        bnx2_disable_int_sync(bp);
        napi_disable(&bp->napi);
        del_timer_sync(&bp->timer);
        /* Pick the unload/suspend message code according to the
         * Wake-on-LAN configuration.
         */
        if (bp->flags & NO_WOL_FLAG)
                reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
        else if (bp->wol)
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
        else
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
        bnx2_reset_chip(bp, reset_code);
        bnx2_free_irq(bp);
        bnx2_free_skbs(bp);
        bnx2_free_mem(bp);
        bp->link_up = 0;
        netif_carrier_off(bp->dev);
        bnx2_set_power_state(bp, PCI_D3hot);
        return 0;
}
5233
/* Read a hardware statistics counter that is kept as a _hi/_lo pair.
 * GET_NET_STATS64 combines both halves (only meaningful when long is
 * 64 bits wide); GET_NET_STATS32 returns the low half only, so on
 * 32-bit hosts GET_NET_STATS silently truncates to 32 bits.
 *
 * The expansions are fully parenthesized so the macros can be embedded
 * in larger expressions without operator-precedence surprises.
 */
#define GET_NET_STATS64(ctr)                                    \
        ((unsigned long) ((unsigned long) (ctr##_hi) << 32) +   \
         (unsigned long) (ctr##_lo))

#define GET_NET_STATS32(ctr)            \
        ((ctr##_lo))

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS   GET_NET_STATS64
#else
#define GET_NET_STATS   GET_NET_STATS32
#endif
5246
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct statistics_block *stats_blk = bp->stats_blk;
        struct net_device_stats *net_stats = &bp->net_stats;

        /* Before the stats block is allocated, report the (zeroed /
         * last-known) cached values rather than dereferencing NULL.
         */
        if (bp->stats_blk == NULL) {
                return net_stats;
        }
        /* Translate the chip's DMA'd statistics block into the generic
         * netdev counters.  GET_NET_STATS truncates to 32 bits on
         * 32-bit hosts (see the macro definition above).
         */
        net_stats->rx_packets =
                GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

        net_stats->tx_packets =
                GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

        net_stats->rx_bytes =
                GET_NET_STATS(stats_blk->stat_IfHCInOctets);

        net_stats->tx_bytes =
                GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

        net_stats->multicast =
                GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

        net_stats->collisions =
                (unsigned long) stats_blk->stat_EtherStatsCollisions;

        net_stats->rx_length_errors =
                (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
                stats_blk->stat_EtherStatsOverrsizePkts);

        net_stats->rx_over_errors =
                (unsigned long) stats_blk->stat_IfInMBUFDiscards;

        net_stats->rx_frame_errors =
                (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

        net_stats->rx_crc_errors =
                (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

        /* rx_errors is an aggregate of the individual error classes. */
        net_stats->rx_errors = net_stats->rx_length_errors +
                net_stats->rx_over_errors + net_stats->rx_frame_errors +
                net_stats->rx_crc_errors;

        net_stats->tx_aborted_errors =
                (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
                stats_blk->stat_Dot3StatsLateCollisions);

        /* Carrier-sense errors are forced to zero on 5706 and 5708 A0 --
         * presumably the counter is unreliable on those chips (hardware
         * erratum); TODO confirm against the chip errata.
         */
        if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
            (CHIP_ID(bp) == CHIP_ID_5708_A0))
                net_stats->tx_carrier_errors = 0;
        else {
                net_stats->tx_carrier_errors =
                        (unsigned long)
                        stats_blk->stat_Dot3StatsCarrierSenseErrors;
        }

        net_stats->tx_errors =
                (unsigned long)
                stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
                +
                net_stats->tx_aborted_errors +
                net_stats->tx_carrier_errors;

        /* Count both MAC buffer discards and firmware drops as missed. */
        net_stats->rx_missed_errors =
                (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
                stats_blk->stat_FwRxDrop);

        return net_stats;
}
5322
5323 /* All ethtool functions called with rtnl_lock */
5324
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2 *bp = netdev_priv(dev);
        int support_serdes = 0, support_copper = 0;

        /* With a remote-PHY-capable device both media types may be
         * available; otherwise the supported mask follows the configured
         * PHY port type.
         */
        cmd->supported = SUPPORTED_Autoneg;
        if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
                support_serdes = 1;
                support_copper = 1;
        } else if (bp->phy_port == PORT_FIBRE)
                support_serdes = 1;
        else
                support_copper = 1;

        if (support_serdes) {
                cmd->supported |= SUPPORTED_1000baseT_Full |
                        SUPPORTED_FIBRE;
                if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
                        cmd->supported |= SUPPORTED_2500baseX_Full;

        }
        if (support_copper) {
                cmd->supported |= SUPPORTED_10baseT_Half |
                        SUPPORTED_10baseT_Full |
                        SUPPORTED_100baseT_Half |
                        SUPPORTED_100baseT_Full |
                        SUPPORTED_1000baseT_Full |
                        SUPPORTED_TP;

        }

        /* Take a consistent snapshot of the link parameters under the
         * PHY lock.
         */
        spin_lock_bh(&bp->phy_lock);
        cmd->port = bp->phy_port;
        cmd->advertising = bp->advertising;

        if (bp->autoneg & AUTONEG_SPEED) {
                cmd->autoneg = AUTONEG_ENABLE;
        }
        else {
                cmd->autoneg = AUTONEG_DISABLE;
        }

        /* Speed/duplex are only meaningful while the link is up;
         * -1 tells ethtool they are unknown.
         */
        if (netif_carrier_ok(dev)) {
                cmd->speed = bp->line_speed;
                cmd->duplex = bp->duplex;
        }
        else {
                cmd->speed = -1;
                cmd->duplex = -1;
        }
        spin_unlock_bh(&bp->phy_lock);

        cmd->transceiver = XCVR_INTERNAL;
        cmd->phy_address = bp->phy_addr;

        return 0;
}
5383
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2 *bp = netdev_priv(dev);
        u8 autoneg = bp->autoneg;
        u8 req_duplex = bp->req_duplex;
        u16 req_line_speed = bp->req_line_speed;
        u32 advertising = bp->advertising;
        int err = -EINVAL;

        /* Work on local copies of the settings; commit them to bp only
         * after all validation below has passed.
         */
        spin_lock_bh(&bp->phy_lock);

        if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
                goto err_out_unlock;

        /* Switching the media type is only possible with a remote PHY. */
        if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
                goto err_out_unlock;

        if (cmd->autoneg == AUTONEG_ENABLE) {
                autoneg |= AUTONEG_SPEED;

                cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

                /* allow advertising 1 speed */
                if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
                        (cmd->advertising == ADVERTISED_10baseT_Full) ||
                        (cmd->advertising == ADVERTISED_100baseT_Half) ||
                        (cmd->advertising == ADVERTISED_100baseT_Full)) {

                        /* 10/100 speeds are copper-only. */
                        if (cmd->port == PORT_FIBRE)
                                goto err_out_unlock;

                        advertising = cmd->advertising;

                } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
                        /* 2.5G requires a capable PHY and a fibre port. */
                        if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
                            (cmd->port == PORT_TP))
                                goto err_out_unlock;
                } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
                        advertising = cmd->advertising;
                else if (cmd->advertising == ADVERTISED_1000baseT_Half)
                        /* 1000 half duplex is not supported. */
                        goto err_out_unlock;
                else {
                        /* Anything else: advertise everything the port
                         * type supports.
                         */
                        if (cmd->port == PORT_FIBRE)
                                advertising = ETHTOOL_ALL_FIBRE_SPEED;
                        else
                                advertising = ETHTOOL_ALL_COPPER_SPEED;
                }
                advertising |= ADVERTISED_Autoneg;
        }
        else {
                /* Forced speed/duplex: fibre only supports 1000/2500
                 * full duplex; those speeds cannot be forced on copper.
                 */
                if (cmd->port == PORT_FIBRE) {
                        if ((cmd->speed != SPEED_1000 &&
                             cmd->speed != SPEED_2500) ||
                            (cmd->duplex != DUPLEX_FULL))
                                goto err_out_unlock;

                        if (cmd->speed == SPEED_2500 &&
                            !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
                                goto err_out_unlock;
                }
                else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
                        goto err_out_unlock;

                autoneg &= ~AUTONEG_SPEED;
                req_line_speed = cmd->speed;
                req_duplex = cmd->duplex;
                advertising = 0;
        }

        /* Validation passed -- commit and reprogram the PHY. */
        bp->autoneg = autoneg;
        bp->advertising = advertising;
        bp->req_line_speed = req_line_speed;
        bp->req_duplex = req_duplex;

        err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
        spin_unlock_bh(&bp->phy_lock);

        return err;
}
5466
5467 static void
5468 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5469 {
5470         struct bnx2 *bp = netdev_priv(dev);
5471
5472         strcpy(info->driver, DRV_MODULE_NAME);
5473         strcpy(info->version, DRV_MODULE_VERSION);
5474         strcpy(info->bus_info, pci_name(bp->pdev));
5475         strcpy(info->fw_version, bp->fw_version);
5476 }
5477
#define BNX2_REGDUMP_LEN                (32 * 1024)

/* ethtool get_regs_len hook: the register dump size is fixed for all
 * chip variants, so the device argument is unused.
 */
static int
bnx2_get_regs_len(struct net_device *dev)
{
        return BNX2_REGDUMP_LEN;
}
5485
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
        u32 *p = _p, i, offset;
        u8 *orig_p = _p;
        struct bnx2 *bp = netdev_priv(dev);
        /* Pairs of [start, end) register ranges that are safe to read;
         * the final 0x8000 entry terminates the walk (== BNX2_REGDUMP_LEN).
         * Offsets between ranges are left as the zeros written below.
         */
        u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
                                 0x0800, 0x0880, 0x0c00, 0x0c10,
                                 0x0c30, 0x0d08, 0x1000, 0x101c,
                                 0x1040, 0x1048, 0x1080, 0x10a4,
                                 0x1400, 0x1490, 0x1498, 0x14f0,
                                 0x1500, 0x155c, 0x1580, 0x15dc,
                                 0x1600, 0x1658, 0x1680, 0x16d8,
                                 0x1800, 0x1820, 0x1840, 0x1854,
                                 0x1880, 0x1894, 0x1900, 0x1984,
                                 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
                                 0x1c80, 0x1c94, 0x1d00, 0x1d84,
                                 0x2000, 0x2030, 0x23c0, 0x2400,
                                 0x2800, 0x2820, 0x2830, 0x2850,
                                 0x2b40, 0x2c10, 0x2fc0, 0x3058,
                                 0x3c00, 0x3c94, 0x4000, 0x4010,
                                 0x4080, 0x4090, 0x43c0, 0x4458,
                                 0x4c00, 0x4c18, 0x4c40, 0x4c54,
                                 0x4fc0, 0x5010, 0x53c0, 0x5444,
                                 0x5c00, 0x5c18, 0x5c80, 0x5c90,
                                 0x5fc0, 0x6000, 0x6400, 0x6428,
                                 0x6800, 0x6848, 0x684c, 0x6860,
                                 0x6888, 0x6910, 0x8000 };

        regs->version = 0;

        /* Zero-fill so the gaps between readable ranges are defined. */
        memset(p, 0, BNX2_REGDUMP_LEN);

        /* Register reads require a powered-up, running device. */
        if (!netif_running(bp->dev))
                return;

        /* Walk each [start, end) range, reading 32-bit registers and
         * placing them at their register offset inside the dump buffer.
         */
        i = 0;
        offset = reg_boundaries[0];
        p += offset;
        while (offset < BNX2_REGDUMP_LEN) {
                *p++ = REG_RD(bp, offset);
                offset += 4;
                if (offset == reg_boundaries[i + 1]) {
                        /* End of this range: jump to the next one. */
                        offset = reg_boundaries[i + 2];
                        p = (u32 *) (orig_p + offset);
                        i += 2;
                }
        }
}
5535
5536 static void
5537 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5538 {
5539         struct bnx2 *bp = netdev_priv(dev);
5540
5541         if (bp->flags & NO_WOL_FLAG) {
5542                 wol->supported = 0;
5543                 wol->wolopts = 0;
5544         }
5545         else {
5546                 wol->supported = WAKE_MAGIC;
5547                 if (bp->wol)
5548                         wol->wolopts = WAKE_MAGIC;
5549                 else
5550                         wol->wolopts = 0;
5551         }
5552         memset(&wol->sopass, 0, sizeof(wol->sopass));
5553 }
5554
5555 static int
5556 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5557 {
5558         struct bnx2 *bp = netdev_priv(dev);
5559
5560         if (wol->wolopts & ~WAKE_MAGIC)
5561                 return -EINVAL;
5562
5563         if (wol->wolopts & WAKE_MAGIC) {
5564                 if (bp->flags & NO_WOL_FLAG)
5565                         return -EINVAL;
5566
5567                 bp->wol = 1;
5568         }
5569         else {
5570                 bp->wol = 0;
5571         }
5572         return 0;
5573 }
5574
static int
bnx2_nway_reset(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 bmcr;

        /* Restarting autoneg only makes sense if autoneg is enabled. */
        if (!(bp->autoneg & AUTONEG_SPEED)) {
                return -EINVAL;
        }

        spin_lock_bh(&bp->phy_lock);

        /* With a remote PHY, hand the whole operation to the firmware. */
        if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
                int rc;

                rc = bnx2_setup_remote_phy(bp, bp->phy_port);
                spin_unlock_bh(&bp->phy_lock);
                return rc;
        }

        /* Force a link down visible on the other side */
        if (bp->phy_flags & PHY_SERDES_FLAG) {
                bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                /* Drop the lock while sleeping; msleep() cannot be
                 * called in the BH-disabled section.
                 */
                spin_unlock_bh(&bp->phy_lock);

                msleep(20);

                spin_lock_bh(&bp->phy_lock);

                /* Arm the SerDes autoneg timeout handling in the timer. */
                bp->current_interval = SERDES_AN_TIMEOUT;
                bp->serdes_an_pending = 1;
                mod_timer(&bp->timer, jiffies + bp->current_interval);
        }

        /* Clear the loopback set above and restart autonegotiation. */
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        bmcr &= ~BMCR_LOOPBACK;
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

        spin_unlock_bh(&bp->phy_lock);

        return 0;
}
5617
5618 static int
5619 bnx2_get_eeprom_len(struct net_device *dev)
5620 {
5621         struct bnx2 *bp = netdev_priv(dev);
5622
5623         if (bp->flash_info == NULL)
5624                 return 0;
5625
5626         return (int) bp->flash_size;
5627 }
5628
5629 static int
5630 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5631                 u8 *eebuf)
5632 {
5633         struct bnx2 *bp = netdev_priv(dev);
5634         int rc;
5635
5636         /* parameters already validated in ethtool_get_eeprom */
5637
5638         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5639
5640         return rc;
5641 }
5642
5643 static int
5644 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5645                 u8 *eebuf)
5646 {
5647         struct bnx2 *bp = netdev_priv(dev);
5648         int rc;
5649
5650         /* parameters already validated in ethtool_set_eeprom */
5651
5652         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5653
5654         return rc;
5655 }
5656
5657 static int
5658 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5659 {
5660         struct bnx2 *bp = netdev_priv(dev);
5661
5662         memset(coal, 0, sizeof(struct ethtool_coalesce));
5663
5664         coal->rx_coalesce_usecs = bp->rx_ticks;
5665         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5666         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5667         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5668
5669         coal->tx_coalesce_usecs = bp->tx_ticks;
5670         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5671         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5672         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5673
5674         coal->stats_block_coalesce_usecs = bp->stats_ticks;
5675
5676         return 0;
5677 }
5678
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* Clamp each value to its limit rather than rejecting the
         * request: 0x3ff for tick values, 0xff for frame counts --
         * presumably the widths of the corresponding HC register
         * fields; TODO confirm against the register definitions.
         */
        bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
        if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

        bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
        if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

        bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
        if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

        bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
        if (bp->rx_quick_cons_trip_int > 0xff)
                bp->rx_quick_cons_trip_int = 0xff;

        bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
        if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

        bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
        if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

        bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
        if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

        bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
        if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
                0xff;

        /* 5708 only supports statistics intervals of 0 or exactly one
         * second; anything else is rounded to one second.
         */
        bp->stats_ticks = coal->stats_block_coalesce_usecs;
        if (CHIP_NUM(bp) == CHIP_NUM_5708) {
                if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
                        bp->stats_ticks = USEC_PER_SEC;
        }
        if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
                bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
        bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

        /* Restart the NIC so the new coalescing values are programmed. */
        if (netif_running(bp->dev)) {
                bnx2_netif_stop(bp);
                bnx2_init_nic(bp);
                bnx2_netif_start(bp);
        }

        return 0;
}
5727
5728 static void
5729 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5730 {
5731         struct bnx2 *bp = netdev_priv(dev);
5732
5733         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5734         ering->rx_mini_max_pending = 0;
5735         ering->rx_jumbo_max_pending = 0;
5736
5737         ering->rx_pending = bp->rx_ring_size;
5738         ering->rx_mini_pending = 0;
5739         ering->rx_jumbo_pending = 0;
5740
5741         ering->tx_max_pending = MAX_TX_DESC_CNT;
5742         ering->tx_pending = bp->tx_ring_size;
5743 }
5744
5745 static int
5746 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5747 {
5748         struct bnx2 *bp = netdev_priv(dev);
5749
5750         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
5751                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5752                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5753
5754                 return -EINVAL;
5755         }
5756         if (netif_running(bp->dev)) {
5757                 bnx2_netif_stop(bp);
5758                 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5759                 bnx2_free_skbs(bp);
5760                 bnx2_free_mem(bp);
5761         }
5762
5763         bnx2_set_rx_ring_size(bp, ering->rx_pending);
5764         bp->tx_ring_size = ering->tx_pending;
5765
5766         if (netif_running(bp->dev)) {
5767                 int rc;
5768
5769                 rc = bnx2_alloc_mem(bp);
5770                 if (rc)
5771                         return rc;
5772                 bnx2_init_nic(bp);
5773                 bnx2_netif_start(bp);
5774         }
5775
5776         return 0;
5777 }
5778
5779 static void
5780 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5781 {
5782         struct bnx2 *bp = netdev_priv(dev);
5783
5784         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5785         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5786         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5787 }
5788
5789 static int
5790 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5791 {
5792         struct bnx2 *bp = netdev_priv(dev);
5793
5794         bp->req_flow_ctrl = 0;
5795         if (epause->rx_pause)
5796                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5797         if (epause->tx_pause)
5798                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5799
5800         if (epause->autoneg) {
5801                 bp->autoneg |= AUTONEG_FLOW_CTRL;
5802         }
5803         else {
5804                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5805         }
5806
5807         spin_lock_bh(&bp->phy_lock);
5808
5809         bnx2_setup_phy(bp, bp->phy_port);
5810
5811         spin_unlock_bh(&bp->phy_lock);
5812
5813         return 0;
5814 }
5815
/* ethtool get_rx_csum hook: report whether RX checksum offload is on. */
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        return bp->rx_csum;
}
5823
/* ethtool set_rx_csum hook: the flag is consulted by the RX path, so
 * no device reprogramming is needed here.
 */
static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
        struct bnx2 *bp = netdev_priv(dev);

        bp->rx_csum = data;
        return 0;
}
5832
5833 static int
5834 bnx2_set_tso(struct net_device *dev, u32 data)
5835 {
5836         struct bnx2 *bp = netdev_priv(dev);
5837
5838         if (data) {
5839                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5840                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5841                         dev->features |= NETIF_F_TSO6;
5842         } else
5843                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5844                                    NETIF_F_TSO_ECN);
5845         return 0;
5846 }
5847
#define BNX2_NUM_STATS 46

/* ethtool statistics names.  The entry order MUST stay in sync with
 * bnx2_stats_offset_arr below -- entry i here labels the counter whose
 * statistics-block offset is bnx2_stats_offset_arr[i].
 */
static struct {
        char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
        { "rx_bytes" },
        { "rx_error_bytes" },
        { "tx_bytes" },
        { "tx_error_bytes" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_mac_errors" },
        { "tx_carrier_errors" },
        { "rx_crc_errors" },
        { "rx_align_errors" },
        { "tx_single_collisions" },
        { "tx_multi_collisions" },
        { "tx_deferred" },
        { "tx_excess_collisions" },
        { "tx_late_collisions" },
        { "tx_total_collisions" },
        { "rx_fragments" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_oversize_packets" },
        { "rx_64_byte_packets" },
        { "rx_65_to_127_byte_packets" },
        { "rx_128_to_255_byte_packets" },
        { "rx_256_to_511_byte_packets" },
        { "rx_512_to_1023_byte_packets" },
        { "rx_1024_to_1522_byte_packets" },
        { "rx_1523_to_9022_byte_packets" },
        { "tx_64_byte_packets" },
        { "tx_65_to_127_byte_packets" },
        { "tx_128_to_255_byte_packets" },
        { "tx_256_to_511_byte_packets" },
        { "tx_512_to_1023_byte_packets" },
        { "tx_1024_to_1522_byte_packets" },
        { "tx_1523_to_9022_byte_packets" },
        { "rx_xon_frames" },
        { "rx_xoff_frames" },
        { "tx_xon_frames" },
        { "tx_xoff_frames" },
        { "rx_mac_ctrl_frames" },
        { "rx_filtered_packets" },
        { "rx_discards" },
        { "rx_fw_discards" },
};
5900
/* Convert a statistics_block field offset into a 32-bit word index. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* 32-bit word offsets into the hardware statistics block, one per
 * entry in bnx2_stats_str_arr[].  64-bit counters reference their
 * high word; the low word follows at offset + 1.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
5951
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-counter width in bytes (8 or 4) for 5706 A0-A2 and 5708 A0;
 * a 0 entry means the counter is skipped (errata, see above).
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5962
/* Per-counter width in bytes for all other chip revisions; only
 * stat_IfHCInBadOctets (index 1) is skipped here.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5970
#define BNX2_NUM_TESTS 6

/* ethtool self-test names reported for ETH_SS_TEST.  The order must
 * match the buf[] slots filled in by bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
5983
5984 static int
5985 bnx2_get_sset_count(struct net_device *dev, int sset)
5986 {
5987         switch (sset) {
5988         case ETH_SS_TEST:
5989                 return BNX2_NUM_TESTS;
5990         case ETH_SS_STATS:
5991                 return BNX2_NUM_STATS;
5992         default:
5993                 return -EOPNOTSUPP;
5994         }
5995 }
5996
/* ethtool .self_test handler.
 *
 * The online tests (NVRAM, interrupt, link) always run.  When the
 * caller requests ETH_TEST_FL_OFFLINE, the NIC is stopped and reset
 * into diagnostic mode first so the destructive register/memory/
 * loopback tests can run, then the chip is brought back up.
 * buf[0..5] receives per-test results (non-zero = failed), matching
 * the order of bnx2_tests_str_arr[].
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Quiesce the device and release posted RX/TX buffers
		 * before running the destructive tests.
		 */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev)) {
			/* Interface is down: leave the chip reset. */
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			/* Interface is up: reinitialize and restart. */
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
6052
6053 static void
6054 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6055 {
6056         switch (stringset) {
6057         case ETH_SS_STATS:
6058                 memcpy(buf, bnx2_stats_str_arr,
6059                         sizeof(bnx2_stats_str_arr));
6060                 break;
6061         case ETH_SS_TEST:
6062                 memcpy(buf, bnx2_tests_str_arr,
6063                         sizeof(bnx2_tests_str_arr));
6064                 break;
6065         }
6066 }
6067
6068 static void
6069 bnx2_get_ethtool_stats(struct net_device *dev,
6070                 struct ethtool_stats *stats, u64 *buf)
6071 {
6072         struct bnx2 *bp = netdev_priv(dev);
6073         int i;
6074         u32 *hw_stats = (u32 *) bp->stats_blk;
6075         u8 *stats_len_arr = NULL;
6076
6077         if (hw_stats == NULL) {
6078                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6079                 return;
6080         }
6081
6082         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6083             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6084             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6085             (CHIP_ID(bp) == CHIP_ID_5708_A0))
6086                 stats_len_arr = bnx2_5706_stats_len_arr;
6087         else
6088                 stats_len_arr = bnx2_5708_stats_len_arr;
6089
6090         for (i = 0; i < BNX2_NUM_STATS; i++) {
6091                 if (stats_len_arr[i] == 0) {
6092                         /* skip this counter */
6093                         buf[i] = 0;
6094                         continue;
6095                 }
6096                 if (stats_len_arr[i] == 4) {
6097                         /* 4-byte counter */
6098                         buf[i] = (u64)
6099                                 *(hw_stats + bnx2_stats_offset_arr[i]);
6100                         continue;
6101                 }
6102                 /* 8-byte counter */
6103                 buf[i] = (((u64) *(hw_stats +
6104                                         bnx2_stats_offset_arr[i])) << 32) +
6105                                 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6106         }
6107 }
6108
6109 static int
6110 bnx2_phys_id(struct net_device *dev, u32 data)
6111 {
6112         struct bnx2 *bp = netdev_priv(dev);
6113         int i;
6114         u32 save;
6115
6116         if (data == 0)
6117                 data = 2;
6118
6119         save = REG_RD(bp, BNX2_MISC_CFG);
6120         REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6121
6122         for (i = 0; i < (data * 2); i++) {
6123                 if ((i % 2) == 0) {
6124                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6125                 }
6126                 else {
6127                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6128                                 BNX2_EMAC_LED_1000MB_OVERRIDE |
6129                                 BNX2_EMAC_LED_100MB_OVERRIDE |
6130                                 BNX2_EMAC_LED_10MB_OVERRIDE |
6131                                 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6132                                 BNX2_EMAC_LED_TRAFFIC);
6133                 }
6134                 msleep_interruptible(500);
6135                 if (signal_pending(current))
6136                         break;
6137         }
6138         REG_WR(bp, BNX2_EMAC_LED, 0);
6139         REG_WR(bp, BNX2_MISC_CFG, save);
6140         return 0;
6141 }
6142
6143 static int
6144 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6145 {
6146         struct bnx2 *bp = netdev_priv(dev);
6147
6148         if (CHIP_NUM(bp) == CHIP_NUM_5709)
6149                 return (ethtool_op_set_tx_ipv6_csum(dev, data));
6150         else
6151                 return (ethtool_op_set_tx_csum(dev, data));
6152 }
6153
/* ethtool entry points exposed by the bnx2 driver. */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= bnx2_set_tso,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
};
6184
/* Called with rtnl_lock */
/* MII ioctl handler (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).
 * PHY register access is rejected when a remote (management-owned)
 * PHY is in use, and requires the interface to be up because the
 * MDIO bus is only usable on a powered chip.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		/* phy_lock serializes MDIO access with the link poller. */
		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		/* Writing PHY registers is privileged. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
6238
6239 /* Called with rtnl_lock */
6240 static int
6241 bnx2_change_mac_addr(struct net_device *dev, void *p)
6242 {
6243         struct sockaddr *addr = p;
6244         struct bnx2 *bp = netdev_priv(dev);
6245
6246         if (!is_valid_ether_addr(addr->sa_data))
6247                 return -EINVAL;
6248
6249         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6250         if (netif_running(dev))
6251                 bnx2_set_mac_addr(bp);
6252
6253         return 0;
6254 }
6255
6256 /* Called with rtnl_lock */
6257 static int
6258 bnx2_change_mtu(struct net_device *dev, int new_mtu)
6259 {
6260         struct bnx2 *bp = netdev_priv(dev);
6261
6262         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6263                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6264                 return -EINVAL;
6265
6266         dev->mtu = new_mtu;
6267         if (netif_running(dev)) {
6268                 bnx2_netif_stop(bp);
6269
6270                 bnx2_init_nic(bp);
6271
6272                 bnx2_netif_start(bp);
6273         }
6274         return 0;
6275 }
6276
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: service the device synchronously with its IRQ line
 * disabled, so netconsole/kgdboe can make progress when normal
 * interrupt delivery is unavailable.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
6288
6289 static void __devinit
6290 bnx2_get_5709_media(struct bnx2 *bp)
6291 {
6292         u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6293         u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6294         u32 strap;
6295
6296         if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6297                 return;
6298         else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6299                 bp->phy_flags |= PHY_SERDES_FLAG;
6300                 return;
6301         }
6302
6303         if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6304                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6305         else
6306                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6307
6308         if (PCI_FUNC(bp->pdev->devfn) == 0) {
6309                 switch (strap) {
6310                 case 0x4:
6311                 case 0x5:
6312                 case 0x6:
6313                         bp->phy_flags |= PHY_SERDES_FLAG;
6314                         return;
6315                 }
6316         } else {
6317                 switch (strap) {
6318                 case 0x1:
6319                 case 0x2:
6320                 case 0x4:
6321                         bp->phy_flags |= PHY_SERDES_FLAG;
6322                         return;
6323                 }
6324         }
6325 }
6326
/* Probe-time helper: detect the bus type (PCI vs PCI-X), bus speed in
 * MHz, and bus width from the chip's misc status / clock control
 * registers, recording the results in bp->flags and bp->bus_speed_mhz.
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= PCIX_FLAG;

		/* PCI-X: derive the bus speed from the detected clock. */
		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Conventional PCI: 66MHz if M66EN is asserted, else 33. */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= PCI_32BIT_FLAG;

}
6378
6379 static int __devinit
6380 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6381 {
6382         struct bnx2 *bp;
6383         unsigned long mem_len;
6384         int rc, i, j;
6385         u32 reg;
6386         u64 dma_mask, persist_dma_mask;
6387
6388         SET_NETDEV_DEV(dev, &pdev->dev);
6389         bp = netdev_priv(dev);
6390
6391         bp->flags = 0;
6392         bp->phy_flags = 0;
6393
6394         /* enable device (incl. PCI PM wakeup), and bus-mastering */
6395         rc = pci_enable_device(pdev);
6396         if (rc) {
6397                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
6398                 goto err_out;
6399         }
6400
6401         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6402                 dev_err(&pdev->dev,
6403                         "Cannot find PCI device base address, aborting.\n");
6404                 rc = -ENODEV;
6405                 goto err_out_disable;
6406         }
6407
6408         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6409         if (rc) {
6410                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
6411                 goto err_out_disable;
6412         }
6413
6414         pci_set_master(pdev);
6415
6416         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6417         if (bp->pm_cap == 0) {
6418                 dev_err(&pdev->dev,
6419                         "Cannot find power management capability, aborting.\n");
6420                 rc = -EIO;
6421                 goto err_out_release;
6422         }
6423
6424         bp->dev = dev;
6425         bp->pdev = pdev;
6426
6427         spin_lock_init(&bp->phy_lock);
6428         spin_lock_init(&bp->indirect_lock);
6429         INIT_WORK(&bp->reset_task, bnx2_reset_task);
6430
6431         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
6432         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
6433         dev->mem_end = dev->mem_start + mem_len;
6434         dev->irq = pdev->irq;
6435
6436         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6437
6438         if (!bp->regview) {
6439                 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
6440                 rc = -ENOMEM;
6441                 goto err_out_release;
6442         }
6443
6444         /* Configure byte swap and enable write to the reg_window registers.
6445          * Rely on CPU to do target byte swapping on big endian systems
6446          * The chip's target access swapping will not swap all accesses
6447          */
6448         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6449                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6450                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6451
6452         bnx2_set_power_state(bp, PCI_D0);
6453
6454         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6455
6456         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6457                 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6458                         dev_err(&pdev->dev,
6459                                 "Cannot find PCIE capability, aborting.\n");
6460                         rc = -EIO;
6461                         goto err_out_unmap;
6462                 }
6463                 bp->flags |= PCIE_FLAG;
6464         } else {
6465                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6466                 if (bp->pcix_cap == 0) {
6467                         dev_err(&pdev->dev,
6468                                 "Cannot find PCIX capability, aborting.\n");
6469                         rc = -EIO;
6470                         goto err_out_unmap;
6471                 }
6472         }
6473
6474         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6475                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6476                         bp->flags |= MSI_CAP_FLAG;
6477         }
6478
6479         /* 5708 cannot support DMA addresses > 40-bit.  */
6480         if (CHIP_NUM(bp) == CHIP_NUM_5708)
6481                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6482         else
6483                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6484
6485         /* Configure DMA attributes. */
6486         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6487                 dev->features |= NETIF_F_HIGHDMA;
6488                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6489                 if (rc) {
6490                         dev_err(&pdev->dev,
6491                                 "pci_set_consistent_dma_mask failed, aborting.\n");
6492                         goto err_out_unmap;
6493                 }
6494         } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6495                 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6496                 goto err_out_unmap;
6497         }
6498
6499         if (!(bp->flags & PCIE_FLAG))
6500                 bnx2_get_pci_speed(bp);
6501
6502         /* 5706A0 may falsely detect SERR and PERR. */
6503         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6504                 reg = REG_RD(bp, PCI_COMMAND);
6505                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6506                 REG_WR(bp, PCI_COMMAND, reg);
6507         }
6508         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6509                 !(bp->flags & PCIX_FLAG)) {
6510
6511                 dev_err(&pdev->dev,
6512                         "5706 A1 can only be used in a PCIX bus, aborting.\n");
6513                 goto err_out_unmap;
6514         }
6515
6516         bnx2_init_nvram(bp);
6517
6518         reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6519
6520         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
6521             BNX2_SHM_HDR_SIGNATURE_SIG) {
6522                 u32 off = PCI_FUNC(pdev->devfn) << 2;
6523
6524                 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6525         } else
6526                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6527
6528         /* Get the permanent MAC address.  First we need to make sure the
6529          * firmware is actually running.
6530          */
6531         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
6532
6533         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6534             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
6535                 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
6536                 rc = -ENODEV;
6537                 goto err_out_unmap;
6538         }
6539
6540         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
6541         for (i = 0, j = 0; i < 3; i++) {
6542                 u8 num, k, skip0;
6543
6544                 num = (u8) (reg >> (24 - (i * 8)));
6545                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
6546                         if (num >= k || !skip0 || k == 1) {
6547                                 bp->fw_version[j++] = (num / k) + '0';
6548                                 skip0 = 0;
6549                         }
6550                 }
6551                 if (i != 2)
6552                         bp->fw_version[j++] = '.';
6553         }
6554         if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
6555             BNX2_PORT_FEATURE_ASF_ENABLED) {
6556                 bp->flags |= ASF_ENABLE_FLAG;
6557
6558                 for (i = 0; i < 30; i++) {
6559                         reg = REG_RD_IND(bp, bp->shmem_base +
6560                                              BNX2_BC_STATE_CONDITION);
6561                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
6562                                 break;
6563                         msleep(10);
6564                 }
6565         }
6566         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
6567         reg &= BNX2_CONDITION_MFW_RUN_MASK;
6568         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
6569             reg != BNX2_CONDITION_MFW_RUN_NONE) {
6570                 int i;
6571                 u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);
6572
6573                 bp->fw_version[j++] = ' ';
6574                 for (i = 0; i < 3; i++) {
6575                         reg = REG_RD_IND(bp, addr + i * 4);
6576                         reg = swab32(reg);
6577                         memcpy(&bp->fw_version[j], &reg, 4);
6578                         j += 4;
6579                 }
6580         }
6581
6582         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
6583         bp->mac_addr[0] = (u8) (reg >> 8);
6584         bp->mac_addr[1] = (u8) reg;
6585
6586         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
6587         bp->mac_addr[2] = (u8) (reg >> 24);
6588         bp->mac_addr[3] = (u8) (reg >> 16);
6589         bp->mac_addr[4] = (u8) (reg >> 8);
6590         bp->mac_addr[5] = (u8) reg;
6591
6592         bp->tx_ring_size = MAX_TX_DESC_CNT;
6593         bnx2_set_rx_ring_size(bp, 255);
6594
6595         bp->rx_csum = 1;
6596
6597         bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6598
6599         bp->tx_quick_cons_trip_int = 20;
6600         bp->tx_quick_cons_trip = 20;
6601         bp->tx_ticks_int = 80;
6602         bp->tx_ticks = 80;
6603
6604         bp->rx_quick_cons_trip_int = 6;
6605         bp->rx_quick_cons_trip = 6;
6606         bp->rx_ticks_int = 18;
6607         bp->rx_ticks = 18;
6608
6609         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6610
6611         bp->timer_interval =  HZ;
6612         bp->current_interval =  HZ;
6613
6614         bp->phy_addr = 1;
6615
6616         /* Disable WOL support if we are running on a SERDES chip. */
6617         if (CHIP_NUM(bp) == CHIP_NUM_5709)
6618                 bnx2_get_5709_media(bp);
6619         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
6620                 bp->phy_flags |= PHY_SERDES_FLAG;
6621
6622         bp->phy_port = PORT_TP;
6623         if (bp->phy_flags & PHY_SERDES_FLAG) {
6624                 bp->phy_port = PORT_FIBRE;
6625                 bp->flags |= NO_WOL_FLAG;
6626                 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
6627                         bp->phy_addr = 2;
6628                         reg = REG_RD_IND(bp, bp->shmem_base +
6629                                          BNX2_SHARED_HW_CFG_CONFIG);
6630                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6631                                 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6632                 }
6633                 bnx2_init_remote_phy(bp);
6634
6635         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6636                    CHIP_NUM(bp) == CHIP_NUM_5708)
6637                 bp->phy_flags |= PHY_CRC_FIX_FLAG;
6638         else if (CHIP_ID(bp) == CHIP_ID_5709_A0 ||
6639                  CHIP_ID(bp) == CHIP_ID_5709_A1)
6640                 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
6641
6642         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6643             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6644             (CHIP_ID(bp) == CHIP_ID_5708_B1))
6645                 bp->flags |= NO_WOL_FLAG;
6646
6647         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6648                 bp->tx_quick_cons_trip_int =
6649                         bp->tx_quick_cons_trip;
6650                 bp->tx_ticks_int = bp->tx_ticks;
6651                 bp->rx_quick_cons_trip_int =
6652                         bp->rx_quick_cons_trip;
6653                 bp->rx_ticks_int = bp->rx_ticks;
6654                 bp->comp_prod_trip_int = bp->comp_prod_trip;
6655                 bp->com_ticks_int = bp->com_ticks;
6656                 bp->cmd_ticks_int = bp->cmd_ticks;
6657         }
6658
6659         /* Disable MSI on 5706 if AMD 8132 bridge is found.
6660          *
6661          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
6662          * with byte enables disabled on the unused 32-bit word.  This is legal
6663          * but causes problems on the AMD 8132 which will eventually stop
6664          * responding after a while.
6665          *
6666          * AMD believes this incompatibility is unique to the 5706, and
6667          * prefers to locally disable MSI rather than globally disabling it.
6668          */
6669         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6670                 struct pci_dev *amd_8132 = NULL;
6671
6672                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6673                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
6674                                                   amd_8132))) {
6675
6676                         if (amd_8132->revision >= 0x10 &&
6677                             amd_8132->revision <= 0x13) {
6678                                 disable_msi = 1;
6679                                 pci_dev_put(amd_8132);
6680                                 break;
6681                         }
6682                 }
6683         }
6684
6685         bnx2_set_default_link(bp);
6686         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6687
6688         init_timer(&bp->timer);
6689         bp->timer.expires = RUN_AT(bp->timer_interval);
6690         bp->timer.data = (unsigned long) bp;
6691         bp->timer.function = bnx2_timer;
6692
6693         return 0;
6694
6695 err_out_unmap:
6696         if (bp->regview) {
6697                 iounmap(bp->regview);
6698                 bp->regview = NULL;
6699         }
6700
6701 err_out_release:
6702         pci_release_regions(pdev);
6703
6704 err_out_disable:
6705         pci_disable_device(pdev);
6706         pci_set_drvdata(pdev, NULL);
6707
6708 err_out:
6709         return rc;
6710 }
6711
6712 static char * __devinit
6713 bnx2_bus_string(struct bnx2 *bp, char *str)
6714 {
6715         char *s = str;
6716
6717         if (bp->flags & PCIE_FLAG) {
6718                 s += sprintf(s, "PCI Express");
6719         } else {
6720                 s += sprintf(s, "PCI");
6721                 if (bp->flags & PCIX_FLAG)
6722                         s += sprintf(s, "-X");
6723                 if (bp->flags & PCI_32BIT_FLAG)
6724                         s += sprintf(s, " 32-bit");
6725                 else
6726                         s += sprintf(s, " 64-bit");
6727                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
6728         }
6729         return str;
6730 }
6731
6732 static int __devinit
6733 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6734 {
6735         static int version_printed = 0;
6736         struct net_device *dev = NULL;
6737         struct bnx2 *bp;
6738         int rc;
6739         char str[40];
6740         DECLARE_MAC_BUF(mac);
6741
6742         if (version_printed++ == 0)
6743                 printk(KERN_INFO "%s", version);
6744
6745         /* dev zeroed in init_etherdev */
6746         dev = alloc_etherdev(sizeof(*bp));
6747
6748         if (!dev)
6749                 return -ENOMEM;
6750
6751         rc = bnx2_init_board(pdev, dev);
6752         if (rc < 0) {
6753                 free_netdev(dev);
6754                 return rc;
6755         }
6756
6757         dev->open = bnx2_open;
6758         dev->hard_start_xmit = bnx2_start_xmit;
6759         dev->stop = bnx2_close;
6760         dev->get_stats = bnx2_get_stats;
6761         dev->set_multicast_list = bnx2_set_rx_mode;
6762         dev->do_ioctl = bnx2_ioctl;
6763         dev->set_mac_address = bnx2_change_mac_addr;
6764         dev->change_mtu = bnx2_change_mtu;
6765         dev->tx_timeout = bnx2_tx_timeout;
6766         dev->watchdog_timeo = TX_TIMEOUT;
6767 #ifdef BCM_VLAN
6768         dev->vlan_rx_register = bnx2_vlan_rx_register;
6769 #endif
6770         dev->ethtool_ops = &bnx2_ethtool_ops;
6771
6772         bp = netdev_priv(dev);
6773         netif_napi_add(dev, &bp->napi, bnx2_poll, 64);
6774
6775 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6776         dev->poll_controller = poll_bnx2;
6777 #endif
6778
6779         pci_set_drvdata(pdev, dev);
6780
6781         memcpy(dev->dev_addr, bp->mac_addr, 6);
6782         memcpy(dev->perm_addr, bp->mac_addr, 6);
6783         bp->name = board_info[ent->driver_data].name;
6784
6785         dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
6786         if (CHIP_NUM(bp) == CHIP_NUM_5709)
6787                 dev->features |= NETIF_F_IPV6_CSUM;
6788
6789 #ifdef BCM_VLAN
6790         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6791 #endif
6792         dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6793         if (CHIP_NUM(bp) == CHIP_NUM_5709)
6794                 dev->features |= NETIF_F_TSO6;
6795
6796         if ((rc = register_netdev(dev))) {
6797                 dev_err(&pdev->dev, "Cannot register net device\n");
6798                 if (bp->regview)
6799                         iounmap(bp->regview);
6800                 pci_release_regions(pdev);
6801                 pci_disable_device(pdev);
6802                 pci_set_drvdata(pdev, NULL);
6803                 free_netdev(dev);
6804                 return rc;
6805         }
6806
6807         printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
6808                 "IRQ %d, node addr %s\n",
6809                 dev->name,
6810                 bp->name,
6811                 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
6812                 ((CHIP_ID(bp) & 0x0ff0) >> 4),
6813                 bnx2_bus_string(bp, str),
6814                 dev->base_addr,
6815                 bp->pdev->irq, print_mac(mac, dev->dev_addr));
6816
6817         return 0;
6818 }
6819
6820 static void __devexit
6821 bnx2_remove_one(struct pci_dev *pdev)
6822 {
6823         struct net_device *dev = pci_get_drvdata(pdev);
6824         struct bnx2 *bp = netdev_priv(dev);
6825
6826         flush_scheduled_work();
6827
6828         unregister_netdev(dev);
6829
6830         if (bp->regview)
6831                 iounmap(bp->regview);
6832
6833         free_netdev(dev);
6834         pci_release_regions(pdev);
6835         pci_disable_device(pdev);
6836         pci_set_drvdata(pdev, NULL);
6837 }
6838
6839 static int
6840 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
6841 {
6842         struct net_device *dev = pci_get_drvdata(pdev);
6843         struct bnx2 *bp = netdev_priv(dev);
6844         u32 reset_code;
6845
6846         /* PCI register 4 needs to be saved whether netif_running() or not.
6847          * MSI address and data need to be saved if using MSI and
6848          * netif_running().
6849          */
6850         pci_save_state(pdev);
6851         if (!netif_running(dev))
6852                 return 0;
6853
6854         flush_scheduled_work();
6855         bnx2_netif_stop(bp);
6856         netif_device_detach(dev);
6857         del_timer_sync(&bp->timer);
6858         if (bp->flags & NO_WOL_FLAG)
6859                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
6860         else if (bp->wol)
6861                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6862         else
6863                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6864         bnx2_reset_chip(bp, reset_code);
6865         bnx2_free_skbs(bp);
6866         bnx2_set_power_state(bp, pci_choose_state(pdev, state));
6867         return 0;
6868 }
6869
6870 static int
6871 bnx2_resume(struct pci_dev *pdev)
6872 {
6873         struct net_device *dev = pci_get_drvdata(pdev);
6874         struct bnx2 *bp = netdev_priv(dev);
6875
6876         pci_restore_state(pdev);
6877         if (!netif_running(dev))
6878                 return 0;
6879
6880         bnx2_set_power_state(bp, PCI_D0);
6881         netif_device_attach(dev);
6882         bnx2_init_nic(bp);
6883         bnx2_netif_start(bp);
6884         return 0;
6885 }
6886
/* PCI driver descriptor: binds the probe/remove and PM callbacks above
 * to the device IDs in bnx2_pci_tbl.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
6895
6896 static int __init bnx2_init(void)
6897 {
6898         return pci_register_driver(&bnx2_pci_driver);
6899 }
6900
/* Module exit point: unregister the driver; the PCI core invokes
 * bnx2_remove_one() for each bound device.
 */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
6905
/* Hook module load/unload to the init/cleanup routines above. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
6908
6909
6910