/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2009 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif
#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"3.100"
#define DRV_MODULE_RELDATE	"August 25, 2009"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_ext_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
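/* Illustrative sketch (not driver code): because TG3_TX_RING_SIZE is a
 * power of two, the wrap in NEXT_TX() needs no divide.  For a ring of
 * 512 entries:
 *
 *	next = (511 + 1) % 512;		// div/mod instruction
 *	next = (511 + 1) & 511;		// single AND, also yields 0
 *
 * Keeping the size a compile-time constant is what lets GCC do this.
 */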
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tp)	((tp)->tx_pending / 4)
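/* For example, with the default tx_pending of TG3_DEF_TX_RING_PENDING
 * (511), the queue is woken once 511 / 4 = 127 descriptors are free.
 */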
#define TG3_RAW_IP_ALIGN 2

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
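/* Usage sketch, not driver code: the bits are the standard netif_msg_*
 * values from <linux/netdevice.h>, so e.g. "modprobe tg3 tg3_debug=0x3"
 * would enable NETIF_MSG_DRV (0x1) and NETIF_MSG_PROBE (0x2) only.
 */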
static struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return (readl(tp->regs + off));
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return (readl(tp->aperegs + off));
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return (readl(tp->regs + off + GRCMBOX_BASE));
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg,val)		tp->write32(tp, reg, val)
#define tw32_f(reg,val)		_tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us)	_tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)		tp->read32(tp, reg)
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
				APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	int off;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	off = 4 * locknum;
	tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
}
static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}

static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}
static inline unsigned int tg3_has_work(struct tg3 *tp)
{
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tp->tx_cons ||
	    sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	napi_disable(&tp->napi);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	napi_enable(&tp->napi);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000
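/* Each busy-poll iteration below sleeps 10 usec, so PHY_BUSY_LOOPS bounds
 * an MDIO transaction at roughly 5000 * 10 usec = 50 msec before -EBUSY.
 */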
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
		return -EAGAIN;

	if (tg3_readphy(tp, reg, &val))
		return -EIO;

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;

	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
		return -EAGAIN;

	if (tg3_writephy(tp, reg, val))
		return -EIO;

	return 0;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case TG3_PHY_ID_BCM50610:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case TG3_PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case TG3_PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case TG3_PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		mutex_lock(&tp->mdio_bus->mdio_lock);
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
		mutex_unlock(&tp->mdio_bus->mdio_lock);
	}

	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static void tg3_mdio_stop(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		mutex_lock(&tp->mdio_bus->mdio_lock);
		tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
		mutex_unlock(&tp->mdio_bus->mdio_lock);
	}
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	tg3_mdio_start(tp);

	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
	    (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
			tp->dev->name, i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	if (!phydev || !phydev->drv) {
		printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case TG3_PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		break;
	case TG3_PHY_ID_BCM50610:
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthrough */
	case TG3_PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case TG3_PHY_ID_RTL8201E:
	case TG3_PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
		break;
	}

	tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500
/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
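/* Worked example: with the full 2500 usec budget remaining, delay_cnt
 * becomes (2500 >> 3) + 1 = 313, i.e. up to 313 polls of udelay(8),
 * about 2.5 msec in total.
 */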
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		if (netif_msg_link(tp))
			printk(KERN_INFO PFX "%s: Link is down.\n",
			       tp->dev->name);
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       tp->dev->name,
		       (tp->link_config.active_speed == SPEED_1000 ?
			1000 :
			(tp->link_config.active_speed == SPEED_100 ?
			 100 : 10)),
		       (tp->link_config.active_duplex == DUPLEX_FULL ?
			"full" : "half"));

		printk(KERN_INFO PFX
		       "%s: Flow control is %s for TX and %s for RX.\n",
		       tp->dev->name,
		       (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
		       "on" : "off",
		       (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
		       "on" : "off");
		tg3_ump_link_report(tp);
	}
}
static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
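/* The resolution above follows the IEEE 802.3 Annex 28B pause table:
 *
 *	local PAUSE/ASYM	partner PAUSE/ASYM	result
 *	1 / x			1 / x			TX+RX
 *	1 / 1			0 / 1			RX only
 *	0 / 1			1 / 1			TX only
 *	(anything else)					none
 */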
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE &&
	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	spin_lock(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
		return -EINVAL;
	}

	tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	if (tp->link_config.phy_is_low_power) {
		tp->link_config.phy_is_low_power = 0;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
		tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
	}
}
static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}

static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		return;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* Enable SM_DSP clock and tx 6dB coding. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Turn off SM_DSP clock. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
}
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, 0x16, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	} else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 cpmuctrl;
	u32 phy_status;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		u32 phy;

		phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		u32 val;

		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	tg3_phy_apply_otp(tp);

	if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	} else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that
	 * support jumbo frames.
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
			   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
			/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
			u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
					     GRC_LCLCTRL_GPIO_OE1 |
					     GRC_LCLCTRL_GPIO_OE2 |
					     GRC_LCLCTRL_GPIO_OUTPUT0 |
					     GRC_LCLCTRL_GPIO_OUTPUT1 |
					     tp->grc_local_ctrl;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				   NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					  GRC_LCLCTRL_GPIO_OE1 |
					  GRC_LCLCTRL_GPIO_OE2 |
					  GRC_LCLCTRL_GPIO_OUTPUT1 |
					  GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}

static int tg3_setup_phy(struct tg3 *, int);

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
			     MII_TG3_AUXCTL_PCTL_100TX_LPWR |
			     MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
			     MII_TG3_AUXCTL_PCTL_VREG_11V);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tp->tg3_flags & TG3_FLAG_NVRAM) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}
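/* The grant poll above allows 8000 iterations of udelay(20), i.e. a
 * worst case of about 160 msec before giving up with -ENODEV.
 */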
/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
	if (tp->tg3_flags & TG3_FLAG_NVRAM) {
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
	}
}

/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}
}

/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}
2205 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2206 u32 offset, u32 *val)
2211 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2214 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2215 EEPROM_ADDR_DEVID_MASK |
2217 tw32(GRC_EEPROM_ADDR,
2219 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2220 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2221 EEPROM_ADDR_ADDR_MASK) |
2222 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2224 for (i = 0; i < 1000; i++) {
2225 tmp = tr32(GRC_EEPROM_ADDR);
2227 if (tmp & EEPROM_ADDR_COMPLETE)
2231 if (!(tmp & EEPROM_ADDR_COMPLETE))
2234 tmp = tr32(GRC_EEPROM_DATA);
2236 /*
2237 * The data will always be opposite the native endian
2238 * format. Perform a blind byteswap to compensate.
2239 */
2240 *val = swab32(tmp);
2245 #define NVRAM_CMD_TIMEOUT 10000
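/* Editor's note: issue one command to the NVRAM state machine and
 * busy-wait for NVRAM_CMD_DONE, giving up after NVRAM_CMD_TIMEOUT
 * polls.
 */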
2247 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2251 tw32(NVRAM_CMD, nvram_cmd);
2252 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2254 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2260 if (i == NVRAM_CMD_TIMEOUT)
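/* Editor's note: Atmel AT45DB0x1B-style flashes are page addressed
 * rather than linearly addressed, so a linear offset has to be split
 * into a page number plus a byte offset within the page. The two
 * helpers below translate between the logical (linear) and physical
 * (paged) views of the same location.
 */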
2266 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2268 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2269 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2270 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2271 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2272 (tp->nvram_jedecnum == JEDEC_ATMEL))
2274 addr = ((addr / tp->nvram_pagesize) <<
2275 ATMEL_AT45DB0X1B_PAGE_POS) +
2276 (addr % tp->nvram_pagesize);
2281 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2283 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2284 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2285 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2286 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2287 (tp->nvram_jedecnum == JEDEC_ATMEL))
2289 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2290 tp->nvram_pagesize) +
2291 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2296 /* NOTE: Data read in from NVRAM is byteswapped according to
2297 * the byteswapping settings for all other register accesses.
2298 * tg3 devices are BE devices, so on a BE machine, the data
2299 * returned will be exactly as it is seen in NVRAM. On a LE
2300 * machine, the 32-bit value will be byteswapped.
2301 */
2302 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2306 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
2307 return tg3_nvram_read_using_eeprom(tp, offset, val);
2309 offset = tg3_nvram_phys_addr(tp, offset);
2311 if (offset > NVRAM_ADDR_MSK)
2314 ret = tg3_nvram_lock(tp);
2318 tg3_enable_nvram_access(tp);
2320 tw32(NVRAM_ADDR, offset);
2321 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2322 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2325 *val = tr32(NVRAM_RDDATA);
2327 tg3_disable_nvram_access(tp);
2329 tg3_nvram_unlock(tp);
2334 /* Ensures NVRAM data is in bytestream format. */
2335 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2338 int res = tg3_nvram_read(tp, offset, &v);
2340 *val = cpu_to_be32(v);
2344 /* tp->lock is held. */
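/* Editor's note: program the station address into the four MAC
 * address slots (optionally skipping slot 1), mirror it into the
 * extended address registers on 5703/5704, and seed the transmit
 * backoff generator from the byte sum of the address.
 */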
2345 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2347 u32 addr_high, addr_low;
2350 addr_high = ((tp->dev->dev_addr[0] << 8) |
2351 tp->dev->dev_addr[1]);
2352 addr_low = ((tp->dev->dev_addr[2] << 24) |
2353 (tp->dev->dev_addr[3] << 16) |
2354 (tp->dev->dev_addr[4] << 8) |
2355 (tp->dev->dev_addr[5] << 0));
2356 for (i = 0; i < 4; i++) {
2357 if (i == 1 && skip_mac_1)
2359 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2360 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2363 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2364 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2365 for (i = 0; i < 12; i++) {
2366 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2367 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2371 addr_high = (tp->dev->dev_addr[0] +
2372 tp->dev->dev_addr[1] +
2373 tp->dev->dev_addr[2] +
2374 tp->dev->dev_addr[3] +
2375 tp->dev->dev_addr[4] +
2376 tp->dev->dev_addr[5]) &
2377 TX_BACKOFF_SEED_MASK;
2378 tw32(MAC_TX_BACKOFF_SEED, addr_high);
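/* Editor's note: move the device between D0 and a low-power PCI
 * state. The current link parameters are saved, the advertised modes
 * are trimmed down to what Wake-on-LAN needs, the core clocks are
 * gated, and the PHY is powered down when neither WOL nor ASF
 * requires it to stay up.
 */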
2381 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2384 bool device_should_wake, do_low_power;
2386 /* Make sure register accesses (indirect or otherwise)
2387 * will function correctly.
2389 pci_write_config_dword(tp->pdev,
2390 TG3PCI_MISC_HOST_CTRL,
2391 tp->misc_host_ctrl);
2395 pci_enable_wake(tp->pdev, state, false);
2396 pci_set_power_state(tp->pdev, PCI_D0);
2398 /* Switch out of Vaux if it is a NIC */
2399 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2400 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2410 printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
2411 tp->dev->name, state);
2415 /* Restore the CLKREQ setting. */
2416 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
2419 pci_read_config_word(tp->pdev,
2420 tp->pcie_cap + PCI_EXP_LNKCTL,
2422 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2423 pci_write_config_word(tp->pdev,
2424 tp->pcie_cap + PCI_EXP_LNKCTL,
2428 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2429 tw32(TG3PCI_MISC_HOST_CTRL,
2430 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2432 device_should_wake = pci_pme_capable(tp->pdev, state) &&
2433 device_may_wakeup(&tp->pdev->dev) &&
2434 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2436 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2437 do_low_power = false;
2438 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2439 !tp->link_config.phy_is_low_power) {
2440 struct phy_device *phydev;
2441 u32 phyid, advertising;
2443 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
2445 tp->link_config.phy_is_low_power = 1;
2447 tp->link_config.orig_speed = phydev->speed;
2448 tp->link_config.orig_duplex = phydev->duplex;
2449 tp->link_config.orig_autoneg = phydev->autoneg;
2450 tp->link_config.orig_advertising = phydev->advertising;
2452 advertising = ADVERTISED_TP |
2454 ADVERTISED_Autoneg |
2455 ADVERTISED_10baseT_Half;
2457 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2458 device_should_wake) {
2459 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2461 ADVERTISED_100baseT_Half |
2462 ADVERTISED_100baseT_Full |
2463 ADVERTISED_10baseT_Full;
2465 advertising |= ADVERTISED_10baseT_Full;
2468 phydev->advertising = advertising;
2470 phy_start_aneg(phydev);
2472 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2473 if (phyid != TG3_PHY_ID_BCMAC131) {
2474 phyid &= TG3_PHY_OUI_MASK;
2475 if (phyid == TG3_PHY_OUI_1 ||
2476 phyid == TG3_PHY_OUI_2 ||
2477 phyid == TG3_PHY_OUI_3)
2478 do_low_power = true;
2482 do_low_power = true;
2484 if (tp->link_config.phy_is_low_power == 0) {
2485 tp->link_config.phy_is_low_power = 1;
2486 tp->link_config.orig_speed = tp->link_config.speed;
2487 tp->link_config.orig_duplex = tp->link_config.duplex;
2488 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2491 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2492 tp->link_config.speed = SPEED_10;
2493 tp->link_config.duplex = DUPLEX_HALF;
2494 tp->link_config.autoneg = AUTONEG_ENABLE;
2495 tg3_setup_phy(tp, 0);
2499 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2502 val = tr32(GRC_VCPU_EXT_CTRL);
2503 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2504 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2508 for (i = 0; i < 200; i++) {
2509 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2510 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2515 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2516 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2517 WOL_DRV_STATE_SHUTDOWN |
2521 if (device_should_wake) {
2524 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
2526 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2530 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2531 mac_mode = MAC_MODE_PORT_MODE_GMII;
2533 mac_mode = MAC_MODE_PORT_MODE_MII;
2535 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2536 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2538 u32 speed = (tp->tg3_flags &
2539 TG3_FLAG_WOL_SPEED_100MB) ?
2540 SPEED_100 : SPEED_10;
2541 if (tg3_5700_link_polarity(tp, speed))
2542 mac_mode |= MAC_MODE_LINK_POLARITY;
2544 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2547 mac_mode = MAC_MODE_PORT_MODE_TBI;
2550 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2551 tw32(MAC_LED_CTRL, tp->led_ctrl);
2553 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2554 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2555 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2556 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2557 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2558 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2560 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2561 mac_mode |= tp->mac_mode &
2562 (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2563 if (mac_mode & MAC_MODE_APE_TX_EN)
2564 mac_mode |= MAC_MODE_TDE_ENABLE;
2567 tw32_f(MAC_MODE, mac_mode);
2570 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2574 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2575 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2576 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2579 base_val = tp->pci_clock_ctrl;
2580 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2581 CLOCK_CTRL_TXCLK_DISABLE);
2583 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2584 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2585 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2586 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2587 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2589 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2590 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2591 u32 newbits1, newbits2;
2593 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2594 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2595 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2596 CLOCK_CTRL_TXCLK_DISABLE |
2598 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2599 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2600 newbits1 = CLOCK_CTRL_625_CORE;
2601 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2603 newbits1 = CLOCK_CTRL_ALTCLK;
2604 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2607 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2610 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2613 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2616 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2617 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2618 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2619 CLOCK_CTRL_TXCLK_DISABLE |
2620 CLOCK_CTRL_44MHZ_CORE);
2622 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2625 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2626 tp->pci_clock_ctrl | newbits3, 40);
2630 if (!(device_should_wake) &&
2631 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
2632 tg3_power_down_phy(tp, do_low_power);
2634 tg3_frob_aux_power(tp);
2636 /* Workaround for unstable PLL clock */
2637 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2638 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2639 u32 val = tr32(0x7d00);
2641 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2643 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2646 err = tg3_nvram_lock(tp);
2647 tg3_halt_cpu(tp, RX_CPU_BASE);
2649 tg3_nvram_unlock(tp);
2653 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2655 if (device_should_wake)
2656 pci_enable_wake(tp->pdev, state, true);
2658 /* Finally, set the new power state. */
2659 pci_set_power_state(tp->pdev, state);
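/* Editor's note: translate the speed/duplex field of the PHY aux
 * status register into SPEED_nnn/DUPLEX_nnn values. Unrecognized
 * encodings fall back to the FET status bits where applicable,
 * otherwise to SPEED_INVALID/DUPLEX_INVALID.
 */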
2664 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2666 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2667 case MII_TG3_AUX_STAT_10HALF:
2669 *duplex = DUPLEX_HALF;
2672 case MII_TG3_AUX_STAT_10FULL:
2674 *duplex = DUPLEX_FULL;
2677 case MII_TG3_AUX_STAT_100HALF:
2679 *duplex = DUPLEX_HALF;
2682 case MII_TG3_AUX_STAT_100FULL:
2684 *duplex = DUPLEX_FULL;
2687 case MII_TG3_AUX_STAT_1000HALF:
2688 *speed = SPEED_1000;
2689 *duplex = DUPLEX_HALF;
2692 case MII_TG3_AUX_STAT_1000FULL:
2693 *speed = SPEED_1000;
2694 *duplex = DUPLEX_FULL;
2698 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
2699 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2701 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2705 *speed = SPEED_INVALID;
2706 *duplex = DUPLEX_INVALID;
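/* Editor's note: begin copper link bring-up. MII_ADVERTISE and
 * MII_TG3_CTRL are programmed from link_config (dropping gigabit and
 * 100baseT modes in low-power state), then autonegotiation is
 * restarted, or the requested speed and duplex are forced through
 * BMCR when autoneg is disabled.
 */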
2711 static void tg3_phy_copper_begin(struct tg3 *tp)
2716 if (tp->link_config.phy_is_low_power) {
2717 /* Entering low power mode. Disable gigabit and
2718 * 100baseT advertisements.
2719 */
2720 tg3_writephy(tp, MII_TG3_CTRL, 0);
2722 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2723 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2724 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2725 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2727 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2728 } else if (tp->link_config.speed == SPEED_INVALID) {
2729 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2730 tp->link_config.advertising &=
2731 ~(ADVERTISED_1000baseT_Half |
2732 ADVERTISED_1000baseT_Full);
2734 new_adv = ADVERTISE_CSMA;
2735 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2736 new_adv |= ADVERTISE_10HALF;
2737 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2738 new_adv |= ADVERTISE_10FULL;
2739 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2740 new_adv |= ADVERTISE_100HALF;
2741 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2742 new_adv |= ADVERTISE_100FULL;
2744 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2746 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2748 if (tp->link_config.advertising &
2749 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2751 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2752 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2753 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2754 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2755 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2756 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2757 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2758 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2759 MII_TG3_CTRL_ENABLE_AS_MASTER);
2760 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2762 tg3_writephy(tp, MII_TG3_CTRL, 0);
2765 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2766 new_adv |= ADVERTISE_CSMA;
2768 /* Asking for a specific link mode. */
2769 if (tp->link_config.speed == SPEED_1000) {
2770 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2772 if (tp->link_config.duplex == DUPLEX_FULL)
2773 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2775 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2776 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2777 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2778 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2779 MII_TG3_CTRL_ENABLE_AS_MASTER);
2781 if (tp->link_config.speed == SPEED_100) {
2782 if (tp->link_config.duplex == DUPLEX_FULL)
2783 new_adv |= ADVERTISE_100FULL;
2785 new_adv |= ADVERTISE_100HALF;
2787 if (tp->link_config.duplex == DUPLEX_FULL)
2788 new_adv |= ADVERTISE_10FULL;
2790 new_adv |= ADVERTISE_10HALF;
2792 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2797 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2800 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2801 tp->link_config.speed != SPEED_INVALID) {
2802 u32 bmcr, orig_bmcr;
2804 tp->link_config.active_speed = tp->link_config.speed;
2805 tp->link_config.active_duplex = tp->link_config.duplex;
2808 switch (tp->link_config.speed) {
2814 bmcr |= BMCR_SPEED100;
2818 bmcr |= TG3_BMCR_SPEED1000;
2822 if (tp->link_config.duplex == DUPLEX_FULL)
2823 bmcr |= BMCR_FULLDPLX;
2825 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2826 (bmcr != orig_bmcr)) {
2827 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2828 for (i = 0; i < 1500; i++) {
2832 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2833 tg3_readphy(tp, MII_BMSR, &tmp))
2835 if (!(tmp & BMSR_LSTATUS)) {
2840 tg3_writephy(tp, MII_BMCR, bmcr);
2844 tg3_writephy(tp, MII_BMCR,
2845 BMCR_ANENABLE | BMCR_ANRESTART);
2849 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2853 /* Turn off tap power management. */
2854 /* Set Extended packet length bit */
2855 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2857 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2858 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2860 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2861 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2863 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2864 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2866 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2867 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2869 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2870 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
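/* Editor's note: return nonzero only if every mode requested in
 * "mask" is currently being advertised, checking MII_ADVERTISE for
 * the 10/100 modes and MII_TG3_CTRL for the gigabit modes.
 */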
2877 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2879 u32 adv_reg, all_mask = 0;
2881 if (mask & ADVERTISED_10baseT_Half)
2882 all_mask |= ADVERTISE_10HALF;
2883 if (mask & ADVERTISED_10baseT_Full)
2884 all_mask |= ADVERTISE_10FULL;
2885 if (mask & ADVERTISED_100baseT_Half)
2886 all_mask |= ADVERTISE_100HALF;
2887 if (mask & ADVERTISED_100baseT_Full)
2888 all_mask |= ADVERTISE_100FULL;
2890 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2893 if ((adv_reg & all_mask) != all_mask)
2895 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2899 if (mask & ADVERTISED_1000baseT_Half)
2900 all_mask |= ADVERTISE_1000HALF;
2901 if (mask & ADVERTISED_1000baseT_Full)
2902 all_mask |= ADVERTISE_1000FULL;
2904 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2907 if ((tg3_ctrl & all_mask) != all_mask)
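/* Editor's note: verify that the advertised pause bits match what
 * link_config asks for; if they do not, rewrite MII_ADVERTISE so that
 * the next renegotiation starts from the desired flow-control
 * settings.
 */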
2913 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2917 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2920 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2921 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2923 if (tp->link_config.active_duplex == DUPLEX_FULL) {
2924 if (curadv != reqadv)
2927 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2928 tg3_readphy(tp, MII_LPA, rmtadv);
2930 /* Reprogram the advertisement register, even if it
2931 * does not affect the current link. If the link
2932 * gets renegotiated in the future, we can save an
2933 * additional renegotiation cycle by advertising
2934 * it correctly in the first place.
2935 */
2936 if (curadv != reqadv) {
2937 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2938 ADVERTISE_PAUSE_ASYM);
2939 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
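/* Editor's note: copper link state machine. Stale MAC status is
 * cleared, known PHY workarounds are applied, BMSR/AUX_STAT are
 * polled for the negotiated speed and duplex, and MAC_MODE plus the
 * carrier state are then brought in line with the result.
 */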
2946 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2948 int current_link_up;
2950 u32 lcl_adv, rmt_adv;
2958 (MAC_STATUS_SYNC_CHANGED |
2959 MAC_STATUS_CFG_CHANGED |
2960 MAC_STATUS_MI_COMPLETION |
2961 MAC_STATUS_LNKSTATE_CHANGED));
2964 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
2966 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
2970 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2972 /* Some third-party PHYs need to be reset on link going
2975 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2976 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2977 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2978 netif_carrier_ok(tp->dev)) {
2979 tg3_readphy(tp, MII_BMSR, &bmsr);
2980 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2981 !(bmsr & BMSR_LSTATUS))
2987 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
2988 tg3_readphy(tp, MII_BMSR, &bmsr);
2989 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2990 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
2993 if (!(bmsr & BMSR_LSTATUS)) {
2994 err = tg3_init_5401phy_dsp(tp);
2998 tg3_readphy(tp, MII_BMSR, &bmsr);
2999 for (i = 0; i < 1000; i++) {
3001 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3002 (bmsr & BMSR_LSTATUS)) {
3008 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
3009 !(bmsr & BMSR_LSTATUS) &&
3010 tp->link_config.active_speed == SPEED_1000) {
3011 err = tg3_phy_reset(tp);
3013 err = tg3_init_5401phy_dsp(tp);
3018 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3019 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3020 /* 5701 {A0,B0} CRC bug workaround */
3021 tg3_writephy(tp, 0x15, 0x0a75);
3022 tg3_writephy(tp, 0x1c, 0x8c68);
3023 tg3_writephy(tp, 0x1c, 0x8d68);
3024 tg3_writephy(tp, 0x1c, 0x8c68);
3027 /* Clear pending interrupts... */
3028 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3029 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3031 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
3032 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3033 else if (!(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
3034 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3036 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3037 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3038 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3039 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3040 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3042 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3045 current_link_up = 0;
3046 current_speed = SPEED_INVALID;
3047 current_duplex = DUPLEX_INVALID;
3049 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
3052 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
3053 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
3054 if (!(val & (1 << 10))) {
3056 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
3062 for (i = 0; i < 100; i++) {
3063 tg3_readphy(tp, MII_BMSR, &bmsr);
3064 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3065 (bmsr & BMSR_LSTATUS))
3070 if (bmsr & BMSR_LSTATUS) {
3073 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3074 for (i = 0; i < 2000; i++) {
3076 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3081 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3086 for (i = 0; i < 200; i++) {
3087 tg3_readphy(tp, MII_BMCR, &bmcr);
3088 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3090 if (bmcr && bmcr != 0x7fff)
3098 tp->link_config.active_speed = current_speed;
3099 tp->link_config.active_duplex = current_duplex;
3101 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3102 if ((bmcr & BMCR_ANENABLE) &&
3103 tg3_copper_is_advertising_all(tp,
3104 tp->link_config.advertising)) {
3105 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3107 current_link_up = 1;
3110 if (!(bmcr & BMCR_ANENABLE) &&
3111 tp->link_config.speed == current_speed &&
3112 tp->link_config.duplex == current_duplex &&
3113 tp->link_config.flowctrl ==
3114 tp->link_config.active_flowctrl) {
3115 current_link_up = 1;
3119 if (current_link_up == 1 &&
3120 tp->link_config.active_duplex == DUPLEX_FULL)
3121 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3125 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
3128 tg3_phy_copper_begin(tp);
3130 tg3_readphy(tp, MII_BMSR, &tmp);
3131 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
3132 (tmp & BMSR_LSTATUS))
3133 current_link_up = 1;
3136 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3137 if (current_link_up == 1) {
3138 if (tp->link_config.active_speed == SPEED_100 ||
3139 tp->link_config.active_speed == SPEED_10)
3140 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3142 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3143 } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)
3144 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3146 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3148 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3149 if (tp->link_config.active_duplex == DUPLEX_HALF)
3150 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3152 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3153 if (current_link_up == 1 &&
3154 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3155 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3157 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3160 /* ??? Without this setting Netgear GA302T PHY does not
3161 * ??? send/receive packets...
3162 */
3163 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
3164 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3165 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3166 tw32_f(MAC_MI_MODE, tp->mi_mode);
3170 tw32_f(MAC_MODE, tp->mac_mode);
3173 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
3174 /* Polled via timer. */
3175 tw32_f(MAC_EVENT, 0);
3177 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3181 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3182 current_link_up == 1 &&
3183 tp->link_config.active_speed == SPEED_1000 &&
3184 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
3185 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
3188 (MAC_STATUS_SYNC_CHANGED |
3189 MAC_STATUS_CFG_CHANGED));
3192 NIC_SRAM_FIRMWARE_MBOX,
3193 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3196 /* Prevent send BD corruption. */
3197 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
3198 u16 oldlnkctl, newlnkctl;
3200 pci_read_config_word(tp->pdev,
3201 tp->pcie_cap + PCI_EXP_LNKCTL,
3203 if (tp->link_config.active_speed == SPEED_100 ||
3204 tp->link_config.active_speed == SPEED_10)
3205 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3207 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3208 if (newlnkctl != oldlnkctl)
3209 pci_write_config_word(tp->pdev,
3210 tp->pcie_cap + PCI_EXP_LNKCTL,
3212 } else if (tp->tg3_flags3 & TG3_FLG3_TOGGLE_10_100_L1PLLPD) {
3213 u32 newreg, oldreg = tr32(TG3_PCIE_LNKCTL);
3214 if (tp->link_config.active_speed == SPEED_100 ||
3215 tp->link_config.active_speed == SPEED_10)
3216 newreg = oldreg & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
3218 newreg = oldreg | TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
3219 if (newreg != oldreg)
3220 tw32(TG3_PCIE_LNKCTL, newreg);
3223 if (current_link_up != netif_carrier_ok(tp->dev)) {
3224 if (current_link_up)
3225 netif_carrier_on(tp->dev);
3227 netif_carrier_off(tp->dev);
3228 tg3_link_report(tp);
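/* Editor's note: software implementation of the 1000BASE-X
 * autonegotiation arbitration state machine, used when the hardware
 * autoneg engine is not available. The MR_* flags below correspond to
 * the management register variables of the IEEE 802.3 clause 37
 * model.
 */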
3234 struct tg3_fiber_aneginfo {
3236 #define ANEG_STATE_UNKNOWN 0
3237 #define ANEG_STATE_AN_ENABLE 1
3238 #define ANEG_STATE_RESTART_INIT 2
3239 #define ANEG_STATE_RESTART 3
3240 #define ANEG_STATE_DISABLE_LINK_OK 4
3241 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3242 #define ANEG_STATE_ABILITY_DETECT 6
3243 #define ANEG_STATE_ACK_DETECT_INIT 7
3244 #define ANEG_STATE_ACK_DETECT 8
3245 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3246 #define ANEG_STATE_COMPLETE_ACK 10
3247 #define ANEG_STATE_IDLE_DETECT_INIT 11
3248 #define ANEG_STATE_IDLE_DETECT 12
3249 #define ANEG_STATE_LINK_OK 13
3250 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3251 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3254 #define MR_AN_ENABLE 0x00000001
3255 #define MR_RESTART_AN 0x00000002
3256 #define MR_AN_COMPLETE 0x00000004
3257 #define MR_PAGE_RX 0x00000008
3258 #define MR_NP_LOADED 0x00000010
3259 #define MR_TOGGLE_TX 0x00000020
3260 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3261 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3262 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3263 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3264 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3265 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3266 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3267 #define MR_TOGGLE_RX 0x00002000
3268 #define MR_NP_RX 0x00004000
3270 #define MR_LINK_OK 0x80000000
3272 unsigned long link_time, cur_time;
3274 u32 ability_match_cfg;
3275 int ability_match_count;
3277 char ability_match, idle_match, ack_match;
3279 u32 txconfig, rxconfig;
3280 #define ANEG_CFG_NP 0x00000080
3281 #define ANEG_CFG_ACK 0x00000040
3282 #define ANEG_CFG_RF2 0x00000020
3283 #define ANEG_CFG_RF1 0x00000010
3284 #define ANEG_CFG_PS2 0x00000001
3285 #define ANEG_CFG_PS1 0x00008000
3286 #define ANEG_CFG_HD 0x00004000
3287 #define ANEG_CFG_FD 0x00002000
3288 #define ANEG_CFG_INVAL 0x00001f06
3293 #define ANEG_TIMER_ENAB 2
3294 #define ANEG_FAILED -1
3296 #define ANEG_STATE_SETTLE_TIME 10000
3298 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3299 struct tg3_fiber_aneginfo *ap)
3302 unsigned long delta;
3306 if (ap->state == ANEG_STATE_UNKNOWN) {
3310 ap->ability_match_cfg = 0;
3311 ap->ability_match_count = 0;
3312 ap->ability_match = 0;
3318 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3319 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3321 if (rx_cfg_reg != ap->ability_match_cfg) {
3322 ap->ability_match_cfg = rx_cfg_reg;
3323 ap->ability_match = 0;
3324 ap->ability_match_count = 0;
3326 if (++ap->ability_match_count > 1) {
3327 ap->ability_match = 1;
3328 ap->ability_match_cfg = rx_cfg_reg;
3331 if (rx_cfg_reg & ANEG_CFG_ACK)
3339 ap->ability_match_cfg = 0;
3340 ap->ability_match_count = 0;
3341 ap->ability_match = 0;
3347 ap->rxconfig = rx_cfg_reg;
3351 case ANEG_STATE_UNKNOWN:
3352 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3353 ap->state = ANEG_STATE_AN_ENABLE;
3356 case ANEG_STATE_AN_ENABLE:
3357 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3358 if (ap->flags & MR_AN_ENABLE) {
3361 ap->ability_match_cfg = 0;
3362 ap->ability_match_count = 0;
3363 ap->ability_match = 0;
3367 ap->state = ANEG_STATE_RESTART_INIT;
3369 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3373 case ANEG_STATE_RESTART_INIT:
3374 ap->link_time = ap->cur_time;
3375 ap->flags &= ~(MR_NP_LOADED);
3377 tw32(MAC_TX_AUTO_NEG, 0);
3378 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3379 tw32_f(MAC_MODE, tp->mac_mode);
3382 ret = ANEG_TIMER_ENAB;
3383 ap->state = ANEG_STATE_RESTART;
3386 case ANEG_STATE_RESTART:
3387 delta = ap->cur_time - ap->link_time;
3388 if (delta > ANEG_STATE_SETTLE_TIME) {
3389 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3391 ret = ANEG_TIMER_ENAB;
3395 case ANEG_STATE_DISABLE_LINK_OK:
3399 case ANEG_STATE_ABILITY_DETECT_INIT:
3400 ap->flags &= ~(MR_TOGGLE_TX);
3401 ap->txconfig = ANEG_CFG_FD;
3402 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3403 if (flowctrl & ADVERTISE_1000XPAUSE)
3404 ap->txconfig |= ANEG_CFG_PS1;
3405 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3406 ap->txconfig |= ANEG_CFG_PS2;
3407 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3408 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3409 tw32_f(MAC_MODE, tp->mac_mode);
3412 ap->state = ANEG_STATE_ABILITY_DETECT;
3415 case ANEG_STATE_ABILITY_DETECT:
3416 if (ap->ability_match != 0 && ap->rxconfig != 0) {
3417 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3421 case ANEG_STATE_ACK_DETECT_INIT:
3422 ap->txconfig |= ANEG_CFG_ACK;
3423 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3424 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3425 tw32_f(MAC_MODE, tp->mac_mode);
3428 ap->state = ANEG_STATE_ACK_DETECT;
3431 case ANEG_STATE_ACK_DETECT:
3432 if (ap->ack_match != 0) {
3433 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3434 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3435 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3437 ap->state = ANEG_STATE_AN_ENABLE;
3439 } else if (ap->ability_match != 0 &&
3440 ap->rxconfig == 0) {
3441 ap->state = ANEG_STATE_AN_ENABLE;
3445 case ANEG_STATE_COMPLETE_ACK_INIT:
3446 if (ap->rxconfig & ANEG_CFG_INVAL) {
3450 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3451 MR_LP_ADV_HALF_DUPLEX |
3452 MR_LP_ADV_SYM_PAUSE |
3453 MR_LP_ADV_ASYM_PAUSE |
3454 MR_LP_ADV_REMOTE_FAULT1 |
3455 MR_LP_ADV_REMOTE_FAULT2 |
3456 MR_LP_ADV_NEXT_PAGE |
3459 if (ap->rxconfig & ANEG_CFG_FD)
3460 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3461 if (ap->rxconfig & ANEG_CFG_HD)
3462 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3463 if (ap->rxconfig & ANEG_CFG_PS1)
3464 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3465 if (ap->rxconfig & ANEG_CFG_PS2)
3466 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3467 if (ap->rxconfig & ANEG_CFG_RF1)
3468 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3469 if (ap->rxconfig & ANEG_CFG_RF2)
3470 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3471 if (ap->rxconfig & ANEG_CFG_NP)
3472 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3474 ap->link_time = ap->cur_time;
3476 ap->flags ^= (MR_TOGGLE_TX);
3477 if (ap->rxconfig & 0x0008)
3478 ap->flags |= MR_TOGGLE_RX;
3479 if (ap->rxconfig & ANEG_CFG_NP)
3480 ap->flags |= MR_NP_RX;
3481 ap->flags |= MR_PAGE_RX;
3483 ap->state = ANEG_STATE_COMPLETE_ACK;
3484 ret = ANEG_TIMER_ENAB;
3487 case ANEG_STATE_COMPLETE_ACK:
3488 if (ap->ability_match != 0 &&
3489 ap->rxconfig == 0) {
3490 ap->state = ANEG_STATE_AN_ENABLE;
3493 delta = ap->cur_time - ap->link_time;
3494 if (delta > ANEG_STATE_SETTLE_TIME) {
3495 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3496 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3498 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3499 !(ap->flags & MR_NP_RX)) {
3500 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3508 case ANEG_STATE_IDLE_DETECT_INIT:
3509 ap->link_time = ap->cur_time;
3510 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3511 tw32_f(MAC_MODE, tp->mac_mode);
3514 ap->state = ANEG_STATE_IDLE_DETECT;
3515 ret = ANEG_TIMER_ENAB;
3518 case ANEG_STATE_IDLE_DETECT:
3519 if (ap->ability_match != 0 &&
3520 ap->rxconfig == 0) {
3521 ap->state = ANEG_STATE_AN_ENABLE;
3524 delta = ap->cur_time - ap->link_time;
3525 if (delta > ANEG_STATE_SETTLE_TIME) {
3526 /* XXX another gem from the Broadcom driver :( */
3527 ap->state = ANEG_STATE_LINK_OK;
3531 case ANEG_STATE_LINK_OK:
3532 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3536 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3537 /* ??? unimplemented */
3540 case ANEG_STATE_NEXT_PAGE_WAIT:
3541 /* ??? unimplemented */
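/* Editor's note: run the state machine to completion. GMII port mode
 * is forced while config words are being sent, tg3_fiber_aneg_smachine()
 * is ticked until it reports done or failure, and the negotiated
 * tx/rx config words are then handed back to the caller.
 */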
3552 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3555 struct tg3_fiber_aneginfo aninfo;
3556 int status = ANEG_FAILED;
3560 tw32_f(MAC_TX_AUTO_NEG, 0);
3562 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3563 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3566 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3569 memset(&aninfo, 0, sizeof(aninfo));
3570 aninfo.flags |= MR_AN_ENABLE;
3571 aninfo.state = ANEG_STATE_UNKNOWN;
3572 aninfo.cur_time = 0;
3574 while (++tick < 195000) {
3575 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3576 if (status == ANEG_DONE || status == ANEG_FAILED)
3582 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3583 tw32_f(MAC_MODE, tp->mac_mode);
3586 *txflags = aninfo.txconfig;
3587 *rxflags = aninfo.flags;
3589 if (status == ANEG_DONE &&
3590 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3591 MR_LP_ADV_FULL_DUPLEX)))
3597 static void tg3_init_bcm8002(struct tg3 *tp)
3599 u32 mac_status = tr32(MAC_STATUS);
3602 /* Reset when initializing for the first time or when we have a link. */
3603 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3604 !(mac_status & MAC_STATUS_PCS_SYNCED))
3607 /* Set PLL lock range. */
3608 tg3_writephy(tp, 0x16, 0x8007);
3611 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3613 /* Wait for reset to complete. */
3614 /* XXX schedule_timeout() ... */
3615 for (i = 0; i < 500; i++)
3618 /* Config mode; select PMA/Ch 1 regs. */
3619 tg3_writephy(tp, 0x10, 0x8411);
3621 /* Enable auto-lock and comdet, select txclk for tx. */
3622 tg3_writephy(tp, 0x11, 0x0a10);
3624 tg3_writephy(tp, 0x18, 0x00a0);
3625 tg3_writephy(tp, 0x16, 0x41ff);
3627 /* Assert and deassert POR. */
3628 tg3_writephy(tp, 0x13, 0x0400);
3630 tg3_writephy(tp, 0x13, 0x0000);
3632 tg3_writephy(tp, 0x11, 0x0a50);
3634 tg3_writephy(tp, 0x11, 0x0a10);
3636 /* Wait for signal to stabilize */
3637 /* XXX schedule_timeout() ... */
3638 for (i = 0; i < 15000; i++)
3641 /* Deselect the channel register so we can read the PHYID
3642 * later.
3643 */
3644 tg3_writephy(tp, 0x10, 0x8011);
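/* Editor's note: fiber link setup using the SG_DIG hardware autoneg
 * engine. Handles the forced-mode case, restarts autoneg whenever the
 * wanted SG_DIG_CTRL value differs from the current one, and falls
 * back to parallel detection when the partner sends no config words.
 */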
3647 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3650 u32 sg_dig_ctrl, sg_dig_status;
3651 u32 serdes_cfg, expected_sg_dig_ctrl;
3652 int workaround, port_a;
3653 int current_link_up;
3656 expected_sg_dig_ctrl = 0;
3659 current_link_up = 0;
3661 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3662 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3664 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3667 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3668 /* preserve bits 20-23 for voltage regulator */
3669 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3672 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3674 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3675 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3677 u32 val = serdes_cfg;
3683 tw32_f(MAC_SERDES_CFG, val);
3686 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3688 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3689 tg3_setup_flow_control(tp, 0, 0);
3690 current_link_up = 1;
3695 /* Want auto-negotiation. */
3696 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3698 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3699 if (flowctrl & ADVERTISE_1000XPAUSE)
3700 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3701 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3702 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3704 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3705 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3706 tp->serdes_counter &&
3707 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3708 MAC_STATUS_RCVD_CFG)) ==
3709 MAC_STATUS_PCS_SYNCED)) {
3710 tp->serdes_counter--;
3711 current_link_up = 1;
3716 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3717 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3719 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3721 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3722 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3723 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3724 MAC_STATUS_SIGNAL_DET)) {
3725 sg_dig_status = tr32(SG_DIG_STATUS);
3726 mac_status = tr32(MAC_STATUS);
3728 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3729 (mac_status & MAC_STATUS_PCS_SYNCED)) {
3730 u32 local_adv = 0, remote_adv = 0;
3732 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3733 local_adv |= ADVERTISE_1000XPAUSE;
3734 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3735 local_adv |= ADVERTISE_1000XPSE_ASYM;
3737 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3738 remote_adv |= LPA_1000XPAUSE;
3739 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3740 remote_adv |= LPA_1000XPAUSE_ASYM;
3742 tg3_setup_flow_control(tp, local_adv, remote_adv);
3743 current_link_up = 1;
3744 tp->serdes_counter = 0;
3745 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3746 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3747 if (tp->serdes_counter)
3748 tp->serdes_counter--;
3751 u32 val = serdes_cfg;
3758 tw32_f(MAC_SERDES_CFG, val);
3761 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3764 /* Link parallel detection: link is up only if
3765 * we have PCS_SYNC and are not receiving
3766 * config code words. */
3767 mac_status = tr32(MAC_STATUS);
3768 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3769 !(mac_status & MAC_STATUS_RCVD_CFG)) {
3770 tg3_setup_flow_control(tp, 0, 0);
3771 current_link_up = 1;
3773 TG3_FLG2_PARALLEL_DETECT;
3774 tp->serdes_counter =
3775 SERDES_PARALLEL_DET_TIMEOUT;
3777 goto restart_autoneg;
3781 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3782 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3786 return current_link_up;
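/* Editor's note: fiber link setup without the SG_DIG engine. The
 * software aneg state machine is run when autonegotiation is enabled;
 * otherwise a 1000FD link is forced as soon as PCS sync is seen.
 */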
3789 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3791 int current_link_up = 0;
3793 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3796 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3797 u32 txflags, rxflags;
3800 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3801 u32 local_adv = 0, remote_adv = 0;
3803 if (txflags & ANEG_CFG_PS1)
3804 local_adv |= ADVERTISE_1000XPAUSE;
3805 if (txflags & ANEG_CFG_PS2)
3806 local_adv |= ADVERTISE_1000XPSE_ASYM;
3808 if (rxflags & MR_LP_ADV_SYM_PAUSE)
3809 remote_adv |= LPA_1000XPAUSE;
3810 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3811 remote_adv |= LPA_1000XPAUSE_ASYM;
3813 tg3_setup_flow_control(tp, local_adv, remote_adv);
3815 current_link_up = 1;
3817 for (i = 0; i < 30; i++) {
3820 (MAC_STATUS_SYNC_CHANGED |
3821 MAC_STATUS_CFG_CHANGED));
3823 if ((tr32(MAC_STATUS) &
3824 (MAC_STATUS_SYNC_CHANGED |
3825 MAC_STATUS_CFG_CHANGED)) == 0)
3829 mac_status = tr32(MAC_STATUS);
3830 if (current_link_up == 0 &&
3831 (mac_status & MAC_STATUS_PCS_SYNCED) &&
3832 !(mac_status & MAC_STATUS_RCVD_CFG))
3833 current_link_up = 1;
3835 tg3_setup_flow_control(tp, 0, 0);
3837 /* Forcing 1000FD link up. */
3838 current_link_up = 1;
3840 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3843 tw32_f(MAC_MODE, tp->mac_mode);
3848 return current_link_up;
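/* Editor's note: top-level fiber link setup. TBI port mode is forced,
 * either the hardware or the by-hand autoneg path is run, and MAC
 * status, the link LED, and the carrier state are then settled; the
 * link is reported if the pause configuration, speed, or duplex
 * changed.
 */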
3851 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3854 u16 orig_active_speed;
3855 u8 orig_active_duplex;
3857 int current_link_up;
3860 orig_pause_cfg = tp->link_config.active_flowctrl;
3861 orig_active_speed = tp->link_config.active_speed;
3862 orig_active_duplex = tp->link_config.active_duplex;
3864 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3865 netif_carrier_ok(tp->dev) &&
3866 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3867 mac_status = tr32(MAC_STATUS);
3868 mac_status &= (MAC_STATUS_PCS_SYNCED |
3869 MAC_STATUS_SIGNAL_DET |
3870 MAC_STATUS_CFG_CHANGED |
3871 MAC_STATUS_RCVD_CFG);
3872 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3873 MAC_STATUS_SIGNAL_DET)) {
3874 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3875 MAC_STATUS_CFG_CHANGED));
3880 tw32_f(MAC_TX_AUTO_NEG, 0);
3882 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3883 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3884 tw32_f(MAC_MODE, tp->mac_mode);
3887 if (tp->phy_id == PHY_ID_BCM8002)
3888 tg3_init_bcm8002(tp);
3890 /* Enable link change event even when serdes polling. */
3891 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3894 current_link_up = 0;
3895 mac_status = tr32(MAC_STATUS);
3897 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3898 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3900 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3902 tp->hw_status->status =
3903 (SD_STATUS_UPDATED |
3904 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
3906 for (i = 0; i < 100; i++) {
3907 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3908 MAC_STATUS_CFG_CHANGED));
3910 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3911 MAC_STATUS_CFG_CHANGED |
3912 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
3916 mac_status = tr32(MAC_STATUS);
3917 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3918 current_link_up = 0;
3919 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3920 tp->serdes_counter == 0) {
3921 tw32_f(MAC_MODE, (tp->mac_mode |
3922 MAC_MODE_SEND_CONFIGS));
3924 tw32_f(MAC_MODE, tp->mac_mode);
3928 if (current_link_up == 1) {
3929 tp->link_config.active_speed = SPEED_1000;
3930 tp->link_config.active_duplex = DUPLEX_FULL;
3931 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3932 LED_CTRL_LNKLED_OVERRIDE |
3933 LED_CTRL_1000MBPS_ON));
3935 tp->link_config.active_speed = SPEED_INVALID;
3936 tp->link_config.active_duplex = DUPLEX_INVALID;
3937 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3938 LED_CTRL_LNKLED_OVERRIDE |
3939 LED_CTRL_TRAFFIC_OVERRIDE));
3942 if (current_link_up != netif_carrier_ok(tp->dev)) {
3943 if (current_link_up)
3944 netif_carrier_on(tp->dev);
3946 netif_carrier_off(tp->dev);
3947 tg3_link_report(tp);
3949 u32 now_pause_cfg = tp->link_config.active_flowctrl;
3950 if (orig_pause_cfg != now_pause_cfg ||
3951 orig_active_speed != tp->link_config.active_speed ||
3952 orig_active_duplex != tp->link_config.active_duplex)
3953 tg3_link_report(tp);
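/* Editor's note: link setup for serdes devices managed through an
 * MII-style register set. The 1000BASE-X advertisement bits are
 * programmed via MII_ADVERTISE, and the duplex setting is derived
 * from the abilities common to both link partners.
 */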
3959 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
3961 int current_link_up, err = 0;
3965 u32 local_adv, remote_adv;
3967 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3968 tw32_f(MAC_MODE, tp->mac_mode);
3974 (MAC_STATUS_SYNC_CHANGED |
3975 MAC_STATUS_CFG_CHANGED |
3976 MAC_STATUS_MI_COMPLETION |
3977 MAC_STATUS_LNKSTATE_CHANGED));
3983 current_link_up = 0;
3984 current_speed = SPEED_INVALID;
3985 current_duplex = DUPLEX_INVALID;
3987 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3988 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3989 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
3990 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3991 bmsr |= BMSR_LSTATUS;
3993 bmsr &= ~BMSR_LSTATUS;
3996 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
3998 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
3999 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4000 /* do nothing, just check for link up at the end */
4001 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4004 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4005 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4006 ADVERTISE_1000XPAUSE |
4007 ADVERTISE_1000XPSE_ASYM |
4010 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4012 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4013 new_adv |= ADVERTISE_1000XHALF;
4014 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4015 new_adv |= ADVERTISE_1000XFULL;
4017 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4018 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4019 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4020 tg3_writephy(tp, MII_BMCR, bmcr);
4022 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4023 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4024 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4031 bmcr &= ~BMCR_SPEED1000;
4032 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4034 if (tp->link_config.duplex == DUPLEX_FULL)
4035 new_bmcr |= BMCR_FULLDPLX;
4037 if (new_bmcr != bmcr) {
4038 /* BMCR_SPEED1000 is a reserved bit that needs
4039 * to be set on write.
4041 new_bmcr |= BMCR_SPEED1000;
4043 /* Force a linkdown */
4044 if (netif_carrier_ok(tp->dev)) {
4047 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4048 adv &= ~(ADVERTISE_1000XFULL |
4049 ADVERTISE_1000XHALF |
4051 tg3_writephy(tp, MII_ADVERTISE, adv);
4052 tg3_writephy(tp, MII_BMCR, bmcr |
4056 netif_carrier_off(tp->dev);
4058 tg3_writephy(tp, MII_BMCR, new_bmcr);
4060 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4061 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4062 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4064 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4065 bmsr |= BMSR_LSTATUS;
4067 bmsr &= ~BMSR_LSTATUS;
4069 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4073 if (bmsr & BMSR_LSTATUS) {
4074 current_speed = SPEED_1000;
4075 current_link_up = 1;
4076 if (bmcr & BMCR_FULLDPLX)
4077 current_duplex = DUPLEX_FULL;
4079 current_duplex = DUPLEX_HALF;
4084 if (bmcr & BMCR_ANENABLE) {
4087 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4088 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4089 common = local_adv & remote_adv;
4090 if (common & (ADVERTISE_1000XHALF |
4091 ADVERTISE_1000XFULL)) {
4092 if (common & ADVERTISE_1000XFULL)
4093 current_duplex = DUPLEX_FULL;
4095 current_duplex = DUPLEX_HALF;
4098 current_link_up = 0;
4102 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4103 tg3_setup_flow_control(tp, local_adv, remote_adv);
4105 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4106 if (tp->link_config.active_duplex == DUPLEX_HALF)
4107 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4109 tw32_f(MAC_MODE, tp->mac_mode);
4112 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4114 tp->link_config.active_speed = current_speed;
4115 tp->link_config.active_duplex = current_duplex;
4117 if (current_link_up != netif_carrier_ok(tp->dev)) {
4118 if (current_link_up)
4119 netif_carrier_on(tp->dev);
4121 netif_carrier_off(tp->dev);
4122 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4124 tg3_link_report(tp);
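/* Editor's note: periodic helper for parallel detection. Once autoneg
 * has had time to complete, a 1000FD link is forced if signal detect
 * is present but no config words are being received; if config words
 * reappear later, autonegotiation is re-enabled.
 */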
4129 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4131 if (tp->serdes_counter) {
4132 /* Give autoneg time to complete. */
4133 tp->serdes_counter--;
4136 if (!netif_carrier_ok(tp->dev) &&
4137 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4140 tg3_readphy(tp, MII_BMCR, &bmcr);
4141 if (bmcr & BMCR_ANENABLE) {
4144 /* Select shadow register 0x1f */
4145 tg3_writephy(tp, 0x1c, 0x7c00);
4146 tg3_readphy(tp, 0x1c, &phy1);
4148 /* Select expansion interrupt status register */
4149 tg3_writephy(tp, 0x17, 0x0f01);
4150 tg3_readphy(tp, 0x15, &phy2);
4151 tg3_readphy(tp, 0x15, &phy2);
4153 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4154 /* We have signal detect and not receiving
4155 * config code words, link is up by parallel
4156 * detection.
4157 */
4159 bmcr &= ~BMCR_ANENABLE;
4160 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4161 tg3_writephy(tp, MII_BMCR, bmcr);
4162 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
4166 else if (netif_carrier_ok(tp->dev) &&
4167 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4168 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4171 /* Select expansion interrupt status register */
4172 tg3_writephy(tp, 0x17, 0x0f01);
4173 tg3_readphy(tp, 0x15, &phy2);
4177 /* Config code words received, turn on autoneg. */
4178 tg3_readphy(tp, MII_BMCR, &bmcr);
4179 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4181 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
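/* Editor's note: top-level link (re)configuration. Dispatches to the
 * copper, fiber, or fiber-MII handler, then fixes up the CPMU timer
 * prescaler on 5784_AX, the MAC transmit lengths for half-duplex
 * gigabit, statistics coalescing, and the ASPM L1 entry threshold.
 */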
4187 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4191 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4192 err = tg3_setup_fiber_phy(tp, force_reset);
4193 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4194 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4196 err = tg3_setup_copper_phy(tp, force_reset);
4199 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4202 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4203 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4205 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4210 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4211 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4212 tw32(GRC_MISC_CFG, val);
4215 if (tp->link_config.active_speed == SPEED_1000 &&
4216 tp->link_config.active_duplex == DUPLEX_HALF)
4217 tw32(MAC_TX_LENGTHS,
4218 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4219 (6 << TX_LENGTHS_IPG_SHIFT) |
4220 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
4222 tw32(MAC_TX_LENGTHS,
4223 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4224 (6 << TX_LENGTHS_IPG_SHIFT) |
4225 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
4227 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4228 if (netif_carrier_ok(tp->dev)) {
4229 tw32(HOSTCC_STAT_COAL_TICKS,
4230 tp->coal.stats_block_coalesce_usecs);
4232 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4236 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
4237 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
4238 if (!netif_carrier_ok(tp->dev))
4239 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4242 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4243 tw32(PCIE_PWR_MGMT_THRESH, val);
4249 /* This is called whenever we suspect that the system chipset is re-
4250 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4251 * is bogus tx completions. We try to recover by setting the
4252 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4255 static void tg3_tx_recover(struct tg3 *tp)
4257 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
4258 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4260 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
4261 "mapped I/O cycles to the network device, attempting to "
4262 "recover. Please report the problem to the driver maintainer "
4263 "and include system chipset information.\n", tp->dev->name);
4265 spin_lock(&tp->lock);
4266 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
4267 spin_unlock(&tp->lock);
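/* Editor's note: number of free transmit descriptors, computed as the
 * configured ring depth minus the masked distance between the
 * producer and consumer indices.
 */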
4270 static inline u32 tg3_tx_avail(struct tg3 *tp)
4273 return (tp->tx_pending -
4274 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
4277 /* Tigon3 never reports partial packet sends. So we do not
4278 * need special logic to handle SKBs that have not had all
4279 * of their frags sent yet, like SunGEM does.
4280 */
4281 static void tg3_tx(struct tg3 *tp)
4283 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
4284 u32 sw_idx = tp->tx_cons;
4286 while (sw_idx != hw_idx) {
4287 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
4288 struct sk_buff *skb = ri->skb;
4291 if (unlikely(skb == NULL)) {
4296 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
4300 sw_idx = NEXT_TX(sw_idx);
4302 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4303 ri = &tp->tx_buffers[sw_idx];
4304 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4306 sw_idx = NEXT_TX(sw_idx);
4311 if (unlikely(tx_bug)) {
4317 tp->tx_cons = sw_idx;
4319 /* Need to make the tx_cons update visible to tg3_start_xmit()
4320 * before checking for netif_queue_stopped(). Without the
4321 * memory barrier, there is a small possibility that tg3_start_xmit()
4322 * will miss it and cause the queue to be stopped forever.
4323 */
4324 smp_mb();
4326 if (unlikely(netif_queue_stopped(tp->dev) &&
4327 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
4328 netif_tx_lock(tp->dev);
4329 if (netif_queue_stopped(tp->dev) &&
4330 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
4331 netif_wake_queue(tp->dev);
4332 netif_tx_unlock(tp->dev);
4336 /* Returns size of skb allocated or < 0 on error.
4338 * We only need to fill in the address because the other members
4339 * of the RX descriptor are invariant, see tg3_init_rings.
4341 * Note the purposeful asymmetry of cpu vs. chip accesses. For
4342 * posting buffers we only dirty the first cache line of the RX
4343 * descriptor (containing the address). Whereas for the RX status
4344 * buffers the cpu only reads the last cacheline of the RX descriptor
4345 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4346 */
4347 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
4348 int src_idx, u32 dest_idx_unmasked)
4350 struct tg3_rx_buffer_desc *desc;
4351 struct ring_info *map, *src_map;
4352 struct sk_buff *skb;
4354 int skb_size, dest_idx;
4355 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
4358 switch (opaque_key) {
4359 case RXD_OPAQUE_RING_STD:
4360 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4361 desc = &tpr->rx_std[dest_idx];
4362 map = &tpr->rx_std_buffers[dest_idx];
4364 src_map = &tpr->rx_std_buffers[src_idx];
4365 skb_size = tp->rx_pkt_map_sz;
4368 case RXD_OPAQUE_RING_JUMBO:
4369 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4370 desc = &tpr->rx_jmb[dest_idx].std;
4371 map = &tpr->rx_jmb_buffers[dest_idx];
4373 src_map = &tpr->rx_jmb_buffers[src_idx];
4374 skb_size = TG3_RX_JMB_MAP_SZ;
4381 /* Do not overwrite any of the map or rp information
4382 * until we are sure we can commit to a new buffer.
4384 * Callers depend upon this behavior and assume that
4385 * we leave everything unchanged if we fail.
4386 */
4387 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4391 skb_reserve(skb, tp->rx_offset);
4393 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4394 PCI_DMA_FROMDEVICE);
4397 pci_unmap_addr_set(map, mapping, mapping);
4399 if (src_map != NULL)
4400 src_map->skb = NULL;
4402 desc->addr_hi = ((u64)mapping >> 32);
4403 desc->addr_lo = ((u64)mapping & 0xffffffff);
4408 /* We only need to move over in the address because the other
4409 * members of the RX descriptor are invariant. See notes above
4410 * tg3_alloc_rx_skb for full details.
4411 */
4412 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
4413 int src_idx, u32 dest_idx_unmasked)
4415 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4416 struct ring_info *src_map, *dest_map;
4418 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
4420 switch (opaque_key) {
4421 case RXD_OPAQUE_RING_STD:
4422 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4423 dest_desc = &tpr->rx_std[dest_idx];
4424 dest_map = &tpr->rx_std_buffers[dest_idx];
4425 src_desc = &tpr->rx_std[src_idx];
4426 src_map = &tpr->rx_std_buffers[src_idx];
4429 case RXD_OPAQUE_RING_JUMBO:
4430 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4431 dest_desc = &tpr->rx_jmb[dest_idx].std;
4432 dest_map = &tpr->rx_jmb_buffers[dest_idx];
4433 src_desc = &tpr->rx_jmb[src_idx].std;
4434 src_map = &tpr->rx_jmb_buffers[src_idx];
4441 dest_map->skb = src_map->skb;
4442 pci_unmap_addr_set(dest_map, mapping,
4443 pci_unmap_addr(src_map, mapping));
4444 dest_desc->addr_hi = src_desc->addr_hi;
4445 dest_desc->addr_lo = src_desc->addr_lo;
4447 src_map->skb = NULL;
4450 #if TG3_VLAN_TAG_USED
4451 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
4453 return vlan_gro_receive(&tp->napi, tp->vlgrp, vlan_tag, skb);
4457 /* The RX ring scheme is composed of multiple rings which post fresh
4458 * buffers to the chip, and one special ring the chip uses to report
4459 * status back to the host.
4461 * The special ring reports the status of received packets to the
4462 * host. The chip does not write into the original descriptor the
4463 * RX buffer was obtained from. The chip simply takes the original
4464 * descriptor as provided by the host, updates the status and length
4465 * field, then writes this into the next status ring entry.
4467 * Each ring the host uses to post buffers to the chip is described
4468 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
4469 * it is first placed into the on-chip ram. When the packet's length
4470 * is known, it walks down the TG3_BDINFO entries to select the ring.
4471 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4472 * which is within the range of the new packet's length is chosen.
4474 * The "separate ring for rx status" scheme may sound queer, but it makes
4475 * sense from a cache coherency perspective. If only the host writes
4476 * to the buffer post rings, and only the chip writes to the rx status
4477 * rings, then cache lines never move beyond shared-modified state.
4478 * If both the host and chip were to write into the same ring, cache line
4479 * eviction could occur since both entities want it in an exclusive state.
4480 */
4481 static int tg3_rx(struct tg3 *tp, int budget)
4483 u32 work_mask, rx_std_posted = 0;
4484 u32 sw_idx = tp->rx_rcb_ptr;
4487 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
4489 hw_idx = tp->hw_status->idx[0].rx_producer;
4490 /*
4491 * We need to order the read of hw_idx and the read of
4492 * the opaque cookie.
4493 */
4494 rmb();
4497 while (sw_idx != hw_idx && budget > 0) {
4498 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
4500 struct sk_buff *skb;
4501 dma_addr_t dma_addr;
4502 u32 opaque_key, desc_idx, *post_ptr;
4504 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4505 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4506 if (opaque_key == RXD_OPAQUE_RING_STD) {
4507 struct ring_info *ri = &tpr->rx_std_buffers[desc_idx];
4508 dma_addr = pci_unmap_addr(ri, mapping);
4510 post_ptr = &tpr->rx_std_ptr;
4512 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4513 struct ring_info *ri = &tpr->rx_jmb_buffers[desc_idx];
4514 dma_addr = pci_unmap_addr(ri, mapping);
4516 post_ptr = &tpr->rx_jmb_ptr;
4518 goto next_pkt_nopost;
4520 work_mask |= opaque_key;
4522 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4523 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4525 tg3_recycle_rx(tp, opaque_key,
4526 desc_idx, *post_ptr);
4528 /* Other statistics kept track of by card. */
4529 tp->net_stats.rx_dropped++;
4533 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4536 if (len > RX_COPY_THRESHOLD
4537 && tp->rx_offset == NET_IP_ALIGN
4538 /* rx_offset will likely not equal NET_IP_ALIGN
4539 * if this is a 5701 card running in PCI-X mode
4540 * [see tg3_get_invariants()]
4541 */
4542 ) {
4543 int skb_size;
4545 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
4546 desc_idx, *post_ptr);
4550 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4551 PCI_DMA_FROMDEVICE);
4555 struct sk_buff *copy_skb;
4557 tg3_recycle_rx(tp, opaque_key,
4558 desc_idx, *post_ptr);
4560 copy_skb = netdev_alloc_skb(tp->dev,
4561 len + TG3_RAW_IP_ALIGN);
4562 if (copy_skb == NULL)
4563 goto drop_it_no_recycle;
4565 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4566 skb_put(copy_skb, len);
4567 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4568 skb_copy_from_linear_data(skb, copy_skb->data, len);
4569 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4571 /* We'll reuse the original ring buffer. */
4575 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4576 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4577 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4578 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4579 skb->ip_summed = CHECKSUM_UNNECESSARY;
4581 skb->ip_summed = CHECKSUM_NONE;
4583 skb->protocol = eth_type_trans(skb, tp->dev);
4585 if (len > (tp->dev->mtu + ETH_HLEN) &&
4586 skb->protocol != htons(ETH_P_8021Q)) {
4587 dev_kfree_skb(skb);
4588 goto next_pkt;
4589 }
4591 #if TG3_VLAN_TAG_USED
4592 if (tp->vlgrp != NULL &&
4593 desc->type_flags & RXD_FLAG_VLAN) {
4594 tg3_vlan_rx(tp, skb,
4595 desc->err_vlan & RXD_VLAN_MASK);
4598 napi_gro_receive(&tp->napi, skb);
4606 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4607 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
4609 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
4610 TG3_64BIT_REG_LOW, idx);
4611 work_mask &= ~RXD_OPAQUE_RING_STD;
4616 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
4618 /* Refresh hw_idx to see if there is new work */
4619 if (sw_idx == hw_idx) {
4620 hw_idx = tp->hw_status->idx[0].rx_producer;
4625 /* ACK the status ring. */
4626 tp->rx_rcb_ptr = sw_idx;
4627 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
4629 /* Refill RX ring(s). */
4630 if (work_mask & RXD_OPAQUE_RING_STD) {
4631 sw_idx = tpr->rx_std_ptr % TG3_RX_RING_SIZE;
4632 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
4635 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4636 sw_idx = tpr->rx_jmb_ptr % TG3_RX_JUMBO_RING_SIZE;
4637 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
4645 static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
4647 struct tg3_hw_status *sblk = tp->hw_status;
4649 /* handle link change and other phy events */
4650 if (!(tp->tg3_flags &
4651 (TG3_FLAG_USE_LINKCHG_REG |
4652 TG3_FLAG_POLL_SERDES))) {
4653 if (sblk->status & SD_STATUS_LINK_CHG) {
4654 sblk->status = SD_STATUS_UPDATED |
4655 (sblk->status & ~SD_STATUS_LINK_CHG);
4656 spin_lock(&tp->lock);
4657 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4659 (MAC_STATUS_SYNC_CHANGED |
4660 MAC_STATUS_CFG_CHANGED |
4661 MAC_STATUS_MI_COMPLETION |
4662 MAC_STATUS_LNKSTATE_CHANGED));
4665 tg3_setup_phy(tp, 0);
4666 spin_unlock(&tp->lock);
4670 /* run TX completion thread */
4671 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
4673 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4677 /* run RX thread, within the bounds set by NAPI.
4678 * All RX "locking" is done by ensuring outside
4679 * code synchronizes with tg3->napi.poll()
4681 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
4682 work_done += tg3_rx(tp, budget - work_done);
4687 static int tg3_poll(struct napi_struct *napi, int budget)
4689 struct tg3 *tp = container_of(napi, struct tg3, napi);
4691 struct tg3_hw_status *sblk = tp->hw_status;
4694 work_done = tg3_poll_work(tp, work_done, budget);
4696 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4699 if (unlikely(work_done >= budget))
4702 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
4703 /* tp->last_tag is used in tg3_restart_ints() below
4704 * to tell the hw how much work has been processed,
4705 * so we must read it before checking for more work.
4707 tp->last_tag = sblk->status_tag;
4708 tp->last_irq_tag = tp->last_tag;
4711 sblk->status &= ~SD_STATUS_UPDATED;
4713 if (likely(!tg3_has_work(tp))) {
4714 napi_complete(napi);
4715 tg3_restart_ints(tp);
4723 /* work_done is guaranteed to be less than budget. */
4724 napi_complete(napi);
4725 schedule_work(&tp->reset_task);
4729 static void tg3_irq_quiesce(struct tg3 *tp)
4731 BUG_ON(tp->irq_sync);
4736 synchronize_irq(tp->pdev->irq);
4739 static inline int tg3_irq_sync(struct tg3 *tp)
4741 return tp->irq_sync;
4744 /* Fully shutdown all tg3 driver activity elsewhere in the system.
4745 * If irq_sync is non-zero, then the IRQ handler must be synchronized
4746 * with as well. Most of the time, this is not necessary except when
4747 * shutting down the device.
4749 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
4751 spin_lock_bh(&tp->lock);
4753 tg3_irq_quiesce(tp);
4756 static inline void tg3_full_unlock(struct tg3 *tp)
4758 spin_unlock_bh(&tp->lock);
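/* Editor's sketch: typical pairing of the helpers above around a full
 * reconfiguration (compare tg3_change_mtu() further down). Illustrative
 * only; the body comment marks steps that are assumptions here.
 */
#if 0
static void example_reconfigure(struct tg3 *tp)
{
	tg3_full_lock(tp, 1);	/* irq_sync != 0: also quiesce the IRQ */
	/* ... halt, reprogram and restart the hardware here ... */
	tg3_full_unlock(tp);
}
#endif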
4761 /* One-shot MSI handler - Chip automatically disables interrupt
4762 * after sending MSI so driver doesn't have to do it.
4764 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
4766 struct net_device *dev = dev_id;
4767 struct tg3 *tp = netdev_priv(dev);
4769 prefetch(tp->hw_status);
4770 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4772 if (likely(!tg3_irq_sync(tp)))
4773 napi_schedule(&tp->napi);
4778 /* MSI ISR - No need to check for interrupt sharing and no need to
4779 * flush status block and interrupt mailbox. PCI ordering rules
4780 * guarantee that MSI will arrive after the status block.
4782 static irqreturn_t tg3_msi(int irq, void *dev_id)
4784 struct net_device *dev = dev_id;
4785 struct tg3 *tp = netdev_priv(dev);
4787 prefetch(tp->hw_status);
4788 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4790 * Writing any value to intr-mbox-0 clears PCI INTA# and
4791 * chip-internal interrupt pending events.
4792 * Writing non-zero to intr-mbox-0 additionally tells the
4793 * NIC to stop sending us irqs, engaging "in-intr-handler"
4796 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4797 if (likely(!tg3_irq_sync(tp)))
4798 napi_schedule(&tp->napi);
4800 return IRQ_RETVAL(1);
4803 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
4805 struct net_device *dev = dev_id;
4806 struct tg3 *tp = netdev_priv(dev);
4807 struct tg3_hw_status *sblk = tp->hw_status;
4808 unsigned int handled = 1;
4810 /* In INTx mode, it is possible for the interrupt to arrive at
4811 * the CPU before the status block that was posted prior to it.
4812 * Reading the PCI State register will confirm whether the
4813 * interrupt is ours and will flush the status block.
4815 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
4816 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4817 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4824 * Writing any value to intr-mbox-0 clears PCI INTA# and
4825 * chip-internal interrupt pending events.
4826 * Writing non-zero to intr-mbox-0 additionally tells the
4827 * NIC to stop sending us irqs, engaging "in-intr-handler"
4830 * Flush the mailbox to de-assert the IRQ immediately to prevent
4831 * spurious interrupts. The flush impacts performance but
4832 * excessive spurious interrupts can be worse in some cases.
4834 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4835 if (tg3_irq_sync(tp))
4837 sblk->status &= ~SD_STATUS_UPDATED;
4838 if (likely(tg3_has_work(tp))) {
4839 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4840 napi_schedule(&tp->napi);
4842 /* No work, shared interrupt perhaps? re-enable
4843 * interrupts, and flush that PCI write
4845 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
4849 return IRQ_RETVAL(handled);
4852 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
4854 struct net_device *dev = dev_id;
4855 struct tg3 *tp = netdev_priv(dev);
4856 struct tg3_hw_status *sblk = tp->hw_status;
4857 unsigned int handled = 1;
4859 /* In INTx mode, it is possible for the interrupt to arrive at
4861 * the CPU before the status block that was posted prior to it.
4861 * Reading the PCI State register will confirm whether the
4862 * interrupt is ours and will flush the status block.
4864 if (unlikely(sblk->status_tag == tp->last_irq_tag)) {
4865 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4866 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4873 * writing any value to intr-mbox-0 clears PCI INTA# and
4874 * chip-internal interrupt pending events.
4875 * writing non-zero to intr-mbox-0 additionally tells the
4876 * NIC to stop sending us irqs, engaging "in-intr-handler"
4879 * Flush the mailbox to de-assert the IRQ immediately to prevent
4880 * spurious interrupts. The flush impacts performance but
4881 * excessive spurious interrupts can be worse in some cases.
4883 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4886 * In a shared interrupt configuration, sometimes other devices'
4887 * interrupts will scream. We record the current status tag here
4888 * so that the above check can report that the screaming interrupts
4889 * are unhandled. Eventually they will be silenced.
4891 tp->last_irq_tag = sblk->status_tag;
4893 if (tg3_irq_sync(tp))
4896 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4898 napi_schedule(&tp->napi);
4901 return IRQ_RETVAL(handled);
4904 /* ISR for interrupt test */
4905 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
4907 struct net_device *dev = dev_id;
4908 struct tg3 *tp = netdev_priv(dev);
4909 struct tg3_hw_status *sblk = tp->hw_status;
4911 if ((sblk->status & SD_STATUS_UPDATED) ||
4912 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4913 tg3_disable_ints(tp);
4914 return IRQ_RETVAL(1);
4916 return IRQ_RETVAL(0);
4919 static int tg3_init_hw(struct tg3 *, int);
4920 static int tg3_halt(struct tg3 *, int, int);
4922 /* Restart hardware after configuration changes, self-test, etc.
4923 * Invoked with tp->lock held.
4925 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
4926 __releases(tp->lock)
4927 __acquires(tp->lock)
4931 err = tg3_init_hw(tp, reset_phy);
4933 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
4934 "aborting.\n", tp->dev->name);
4935 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4936 tg3_full_unlock(tp);
4937 del_timer_sync(&tp->timer);
4939 napi_enable(&tp->napi);
4941 tg3_full_lock(tp, 0);
4946 #ifdef CONFIG_NET_POLL_CONTROLLER
4947 static void tg3_poll_controller(struct net_device *dev)
4949 struct tg3 *tp = netdev_priv(dev);
4951 tg3_interrupt(tp->pdev->irq, dev);
4955 static void tg3_reset_task(struct work_struct *work)
4957 struct tg3 *tp = container_of(work, struct tg3, reset_task);
4959 unsigned int restart_timer;
4961 tg3_full_lock(tp, 0);
4963 if (!netif_running(tp->dev)) {
4964 tg3_full_unlock(tp);
4968 tg3_full_unlock(tp);
4974 tg3_full_lock(tp, 1);
4976 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
4977 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
4979 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
4980 tp->write32_tx_mbox = tg3_write32_tx_mbox;
4981 tp->write32_rx_mbox = tg3_write_flush_reg32;
4982 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
4983 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
4986 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
4987 err = tg3_init_hw(tp, 1);
4991 tg3_netif_start(tp);
4994 mod_timer(&tp->timer, jiffies + 1);
4997 tg3_full_unlock(tp);
5003 static void tg3_dump_short_state(struct tg3 *tp)
5005 printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
5006 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
5007 printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
5008 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
5011 static void tg3_tx_timeout(struct net_device *dev)
5013 struct tg3 *tp = netdev_priv(dev);
5015 if (netif_msg_tx_err(tp)) {
5016 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
5018 tg3_dump_short_state(tp);
5021 schedule_work(&tp->reset_task);
5024 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
5025 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5027 u32 base = (u32) mapping & 0xffffffff;
5029 return ((base > 0xffffdcc0) &&
5030 (base + len + 8 < base));
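/* Editor's note: a worked example of the test above. With
 * mapping = 0xfffff000 and len = 0x2000, base + len + 8 is 0x100001008,
 * which truncates to 0x1008 in 32 bits -- less than base, so the buffer
 * straddles the 4GB boundary and must take the workaround path.
 * A hypothetical self-check, assuming BUG_ON() is acceptable here:
 */
#if 0
static void tg3_4g_overflow_selftest(void)
{
	BUG_ON(!tg3_4g_overflow_test((dma_addr_t)0xfffff000, 0x2000));
	BUG_ON(tg3_4g_overflow_test((dma_addr_t)0x10000000, 0x2000));
}
#endif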
5033 /* Test for DMA addresses > 40-bit */
5034 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5037 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5038 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
5039 return (((u64) mapping + len) > DMA_BIT_MASK(40));
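/* Editor's note: the analogous worked example for the 40-bit test --
 * with mapping = (1ULL << 40) - 0x800 and len = 0x1000, mapping + len
 * exceeds DMA_BIT_MASK(40), so affected chips must bounce the buffer.
 */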
5046 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
5048 /* Work around 4GB and 40-bit hardware DMA bugs. */
5049 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
5050 u32 last_plus_one, u32 *start,
5051 u32 base_flags, u32 mss)
5053 struct sk_buff *new_skb;
5054 dma_addr_t new_addr = 0;
5058 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5059 new_skb = skb_copy(skb, GFP_ATOMIC);
5061 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5063 new_skb = skb_copy_expand(skb,
5064 skb_headroom(skb) + more_headroom,
5065 skb_tailroom(skb), GFP_ATOMIC);
5071 /* New SKB is guaranteed to be linear. */
5073 ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
5074 new_addr = skb_shinfo(new_skb)->dma_head;
5076 /* Make sure new skb does not cross any 4G boundaries.
5077 * Drop the packet if it does.
5079 if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
5081 skb_dma_unmap(&tp->pdev->dev, new_skb,
5084 dev_kfree_skb(new_skb);
5087 tg3_set_txd(tp, entry, new_addr, new_skb->len,
5088 base_flags, 1 | (mss << 1));
5089 *start = NEXT_TX(entry);
5093 /* Now clean up the sw ring entries. */
5095 while (entry != last_plus_one) {
5097 tp->tx_buffers[entry].skb = new_skb;
5099 tp->tx_buffers[entry].skb = NULL;
5101 entry = NEXT_TX(entry);
5105 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5111 static void tg3_set_txd(struct tg3 *tp, int entry,
5112 dma_addr_t mapping, int len, u32 flags,
5115 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
5116 int is_end = (mss_and_is_end & 0x1);
5117 u32 mss = (mss_and_is_end >> 1);
5121 flags |= TXD_FLAG_END;
5122 if (flags & TXD_FLAG_VLAN) {
5123 vlan_tag = flags >> 16;
5126 vlan_tag |= (mss << TXD_MSS_SHIFT);
5128 txd->addr_hi = ((u64) mapping >> 32);
5129 txd->addr_lo = ((u64) mapping & 0xffffffff);
5130 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5131 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
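/* Editor's sketch: how callers pack the mss_and_is_end argument decoded
 * above -- bit 0 marks the final descriptor of a frame, the remaining
 * bits carry the MSS. The wrapper name is illustrative only; see the
 * hard_start_xmit paths below for the real call sites.
 */
#if 0
static void example_queue_last_frag(struct tg3 *tp, int entry,
				    dma_addr_t mapping, int len,
				    u32 base_flags, u32 mss)
{
	tg3_set_txd(tp, entry, mapping, len, base_flags, 1 | (mss << 1));
}
#endif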
5134 /* hard_start_xmit for devices that don't have any bugs and
5135 * support TG3_FLG2_HW_TSO_2 only.
5137 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
5139 struct tg3 *tp = netdev_priv(dev);
5140 u32 len, entry, base_flags, mss;
5141 struct skb_shared_info *sp;
5144 len = skb_headlen(skb);
5146 /* We are running in BH disabled context with netif_tx_lock
5147 * and TX reclaim runs via tp->napi.poll inside of a software
5148 * interrupt. Furthermore, IRQ processing runs lockless so we have
5149 * no IRQ context deadlocks to worry about either. Rejoice!
5151 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
5152 if (!netif_queue_stopped(dev)) {
5153 netif_stop_queue(dev);
5155 /* This is a hard error, log it. */
5156 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
5157 "queue awake!\n", dev->name);
5159 return NETDEV_TX_BUSY;
5162 entry = tp->tx_prod;
5165 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5166 int tcp_opt_len, ip_tcp_len;
5168 if (skb_header_cloned(skb) &&
5169 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5174 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
5175 mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
5177 struct iphdr *iph = ip_hdr(skb);
5179 tcp_opt_len = tcp_optlen(skb);
5180 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5183 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5184 mss |= (ip_tcp_len + tcp_opt_len) << 9;
5187 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5188 TXD_FLAG_CPU_POST_DMA);
5190 tcp_hdr(skb)->check = 0;
5193 else if (skb->ip_summed == CHECKSUM_PARTIAL)
5194 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5195 #if TG3_VLAN_TAG_USED
5196 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5197 base_flags |= (TXD_FLAG_VLAN |
5198 (vlan_tx_tag_get(skb) << 16));
5201 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
5206 sp = skb_shinfo(skb);
5208 mapping = sp->dma_head;
5210 tp->tx_buffers[entry].skb = skb;
5212 tg3_set_txd(tp, entry, mapping, len, base_flags,
5213 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5215 entry = NEXT_TX(entry);
5217 /* Now loop through additional data fragments, and queue them. */
5218 if (skb_shinfo(skb)->nr_frags > 0) {
5219 unsigned int i, last;
5221 last = skb_shinfo(skb)->nr_frags - 1;
5222 for (i = 0; i <= last; i++) {
5223 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5226 mapping = sp->dma_maps[i];
5227 tp->tx_buffers[entry].skb = NULL;
5229 tg3_set_txd(tp, entry, mapping, len,
5230 base_flags, (i == last) | (mss << 1));
5232 entry = NEXT_TX(entry);
5236 /* Packets are ready, update Tx producer idx local and on card. */
5237 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
5239 tp->tx_prod = entry;
5240 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
5241 netif_stop_queue(dev);
5242 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
5243 netif_wake_queue(tp->dev);
5249 return NETDEV_TX_OK;
5252 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
5254 /* Use GSO to work around a rare TSO bug that may be triggered when the
5255 * TSO header is greater than 80 bytes.
5257 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5259 struct sk_buff *segs, *nskb;
5261 /* Estimate the number of fragments in the worst case */
5262 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
5263 netif_stop_queue(tp->dev);
5264 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
5265 return NETDEV_TX_BUSY;
5267 netif_wake_queue(tp->dev);
5270 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5272 goto tg3_tso_bug_end;
5278 tg3_start_xmit_dma_bug(nskb, tp->dev);
5284 return NETDEV_TX_OK;
5287 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5288 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
5290 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
5292 struct tg3 *tp = netdev_priv(dev);
5293 u32 len, entry, base_flags, mss;
5294 struct skb_shared_info *sp;
5295 int would_hit_hwbug;
5298 len = skb_headlen(skb);
5300 /* We are running in BH disabled context with netif_tx_lock
5301 * and TX reclaim runs via tp->napi.poll inside of a software
5302 * interrupt. Furthermore, IRQ processing runs lockless so we have
5303 * no IRQ context deadlocks to worry about either. Rejoice!
5305 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
5306 if (!netif_queue_stopped(dev)) {
5307 netif_stop_queue(dev);
5309 /* This is a hard error, log it. */
5310 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
5311 "queue awake!\n", dev->name);
5313 return NETDEV_TX_BUSY;
5316 entry = tp->tx_prod;
5318 if (skb->ip_summed == CHECKSUM_PARTIAL)
5319 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5321 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5323 int tcp_opt_len, ip_tcp_len, hdr_len;
5325 if (skb_header_cloned(skb) &&
5326 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5331 tcp_opt_len = tcp_optlen(skb);
5332 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5334 hdr_len = ip_tcp_len + tcp_opt_len;
5335 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5336 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
5337 return (tg3_tso_bug(tp, skb));
5339 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5340 TXD_FLAG_CPU_POST_DMA);
5344 iph->tot_len = htons(mss + hdr_len);
5345 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
5346 tcp_hdr(skb)->check = 0;
5347 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5349 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5354 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
5355 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
5356 if (tcp_opt_len || iph->ihl > 5) {
5359 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5360 mss |= (tsflags << 11);
5363 if (tcp_opt_len || iph->ihl > 5) {
5366 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5367 base_flags |= tsflags << 12;
5371 #if TG3_VLAN_TAG_USED
5372 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5373 base_flags |= (TXD_FLAG_VLAN |
5374 (vlan_tx_tag_get(skb) << 16));
5377 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
5382 sp = skb_shinfo(skb);
5384 mapping = sp->dma_head;
5386 tp->tx_buffers[entry].skb = skb;
5388 would_hit_hwbug = 0;
5390 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
5391 would_hit_hwbug = 1;
5392 else if (tg3_4g_overflow_test(mapping, len))
5393 would_hit_hwbug = 1;
5395 tg3_set_txd(tp, entry, mapping, len, base_flags,
5396 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5398 entry = NEXT_TX(entry);
5400 /* Now loop through additional data fragments, and queue them. */
5401 if (skb_shinfo(skb)->nr_frags > 0) {
5402 unsigned int i, last;
5404 last = skb_shinfo(skb)->nr_frags - 1;
5405 for (i = 0; i <= last; i++) {
5406 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5409 mapping = sp->dma_maps[i];
5411 tp->tx_buffers[entry].skb = NULL;
5413 if (tg3_4g_overflow_test(mapping, len))
5414 would_hit_hwbug = 1;
5416 if (tg3_40bit_overflow_test(tp, mapping, len))
5417 would_hit_hwbug = 1;
5419 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5420 tg3_set_txd(tp, entry, mapping, len,
5421 base_flags, (i == last)|(mss << 1));
5423 tg3_set_txd(tp, entry, mapping, len,
5424 base_flags, (i == last));
5426 entry = NEXT_TX(entry);
5430 if (would_hit_hwbug) {
5431 u32 last_plus_one = entry;
5434 start = entry - 1 - skb_shinfo(skb)->nr_frags;
5435 start &= (TG3_TX_RING_SIZE - 1);
5437 /* If the workaround fails due to memory/mapping
5438 * failure, silently drop this packet.
5440 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
5441 &start, base_flags, mss))
5447 /* Packets are ready, update Tx producer idx local and on card. */
5448 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
5450 tp->tx_prod = entry;
5451 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
5452 netif_stop_queue(dev);
5453 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
5454 netif_wake_queue(tp->dev);
5460 return NETDEV_TX_OK;
5463 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5468 if (new_mtu > ETH_DATA_LEN) {
5469 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5470 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5471 ethtool_op_set_tso(dev, 0);
5474 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5476 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5477 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
5478 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
5482 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5484 struct tg3 *tp = netdev_priv(dev);
5487 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
5490 if (!netif_running(dev)) {
5491 /* We'll just catch it later when the
5494 tg3_set_mtu(dev, tp, new_mtu);
5502 tg3_full_lock(tp, 1);
5504 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5506 tg3_set_mtu(dev, tp, new_mtu);
5508 err = tg3_restart_hw(tp, 0);
5511 tg3_netif_start(tp);
5513 tg3_full_unlock(tp);
5521 static void tg3_rx_prodring_free(struct tg3 *tp,
5522 struct tg3_rx_prodring_set *tpr)
5524 struct ring_info *rxp;
5527 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5528 rxp = &tpr->rx_std_buffers[i];
5530 if (rxp->skb == NULL)
5533 pci_unmap_single(tp->pdev,
5534 pci_unmap_addr(rxp, mapping),
5536 PCI_DMA_FROMDEVICE);
5537 dev_kfree_skb_any(rxp->skb);
5541 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
5542 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5543 rxp = &tpr->rx_jmb_buffers[i];
5545 if (rxp->skb == NULL)
5548 pci_unmap_single(tp->pdev,
5549 pci_unmap_addr(rxp, mapping),
5551 PCI_DMA_FROMDEVICE);
5552 dev_kfree_skb_any(rxp->skb);
5558 /* Initialize rx rings for packet processing.
5560 * The chip has been shut down and the driver detached from
5561 * the networking, so no interrupts or new tx packets will
5562 * end up in the driver. tp->{tx,}lock are held and thus
5563 * we may not sleep.
5564 */
5565 static int tg3_rx_prodring_alloc(struct tg3 *tp,
5566 struct tg3_rx_prodring_set *tpr)
5568 u32 i, rx_pkt_dma_sz;
5570 /* Zero out all descriptors. */
5571 memset(tpr->rx_std, 0, TG3_RX_RING_BYTES);
5573 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
5574 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
5575 tp->dev->mtu > ETH_DATA_LEN)
5576 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
5577 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
5579 /* Initialize invariants of the rings; we only set this
5580 * stuff once. This works because the card does not
5581 * write into the rx buffer posting rings.
5583 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5584 struct tg3_rx_buffer_desc *rxd;
5586 rxd = &tpr->rx_std[i];
5587 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
5588 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
5589 rxd->opaque = (RXD_OPAQUE_RING_STD |
5590 (i << RXD_OPAQUE_INDEX_SHIFT));
5593 /* Now allocate fresh SKBs for each rx ring. */
5594 for (i = 0; i < tp->rx_pending; i++) {
5595 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
5596 printk(KERN_WARNING PFX
5597 "%s: Using a smaller RX standard ring, "
5598 "only %d out of %d buffers were allocated "
5600 tp->dev->name, i, tp->rx_pending);
5608 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE))
5611 memset(tpr->rx_jmb, 0, TG3_RX_JUMBO_RING_BYTES);
5613 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5614 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5615 struct tg3_rx_buffer_desc *rxd;
5617 rxd = &tpr->rx_jmb[i].std;
5618 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
5619 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
5621 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
5622 (i << RXD_OPAQUE_INDEX_SHIFT));
5625 for (i = 0; i < tp->rx_jumbo_pending; i++) {
5626 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
5628 printk(KERN_WARNING PFX
5629 "%s: Using a smaller RX jumbo ring, "
5630 "only %d out of %d buffers were "
5631 "allocated successfully.\n",
5632 tp->dev->name, i, tp->rx_jumbo_pending);
5635 tp->rx_jumbo_pending = i;
5645 tg3_rx_prodring_free(tp, tpr);
5649 static void tg3_rx_prodring_fini(struct tg3 *tp,
5650 struct tg3_rx_prodring_set *tpr)
5652 kfree(tpr->rx_std_buffers);
5653 tpr->rx_std_buffers = NULL;
5654 kfree(tpr->rx_jmb_buffers);
5655 tpr->rx_jmb_buffers = NULL;
5657 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
5658 tpr->rx_std, tpr->rx_std_mapping);
5662 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5663 tpr->rx_jmb, tpr->rx_jmb_mapping);
5668 static int tg3_rx_prodring_init(struct tg3 *tp,
5669 struct tg3_rx_prodring_set *tpr)
5671 tpr->rx_std_buffers = kzalloc(sizeof(struct ring_info) *
5672 TG3_RX_RING_SIZE, GFP_KERNEL);
5673 if (!tpr->rx_std_buffers)
5676 tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
5677 &tpr->rx_std_mapping);
5681 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
5682 tpr->rx_jmb_buffers = kzalloc(sizeof(struct ring_info) *
5683 TG3_RX_JUMBO_RING_SIZE,
5685 if (!tpr->rx_jmb_buffers)
5688 tpr->rx_jmb = pci_alloc_consistent(tp->pdev,
5689 TG3_RX_JUMBO_RING_BYTES,
5690 &tpr->rx_jmb_mapping);
5698 tg3_rx_prodring_fini(tp, tpr);
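/* Editor's sketch: the producer-ring lifecycle implied by the helpers
 * above -- init allocates memory, alloc populates descriptors and skbs,
 * free drains pending skbs, fini releases memory. The wrapper function
 * is illustrative only.
 */
#if 0
static int example_ring_lifecycle(struct tg3 *tp)
{
	struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
	int err;

	err = tg3_rx_prodring_init(tp, tpr);
	if (err)
		return err;
	err = tg3_rx_prodring_alloc(tp, tpr);
	if (err) {
		tg3_rx_prodring_fini(tp, tpr);
		return err;
	}
	/* ... pass traffic ... */
	tg3_rx_prodring_free(tp, tpr);
	tg3_rx_prodring_fini(tp, tpr);
	return 0;
}
#endif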
5702 /* Free up pending packets in all rx/tx rings.
5704 * The chip has been shut down and the driver detached from
5705 * the networking, so no interrupts or new tx packets will
5706 * end up in the driver. tp->{tx,}lock is not held and we are not
5707 * in an interrupt context and thus may sleep.
5709 static void tg3_free_rings(struct tg3 *tp)
5713 for (i = 0; i < TG3_TX_RING_SIZE; ) {
5714 struct tx_ring_info *txp;
5715 struct sk_buff *skb;
5717 txp = &tp->tx_buffers[i];
5725 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5729 i += skb_shinfo(skb)->nr_frags + 1;
5731 dev_kfree_skb_any(skb);
5734 tg3_rx_prodring_free(tp, &tp->prodring[0]);
5737 /* Initialize tx/rx rings for packet processing.
5739 * The chip has been shut down and the driver detached from
5740 * the networking, so no interrupts or new tx packets will
5741 * end up in the driver. tp->{tx,}lock are held and thus
5742 * we may not sleep.
5743 */
5744 static int tg3_init_rings(struct tg3 *tp)
5746 /* Free up all the SKBs. */
5749 /* Zero out all descriptors. */
5750 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
5751 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
5753 return tg3_rx_prodring_alloc(tp, &tp->prodring[0]);
5757 * Must not be invoked with interrupt sources disabled and
5758 * the hardware shut down.
5760 static void tg3_free_consistent(struct tg3 *tp)
5762 kfree(tp->tx_buffers);
5763 tp->tx_buffers = NULL;
5765 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5766 tp->rx_rcb, tp->rx_rcb_mapping);
5770 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
5771 tp->tx_ring, tp->tx_desc_mapping);
5774 if (tp->hw_status) {
5775 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
5776 tp->hw_status, tp->status_mapping);
5777 tp->hw_status = NULL;
5780 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
5781 tp->hw_stats, tp->stats_mapping);
5782 tp->hw_stats = NULL;
5784 tg3_rx_prodring_fini(tp, &tp->prodring[0]);
5788 * Must not be invoked with interrupt sources disabled and
5789 * the hardware shut down. Can sleep.
5791 static int tg3_alloc_consistent(struct tg3 *tp)
5793 if (tg3_rx_prodring_init(tp, &tp->prodring[0]))
5796 tp->tx_buffers = kzalloc(sizeof(struct tx_ring_info) *
5797 TG3_TX_RING_SIZE, GFP_KERNEL);
5798 if (!tp->tx_buffers)
5801 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5802 &tp->rx_rcb_mapping);
5806 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
5807 &tp->tx_desc_mapping);
5811 tp->hw_status = pci_alloc_consistent(tp->pdev,
5813 &tp->status_mapping);
5817 tp->hw_stats = pci_alloc_consistent(tp->pdev,
5818 sizeof(struct tg3_hw_stats),
5819 &tp->stats_mapping);
5823 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5824 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5829 tg3_free_consistent(tp);
5833 #define MAX_WAIT_CNT 1000
5835 /* To stop a block, clear the enable bit and poll till it
5836 * clears. tp->lock is held.
5838 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
5843 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5850 /* We can't enable/disable these bits of the
5851 * 5705/5750, just say success.
5864 for (i = 0; i < MAX_WAIT_CNT; i++) {
5867 if ((val & enable_bit) == 0)
5871 if (i == MAX_WAIT_CNT && !silent) {
5872 printk(KERN_ERR PFX "tg3_stop_block timed out, "
5873 "ofs=%lx enable_bit=%x\n",
5881 /* tp->lock is held. */
5882 static int tg3_abort_hw(struct tg3 *tp, int silent)
5886 tg3_disable_ints(tp);
5888 tp->rx_mode &= ~RX_MODE_ENABLE;
5889 tw32_f(MAC_RX_MODE, tp->rx_mode);
5892 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
5893 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
5894 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
5895 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
5896 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
5897 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
5899 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
5900 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
5901 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
5902 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
5903 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
5904 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
5905 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
5907 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
5908 tw32_f(MAC_MODE, tp->mac_mode);
5911 tp->tx_mode &= ~TX_MODE_ENABLE;
5912 tw32_f(MAC_TX_MODE, tp->tx_mode);
5914 for (i = 0; i < MAX_WAIT_CNT; i++) {
5916 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
5919 if (i >= MAX_WAIT_CNT) {
5920 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
5921 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
5922 tp->dev->name, tr32(MAC_TX_MODE));
5926 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
5927 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
5928 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
5930 tw32(FTQ_RESET, 0xffffffff);
5931 tw32(FTQ_RESET, 0x00000000);
5933 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
5934 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
5937 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5939 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5944 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
5949 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
5950 if (apedata != APE_SEG_SIG_MAGIC)
5953 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
5954 if (!(apedata & APE_FW_STATUS_READY))
5957 /* Wait for up to 1 millisecond for APE to service previous event. */
5958 for (i = 0; i < 10; i++) {
5959 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
5962 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
5964 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5965 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
5966 event | APE_EVENT_STATUS_EVENT_PENDING);
5968 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
5970 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5976 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5977 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
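/* Editor's sketch: the APE event handshake above in outline -- check the
 * firmware signature, take the memory lock, post the event with the
 * PENDING bit set, then ring the doorbell. The wait/retry handling of
 * the real function is omitted; illustrative only.
 */
#if 0
static void example_ape_event(struct tg3 *tp, u32 event)
{
	if (tg3_ape_read32(tp, TG3_APE_SEG_SIG) != APE_SEG_SIG_MAGIC)
		return;		/* APE firmware is not present */
	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
		return;
	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);
	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
#endif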
5980 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5985 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5989 case RESET_KIND_INIT:
5990 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5991 APE_HOST_SEG_SIG_MAGIC);
5992 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5993 APE_HOST_SEG_LEN_MAGIC);
5994 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5995 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5996 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5997 APE_HOST_DRIVER_ID_MAGIC);
5998 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5999 APE_HOST_BEHAV_NO_PHYLOCK);
6001 event = APE_EVENT_STATUS_STATE_START;
6003 case RESET_KIND_SHUTDOWN:
6004 /* With the interface we are currently using,
6005 * APE does not track driver state. Wiping
6006 * out the HOST SEGMENT SIGNATURE forces
6007 * the APE to assume OS absent status.
6009 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6011 event = APE_EVENT_STATUS_STATE_UNLOAD;
6013 case RESET_KIND_SUSPEND:
6014 event = APE_EVENT_STATUS_STATE_SUSPEND;
6020 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6022 tg3_ape_send_event(tp, event);
6025 /* tp->lock is held. */
6026 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6028 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6029 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6031 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6033 case RESET_KIND_INIT:
6034 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6038 case RESET_KIND_SHUTDOWN:
6039 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6043 case RESET_KIND_SUSPEND:
6044 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6053 if (kind == RESET_KIND_INIT ||
6054 kind == RESET_KIND_SUSPEND)
6055 tg3_ape_driver_state_change(tp, kind);
6058 /* tp->lock is held. */
6059 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6061 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6063 case RESET_KIND_INIT:
6064 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6065 DRV_STATE_START_DONE);
6068 case RESET_KIND_SHUTDOWN:
6069 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6070 DRV_STATE_UNLOAD_DONE);
6078 if (kind == RESET_KIND_SHUTDOWN)
6079 tg3_ape_driver_state_change(tp, kind);
6082 /* tp->lock is held. */
6083 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6085 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6087 case RESET_KIND_INIT:
6088 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6092 case RESET_KIND_SHUTDOWN:
6093 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6097 case RESET_KIND_SUSPEND:
6098 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6108 static int tg3_poll_fw(struct tg3 *tp)
6113 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6114 /* Wait up to 20ms for init done. */
6115 for (i = 0; i < 200; i++) {
6116 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
6123 /* Wait for firmware initialization to complete. */
6124 for (i = 0; i < 100000; i++) {
6125 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
6126 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
6131 /* Chip might not be fitted with firmware. Some Sun onboard
6132 * parts are configured like that. So don't signal the timeout
6133 * of the above loop as an error, but do report the lack of
6134 * running firmware once.
6137 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
6138 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
6140 printk(KERN_INFO PFX "%s: No firmware running.\n",
6147 /* Save PCI command register before chip reset */
6148 static void tg3_save_pci_state(struct tg3 *tp)
6150 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
6153 /* Restore PCI state after chip reset */
6154 static void tg3_restore_pci_state(struct tg3 *tp)
6158 /* Re-enable indirect register accesses. */
6159 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
6160 tp->misc_host_ctrl);
6162 /* Set MAX PCI retry to zero. */
6163 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
6164 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6165 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
6166 val |= PCISTATE_RETRY_SAME_DMA;
6167 /* Allow reads and writes to the APE register and memory space. */
6168 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
6169 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6170 PCISTATE_ALLOW_APE_SHMEM_WR;
6171 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
6173 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
6175 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
6176 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6177 pcie_set_readrq(tp->pdev, 4096);
6179 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
6180 tp->pci_cacheline_sz);
6181 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
6186 /* Make sure PCI-X relaxed ordering bit is clear. */
6187 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6190 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6192 pcix_cmd &= ~PCI_X_CMD_ERO;
6193 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6197 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
6199 /* Chip reset on 5780 will reset MSI enable bit,
6200 * so we need to restore it.
6202 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6205 pci_read_config_word(tp->pdev,
6206 tp->msi_cap + PCI_MSI_FLAGS,
6208 pci_write_config_word(tp->pdev,
6209 tp->msi_cap + PCI_MSI_FLAGS,
6210 ctrl | PCI_MSI_FLAGS_ENABLE);
6211 val = tr32(MSGINT_MODE);
6212 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
6217 static void tg3_stop_fw(struct tg3 *);
6219 /* tp->lock is held. */
6220 static int tg3_chip_reset(struct tg3 *tp)
6223 void (*write_op)(struct tg3 *, u32, u32);
6230 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
6232 /* No matching tg3_nvram_unlock() after this because
6233 * chip reset below will undo the nvram lock.
6235 tp->nvram_lock_cnt = 0;
6237 /* GRC_MISC_CFG core clock reset will clear the memory
6238 * enable bit in PCI register 4 and the MSI enable bit
6239 * on some chips, so we save relevant registers here.
6241 tg3_save_pci_state(tp);
6243 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
6244 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
6245 tw32(GRC_FASTBOOT_PC, 0);
6248 * We must avoid the readl() that normally takes place.
6249 * It locks machines, causes machine checks, and other
6250 * fun things. So, temporarily disable the 5701
6251 * hardware workaround, while we do the reset.
6253 write_op = tp->write32;
6254 if (write_op == tg3_write_flush_reg32)
6255 tp->write32 = tg3_write32;
6257 /* Prevent the irq handler from reading or writing PCI registers
6258 * during chip reset when the memory enable bit in the PCI command
6259 * register may be cleared. The chip does not generate interrupt
6260 * at this time, but the irq handler may still be called due to irq
6261 * sharing or irqpoll.
6263 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
6264 if (tp->hw_status) {
6265 tp->hw_status->status = 0;
6266 tp->hw_status->status_tag = 0;
6269 tp->last_irq_tag = 0;
6271 synchronize_irq(tp->pdev->irq);
6273 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
6274 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
6275 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
6279 val = GRC_MISC_CFG_CORECLK_RESET;
6281 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
6282 if (tr32(0x7e2c) == 0x60) {
6285 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
6286 tw32(GRC_MISC_CFG, (1 << 29));
6291 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6292 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
6293 tw32(GRC_VCPU_EXT_CTRL,
6294 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
6297 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6298 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
6299 tw32(GRC_MISC_CFG, val);
6301 /* restore 5701 hardware bug workaround write method */
6302 tp->write32 = write_op;
6304 /* Unfortunately, we have to delay before the PCI read back.
6305 * Some 575X chips will not even respond to a PCI cfg access
6306 * when the reset command is given to the chip.
6308 * How do these hardware designers expect things to work
6309 * properly if the PCI write is posted for a long period
6310 * of time? It is always necessary to have some method by
6311 * which a register read back can occur to push the write
6312 * out which does the reset.
6314 * For most tg3 variants the trick below was working.
6319 /* Flush PCI posted writes. The normal MMIO registers
6320 * are inaccessible at this time so this is the only
6321 * way to do this reliably (actually, this is no longer
6322 * the case, see above). I tried to use indirect
6323 * register read/write but this upset some 5701 variants.
6325 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
6329 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) {
6332 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
6336 /* Wait for link training to complete. */
6337 for (i = 0; i < 5000; i++)
6340 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
6341 pci_write_config_dword(tp->pdev, 0xc4,
6342 cfg_val | (1 << 15));
6345 /* Clear the "no snoop" and "relaxed ordering" bits. */
6346 pci_read_config_word(tp->pdev,
6347 tp->pcie_cap + PCI_EXP_DEVCTL,
6349 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
6350 PCI_EXP_DEVCTL_NOSNOOP_EN);
6352 * Older PCIe devices only support the 128 byte
6353 * MPS setting. Enforce the restriction.
6355 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
6356 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784))
6357 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
6358 pci_write_config_word(tp->pdev,
6359 tp->pcie_cap + PCI_EXP_DEVCTL,
6362 pcie_set_readrq(tp->pdev, 4096);
6364 /* Clear error status */
6365 pci_write_config_word(tp->pdev,
6366 tp->pcie_cap + PCI_EXP_DEVSTA,
6367 PCI_EXP_DEVSTA_CED |
6368 PCI_EXP_DEVSTA_NFED |
6369 PCI_EXP_DEVSTA_FED |
6370 PCI_EXP_DEVSTA_URD);
6373 tg3_restore_pci_state(tp);
6375 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
6378 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
6379 val = tr32(MEMARB_MODE);
6380 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
6382 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
6384 tw32(0x5000, 0x400);
6387 tw32(GRC_MODE, tp->grc_mode);
6389 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
6392 tw32(0xc4, val | (1 << 15));
6395 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
6396 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6397 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
6398 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
6399 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
6400 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6403 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6404 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
6405 tw32_f(MAC_MODE, tp->mac_mode);
6406 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6407 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
6408 tw32_f(MAC_MODE, tp->mac_mode);
6409 } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6410 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
6411 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
6412 tp->mac_mode |= MAC_MODE_TDE_ENABLE;
6413 tw32_f(MAC_MODE, tp->mac_mode);
6415 tw32_f(MAC_MODE, 0);
6418 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
6420 err = tg3_poll_fw(tp);
6426 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
6427 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
6430 tw32(0x7c00, val | (1 << 25));
6433 /* Reprobe ASF enable state. */
6434 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
6435 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
6436 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
6437 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
6440 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
6441 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
6442 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
6443 tp->last_event_jiffies = jiffies;
6444 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
6445 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
6452 /* tp->lock is held. */
6453 static void tg3_stop_fw(struct tg3 *tp)
6455 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
6456 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
6457 /* Wait for RX cpu to ACK the previous event. */
6458 tg3_wait_for_event_ack(tp);
6460 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
6462 tg3_generate_fw_event(tp);
6464 /* Wait for RX cpu to ACK this event. */
6465 tg3_wait_for_event_ack(tp);
6469 /* tp->lock is held. */
6470 static int tg3_halt(struct tg3 *tp, int kind, int silent)
6476 tg3_write_sig_pre_reset(tp, kind);
6478 tg3_abort_hw(tp, silent);
6479 err = tg3_chip_reset(tp);
6481 __tg3_set_mac_addr(tp, 0);
6483 tg3_write_sig_legacy(tp, kind);
6484 tg3_write_sig_post_reset(tp, kind);
6492 #define RX_CPU_SCRATCH_BASE 0x30000
6493 #define RX_CPU_SCRATCH_SIZE 0x04000
6494 #define TX_CPU_SCRATCH_BASE 0x34000
6495 #define TX_CPU_SCRATCH_SIZE 0x04000
6497 /* tp->lock is held. */
6498 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
6502 BUG_ON(offset == TX_CPU_BASE &&
6503 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
6505 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6506 u32 val = tr32(GRC_VCPU_EXT_CTRL);
6508 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
6511 if (offset == RX_CPU_BASE) {
6512 for (i = 0; i < 10000; i++) {
6513 tw32(offset + CPU_STATE, 0xffffffff);
6514 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6515 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6519 tw32(offset + CPU_STATE, 0xffffffff);
6520 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
6523 for (i = 0; i < 10000; i++) {
6524 tw32(offset + CPU_STATE, 0xffffffff);
6525 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6526 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6532 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
6535 (offset == RX_CPU_BASE ? "RX" : "TX"));
6539 /* Clear firmware's nvram arbitration. */
6540 if (tp->tg3_flags & TG3_FLAG_NVRAM)
6541 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
6545 struct fw_info {
6546 unsigned int fw_base;
6547 unsigned int fw_len;
6548 const __be32 *fw_data;
6549 };
6551 /* tp->lock is held. */
6552 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
6553 int cpu_scratch_size, struct fw_info *info)
6555 int err, lock_err, i;
6556 void (*write_op)(struct tg3 *, u32, u32);
6558 if (cpu_base == TX_CPU_BASE &&
6559 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6560 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
6561 "TX cpu firmware on %s which is 5705.\n",
6566 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6567 write_op = tg3_write_mem;
6569 write_op = tg3_write_indirect_reg32;
6571 /* It is possible that bootcode is still loading at this point.
6572 * Get the nvram lock before halting the cpu.
6574 lock_err = tg3_nvram_lock(tp);
6575 err = tg3_halt_cpu(tp, cpu_base);
6577 tg3_nvram_unlock(tp);
6581 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
6582 write_op(tp, cpu_scratch_base + i, 0);
6583 tw32(cpu_base + CPU_STATE, 0xffffffff);
6584 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
6585 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
6586 write_op(tp, (cpu_scratch_base +
6587 (info->fw_base & 0xffff) +
6589 be32_to_cpu(info->fw_data[i]));
6597 /* tp->lock is held. */
6598 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
6600 struct fw_info info;
6601 const __be32 *fw_data;
6604 fw_data = (void *)tp->fw->data;
6606 /* Firmware blob starts with version numbers, followed by
6607 start address and length. We are setting complete length.
6608 length = end_address_of_bss - start_address_of_text.
6609 Remainder is the blob to be loaded contiguously
6610 from start address. */
6612 info.fw_base = be32_to_cpu(fw_data[1]);
6613 info.fw_len = tp->fw->size - 12;
6614 info.fw_data = &fw_data[3];
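/* Editor's note: the resulting layout, in 32-bit words, per the
 * parsing above:
 *   fw_data[0]  version
 *   fw_data[1]  fw_base (start address of text)
 *   fw_data[2]  stated length (the driver derives fw_len from the
 *               file size instead)
 *   fw_data[3+] image, loaded contiguously at fw_base
 * The 12 bytes subtracted from tp->fw->size are these three words.
 */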
6616 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
6617 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
6622 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
6623 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
6628 /* Now startup only the RX cpu. */
6629 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6630 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
6632 for (i = 0; i < 5; i++) {
6633 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
6635 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6636 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
6637 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
6641 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
6642 "to set RX CPU PC, is %08x should be %08x\n",
6643 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
6647 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6648 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
6653 /* 5705 needs a special version of the TSO firmware. */
6655 /* tp->lock is held. */
6656 static int tg3_load_tso_firmware(struct tg3 *tp)
6658 struct fw_info info;
6659 const __be32 *fw_data;
6660 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6663 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6666 fw_data = (void *)tp->fw->data;
6668 /* Firmware blob starts with version numbers, followed by
6669 start address and length. We are setting complete length.
6670 length = end_address_of_bss - start_address_of_text.
6671 Remainder is the blob to be loaded contiguously
6672 from start address. */
6674 info.fw_base = be32_to_cpu(fw_data[1]);
6675 cpu_scratch_size = tp->fw_len;
6676 info.fw_len = tp->fw->size - 12;
6677 info.fw_data = &fw_data[3];
6679 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6680 cpu_base = RX_CPU_BASE;
6681 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6683 cpu_base = TX_CPU_BASE;
6684 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6685 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6688 err = tg3_load_firmware_cpu(tp, cpu_base,
6689 cpu_scratch_base, cpu_scratch_size,
6694 /* Now startup the cpu. */
6695 tw32(cpu_base + CPU_STATE, 0xffffffff);
6696 tw32_f(cpu_base + CPU_PC, info.fw_base);
6698 for (i = 0; i < 5; i++) {
6699 if (tr32(cpu_base + CPU_PC) == info.fw_base)
6701 tw32(cpu_base + CPU_STATE, 0xffffffff);
6702 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
6703 tw32_f(cpu_base + CPU_PC, info.fw_base);
6707 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
6708 "to set CPU PC, is %08x should be %08x\n",
6709 tp->dev->name, tr32(cpu_base + CPU_PC),
6713 tw32(cpu_base + CPU_STATE, 0xffffffff);
6714 tw32_f(cpu_base + CPU_MODE, 0x00000000);
6719 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6721 struct tg3 *tp = netdev_priv(dev);
6722 struct sockaddr *addr = p;
6723 int err = 0, skip_mac_1 = 0;
6725 if (!is_valid_ether_addr(addr->sa_data))
6728 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6730 if (!netif_running(dev))
6733 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6734 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6736 addr0_high = tr32(MAC_ADDR_0_HIGH);
6737 addr0_low = tr32(MAC_ADDR_0_LOW);
6738 addr1_high = tr32(MAC_ADDR_1_HIGH);
6739 addr1_low = tr32(MAC_ADDR_1_LOW);
6741 /* Skip MAC addr 1 if ASF is using it. */
6742 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6743 !(addr1_high == 0 && addr1_low == 0))
6746 spin_lock_bh(&tp->lock);
6747 __tg3_set_mac_addr(tp, skip_mac_1);
6748 spin_unlock_bh(&tp->lock);
6753 /* tp->lock is held. */
6754 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6755 dma_addr_t mapping, u32 maxlen_flags,
6759 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6760 ((u64) mapping >> 32));
6762 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6763 ((u64) mapping & 0xffffffff));
6765 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6768 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6770 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6774 static void __tg3_set_rx_mode(struct net_device *);
6775 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6777 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6778 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6779 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6780 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6781 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6782 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6783 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6785 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6786 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6787 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6788 u32 val = ec->stats_block_coalesce_usecs;
6790 if (!netif_carrier_ok(tp->dev))
6793 tw32(HOSTCC_STAT_COAL_TICKS, val);
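/* Editor's note: the registers above are fed directly from the ethtool
 * coalescing parameters -- e.g. rx_coalesce_usecs=20 together with
 * rx_max_coalesced_frames=5 asks the host coalescing engine to interrupt
 * after 20us or 5 frames, whichever comes first. Illustrative values.
 */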
6797 /* tp->lock is held. */
6798 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6800 u32 val, rdmac_mode;
6802 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
6804 tg3_disable_ints(tp);
6808 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6810 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
6811 tg3_abort_hw(tp, 1);
6815 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
6818 err = tg3_chip_reset(tp);
6822 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6824 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
6825 val = tr32(TG3_CPMU_CTRL);
6826 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
6827 tw32(TG3_CPMU_CTRL, val);
6829 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
6830 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
6831 val |= CPMU_LSPD_10MB_MACCLK_6_25;
6832 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
6834 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
6835 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
6836 val |= CPMU_LNK_AWARE_MACCLK_6_25;
6837 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
6839 val = tr32(TG3_CPMU_HST_ACC);
6840 val &= ~CPMU_HST_ACC_MACCLK_MASK;
6841 val |= CPMU_HST_ACC_MACCLK_6_25;
6842 tw32(TG3_CPMU_HST_ACC, val);
6845 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
6846 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
6847 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
6848 PCIE_PWR_MGMT_L1_THRESH_4MS;
6849 tw32(PCIE_PWR_MGMT_THRESH, val);
6851 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
6852 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
6854 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
6857 if (tp->tg3_flags3 & TG3_FLG3_TOGGLE_10_100_L1PLLPD) {
6858 val = tr32(TG3_PCIE_LNKCTL);
6859 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG)
6860 val |= TG3_PCIE_LNKCTL_L1_PLL_PD_DIS;
6862 val &= ~TG3_PCIE_LNKCTL_L1_PLL_PD_DIS;
6863 tw32(TG3_PCIE_LNKCTL, val);
6866 /* This works around an issue with Athlon chipsets on
6867 * B3 tigon3 silicon. This bit has no effect on any
6868 * other revision. But do not set this on PCI Express
6869 * chips and don't even touch the clocks if the CPMU is present.
6871 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
6872 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6873 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6874 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6877 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6878 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6879 val = tr32(TG3PCI_PCISTATE);
6880 val |= PCISTATE_RETRY_SAME_DMA;
6881 tw32(TG3PCI_PCISTATE, val);
6884 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6885 /* Allow reads and writes to the
6886 * APE register and memory space.
6888 val = tr32(TG3PCI_PCISTATE);
6889 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6890 PCISTATE_ALLOW_APE_SHMEM_WR;
6891 tw32(TG3PCI_PCISTATE, val);
6894 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6895 /* Enable some hw fixes. */
6896 val = tr32(TG3PCI_MSI_DATA);
6897 val |= (1 << 26) | (1 << 28) | (1 << 29);
6898 tw32(TG3PCI_MSI_DATA, val);
6901 /* Descriptor ring init may make accesses to the
6902 * NIC SRAM area to set up the TX descriptors, so we
6903 * can only do this after the hardware has been
6904 * successfully reset.
6906 err = tg3_init_rings(tp);
6910 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
6911 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
6912 /* This value is determined during the probe time DMA
6913 * engine test, tg3_test_dma.
6915 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6918 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6919 GRC_MODE_4X_NIC_SEND_RINGS |
6920 GRC_MODE_NO_TX_PHDR_CSUM |
6921 GRC_MODE_NO_RX_PHDR_CSUM);
6922 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6924 /* Pseudo-header checksum is done by hardware logic and not
6925 * the offload processors, so make the chip do the pseudo-
6926 * header checksums on receive. For transmit it is more
6927 * convenient to do the pseudo-header checksum in software
6928 * as Linux does that on transmit for us in all cases.
6930 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6934 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6936 /* Set up the timer prescaler register. The clock is always 66 MHz. */
6937 val = tr32(GRC_MISC_CFG);
6939 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6940 tw32(GRC_MISC_CFG, val);
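/* Editor's note: the value 65 above is not arbitrary.  Assuming the
 * usual divide-by-(N + 1) prescaler convention, it turns the fixed
 * 66 MHz core clock into a 1 MHz timer tick.  A minimal sketch
 * (hypothetical helper, not driver code):
 */
#if 0
static inline u32 example_timer_tick_hz(u32 prescaler)
{
	/* 66,000,000 / (65 + 1) = 1,000,000 ticks per second */
	return 66000000 / (prescaler + 1);
}
#endif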
6942 /* Initialize MBUF/DESC pool. */
6943 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6945 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6946 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6947 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6948 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6950 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6951 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6952 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6954 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6957 fw_len = tp->fw_len;
6958 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6959 tw32(BUFMGR_MB_POOL_ADDR,
6960 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6961 tw32(BUFMGR_MB_POOL_SIZE,
6962 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6965 if (tp->dev->mtu <= ETH_DATA_LEN) {
6966 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6967 tp->bufmgr_config.mbuf_read_dma_low_water);
6968 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6969 tp->bufmgr_config.mbuf_mac_rx_low_water);
6970 tw32(BUFMGR_MB_HIGH_WATER,
6971 tp->bufmgr_config.mbuf_high_water);
6973 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6974 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6975 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6976 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6977 tw32(BUFMGR_MB_HIGH_WATER,
6978 tp->bufmgr_config.mbuf_high_water_jumbo);
6980 tw32(BUFMGR_DMA_LOW_WATER,
6981 tp->bufmgr_config.dma_low_water);
6982 tw32(BUFMGR_DMA_HIGH_WATER,
6983 tp->bufmgr_config.dma_high_water);
6985 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6986 for (i = 0; i < 2000; i++) {
6987 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6992 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6997 /* Setup replenish threshold. */
6998 val = tp->rx_pending / 8;
6999 if (val == 0)
7000 val = 1;
7001 else if (val > tp->rx_std_max_post)
7002 val = tp->rx_std_max_post;
7003 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7004 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7005 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7007 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7008 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7011 tw32(RCVBDI_STD_THRESH, val);
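/* Editor's note: RCVBDI_STD_THRESH is the standard ring replenish
 * threshold programmed above.  rx_pending / 8, clamped as shown,
 * presumably tells the chip how many consumed buffer descriptors to
 * batch up before signalling the host to post fresh ones, trading
 * attention overhead against the risk of running the ring dry.
 */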
7013 /* Initialize TG3_BDINFO's at:
7014 * RCVDBDI_STD_BD: standard eth size rx ring
7015 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
7016 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
7019 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
7020 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
7021 * ring attribute flags
7022 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
7024 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7025 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7027 * The size of each ring is fixed in the firmware, but the location is configurable.
7030 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7031 ((u64) tpr->rx_std_mapping >> 32));
7032 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7033 ((u64) tpr->rx_std_mapping & 0xffffffff));
7034 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7035 NIC_SRAM_RX_BUFFER_DESC);
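/* Editor's note: each TG3_BDINFO block above is programmed with the
 * same four register writes.  A minimal sketch in the spirit of
 * tg3_set_bdinfo() (used further below); the helper name here is
 * hypothetical:
 */
#if 0
static void example_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			       dma_addr_t mapping, u32 maxlen_flags,
			       u32 nic_addr)
{
	/* High and low halves of the host DMA address of the ring */
	tw32(bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) mapping >> 32));
	tw32(bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) mapping & 0xffffffff));
	/* (rx max buffer size << 16) | ring attribute flags */
	tw32(bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS, maxlen_flags);
	/* Where the descriptors live in NIC SRAM */
	tw32(bdinfo_addr + TG3_BDINFO_NIC_ADDR, nic_addr);
}
#endif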
7037 /* Disable the mini ring */
7038 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7039 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7040 BDINFO_FLAGS_DISABLED);
7042 /* Program the jumbo buffer descriptor ring control
7043 * blocks on those devices that have them.
7045 if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
7046 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
7047 /* Setup replenish threshold. */
7048 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7050 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
7051 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7052 ((u64) tpr->rx_jmb_mapping >> 32));
7053 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7054 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
7055 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7056 (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
7057 BDINFO_FLAGS_USE_EXT_RECV);
7058 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7059 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7061 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7062 BDINFO_FLAGS_DISABLED);
7065 val = RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT;
7067 val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT;
7069 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
7071 /* There is only one send ring on 5705/5750; no need to explicitly
7072 * disable the others.
7074 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7075 /* Clear out send RCB ring in SRAM. */
7076 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
7077 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7078 BDINFO_FLAGS_DISABLED);
7083 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7084 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7086 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
7087 tp->tx_desc_mapping,
7088 (TG3_TX_RING_SIZE <<
7089 BDINFO_FLAGS_MAXLEN_SHIFT),
7090 NIC_SRAM_TX_BUFFER_DESC);
7092 /* There is only one receive return ring on 5705/5750; no need
7093 * to explicitly disable the others.
7095 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7096 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
7097 i += TG3_BDINFO_SIZE) {
7098 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7099 BDINFO_FLAGS_DISABLED);
7104 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
7106 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
7108 (TG3_RX_RCB_RING_SIZE(tp) <<
7109 BDINFO_FLAGS_MAXLEN_SHIFT),
7112 tpr->rx_std_ptr = tp->rx_pending;
7113 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
7116 tpr->rx_jmb_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7117 tp->rx_jumbo_pending : 0;
7118 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
7121 /* Initialize MAC address and backoff seed. */
7122 __tg3_set_mac_addr(tp, 0);
7124 /* MTU + ethernet header + FCS + optional VLAN tag */
7125 tw32(MAC_RX_MTU_SIZE,
7126 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
7128 /* The slot time is changed by tg3_setup_phy if we
7129 * run at gigabit with half duplex.
7131 tw32(MAC_TX_LENGTHS,
7132 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7133 (6 << TX_LENGTHS_IPG_SHIFT) |
7134 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7136 /* Receive rules. */
7137 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7138 tw32(RCVLPC_CONFIG, 0x0181);
7140 /* Calculate the RDMAC_MODE setting early; we need it to determine
7141 * the RCVLPC_STATE_ENABLE mask.
7143 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7144 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7145 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7146 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7147 RDMAC_MODE_LNGREAD_ENAB);
7149 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7150 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7151 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
7152 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7153 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7154 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7156 /* If statement applies to 5705 and 5750 PCI devices only */
7157 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7158 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7159 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
7160 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
7161 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7162 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7163 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7164 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7165 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7169 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7170 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7172 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7173 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
7175 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7176 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
7177 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
7179 /* Receive/send statistics. */
7180 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7181 val = tr32(RCVLPC_STATS_ENABLE);
7182 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7183 tw32(RCVLPC_STATS_ENABLE, val);
7184 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7185 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7186 val = tr32(RCVLPC_STATS_ENABLE);
7187 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7188 tw32(RCVLPC_STATS_ENABLE, val);
7190 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7192 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7193 tw32(SNDDATAI_STATSENAB, 0xffffff);
7194 tw32(SNDDATAI_STATSCTRL,
7195 (SNDDATAI_SCTRL_ENABLE |
7196 SNDDATAI_SCTRL_FASTUPD));
7198 /* Setup host coalescing engine. */
7199 tw32(HOSTCC_MODE, 0);
7200 for (i = 0; i < 2000; i++) {
7201 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7206 __tg3_set_coalesce(tp, &tp->coal);
7208 /* set status block DMA address */
7209 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7210 ((u64) tp->status_mapping >> 32));
7211 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7212 ((u64) tp->status_mapping & 0xffffffff));
7214 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7215 /* Status/statistics block address. See tg3_timer,
7216 * the tg3_periodic_fetch_stats call there, and
7217 * tg3_get_stats to see how this works for 5705/5750 chips.
7219 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7220 ((u64) tp->stats_mapping >> 32));
7221 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7222 ((u64) tp->stats_mapping & 0xffffffff));
7223 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7224 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7227 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7229 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7230 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7231 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7232 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7234 /* Clear statistics/status block in chip, and status block in ram. */
7235 for (i = NIC_SRAM_STATS_BLK;
7236 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7238 tg3_write_mem(tp, i, 0);
7241 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
7243 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7244 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7245 /* reset to prevent losing 1st rx packet intermittently */
7246 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7250 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7251 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
7254 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
7255 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
7256 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7257 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7258 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7259 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7260 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7263 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
7264 * If TG3_FLG2_IS_NIC is zero, we should read the
7265 * register to preserve the GPIO settings for LOMs. The GPIOs,
7266 * whether used as inputs or outputs, are set by boot code after reset.
7269 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
7272 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7273 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7274 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
7276 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7277 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7278 GRC_LCLCTRL_GPIO_OUTPUT3;
7280 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7281 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7283 tp->grc_local_ctrl &= ~gpio_mask;
7284 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7286 /* GPIO1 must be driven high for eeprom write protect */
7287 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7288 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7289 GRC_LCLCTRL_GPIO_OUTPUT1);
7291 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7294 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
7296 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7297 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7301 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7302 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7303 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7304 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7305 WDMAC_MODE_LNGREAD_ENAB);
7307 /* If statement applies to 5705 and 5750 PCI devices only */
7308 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7309 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7310 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7311 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
7312 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7313 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7315 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7316 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7317 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7318 val |= WDMAC_MODE_RX_ACCEL;
7322 /* Enable host coalescing bug fix */
7323 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
7324 val |= WDMAC_MODE_STATUS_TAG_FIX;
7326 tw32_f(WDMAC_MODE, val);
7329 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7332 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7334 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
7335 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7336 pcix_cmd |= PCI_X_CMD_READ_2K;
7337 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7338 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7339 pcix_cmd |= PCI_X_CMD_READ_2K;
7341 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7345 tw32_f(RDMAC_MODE, rdmac_mode);
7348 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7349 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7350 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
7352 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7354 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7356 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7358 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7359 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7360 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7361 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
7362 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7363 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
7364 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7365 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7367 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7368 err = tg3_load_5701_a0_firmware_fix(tp);
7373 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7374 err = tg3_load_tso_firmware(tp);
7379 tp->tx_mode = TX_MODE_ENABLE;
7380 tw32_f(MAC_TX_MODE, tp->tx_mode);
7383 tp->rx_mode = RX_MODE_ENABLE;
7384 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
7385 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7387 tw32_f(MAC_RX_MODE, tp->rx_mode);
7390 tw32(MAC_LED_CTRL, tp->led_ctrl);
7392 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
7393 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7394 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7397 tw32_f(MAC_RX_MODE, tp->rx_mode);
7400 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7401 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7402 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7403 /* Set drive transmission level to 1.2V */
7404 /* only if the signal pre-emphasis bit is not set */
7405 val = tr32(MAC_SERDES_CFG);
7408 tw32(MAC_SERDES_CFG, val);
7410 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7411 tw32(MAC_SERDES_CFG, 0x616000);
7414 /* Prevent the chip from dropping frames when flow control is enabled.
7417 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7419 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7420 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7421 /* Use hardware link auto-negotiation */
7422 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7425 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7426 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7429 tmp = tr32(SERDES_RX_CTRL);
7430 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7431 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7432 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7433 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7436 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7437 if (tp->link_config.phy_is_low_power) {
7438 tp->link_config.phy_is_low_power = 0;
7439 tp->link_config.speed = tp->link_config.orig_speed;
7440 tp->link_config.duplex = tp->link_config.orig_duplex;
7441 tp->link_config.autoneg = tp->link_config.orig_autoneg;
7444 err = tg3_setup_phy(tp, 0);
7448 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7449 !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) {
7452 /* Clear CRC stats. */
7453 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7454 tg3_writephy(tp, MII_TG3_TEST1,
7455 tmp | MII_TG3_TEST1_CRC_EN);
7456 tg3_readphy(tp, 0x14, &tmp);
7461 __tg3_set_rx_mode(tp->dev);
7463 /* Initialize receive rules. */
7464 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
7465 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7466 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
7467 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7469 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7470 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7474 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7478 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
7480 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
7482 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
7484 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
7486 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
7488 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
7490 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
7492 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
7494 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
7496 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
7498 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
7500 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
7502 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
7504 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
7512 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7513 /* Write our heartbeat update interval to APE. */
7514 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7515 APE_HOST_HEARTBEAT_INT_DISABLE);
7517 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7522 /* Called at device open time to get the chip ready for
7523 * packet processing. Invoked with tp->lock held.
7525 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7527 tg3_switch_clocks(tp);
7529 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7531 return tg3_reset_hw(tp, reset_phy);
7534 #define TG3_STAT_ADD32(PSTAT, REG) \
7535 do { u32 __val = tr32(REG); \
7536 (PSTAT)->low += __val; \
7537 if ((PSTAT)->low < __val) \
7538 (PSTAT)->high += 1; \
7539 } while (0)
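/* Editor's sketch: why the "(PSTAT)->low < __val" test above detects
 * carry.  Unsigned 32-bit addition wraps modulo 2^32, and the wrapped
 * sum is smaller than the addend exactly when overflow occurred:
 *
 *     low = 0xfffffff0, __val = 0x20
 *     low += __val  ->  low = 0x00000010, and 0x10 < 0x20  ->  carry
 *
 * A function-style equivalent (hypothetical, for illustration only):
 */
#if 0
static void example_stat_add32(tg3_stat64_t *stat, u32 val)
{
	stat->low += val;
	if (stat->low < val)		/* the 32-bit sum wrapped */
		stat->high += 1;	/* propagate the carry */
}
#endif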
7541 static void tg3_periodic_fetch_stats(struct tg3 *tp)
7543 struct tg3_hw_stats *sp = tp->hw_stats;
7545 if (!netif_carrier_ok(tp->dev))
7548 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7549 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7550 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7551 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7552 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7553 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7554 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7555 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7556 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7557 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7558 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7559 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7560 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7562 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7563 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7564 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7565 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7566 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7567 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7568 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7569 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7570 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7571 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7572 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7573 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7574 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7575 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
7577 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7578 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7579 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
7582 static void tg3_timer(unsigned long __opaque)
7584 struct tg3 *tp = (struct tg3 *) __opaque;
7589 spin_lock(&tp->lock);
7591 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7592 /* All of this garbage is needed because, when using non-tagged
7593 * IRQ status, the mailbox/status_block protocol the chip
7594 * uses with the CPU is race prone.
7596 if (tp->hw_status->status & SD_STATUS_UPDATED) {
7597 tw32(GRC_LOCAL_CTRL,
7598 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7600 tw32(HOSTCC_MODE, tp->coalesce_mode |
7601 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
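/* Editor's note on the two writes above: if the status block already
 * claims an update (SD_STATUS_UPDATED) that apparently never reached
 * us as an interrupt, GRC_LCLCTRL_SETINT re-asserts the interrupt
 * line so the handler runs; otherwise HOSTCC_MODE_NOW asks the
 * coalescing engine for an immediate status block DMA, so a lost
 * update cannot stall progress.
 */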
7604 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7605 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
7606 spin_unlock(&tp->lock);
7607 schedule_work(&tp->reset_task);
7612 /* This part only runs once per second. */
7613 if (!--tp->timer_counter) {
7614 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7615 tg3_periodic_fetch_stats(tp);
7617 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7621 mac_stat = tr32(MAC_STATUS);
7624 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
7625 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
7627 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
7631 tg3_setup_phy(tp, 0);
7632 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
7633 u32 mac_stat = tr32(MAC_STATUS);
7636 if (netif_carrier_ok(tp->dev) &&
7637 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
7640 if (!netif_carrier_ok(tp->dev) &&
7641 (mac_stat & (MAC_STATUS_PCS_SYNCED |
7642 MAC_STATUS_SIGNAL_DET))) {
7646 if (!tp->serdes_counter) {
7649 ~MAC_MODE_PORT_MODE_MASK));
7651 tw32_f(MAC_MODE, tp->mac_mode);
7654 tg3_setup_phy(tp, 0);
7656 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
7657 tg3_serdes_parallel_detect(tp);
7659 tp->timer_counter = tp->timer_multiplier;
7662 /* Heartbeat is only sent once every 2 seconds.
7664 * The heartbeat is to tell the ASF firmware that the host
7665 * driver is still alive. In the event that the OS crashes,
7666 * ASF needs to reset the hardware to free up the FIFO space
7667 * that may be filled with rx packets destined for the host.
7668 * If the FIFO is full, ASF will no longer function properly.
7670 * Unintended resets have been reported on real time kernels
7671 * where the timer doesn't run on time. Netpoll will also have the same problem.
7674 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
7675 * to check the ring condition when the heartbeat is expiring
7676 * before doing the reset. This will prevent most unintended
7679 if (!--tp->asf_counter) {
7680 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
7681 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
7682 tg3_wait_for_event_ack(tp);
7684 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
7685 FWCMD_NICDRV_ALIVE3);
7686 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
7687 /* 5 second timeout */
7688 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
7690 tg3_generate_fw_event(tp);
7692 tp->asf_counter = tp->asf_multiplier;
7695 spin_unlock(&tp->lock);
7698 tp->timer.expires = jiffies + tp->timer_offset;
7699 add_timer(&tp->timer);
7702 static int tg3_request_irq(struct tg3 *tp)
7705 unsigned long flags;
7706 struct net_device *dev = tp->dev;
7708 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7709 fn = tg3_msi;
7710 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7711 fn = tg3_msi_1shot;
7712 flags = IRQF_SAMPLE_RANDOM;
7713 } else {
7714 fn = tg3_interrupt;
7715 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7716 fn = tg3_interrupt_tagged;
7717 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7719 return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7722 static int tg3_test_interrupt(struct tg3 *tp)
7724 struct net_device *dev = tp->dev;
7725 int err, i, intr_ok = 0;
7727 if (!netif_running(dev))
7730 tg3_disable_ints(tp);
7732 free_irq(tp->pdev->irq, dev);
7734 err = request_irq(tp->pdev->irq, tg3_test_isr,
7735 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
7739 tp->hw_status->status &= ~SD_STATUS_UPDATED;
7740 tg3_enable_ints(tp);
7742 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7745 for (i = 0; i < 5; i++) {
7746 u32 int_mbox, misc_host_ctrl;
7748 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
7750 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
7752 if ((int_mbox != 0) ||
7753 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
7761 tg3_disable_ints(tp);
7763 free_irq(tp->pdev->irq, dev);
7765 err = tg3_request_irq(tp);
7776 /* Returns 0 if the MSI test succeeds, or if the MSI test fails
7777 * but INTx mode is successfully restored.
7779 static int tg3_test_msi(struct tg3 *tp)
7781 struct net_device *dev = tp->dev;
7785 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
7788 /* Turn off SERR reporting in case MSI terminates with Master Abort.
7791 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7792 pci_write_config_word(tp->pdev, PCI_COMMAND,
7793 pci_cmd & ~PCI_COMMAND_SERR);
7795 err = tg3_test_interrupt(tp);
7797 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7802 /* other failures */
7806 /* MSI test failed, go back to INTx mode */
7807 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
7808 "switching to INTx mode. Please report this failure to "
7809 "the PCI maintainer and include system chipset information.\n",
7812 free_irq(tp->pdev->irq, dev);
7813 pci_disable_msi(tp->pdev);
7815 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7817 err = tg3_request_irq(tp);
7821 /* Need to reset the chip because the MSI cycle may have terminated
7822 * with Master Abort.
7824 tg3_full_lock(tp, 1);
7826 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7827 err = tg3_init_hw(tp, 1);
7829 tg3_full_unlock(tp);
7832 free_irq(tp->pdev->irq, dev);
7837 static int tg3_request_firmware(struct tg3 *tp)
7839 const __be32 *fw_data;
7841 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
7842 printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n",
7843 tp->dev->name, tp->fw_needed);
7847 fw_data = (void *)tp->fw->data;
7849 /* Firmware blob starts with version numbers, followed by
7850 * start address and _full_ length including BSS sections
7851 * (which must be longer than the actual data, of course).
7854 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
7855 if (tp->fw_len < (tp->fw->size - 12)) {
7856 printk(KERN_ERR "%s: bogus length %d in \"%s\"\n",
7857 tp->dev->name, tp->fw_len, tp->fw_needed);
7858 release_firmware(tp->fw);
7863 /* We no longer need firmware; we have it. */
7864 tp->fw_needed = NULL;
7868 static void tg3_ints_init(struct tg3 *tp)
7870 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
7871 /* All MSI-supporting chips should support tagged
7872 * status. Assert that this is the case.
7874 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7875 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
7876 "Not using MSI.\n", tp->dev->name);
7877 } else if (pci_enable_msi(tp->pdev) == 0) {
7880 msi_mode = tr32(MSGINT_MODE);
7881 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
7882 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
7887 static void tg3_ints_fini(struct tg3 *tp)
7889 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7890 pci_disable_msi(tp->pdev);
7891 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7895 static int tg3_open(struct net_device *dev)
7897 struct tg3 *tp = netdev_priv(dev);
7900 if (tp->fw_needed) {
7901 err = tg3_request_firmware(tp);
7902 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7906 printk(KERN_WARNING "%s: TSO capability disabled.\n",
7908 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
7909 } else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7910 printk(KERN_NOTICE "%s: TSO capability restored.\n",
7912 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
7916 netif_carrier_off(tp->dev);
7918 err = tg3_set_power_state(tp, PCI_D0);
7922 tg3_full_lock(tp, 0);
7924 tg3_disable_ints(tp);
7925 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
7927 tg3_full_unlock(tp);
7929 /* The placement of this call is tied
7930 * to the setup and use of Host TX descriptors.
7932 err = tg3_alloc_consistent(tp);
7938 napi_enable(&tp->napi);
7940 err = tg3_request_irq(tp);
7945 tg3_full_lock(tp, 0);
7947 err = tg3_init_hw(tp, 1);
7949 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7952 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7953 tp->timer_offset = HZ;
7955 tp->timer_offset = HZ / 10;
7957 BUG_ON(tp->timer_offset > HZ);
7958 tp->timer_counter = tp->timer_multiplier =
7959 (HZ / tp->timer_offset);
7960 tp->asf_counter = tp->asf_multiplier =
7961 ((HZ / tp->timer_offset) * 2);
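/* Editor's note on the arithmetic above: with TAGGED_STATUS the timer
 * fires once per second (offset = HZ, so the counters are 1 and 2);
 * otherwise it fires every HZ / 10 jiffies, and the multipliers of 10
 * and 20 still yield the once-per-second bookkeeping and the 2 second
 * ASF heartbeat handled in tg3_timer().
 */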
7963 init_timer(&tp->timer);
7964 tp->timer.expires = jiffies + tp->timer_offset;
7965 tp->timer.data = (unsigned long) tp;
7966 tp->timer.function = tg3_timer;
7969 tg3_full_unlock(tp);
7974 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7975 err = tg3_test_msi(tp);
7978 tg3_full_lock(tp, 0);
7979 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7981 tg3_full_unlock(tp);
7986 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7987 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
7988 u32 val = tr32(PCIE_TRANSACTION_CFG);
7990 tw32(PCIE_TRANSACTION_CFG,
7991 val | PCIE_TRANS_CFG_1SHOT_MSI);
7998 tg3_full_lock(tp, 0);
8000 add_timer(&tp->timer);
8001 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8002 tg3_enable_ints(tp);
8004 tg3_full_unlock(tp);
8006 netif_start_queue(dev);
8011 free_irq(tp->pdev->irq, dev);
8014 napi_disable(&tp->napi);
8016 tg3_free_consistent(tp);
8021 /*static*/ void tg3_dump_state(struct tg3 *tp)
8023 u32 val32, val32_2, val32_3, val32_4, val32_5;
8027 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
8028 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
8029 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
8033 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
8034 tr32(MAC_MODE), tr32(MAC_STATUS));
8035 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
8036 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
8037 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
8038 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
8039 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
8040 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
8042 /* Send data initiator control block */
8043 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
8044 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
8045 printk(" SNDDATAI_STATSCTRL[%08x]\n",
8046 tr32(SNDDATAI_STATSCTRL));
8048 /* Send data completion control block */
8049 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
8051 /* Send BD ring selector block */
8052 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
8053 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
8055 /* Send BD initiator control block */
8056 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
8057 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
8059 /* Send BD completion control block */
8060 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
8062 /* Receive list placement control block */
8063 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
8064 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
8065 printk(" RCVLPC_STATSCTRL[%08x]\n",
8066 tr32(RCVLPC_STATSCTRL));
8068 /* Receive data and receive BD initiator control block */
8069 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
8070 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
8072 /* Receive data completion control block */
8073 printk("DEBUG: RCVDCC_MODE[%08x]\n",
8076 /* Receive BD initiator control block */
8077 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
8078 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
8080 /* Receive BD completion control block */
8081 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
8082 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
8084 /* Receive list selector control block */
8085 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
8086 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
8088 /* Mbuf cluster free block */
8089 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
8090 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
8092 /* Host coalescing control block */
8093 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
8094 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
8095 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
8096 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8097 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8098 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
8099 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8100 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8101 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
8102 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
8103 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
8104 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
8106 /* Memory arbiter control block */
8107 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
8108 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
8110 /* Buffer manager control block */
8111 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
8112 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
8113 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
8114 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
8115 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
8116 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
8117 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
8118 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
8120 /* Read DMA control block */
8121 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
8122 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
8124 /* Write DMA control block */
8125 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
8126 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
8128 /* DMA completion block */
8129 printk("DEBUG: DMAC_MODE[%08x]\n",
8133 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
8134 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
8135 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
8136 tr32(GRC_LOCAL_CTRL));
8139 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
8140 tr32(RCVDBDI_JUMBO_BD + 0x0),
8141 tr32(RCVDBDI_JUMBO_BD + 0x4),
8142 tr32(RCVDBDI_JUMBO_BD + 0x8),
8143 tr32(RCVDBDI_JUMBO_BD + 0xc));
8144 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
8145 tr32(RCVDBDI_STD_BD + 0x0),
8146 tr32(RCVDBDI_STD_BD + 0x4),
8147 tr32(RCVDBDI_STD_BD + 0x8),
8148 tr32(RCVDBDI_STD_BD + 0xc));
8149 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
8150 tr32(RCVDBDI_MINI_BD + 0x0),
8151 tr32(RCVDBDI_MINI_BD + 0x4),
8152 tr32(RCVDBDI_MINI_BD + 0x8),
8153 tr32(RCVDBDI_MINI_BD + 0xc));
8155 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
8156 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
8157 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
8158 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
8159 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
8160 val32, val32_2, val32_3, val32_4);
8162 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
8163 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
8164 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
8165 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
8166 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
8167 val32, val32_2, val32_3, val32_4);
8169 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
8170 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
8171 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
8172 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
8173 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
8174 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
8175 val32, val32_2, val32_3, val32_4, val32_5);
8177 /* SW status block */
8178 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
8179 tp->hw_status->status,
8180 tp->hw_status->status_tag,
8181 tp->hw_status->rx_jumbo_consumer,
8182 tp->hw_status->rx_consumer,
8183 tp->hw_status->rx_mini_consumer,
8184 tp->hw_status->idx[0].rx_producer,
8185 tp->hw_status->idx[0].tx_consumer);
8187 /* SW statistics block */
8188 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
8189 ((u32 *)tp->hw_stats)[0],
8190 ((u32 *)tp->hw_stats)[1],
8191 ((u32 *)tp->hw_stats)[2],
8192 ((u32 *)tp->hw_stats)[3]);
8195 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
8196 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
8197 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
8198 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
8199 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
8201 /* NIC side send descriptors. */
8202 for (i = 0; i < 6; i++) {
8205 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
8206 + (i * sizeof(struct tg3_tx_buffer_desc));
8207 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
8209 readl(txd + 0x0), readl(txd + 0x4),
8210 readl(txd + 0x8), readl(txd + 0xc));
8213 /* NIC side RX descriptors. */
8214 for (i = 0; i < 6; i++) {
8217 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
8218 + (i * sizeof(struct tg3_rx_buffer_desc));
8219 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
8221 readl(rxd + 0x0), readl(rxd + 0x4),
8222 readl(rxd + 0x8), readl(rxd + 0xc));
8223 rxd += (4 * sizeof(u32));
8224 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
8226 readl(rxd + 0x0), readl(rxd + 0x4),
8227 readl(rxd + 0x8), readl(rxd + 0xc));
8230 for (i = 0; i < 6; i++) {
8233 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
8234 + (i * sizeof(struct tg3_rx_buffer_desc));
8235 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
8237 readl(rxd + 0x0), readl(rxd + 0x4),
8238 readl(rxd + 0x8), readl(rxd + 0xc));
8239 rxd += (4 * sizeof(u32));
8240 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
8242 readl(rxd + 0x0), readl(rxd + 0x4),
8243 readl(rxd + 0x8), readl(rxd + 0xc));
8248 static struct net_device_stats *tg3_get_stats(struct net_device *);
8249 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8251 static int tg3_close(struct net_device *dev)
8253 struct tg3 *tp = netdev_priv(dev);
8255 napi_disable(&tp->napi);
8256 cancel_work_sync(&tp->reset_task);
8258 netif_stop_queue(dev);
8260 del_timer_sync(&tp->timer);
8262 tg3_full_lock(tp, 1);
8267 tg3_disable_ints(tp);
8269 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8271 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8273 tg3_full_unlock(tp);
8275 free_irq(tp->pdev->irq, dev);
8279 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
8280 sizeof(tp->net_stats_prev));
8281 memcpy(&tp->estats_prev, tg3_get_estats(tp),
8282 sizeof(tp->estats_prev));
8284 tg3_free_consistent(tp);
8286 tg3_set_power_state(tp, PCI_D3hot);
8288 netif_carrier_off(tp->dev);
8293 static inline unsigned long get_stat64(tg3_stat64_t *val)
8297 #if (BITS_PER_LONG == 32)
8298 ret = val->low;
8299 #else
8300 ret = ((u64)val->high << 32) | ((u64)val->low);
8301 #endif
8305 static inline u64 get_estat64(tg3_stat64_t *val)
8307 return ((u64)val->high << 32) | ((u64)val->low);
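/* Editor's note: two accessors exist because struct net_device_stats
 * holds unsigned long, which is 32 bits on 32-bit hosts (hence the
 * BITS_PER_LONG special case above), while the ethtool statistics
 * below are u64 and always combine both halves of the counter.
 */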
8310 static unsigned long calc_crc_errors(struct tg3 *tp)
8312 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8314 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8315 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8316 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
8319 spin_lock_bh(&tp->lock);
8320 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
8321 tg3_writephy(tp, MII_TG3_TEST1,
8322 val | MII_TG3_TEST1_CRC_EN);
8323 tg3_readphy(tp, 0x14, &val);
8326 spin_unlock_bh(&tp->lock);
8328 tp->phy_crc_errors += val;
8330 return tp->phy_crc_errors;
8333 return get_stat64(&hw_stats->rx_fcs_errors);
8336 #define ESTAT_ADD(member) \
8337 estats->member = old_estats->member + \
8338 get_estat64(&hw_stats->member)
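/* Editor's sketch: for one member, ESTAT_ADD(rx_octets) expands to
 * the following, folding the snapshot saved in estats_prev at
 * tg3_close() time into the live hardware counter:
 */
#if 0
estats->rx_octets = old_estats->rx_octets +
		    get_estat64(&hw_stats->rx_octets);
#endif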
8340 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
8342 struct tg3_ethtool_stats *estats = &tp->estats;
8343 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
8344 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8349 ESTAT_ADD(rx_octets);
8350 ESTAT_ADD(rx_fragments);
8351 ESTAT_ADD(rx_ucast_packets);
8352 ESTAT_ADD(rx_mcast_packets);
8353 ESTAT_ADD(rx_bcast_packets);
8354 ESTAT_ADD(rx_fcs_errors);
8355 ESTAT_ADD(rx_align_errors);
8356 ESTAT_ADD(rx_xon_pause_rcvd);
8357 ESTAT_ADD(rx_xoff_pause_rcvd);
8358 ESTAT_ADD(rx_mac_ctrl_rcvd);
8359 ESTAT_ADD(rx_xoff_entered);
8360 ESTAT_ADD(rx_frame_too_long_errors);
8361 ESTAT_ADD(rx_jabbers);
8362 ESTAT_ADD(rx_undersize_packets);
8363 ESTAT_ADD(rx_in_length_errors);
8364 ESTAT_ADD(rx_out_length_errors);
8365 ESTAT_ADD(rx_64_or_less_octet_packets);
8366 ESTAT_ADD(rx_65_to_127_octet_packets);
8367 ESTAT_ADD(rx_128_to_255_octet_packets);
8368 ESTAT_ADD(rx_256_to_511_octet_packets);
8369 ESTAT_ADD(rx_512_to_1023_octet_packets);
8370 ESTAT_ADD(rx_1024_to_1522_octet_packets);
8371 ESTAT_ADD(rx_1523_to_2047_octet_packets);
8372 ESTAT_ADD(rx_2048_to_4095_octet_packets);
8373 ESTAT_ADD(rx_4096_to_8191_octet_packets);
8374 ESTAT_ADD(rx_8192_to_9022_octet_packets);
8376 ESTAT_ADD(tx_octets);
8377 ESTAT_ADD(tx_collisions);
8378 ESTAT_ADD(tx_xon_sent);
8379 ESTAT_ADD(tx_xoff_sent);
8380 ESTAT_ADD(tx_flow_control);
8381 ESTAT_ADD(tx_mac_errors);
8382 ESTAT_ADD(tx_single_collisions);
8383 ESTAT_ADD(tx_mult_collisions);
8384 ESTAT_ADD(tx_deferred);
8385 ESTAT_ADD(tx_excessive_collisions);
8386 ESTAT_ADD(tx_late_collisions);
8387 ESTAT_ADD(tx_collide_2times);
8388 ESTAT_ADD(tx_collide_3times);
8389 ESTAT_ADD(tx_collide_4times);
8390 ESTAT_ADD(tx_collide_5times);
8391 ESTAT_ADD(tx_collide_6times);
8392 ESTAT_ADD(tx_collide_7times);
8393 ESTAT_ADD(tx_collide_8times);
8394 ESTAT_ADD(tx_collide_9times);
8395 ESTAT_ADD(tx_collide_10times);
8396 ESTAT_ADD(tx_collide_11times);
8397 ESTAT_ADD(tx_collide_12times);
8398 ESTAT_ADD(tx_collide_13times);
8399 ESTAT_ADD(tx_collide_14times);
8400 ESTAT_ADD(tx_collide_15times);
8401 ESTAT_ADD(tx_ucast_packets);
8402 ESTAT_ADD(tx_mcast_packets);
8403 ESTAT_ADD(tx_bcast_packets);
8404 ESTAT_ADD(tx_carrier_sense_errors);
8405 ESTAT_ADD(tx_discards);
8406 ESTAT_ADD(tx_errors);
8408 ESTAT_ADD(dma_writeq_full);
8409 ESTAT_ADD(dma_write_prioq_full);
8410 ESTAT_ADD(rxbds_empty);
8411 ESTAT_ADD(rx_discards);
8412 ESTAT_ADD(rx_errors);
8413 ESTAT_ADD(rx_threshold_hit);
8415 ESTAT_ADD(dma_readq_full);
8416 ESTAT_ADD(dma_read_prioq_full);
8417 ESTAT_ADD(tx_comp_queue_full);
8419 ESTAT_ADD(ring_set_send_prod_index);
8420 ESTAT_ADD(ring_status_update);
8421 ESTAT_ADD(nic_irqs);
8422 ESTAT_ADD(nic_avoided_irqs);
8423 ESTAT_ADD(nic_tx_threshold_hit);
8428 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
8430 struct tg3 *tp = netdev_priv(dev);
8431 struct net_device_stats *stats = &tp->net_stats;
8432 struct net_device_stats *old_stats = &tp->net_stats_prev;
8433 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8438 stats->rx_packets = old_stats->rx_packets +
8439 get_stat64(&hw_stats->rx_ucast_packets) +
8440 get_stat64(&hw_stats->rx_mcast_packets) +
8441 get_stat64(&hw_stats->rx_bcast_packets);
8443 stats->tx_packets = old_stats->tx_packets +
8444 get_stat64(&hw_stats->tx_ucast_packets) +
8445 get_stat64(&hw_stats->tx_mcast_packets) +
8446 get_stat64(&hw_stats->tx_bcast_packets);
8448 stats->rx_bytes = old_stats->rx_bytes +
8449 get_stat64(&hw_stats->rx_octets);
8450 stats->tx_bytes = old_stats->tx_bytes +
8451 get_stat64(&hw_stats->tx_octets);
8453 stats->rx_errors = old_stats->rx_errors +
8454 get_stat64(&hw_stats->rx_errors);
8455 stats->tx_errors = old_stats->tx_errors +
8456 get_stat64(&hw_stats->tx_errors) +
8457 get_stat64(&hw_stats->tx_mac_errors) +
8458 get_stat64(&hw_stats->tx_carrier_sense_errors) +
8459 get_stat64(&hw_stats->tx_discards);
8461 stats->multicast = old_stats->multicast +
8462 get_stat64(&hw_stats->rx_mcast_packets);
8463 stats->collisions = old_stats->collisions +
8464 get_stat64(&hw_stats->tx_collisions);
8466 stats->rx_length_errors = old_stats->rx_length_errors +
8467 get_stat64(&hw_stats->rx_frame_too_long_errors) +
8468 get_stat64(&hw_stats->rx_undersize_packets);
8470 stats->rx_over_errors = old_stats->rx_over_errors +
8471 get_stat64(&hw_stats->rxbds_empty);
8472 stats->rx_frame_errors = old_stats->rx_frame_errors +
8473 get_stat64(&hw_stats->rx_align_errors);
8474 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
8475 get_stat64(&hw_stats->tx_discards);
8476 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
8477 get_stat64(&hw_stats->tx_carrier_sense_errors);
8479 stats->rx_crc_errors = old_stats->rx_crc_errors +
8480 calc_crc_errors(tp);
8482 stats->rx_missed_errors = old_stats->rx_missed_errors +
8483 get_stat64(&hw_stats->rx_discards);
8488 static inline u32 calc_crc(unsigned char *buf, int len)
8496 for (j = 0; j < len; j++) {
8499 for (k = 0; k < 8; k++) {
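/* Editor's sketch: the elided body above is, in all likelihood, the
 * standard bitwise little-endian CRC-32 (reflected polynomial
 * 0xedb88320, initial value 0xffffffff, final inversion) that
 * Ethernet drivers use for the multicast hash.  A self-contained
 * version (hypothetical name):
 */
#if 0
static inline u32 example_ether_crc32_le(const unsigned char *buf, int len)
{
	u32 reg = 0xffffffff;
	int j, k;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];			/* fold in the next byte */
		for (k = 0; k < 8; k++) {
			u32 lsb = reg & 1;

			reg >>= 1;
			if (lsb)
				reg ^= 0xedb88320;	/* reflected poly */
		}
	}
	return ~reg;
}
#endif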
8513 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8515 /* accept or reject all multicast frames */
8516 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8517 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8518 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8519 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8522 static void __tg3_set_rx_mode(struct net_device *dev)
8524 struct tg3 *tp = netdev_priv(dev);
8527 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8528 RX_MODE_KEEP_VLAN_TAG);
8530 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG flag clear.
8533 #if TG3_VLAN_TAG_USED
8535 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8536 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8538 /* By definition, VLAN is always disabled in this case.
8541 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8542 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8545 if (dev->flags & IFF_PROMISC) {
8546 /* Promiscuous mode. */
8547 rx_mode |= RX_MODE_PROMISC;
8548 } else if (dev->flags & IFF_ALLMULTI) {
8549 /* Accept all multicast. */
8550 tg3_set_multi(tp, 1);
8551 } else if (dev->mc_count < 1) {
8552 /* Reject all multicast. */
8553 tg3_set_multi(tp, 0);
8555 /* Accept one or more multicast(s). */
8556 struct dev_mc_list *mclist;
8558 u32 mc_filter[4] = { 0, };
8563 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
8564 i++, mclist = mclist->next) {
8566 crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
8567 bit = ~crc & 0x7f;
8568 regidx = (bit & 0x60) >> 5;
8569 bit &= 0x1f;
8570 mc_filter[regidx] |= (1 << bit);
8573 tw32(MAC_HASH_REG_0, mc_filter[0]);
8574 tw32(MAC_HASH_REG_1, mc_filter[1]);
8575 tw32(MAC_HASH_REG_2, mc_filter[2]);
8576 tw32(MAC_HASH_REG_3, mc_filter[3]);
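/* Editor's sketch: how the loop above maps a CRC onto the 128-bit
 * hash filter.  The low 7 bits of the inverted CRC pick one of 128
 * filter bits; bits 6:5 select one of the four 32-bit MAC_HASH_REG_x
 * registers and bits 4:0 the bit within it.  For example,
 * ~crc & 0x7f == 0x43 lands on bit 3 of mc_filter[2].
 * (Hypothetical helper, for illustration only:)
 */
#if 0
static void example_hash_position(u32 crc, u32 *regidx, u32 *bitpos)
{
	u32 bit = ~crc & 0x7f;		/* 128 hash bits in total */

	*regidx = (bit & 0x60) >> 5;	/* which of the 4 registers */
	*bitpos = bit & 0x1f;		/* which bit within it */
}
#endif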
8579 if (rx_mode != tp->rx_mode) {
8580 tp->rx_mode = rx_mode;
8581 tw32_f(MAC_RX_MODE, rx_mode);
8586 static void tg3_set_rx_mode(struct net_device *dev)
8588 struct tg3 *tp = netdev_priv(dev);
8590 if (!netif_running(dev))
8593 tg3_full_lock(tp, 0);
8594 __tg3_set_rx_mode(dev);
8595 tg3_full_unlock(tp);
8598 #define TG3_REGDUMP_LEN (32 * 1024)
8600 static int tg3_get_regs_len(struct net_device *dev)
8602 return TG3_REGDUMP_LEN;
8605 static void tg3_get_regs(struct net_device *dev,
8606 struct ethtool_regs *regs, void *_p)
8609 struct tg3 *tp = netdev_priv(dev);
8615 memset(p, 0, TG3_REGDUMP_LEN);
8617 if (tp->link_config.phy_is_low_power)
8620 tg3_full_lock(tp, 0);
8622 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
8623 #define GET_REG32_LOOP(base,len) \
8624 do { p = (u32 *)(orig_p + (base)); \
8625 for (i = 0; i < len; i += 4) \
8626 __GET_REG32((base) + i); \
8627 } while (0)
8628 #define GET_REG32_1(reg) \
8629 do { p = (u32 *)(orig_p + (reg)); \
8630 __GET_REG32((reg)); \
8631 } while (0)
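/* Editor's note: because every GET_REG32_LOOP/GET_REG32_1 invocation
 * first repositions p to orig_p + (base), the dump buffer mirrors the
 * hardware register offsets one-to-one; ranges that are never read
 * simply remain zero from the earlier memset().
 */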
8633 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
8634 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
8635 GET_REG32_LOOP(MAC_MODE, 0x4f0);
8636 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
8637 GET_REG32_1(SNDDATAC_MODE);
8638 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
8639 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
8640 GET_REG32_1(SNDBDC_MODE);
8641 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
8642 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
8643 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
8644 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
8645 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
8646 GET_REG32_1(RCVDCC_MODE);
8647 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
8648 GET_REG32_LOOP(RCVCC_MODE, 0x14);
8649 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
8650 GET_REG32_1(MBFREE_MODE);
8651 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
8652 GET_REG32_LOOP(MEMARB_MODE, 0x10);
8653 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
8654 GET_REG32_LOOP(RDMAC_MODE, 0x08);
8655 GET_REG32_LOOP(WDMAC_MODE, 0x08);
8656 GET_REG32_1(RX_CPU_MODE);
8657 GET_REG32_1(RX_CPU_STATE);
8658 GET_REG32_1(RX_CPU_PGMCTR);
8659 GET_REG32_1(RX_CPU_HWBKPT);
8660 GET_REG32_1(TX_CPU_MODE);
8661 GET_REG32_1(TX_CPU_STATE);
8662 GET_REG32_1(TX_CPU_PGMCTR);
8663 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
8664 GET_REG32_LOOP(FTQ_RESET, 0x120);
8665 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
8666 GET_REG32_1(DMAC_MODE);
8667 GET_REG32_LOOP(GRC_MODE, 0x4c);
8668 if (tp->tg3_flags & TG3_FLAG_NVRAM)
8669 GET_REG32_LOOP(NVRAM_CMD, 0x24);
8672 #undef GET_REG32_LOOP
8675 tg3_full_unlock(tp);
8678 static int tg3_get_eeprom_len(struct net_device *dev)
8680 struct tg3 *tp = netdev_priv(dev);
8682 return tp->nvram_size;
8685 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8687 struct tg3 *tp = netdev_priv(dev);
8690 u32 i, offset, len, b_offset, b_count;
8693 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
8696 if (tp->link_config.phy_is_low_power)
8699 offset = eeprom->offset;
8703 eeprom->magic = TG3_EEPROM_MAGIC;
8706 /* adjustments to start on the required 4-byte boundary */
8707 b_offset = offset & 3;
8708 b_count = 4 - b_offset;
8709 if (b_count > len) {
8710 /* i.e. offset=1 len=2 */
8713 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
8716 memcpy(data, ((char*)&val) + b_offset, b_count);
8719 eeprom->len += b_count;
8722 /* read bytes up to the last 4-byte boundary */
8723 pd = &data[eeprom->len];
8724 for (i = 0; i < (len - (len & 3)); i += 4) {
8725 ret = tg3_nvram_read_be32(tp, offset + i, &val);
8730 memcpy(pd + i, &val, 4);
8735 /* read last bytes not ending on a 4-byte boundary */
8736 pd = &data[eeprom->len];
8738 b_offset = offset + len - b_count;
8739 ret = tg3_nvram_read_be32(tp, b_offset, &val);
8742 memcpy(pd, &val, b_count);
8743 eeprom->len += b_count;
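/* Editor's worked example of the alignment handling above: a request
 * with offset = 1 and len = 2 gives b_offset = 1 and b_count = 3,
 * clamped to 2; one aligned word is read from offset 0 and bytes 1..2
 * of it are returned, leaving nothing for the middle and tail paths.
 * A sketch of the three-way split (hypothetical helper):
 */
#if 0
static void example_nvram_split(u32 offset, u32 len,
				u32 *head, u32 *mid, u32 *tail)
{
	u32 b_offset = offset & 3;
	u32 b_count = b_offset ? 4 - b_offset : 0;

	if (b_count > len)
		b_count = len;
	*head = b_count;	/* unaligned leading bytes */
	len -= b_count;
	*mid = len & ~3;	/* whole 4-byte words in the middle */
	*tail = len & 3;	/* unaligned trailing bytes */
}
#endif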
8748 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
8750 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8752 struct tg3 *tp = netdev_priv(dev);
8754 u32 offset, len, b_offset, odd_len;
8758 if (tp->link_config.phy_is_low_power)
8761 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
8762 eeprom->magic != TG3_EEPROM_MAGIC)
8765 offset = eeprom->offset;
8768 if ((b_offset = (offset & 3))) {
8769 /* adjustments to start on the required 4-byte boundary */
8770 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
8781 /* adjustments to end on the required 4-byte boundary */
8783 len = (len + 3) & ~3;
8784 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
8790 if (b_offset || odd_len) {
8791 buf = kmalloc(len, GFP_KERNEL);
8795 memcpy(buf, &start, 4);
8797 memcpy(buf+len-4, &end, 4);
8798 memcpy(buf + b_offset, data, eeprom->len);
8801 ret = tg3_nvram_write_block(tp, offset, len, buf);
8809 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8811 struct tg3 *tp = netdev_priv(dev);
8813 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
8814 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
8816 return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
8819 cmd->supported = (SUPPORTED_Autoneg);
8821 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8822 cmd->supported |= (SUPPORTED_1000baseT_Half |
8823 SUPPORTED_1000baseT_Full);
8825 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
8826 cmd->supported |= (SUPPORTED_100baseT_Half |
8827 SUPPORTED_100baseT_Full |
8828 SUPPORTED_10baseT_Half |
8829 SUPPORTED_10baseT_Full |
8830 SUPPORTED_TP);
8831 cmd->port = PORT_TP;
8833 cmd->supported |= SUPPORTED_FIBRE;
8834 cmd->port = PORT_FIBRE;
8837 cmd->advertising = tp->link_config.advertising;
8838 if (netif_running(dev)) {
8839 cmd->speed = tp->link_config.active_speed;
8840 cmd->duplex = tp->link_config.active_duplex;
8842 cmd->phy_address = PHY_ADDR;
8843 cmd->transceiver = XCVR_INTERNAL;
8844 cmd->autoneg = tp->link_config.autoneg;
8850 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8852 struct tg3 *tp = netdev_priv(dev);
8854 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
8855 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
8857 return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
8860 if (cmd->autoneg != AUTONEG_ENABLE &&
8861 cmd->autoneg != AUTONEG_DISABLE)
8864 if (cmd->autoneg == AUTONEG_DISABLE &&
8865 cmd->duplex != DUPLEX_FULL &&
8866 cmd->duplex != DUPLEX_HALF)
8869 if (cmd->autoneg == AUTONEG_ENABLE) {
8870 u32 mask = ADVERTISED_Autoneg |
8872 ADVERTISED_Asym_Pause;
8874 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8875 mask |= ADVERTISED_1000baseT_Half |
8876 ADVERTISED_1000baseT_Full;
8878 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
8879 mask |= ADVERTISED_100baseT_Half |
8880 ADVERTISED_100baseT_Full |
8881 ADVERTISED_10baseT_Half |
8882 ADVERTISED_10baseT_Full |
8883 ADVERTISED_TP;
8884 else
8885 mask |= ADVERTISED_FIBRE;
8887 if (cmd->advertising & ~mask)
8890 mask &= (ADVERTISED_1000baseT_Half |
8891 ADVERTISED_1000baseT_Full |
8892 ADVERTISED_100baseT_Half |
8893 ADVERTISED_100baseT_Full |
8894 ADVERTISED_10baseT_Half |
8895 ADVERTISED_10baseT_Full);
8897 cmd->advertising &= mask;
8899 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
8900 if (cmd->speed != SPEED_1000)
8903 if (cmd->duplex != DUPLEX_FULL)
8906 if (cmd->speed != SPEED_100 &&
8907 cmd->speed != SPEED_10)
8912 tg3_full_lock(tp, 0);
8914 tp->link_config.autoneg = cmd->autoneg;
8915 if (cmd->autoneg == AUTONEG_ENABLE) {
8916 tp->link_config.advertising = (cmd->advertising |
8917 ADVERTISED_Autoneg);
8918 tp->link_config.speed = SPEED_INVALID;
8919 tp->link_config.duplex = DUPLEX_INVALID;
8921 tp->link_config.advertising = 0;
8922 tp->link_config.speed = cmd->speed;
8923 tp->link_config.duplex = cmd->duplex;
8926 tp->link_config.orig_speed = tp->link_config.speed;
8927 tp->link_config.orig_duplex = tp->link_config.duplex;
8928 tp->link_config.orig_autoneg = tp->link_config.autoneg;
8930 if (netif_running(dev))
8931 tg3_setup_phy(tp, 1);
8933 tg3_full_unlock(tp);
8938 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
8940 struct tg3 *tp = netdev_priv(dev);
8942 strcpy(info->driver, DRV_MODULE_NAME);
8943 strcpy(info->version, DRV_MODULE_VERSION);
8944 strcpy(info->fw_version, tp->fw_ver);
8945 strcpy(info->bus_info, pci_name(tp->pdev));
8948 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8950 struct tg3 *tp = netdev_priv(dev);
8952 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
8953 device_can_wakeup(&tp->pdev->dev))
8954 wol->supported = WAKE_MAGIC;
8958 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
8959 device_can_wakeup(&tp->pdev->dev))
8960 wol->wolopts = WAKE_MAGIC;
8961 memset(&wol->sopass, 0, sizeof(wol->sopass));
8964 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8966 struct tg3 *tp = netdev_priv(dev);
8967 struct device *dp = &tp->pdev->dev;
8969 if (wol->wolopts & ~WAKE_MAGIC)
8971 if ((wol->wolopts & WAKE_MAGIC) &&
8972 !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
8975 spin_lock_bh(&tp->lock);
8976 if (wol->wolopts & WAKE_MAGIC) {
8977 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8978 device_set_wakeup_enable(dp, true);
8980 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8981 device_set_wakeup_enable(dp, false);
8983 spin_unlock_bh(&tp->lock);
8988 static u32 tg3_get_msglevel(struct net_device *dev)
8990 struct tg3 *tp = netdev_priv(dev);
8991 return tp->msg_enable;
8994 static void tg3_set_msglevel(struct net_device *dev, u32 value)
8996 struct tg3 *tp = netdev_priv(dev);
8997 tp->msg_enable = value;
9000 static int tg3_set_tso(struct net_device *dev, u32 value)
9002 struct tg3 *tp = netdev_priv(dev);
	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
		if (value)
			return -EINVAL;
		return 0;
	}
9009 if ((dev->features & NETIF_F_IPV6_CSUM) &&
	    (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)) {
		if (value) {
			dev->features |= NETIF_F_TSO6;
9013 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9014 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9015 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9016 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9017 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
				dev->features |= NETIF_F_TSO_ECN;
		} else
			dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
	}
	return ethtool_op_set_tso(dev, value);
}
9025 static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* The first read flushes any latched state; the second
		 * returns the live BMCR value.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
9059 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9061 struct tg3 *tp = netdev_priv(dev);
9063 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9064 ering->rx_mini_max_pending = 0;
9065 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
		ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
	else
		ering->rx_jumbo_max_pending = 0;
9070 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
9072 ering->rx_pending = tp->rx_pending;
9073 ering->rx_mini_pending = 0;
9074 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
	else
		ering->rx_jumbo_pending = 0;

	ering->tx_pending = tp->tx_pending;
}
9082 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9084 struct tg3 *tp = netdev_priv(dev);
9085 int irq_sync = 0, err = 0;
9087 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
9088 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
9089 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
9090 (ering->tx_pending <= MAX_SKB_FRAGS) ||
9091 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}
9101 tg3_full_lock(tp, irq_sync);
9103 tp->rx_pending = ering->rx_pending;
9105 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
9106 tp->rx_pending > 63)
9107 tp->rx_pending = 63;
9108 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
9109 tp->tx_pending = ering->tx_pending;
9111 if (netif_running(dev)) {
9112 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9113 err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
9126 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9128 struct tg3 *tp = netdev_priv(dev);
9130 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
9132 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
		epause->rx_pause = 1;
	else
		epause->rx_pause = 0;

	if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
		epause->tx_pause = 1;
	else
		epause->tx_pause = 0;
}
9143 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;

		if (epause->autoneg) {
			u32 newadv;
			struct phy_device *phydev;
9156 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
9158 if (epause->rx_pause) {
9159 if (epause->tx_pause)
					newadv = ADVERTISED_Pause;
				else
					newadv = ADVERTISED_Pause |
9163 ADVERTISED_Asym_Pause;
9164 } else if (epause->tx_pause) {
				newadv = ADVERTISED_Asym_Pause;
			} else
				newadv = 0;

			if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
				u32 oldadv = phydev->advertising &
					     (ADVERTISED_Pause |
					      ADVERTISED_Asym_Pause);
9173 if (oldadv != newadv) {
9174 phydev->advertising &=
9175 ~(ADVERTISED_Pause |
9176 ADVERTISED_Asym_Pause);
9177 phydev->advertising |= newadv;
					err = phy_start_aneg(phydev);
				}
			} else {
				tp->link_config.advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				tp->link_config.advertising |= newadv;
			}
		} else {
			if (epause->rx_pause)
				tp->link_config.flowctrl |= FLOW_CTRL_RX;
			else
				tp->link_config.flowctrl &= ~FLOW_CTRL_RX;

			if (epause->tx_pause)
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
			else
				tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
			if (netif_running(dev))
				tg3_setup_flow_control(tp, 0, 0);
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);
9210 if (epause->autoneg)
			tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
		else
			tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
9223 if (netif_running(dev)) {
9224 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
9236 static u32 tg3_get_rx_csum(struct net_device *dev)
9238 struct tg3 *tp = netdev_priv(dev);
9239 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
9242 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
		if (data != 0)
			return -EINVAL;
		return 0;
	}

	spin_lock_bh(&tp->lock);
	if (data)
		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
	spin_unlock_bh(&tp->lock);

	return 0;
}
9262 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
		if (data != 0)
			return -EINVAL;
		return 0;
	}

	if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
		ethtool_op_set_tx_ipv6_csum(dev, data);
	else
		ethtool_op_set_tx_csum(dev, data);

	return 0;
}
static int tg3_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return TG3_NUM_TEST;
	case ETH_SS_STATS:
		return TG3_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}
static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
		break;
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}
}
static int tg3_phys_id(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	if (!netif_running(tp->dev))
		return -EAGAIN;

	if (data == 0)
		data = UINT_MAX / 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
9320 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9321 LED_CTRL_1000MBPS_ON |
9322 LED_CTRL_100MBPS_ON |
9323 LED_CTRL_10MBPS_ON |
9324 LED_CTRL_TRAFFIC_OVERRIDE |
9325 LED_CTRL_TRAFFIC_BLINK |
					   LED_CTRL_TRAFFIC_LED);
		else
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
					   LED_CTRL_TRAFFIC_OVERRIDE);

		if (msleep_interruptible(500))
			break;
	}
	tw32(MAC_LED_CTRL, tp->led_ctrl);
	return 0;
}
static void tg3_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
9346 #define NVRAM_TEST_SIZE 0x100
9347 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
9348 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
9349 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
9350 #define NVRAM_SELFBOOT_HW_SIZE 0x20
9351 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;
9365 if (magic == TG3_EEPROM_MAGIC)
9366 size = NVRAM_TEST_SIZE;
9367 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
9368 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
9369 TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			default:
				return 0;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;
	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;
9403 /* Selfboot format */
9404 magic = be32_to_cpu(buf[0]);
9405 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
9406 TG3_EEPROM_MAGIC_FW) {
9407 u8 *buf8 = (u8 *) buf, csum8 = 0;
9409 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
9410 TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4;
			     i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}
9430 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
9431 TG3_EEPROM_MAGIC_HW) {
9432 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
9433 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
9434 u8 *buf8 = (u8 *) buf;
9436 /* Separate the parity bits and the data bytes. */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			} else if (i == 16) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}

		err = 0;
		goto out;
	}
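	/* Worked example of the odd-parity check above (byte values are
	 * hypothetical): data[i] == 0x29 has hweight8() == 3, an odd
	 * population count, so its stored parity bit must be 0; data[i] ==
	 * 0x33 has hweight8() == 4, so the parity bit must be 1.  Each
	 * data/parity pair must carry an odd total number of set bits, and
	 * either mismatch takes the goto out path with err == -EIO.
	 */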
9474 /* Bootstrap checksum at offset 0x10 */
9475 csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != be32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != be32_to_cpu(buf[0xfc/4]))
		goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
9491 #define TG3_SERDES_TIMEOUT_SEC 2
9492 #define TG3_COPPER_TIMEOUT_SEC 6
static int tg3_test_link(struct tg3 *tp)
{
	int i, max;

	if (!netif_running(tp->dev))
		return -ENODEV;

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		max = TG3_SERDES_TIMEOUT_SEC;
	else
		max = TG3_COPPER_TIMEOUT_SEC;

	for (i = 0; i < max; i++) {
		if (netif_carrier_ok(tp->dev))
			return 0;

		if (msleep_interruptible(1000))
			break;
	}

	return -EIO;
}
9517 /* Only test the commonly used registers */
9518 static int tg3_test_registers(struct tg3 *tp)
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
9525 #define TG3_FL_5705 0x1
9526 #define TG3_FL_NOT_5705 0x2
9527 #define TG3_FL_NOT_5788 0x4
#define TG3_FL_NOT_5750		0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
9533 { MAC_MODE, TG3_FL_NOT_5705,
9534 0x00000000, 0x00ef6f8c },
9535 { MAC_MODE, TG3_FL_5705,
9536 0x00000000, 0x01ef6b8c },
9537 { MAC_STATUS, TG3_FL_NOT_5705,
9538 0x03800107, 0x00000000 },
9539 { MAC_STATUS, TG3_FL_5705,
9540 0x03800100, 0x00000000 },
9541 { MAC_ADDR_0_HIGH, 0x0000,
9542 0x00000000, 0x0000ffff },
9543 { MAC_ADDR_0_LOW, 0x0000,
9544 0x00000000, 0xffffffff },
9545 { MAC_RX_MTU_SIZE, 0x0000,
9546 0x00000000, 0x0000ffff },
9547 { MAC_TX_MODE, 0x0000,
9548 0x00000000, 0x00000070 },
9549 { MAC_TX_LENGTHS, 0x0000,
9550 0x00000000, 0x00003fff },
9551 { MAC_RX_MODE, TG3_FL_NOT_5705,
9552 0x00000000, 0x000007fc },
9553 { MAC_RX_MODE, TG3_FL_5705,
9554 0x00000000, 0x000007dc },
9555 { MAC_HASH_REG_0, 0x0000,
9556 0x00000000, 0xffffffff },
9557 { MAC_HASH_REG_1, 0x0000,
9558 0x00000000, 0xffffffff },
9559 { MAC_HASH_REG_2, 0x0000,
9560 0x00000000, 0xffffffff },
9561 { MAC_HASH_REG_3, 0x0000,
9562 0x00000000, 0xffffffff },
9564 /* Receive Data and Receive BD Initiator Control Registers. */
9565 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
9566 0x00000000, 0xffffffff },
9567 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
9568 0x00000000, 0xffffffff },
9569 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
9570 0x00000000, 0x00000003 },
9571 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
9572 0x00000000, 0xffffffff },
9573 { RCVDBDI_STD_BD+0, 0x0000,
9574 0x00000000, 0xffffffff },
9575 { RCVDBDI_STD_BD+4, 0x0000,
9576 0x00000000, 0xffffffff },
9577 { RCVDBDI_STD_BD+8, 0x0000,
9578 0x00000000, 0xffff0002 },
9579 { RCVDBDI_STD_BD+0xc, 0x0000,
9580 0x00000000, 0xffffffff },
9582 /* Receive BD Initiator Control Registers. */
9583 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
9584 0x00000000, 0xffffffff },
9585 { RCVBDI_STD_THRESH, TG3_FL_5705,
9586 0x00000000, 0x000003ff },
9587 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
9588 0x00000000, 0xffffffff },
9590 /* Host Coalescing Control Registers. */
9591 { HOSTCC_MODE, TG3_FL_NOT_5705,
9592 0x00000000, 0x00000004 },
9593 { HOSTCC_MODE, TG3_FL_5705,
9594 0x00000000, 0x000000f6 },
9595 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
9596 0x00000000, 0xffffffff },
9597 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
9598 0x00000000, 0x000003ff },
9599 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
9600 0x00000000, 0xffffffff },
9601 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
9602 0x00000000, 0x000003ff },
9603 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
9604 0x00000000, 0xffffffff },
9605 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9606 0x00000000, 0x000000ff },
9607 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
9608 0x00000000, 0xffffffff },
9609 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9610 0x00000000, 0x000000ff },
9611 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
9612 0x00000000, 0xffffffff },
9613 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
9614 0x00000000, 0xffffffff },
9615 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9616 0x00000000, 0xffffffff },
9617 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9618 0x00000000, 0x000000ff },
9619 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9620 0x00000000, 0xffffffff },
9621 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9622 0x00000000, 0x000000ff },
9623 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
9624 0x00000000, 0xffffffff },
9625 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
9626 0x00000000, 0xffffffff },
9627 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
9628 0x00000000, 0xffffffff },
9629 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
9630 0x00000000, 0xffffffff },
9631 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
9632 0x00000000, 0xffffffff },
9633 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
9634 0xffffffff, 0x00000000 },
9635 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
9636 0xffffffff, 0x00000000 },
9638 /* Buffer Manager Control Registers. */
9639 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
9640 0x00000000, 0x007fff80 },
9641 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
9642 0x00000000, 0x007fffff },
9643 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
9644 0x00000000, 0x0000003f },
9645 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
9646 0x00000000, 0x000001ff },
9647 { BUFMGR_MB_HIGH_WATER, 0x0000,
9648 0x00000000, 0x000001ff },
9649 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
9650 0xffffffff, 0x00000000 },
9651 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
9652 0xffffffff, 0x00000000 },
9654 /* Mailbox Registers */
9655 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
9656 0x00000000, 0x000001ff },
9657 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
9658 0x00000000, 0x000001ff },
9659 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
9660 0x00000000, 0x000007ff },
9661 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
9662 0x00000000, 0x000001ff },
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		is_5705 = 1;
		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			is_5750 = 1;
	}
	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;
9688 offset = (u32) reg_tbl[i].offset;
9689 read_mask = reg_tbl[i].read_mask;
9690 write_mask = reg_tbl[i].write_mask;
9692 /* Save the original register content */
9693 save_val = tr32(offset);
9695 /* Determine the read-only value. */
9696 read_val = save_val & read_mask;
		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;
		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		printk(KERN_ERR PFX "Register test failed at offset %x\n",
		       offset);
	tw32(offset, save_val);
	return -EIO;
}
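/* A worked example of the mask-driven test above, using a real entry from
 * reg_tbl: { MAC_ADDR_0_HIGH, 0x0000, 0x00000000, 0x0000ffff } has an empty
 * read_mask, so read_val is 0, and a write_mask covering the low 16 bits.
 * Writing 0 must read back with every write_mask bit clear; writing
 * read_mask | write_mask (0x0000ffff) must read back with all of them set.
 * Bits outside both masks (the upper 16 bits here) are deliberately ignored,
 * since the hardware may treat them as reserved or self-clearing.
 */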
static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
	int i;
	u32 j;

	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
		for (j = 0; j < len; j += 4) {
			u32 val;

			tg3_write_mem(tp, offset + j, test_pattern[i]);
			tg3_read_mem(tp, offset + j, &val);
			if (val != test_pattern[i])
				return -EIO;
		}
	}
	return 0;
}
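/* The patterns above exercise each memory cell in both states: all-zeros,
 * all-ones, and 0xaa55a55a, whose alternating-bit layout helps catch shorts
 * between adjacent cells that the two uniform patterns would miss.  This is
 * a quick sanity check rather than an exhaustive march test.
 */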
9757 static int tg3_test_memory(struct tg3 *tp)
9759 static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
9763 { 0x00000000, 0x00b50},
9764 { 0x00002000, 0x1c000},
9765 { 0xffffffff, 0x00000}
9766 }, mem_tbl_5705[] = {
9767 { 0x00000100, 0x0000c},
9768 { 0x00000200, 0x00008},
9769 { 0x00004000, 0x00800},
9770 { 0x00006000, 0x01000},
9771 { 0x00008000, 0x02000},
9772 { 0x00010000, 0x0e000},
9773 { 0xffffffff, 0x00000}
9774 }, mem_tbl_5755[] = {
9775 { 0x00000200, 0x00008},
9776 { 0x00004000, 0x00800},
9777 { 0x00006000, 0x00800},
9778 { 0x00008000, 0x02000},
9779 { 0x00010000, 0x0c000},
9780 { 0xffffffff, 0x00000}
9781 }, mem_tbl_5906[] = {
9782 { 0x00000200, 0x00008},
9783 { 0x00004000, 0x00400},
9784 { 0x00006000, 0x00400},
9785 { 0x00008000, 0x01000},
9786 { 0x00010000, 0x01000},
9787 { 0xffffffff, 0x00000}
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;
9793 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
9794 mem_tbl = mem_tbl_5755;
9795 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9796 mem_tbl = mem_tbl_5906;
9797 else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
		    mem_tbl[i].len)) != 0)
			break;
	}

	return err;
}
9811 #define TG3_MAC_LOOPBACK 0
9812 #define TG3_PHY_LOOPBACK 1
9814 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 desc_idx;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
9825 if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
			return 0;
9833 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
9834 MAC_MODE_PORT_INT_LPBACK;
9835 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
9836 mac_mode |= MAC_MODE_LINK_POLARITY;
9837 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		u32 val;

		if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
			tg3_phy_fet_toggle_apd(tp, false);
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
		} else
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

		tg3_phy_toggle_automdix(tp, 0);

		tg3_writephy(tp, MII_BMCR, val);
		udelay(40);
9856 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
9857 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
9858 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9859 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x1800);
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		} else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
9864 /* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
9871 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
9872 mac_mode &= ~MAC_MODE_LINK_POLARITY;
9873 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
9874 mac_mode |= MAC_MODE_LINK_POLARITY;
9875 tg3_writephy(tp, MII_TG3_EXT_CTRL,
9876 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);
	} else {
		return -EINVAL;
	}

	err = -EIO;

	tx_len = 1514;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + 4);
9896 for (i = 14; i < tx_len; i++)
9897 tx_data[i] = (u8) (i & 0xff);
	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	udelay(10);

	rx_start_idx = tp->hw_status->idx[0].rx_producer;

	num_pkts = 0;

	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

	tp->tx_prod++;
	num_pkts++;

	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
		     tp->tx_prod);
	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

	udelay(10);
	/* 250 usec to allow enough time on some 10/100 Mbps devices. */
	for (i = 0; i < 25; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       HOSTCC_MODE_NOW);

		udelay(10);

		tx_idx = tp->hw_status->idx[0].tx_consumer;
		rx_idx = tp->hw_status->idx[0].rx_producer;
		if ((tx_idx == tp->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);
	if (tx_idx != tp->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	desc = &tp->rx_rcb[rx_start_idx];
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
		goto out;

	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;

	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;
9958 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
9960 map = pci_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
9961 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}
	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */

out:
	return err;
}
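/* The frame exercised above is tx_len (1514) bytes: the NIC's own MAC
 * address as destination (bytes 0-5), zeros through the source address and
 * EtherType area (bytes 6-13), then a ramp payload where byte i holds
 * (i & 0xff).  Re-checking the ramp on the looped-back copy detects
 * corruption anywhere along the DMA/MAC/PHY path.
 */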
9974 #define TG3_MAC_LOOPBACK_FAILED 1
9975 #define TG3_PHY_LOOPBACK_FAILED 2
9976 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
9977 TG3_PHY_LOOPBACK_FAILED)
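/* The loopback result is a bitmask rather than a boolean so that
 * tg3_self_test() can record in data[4] exactly which path failed: e.g. a
 * value of 2 (TG3_PHY_LOOPBACK_FAILED) means the MAC-level test passed but
 * the PHY-level test did not.
 */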
static int tg3_test_loopback(struct tg3 *tp)
{
	int err = 0;
	u32 cpmuctrl = 0;

	if (!netif_running(tp->dev))
		return TG3_LOOPBACK_FAILED;

	err = tg3_reset_hw(tp, 1);
	if (err)
		return TG3_LOOPBACK_FAILED;
9991 /* Turn off gphy autopowerdown. */
9992 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
9993 tg3_phy_toggle_apd(tp, false);
	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
		int i;
		u32 status;

		tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);

		/* Wait for up to 40 microseconds to acquire lock. */
		for (i = 0; i < 4; i++) {
			status = tr32(TG3_CPMU_MUTEX_GNT);
			if (status == CPMU_MUTEX_GNT_DRIVER)
				break;

			udelay(10);
		}

		if (status != CPMU_MUTEX_GNT_DRIVER)
			return TG3_LOOPBACK_FAILED;
10012 /* Turn off link-based power management. */
10013 cpmuctrl = tr32(TG3_CPMU_CTRL);
10014 tw32(TG3_CPMU_CTRL,
10015 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
10016 CPMU_CTRL_LINK_AWARE_MODE));
10019 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
10020 err |= TG3_MAC_LOOPBACK_FAILED;
10022 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10023 tw32(TG3_CPMU_CTRL, cpmuctrl);
10025 /* Release the mutex */
10026 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
10029 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
10030 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
10031 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
			err |= TG3_PHY_LOOPBACK_FAILED;
	}
10035 /* Re-enable gphy autopowerdown. */
10036 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);

	return err;
}
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
10060 if (etest->flags & ETH_TEST_FL_OFFLINE) {
10061 int err, err2 = 0, irq_sync = 0;
		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);
		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;
		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
10120 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;
		return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = PHY_ADDR;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;
10140 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10141 break; /* We have no PHY */
		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;
10146 spin_lock_bh(&tp->lock);
10147 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
10148 spin_unlock_bh(&tp->lock);
		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;
10165 spin_lock_bh(&tp->lock);
10166 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
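/* For reference, a minimal user-space sketch of driving the MII ioctls
 * handled above (the interface name is hypothetical, and this file is not
 * part of the driver):
 *
 *	struct ifreq ifr = {0};
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(fd, SIOCGMIIPHY, &ifr);	// fills mii->phy_id
 *	mii->reg_num = MII_BMSR;	// PHY status register
 *	ioctl(fd, SIOCGMIIREG, &ifr);	// value returned in mii->val_out
 *
 * SIOCSMIIREG follows the same pattern but, as enforced above, requires
 * CAP_NET_ADMIN.
 */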
10178 #if TG3_VLAN_TAG_USED
10179 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
10181 struct tg3 *tp = netdev_priv(dev);
	if (!netif_running(dev)) {
		tp->vlgrp = grp;
		return;
	}

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 0);

	tp->vlgrp = grp;

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);
}
#endif
10203 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}
10211 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10213 struct tg3 *tp = netdev_priv(dev);
10214 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
10215 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
10217 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
10218 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
10219 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
10220 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
10221 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
10224 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
10225 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
10226 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
10227 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
10228 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
10229 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
10230 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
10231 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
10232 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* No rx interrupts will be generated if both are zero */
	if ((ec->rx_coalesce_usecs == 0) &&
	    (ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	/* No tx interrupts will be generated if both are zero */
	if ((ec->tx_coalesce_usecs == 0) &&
	    (ec->tx_max_coalesced_frames == 0))
		return -EINVAL;
10246 /* Only copy relevant parameters, ignore all others. */
10247 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
10248 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
10249 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
10250 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
10251 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
10252 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
10253 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
10254 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
10255 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}

	return 0;
}
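/* These bounds are what a userland "ethtool -C" request ultimately runs
 * into; for example "ethtool -C eth0 rx-usecs 0 rx-frames 0" (interface
 * name hypothetical) is rejected with -EINVAL above, because a zero
 * usec/frame pair would leave the chip with no rx interrupt trigger at all.
 */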
10265 static const struct ethtool_ops tg3_ethtool_ops = {
10266 .get_settings = tg3_get_settings,
10267 .set_settings = tg3_set_settings,
10268 .get_drvinfo = tg3_get_drvinfo,
10269 .get_regs_len = tg3_get_regs_len,
10270 .get_regs = tg3_get_regs,
10271 .get_wol = tg3_get_wol,
10272 .set_wol = tg3_set_wol,
10273 .get_msglevel = tg3_get_msglevel,
10274 .set_msglevel = tg3_set_msglevel,
10275 .nway_reset = tg3_nway_reset,
10276 .get_link = ethtool_op_get_link,
10277 .get_eeprom_len = tg3_get_eeprom_len,
10278 .get_eeprom = tg3_get_eeprom,
10279 .set_eeprom = tg3_set_eeprom,
10280 .get_ringparam = tg3_get_ringparam,
10281 .set_ringparam = tg3_set_ringparam,
10282 .get_pauseparam = tg3_get_pauseparam,
10283 .set_pauseparam = tg3_set_pauseparam,
10284 .get_rx_csum = tg3_get_rx_csum,
10285 .set_rx_csum = tg3_set_rx_csum,
10286 .set_tx_csum = tg3_set_tx_csum,
10287 .set_sg = ethtool_op_set_sg,
10288 .set_tso = tg3_set_tso,
10289 .self_test = tg3_self_test,
10290 .get_strings = tg3_get_strings,
10291 .phys_id = tg3_phys_id,
10292 .get_ethtool_stats = tg3_get_ethtool_stats,
10293 .get_coalesce = tg3_get_coalesce,
10294 .set_coalesce = tg3_set_coalesce,
	.get_sset_count = tg3_get_sset_count,
};
10298 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;
10302 tp->nvram_size = EEPROM_CHIP_SIZE;
	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
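/* Example of the wraparound probe above: a 512-byte part that decodes fewer
 * address bits than it is offered will alias, so reads at 0x10, 0x20, 0x40,
 * ... return ordinary data until cursize reaches 0x200, where the access
 * wraps back to offset 0 and the magic signature reappears; cursize (0x200)
 * is then taken as the device size.  (Sizes here are illustrative.)
 */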
10332 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
	    tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
10348 /* This is confusing. We want to operate on the
10349 * 16-bit value at offset 0xf2. The tg3_nvram_read()
10350 * call will read from NVRAM and byteswap the data
10351 * according to the byteswapping settings for all
10352 * other register accesses. This ensures the data we
10353 * want will always reside in the lower 16-bits.
10354 * However, the data in NVRAM is in LE format, which
10355 * means the data from the NVRAM read will always be
10356 * opposite the endianness of the CPU. The 16-bit
10357 * byteswap then brings the data to CPU endianness.
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
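/* Worked example for the swab16() above (NVRAM contents hypothetical): if
 * the 16-bit size field at 0xf2 holds 512, i.e. 0x0200 in NVRAM's
 * little-endian layout, the register read leaves those two bytes in the low
 * half of val but byte-swapped relative to the CPU, so swab16((u16)val)
 * recovers 0x0200 and nvram_size becomes 512 * 1024 bytes.
 */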
10366 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
10370 nvcfg1 = tr32(NVRAM_CFG1);
10371 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
	} else {
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
10378 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
10379 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10380 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
	}
}
10417 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10421 nvcfg1 = tr32(NVRAM_CFG1);
10423 /* NVRAM protection for TPM */
10424 if (nvcfg1 & (1 << 27))
10425 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10427 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10428 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10429 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10430 tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		break;
	}
10447 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
10448 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
		case FLASH_5752PAGE_SIZE_256:
			tp->nvram_pagesize = 256;
			break;
		case FLASH_5752PAGE_SIZE_512:
			tp->nvram_pagesize = 512;
			break;
		case FLASH_5752PAGE_SIZE_1K:
			tp->nvram_pagesize = 1024;
			break;
		case FLASH_5752PAGE_SIZE_2K:
			tp->nvram_pagesize = 2048;
			break;
		case FLASH_5752PAGE_SIZE_4K:
			tp->nvram_pagesize = 4096;
			break;
		case FLASH_5752PAGE_SIZE_264:
			tp->nvram_pagesize = 264;
			break;
		}
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
10477 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10479 u32 nvcfg1, protect = 0;
10481 nvcfg1 = tr32(NVRAM_CFG1);
10483 /* NVRAM protection for TPM */
10484 if (nvcfg1 & (1 << 27)) {
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
		protect = 1;
	}
	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
10491 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10492 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10493 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10494 case FLASH_5755VENDOR_ATMEL_FLASH_5:
10495 tp->nvram_jedecnum = JEDEC_ATMEL;
10496 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10497 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10498 tp->nvram_pagesize = 264;
10499 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10500 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
10501 tp->nvram_size = (protect ? 0x3e200 :
10502 TG3_NVRAM_SIZE_512KB);
10503 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
10504 tp->nvram_size = (protect ? 0x1f200 :
10505 TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
10510 case FLASH_5752VENDOR_ST_M45PE10:
10511 case FLASH_5752VENDOR_ST_M45PE20:
10512 case FLASH_5752VENDOR_ST_M45PE40:
10513 tp->nvram_jedecnum = JEDEC_ST;
10514 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10515 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10516 tp->nvram_pagesize = 256;
10517 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
10518 tp->nvram_size = (protect ?
10519 TG3_NVRAM_SIZE_64KB :
10520 TG3_NVRAM_SIZE_128KB);
10521 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
10522 tp->nvram_size = (protect ?
10523 TG3_NVRAM_SIZE_64KB :
10524 TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
10533 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10537 nvcfg1 = tr32(NVRAM_CFG1);
10539 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10540 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10541 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10542 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10543 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10544 tp->nvram_jedecnum = JEDEC_ATMEL;
10545 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10546 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10548 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10552 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10553 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10554 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10555 tp->nvram_jedecnum = JEDEC_ATMEL;
10556 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10557 tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 264;
		break;
10560 case FLASH_5752VENDOR_ST_M45PE10:
10561 case FLASH_5752VENDOR_ST_M45PE20:
10562 case FLASH_5752VENDOR_ST_M45PE40:
10563 tp->nvram_jedecnum = JEDEC_ST;
10564 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10565 tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 256;
		break;
	}
}
10571 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
10573 u32 nvcfg1, protect = 0;
10575 nvcfg1 = tr32(NVRAM_CFG1);
10577 /* NVRAM protection for TPM */
10578 if (nvcfg1 & (1 << 27)) {
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
		protect = 1;
	}
	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
10585 case FLASH_5761VENDOR_ATMEL_ADB021D:
10586 case FLASH_5761VENDOR_ATMEL_ADB041D:
10587 case FLASH_5761VENDOR_ATMEL_ADB081D:
10588 case FLASH_5761VENDOR_ATMEL_ADB161D:
10589 case FLASH_5761VENDOR_ATMEL_MDB021D:
10590 case FLASH_5761VENDOR_ATMEL_MDB041D:
10591 case FLASH_5761VENDOR_ATMEL_MDB081D:
10592 case FLASH_5761VENDOR_ATMEL_MDB161D:
10593 tp->nvram_jedecnum = JEDEC_ATMEL;
10594 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10595 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10596 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
		tp->nvram_pagesize = 256;
		break;
10599 case FLASH_5761VENDOR_ST_A_M45PE20:
10600 case FLASH_5761VENDOR_ST_A_M45PE40:
10601 case FLASH_5761VENDOR_ST_A_M45PE80:
10602 case FLASH_5761VENDOR_ST_A_M45PE16:
10603 case FLASH_5761VENDOR_ST_M_M45PE20:
10604 case FLASH_5761VENDOR_ST_M_M45PE40:
10605 case FLASH_5761VENDOR_ST_M_M45PE80:
10606 case FLASH_5761VENDOR_ST_M_M45PE16:
10607 tp->nvram_jedecnum = JEDEC_ST;
10608 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10609 tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect)
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
10646 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10648 tp->nvram_jedecnum = JEDEC_ATMEL;
10649 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10650 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10653 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
10657 nvcfg1 = tr32(NVRAM_CFG1);
10659 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10660 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10661 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10662 tp->nvram_jedecnum = JEDEC_ATMEL;
10663 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10664 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10666 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
10669 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10670 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
10671 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
10672 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
10673 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
10674 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
10675 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
10676 tp->nvram_jedecnum = JEDEC_ATMEL;
10677 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10678 tp->tg3_flags2 |= TG3_FLG2_FLASH;
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
		return;
	}
	switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}
10750 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void __devinit tg3_nvram_init(struct tg3 *tp)
{
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);

	udelay(100);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		tp->tg3_flags |= TG3_FLAG_NVRAM;

		if (tg3_nvram_lock(tp)) {
			printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
			       "tg3_nvram_init failed.\n", tp->dev->name);
			return;
		}
		tg3_enable_nvram_access(tp);
10776 tp->nvram_size = 0;
10778 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10779 tg3_get_5752_nvram_info(tp);
10780 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10781 tg3_get_5755_nvram_info(tp);
10782 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10783 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10784 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10785 tg3_get_5787_nvram_info(tp);
10786 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10787 tg3_get_5761_nvram_info(tp);
10788 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10789 tg3_get_5906_nvram_info(tp);
10790 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
10791 tg3_get_57780_nvram_info(tp);
10793 tg3_get_nvram_info(tp);
10795 if (tp->nvram_size == 0)
10796 tg3_get_nvram_size(tp);
10798 tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
10808 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
					      u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
10824 * the native endian format. We accomplish this by reversing
10825 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
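		/* Example of the double swap above (buffer bytes are
		 * hypothetical): for buf bytes {0x12, 0x34, 0x56, 0x78},
		 * be32_to_cpu(data) yields 0x12345678 on any host, and
		 * swab32() then writes 0x78563412 -- precisely undoing the
		 * byte reversal that tg3_nvram_read_be32() would apply when
		 * the word is read back.
		 */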
10830 val = tr32(GRC_EEPROM_ADDR);
10831 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
10857 /* offset and length are dword aligned */
10858 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
					    u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		phy_addr = offset & ~pagemask;

		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);
10897 tg3_enable_nvram_access(tp);
		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;
		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
10955 /* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
					  u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;
10965 memcpy(&data, buf + i, 4);
10966 tw32(NVRAM_WRDATA, be32_to_cpu(data));
10968 page_off = offset % tp->nvram_pagesize;
10970 phy_addr = tg3_nvram_phys_addr(tp, offset);
10972 tw32(NVRAM_ADDR, phy_addr);
10974 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
10976 if ((page_off == 0) || (i == 0))
10977 nvram_cmd |= NVRAM_CMD_FIRST;
10978 if (page_off == (tp->nvram_pagesize - 4))
10979 nvram_cmd |= NVRAM_CMD_LAST;
10981 if (i == (len - 4))
10982 nvram_cmd |= NVRAM_CMD_LAST;
10984 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10985 !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
10986 (tp->nvram_jedecnum == JEDEC_ST) &&
10987 (nvram_cmd & NVRAM_CMD_FIRST)) {
			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
11006 /* offset and length are dword aligned */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	}
	else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		    !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
			!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {

			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		}
		else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
11061 struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
11067 /* Broadcom boards. */
11068 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
11069 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
11070 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
11071 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
11072 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
11073 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
11074 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
11075 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
11076 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
11077 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
11078 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
11081 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
11082 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
11083 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
11084 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
11085 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
11088 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
11089 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
11090 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
11091 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
11093 /* Compaq boards. */
11094 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
11095 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
11096 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
11097 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
11098 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
11101 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;
	u16 pmcsr;

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so we need to make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
	msleep(1);
	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);
11137 /* The memory arbiter has to be enabled in order for SRAM accesses
11138 * to succeed. Normally on powerup the tg3 chip firmware will make
11139 * sure it is enabled, but other entities such as system netboot
11140 * code might disable it.
11142 val = tr32(MEMARB_MODE);
11143 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
11145 tp->phy_id = PHY_ID_INVALID;
11146 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11148 /* Assume an onboard device and WOL capable by default. */
11149 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
11151 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11152 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
11153 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT))
			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
		goto done;
	}
11165 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
11166 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
11167 u32 nic_cfg, led_cfg;
11168 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
11169 int eeprom_phy_serdes = 0;
11171 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
11172 tp->nic_sram_data_cfg = nic_cfg;
11174 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
11175 ver >>= NIC_SRAM_DATA_VER_SHIFT;
11176 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
11177 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
11178 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
11179 (ver > 0) && (ver < 0x100))
11180 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
11182 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11183 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
11185 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
11186 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
11187 eeprom_phy_serdes = 1;
11189 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
11190 if (nic_phy_id != 0) {
11191 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
11192 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
11194 eeprom_phy_id = (id1 >> 16) << 10;
11195 eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else
			eeprom_phy_id = 0;
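		/* The SRAM word stores the PHY id split in two: the upper
		 * 16 bits of nic_phy_id supply bits [25:10] of the id, and
		 * id2 supplies the top six bits plus the low ten.  The
		 * shifts above rebuild the same layout tg3 derives from the
		 * MII_PHYSID1/MII_PHYSID2 registers during PHY probing.
		 */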
11200 tp->phy_id = eeprom_phy_id;
11201 if (eeprom_phy_serdes) {
11202 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
				tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
			else
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
11208 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
11209 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
11210 SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;
11220 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;
11238 case SHASTA_EXT_LED_SHARED:
11239 tp->led_ctrl = LED_CTRL_MODE_SHARED;
11240 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
11241 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
11242 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;
		}
11259 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11260 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
11261 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
11262 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11264 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
11265 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11267 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
11268 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
11269 if ((tp->pdev->subsystem_vendor ==
11270 PCI_VENDOR_ID_ARIMA) &&
11271 (tp->pdev->subsystem_device == 0x205a ||
11272 tp->pdev->subsystem_device == 0x2063))
11273 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11274 } else {
11275 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11276 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11277 }
11279 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
11280 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
11281 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
11282 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
11283 }
11285 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
11286 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11287 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
11289 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
11290 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
11291 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
11293 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
11294 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
11295 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11297 if (cfg2 & (1 << 17))
11298 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
11300 /* SERDES signal pre-emphasis in register 0x590 is set by
11301 * the bootcode if bit 18 is set. */
11302 if (cfg2 & (1 << 18))
11303 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
11305 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
11306 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) &&
11307 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
11308 tp->tg3_flags3 |= TG3_FLG3_PHY_ENABLE_APD;
11310 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11311 u32 cfg3;
11312 
11313 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
11314 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
11315 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11316 }
11318 if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
11319 tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
11320 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
11321 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
11322 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
11323 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
11325 done:
11326 device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
11327 device_set_wakeup_enable(&tp->pdev->dev,
11328 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
11329 }
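/* Kick off an OTP (one-time-programmable memory) command and poll
 * OTP_STATUS for up to about 1 ms. Returns 0 on success, -EBUSY on
 * timeout.
 */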
11331 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
11332 {
11333 int i;
11334 u32 val;
11336 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
11337 tw32(OTP_CTRL, cmd);
11339 /* Wait for up to 1 ms for command to execute. */
11340 for (i = 0; i < 100; i++) {
11341 val = tr32(OTP_STATUS);
11342 if (val & OTP_STATUS_CMD_DONE)
11343 break;
11344 udelay(10);
11345 }
11346 
11347 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
11348 }
11350 /* Read the gphy configuration from the OTP region of the chip. The gphy
11351 * configuration is a 32-bit value that straddles the alignment boundary.
11352 * We do two 32-bit reads and then shift and merge the results.
11353 */
11354 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
11355 {
11356 u32 bhalf_otp, thalf_otp;
11358 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
11360 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
11361 return 0;
11363 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
11365 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11366 return 0;
11368 thalf_otp = tr32(OTP_READ_DATA);
11370 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
11372 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11373 return 0;
11375 bhalf_otp = tr32(OTP_READ_DATA);
11377 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
11378 }
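/* Identify the PHY. If ASF or APE firmware owns the PHY, the MII ID
 * registers are left untouched; otherwise the ID is read over MII and,
 * when it does not look sane, taken from the value cached out of SRAM
 * by tg3_get_eeprom_hw_cfg() or from the subsystem-ID lookup table.
 */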
11380 static int __devinit tg3_phy_probe(struct tg3 *tp)
11381 {
11382 u32 hw_phy_id_1, hw_phy_id_2;
11383 u32 hw_phy_id, hw_phy_id_masked;
11384 int err;
11386 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
11387 return tg3_phy_init(tp);
11389 /* Reading the PHY ID register can conflict with ASF
11390 * firmware access to the PHY hardware.
11391 */
11392 err = 0;
11393 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11394 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
11395 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
11396 } else {
11397 /* Now read the physical PHY_ID from the chip and verify
11398 * that it is sane. If it doesn't look good, we fall back
11399 * to the PHY ID cached from the eeprom area and, failing
11400 * that, to the hard-coded subsystem-ID table.
11401 */
11402 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
11403 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
11405 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
11406 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
11407 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
11409 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
11410 }
11412 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
11413 tp->phy_id = hw_phy_id;
11414 if (hw_phy_id_masked == PHY_ID_BCM8002)
11415 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11416 else
11417 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
11418 } else {
11419 if (tp->phy_id != PHY_ID_INVALID) {
11420 /* Do nothing, phy ID already set up in
11421 * tg3_get_eeprom_hw_cfg().
11422 */
11423 } else {
11424 struct subsys_tbl_ent *p;
11426 /* No eeprom signature? Try the hardcoded
11427 * subsys device table.
11428 */
11429 p = lookup_by_subsys(tp);
11430 if (!p)
11431 return -ENODEV;
11432 
11433 tp->phy_id = p->phy_id;
11434 if (!tp->phy_id ||
11435 tp->phy_id == PHY_ID_BCM8002)
11436 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11437 }
11438 }
11440 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
11441 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
11442 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
11443 u32 bmsr, adv_reg, tg3_ctrl, mask;
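/* BMSR_LSTATUS is latched low, so the first read below returns the
 * stale latched state; only the second read reflects the current
 * link status.
 */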
11445 tg3_readphy(tp, MII_BMSR, &bmsr);
11446 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
11447 (bmsr & BMSR_LSTATUS))
11448 goto skip_phy_reset;
11450 err = tg3_phy_reset(tp);
11451 if (err)
11452 return err;
11454 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
11455 ADVERTISE_100HALF | ADVERTISE_100FULL |
11456 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
11458 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
11459 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
11460 MII_TG3_CTRL_ADV_1000_FULL);
11461 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11462 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
11463 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
11464 MII_TG3_CTRL_ENABLE_AS_MASTER);
11467 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11468 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11469 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
11470 if (!tg3_copper_is_advertising_all(tp, mask)) {
11471 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11473 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11474 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11476 tg3_writephy(tp, MII_BMCR,
11477 BMCR_ANENABLE | BMCR_ANRESTART);
11478 }
11479 tg3_phy_set_wirespeed(tp);
11481 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11482 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11483 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11484 }
11485 
11486 skip_phy_reset:
11487 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
11488 err = tg3_init_5401phy_dsp(tp);
11489 if (err)
11490 return err;
11491 }
11493 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
11494 err = tg3_init_5401phy_dsp(tp);
11495 }
11497 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
11498 tp->link_config.advertising =
11499 (ADVERTISED_1000baseT_Half |
11500 ADVERTISED_1000baseT_Full |
11501 ADVERTISED_Autoneg |
11502 ADVERTISED_FIBRE);
11503 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
11504 tp->link_config.advertising &=
11505 ~(ADVERTISED_1000baseT_Half |
11506 ADVERTISED_1000baseT_Full);
11507 
11508 return err;
11509 }
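/* Pull the board part number out of the VPD block, reading the VPD
 * image from NVRAM when a valid EEPROM signature is present and
 * through the PCI VPD capability otherwise; chips without usable
 * VPD fall back to hard-coded defaults.
 */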
11511 static void __devinit tg3_read_partno(struct tg3 *tp)
11512 {
11513 unsigned char vpd_data[256]; /* in little-endian format */
11514 unsigned int i;
11515 u32 magic;
11517 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
11518 tg3_nvram_read(tp, 0x0, &magic))
11519 goto out_not_found;
11521 if (magic == TG3_EEPROM_MAGIC) {
11522 for (i = 0; i < 256; i += 4) {
11523 u32 tmp;
11524 
11525 /* The data is in little-endian format in NVRAM.
11526 * Use the big-endian read routines to preserve
11527 * the byte order as it exists in NVRAM.
11528 */
11529 if (tg3_nvram_read_be32(tp, 0x100 + i, &tmp))
11530 goto out_not_found;
11532 memcpy(&vpd_data[i], &tmp, sizeof(tmp));
11533 }
11534 } else {
11535 int vpd_cap;
11536 
11537 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
11538 for (i = 0; i < 256; i += 4) {
11539 u32 tmp, j = 0;
11540 __le32 v;
11541 u16 tmp16;
11542 
11543 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
11544 i);
11545 while (j++ < 100) {
11546 pci_read_config_word(tp->pdev, vpd_cap +
11547 PCI_VPD_ADDR, &tmp16);
11548 if (tmp16 & 0x8000)
11549 break;
11550 msleep(1);
11551 }
11552 if (!(tmp16 & 0x8000))
11553 goto out_not_found;
11555 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
11556 &tmp);
11557 v = cpu_to_le32(tmp);
11558 memcpy(&vpd_data[i], &v, sizeof(v));
11559 }
11560 }
11562 /* Now parse and find the part number. */
11563 for (i = 0; i < 254; ) {
11564 unsigned char val = vpd_data[i];
11565 unsigned int block_end;
11567 if (val == 0x82 || val == 0x91) {
11568 i = (i + 3 +
11569 (vpd_data[i + 1] +
11570 (vpd_data[i + 2] << 8)));
11571 continue;
11572 }
11573 
11574 if (val != 0x90)
11575 goto out_not_found;
11576 
11577 block_end = (i + 3 +
11578 (vpd_data[i + 1] +
11579 (vpd_data[i + 2] << 8)));
11580 i += 3;
11582 if (block_end > 256)
11583 goto out_not_found;
11585 while (i < (block_end - 2)) {
11586 if (vpd_data[i + 0] == 'P' &&
11587 vpd_data[i + 1] == 'N') {
11588 int partno_len = vpd_data[i + 2];
11589 
11590 i += 3;
11591 if (partno_len > 24 || (partno_len + i) > 256)
11592 goto out_not_found;
11593 
11594 memcpy(tp->board_part_number,
11595 &vpd_data[i], partno_len);
11596 
11597 /* Success. */
11598 return;
11599 }
11600 i += 3 + vpd_data[i + 2];
11603 /* Part number not found. */
11604 goto out_not_found;
11605 }
11606 
11607 out_not_found:
11608 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11609 strcpy(tp->board_part_number, "BCM95906");
11610 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
11611 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
11612 strcpy(tp->board_part_number, "BCM57780");
11613 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
11614 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
11615 strcpy(tp->board_part_number, "BCM57760");
11616 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
11617 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
11618 strcpy(tp->board_part_number, "BCM57790");
11619 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
11620 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
11621 strcpy(tp->board_part_number, "BCM57788");
11622 else
11623 strcpy(tp->board_part_number, "none");
11624 }
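/* Sanity-check a firmware image header in NVRAM: the first word must
 * carry the 0x0c000000 signature bits and the high bits of the second
 * word must be clear.
 */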
11626 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11627 {
11628 u32 val;
11629 
11630 if (tg3_nvram_read(tp, offset, &val) ||
11631 (val & 0xfc000000) != 0x0c000000 ||
11632 tg3_nvram_read(tp, offset + 4, &val) ||
11633 (val & 0xe0000000) != 0)
11634 return 0;
11635 
11636 return 1;
11637 }
11639 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
11640 {
11641 u32 val, offset, start, ver_offset;
11642 int i;
11643 bool newver = false;
11645 if (tg3_nvram_read(tp, 0xc, &offset) ||
11646 tg3_nvram_read(tp, 0x4, &start))
11647 return;
11649 offset = tg3_nvram_logical_addr(tp, offset);
11651 if (tg3_nvram_read(tp, offset, &val))
11652 return;
11654 if ((val & 0xfc000000) == 0x0c000000) {
11655 if (tg3_nvram_read(tp, offset + 4, &val))
11656 return;
11657 
11658 if (val == 0)
11659 newver = true;
11660 }
11661 
11662 if (newver) {
11663 if (tg3_nvram_read(tp, offset + 8, &ver_offset))
11664 return;
11666 offset = offset + ver_offset - start;
11667 for (i = 0; i < 16; i += 4) {
11668 __be32 v;
11669 if (tg3_nvram_read_be32(tp, offset + i, &v))
11670 return;
11671 
11672 memcpy(tp->fw_ver + i, &v, sizeof(v));
11673 }
11674 } else {
11675 u32 major, minor;
11676 
11677 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
11678 return;
11679 
11680 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
11681 TG3_NVM_BCVER_MAJSFT;
11682 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
11683 snprintf(&tp->fw_ver[0], 32, "v%d.%02d", major, minor);
11684 }
11685 }
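/* Report the hardware self-boot version from NVRAM config word 1 as
 * "sb vMAJ.MIN".
 */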
11687 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
11688 {
11689 u32 val, major, minor;
11691 /* Use native endian representation */
11692 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
11693 return;
11695 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
11696 TG3_NVM_HWSB_CFG1_MAJSFT;
11697 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
11698 TG3_NVM_HWSB_CFG1_MINSFT;
11700 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
11703 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
11704 {
11705 u32 offset, major, minor, build;
11707 tp->fw_ver[0] = 's';
11708 tp->fw_ver[1] = 'b';
11709 tp->fw_ver[2] = '\0';
11711 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
11712 return;
11714 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
11715 case TG3_EEPROM_SB_REVISION_0:
11716 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
11717 break;
11718 case TG3_EEPROM_SB_REVISION_2:
11719 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
11720 break;
11721 case TG3_EEPROM_SB_REVISION_3:
11722 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
11723 break;
11724 default:
11725 return;
11726 }
11728 if (tg3_nvram_read(tp, offset, &val))
11729 return;
11731 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
11732 TG3_EEPROM_SB_EDH_BLD_SHFT;
11733 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
11734 TG3_EEPROM_SB_EDH_MAJ_SHFT;
11735 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
11737 if (minor > 99 || build > 26)
11738 return;
11739 
11740 snprintf(&tp->fw_ver[2], 30, " v%d.%02d", major, minor);
11741 
11742 if (build > 0) {
11743 tp->fw_ver[8] = 'a' + build - 1;
11744 tp->fw_ver[9] = '\0';
11745 }
11746 }
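/* Locate the ASF management firmware image through the NVRAM
 * directory and append its version to tp->fw_ver.
 */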
11748 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
11749 {
11750 u32 val, offset, start;
11751 int i, vlen;
11753 for (offset = TG3_NVM_DIR_START;
11754 offset < TG3_NVM_DIR_END;
11755 offset += TG3_NVM_DIRENT_SIZE) {
11756 if (tg3_nvram_read(tp, offset, &val))
11757 return;
11759 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
11760 break;
11761 }
11762 
11763 if (offset == TG3_NVM_DIR_END)
11764 return;
11766 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
11767 start = 0x08000000;
11768 else if (tg3_nvram_read(tp, offset - 4, &start))
11769 return;
11771 if (tg3_nvram_read(tp, offset + 4, &offset) ||
11772 !tg3_fw_img_is_valid(tp, offset) ||
11773 tg3_nvram_read(tp, offset + 8, &val))
11774 return;
11776 offset += val - start;
11778 vlen = strlen(tp->fw_ver);
11780 tp->fw_ver[vlen++] = ',';
11781 tp->fw_ver[vlen++] = ' ';
11783 for (i = 0; i < 4; i++) {
11784 __be32 v;
11785 if (tg3_nvram_read_be32(tp, offset, &v))
11786 return;
11787 
11788 offset += sizeof(v);
11789 
11790 if (vlen > TG3_VER_SIZE - sizeof(v)) {
11791 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
11792 break;
11793 }
11794 
11795 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
11796 vlen += sizeof(v);
11797 }
11798 }
11800 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
11801 {
11802 int vlen;
11803 u32 apedata;
11805 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) ||
11806 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
11807 return;
11809 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
11810 if (apedata != APE_SEG_SIG_MAGIC)
11811 return;
11813 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
11814 if (!(apedata & APE_FW_STATUS_READY))
11815 return;
11817 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
11819 vlen = strlen(tp->fw_ver);
11821 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " DASH v%d.%d.%d.%d",
11822 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
11823 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
11824 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
11825 (apedata & APE_FW_VERSION_BLDMSK));
11826 }
11828 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
11829 {
11830 u32 val;
11832 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
11833 tp->fw_ver[0] = 's';
11834 tp->fw_ver[1] = 'b';
11835 tp->fw_ver[2] = '\0';
11836 return;
11837 }
11838 
11840 if (tg3_nvram_read(tp, 0, &val))
11841 return;
11843 if (val == TG3_EEPROM_MAGIC)
11844 tg3_read_bc_ver(tp);
11845 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
11846 tg3_read_sb_ver(tp, val);
11847 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11848 tg3_read_hwsb_ver(tp);
11849 else
11850 return;
11852 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11853 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
11854 return;
11856 tg3_read_mgmtfw_ver(tp);
11858 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
11859 }
11861 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
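/* One-time probe discovery: read the chip revision, work out the bus
 * type (PCI, PCI-X or PCI Express) and set every quirk/workaround
 * flag the rest of the driver keys off. Must run before any code
 * that depends on those flags touches the chip.
 */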
11863 static int __devinit tg3_get_invariants(struct tg3 *tp)
11864 {
11865 static struct pci_device_id write_reorder_chipsets[] = {
11866 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11867 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
11868 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11869 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
11870 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11871 PCI_DEVICE_ID_VIA_8385_0) },
11872 { },
11873 };
11874 u32 misc_ctrl_reg;
11875 u32 pci_state_reg, grc_misc_cfg;
11876 u32 val;
11877 u16 pci_cmd;
11878 int err;
11880 /* Force memory write invalidate off. If we leave it on,
11881 * then on 5700_BX chips we have to enable a workaround.
11882 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11883 * to match the cacheline size. The Broadcom driver has this
11884 * workaround but turns MWI off all the time so it never uses
11885 * it. This seems to suggest that the workaround is insufficient.
11886 */
11887 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11888 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11889 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11891 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11892 * has the register indirect write enable bit set before
11893 * we try to access any of the MMIO registers. It is also
11894 * critical that the PCI-X hw workaround situation is decided
11895 * before that as well.
11896 */
11897 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11898 &misc_ctrl_reg);
11900 tp->pci_chip_rev_id = (misc_ctrl_reg >>
11901 MISC_HOST_CTRL_CHIPREV_SHIFT);
11902 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11903 u32 prod_id_asic_rev;
11905 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11906 &prod_id_asic_rev);
11907 tp->pci_chip_rev_id = prod_id_asic_rev;
11908 }
11910 /* Wrong chip ID in 5752 A0. This code can be removed later
11911 * as A0 is not in production.
11912 */
11913 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
11914 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
11916 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11917 * we need to disable memory and use config. cycles
11918 * only to access all registers. The 5702/03 chips
11919 * can mistakenly decode the special cycles from the
11920 * ICH chipsets as memory write cycles, causing corruption
11921 * of register and memory space. Only certain ICH bridges
11922 * will drive special cycles with non-zero data during the
11923 * address phase which can fall within the 5703's address
11924 * range. This is not an ICH bug as the PCI spec allows
11925 * non-zero address during special cycles. However, only
11926 * these ICH bridges are known to drive non-zero addresses
11927 * during special cycles.
11929 * Since special cycles do not cross PCI bridges, we only
11930 * enable this workaround if the 5703 is on the secondary
11931 * bus of these ICH bridges.
11932 */
11933 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11934 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11935 static struct tg3_dev_id {
11936 u32 vendor;
11937 u32 device;
11938 u32 rev;
11939 } ich_chipsets[] = {
11940 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11941 PCI_ANY_ID },
11942 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11943 PCI_ANY_ID },
11944 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11945 PCI_ANY_ID },
11946 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11947 PCI_ANY_ID },
11948 { },
11949 };
11950 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11951 struct pci_dev *bridge = NULL;
11953 while (pci_id->vendor != 0) {
11954 bridge = pci_get_device(pci_id->vendor, pci_id->device,
11955 bridge);
11956 if (!bridge) {
11957 pci_id++;
11958 continue;
11959 }
11960 if (pci_id->rev != PCI_ANY_ID) {
11961 if (bridge->revision > pci_id->rev)
11962 continue;
11963 }
11964 if (bridge->subordinate &&
11965 (bridge->subordinate->number ==
11966 tp->pdev->bus->number)) {
11968 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11969 pci_dev_put(bridge);
11970 break;
11971 }
11972 }
11973 }
11975 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
11976 static struct tg3_dev_id {
11977 u32 vendor;
11978 u32 device;
11979 } bridge_chipsets[] = {
11980 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
11981 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
11982 { },
11983 };
11984 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
11984 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
11985 struct pci_dev *bridge = NULL;
11987 while (pci_id->vendor != 0) {
11988 bridge = pci_get_device(pci_id->vendor,
11989 pci_id->device,
11990 bridge);
11991 if (!bridge) {
11992 pci_id++;
11993 continue;
11994 }
11995 if (bridge->subordinate &&
11996 (bridge->subordinate->number <=
11997 tp->pdev->bus->number) &&
11998 (bridge->subordinate->subordinate >=
11999 tp->pdev->bus->number)) {
12000 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
12001 pci_dev_put(bridge);
12002 break;
12003 }
12004 }
12005 }
12007 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
12008 * DMA addresses > 40-bit. This bridge may have other additional
12009 * 57xx devices behind it in some 4-port NIC designs for example.
12010 * Any tg3 device found behind the bridge will also need the 40-bit
12011 * DMA workaround.
12012 */
12013 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
12014 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12015 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
12016 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12017 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
12018 } else {
12020 struct pci_dev *bridge = NULL;
12021 
12022 do {
12023 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
12024 PCI_DEVICE_ID_SERVERWORKS_EPB,
12025 bridge);
12026 if (bridge && bridge->subordinate &&
12027 (bridge->subordinate->number <=
12028 tp->pdev->bus->number) &&
12029 (bridge->subordinate->subordinate >=
12030 tp->pdev->bus->number)) {
12031 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12032 pci_dev_put(bridge);
12033 break;
12034 }
12035 } while (bridge);
12036 }
12038 /* Initialize misc host control in PCI block. */
12039 tp->misc_host_ctrl |= (misc_ctrl_reg &
12040 MISC_HOST_CTRL_CHIPREV);
12041 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12042 tp->misc_host_ctrl);
12044 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12045 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
12046 tp->pdev_peer = tg3_find_peer(tp);
12048 /* Intentionally exclude ASIC_REV_5906 */
12049 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12050 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12051 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12052 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12053 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12054 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12055 tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
12057 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12058 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12059 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12060 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
12061 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12062 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
12064 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
12065 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12066 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
12068 /* 5700 B0 chips do not support checksumming correctly due
12069 * to hardware bugs.
12071 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12072 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12074 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
12075 tp->dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
12076 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
12077 tp->dev->features |= NETIF_F_IPV6_CSUM;
12080 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12081 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12082 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
12083 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
12084 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
12085 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
12086 tp->pdev_peer == tp->pdev))
12087 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
12089 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
12090 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12091 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
12092 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
12093 } else {
12094 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
12095 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12096 ASIC_REV_5750 &&
12097 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
12098 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
12099 }
12100 }
12102 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12103 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12104 tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE;
12106 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12107 &pci_state_reg);
12109 tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
12110 if (tp->pcie_cap != 0) {
12111 u16 lnkctl;
12113 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12115 pcie_set_readrq(tp->pdev, 4096);
12117 pci_read_config_word(tp->pdev,
12118 tp->pcie_cap + PCI_EXP_LNKCTL,
12119 &lnkctl);
12120 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
12121 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12122 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
12123 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12124 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12125 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
12126 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
12127 tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
12128 }
12129 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
12130 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12131 } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12132 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12133 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
12134 if (!tp->pcix_cap) {
12135 printk(KERN_ERR PFX "Cannot find PCI-X "
12136 "capability, aborting.\n");
12140 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
12141 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
12144 /* If we have an AMD 762 or VIA K8T800 chipset, write
12145 * reordering to the mailbox registers done by the host
12146 * controller can cause major troubles. We read back from
12147 * every mailbox register write to force the writes to be
12148 * posted to the chip in order.
12149 */
12150 if (pci_dev_present(write_reorder_chipsets) &&
12151 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12152 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
12154 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
12155 &tp->pci_cacheline_sz);
12156 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
12157 &tp->pci_lat_timer);
12158 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12159 tp->pci_lat_timer < 64) {
12160 tp->pci_lat_timer = 64;
12161 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
12162 tp->pci_lat_timer);
12163 }
12165 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
12166 /* 5700 BX chips need to have their TX producer index
12167 * mailboxes written twice to workaround a bug.
12168 */
12169 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
12171 /* If we are in PCI-X mode, enable register write workaround.
12172 *
12173 * The workaround is to use indirect register accesses
12174 * for all chip writes not to mailbox registers.
12175 */
12176 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12177 u32 pm_reg;
12178 
12179 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12181 /* The chip can have its power management PCI config
12182 * space registers clobbered due to this bug.
12183 * So explicitly force the chip into D0 here.
12184 */
12185 pci_read_config_dword(tp->pdev,
12186 tp->pm_cap + PCI_PM_CTRL,
12187 &pm_reg);
12188 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
12189 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
12190 pci_write_config_dword(tp->pdev,
12191 tp->pm_cap + PCI_PM_CTRL,
12192 pm_reg);
12194 /* Also, force SERR#/PERR# in PCI command. */
12195 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12196 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
12197 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12198 }
12201 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
12202 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
12203 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
12204 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
12206 /* Chip-specific fixup from Broadcom driver */
12207 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
12208 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
12209 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
12210 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
12213 /* Default fast path register access methods */
12214 tp->read32 = tg3_read32;
12215 tp->write32 = tg3_write32;
12216 tp->read32_mbox = tg3_read32;
12217 tp->write32_mbox = tg3_write32;
12218 tp->write32_tx_mbox = tg3_write32;
12219 tp->write32_rx_mbox = tg3_write32;
12221 /* Various workaround register access methods */
12222 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
12223 tp->write32 = tg3_write_indirect_reg32;
12224 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12225 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12226 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
12227 /*
12228 * Back to back register writes can cause problems on these
12229 * chips, the workaround is to read back all reg writes
12230 * except those to mailbox regs.
12231 *
12232 * See tg3_write_indirect_reg32().
12233 */
12234 tp->write32 = tg3_write_flush_reg32;
12235 }
12238 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
12239 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
12240 tp->write32_tx_mbox = tg3_write32_tx_mbox;
12241 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
12242 tp->write32_rx_mbox = tg3_write_flush_reg32;
12245 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
12246 tp->read32 = tg3_read_indirect_reg32;
12247 tp->write32 = tg3_write_indirect_reg32;
12248 tp->read32_mbox = tg3_read_indirect_mbox;
12249 tp->write32_mbox = tg3_write_indirect_mbox;
12250 tp->write32_tx_mbox = tg3_write_indirect_mbox;
12251 tp->write32_rx_mbox = tg3_write_indirect_mbox;
12252 
12253 iounmap(tp->regs);
12254 tp->regs = NULL;
12255 
12256 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12257 pci_cmd &= ~PCI_COMMAND_MEMORY;
12258 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12259 }
12260 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12261 tp->read32_mbox = tg3_read32_mbox_5906;
12262 tp->write32_mbox = tg3_write32_mbox_5906;
12263 tp->write32_tx_mbox = tg3_write32_mbox_5906;
12264 tp->write32_rx_mbox = tg3_write32_mbox_5906;
12267 if (tp->write32 == tg3_write_indirect_reg32 ||
12268 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12269 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12270 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
12271 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
12273 /* Get eeprom hw config before calling tg3_set_power_state().
12274 * In particular, the TG3_FLG2_IS_NIC flag must be
12275 * determined before calling tg3_set_power_state() so that
12276 * we know whether or not to switch out of Vaux power.
12277 * When the flag is set, it means that GPIO1 is used for eeprom
12278 * write protect and also implies that it is a LOM where GPIOs
12279 * are not used to switch power.
12280 */
12281 tg3_get_eeprom_hw_cfg(tp);
12283 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12284 /* Allow reads and writes to the
12285 * APE register and memory space.
12286 */
12287 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
12288 PCISTATE_ALLOW_APE_SHMEM_WR;
12289 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
12290 pci_state_reg);
12291 }
12293 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12294 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12295 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12296 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12297 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
12299 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
12300 * GPIO1 driven high will bring 5700's external PHY out of reset.
12301 * It is also used as eeprom write protect on LOMs.
12302 */
12303 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
12304 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12305 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
12306 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
12307 GRC_LCLCTRL_GPIO_OUTPUT1);
12308 /* Unused GPIO3 must be driven as output on 5752 because there
12309 * are no pull-up resistors on unused GPIO pins.
12310 */
12311 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12312 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
12314 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12315 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12316 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12318 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
12319 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
12320 /* Turn off the debug UART. */
12321 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12322 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
12323 /* Keep VMain power. */
12324 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
12325 GRC_LCLCTRL_GPIO_OUTPUT0;
12326 }
12328 /* Force the chip into D0. */
12329 err = tg3_set_power_state(tp, PCI_D0);
12330 if (err) {
12331 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
12332 pci_name(tp->pdev));
12333 return err;
12334 }
12336 /* Derive initial jumbo mode from MTU assigned in
12337 * ether_setup() via the alloc_etherdev() call.
12338 */
12339 if (tp->dev->mtu > ETH_DATA_LEN &&
12340 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12341 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
12343 /* Determine WakeOnLan speed to use. */
12344 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12345 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12346 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
12347 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
12348 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
12349 } else {
12350 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
12351 }
12353 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12354 tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
12356 /* A few boards don't want Ethernet@WireSpeed phy feature */
12357 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12358 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
12359 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
12360 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
12361 (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) ||
12362 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
12363 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
12365 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
12366 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
12367 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
12368 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
12369 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
12371 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
12372 !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
12373 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12374 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780) {
12375 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12376 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12377 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12378 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
12379 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
12380 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
12381 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
12382 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
12383 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
12384 } else
12385 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
12386 }
12388 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12389 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
12390 tp->phy_otp = tg3_read_otp_phycfg(tp);
12391 if (tp->phy_otp == 0)
12392 tp->phy_otp = TG3_OTP_DEFAULT;
12393 }
12395 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
12396 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
12397 else
12398 tp->mi_mode = MAC_MI_MODE_BASE;
12400 tp->coalesce_mode = 0;
12401 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
12402 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
12403 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
12405 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12406 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12407 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12409 if ((tp->pci_chip_rev_id == CHIPREV_ID_57780_A1 &&
12410 tr32(RCVLPC_STATS_ENABLE) & RCVLPC_STATSENAB_ASF_FIX) ||
12411 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0)
12412 tp->tg3_flags3 |= TG3_FLG3_TOGGLE_10_100_L1PLLPD;
12414 err = tg3_mdio_init(tp);
12415 if (err)
12416 return err;
12418 /* Initialize data/descriptor byte/word swapping. */
12419 val = tr32(GRC_MODE);
12420 val &= GRC_MODE_HOST_STACKUP;
12421 tw32(GRC_MODE, val | tp->grc_mode);
12423 tg3_switch_clocks(tp);
12425 /* Clear this out for sanity. */
12426 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
12428 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12429 &pci_state_reg);
12430 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
12431 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
12432 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
12434 if (chiprevid == CHIPREV_ID_5701_A0 ||
12435 chiprevid == CHIPREV_ID_5701_B0 ||
12436 chiprevid == CHIPREV_ID_5701_B2 ||
12437 chiprevid == CHIPREV_ID_5701_B5) {
12438 void __iomem *sram_base;
12440 /* Write some dummy words into the SRAM status block
12441 * area, see if it reads back correctly. If the return
12442 * value is bad, force enable the PCIX workaround.
12443 */
12444 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
12446 writel(0x00000000, sram_base);
12447 writel(0x00000000, sram_base + 4);
12448 writel(0xffffffff, sram_base + 4);
12449 if (readl(sram_base) != 0x00000000)
12450 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12451 }
12452 }
12453 
12454 udelay(50);
12455 tg3_nvram_init(tp);
12457 grc_misc_cfg = tr32(GRC_MISC_CFG);
12458 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
12460 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12461 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
12462 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
12463 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
12465 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
12466 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
12467 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
12468 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
12469 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
12470 HOSTCC_MODE_CLRTICK_TXBD);
12472 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
12473 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12474 tp->misc_host_ctrl);
12475 }
12477 /* Preserve the APE MAC_MODE bits */
12478 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
12479 tp->mac_mode = tr32(MAC_MODE) |
12480 MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
12481 else
12482 tp->mac_mode = TG3_DEF_MAC_MODE;
12484 /* these are limited to 10/100 only */
12485 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12486 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
12487 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12488 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12489 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
12490 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
12491 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
12492 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12493 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
12494 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
12495 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
12496 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
12497 (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
12498 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
12500 err = tg3_phy_probe(tp);
12501 if (err) {
12502 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
12503 pci_name(tp->pdev), err);
12504 /* ... but do not return immediately ... */
12505 }
12508 tg3_read_partno(tp);
12509 tg3_read_fw_ver(tp);
12511 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
12512 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12513 } else {
12514 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12515 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
12516 else
12517 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12518 }
12520 /* 5700 {AX,BX} chips have a broken status block link
12521 * change bit implementation, so we must use the
12522 * status register in those cases.
12523 */
12524 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12525 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
12526 else
12527 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
12529 /* The led_ctrl is set during tg3_phy_probe, here we might
12530 * have to force the link status polling mechanism based
12531 * upon subsystem IDs.
12532 */
12533 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
12534 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12535 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
12536 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
12537 TG3_FLAG_USE_LINKCHG_REG);
12540 /* For all SERDES we poll the MAC status register. */
12541 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
12542 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
12544 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
12546 tp->rx_offset = NET_IP_ALIGN;
12547 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12548 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
12549 tp->rx_offset = 0;
12550 
12551 tp->rx_std_max_post = TG3_RX_RING_SIZE;
12553 /* Increment the rx prod index on the rx std ring by at most
12554 * 8 for these chips to workaround hw errata.
12555 */
12556 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12557 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12558 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12559 tp->rx_std_max_post = 8;
12561 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
12562 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12563 PCIE_PWR_MGMT_L1_THRESH_MSK;
12564 
12565 return err;
12566 }
12568 #ifdef CONFIG_SPARC
12569 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12570 {
12571 struct net_device *dev = tp->dev;
12572 struct pci_dev *pdev = tp->pdev;
12573 struct device_node *dp = pci_device_to_OF_node(pdev);
12574 const unsigned char *addr;
12575 int len;
12577 addr = of_get_property(dp, "local-mac-address", &len);
12578 if (addr && len == 6) {
12579 memcpy(dev->dev_addr, addr, 6);
12580 memcpy(dev->perm_addr, dev->dev_addr, 6);
12581 return 0;
12582 }
12583 return -ENODEV;
12584 }
12586 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12587 {
12588 struct net_device *dev = tp->dev;
12589 
12590 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
12591 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
12592 return 0;
12593 }
12594 #endif
12596 static int __devinit tg3_get_device_address(struct tg3 *tp)
12597 {
12598 struct net_device *dev = tp->dev;
12599 u32 hi, lo, mac_offset;
12600 int addr_ok = 0;
12601 
12602 #ifdef CONFIG_SPARC
12603 if (!tg3_get_macaddr_sparc(tp))
12604 return 0;
12605 #endif
12606 
12607 mac_offset = 0x7c;
12608 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12609 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12610 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
12611 mac_offset = 0xcc;
12612 if (tg3_nvram_lock(tp))
12613 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
12614 else
12615 tg3_nvram_unlock(tp);
12616 }
12617 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12618 mac_offset = 0x10;
12620 /* First try to get it from MAC address mailbox. */
12621 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
12622 if ((hi >> 16) == 0x484b) {
12623 dev->dev_addr[0] = (hi >> 8) & 0xff;
12624 dev->dev_addr[1] = (hi >> 0) & 0xff;
12626 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
12627 dev->dev_addr[2] = (lo >> 24) & 0xff;
12628 dev->dev_addr[3] = (lo >> 16) & 0xff;
12629 dev->dev_addr[4] = (lo >> 8) & 0xff;
12630 dev->dev_addr[5] = (lo >> 0) & 0xff;
12632 /* Some old bootcode may report a 0 MAC address in SRAM */
12633 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
12634 }
12635 if (!addr_ok) {
12636 /* Next, try NVRAM. */
12637 if (!(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) &&
12638 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
12639 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
12640 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
12641 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
12642 }
12643 /* Finally just fetch it out of the MAC control regs. */
12644 else {
12645 hi = tr32(MAC_ADDR_0_HIGH);
12646 lo = tr32(MAC_ADDR_0_LOW);
12648 dev->dev_addr[5] = lo & 0xff;
12649 dev->dev_addr[4] = (lo >> 8) & 0xff;
12650 dev->dev_addr[3] = (lo >> 16) & 0xff;
12651 dev->dev_addr[2] = (lo >> 24) & 0xff;
12652 dev->dev_addr[1] = hi & 0xff;
12653 dev->dev_addr[0] = (hi >> 8) & 0xff;
12654 }
12655 }
12656 
12657 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
12658 #ifdef CONFIG_SPARC
12659 if (!tg3_get_default_macaddr_sparc(tp))
12660 return 0;
12661 #endif
12662 return -EINVAL;
12663 }
12664 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
12665 return 0;
12666 }
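/* DMA boundary goals: host bridges on many RISC systems disconnect
 * when a burst crosses a cache line, so tg3_calc_dma_bndry() below
 * picks DMA_RWCTRL boundary bits that stop bursts either at every
 * cache line or at a small multiple of it.
 */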
12668 #define BOUNDARY_SINGLE_CACHELINE 1
12669 #define BOUNDARY_MULTI_CACHELINE 2
12671 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12672 {
12673 int cacheline_size;
12674 u8 byte;
12675 int goal;
12676 
12677 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
12678 if (byte == 0)
12679 cacheline_size = 1024;
12680 else
12681 cacheline_size = (int) byte * 4;
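/* PCI_CACHE_LINE_SIZE is in 32-bit words: for example, a register
 * value of 0x10 (16 words) means a 64-byte cache line. A value of 0
 * leaves the size unknown, so the largest (1024-byte) granularity is
 * assumed.
 */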
12683 /* On 5703 and later chips, the boundary bits have no
12684 * effect.
12685 */
12686 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12687 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12688 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12689 goto out;
12691 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
12692 goal = BOUNDARY_MULTI_CACHELINE;
12693 #else
12694 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
12695 goal = BOUNDARY_SINGLE_CACHELINE;
12696 #else
12697 goal = 0;
12698 #endif
12699 #endif
12700 
12701 if (!goal)
12702 goto out;
12704 /* PCI controllers on most RISC systems tend to disconnect
12705 * when a device tries to burst across a cache-line boundary.
12706 * Therefore, letting tg3 do so just wastes PCI bandwidth.
12708 * Unfortunately, for PCI-E there are only limited
12709 * write-side controls for this, and thus for reads
12710 * we will still get the disconnects. We'll also waste
12711 * these PCI cycles for both read and write for chips
12712 * other than 5700 and 5701 which do not implement the
12713 * boundary bits.
12714 */
12715 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12716 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
12717 switch (cacheline_size) {
12718 case 16:
12719 case 32:
12720 case 64:
12721 case 128:
12722 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12723 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
12724 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
12725 } else {
12726 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12727 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12728 }
12729 break;
12730 
12731 case 256:
12732 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
12733 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
12734 break;
12735 
12736 default:
12737 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12738 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12739 break;
12740 };
12741 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12742 switch (cacheline_size) {
12743 case 16:
12744 case 32:
12745 case 64:
12746 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12747 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12748 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
12749 break;
12750 }
12751 /* fallthrough */
12752 case 128:
12753 default:
12754 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12755 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
12756 break;
12757 };
12758 } else {
12759 switch (cacheline_size) {
12760 case 16:
12761 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12762 val |= (DMA_RWCTRL_READ_BNDRY_16 |
12763 DMA_RWCTRL_WRITE_BNDRY_16);
12764 break;
12765 }
12766 /* fallthrough */
12767 case 32:
12768 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12769 val |= (DMA_RWCTRL_READ_BNDRY_32 |
12770 DMA_RWCTRL_WRITE_BNDRY_32);
12771 break;
12772 }
12773 /* fallthrough */
12774 case 64:
12775 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12776 val |= (DMA_RWCTRL_READ_BNDRY_64 |
12777 DMA_RWCTRL_WRITE_BNDRY_64);
12778 break;
12779 }
12780 /* fallthrough */
12781 case 128:
12782 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12783 val |= (DMA_RWCTRL_READ_BNDRY_128 |
12784 DMA_RWCTRL_WRITE_BNDRY_128);
12785 break;
12786 }
12787 /* fallthrough */
12788 case 256:
12789 val |= (DMA_RWCTRL_READ_BNDRY_256 |
12790 DMA_RWCTRL_WRITE_BNDRY_256);
12791 break;
12792 case 512:
12793 val |= (DMA_RWCTRL_READ_BNDRY_512 |
12794 DMA_RWCTRL_WRITE_BNDRY_512);
12795 break;
12796 case 1024:
12797 default:
12798 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
12799 DMA_RWCTRL_WRITE_BNDRY_1024);
12800 break;
12801 }
12802 }
12803 
12804 out:
12805 return val;
12806 }
12808 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
12809 {
12810 struct tg3_internal_buffer_desc test_desc;
12811 u32 sram_dma_descs;
12812 int i, ret;
12814 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
12816 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
12817 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
12818 tw32(RDMAC_STATUS, 0);
12819 tw32(WDMAC_STATUS, 0);
12821 tw32(BUFMGR_MODE, 0);
12822 tw32(FTQ_RESET, 0);
12824 test_desc.addr_hi = ((u64) buf_dma) >> 32;
12825 test_desc.addr_lo = buf_dma & 0xffffffff;
12826 test_desc.nic_mbuf = 0x00002100;
12827 test_desc.len = size;
12829 /*
12830 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
12831 * the *second* time the tg3 driver was getting loaded after an
12832 * unload.
12833 *
12834 * Broadcom tells me:
12835 *   ...the DMA engine is connected to the GRC block and a DMA
12836 *   reset may affect the GRC block in some unpredictable way...
12837 *   The behavior of resets to individual blocks has not been tested.
12838 *
12839 * Broadcom noted the GRC reset will also reset all sub-components.
12840 */
12841 if (to_device) {
12842 test_desc.cqid_sqid = (13 << 8) | 2;
12844 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
12845 udelay(40);
12846 } else {
12847 test_desc.cqid_sqid = (16 << 8) | 7;
12849 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
12850 udelay(40);
12851 }
12852 test_desc.flags = 0x00000005;
12854 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
12855 u32 val;
12857 val = *(((u32 *)&test_desc) + i);
12858 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
12859 sram_dma_descs + (i * sizeof(u32)));
12860 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
12861 }
12862 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
12863 
12864 if (to_device)
12865 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
12866 else
12867 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
12869 ret = -ENODEV;
12870 
12871 for (i = 0; i < 40; i++) {
12872 u32 val;
12873 
12874 if (to_device)
12875 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
12876 else
12877 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
12878 if ((val & 0xffff) == sram_dma_descs) {
12879 ret = 0;
12880 break;
12881 }
12882 
12883 udelay(100);
12884 }
12885 
12886 return ret;
12887 }
12889 #define TEST_BUFFER_SIZE 0x2000
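/* Run a host-to-chip-to-host DMA round trip over the 8 KB test
 * buffer to expose the 5700/5701 write-DMA corruption bug; on
 * corruption the write boundary is tightened to 16 bytes and the
 * test is retried.
 */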
12891 static int __devinit tg3_test_dma(struct tg3 *tp)
12892 {
12893 dma_addr_t buf_dma;
12894 u32 *buf, saved_dma_rwctrl;
12895 int ret;
12896 
12897 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
12898 if (!buf) {
12899 ret = -ENOMEM;
12900 goto out_nofree;
12901 }
12903 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
12904 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
12906 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
12908 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12909 /* DMA read watermark not used on PCIE */
12910 tp->dma_rwctrl |= 0x00180000;
12911 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
12912 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
12913 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
12914 tp->dma_rwctrl |= 0x003f0000;
12916 tp->dma_rwctrl |= 0x003f000f;
12918 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12919 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
12920 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
12921 u32 read_water = 0x7;
12923 /* If the 5704 is behind the EPB bridge, we can
12924 * do the less restrictive ONE_DMA workaround for
12925 * better performance.
12927 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
12928 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12929 tp->dma_rwctrl |= 0x8000;
12930 else if (ccval == 0x6 || ccval == 0x7)
12931 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
12933 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
12934 read_water = 4;
12935 /* Set bit 23 to enable PCIX hw bug fix */
12936 tp->dma_rwctrl |=
12937 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
12938 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
12939 (1 << 23);
12940 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
12941 /* 5780 always in PCIX mode */
12942 tp->dma_rwctrl |= 0x00144000;
12943 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12944 /* 5714 always in PCIX mode */
12945 tp->dma_rwctrl |= 0x00148000;
12946 } else {
12947 tp->dma_rwctrl |= 0x001b000f;
12948 }
12949 }
12951 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12952 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12953 tp->dma_rwctrl &= 0xfffffff0;
12955 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12956 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
12957 /* Remove this if it causes problems for some boards. */
12958 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
12960 /* On 5700/5701 chips, we need to set this bit.
12961 * Otherwise the chip will issue cacheline transactions
12962 * to streamable DMA memory with not all the byte
12963 * enables turned on. This is an error on several
12964 * RISC PCI controllers, in particular sparc64.
12966 * On 5703/5704 chips, this bit has been reassigned
12967 * a different meaning. In particular, it is used
12968 * on those chips to enable a PCI-X workaround.
12969 */
12970 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
12971 }
12973 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12975 #if 0
12976 /* Unneeded, already done by tg3_get_invariants. */
12977 tg3_switch_clocks(tp);
12978 #endif
12979 
12980 ret = 0;
12981 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12982 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
12983 goto out;
12985 /* It is best to perform DMA test with maximum write burst size
12986 * to expose the 5700/5701 write DMA bug.
12988 saved_dma_rwctrl = tp->dma_rwctrl;
12989 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12990 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12992 while (1) {
12993 u32 *p = buf, i;
12994 
12995 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
12996 p[i] = i;
12997 
12998 /* Send the buffer to the chip. */
12999 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
13000 if (ret) {
13001 printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
13002 break;
13003 }
13005 #if 0
13006 /* validate data reached card RAM correctly. */
13007 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13008 u32 val;
13009 tg3_read_mem(tp, 0x2100 + (i*4), &val);
13010 if (le32_to_cpu(val) != p[i]) {
13011 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
13012 /* ret = -ENODEV here? */
13013 }
13014 p[i] = 0;
13015 }
13016 #endif
13017 /* Now read it back. */
13018 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
13019 if (ret) {
13020 printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
13021 break;
13022 }
13026 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13027 if (p[i] == i)
13028 continue;
13029 
13030 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13031 DMA_RWCTRL_WRITE_BNDRY_16) {
13032 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13033 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13034 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13035 break;
13036 } else {
13037 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
13038 ret = -ENODEV;
13039 goto out;
13040 }
13041 }
13042 
13043 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
13044 /* Success. */
13045 ret = 0;
13046 break;
13047 }
13048 }
13049 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13050 DMA_RWCTRL_WRITE_BNDRY_16) {
13051 static struct pci_device_id dma_wait_state_chipsets[] = {
13052 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
13053 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
13054 { },
13055 };
13057 /* DMA test passed without adjusting DMA boundary,
13058 * now look for chipsets that are known to expose the
13059 * DMA bug without failing the test.
13060 */
13061 if (pci_dev_present(dma_wait_state_chipsets)) {
13062 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13063 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13064 } else {
13066 /* Safe to use the calculated DMA boundary. */
13067 tp->dma_rwctrl = saved_dma_rwctrl;
13068 }
13069 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13070 }
13071 
13072 out:
13073 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
13074 out_nofree:
13075 return ret;
13076 }
13078 static void __devinit tg3_init_link_config(struct tg3 *tp)
13079 {
13080 tp->link_config.advertising =
13081 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13082 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13083 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
13084 ADVERTISED_Autoneg | ADVERTISED_MII);
13085 tp->link_config.speed = SPEED_INVALID;
13086 tp->link_config.duplex = DUPLEX_INVALID;
13087 tp->link_config.autoneg = AUTONEG_ENABLE;
13088 tp->link_config.active_speed = SPEED_INVALID;
13089 tp->link_config.active_duplex = DUPLEX_INVALID;
13090 tp->link_config.phy_is_low_power = 0;
13091 tp->link_config.orig_speed = SPEED_INVALID;
13092 tp->link_config.orig_duplex = DUPLEX_INVALID;
13093 tp->link_config.orig_autoneg = AUTONEG_INVALID;
13094 }
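/* Choose buffer-manager watermarks for the chip family, with
 * separate thresholds for standard and jumbo frames.
 */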
13096 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
13097 {
13098 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13099 tp->bufmgr_config.mbuf_read_dma_low_water =
13100 DEFAULT_MB_RDMA_LOW_WATER_5705;
13101 tp->bufmgr_config.mbuf_mac_rx_low_water =
13102 DEFAULT_MB_MACRX_LOW_WATER_5705;
13103 tp->bufmgr_config.mbuf_high_water =
13104 DEFAULT_MB_HIGH_WATER_5705;
13105 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13106 tp->bufmgr_config.mbuf_mac_rx_low_water =
13107 DEFAULT_MB_MACRX_LOW_WATER_5906;
13108 tp->bufmgr_config.mbuf_high_water =
13109 DEFAULT_MB_HIGH_WATER_5906;
13110 }
13112 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13113 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
13114 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13115 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
13116 tp->bufmgr_config.mbuf_high_water_jumbo =
13117 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
13118 } else {
13119 tp->bufmgr_config.mbuf_read_dma_low_water =
13120 DEFAULT_MB_RDMA_LOW_WATER;
13121 tp->bufmgr_config.mbuf_mac_rx_low_water =
13122 DEFAULT_MB_MACRX_LOW_WATER;
13123 tp->bufmgr_config.mbuf_high_water =
13124 DEFAULT_MB_HIGH_WATER;
13126 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13127 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
13128 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13129 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
13130 tp->bufmgr_config.mbuf_high_water_jumbo =
13131 DEFAULT_MB_HIGH_WATER_JUMBO;
13132 }
13134 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
13135 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
13136 }
13138 static char * __devinit tg3_phy_string(struct tg3 *tp)
13139 {
13140 switch (tp->phy_id & PHY_ID_MASK) {
13141 case PHY_ID_BCM5400: return "5400";
13142 case PHY_ID_BCM5401: return "5401";
13143 case PHY_ID_BCM5411: return "5411";
13144 case PHY_ID_BCM5701: return "5701";
13145 case PHY_ID_BCM5703: return "5703";
13146 case PHY_ID_BCM5704: return "5704";
13147 case PHY_ID_BCM5705: return "5705";
13148 case PHY_ID_BCM5750: return "5750";
13149 case PHY_ID_BCM5752: return "5752";
13150 case PHY_ID_BCM5714: return "5714";
13151 case PHY_ID_BCM5780: return "5780";
13152 case PHY_ID_BCM5755: return "5755";
13153 case PHY_ID_BCM5787: return "5787";
13154 case PHY_ID_BCM5784: return "5784";
13155 case PHY_ID_BCM5756: return "5722/5756";
13156 case PHY_ID_BCM5906: return "5906";
13157 case PHY_ID_BCM5761: return "5761";
13158 case PHY_ID_BCM8002: return "8002/serdes";
13159 case 0: return "serdes";
13160 default: return "unknown";
13161 }
13162 }
13164 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
13165 {
13166 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13167 strcpy(str, "PCI Express");
13168 return str;
13169 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13170 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
13172 strcpy(str, "PCIX:");
13174 if ((clock_ctrl == 7) ||
13175 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
13176 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
13177 strcat(str, "133MHz");
13178 else if (clock_ctrl == 0)
13179 strcat(str, "33MHz");
13180 else if (clock_ctrl == 2)
13181 strcat(str, "50MHz");
13182 else if (clock_ctrl == 4)
13183 strcat(str, "66MHz");
13184 else if (clock_ctrl == 6)
13185 strcat(str, "100MHz");
13186 } else {
13187 strcpy(str, "PCI:");
13188 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
13189 strcat(str, "66MHz");
13190 else
13191 strcat(str, "33MHz");
13192 }
13193 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
13194 strcat(str, ":32-bit");
13195 else
13196 strcat(str, ":64-bit");
13197 return str;
13198 }
13200 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
13201 {
13202 struct pci_dev *peer;
13203 unsigned int func, devnr = tp->pdev->devfn & ~7;
13205 for (func = 0; func < 8; func++) {
13206 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13207 if (peer && peer != tp->pdev)
13208 break;
13209 pci_dev_put(peer);
13210 }
13211 /* 5704 can be configured in single-port mode, set peer to
13212 * tp->pdev in that case.
13213 */
13214 if (!peer) {
13215 peer = tp->pdev;
13216 return peer;
13217 }
13218 
13219 /*
13220 * We don't need to keep the refcount elevated; there's no way
13221 * to remove one half of this device without removing the other.
13222 */
13223 pci_dev_put(peer);
13224 
13225 return peer;
13226 }
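/* Fill in the default ethtool coalescing parameters; 5705+ chips do
 * not support the per-interrupt variants and get zeros there.
 */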
13228 static void __devinit tg3_init_coal(struct tg3 *tp)
13229 {
13230 struct ethtool_coalesce *ec = &tp->coal;
13232 memset(ec, 0, sizeof(*ec));
13233 ec->cmd = ETHTOOL_GCOALESCE;
13234 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
13235 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
13236 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
13237 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
13238 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
13239 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
13240 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
13241 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
13242 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
13244 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
13245 HOSTCC_MODE_CLRTICK_TXBD)) {
13246 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
13247 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
13248 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
13249 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
13252 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13253 ec->rx_coalesce_usecs_irq = 0;
13254 ec->tx_coalesce_usecs_irq = 0;
13255 ec->stats_block_coalesce_usecs = 0;
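
/* Usage note: these defaults are what userspace sees through the standard
 * ethtool coalescing interface, e.g. "ethtool -c ethX"; changes made with
 * "ethtool -C" come back in through the driver's coalesce handlers rather
 * than this init path.
 */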
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats		= tg3_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
#if TG3_VLAN_TAG_USED
	.ndo_vlan_rx_register	= tg3_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
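
/* Identical to tg3_netdev_ops except for .ndo_start_xmit: chips that
 * cannot DMA above 40 bits (or that carry related TSO errata) are given
 * tg3_start_xmit_dma_bug(), which works around bad buffer addresses in
 * software.
 */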
static const struct net_device_ops tg3_netdev_ops_dma_bug = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit_dma_bug,
	.ndo_get_stats		= tg3_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
#if TG3_VLAN_TAG_USED
	.ndo_vlan_rx_register	= tg3_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;
	struct net_device *dev;
	struct tg3 *tp;
	int err, pm_cap;
	char str[40];
	u64 dma_mask, persist_dma_mask;

	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;
	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);
	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	tp->tx_pending = TG3_DEF_TX_RING_PENDING;

	netif_napi_add(dev, &tp->napi, tg3_poll, 64);
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		dev->netdev_ops = &tg3_netdev_ops;
	else
		dev->netdev_ops = &tg3_netdev_ops_dma_bug;
	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
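
	/* For scale (illustrative): DMA_BIT_MASK(40) == (1ULL << 40) - 1 ==
	 * 0xFFFFFFFFFF, i.e. the first terabyte of bus addresses.  Under
	 * CONFIG_HIGHMEM the streaming mask stays at 64-bit and out-of-range
	 * buffers are caught per-packet in the transmit path instead.
	 */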
	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (err == 0) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				printk(KERN_ERR PFX "Unable to obtain 64 bit "
				       "DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_iounmap;
		}
	}
	tg3_init_bufmgr_config(tp);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;

	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
		   (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
			tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}
	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		if (dev->features & NETIF_F_IP_CSUM)
			dev->features |= NETIF_F_TSO;
		if ((dev->features & NETIF_F_IPV6_CSUM) &&
		    (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2))
			dev->features |= NETIF_F_TSO6;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			dev->features |= NETIF_F_TSO_ECN;
	}
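
	/* Usage note: the NETIF_F_TSO* bits set here are only the defaults;
	 * an administrator can still toggle segmentation offload at runtime,
	 * e.g. "ethtool -K ethX tso on|off".
	 */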
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}
	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			printk(KERN_ERR PFX "Cannot map APE registers, "
			       "aborting.\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}

		tg3_ape_lock_init(tp);

		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
			tg3_read_dash_ver(tp);
	}
	/*
	 * Reset the chip in case the UNDI or EFI driver did not shut it
	 * down.  The DMA self test below enables the WDMAC, at which point
	 * we would otherwise see (spurious) pending DMA on the PCI bus.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_apeunmap;
	}

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
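
	/* Usage note: the pause defaults above remain visible and adjustable
	 * from userspace, e.g. "ethtool -a ethX" to inspect and
	 * "ethtool -A ethX autoneg on rx on tx on" to change them.
	 */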
	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_apeunmap;
	}

	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_bus_string(tp, str),
	       dev->dev_addr);

	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
		printk(KERN_INFO
		       "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
		       dev->name,
		       tp->mdio_bus->phy_map[PHY_ADDR]->drv->name,
		       dev_name(&tp->mdio_bus->phy_map[PHY_ADDR]->dev));
	else
		printk(KERN_INFO
		       "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
		       tp->dev->name, tg3_phy_string(tp),
		       ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
			((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
			 "10/100/1000Base-T")),
		       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
	       dev->name, tp->dma_rwctrl,
	       (pdev->dma_mask == DMA_BIT_MASK(32)) ? 32 :
	       (((u64) pdev->dma_mask == DMA_BIT_MASK(40)) ? 40 : 64));

	return 0;
err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_fw:
	if (tp->fw)
		release_firmware(tp->fw);

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		if (tp->fw)
			release_firmware(tp->fw);

		flush_scheduled_work();

		if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	pci_power_t target_state;
	int err;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;

	err = tg3_set_power_state(tp, target_state);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	pci_restore_state(tp->pdev);

	if (!netif_running(dev))
		return 0;

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
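
/* Note (summary, not normative): tg3_suspend()/tg3_resume() are the legacy
 * pci_driver power-management hooks, and the two paths mirror each other;
 * suspend quiesces NAPI and the timer, halts the chip and enters the target
 * power state, while resume returns to D0, rebuilds the hardware state with
 * tg3_restart_hw() and restarts the timer and transmit queues.
 */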
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};

static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);
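
/* Usage note: no manual setup is needed beyond loading the module
 * ("modprobe tg3"); the PCI core matches devices against tg3_pci_tbl and
 * calls tg3_init_one() for each Tigon3 it finds.
 */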