2 * Network device driver for the GMAC ethernet controller on
5 * Copyright (C) 2000 Paul Mackerras & Ben. Herrenschmidt
7 * portions based on sunhme.c by David S. Miller
10 * Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 08/06/2000
11 * - check init_etherdev return in gmac_probe1
12 * BenH <benh@kernel.crashing.org> - 03/09/2000
13 * - Add support for new PHYs
14 * - Add some PowerBook sleep code
15 * BenH <benh@kernel.crashing.org> - ??/??/????
17 * BenH <benh@kernel.crashing.org> - 08/08/2001
18 * - Add more PHYs, fixes to sleep code
19 * Matt Domsch <Matt_Domsch@dell.com> - 11/12/2001
20 * - use library crc32 functions
23 #include <linux/module.h>
25 #include <linux/config.h>
26 #include <linux/kernel.h>
27 #include <linux/sched.h>
28 #include <linux/types.h>
29 #include <linux/fcntl.h>
30 #include <linux/interrupt.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/delay.h>
34 #include <linux/string.h>
35 #include <linux/timer.h>
36 #include <linux/init.h>
37 #include <linux/pci.h>
38 #include <linux/crc32.h>
41 #include <asm/pgtable.h>
42 #include <asm/machdep.h>
43 #include <asm/pmac_feature.h>
44 #include <asm/keylargo.h>
45 #include <asm/pci-bridge.h>
46 #ifdef CONFIG_PMAC_PBOOK
47 #include <linux/adb.h>
48 #include <linux/pmu.h>
/* Driver version 1.5, kernel 2.4.x */
#define GMAC_VERSION	"v1.5k4"

/*
 * Size of the shared dummy Rx buffer.  The expansion is parenthesized:
 * the original unparenthesized form (A + B + C) would bind incorrectly
 * if the macro were ever used inside a larger expression such as
 * "2 * DUMMY_BUF_LEN" or a comparison — a classic object-like-macro
 * hazard.  Parenthesizing is behavior-identical for existing additive
 * uses (e.g. kmalloc(DUMMY_BUF_LEN, ...)).
 */
#define DUMMY_BUF_LEN	(RX_BUF_ALLOC_SIZE + RX_OFFSET + GMAC_BUFFER_ALIGN)

/* Scratch receive buffer shared by all instances — presumably used as a
 * landing zone when an skb cannot be allocated; TODO(review): confirm
 * against gmac_receive(). */
static unsigned char *dummy_buf;

/* Head of the list of probed gmac devices.  NOTE(review): the sleep
 * notifier below only handles the first device on this list. */
static struct net_device *gmacs;
/*
 * Forward declarations.  The mii_* helpers talk to the PHY through the
 * chip's MIF (MII management interface); the gmac_* routines drive the
 * MAC / DMA cell itself.
 */
64 static int mii_read(struct gmac *gm, int phy, int r);
65 static int mii_write(struct gmac *gm, int phy, int r, int v);
66 static void mii_poll_start(struct gmac *gm);
67 static void mii_poll_stop(struct gmac *gm);
68 static void mii_interrupt(struct gmac *gm);
69 static int mii_lookup_and_reset(struct gmac *gm);
70 static void mii_setup_phy(struct gmac *gm);
71 static int mii_do_reset_phy(struct gmac *gm, int phy_addr);
72 static void mii_init_BCM5400(struct gmac *gm);
73 static void mii_init_BCM5401(struct gmac *gm);
75 static void gmac_set_power(struct gmac *gm, int power_up);
76 static int gmac_powerup_and_reset(struct net_device *dev);
77 static void gmac_set_gigabit_mode(struct gmac *gm, int gigabit);
78 static void gmac_set_duplex_mode(struct gmac *gm, int full_duplex);
79 static void gmac_mac_init(struct gmac *gm, unsigned char *mac_addr);
80 static void gmac_init_rings(struct gmac *gm, int from_irq);
81 static void gmac_start_dma(struct gmac *gm);
82 static void gmac_stop_dma(struct gmac *gm);
83 static void gmac_set_multicast(struct net_device *dev);
84 static int gmac_open(struct net_device *dev);
85 static int gmac_close(struct net_device *dev);
86 static void gmac_tx_timeout(struct net_device *dev);
87 static int gmac_xmit_start(struct sk_buff *skb, struct net_device *dev);
88 static void gmac_tx_cleanup(struct net_device *dev, int force_cleanup);
89 static void gmac_receive(struct net_device *dev);
90 static void gmac_interrupt(int irq, void *dev_id, struct pt_regs *regs);
91 static struct net_device_stats *gmac_stats(struct net_device *dev);
92 static int gmac_probe(void);
93 static void gmac_probe1(struct device_node *gmac);
/* PowerBook sleep support: register with the PMU so the chip is powered
 * down/up across suspend (see gmac_suspend()/gmac_resume() below). */
95 #ifdef CONFIG_PMAC_PBOOK
96 int gmac_sleep_notify(struct pmu_sleep_notifier *self, int when);
97 static struct pmu_sleep_notifier gmac_sleep_notifier = {
98 gmac_sleep_notify, SLEEP_LEVEL_NET,
103 * Read via the mii interface from a PHY register
/*
 * mii_read() - issue an MDIO (IEEE 802.3 clause-22 style) read frame
 * through the MIF for register r of PHY address phy.  Busy-waits for
 * the turnaround-low bit and returns the 16-bit data field; presumably
 * returns -1 on timeout (loop tail not visible in this listing).
 */
106 mii_read(struct gmac *gm, int phy, int r)
110 GM_OUT(GM_MIF_FRAME_CTL_DATA,
111 (0x01 << GM_MIF_FRAME_START_SHIFT) |
112 (0x02 << GM_MIF_FRAME_OPCODE_SHIFT) |
113 GM_MIF_FRAME_TURNAROUND_HI |
114 (phy << GM_MIF_FRAME_PHY_ADDR_SHIFT) |
115 (r << GM_MIF_FRAME_REG_ADDR_SHIFT));
/* Poll for frame completion: the chip drives the turnaround bit low
 * and latches the read data into the same register. */
117 for (timeout = 1000; timeout > 0; --timeout) {
119 if (GM_IN(GM_MIF_FRAME_CTL_DATA) & GM_MIF_FRAME_TURNAROUND_LO)
120 return GM_IN(GM_MIF_FRAME_CTL_DATA) & GM_MIF_FRAME_DATA_MASK;
126 * Write on the mii interface to a PHY register
/*
 * mii_write() - issue an MDIO write frame (opcode 0x01) carrying value
 * v to register r of PHY address phy, then wait for completion the
 * same way mii_read() does.
 */
129 mii_write(struct gmac *gm, int phy, int r, int v)
133 GM_OUT(GM_MIF_FRAME_CTL_DATA,
134 (0x01 << GM_MIF_FRAME_START_SHIFT) |
135 (0x01 << GM_MIF_FRAME_OPCODE_SHIFT) |
136 GM_MIF_FRAME_TURNAROUND_HI |
137 (phy << GM_MIF_FRAME_PHY_ADDR_SHIFT) |
138 (r << GM_MIF_FRAME_REG_ADDR_SHIFT) |
139 (v & GM_MIF_FRAME_DATA_MASK));
141 for (timeout = 1000; timeout > 0; --timeout) {
143 if (GM_IN(GM_MIF_FRAME_CTL_DATA) & GM_MIF_FRAME_TURNAROUND_LO)
150 * Start MIF autopolling of the PHY status register
/*
 * mii_poll_start() - program the MIF to autonomously poll MII_SR on
 * our PHY and unmask the status-change interrupt bits (mask 0xffc0
 * leaves the low six status bits enabled).
 */
153 mii_poll_start(struct gmac *gm)
157 /* Start the MIF polling on the external transceiver. */
158 tmp = GM_IN(GM_MIF_CFG);
159 tmp &= ~(GM_MIF_CFGPR_MASK | GM_MIF_CFGPD_MASK);
160 tmp |= ((gm->phy_addr & 0x1f) << GM_MIF_CFGPD_SHIFT);
161 tmp |= (MII_SR << GM_MIF_CFGPR_SHIFT);
163 GM_OUT(GM_MIF_CFG, tmp);
165 /* Let the bits set. */
166 udelay(GM_MIF_POLL_DELAY);
168 GM_OUT(GM_MIF_IRQ_MASK, 0xffc0);
172 * Stop MIF autopolling of the PHY status register
/*
 * mii_poll_stop() - mask all MIF interrupts and turn polling off, then
 * wait for the hardware to settle before manual MDIO access.
 */
175 mii_poll_stop(struct gmac *gm)
177 GM_OUT(GM_MIF_IRQ_MASK, 0xffff);
178 GM_BIC(GM_MIF_CFG, GM_MIF_CFGPE);
179 udelay(GM_MIF_POLL_DELAY);
183 * Called when the MIF detects a change of the PHY status
185 * handles monitoring the link and updating GMAC with the correct
188 * Note: Are we missing status changes ? In this case, we'll have to add
189 * a timer and control the autoneg. process more closely. Also, we may
190 * want to stop rx and tx side when the link is down.
/* Each row is { full_duplex, link_100, gigabit } indexed by the 3-bit
 * link-mode field of the BCM5400 aux status register. */
193 /* Link modes of the BCM5400 PHY */
194 static int phy_BCM5400_link_table[8][3] = {
195 { 0, 0, 0 }, /* No link */
196 { 0, 0, 0 }, /* 10BT Half Duplex */
197 { 1, 0, 0 }, /* 10BT Full Duplex */
198 { 0, 1, 0 }, /* 100BT Half Duplex */
199 { 0, 1, 0 }, /* 100BT Half Duplex */
200 { 1, 1, 0 }, /* 100BT Full Duplex*/
201 { 1, 0, 1 }, /* 1000BT */
202 { 1, 0, 1 }, /* 1000BT */
/*
 * mii_interrupt() - react to a PHY status change: resolve the link
 * speed/duplex from the PHY-specific aux status register, update
 * carrier state and reconfigure the MAC when duplex or gigabit mode
 * changed.  NOTE(review): sampled listing — polling stop/restart and
 * local declarations are not visible here.
 */
206 mii_interrupt(struct gmac *gm)
213 /* May the status change before polling is re-enabled ? */
216 /* We read the Auxiliary Status Summary register */
217 phy_status = mii_read(gm, gm->phy_addr, MII_SR);
/* Only act when link-status or autoneg-complete actually toggled. */
218 if ((phy_status ^ gm->phy_status) & (MII_SR_ASSC | MII_SR_LKS)) {
223 printk(KERN_INFO "%s: Link state change, phy_status: 0x%04x\n",
224 gm->dev->name, phy_status);
226 gm->phy_status = phy_status;
/* Honour the link partner's pause capability for Tx flow control. */
228 /* Should we enable that in generic mode ? */
229 lpar_ability = mii_read(gm, gm->phy_addr, MII_ANLPA);
230 if (lpar_ability & MII_ANLPA_PAUS)
231 GM_BIS(GM_MAC_CTRL_CONFIG, GM_MAC_CTRL_CONF_SND_PAUSE_EN);
233 GM_BIC(GM_MAC_CTRL_CONFIG, GM_MAC_CTRL_CONF_SND_PAUSE_EN);
235 /* Link ? Check for speed and duplex */
236 if ((phy_status & MII_SR_LKS) && (phy_status & MII_SR_ASSC)) {
/* Speed/duplex readout is PHY-model specific. */
239 switch (gm->phy_type) {
242 aux_stat = mii_read(gm, gm->phy_addr, MII_BCM5201_AUXCTLSTATUS);
244 printk(KERN_INFO "%s: Link up ! BCM5201/5221 aux_stat: 0x%04x\n",
245 gm->dev->name, aux_stat);
247 full_duplex = ((aux_stat & MII_BCM5201_AUXCTLSTATUS_DUPLEX) != 0);
248 link_100 = ((aux_stat & MII_BCM5201_AUXCTLSTATUS_SPEED) != 0);
249 netif_carrier_on(gm->dev);
254 aux_stat = mii_read(gm, gm->phy_addr, MII_BCM5400_AUXSTATUS);
255 link = (aux_stat & MII_BCM5400_AUXSTATUS_LINKMODE_MASK) >>
256 MII_BCM5400_AUXSTATUS_LINKMODE_SHIFT;
258 printk(KERN_INFO "%s: Link up ! BCM54xx aux_stat: 0x%04x (link mode: %d)\n",
259 gm->dev->name, aux_stat, link);
261 full_duplex = phy_BCM5400_link_table[link][0];
262 link_100 = phy_BCM5400_link_table[link][1];
263 gigabit = phy_BCM5400_link_table[link][2];
264 netif_carrier_on(gm->dev);
267 aux_stat = mii_read(gm, gm->phy_addr, MII_LXT971_STATUS2);
269 printk(KERN_INFO "%s: Link up ! LXT971 stat2: 0x%04x\n",
270 gm->dev->name, aux_stat);
272 full_duplex = ((aux_stat & MII_LXT971_STATUS2_FULLDUPLEX) != 0);
273 link_100 = ((aux_stat & MII_LXT971_STATUS2_SPEED) != 0);
274 netif_carrier_on(gm->dev);
/* Generic/unknown PHY: fall back to the link partner ability bits. */
277 full_duplex = (lpar_ability & MII_ANLPA_FDAM) != 0;
278 link_100 = (lpar_ability & MII_ANLPA_100M) != 0;
282 printk(KERN_INFO "%s: Full Duplex: %d, Speed: %s\n",
283 gm->dev->name, full_duplex,
284 gigabit ? "1000" : (link_100 ? "100" : "10"));
/* Reprogram the MAC only on an actual mode transition. */
286 if (gigabit != gm->gigabit) {
287 gm->gigabit = gigabit;
288 gmac_set_gigabit_mode(gm, gm->gigabit);
291 if (full_duplex != gm->full_duplex) {
292 gm->full_duplex = full_duplex;
293 gmac_set_duplex_mode(gm, gm->full_duplex);
298 } else if (!(phy_status & MII_SR_LKS)) {
300 printk(KERN_INFO "%s: Link down !\n", gm->dev->name);
302 netif_carrier_off(gm->dev);
307 #ifdef CONFIG_PMAC_PBOOK
308 /* Power management: stop PHY chip for suspend mode
310 * TODO: This will have to be modified is WOL is to be supported.
/*
 * gmac_suspend() - quiesce the chip for PowerBook sleep: detach from
 * the network stack, stop DMA, drain both rings, park the PHY/MDIO
 * lines in a safe electrical state and finally unclock the cell.
 * NOTE(review): sampled listing — ring-index resets and some locals
 * are not visible here.
 */
313 gmac_suspend(struct gmac* gm)
319 netif_device_detach(gm->dev);
/* Block the interrupt handler and MIF polling before touching hw. */
322 spin_lock_irqsave(&gm->lock, flags);
324 disable_irq(gm->dev->irq);
325 /* Stop polling PHY */
328 /* Mask out all chips interrupts */
329 GM_OUT(GM_IRQ_MASK, 0xffffffff);
330 spin_unlock_irqrestore(&gm->lock, flags);
334 /* Empty Tx ring of any remaining gremlins */
335 gmac_tx_cleanup(gm->dev, 1);
337 /* Empty Rx ring of any remaining gremlins */
338 for (i = 0; i < NRX; ++i) {
339 if (gm->rx_buff[i] != 0) {
340 dev_kfree_skb_irq(gm->rx_buff[i]);
346 /* Clear interrupts on 5201 */
347 if (gm->phy_type == PHY_B5201 || gm->phy_type == PHY_B5221)
348 mii_write(gm, gm->phy_addr, MII_BCM5201_INTERRUPT, 0);
350 /* Drive MDIO high */
351 GM_OUT(GM_MIF_CFG, 0);
353 /* Unchanged, don't ask me why */
354 data = mii_read(gm, gm->phy_addr, MII_ANLPA);
355 mii_write(gm, gm->phy_addr, MII_ANLPA, data);
357 /* Stop everything */
358 GM_OUT(GM_MAC_RX_CONFIG, 0);
359 GM_OUT(GM_MAC_TX_CONFIG, 0);
360 GM_OUT(GM_MAC_XIF_CONFIG, 0);
361 GM_OUT(GM_TX_CONF, 0);
362 GM_OUT(GM_RX_CONF, 0);
364 /* Set MAC in reset state */
365 GM_OUT(GM_RESET, GM_RESET_TX | GM_RESET_RX);
/* Busy-wait (bounded) for the reset bits to self-clear. */
366 for (timeout = 100; timeout > 0; --timeout) {
368 if ((GM_IN(GM_RESET) & (GM_RESET_TX | GM_RESET_RX)) == 0)
371 GM_OUT(GM_MAC_TX_RESET, GM_MAC_TX_RESET_NOW);
372 GM_OUT(GM_MAC_RX_RESET, GM_MAC_RX_RESET_NOW);
374 /* Superisolate PHY */
375 if (gm->phy_type == PHY_B5201 || gm->phy_type == PHY_B5221)
376 mii_write(gm, gm->phy_addr, MII_BCM5201_MULTIPHY,
377 MII_BCM5201_MULTIPHY_SUPERISOLATE);
379 /* Put MDIO in sane electric state. According to an obscure
380 * Apple comment, not doing so may let them drive some current
381 * during sleep and possibly damage BCM PHYs.
383 GM_OUT(GM_MIF_CFG, GM_MIF_CFGBB);
384 GM_OUT(GM_MIF_BB_CLOCK, 0);
385 GM_OUT(GM_MIF_BB_DATA, 0);
386 GM_OUT(GM_MIF_BB_OUT_ENABLE, 0);
387 GM_OUT(GM_MAC_XIF_CONFIG,
388 GM_MAC_XIF_CONF_GMII_MODE|GM_MAC_XIF_CONF_MII_INT_LOOP);
/* Read-back to flush the posted write before cutting the clock. */
389 (void)GM_IN(GM_MAC_XIF_CONFIG);
391 /* Unclock the GMAC chip */
392 gmac_set_power(gm, 0);
/*
 * gmac_resume() - counterpart of gmac_suspend(): re-power and reset the
 * cell, rebuild rings and MAC state, un-superisolate BCM5201-class
 * PHYs and re-attach to the stack.  If the interface was not open the
 * chip is simply left powered but fully disabled.
 */
396 gmac_resume(struct gmac *gm)
400 if (gmac_powerup_and_reset(gm->dev)) {
401 printk(KERN_ERR "%s: Couldn't revive gmac ethernet !\n", gm->dev->name);
408 /* Create fresh rings */
409 gmac_init_rings(gm, 1);
410 /* re-initialize the MAC */
411 gmac_mac_init(gm, gm->dev->dev_addr);
412 /* re-initialize the multicast tables & promisc mode if any */
413 gmac_set_multicast(gm->dev);
416 /* Early enable Tx and Rx so that we are clocked */
417 GM_BIS(GM_TX_CONF, GM_TX_CONF_DMA_EN);
419 GM_BIS(GM_RX_CONF, GM_RX_CONF_DMA_EN);
421 GM_BIS(GM_MAC_TX_CONFIG, GM_MAC_TX_CONF_ENABLE);
423 GM_BIS(GM_MAC_RX_CONFIG, GM_MAC_RX_CONF_ENABLE);
/* Take 5201/5221 PHYs back out of superisolate mode (set on suspend). */
425 if (gm->phy_type == PHY_B5201 || gm->phy_type == PHY_B5221) {
426 data = mii_read(gm, gm->phy_addr, MII_BCM5201_MULTIPHY);
427 mii_write(gm, gm->phy_addr, MII_BCM5201_MULTIPHY,
428 data & ~MII_BCM5201_MULTIPHY_SUPERISOLATE);
433 /* restart polling PHY */
435 /* restart DMA operations */
437 netif_device_attach(gm->dev);
438 enable_irq(gm->dev->irq);
440 /* Driver not opened, just leave things off. Note that
441 * we could be smart and superisolate the PHY when the
442 * driver is closed, but I won't do that unless I have
443 * a better understanding of some electrical issues with
444 * this PHY chip --BenH
446 GM_OUT(GM_MAC_RX_CONFIG, 0);
447 GM_OUT(GM_MAC_TX_CONFIG, 0);
448 GM_OUT(GM_MAC_XIF_CONFIG, 0);
449 GM_OUT(GM_TX_CONF, 0);
450 GM_OUT(GM_RX_CONF, 0);
/*
 * mii_do_reset_phy() - software-reset one PHY via MII_CR and wait (up
 * to 100 polls) for the self-clearing reset bit; finally make sure the
 * PHY is not left isolated.  Presumably returns 0 on success, non-zero
 * on timeout/death — return statements not visible in this listing.
 */
456 mii_do_reset_phy(struct gmac *gm, int phy_addr)
458 int mii_control, timeout;
460 mii_control = mii_read(gm, phy_addr, MII_CR);
461 mii_write(gm, phy_addr, MII_CR, mii_control | MII_CR_RST);
463 for (timeout = 100; timeout > 0; --timeout) {
464 mii_control = mii_read(gm, phy_addr, MII_CR);
/* mii_read() returns -1 when the MIF frame times out. */
465 if (mii_control == -1) {
466 printk(KERN_ERR "%s PHY died after reset !\n",
470 if ((mii_control & MII_CR_RST) == 0)
474 if (mii_control & MII_CR_RST) {
475 printk(KERN_ERR "%s PHY reset timeout !\n", gm->dev->name);
/* Clear the isolate bit so the PHY drives the MII lines again. */
478 mii_write(gm, phy_addr, MII_CR, mii_control & ~MII_CR_ISOL);
482 /* Here's a bunch of configuration routines for
483 * Broadcom PHYs used on various Mac models. Unfortunately,
484 * except for the 5201, Broadcom never sent me any documentation,
485 * so this is from my understanding of Apple's Open Firmware
486 * drivers and Darwin's implementation
/*
 * mii_init_BCM5400() - enable gigabit full duplex on the 5400 and
 * configure its cascaded 10/100 companion PHY (at address 0x1f) for
 * serial mode.
 */
490 mii_init_BCM5400(struct gmac *gm)
494 /* Configure for gigabit full duplex */
495 data = mii_read(gm, gm->phy_addr, MII_BCM5400_AUXCONTROL);
496 data |= MII_BCM5400_AUXCONTROL_PWR10BASET;
497 mii_write(gm, gm->phy_addr, MII_BCM5400_AUXCONTROL, data);
499 data = mii_read(gm, gm->phy_addr, MII_BCM5400_GB_CONTROL);
500 data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP;
501 mii_write(gm, gm->phy_addr, MII_BCM5400_GB_CONTROL, data);
505 /* Reset and configure cascaded 10/100 PHY */
506 mii_do_reset_phy(gm, 0x1f);
508 data = mii_read(gm, 0x1f, MII_BCM5201_MULTIPHY);
509 data |= MII_BCM5201_MULTIPHY_SERIALMODE;
510 mii_write(gm, 0x1f, MII_BCM5201_MULTIPHY, data);
/* Drop the temporary 10BASE-T power bit set above. */
512 data = mii_read(gm, gm->phy_addr, MII_BCM5400_AUXCONTROL);
513 data &= ~MII_BCM5400_AUXCONTROL_PWR10BASET;
514 mii_write(gm, gm->phy_addr, MII_BCM5400_AUXCONTROL, data);
/*
 * mii_init_BCM5401() - as above for the 5401, plus an Apple/OF
 * workaround sequence for early revisions.
 */
518 mii_init_BCM5401(struct gmac *gm)
523 rev = mii_read(gm, gm->phy_addr, MII_ID1) & 0x000f;
524 if (rev == 0 || rev == 3) {
525 /* Some revisions of 5401 appear to need this
526 * initialisation sequence to disable, according
527 * to OF, "tap power management"
529 * WARNING ! OF and Darwin don't agree on the
530 * register addresses. OF seem to interpret the
531 * register numbers below as decimal
533 mii_write(gm, gm->phy_addr, 0x18, 0x0c20);
534 mii_write(gm, gm->phy_addr, 0x17, 0x0012);
535 mii_write(gm, gm->phy_addr, 0x15, 0x1804);
536 mii_write(gm, gm->phy_addr, 0x17, 0x0013);
537 mii_write(gm, gm->phy_addr, 0x15, 0x1204);
538 mii_write(gm, gm->phy_addr, 0x17, 0x8006);
539 mii_write(gm, gm->phy_addr, 0x15, 0x0132);
540 mii_write(gm, gm->phy_addr, 0x17, 0x8006);
541 mii_write(gm, gm->phy_addr, 0x15, 0x0232);
542 mii_write(gm, gm->phy_addr, 0x17, 0x201f);
543 mii_write(gm, gm->phy_addr, 0x15, 0x0a20);
546 /* Configure for gigabit full duplex */
547 data = mii_read(gm, gm->phy_addr, MII_BCM5400_GB_CONTROL);
548 data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP;
549 mii_write(gm, gm->phy_addr, MII_BCM5400_GB_CONTROL, data);
553 /* Reset and configure cascaded 10/100 PHY */
554 mii_do_reset_phy(gm, 0x1f);
556 data = mii_read(gm, 0x1f, MII_BCM5201_MULTIPHY);
557 data |= MII_BCM5201_MULTIPHY_SERIALMODE;
558 mii_write(gm, 0x1f, MII_BCM5201_MULTIPHY, data);
/*
 * mii_init_BCM5411() - Apple-derived analog setup, reset, then
 * autoneg + gigabit full-duplex advertisement.
 */
562 mii_init_BCM5411(struct gmac *gm)
566 /* Here's some more Apple black magic to setup
567 * some voltage stuffs.
569 mii_write(gm, gm->phy_addr, 0x1c, 0x8c23);
570 mii_write(gm, gm->phy_addr, 0x1c, 0x8ca3);
571 mii_write(gm, gm->phy_addr, 0x1c, 0x8c23);
573 /* Here, Apple seems to want to reset it, do
576 mii_write(gm, gm->phy_addr, MII_CR, MII_CR_RST);
579 mii_write(gm, gm->phy_addr, MII_CR,
580 MII_CR_ASSE|MII_CR_FDM| /* Autospeed, full duplex */
582 MII_CR_SPEEDSEL2 /* chip specific, gigabit enable ? */);
584 data = mii_read(gm, gm->phy_addr, MII_BCM5400_GB_CONTROL);
585 data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP;
586 mii_write(gm, gm->phy_addr, MII_BCM5400_GB_CONTROL, data);
/*
 * mii_lookup_and_reset() - hard-reset the PHY via platform feature
 * call, scan all 32 MDIO addresses for a responding PHY, reset it,
 * then identify the model from the MII ID registers and run the
 * matching model-specific init routine.  Unknown IDs fall back to a
 * generic mode.
 */
590 mii_lookup_and_reset(struct gmac *gm)
592 int i, mii_status, mii_control;
595 gm->phy_type = PHY_UNKNOWN;
597 /* Hard reset the PHY */
598 pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gm->of_node, 0, 0);
/* A present PHY answers with something other than all-ones/-1 on
 * both the control and status registers. */
601 for(i=0; i<=31; i++) {
602 mii_control = mii_read(gm, i, MII_CR);
603 mii_status = mii_read(gm, i, MII_SR);
604 if (mii_control != -1 && mii_status != -1 &&
605 (mii_control != 0xffff || mii_status != 0xffff))
609 if (gm->phy_addr > 31)
613 if (mii_do_reset_phy(gm, gm->phy_addr))
616 /* Read the PHY ID */
617 gm->phy_id = (mii_read(gm, gm->phy_addr, MII_ID0) << 16) |
618 mii_read(gm, gm->phy_addr, MII_ID1);
620 printk(KERN_INFO "%s: PHY ID: 0x%08x\n", gm->dev->name, gm->phy_id);
/* Dispatch on the OUI/model part of the ID. */
622 if ((gm->phy_id & MII_BCM5400_MASK) == MII_BCM5400_ID) {
623 gm->phy_type = PHY_B5400;
624 printk(KERN_INFO "%s: Found Broadcom BCM5400 PHY (Gigabit)\n",
626 mii_init_BCM5400(gm);
627 } else if ((gm->phy_id & MII_BCM5401_MASK) == MII_BCM5401_ID) {
628 gm->phy_type = PHY_B5401;
629 printk(KERN_INFO "%s: Found Broadcom BCM5401 PHY (Gigabit)\n",
631 mii_init_BCM5401(gm);
632 } else if ((gm->phy_id & MII_BCM5411_MASK) == MII_BCM5411_ID) {
633 gm->phy_type = PHY_B5411;
634 printk(KERN_INFO "%s: Found Broadcom BCM5411 PHY (Gigabit)\n",
636 mii_init_BCM5411(gm);
637 } else if ((gm->phy_id & MII_BCM5201_MASK) == MII_BCM5201_ID) {
638 gm->phy_type = PHY_B5201;
639 printk(KERN_INFO "%s: Found Broadcom BCM5201 PHY\n", gm->dev->name);
640 } else if ((gm->phy_id & MII_BCM5221_MASK) == MII_BCM5221_ID) {
641 gm->phy_type = PHY_B5221;
642 printk(KERN_INFO "%s: Found Broadcom BCM5221 PHY\n", gm->dev->name);
643 } else if ((gm->phy_id & MII_LXT971_MASK) == MII_LXT971_ID) {
644 gm->phy_type = PHY_LXT971;
645 printk(KERN_INFO "%s: Found LevelOne LX971 PHY\n", gm->dev->name);
647 printk(KERN_WARNING "%s: Warning ! Unknown PHY ID 0x%08x, using generic mode...\n",
648 gm->dev->name, gm->phy_id);
659 * Setup the PHY autonegotiation parameters
661 * Code to force the PHY duplex mode and speed should be
/*
 * mii_setup_phy() - advertise full 10/100 half/full-duplex capability
 * and (re)start autonegotiation.  NOTE(review): sampled listing — the
 * line ORing the restart bits into `data` between the two final writes
 * is not visible, which is why the same write appears twice below.
 */
665 mii_setup_phy(struct gmac *gm)
669 /* Stop auto-negotiation */
670 data = mii_read(gm, gm->phy_addr, MII_CR);
671 mii_write(gm, gm->phy_addr, MII_CR, data & ~MII_CR_ASSE);
673 /* Set advertisement to 10/100 and Half/Full duplex
674 * (full capabilities) */
675 data = mii_read(gm, gm->phy_addr, MII_ANA);
676 data |= MII_ANA_TXAM | MII_ANA_FDAM | MII_ANA_10M;
677 mii_write(gm, gm->phy_addr, MII_ANA, data);
679 /* Restart auto-negotiation */
680 data = mii_read(gm, gm->phy_addr, MII_CR);
682 mii_write(gm, gm->phy_addr, MII_CR, data);
684 mii_write(gm, gm->phy_addr, MII_CR, data);
688 * Turn On/Off the gmac cell inside Uni-N
690 * ToDo: Add code to support powering down of the PHY.
/*
 * gmac_set_power() - clock the GMAC cell up or down through the
 * platform feature call; on power-up also re-establish minimal PCI
 * configuration (command bits, latency timer, cache line size), since
 * the device is invisible to PCI while unclocked.
 */
693 gmac_set_power(struct gmac *gm, int power_up)
696 pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gm->of_node, 0, 1);
697 if (gm->pci_devfn != 0xff) {
701 * Make sure PCI is correctly configured
703 * We use old pci_bios versions of the function since, by
704 * default, gmac is not powered up, and so will be absent
705 * from the kernel initial PCI lookup.
707 * Should be replaced by 2.4 new PCI mechanisms and really
708 * register the device.
710 pcibios_read_config_word(gm->pci_bus, gm->pci_devfn,
712 cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE;
713 pcibios_write_config_word(gm->pci_bus, gm->pci_devfn,
715 pcibios_write_config_byte(gm->pci_bus, gm->pci_devfn,
716 PCI_LATENCY_TIMER, 16);
717 pcibios_write_config_byte(gm->pci_bus, gm->pci_devfn,
718 PCI_CACHE_LINE_SIZE, 8);
721 pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gm->of_node, 0, 0);
726 * Makes sure the GMAC cell is powered up, and reset it
/*
 * gmac_powerup_and_reset() - power the cell, issue a Tx+Rx software
 * reset and wait (bounded) for completion; on success also mask all
 * interrupts and reset both MACs.  On timeout, powers the cell back
 * down.  Presumably returns 0 on success, non-zero on failure (return
 * statements not visible in this listing).
 */
729 gmac_powerup_and_reset(struct net_device *dev)
731 struct gmac *gm = (struct gmac *) dev->priv;
734 /* turn on GB clock */
735 gmac_set_power(gm, 1);
736 /* Perform a software reset */
737 GM_OUT(GM_RESET, GM_RESET_TX | GM_RESET_RX);
738 for (timeout = 100; timeout > 0; --timeout) {
740 if ((GM_IN(GM_RESET) & (GM_RESET_TX | GM_RESET_RX)) == 0) {
741 /* Mask out all chips interrupts */
742 GM_OUT(GM_IRQ_MASK, 0xffffffff);
743 GM_OUT(GM_MAC_TX_RESET, GM_MAC_TX_RESET_NOW);
744 GM_OUT(GM_MAC_RX_RESET, GM_MAC_RX_RESET_NOW);
748 printk(KERN_ERR "%s reset failed!\n", dev->name);
749 gmac_set_power(gm, 0);
755 * Set the MAC duplex mode.
757 * Side effect: stops Tx MAC
/*
 * gmac_set_duplex_mode() - disable the Tx MAC, wait for the enable bit
 * to clear, then set/clear the carrier-ignore, collision-ignore and
 * echo-disable bits according to the requested duplex.  Caller is
 * responsible for re-enabling Tx.
 */
760 gmac_set_duplex_mode(struct gmac *gm, int full_duplex)
763 GM_BIC(GM_MAC_TX_CONFIG, GM_MAC_TX_CONF_ENABLE);
764 while(GM_IN(GM_MAC_TX_CONFIG) & GM_MAC_TX_CONF_ENABLE)
/* Full duplex: no carrier sense / collision handling, echo on. */
768 GM_BIS(GM_MAC_TX_CONFIG, GM_MAC_TX_CONF_IGNORE_CARRIER
769 | GM_MAC_TX_CONF_IGNORE_COLL);
770 GM_BIC(GM_MAC_XIF_CONFIG, GM_MAC_XIF_CONF_DISABLE_ECHO);
772 GM_BIC(GM_MAC_TX_CONFIG, GM_MAC_TX_CONF_IGNORE_CARRIER
773 | GM_MAC_TX_CONF_IGNORE_COLL);
774 GM_BIS(GM_MAC_XIF_CONFIG, GM_MAC_XIF_CONF_DISABLE_ECHO);
778 /* Set the MAC gigabit mode. Side effect: stops Tx MAC */
780 gmac_set_gigabit_mode(struct gmac *gm, int gigabit)
783 GM_BIC(GM_MAC_TX_CONFIG, GM_MAC_TX_CONF_ENABLE);
784 while(GM_IN(GM_MAC_TX_CONFIG) & GM_MAC_TX_CONF_ENABLE)
/* GMII mode on for gigabit, off (MII) otherwise. */
788 GM_BIS(GM_MAC_XIF_CONFIG, GM_MAC_XIF_CONF_GMII_MODE);
790 GM_BIC(GM_MAC_XIF_CONFIG, GM_MAC_XIF_CONF_GMII_MODE);
795 * Initialize a bunch of registers to put the chip into a known
796 * and hopefully happy state
/*
 * gmac_mac_init() - full MAC programming after reset: datapath mode,
 * interrupt masks, pause parameters, DMA configuration, frame-size
 * limits, station address and filters, statistics counters, duplex
 * default, pause thresholds and interrupt blanking.
 */
799 gmac_mac_init(struct gmac *gm, unsigned char *mac_addr)
803 /* Set random seed to low bits of MAC address */
804 GM_OUT(GM_MAC_RANDOM_SEED, mac_addr[5] | (mac_addr[4] << 8));
806 /* Configure the data path mode to MII/GII */
807 GM_OUT(GM_PCS_DATAPATH_MODE, GM_PCS_DATAPATH_MII);
809 /* Configure XIF to MII mode. Full duplex led is set
812 GM_OUT(GM_MAC_XIF_CONFIG, GM_MAC_XIF_CONF_TX_MII_OUT_EN
813 | GM_MAC_XIF_CONF_FULL_DPLX_LED);
815 /* Mask out all MAC interrupts */
816 GM_OUT(GM_MAC_TX_MASK, 0xffff);
817 GM_OUT(GM_MAC_RX_MASK, 0xffff);
818 GM_OUT(GM_MAC_CTRLSTAT_MASK, 0xff);
820 /* Setup bits of MAC */
821 GM_OUT(GM_MAC_SND_PAUSE, GM_MAC_SND_PAUSE_DEFAULT);
822 GM_OUT(GM_MAC_CTRL_CONFIG, GM_MAC_CTRL_CONF_RCV_PAUSE_EN);
824 /* Configure GEM DMA */
825 GM_OUT(GM_GCONF, GM_GCONF_BURST_SZ |
826 (31 << GM_GCONF_TXDMA_LIMIT_SHIFT) |
827 (31 << GM_GCONF_RXDMA_LIMIT_SHIFT));
829 (GM_TX_CONF_FIFO_THR_DEFAULT << GM_TX_CONF_FIFO_THR_SHIFT) |
832 /* 34 byte offset for checksum computation. This works because ip_input() will clear out
833 * the skb->csum and skb->ip_summed fields and recompute the csum if IP options are
834 * present in the header. 34 == (ethernet header len) + sizeof(struct iphdr)
837 (RX_OFFSET << GM_RX_CONF_FBYTE_OFF_SHIFT) |
838 (0x22 << GM_RX_CONF_CHK_START_SHIFT) |
839 (GM_RX_CONF_DMA_THR_DEFAULT << GM_RX_CONF_DMA_THR_SHIFT) |
842 /* Configure other bits of MAC */
843 GM_OUT(GM_MAC_INTR_PKT_GAP0, GM_MAC_INTR_PKT_GAP0_DEFAULT);
844 GM_OUT(GM_MAC_INTR_PKT_GAP1, GM_MAC_INTR_PKT_GAP1_DEFAULT);
845 GM_OUT(GM_MAC_INTR_PKT_GAP2, GM_MAC_INTR_PKT_GAP2_DEFAULT);
846 GM_OUT(GM_MAC_MIN_FRAME_SIZE, GM_MAC_MIN_FRAME_SIZE_DEFAULT);
847 GM_OUT(GM_MAC_MAX_FRAME_SIZE, GM_MAC_MAX_FRAME_SIZE_DEFAULT);
848 GM_OUT(GM_MAC_PREAMBLE_LEN, GM_MAC_PREAMBLE_LEN_DEFAULT);
849 GM_OUT(GM_MAC_JAM_SIZE, GM_MAC_JAM_SIZE_DEFAULT);
850 GM_OUT(GM_MAC_ATTEMPT_LIMIT, GM_MAC_ATTEMPT_LIMIT_DEFAULT);
851 GM_OUT(GM_MAC_SLOT_TIME, GM_MAC_SLOT_TIME_DEFAULT);
852 GM_OUT(GM_MAC_CONTROL_TYPE, GM_MAC_CONTROL_TYPE_DEFAULT);
/* Station address is programmed big-endian-ish, 16 bits per register. */
854 /* Setup MAC addresses, clear filters, clear hash table */
855 GM_OUT(GM_MAC_ADDR_NORMAL0, (mac_addr[4] << 8) + mac_addr[5]);
856 GM_OUT(GM_MAC_ADDR_NORMAL1, (mac_addr[2] << 8) + mac_addr[3]);
857 GM_OUT(GM_MAC_ADDR_NORMAL2, (mac_addr[0] << 8) + mac_addr[1]);
858 GM_OUT(GM_MAC_ADDR_ALT0, 0);
859 GM_OUT(GM_MAC_ADDR_ALT1, 0);
860 GM_OUT(GM_MAC_ADDR_ALT2, 0);
861 GM_OUT(GM_MAC_ADDR_CTRL0, 0x0001);
862 GM_OUT(GM_MAC_ADDR_CTRL1, 0xc200);
863 GM_OUT(GM_MAC_ADDR_CTRL2, 0x0180);
864 GM_OUT(GM_MAC_ADDR_FILTER0, 0);
865 GM_OUT(GM_MAC_ADDR_FILTER1, 0);
866 GM_OUT(GM_MAC_ADDR_FILTER2, 0);
867 GM_OUT(GM_MAC_ADDR_FILTER_MASK1_2, 0);
868 GM_OUT(GM_MAC_ADDR_FILTER_MASK0, 0);
869 for (i = 0; i < 27; ++i)
870 GM_OUT(GM_MAC_ADDR_FILTER_HASH0 + i, 0);
872 /* Clear stat counters */
873 GM_OUT(GM_MAC_COLLISION_CTR, 0);
874 GM_OUT(GM_MAC_FIRST_COLLISION_CTR, 0);
875 GM_OUT(GM_MAC_EXCS_COLLISION_CTR, 0);
876 GM_OUT(GM_MAC_LATE_COLLISION_CTR, 0);
877 GM_OUT(GM_MAC_DEFER_TIMER_COUNTER, 0);
878 GM_OUT(GM_MAC_PEAK_ATTEMPTS, 0);
879 GM_OUT(GM_MAC_RX_FRAME_CTR, 0);
880 GM_OUT(GM_MAC_RX_LEN_ERR_CTR, 0);
881 GM_OUT(GM_MAC_RX_ALIGN_ERR_CTR, 0);
882 GM_OUT(GM_MAC_RX_CRC_ERR_CTR, 0);
883 GM_OUT(GM_MAC_RX_CODE_VIOLATION_CTR, 0);
885 /* default to half duplex */
886 GM_OUT(GM_MAC_TX_CONFIG, 0);
887 GM_OUT(GM_MAC_RX_CONFIG, 0);
888 gmac_set_duplex_mode(gm, gm->full_duplex);
/* Pause off/on thresholds derived from the Rx FIFO size, leaving room
 * for 2 resp. 3 max-size frames. */
890 /* Setup pause thresholds */
891 fifo_size = GM_IN(GM_RX_FIFO_SIZE);
893 ((fifo_size - ((GM_MAC_MAX_FRAME_SIZE_ALIGN + 8) * 2 / GM_RX_PTH_UNITS))
894 << GM_RX_PTH_OFF_SHIFT) |
895 ((fifo_size - ((GM_MAC_MAX_FRAME_SIZE_ALIGN + 8) * 3 / GM_RX_PTH_UNITS))
896 << GM_RX_PTH_ON_SHIFT));
/* Interrupt blanking values depend on the 66MHz-enable bus bit. */
898 /* Setup interrupt blanking */
899 if (GM_IN(GM_BIF_CFG) & GM_BIF_CFG_M66EN)
900 GM_OUT(GM_RX_BLANK, (5 << GM_RX_BLANK_INTR_PACKETS_SHIFT)
901 | (8 << GM_RX_BLANK_INTR_TIME_SHIFT));
903 GM_OUT(GM_RX_BLANK, (5 << GM_RX_BLANK_INTR_PACKETS_SHIFT)
904 | (4 << GM_RX_BLANK_INTR_TIME_SHIFT));
908 * Fill the Rx and Tx rings with good initial values, alloc
/*
 * gmac_init_rings() - zero both descriptor rings, allocate and hand
 * NRX receive skbs to the chip (marked RX_SZ_OWN), and program the
 * ring base addresses.  from_irq selects GFP_ATOMIC allocation.
 */
912 gmac_init_rings(struct gmac *gm, int from_irq)
917 struct gmac_dma_desc *ring;
918 int gfp_flags = GFP_KERNEL;
920 if (from_irq || in_interrupt())
921 gfp_flags = GFP_ATOMIC;
924 ring = (struct gmac_dma_desc *) gm->rxring;
925 memset(ring, 0, NRX * sizeof(struct gmac_dma_desc));
926 for (i = 0; i < NRX; ++i, ++ring) {
928 gm->rx_buff[i] = skb = gmac_alloc_skb(RX_BUF_ALLOC_SIZE, gfp_flags);
/* Reserve RX_OFFSET so the IP header lands aligned; the descriptor
 * points RX_OFFSET bytes before skb->data. */
931 skb_put(skb, ETH_FRAME_LEN + RX_OFFSET);
932 skb_reserve(skb, RX_OFFSET);
933 data = skb->data - RX_OFFSET;
935 st_le32(&ring->lo_addr, virt_to_bus(data));
936 st_le32(&ring->size, RX_SZ_OWN | ((RX_BUF_ALLOC_SIZE-RX_OFFSET) << RX_SZ_SHIFT));
940 ring = (struct gmac_dma_desc *) gm->txring;
941 memset(ring, 0, NTX * sizeof(struct gmac_dma_desc));
947 /* set pointers in chip */
949 GM_OUT(GM_RX_DESC_HI, 0);
950 GM_OUT(GM_RX_DESC_LO, virt_to_bus(gm->rxring));
951 GM_OUT(GM_TX_DESC_HI, 0);
952 GM_OUT(GM_TX_DESC_LO, virt_to_bus(gm->txring));
956 * Start the Tx and Rx DMA engines and enable interrupts
958 * Note: The various mdelay(20); come from Darwin implementation. Some
959 * tests (doc ?) are needed to replace those with something more intrusive.
/*
 * gmac_start_dma() - enable both DMA engines and both MACs, kick the
 * Rx ring, and unmask the interrupts the driver services.
 */
962 gmac_start_dma(struct gmac *gm)
964 /* Enable Tx and Rx */
965 GM_BIS(GM_TX_CONF, GM_TX_CONF_DMA_EN);
967 GM_BIS(GM_RX_CONF, GM_RX_CONF_DMA_EN);
969 GM_BIS(GM_MAC_RX_CONFIG, GM_MAC_RX_CONF_ENABLE);
971 GM_BIS(GM_MAC_TX_CONFIG, GM_MAC_TX_CONF_ENABLE);
/* Tell the chip all NRX descriptors are available. */
973 /* Kick the receiver and enable interrupts */
974 GM_OUT(GM_RX_KICK, NRX);
975 GM_BIC(GM_IRQ_MASK, GM_IRQ_TX_INT_ME |
985 * Stop the Tx and Rx DMA engines after disabling interrupts
987 * Note: The various mdelay(20); come from Darwin implementation. Some
988 * tests (doc ?) are needed to replace those with something more intrusive.
/*
 * gmac_stop_dma() - mirror of gmac_start_dma(): mask everything, then
 * disable DMA engines and MACs.
 */
991 gmac_stop_dma(struct gmac *gm)
993 /* disable interrupts */
994 GM_OUT(GM_IRQ_MASK, 0xffffffff);
995 /* Enable Tx and Rx */
996 GM_BIC(GM_TX_CONF, GM_TX_CONF_DMA_EN);
998 GM_BIC(GM_RX_CONF, GM_RX_CONF_DMA_EN);
1000 GM_BIC(GM_MAC_RX_CONFIG, GM_MAC_RX_CONF_ENABLE);
1002 GM_BIC(GM_MAC_TX_CONFIG, GM_MAC_TX_CONF_ENABLE);
1007 * Configure promisc mode and setup multicast hash table
/*
 * gmac_set_multicast() - program Rx filtering from dev->flags and the
 * multicast list: promisc (RX_ALL), all-multi, or a 256-bit hash
 * filter built from the CRC of each multicast address.
 * NOTE(review): sampled listing — the dmi = dmi->next advance inside
 * the loop is not visible here.
 */
1011 gmac_set_multicast(struct net_device *dev)
1013 struct gmac *gm = (struct gmac *) dev->priv;
1014 struct dev_mc_list *dmi = dev->mc_list;
1017 int multicast_hash = 0;
1018 int multicast_all = 0;
1024 /* Lock out others. */
1025 netif_stop_queue(dev);
1028 if (dev->flags & IFF_PROMISC)
1030 else if ((dev->flags & IFF_ALLMULTI) /* || (dev->mc_count > XXX) */) {
1035 for(i = 0; i < 16; i++)
/* Hash each address: top 8 CRC bits select one of 256 filter bits
 * spread over sixteen 16-bit registers. */
1038 for (i = 0; i < dev->mc_count; i++) {
1039 crc = ether_crc_le(6, dmi->dmi_addr);
1040 j = crc >> 24; /* bit number in multicast_filter */
1041 hash_table[j >> 4] |= 1 << (15 - (j & 0xf));
1045 for (i = 0; i < 16; i++)
1046 GM_OUT(GM_MAC_ADDR_FILTER_HASH0 + (i*4), hash_table[i]);
1047 GM_BIS(GM_MAC_RX_CONFIG, GM_MAC_RX_CONF_HASH_ENABLE);
/* Apply the three mode bits computed above. */
1052 GM_BIS(GM_MAC_RX_CONFIG, GM_MAC_RX_CONF_RX_ALL);
1054 GM_BIC(GM_MAC_RX_CONFIG, GM_MAC_RX_CONF_RX_ALL);
1057 GM_BIS(GM_MAC_RX_CONFIG, GM_MAC_RX_CONF_HASH_ENABLE);
1059 GM_BIC(GM_MAC_RX_CONFIG, GM_MAC_RX_CONF_HASH_ENABLE);
1062 GM_BIS(GM_MAC_RX_CONFIG, GM_MAC_RX_CONF_RX_ALL_MULTI);
1064 GM_BIC(GM_MAC_RX_CONFIG, GM_MAC_RX_CONF_RX_ALL_MULTI);
1066 /* Let us get going again. */
1067 netif_wake_queue(dev);
1071 * Open the interface
/*
 * gmac_open() - ndo_open: power/reset the chip, grab the IRQ, find and
 * configure the PHY, build rings, init the MAC and filters, then start
 * autopolling and DMA.  Presumably returns 0 on success, negative
 * errno on failure (returns not visible in this listing).
 */
1074 gmac_open(struct net_device *dev)
1077 struct gmac *gm = (struct gmac *) dev->priv;
1079 /* Power up and reset chip */
1080 if (gmac_powerup_and_reset(dev))
1083 /* Get our interrupt */
1084 ret = request_irq(dev->irq, gmac_interrupt, 0, dev->name, dev);
1086 printk(KERN_ERR "%s can't get irq %d\n", dev->name, dev->irq);
1090 gm->full_duplex = 0;
/* PHY lookup failure is non-fatal: generic mode is used instead. */
1094 if (!mii_lookup_and_reset(gm))
1095 printk(KERN_WARNING "%s WARNING ! Can't find PHY\n", dev->name);
1097 /* Configure the PHY */
1100 /* Initialize the descriptor rings */
1101 gmac_init_rings(gm, 0);
1103 /* Initialize the MAC */
1104 gmac_mac_init(gm, dev->dev_addr);
1106 /* Initialize the multicast tables & promisc mode if any */
1107 gmac_set_multicast(dev);
1109 /* Initialize the carrier status */
1110 netif_carrier_off(dev);
1113 * Check out PHY status and start auto-poll
1115 * Note: do this before enabling interrupts
1119 /* Start the chip */
1128 * Close the interface
/*
 * gmac_close() - ndo_stop: stop DMA and PHY polling, release the IRQ,
 * power the chip down and free every skb still held by the rings.
 */
1131 gmac_close(struct net_device *dev)
1133 struct gmac *gm = (struct gmac *) dev->priv;
1138 /* Stop chip and interrupts */
1141 /* Stop polling PHY */
1144 /* Free interrupt */
1145 free_irq(dev->irq, dev);
1147 /* Shut down chip */
1148 gmac_set_power(gm, 0);
1151 /* Empty rings of any remaining gremlins */
1152 for (i = 0; i < NRX; ++i) {
1153 if (gm->rx_buff[i] != 0) {
1154 dev_kfree_skb(gm->rx_buff[i]);
1158 for (i = 0; i < NTX; ++i) {
1159 if (gm->tx_buff[i] != 0) {
1160 dev_kfree_skb(gm->tx_buff[i]);
1168 #ifdef CONFIG_PMAC_PBOOK
/*
 * gmac_sleep_notify() - PMU sleep callback: on PBOOK_SLEEP_NOW suspend
 * the (single) gmac instance, on wakeup resume it.  Always grants the
 * sleep request.
 */
1170 gmac_sleep_notify(struct pmu_sleep_notifier *self, int when)
1174 /* XXX should handle more than one */
1176 return PBOOK_SLEEP_OK;
1178 gm = (struct gmac *) gmacs->priv;
1180 return PBOOK_SLEEP_OK;
1183 case PBOOK_SLEEP_REQUEST:
1185 case PBOOK_SLEEP_REJECT:
1187 case PBOOK_SLEEP_NOW:
1194 return PBOOK_SLEEP_OK;
1196 #endif /* CONFIG_PMAC_PBOOK */
1199 * Handle a transmit timeout
/*
 * gmac_tx_timeout() - ndo_tx_timeout: under the device lock, drain
 * both rings, software-reset the chip, rebuild rings/MAC/filters and
 * restart PHY polling, then wake the Tx queue.
 */
1202 gmac_tx_timeout(struct net_device *dev)
1204 struct gmac *gm = (struct gmac *) dev->priv;
1206 unsigned long flags;
1211 printk (KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
1213 spin_lock_irqsave(&gm->lock, flags);
1217 /* Empty Tx ring of any remaining gremlins */
1218 gmac_tx_cleanup(dev, 1);
1219 /* Empty Rx ring of any remaining gremlins */
1220 for (i = 0; i < NRX; ++i) {
1221 if (gm->rx_buff[i] != 0) {
1222 dev_kfree_skb_irq(gm->rx_buff[i]);
/* Same bounded reset sequence as gmac_powerup_and_reset(). */
1226 /* Perform a software reset */
1227 GM_OUT(GM_RESET, GM_RESET_TX | GM_RESET_RX);
1228 for (timeout = 100; timeout > 0; --timeout) {
1230 if ((GM_IN(GM_RESET) & (GM_RESET_TX | GM_RESET_RX)) == 0) {
1231 /* Mask out all chips interrupts */
1232 GM_OUT(GM_IRQ_MASK, 0xffffffff);
1233 GM_OUT(GM_MAC_TX_RESET, GM_MAC_TX_RESET_NOW);
1234 GM_OUT(GM_MAC_RX_RESET, GM_MAC_RX_RESET_NOW);
1239 printk(KERN_ERR "%s reset chip failed !\n", dev->name);
1240 /* Create fresh rings */
1241 gmac_init_rings(gm, 1);
1242 /* re-initialize the MAC */
1243 gmac_mac_init(gm, dev->dev_addr);
1244 /* re-initialize the multicast tables & promisc mode if any */
1245 gmac_set_multicast(dev);
1246 /* Restart PHY auto-poll */
1251 spin_unlock_irqrestore(&gm->lock, flags);
1253 netif_wake_queue(dev);
1257 * Add a packet to the transmit ring
/*
 * dev->hard_start_xmit: claim the next TX descriptor slot under gm->lock,
 * attach the skb, hand the descriptor to the chip (GM_TX_KICK), and stop
 * the queue pre-emptively if the ring just became full.
 */
1260 gmac_xmit_start(struct sk_buff *skb, struct net_device *dev)
1262 struct gmac *gm = (struct gmac *) dev->priv;
1263 volatile struct gmac_dma_desc *dp;
1264 unsigned long flags;
1270 spin_lock_irqsave(&gm->lock, flags);
/* Slot still owned by an in-flight packet -> ring full */
1273 if (gm->tx_buff[i] != 0) {
1275 * Buffer is full, can't send this packet at the moment
1277 * Can this ever happen in 2.4 ?
1279 netif_stop_queue(dev);
1280 spin_unlock_irqrestore(&gm->lock, flags);
/* NTX is a power of two: mask implements the ring-index wrap */
1283 gm->next_tx = (i + 1) & (NTX - 1);
1284 gm->tx_buff[i] = skb;
1286 dp = &gm->txring[i];
1287 /* FIXME: Interrupt on all packet for now, change this to every N packet,
1288 * with N to be adjusted
1290 dp->flags = TX_FL_INTERRUPT;
/* Descriptors are little-endian as seen by the chip; st_le32 does the swap */
1292 st_le32(&dp->lo_addr, virt_to_bus(skb->data));
/* Single-descriptor packet: start-of-packet and end-of-packet both set */
1294 st_le32(&dp->size, TX_SZ_SOP | TX_SZ_EOP | skb->len);
/* Tell the chip where the new producer index is */
1297 GM_OUT(GM_TX_KICK, gm->next_tx);
/* If the slot we will use next is still busy, throttle the stack now */
1299 if (gm->tx_buff[gm->next_tx] != 0)
1300 netif_stop_queue(dev);
1302 spin_unlock_irqrestore(&gm->lock, flags);
/* Feed the TX watchdog */
1304 dev->trans_start = jiffies;
1310 * Handle servicing of the transmit ring by deallocating used
1311 * Tx packets and restoring flow control when necessary
/*
 * Reclaims completed TX descriptors up to the chip's completion index
 * (GM_TX_COMP), updating the per-packet stats and freeing the skbs.
 * With force_cleanup != 0 the loop drains the entire ring unconditionally
 * (used by the timeout/reset path).  Caller is expected to hold gm->lock
 * (invoked from gmac_interrupt and gmac_tx_timeout under the lock).
 */
1314 gmac_tx_cleanup(struct net_device *dev, int force_cleanup)
1316 struct gmac *gm = (struct gmac *) dev->priv;
1317 volatile struct gmac_dma_desc *dp;
1318 struct sk_buff *skb;
1323 /* Note: If i==gone, we empty the entire ring. This works because
1324 * if the ring was empty, we wouldn't have received the interrupt
/* Hardware completion index: first descriptor NOT yet consumed by the chip */
1327 gone = GM_IN(GM_TX_COMP);
1328 skb = gm->tx_buff[i];
1331 dp = &gm->txring[i];
1333 ++gm->stats.tx_errors;
1335 ++gm->stats.tx_packets;
1336 gm->stats.tx_bytes += skb->len;
/* Release the slot before freeing so the xmit path sees it as available */
1338 gm->tx_buff[i] = NULL;
1339 dev_kfree_skb_irq(skb);
1342 } while (force_cleanup || i != gone);
/* Re-open the queue if we were flow-controlled and a slot freed up */
1345 if (!force_cleanup && netif_queue_stopped(dev) &&
1346 (gm->tx_buff[gm->next_tx] == 0))
1347 netif_wake_queue(dev);
1351 * Handle servicing of receive ring
/*
 * Walks the RX ring from gm->next_rx, handing completed buffers to the
 * stack.  Large packets (> RX_COPY_THRESHOLD) swap a fresh skb into the
 * ring; small ones are copied into a right-sized skb so the original ring
 * buffer is reused.  Errored/undersized frames bump the error counters and
 * are recycled in place.  Called from gmac_interrupt under gm->lock.
 */
1354 gmac_receive(struct net_device *dev)
1356 struct gmac *gm = (struct gmac *) dev->priv;
1357 int i = gm->next_rx;
1358 volatile struct gmac_dma_desc *dp;
1359 struct sk_buff *skb, *new_skb;
1360 int len, flags, drop, last;
1361 unsigned char *data;
1366 dp = &gm->rxring[i];
1367 /* Buffer not yet filled, no more Rx buffers to handle */
/* RX_SZ_OWN set means the descriptor still belongs to the chip */
1368 if (ld_le32(&dp->size) & RX_SZ_OWN)
1370 /* Get packet length, flags, etc... */
1371 len = (ld_le32(&dp->size) >> 16) & 0x7fff;
1372 flags = ld_le32(&dp->flags);
1373 skb = gm->rx_buff[i];
/* Hardware-computed checksum, folded into the low size bits */
1376 csum = ld_le32(&dp->size) & RX_SZ_CKSUM_MASK;
/* Error path: runt frame, CRC error, or hole in the ring (no skb) */
1379 if ((len < ETH_ZLEN)||(flags & RX_FL_CRC_ERROR)||(!skb)) {
1380 ++gm->stats.rx_errors;
1382 ++gm->stats.rx_length_errors;
1383 if (flags & RX_FL_CRC_ERROR)
1384 ++gm->stats.rx_crc_errors;
1386 ++gm->stats.rx_dropped;
/* Re-populate an empty ring slot so RX can continue */
1387 skb = gmac_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
1389 gm->rx_buff[i] = skb;
1391 skb_put(skb, ETH_FRAME_LEN + RX_OFFSET);
/* RX_OFFSET headroom keeps the IP header aligned */
1392 skb_reserve(skb, RX_OFFSET);
1397 /* Large packet, alloc a new skb for the ring */
1398 if (len > RX_COPY_THRESHOLD) {
1399 new_skb = gmac_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
1401 printk(KERN_INFO "%s: Out of SKBs in Rx, packet dropped !\n",
1404 ++gm->stats.rx_dropped;
/* Swap: full skb goes up the stack, fresh one takes its ring slot */
1408 gm->rx_buff[i] = new_skb;
1410 skb_put(new_skb, ETH_FRAME_LEN + RX_OFFSET);
1411 skb_reserve(new_skb, RX_OFFSET);
1414 /* Small packet, copy it to a new small skb */
1415 struct sk_buff *copy_skb = dev_alloc_skb(len + RX_OFFSET);
1418 printk(KERN_INFO "%s: Out of SKBs in Rx, packet dropped !\n",
1421 ++gm->stats.rx_dropped;
1425 copy_skb->dev = dev;
1426 skb_reserve(copy_skb, RX_OFFSET);
1427 skb_put(copy_skb, len);
1428 memcpy(copy_skb->data, skb->data, len);
1435 /* Need to drop packet ? */
1441 /* Put back ring entry */
/* new_skb==NULL (drop path): park the slot on the shared dummy buffer */
1442 data = new_skb ? (new_skb->data - RX_OFFSET) : dummy_buf;
1444 st_le32(&dp->lo_addr, virt_to_bus(data));
/* Hand the descriptor back to the chip (OWN bit + usable buffer size) */
1446 st_le32(&dp->size, RX_SZ_OWN | ((RX_BUF_ALLOC_SIZE-RX_OFFSET) << RX_SZ_SHIFT));
1448 /* Got Rx packet ? */
1450 /* Yes, baby, keep that hot ;) */
1451 if(!(csum ^ 0xffff))
1452 skb->ip_summed = CHECKSUM_UNNECESSARY;
1454 skb->ip_summed = CHECKSUM_NONE;
/*
 * NOTE(review): this second assignment unconditionally overrides the
 * CHECKSUM_UNNECESSARY decision just above, effectively disabling RX
 * checksum offload.  It may be a deliberate workaround or a leftover --
 * verify against the driver history before removing it.
 */
1455 skb->ip_summed = CHECKSUM_NONE;
1456 skb->protocol = eth_type_trans(skb, dev);
1457 gm->stats.rx_bytes += skb->len;
1459 dev->last_rx = jiffies;
1460 ++gm->stats.rx_packets;
/* Kick index rounded down to a multiple of 4 (low two bits masked) */
1470 GM_OUT(GM_RX_KICK, last & 0xfffffffc);
1475 * Service chip interrupts
/*
 * Top-level IRQ handler: reads GM_IRQ_STATUS once, acks the sources that
 * need explicit acking, then dispatches to the MIF (PHY), RX and TX service
 * routines.  Each sub-handler runs under gm->lock (spin_lock, not _irqsave,
 * since we are already in interrupt context).
 */
1478 gmac_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1480 struct net_device *dev = (struct net_device *) dev_id;
1481 struct gmac *gm = (struct gmac *) dev->priv;
1482 unsigned int status;
1484 status = GM_IN(GM_IRQ_STATUS);
/* Only bus-error and MIF bits require an explicit ack write */
1485 if (status & (GM_IRQ_BUS_ERROR | GM_IRQ_MIF))
1486 GM_OUT(GM_IRQ_ACK, status & (GM_IRQ_BUS_ERROR | GM_IRQ_MIF));
1488 if (status & (GM_IRQ_RX_TAG_ERR | GM_IRQ_BUS_ERROR)) {
1489 printk(KERN_ERR "%s: IRQ Error status: 0x%08x\n",
/* PHY/MII management event */
1493 if (status & GM_IRQ_MIF) {
1494 spin_lock(&gm->lock);
1496 spin_unlock(&gm->lock);
/* Receive completions */
1499 if (status & GM_IRQ_RX_DONE) {
1500 spin_lock(&gm->lock);
1502 spin_unlock(&gm->lock);
/* Transmit completions (normal cleanup, not forced) */
1505 if (status & (GM_IRQ_TX_INT_ME | GM_IRQ_TX_ALL)) {
1506 spin_lock(&gm->lock);
1507 gmac_tx_cleanup(dev, 0);
1508 spin_unlock(&gm->lock);
1513 * Retreive some error stats from chip and return them
/*
 * dev->get_stats: folds the chip's read-and-clear error counters into the
 * software net_device_stats and returns a pointer to them.  Hardware is
 * touched only when the interface is open and not asleep (reading registers
 * of a powered-down cell would be unsafe).
 */
1516 static struct net_device_stats *
1517 gmac_stats(struct net_device *dev)
1519 struct gmac *gm = (struct gmac *) dev->priv;
1520 struct net_device_stats *stats = &gm->stats;
1522 if (gm && gm->opened && !gm->sleeping) {
/* Each counter is accumulated into stats, then reset to zero in hardware */
1523 stats->rx_crc_errors += GM_IN(GM_MAC_RX_CRC_ERR_CTR);
1524 GM_OUT(GM_MAC_RX_CRC_ERR_CTR, 0);
1526 stats->rx_frame_errors += GM_IN(GM_MAC_RX_ALIGN_ERR_CTR);
1527 GM_OUT(GM_MAC_RX_ALIGN_ERR_CTR, 0);
1529 stats->rx_length_errors += GM_IN(GM_MAC_RX_LEN_ERR_CTR);
1530 GM_OUT(GM_MAC_RX_LEN_ERR_CTR, 0);
1532 stats->tx_aborted_errors += GM_IN(GM_MAC_EXCS_COLLISION_CTR);
/* Excess-collision counter is intentionally read twice (aborts + collisions)
 * before being cleared below */
1534 stats->collisions +=
1535 (GM_IN(GM_MAC_EXCS_COLLISION_CTR) +
1536 GM_IN(GM_MAC_LATE_COLLISION_CTR));
1537 GM_OUT(GM_MAC_EXCS_COLLISION_CTR, 0);
1538 GM_OUT(GM_MAC_LATE_COLLISION_CTR, 0);
/* NOTE(review): the enclosing function header (gmac_probe, per the HEAD
 * comments and module_init below) is outside this sampled extract. */
1547 struct device_node *gmac;
1549 /* We bump use count during probe since get_free_page can sleep
1550 * which can be a race condition if module is unloaded at this
1556 * We don't use PCI scanning on pmac since the GMAC cell is disabled
1557 * by default, and thus absent from kernel original PCI probing.
/* Walk every Open Firmware node compatible with "gmac" and probe each */
1559 for (gmac = find_compatible_devices("network", "gmac"); gmac != 0;
1563 #ifdef CONFIG_PMAC_PBOOK
/* PowerBook only: hook into PMU sleep/wake notifications */
1565 pmu_register_sleep_notifier(&gmac_sleep_notifier);
/* Success iff at least one device made it onto the gmacs list */
1570 return gmacs? 0: -ENODEV;
/*
 * Probe and register one GMAC instance described by an Open Firmware node:
 * validate the node, read the MAC address property, allocate the shared
 * dummy RX buffer and one page each for TX/RX descriptor rings, register
 * the net_device, claim and map the register resource, and wire up the
 * net_device method pointers.  Error paths unwind in reverse order at the
 * labels near the end.
 */
1574 gmac_probe1(struct device_node *gmac)
1577 unsigned long tx_descpage, rx_descpage;
1578 unsigned char *addr;
1579 struct net_device *dev;
/* Sanity-check the OF node: need at least one address and one interrupt */
1582 if (gmac->n_addrs < 1 || gmac->n_intrs < 1) {
1583 printk(KERN_ERR "can't use GMAC %s: %d addrs and %d intrs\n",
1584 gmac->full_name, gmac->n_addrs, gmac->n_intrs);
1588 addr = get_property(gmac, "local-mac-address", NULL);
1590 printk(KERN_ERR "Can't get mac-address for GMAC %s\n",
/* Shared fallback buffer for dropped-packet ring slots; allocated once */
1595 if (dummy_buf == NULL) {
1596 dummy_buf = kmalloc(DUMMY_BUF_LEN, GFP_KERNEL);
1597 if (dummy_buf == NULL) {
1598 printk(KERN_ERR "GMAC: failed to allocated dummy buffer\n");
/* One page per descriptor ring (TX then RX) */
1603 tx_descpage = get_free_page(GFP_KERNEL);
1604 if (tx_descpage == 0) {
1605 printk(KERN_ERR "GMAC: can't get a page for tx descriptors\n");
1608 rx_descpage = get_free_page(GFP_KERNEL);
1609 if (rx_descpage == 0) {
1610 printk(KERN_ERR "GMAC: can't get a page for rx descriptors\n");
/* Allocates dev plus the struct gmac private area in one shot */
1614 dev = init_etherdev(NULL, sizeof(struct gmac));
1616 printk(KERN_ERR "GMAC: init_etherdev failed, out of memory\n");
1619 SET_MODULE_OWNER(dev);
/* Reserve the OF register resource so nothing else claims it */
1623 if (!request_OF_resource(gmac, 0, " (gmac)")) {
1624 printk(KERN_ERR "GMAC: can't request IO resource !\n");
1628 dev->base_addr = gmac->addrs[0].address;
1629 gm->regs = (volatile unsigned int *)
1630 ioremap(gmac->addrs[0].address, 0x10000);
1632 printk(KERN_ERR "GMAC: unable to map I/O registers\n");
1635 dev->irq = gmac->intrs[0].line;
1638 spin_lock_init(&gm->lock);
/* PCI identity is only informational here; 0xff,0xff marks "not found" */
1640 if (pci_device_from_OF_node(gmac, &gm->pci_bus, &gm->pci_devfn)) {
1641 gm->pci_bus = gm->pci_devfn = 0xff;
1642 printk(KERN_ERR "Can't locate GMAC PCI entry\n");
/* Copy MAC address from the OF property while printing it */
1645 printk(KERN_INFO "%s: GMAC at", dev->name);
1646 for (i = 0; i < 6; ++i) {
1647 dev->dev_addr[i] = addr[i];
1648 printk("%c%.2x", (i? ':': ' '), addr[i]);
1650 printk(", driver " GMAC_VERSION "\n");
1652 gm->tx_desc_page = tx_descpage;
1653 gm->rx_desc_page = rx_descpage;
1654 gm->rxring = (volatile struct gmac_dma_desc *) rx_descpage;
1655 gm->txring = (volatile struct gmac_dma_desc *) tx_descpage;
/* net_device method table */
1661 dev->open = gmac_open;
1662 dev->stop = gmac_close;
1663 dev->hard_start_xmit = gmac_xmit_start;
1664 dev->get_stats = gmac_stats;
1665 dev->set_multicast_list = &gmac_set_multicast;
1666 dev->tx_timeout = &gmac_tx_timeout;
1667 dev->watchdog_timeo = 5*HZ;
/* Push onto the driver's global singly-linked device list */
1671 gm->next_gmac = gmacs;
/* --- error unwind: release in reverse order of acquisition --- */
1676 unregister_netdev(dev);
1678 release_OF_resource(gm->of_node, 0);
1681 free_page(rx_descpage);
1683 free_page(tx_descpage);
/* Module metadata */
1686 MODULE_AUTHOR("Paul Mackerras/Ben Herrenschmidt");
1687 MODULE_DESCRIPTION("PowerMac GMAC driver.");
1688 MODULE_LICENSE("GPL");
/*
 * Module unload: undo gmac_probe per device -- unregister each net_device,
 * unmap its registers, free both descriptor-ring pages, release the OF
 * resource, and pop it off the global gmacs list; finally drop the shared
 * dummy buffer and (on PowerBooks) the PMU sleep notifier.
 */
1691 static void __exit gmac_cleanup_module(void)
1694 struct net_device *dev;
1696 #ifdef CONFIG_PMAC_PBOOK
1698 pmu_unregister_sleep_notifier(&gmac_sleep_notifier);
/* Walk and dismantle the device list built by gmac_probe1 */
1701 while ((dev = gmacs) != NULL) {
1702 gm = (struct gmac *) dev->priv;
1703 unregister_netdev(dev);
1704 iounmap((void *) gm->regs);
1705 free_page(gm->tx_desc_page);
1706 free_page(gm->rx_desc_page);
1707 release_OF_resource(gm->of_node, 0);
1708 gmacs = gm->next_gmac;
1711 if (dummy_buf != NULL) {
1717 module_init(gmac_probe);
1718 module_exit(gmac_cleanup_module);