1 /* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */
3 Copyright 2001-2004 Jeff Garzik <jgarzik@pobox.com>
5 Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com) [tg3.c]
6 Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com) [sungem.c]
7 Copyright 2001 Manfred Spraul [natsemi.c]
8 Copyright 1999-2001 by Donald Becker. [natsemi.c]
9 Written 1997-2001 by Donald Becker. [8139too.c]
10 Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. [acenic.c]
12 This software may be used and distributed according to the terms of
13 the GNU General Public License (GPL), incorporated herein by reference.
14 Drivers based on or derived from this code fall under the GPL and must
15 retain the authorship, copyright and license notice. This file is not
16 a complete program and may only be used when the entire operating
17 system is licensed under the GPL.
19 See the file COPYING in this distribution for more information.
23 Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
24 PCI suspend/resume - Felipe Damasio <felipewd@terra.com.br>
25 LinkChg interrupt - Felipe Damasio <felipewd@terra.com.br>
28 * Test Tx checksumming thoroughly
31 * Complete reset on PciErr
32 * Consider Rx interrupt mitigation using TimerIntr
33 * Investigate using skb->priority with h/w VLAN priority
34 * Investigate using High Priority Tx Queue with skb->priority
35 * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
36 * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
37 * Implement Tx software interrupt mitigation via
39 * The real minimum of CP_MIN_MTU is 4 bytes. However,
40 for this to be supported, one must(?) turn on packet padding.
41 * Support external MII transceivers (patch available)
44 * TX checksumming is considered experimental. It is off by
45 default; use ethtool to turn it on.
49 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
51 #define DRV_NAME "8139cp"
52 #define DRV_VERSION "1.3"
53 #define DRV_RELDATE "Mar 22, 2004"
56 #include <linux/module.h>
57 #include <linux/moduleparam.h>
58 #include <linux/kernel.h>
59 #include <linux/compiler.h>
60 #include <linux/netdevice.h>
61 #include <linux/etherdevice.h>
62 #include <linux/init.h>
63 #include <linux/interrupt.h>
64 #include <linux/pci.h>
65 #include <linux/dma-mapping.h>
66 #include <linux/delay.h>
67 #include <linux/ethtool.h>
68 #include <linux/gfp.h>
69 #include <linux/mii.h>
70 #include <linux/if_vlan.h>
71 #include <linux/crc32.h>
74 #include <linux/tcp.h>
75 #include <linux/udp.h>
76 #include <linux/cache.h>
79 #include <asm/uaccess.h>
81 /* These identify the driver base version and may not be removed. */
82 static char version[] =
83 DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
85 MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
86 MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
87 MODULE_VERSION(DRV_VERSION);
88 MODULE_LICENSE("GPL");
90 static int debug = -1;
91 module_param(debug, int, 0);
92 MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");
94 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
95 The RTL chips use a 64-element hash table based on the Ethernet CRC. */
96 static int multicast_filter_limit = 32;
97 module_param(multicast_filter_limit, int, 0);
98 MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");
100 #define CP_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
103 #define CP_NUM_STATS 14 /* struct cp_dma_stats, plus one */
104 #define CP_STATS_SIZE 64 /* size in bytes of DMA stats block */
105 #define CP_REGS_SIZE (0xff + 1)
106 #define CP_REGS_VER 1 /* version 1 */
107 #define CP_RX_RING_SIZE 64
108 #define CP_TX_RING_SIZE 64
109 #define CP_RING_BYTES \
110 ((sizeof(struct cp_desc) * CP_RX_RING_SIZE) + \
111 (sizeof(struct cp_desc) * CP_TX_RING_SIZE) + \
113 #define NEXT_TX(N) (((N) + 1) & (CP_TX_RING_SIZE - 1))
114 #define NEXT_RX(N) (((N) + 1) & (CP_RX_RING_SIZE - 1))
115 #define TX_BUFFS_AVAIL(CP) \
116 (((CP)->tx_tail <= (CP)->tx_head) ? \
117 (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head : \
118 (CP)->tx_tail - (CP)->tx_head - 1)
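/*
 * Worked example (an illustrative sketch, compiled out; the function
 * name is hypothetical): how the ring-index macros above behave with
 * the 64-entry rings. One slot is always left unused so that
 * head == tail can only mean "empty", never "full".
 */
#if 0
static void cp_ring_math_example(void)
{
	unsigned tx_head = 5, tx_tail = 2;	/* 3 descriptors in flight */

	/* tx_tail <= tx_head: free slots = tail + (size - 1) - head */
	BUG_ON(tx_tail + (CP_TX_RING_SIZE - 1) - tx_head != 60);

	/* NEXT_TX() masks with (size - 1), wrapping the last slot to 0 */
	BUG_ON(NEXT_TX(CP_TX_RING_SIZE - 1) != 0);
}
#endif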
120 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
121 #define CP_INTERNAL_PHY 32
123 /* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */
124 #define RX_FIFO_THRESH 5 /* Rx buffer level before first PCI xfer. */
125 #define RX_DMA_BURST 4 /* Maximum PCI burst, '4' is 256 */
126 #define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
127 #define TX_EARLY_THRESH 256 /* Early Tx threshold, in bytes */
129 /* Time in jiffies before concluding the transmitter is hung. */
130 #define TX_TIMEOUT (6*HZ)
132 /* hardware minimum and maximum for a single frame's data payload */
133 #define CP_MIN_MTU 60 /* TODO: allow lower, but pad */
134 #define CP_MAX_MTU 4096
137 /* NIC register offsets */
138 MAC0 = 0x00, /* Ethernet hardware address. */
139 MAR0 = 0x08, /* Multicast filter. */
140 StatsAddr = 0x10, /* 64-bit start addr of 64-byte DMA stats blk */
141 TxRingAddr = 0x20, /* 64-bit start addr of Tx ring */
142 HiTxRingAddr = 0x28, /* 64-bit start addr of high priority Tx ring */
143 Cmd = 0x37, /* Command register */
144 IntrMask = 0x3C, /* Interrupt mask */
145 IntrStatus = 0x3E, /* Interrupt status */
146 TxConfig = 0x40, /* Tx configuration */
147 ChipVersion = 0x43, /* 8-bit chip version, inside TxConfig */
148 RxConfig = 0x44, /* Rx configuration */
149 RxMissed = 0x4C, /* 24 bits valid, write clears */
150 Cfg9346 = 0x50, /* EEPROM select/control; Cfg reg [un]lock */
151 Config1 = 0x52, /* Config1 */
152 Config3 = 0x59, /* Config3 */
153 Config4 = 0x5A, /* Config4 */
154 MultiIntr = 0x5C, /* Multiple interrupt select */
155 BasicModeCtrl = 0x62, /* MII BMCR */
156 BasicModeStatus = 0x64, /* MII BMSR */
157 NWayAdvert = 0x66, /* MII ADVERTISE */
158 NWayLPAR = 0x68, /* MII LPA */
159 NWayExpansion = 0x6A, /* MII Expansion */
160 Config5 = 0xD8, /* Config5 */
161 TxPoll = 0xD9, /* Tell chip to check Tx descriptors for work */
162 RxMaxSize = 0xDA, /* Max size of an Rx packet (8169 only) */
163 CpCmd = 0xE0, /* C+ Command register (C+ mode only) */
164 IntrMitigate = 0xE2, /* rx/tx interrupt mitigation control */
165 RxRingAddr = 0xE4, /* 64-bit start addr of Rx ring */
166 TxThresh = 0xEC, /* Early Tx threshold */
167 OldRxBufAddr = 0x30, /* DMA address of Rx ring buffer (C mode) */
168 OldTSD0 = 0x10, /* DMA address of first Tx desc (C mode) */
170 /* Tx and Rx status descriptors */
171 DescOwn = (1 << 31), /* Descriptor is owned by NIC */
172 RingEnd = (1 << 30), /* End of descriptor ring */
173 FirstFrag = (1 << 29), /* First segment of a packet */
174 LastFrag = (1 << 28), /* Final segment of a packet */
175 LargeSend = (1 << 27), /* TCP Large Send Offload (TSO) */
176 MSSShift = 16, /* MSS value position */
177 MSSMask = 0xfff, /* MSS value: 11 bits */
178 TxError = (1 << 23), /* Tx error summary */
179 RxError = (1 << 20), /* Rx error summary */
180 IPCS = (1 << 18), /* Calculate IP checksum */
181 UDPCS = (1 << 17), /* Calculate UDP/IP checksum */
182 TCPCS = (1 << 16), /* Calculate TCP/IP checksum */
183 TxVlanTag = (1 << 17), /* Add VLAN tag */
184 RxVlanTagged = (1 << 16), /* Rx VLAN tag available */
185 IPFail = (1 << 15), /* IP checksum failed */
186 UDPFail = (1 << 14), /* UDP/IP checksum failed */
187 TCPFail = (1 << 13), /* TCP/IP checksum failed */
188 NormalTxPoll = (1 << 6), /* One or more normal Tx packets to send */
189 PID1 = (1 << 17), /* 2 protocol id bits: 0==non-IP, */
190 PID0 = (1 << 16), /* 1==UDP/IP, 2==TCP/IP, 3==IP */
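/*
 * Example (illustrative): cp_start_xmit() below posts a single-fragment
 * TSO frame as
 *	opts1 = eor | len | DescOwn | FirstFrag | LastFrag |
 *		LargeSend | ((mss & MSSMask) << MSSShift);
 * whereas a CHECKSUM_PARTIAL UDP packet gets IPCS | UDPCS in place of
 * the LargeSend/MSS bits.
 */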
194 TxFIFOUnder = (1 << 25), /* Tx FIFO underrun */
195 TxOWC = (1 << 22), /* Tx Out-of-window collision */
196 TxLinkFail = (1 << 21), /* Link failed during Tx of packet */
197 TxMaxCol = (1 << 20), /* Tx aborted due to excessive collisions */
198 TxColCntShift = 16, /* Shift, to get 4-bit Tx collision cnt */
199 TxColCntMask = 0x01 | 0x02 | 0x04 | 0x08, /* 4-bit collision count */
200 RxErrFrame = (1 << 27), /* Rx frame alignment error */
201 RxMcast = (1 << 26), /* Rx multicast packet rcv'd */
202 RxErrCRC = (1 << 18), /* Rx CRC error */
203 RxErrRunt = (1 << 19), /* Rx error, packet < 64 bytes */
204 RxErrLong = (1 << 21), /* Rx error, packet > 4096 bytes */
205 RxErrFIFO = (1 << 22), /* Rx error, FIFO overflowed, pkt bad */
207 /* StatsAddr register */
208 DumpStats = (1 << 3), /* Begin stats dump */
210 /* RxConfig register */
211 RxCfgFIFOShift = 13, /* Shift, to get Rx FIFO thresh value */
212 RxCfgDMAShift = 8, /* Shift, to get Rx Max DMA value */
213 AcceptErr = 0x20, /* Accept packets with CRC errors */
214 AcceptRunt = 0x10, /* Accept runt (<64 bytes) packets */
215 AcceptBroadcast = 0x08, /* Accept broadcast packets */
216 AcceptMulticast = 0x04, /* Accept multicast packets */
217 AcceptMyPhys = 0x02, /* Accept pkts with our MAC as dest */
218 AcceptAllPhys = 0x01, /* Accept all pkts w/ physical dest */
220 /* IntrMask / IntrStatus registers */
221 PciErr = (1 << 15), /* System error on the PCI bus */
222 TimerIntr = (1 << 14), /* Asserted when TCTR reaches TimerInt value */
223 LenChg = (1 << 13), /* Cable length change */
224 SWInt = (1 << 8), /* Software-requested interrupt */
225 TxEmpty = (1 << 7), /* No Tx descriptors available */
226 RxFIFOOvr = (1 << 6), /* Rx FIFO Overflow */
227 LinkChg = (1 << 5), /* Packet underrun, or link change */
228 RxEmpty = (1 << 4), /* No Rx descriptors available */
229 TxErr = (1 << 3), /* Tx error */
230 TxOK = (1 << 2), /* Tx packet sent */
231 RxErr = (1 << 1), /* Rx error */
232 RxOK = (1 << 0), /* Rx packet received */
233 IntrResvd = (1 << 10), /* reserved, according to RealTek engineers,
234 but hardware likes to raise it */
236 IntrAll = PciErr | TimerIntr | LenChg | SWInt | TxEmpty |
237 RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK |
238 RxErr | RxOK | IntrResvd,
240 /* C mode command register */
241 CmdReset = (1 << 4), /* Enable to reset; self-clearing */
242 RxOn = (1 << 3), /* Rx mode enable */
243 TxOn = (1 << 2), /* Tx mode enable */
245 /* C+ mode command register */
246 RxVlanOn = (1 << 6), /* Rx VLAN de-tagging enable */
247 RxChkSum = (1 << 5), /* Rx checksum offload enable */
248 PCIDAC = (1 << 4), /* PCI Dual Address Cycle (64-bit PCI) */
249 PCIMulRW = (1 << 3), /* Enable PCI read/write multiple */
250 CpRxOn = (1 << 1), /* Rx mode enable */
251 CpTxOn = (1 << 0), /* Tx mode enable */
253 /* Cfg9346 EEPROM control register */
254 Cfg9346_Lock = 0x00, /* Lock ConfigX/MII register access */
255 Cfg9346_Unlock = 0xC0, /* Unlock ConfigX/MII register access */
257 /* TxConfig register */
258 IFG = (1 << 25) | (1 << 24), /* standard IEEE interframe gap */
259 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
261 /* Early Tx Threshold register */
262 TxThreshMask = 0x3f, /* Mask bits 5-0 */
263 TxThreshMax = 2048, /* Max early Tx threshold */
265 /* Config1 register */
266 DriverLoaded = (1 << 5), /* Software marker, driver is loaded */
267 LWACT = (1 << 4), /* LWAKE active mode */
268 PMEnable = (1 << 0), /* Enable various PM features of chip */
270 /* Config3 register */
271 PARMEnable = (1 << 6), /* Enable auto-loading of PHY parms */
272 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
273 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
275 /* Config4 register */
276 LWPTN = (1 << 1), /* LWAKE Pattern */
277 LWPME = (1 << 4), /* LANWAKE vs PMEB */
279 /* Config5 register */
280 BWF = (1 << 6), /* Accept Broadcast wakeup frame */
281 MWF = (1 << 5), /* Accept Multicast wakeup frame */
282 UWF = (1 << 4), /* Accept Unicast wakeup frame */
283 LANWake = (1 << 1), /* Enable LANWake signal */
284 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
286 cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty,
287 cp_rx_intr_mask = RxOK | RxErr | RxEmpty | RxFIFOOvr,
288 cp_intr_mask = cp_rx_intr_mask | cp_norx_intr_mask,
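/*
 * How the three masks above cooperate (see cp_interrupt() and
 * cp_rx_poll()): on an Rx event the ISR narrows IntrMask to
 * cp_norx_intr_mask and schedules NAPI; when the poll loop runs out
 * of work it restores the full cp_intr_mask, re-enabling Rx
 * interrupts.
 */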
291 static const unsigned int cp_rx_config =
292 (RX_FIFO_THRESH << RxCfgFIFOShift) |
293 (RX_DMA_BURST << RxCfgDMAShift);
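/*
 * With the defaults above this evaluates to (5 << 13) | (4 << 8) ==
 * 0xa400: a 512-byte Rx FIFO threshold and a 256-byte maximum DMA
 * burst (both log2-coded, 16 << n). __cp_set_rx_mode() ORs the
 * Accept* bits into this base value.
 */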
301 struct cp_dma_stats {
317 struct cp_extra_stats {
318 unsigned long rx_frags;
323 struct net_device *dev;
327 struct napi_struct napi;
329 struct pci_dev *pdev;
333 struct cp_extra_stats cp_stats;
335 unsigned rx_head ____cacheline_aligned;
337 struct cp_desc *rx_ring;
338 struct sk_buff *rx_skb[CP_RX_RING_SIZE];
340 unsigned tx_head ____cacheline_aligned;
342 struct cp_desc *tx_ring;
343 struct sk_buff *tx_skb[CP_TX_RING_SIZE];
346 unsigned wol_enabled : 1; /* Is Wake-on-LAN enabled? */
350 struct mii_if_info mii_if;
353 #define cpr8(reg) readb(cp->regs + (reg))
354 #define cpr16(reg) readw(cp->regs + (reg))
355 #define cpr32(reg) readl(cp->regs + (reg))
356 #define cpw8(reg,val) writeb((val), cp->regs + (reg))
357 #define cpw16(reg,val) writew((val), cp->regs + (reg))
358 #define cpw32(reg,val) writel((val), cp->regs + (reg))
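/*
 * Usage note: the *_f ("flush") variants below read the register
 * straight back, forcing the posted MMIO write out of any PCI write
 * buffer before the next statement executes; e.g. cpw16_f(IntrMask, 0)
 * guarantees the device has interrupts masked before the CPU moves on,
 * whereas a plain cpw16() may still be in flight.
 */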
359 #define cpw8_f(reg,val) do { \
360 writeb((val), cp->regs + (reg)); \
361 readb(cp->regs + (reg)); \
363 #define cpw16_f(reg,val) do { \
364 writew((val), cp->regs + (reg)); \
365 readw(cp->regs + (reg)); \
367 #define cpw32_f(reg,val) do { \
368 writel((val), cp->regs + (reg)); \
369 readl(cp->regs + (reg)); \
373 static void __cp_set_rx_mode (struct net_device *dev);
374 static void cp_tx (struct cp_private *cp);
375 static void cp_clean_rings (struct cp_private *cp);
376 #ifdef CONFIG_NET_POLL_CONTROLLER
377 static void cp_poll_controller(struct net_device *dev);
379 static int cp_get_eeprom_len(struct net_device *dev);
380 static int cp_get_eeprom(struct net_device *dev,
381 struct ethtool_eeprom *eeprom, u8 *data);
382 static int cp_set_eeprom(struct net_device *dev,
383 struct ethtool_eeprom *eeprom, u8 *data);
386 const char str[ETH_GSTRING_LEN];
387 } ethtool_stats_keys[] = {
405 static inline void cp_set_rxbufsize (struct cp_private *cp)
407 unsigned int mtu = cp->dev->mtu;
409 if (mtu > ETH_DATA_LEN)
410 /* MTU + ethernet header + FCS + optional VLAN tag */
411 cp->rx_buf_sz = mtu + ETH_HLEN + 8;
413 cp->rx_buf_sz = PKT_BUF_SZ;
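/*
 * Example: a 4096-byte MTU (CP_MAX_MTU) yields rx_buf_sz == 4096 +
 * 14 + 8 == 4118, while any MTU up to the standard 1500 uses the
 * fixed 1536-byte PKT_BUF_SZ.
 */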
416 static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
417 struct cp_desc *desc)
419 u32 opts2 = le32_to_cpu(desc->opts2);
421 skb->protocol = eth_type_trans (skb, cp->dev);
423 cp->dev->stats.rx_packets++;
424 cp->dev->stats.rx_bytes += skb->len;
426 if (opts2 & RxVlanTagged)
427 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));
429 napi_gro_receive(&cp->napi, skb);
432 static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
435 netif_dbg(cp, rx_err, cp->dev, "rx err, slot %d status 0x%x len %d\n",
436 rx_tail, status, len);
437 cp->dev->stats.rx_errors++;
438 if (status & RxErrFrame)
439 cp->dev->stats.rx_frame_errors++;
440 if (status & RxErrCRC)
441 cp->dev->stats.rx_crc_errors++;
442 if ((status & RxErrRunt) || (status & RxErrLong))
443 cp->dev->stats.rx_length_errors++;
444 if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
445 cp->dev->stats.rx_length_errors++;
446 if (status & RxErrFIFO)
447 cp->dev->stats.rx_fifo_errors++;
450 static inline unsigned int cp_rx_csum_ok (u32 status)
452 unsigned int protocol = (status >> 16) & 0x3;
454 if (((protocol == RxProtoTCP) && !(status & TCPFail)) ||
455 ((protocol == RxProtoUDP) && !(status & UDPFail)))
461 static int cp_rx_poll(struct napi_struct *napi, int budget)
463 struct cp_private *cp = container_of(napi, struct cp_private, napi);
464 struct net_device *dev = cp->dev;
465 unsigned int rx_tail = cp->rx_tail;
470 cpw16(IntrStatus, cp_rx_intr_mask);
472 while (rx < budget) {
474 dma_addr_t mapping, new_mapping;
475 struct sk_buff *skb, *new_skb;
476 struct cp_desc *desc;
477 const unsigned buflen = cp->rx_buf_sz;
479 skb = cp->rx_skb[rx_tail];
482 desc = &cp->rx_ring[rx_tail];
483 status = le32_to_cpu(desc->opts1);
484 if (status & DescOwn)
487 len = (status & 0x1fff) - 4;
488 mapping = le64_to_cpu(desc->addr);
490 if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
491 /* we don't support incoming fragmented frames.
492 * instead, we attempt to ensure that the
493 * pre-allocated RX skbs are properly sized such
494 * that RX fragments are never encountered
496 cp_rx_err_acct(cp, rx_tail, status, len);
497 dev->stats.rx_dropped++;
498 cp->cp_stats.rx_frags++;
502 if (status & (RxError | RxErrFIFO)) {
503 cp_rx_err_acct(cp, rx_tail, status, len);
507 netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n",
508 rx_tail, status, len);
510 new_skb = napi_alloc_skb(napi, buflen);
512 dev->stats.rx_dropped++;
516 new_mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
518 if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
519 dev->stats.rx_dropped++;
524 dma_unmap_single(&cp->pdev->dev, mapping,
525 buflen, PCI_DMA_FROMDEVICE);
527 /* Handle checksum offloading for incoming packets. */
528 if (cp_rx_csum_ok(status))
529 skb->ip_summed = CHECKSUM_UNNECESSARY;
531 skb_checksum_none_assert(skb);
535 cp->rx_skb[rx_tail] = new_skb;
537 cp_rx_skb(cp, skb, desc);
539 mapping = new_mapping;
542 cp->rx_ring[rx_tail].opts2 = 0;
543 cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
544 if (rx_tail == (CP_RX_RING_SIZE - 1))
545 desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
548 desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
549 rx_tail = NEXT_RX(rx_tail);
552 cp->rx_tail = rx_tail;
554 /* if we did not reach the work limit, then we're done with
555 * this round of polling
560 if (cpr16(IntrStatus) & cp_rx_intr_mask)
563 napi_gro_flush(napi, false);
564 spin_lock_irqsave(&cp->lock, flags);
565 __napi_complete(napi);
566 cpw16_f(IntrMask, cp_intr_mask);
567 spin_unlock_irqrestore(&cp->lock, flags);
573 static irqreturn_t cp_interrupt (int irq, void *dev_instance)
575 struct net_device *dev = dev_instance;
576 struct cp_private *cp;
580 if (unlikely(dev == NULL))
582 cp = netdev_priv(dev);
584 spin_lock(&cp->lock);
586 status = cpr16(IntrStatus);
587 if (!status || (status == 0xFFFF))
592 netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n",
593 status, cpr8(Cmd), cpr16(CpCmd));
595 cpw16(IntrStatus, status & ~cp_rx_intr_mask);
597 /* close possible races with dev_close */
598 if (unlikely(!netif_running(dev))) {
603 if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
604 if (napi_schedule_prep(&cp->napi)) {
605 cpw16_f(IntrMask, cp_norx_intr_mask);
606 __napi_schedule(&cp->napi);
609 if (status & (TxOK | TxErr | TxEmpty | SWInt))
611 if (status & LinkChg)
612 mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
615 if (status & PciErr) {
618 pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
619 pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
620 netdev_err(dev, "PCI bus error, status=%04x, PCI status=%04x\n",
623 /* TODO: reset hardware */
627 spin_unlock(&cp->lock);
629 return IRQ_RETVAL(handled);
632 #ifdef CONFIG_NET_POLL_CONTROLLER
634 * Polling receive - used by netconsole and other diagnostic tools
635 * to allow network i/o with interrupts disabled.
637 static void cp_poll_controller(struct net_device *dev)
639 struct cp_private *cp = netdev_priv(dev);
640 const int irq = cp->pdev->irq;
643 cp_interrupt(irq, dev);
648 static void cp_tx (struct cp_private *cp)
650 unsigned tx_head = cp->tx_head;
651 unsigned tx_tail = cp->tx_tail;
652 unsigned bytes_compl = 0, pkts_compl = 0;
654 while (tx_tail != tx_head) {
655 struct cp_desc *txd = cp->tx_ring + tx_tail;
660 status = le32_to_cpu(txd->opts1);
661 if (status & DescOwn)
664 skb = cp->tx_skb[tx_tail];
667 dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
668 le32_to_cpu(txd->opts1) & 0xffff,
671 if (status & LastFrag) {
672 if (status & (TxError | TxFIFOUnder)) {
673 netif_dbg(cp, tx_err, cp->dev,
674 "tx err, status 0x%x\n", status);
675 cp->dev->stats.tx_errors++;
677 cp->dev->stats.tx_window_errors++;
678 if (status & TxMaxCol)
679 cp->dev->stats.tx_aborted_errors++;
680 if (status & TxLinkFail)
681 cp->dev->stats.tx_carrier_errors++;
682 if (status & TxFIFOUnder)
683 cp->dev->stats.tx_fifo_errors++;
685 cp->dev->stats.collisions +=
686 ((status >> TxColCntShift) & TxColCntMask);
687 cp->dev->stats.tx_packets++;
688 cp->dev->stats.tx_bytes += skb->len;
689 netif_dbg(cp, tx_done, cp->dev,
690 "tx done, slot %d\n", tx_tail);
692 bytes_compl += skb->len;
694 dev_kfree_skb_irq(skb);
697 cp->tx_skb[tx_tail] = NULL;
699 tx_tail = NEXT_TX(tx_tail);
702 cp->tx_tail = tx_tail;
704 netdev_completed_queue(cp->dev, pkts_compl, bytes_compl);
705 if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
706 netif_wake_queue(cp->dev);
709 static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
711 return skb_vlan_tag_present(skb) ?
712 TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00;
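/*
 * Example (illustrative): the descriptor carries the tag big-endian,
 * so VLAN ID 0x0123 is stored as swab16(0x0123) == 0x2301 in opts2;
 * cp_rx_skb() applies the same swab16() to undo it on receive.
 */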
715 static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb,
716 int first, int entry_last)
720 skb_frag_t *this_frag;
721 for (frag = 0; frag+first < entry_last; frag++) {
723 cp->tx_skb[index] = NULL;
724 txd = &cp->tx_ring[index];
725 this_frag = &skb_shinfo(skb)->frags[frag];
726 dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
727 skb_frag_size(this_frag), PCI_DMA_TODEVICE);
731 static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
732 struct net_device *dev)
734 struct cp_private *cp = netdev_priv(dev);
737 unsigned long intr_flags;
741 spin_lock_irqsave(&cp->lock, intr_flags);
743 /* This is a hard error, log it. */
744 if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
745 netif_stop_queue(dev);
746 spin_unlock_irqrestore(&cp->lock, intr_flags);
747 netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
748 return NETDEV_TX_BUSY;
752 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
753 mss = skb_shinfo(skb)->gso_size;
755 opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));
757 if (skb_shinfo(skb)->nr_frags == 0) {
758 struct cp_desc *txd = &cp->tx_ring[entry];
763 mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
764 if (dma_mapping_error(&cp->pdev->dev, mapping))
768 txd->addr = cpu_to_le64(mapping);
771 flags = eor | len | DescOwn | FirstFrag | LastFrag;
774 flags |= LargeSend | ((mss & MSSMask) << MSSShift);
775 else if (skb->ip_summed == CHECKSUM_PARTIAL) {
776 const struct iphdr *ip = ip_hdr(skb);
777 if (ip->protocol == IPPROTO_TCP)
778 flags |= IPCS | TCPCS;
779 else if (ip->protocol == IPPROTO_UDP)
780 flags |= IPCS | UDPCS;
782 WARN_ON(1); /* we need a WARN() */
785 txd->opts1 = cpu_to_le32(flags);
788 cp->tx_skb[entry] = skb;
789 netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
793 u32 first_len, first_eor;
794 dma_addr_t first_mapping;
795 int frag, first_entry = entry;
796 const struct iphdr *ip = ip_hdr(skb);
798 /* We must give this initial chunk to the device last.
799 * Otherwise we could race with the device.
802 first_len = skb_headlen(skb);
803 first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
804 first_len, PCI_DMA_TODEVICE);
805 if (dma_mapping_error(&cp->pdev->dev, first_mapping))
808 cp->tx_skb[entry] = skb;
810 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
811 const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
816 entry = NEXT_TX(entry);
818 len = skb_frag_size(this_frag);
819 mapping = dma_map_single(&cp->pdev->dev,
820 skb_frag_address(this_frag),
821 len, PCI_DMA_TODEVICE);
822 if (dma_mapping_error(&cp->pdev->dev, mapping)) {
823 unwind_tx_frag_mapping(cp, skb, first_entry, entry);
827 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
829 ctrl = eor | len | DescOwn;
833 ((mss & MSSMask) << MSSShift);
834 else if (skb->ip_summed == CHECKSUM_PARTIAL) {
835 if (ip->protocol == IPPROTO_TCP)
836 ctrl |= IPCS | TCPCS;
837 else if (ip->protocol == IPPROTO_UDP)
838 ctrl |= IPCS | UDPCS;
843 if (frag == skb_shinfo(skb)->nr_frags - 1)
846 txd = &cp->tx_ring[entry];
848 txd->addr = cpu_to_le64(mapping);
851 txd->opts1 = cpu_to_le32(ctrl);
853 cp->tx_skb[entry] = skb;
856 txd = &cp->tx_ring[first_entry];
858 txd->addr = cpu_to_le64(first_mapping);
861 if (skb->ip_summed == CHECKSUM_PARTIAL) {
862 if (ip->protocol == IPPROTO_TCP)
863 txd->opts1 = cpu_to_le32(first_eor | first_len |
864 FirstFrag | DescOwn |
866 else if (ip->protocol == IPPROTO_UDP)
867 txd->opts1 = cpu_to_le32(first_eor | first_len |
868 FirstFrag | DescOwn |
873 txd->opts1 = cpu_to_le32(first_eor | first_len |
874 FirstFrag | DescOwn);
877 netif_dbg(cp, tx_queued, cp->dev, "tx queued, slots %d-%d, skblen %d\n",
878 first_entry, entry, skb->len);
880 cp->tx_head = NEXT_TX(entry);
882 netdev_sent_queue(dev, skb->len);
883 if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
884 netif_stop_queue(dev);
887 spin_unlock_irqrestore(&cp->lock, intr_flags);
889 cpw8(TxPoll, NormalTxPoll);
893 dev_kfree_skb_any(skb);
894 cp->dev->stats.tx_dropped++;
898 /* Set or clear the multicast filter for this adaptor.
899 This routine is not state sensitive and need not be SMP locked. */
901 static void __cp_set_rx_mode (struct net_device *dev)
903 struct cp_private *cp = netdev_priv(dev);
904 u32 mc_filter[2]; /* Multicast hash filter */
907 /* Note: do not reorder, GCC is clever about common statements. */
908 if (dev->flags & IFF_PROMISC) {
909 /* Unconditionally log net taps. */
911 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
913 mc_filter[1] = mc_filter[0] = 0xffffffff;
914 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
915 (dev->flags & IFF_ALLMULTI)) {
916 /* Too many to filter perfectly -- accept all multicasts. */
917 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
918 mc_filter[1] = mc_filter[0] = 0xffffffff;
920 struct netdev_hw_addr *ha;
921 rx_mode = AcceptBroadcast | AcceptMyPhys;
922 mc_filter[1] = mc_filter[0] = 0;
923 netdev_for_each_mc_addr(ha, dev) {
924 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
926 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
927 rx_mode |= AcceptMulticast;
931 /* We can safely update without stopping the chip. */
932 cp->rx_config = cp_rx_config | rx_mode;
933 cpw32_f(RxConfig, cp->rx_config);
935 cpw32_f (MAR0 + 0, mc_filter[0]);
936 cpw32_f (MAR0 + 4, mc_filter[1]);
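/*
 * Worked example (illustrative): the top 6 bits of the CRC select one
 * of the 64 hash bits. A CRC of 0xd4000000 gives bit_nr ==
 * 0xd4000000 >> 26 == 53, i.e. mc_filter[1] (53 >> 5), bit 21
 * (53 & 31), which lands in the MAR0 + 4 dword written above.
 */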
939 static void cp_set_rx_mode (struct net_device *dev)
942 struct cp_private *cp = netdev_priv(dev);
944 spin_lock_irqsave (&cp->lock, flags);
945 __cp_set_rx_mode(dev);
946 spin_unlock_irqrestore (&cp->lock, flags);
949 static void __cp_get_stats(struct cp_private *cp)
951 /* only lower 24 bits valid; write any value to clear */
952 cp->dev->stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
956 static struct net_device_stats *cp_get_stats(struct net_device *dev)
958 struct cp_private *cp = netdev_priv(dev);
961 /* The chip need only report frames it silently dropped. */
962 spin_lock_irqsave(&cp->lock, flags);
963 if (netif_running(dev) && netif_device_present(dev))
965 spin_unlock_irqrestore(&cp->lock, flags);
970 static void cp_stop_hw (struct cp_private *cp)
972 cpw16(IntrStatus, ~(cpr16(IntrStatus)));
973 cpw16_f(IntrMask, 0);
976 cpw16_f(IntrStatus, ~(cpr16(IntrStatus)));
979 cp->tx_head = cp->tx_tail = 0;
981 netdev_reset_queue(cp->dev);
984 static void cp_reset_hw (struct cp_private *cp)
986 unsigned work = 1000;
991 if (!(cpr8(Cmd) & CmdReset))
994 schedule_timeout_uninterruptible(10);
997 netdev_err(cp->dev, "hardware reset timeout\n");
1000 static inline void cp_start_hw (struct cp_private *cp)
1002 dma_addr_t ring_dma;
1004 cpw16(CpCmd, cp->cpcmd);
1007 * These (at least TxRingAddr) need to be configured after the
1008 * corresponding bits in CpCmd are enabled. Datasheet v1.6 §6.33
1009 * (C+ Command Register) recommends that these and more be configured
1010 * *after* the [RT]xEnable bits in CpCmd are set. And on some hardware
1011 * it's been observed that the TxRingAddr is actually reset to garbage
1012 * when C+ mode Tx is enabled in CpCmd.
1014 cpw32_f(HiTxRingAddr, 0);
1015 cpw32_f(HiTxRingAddr + 4, 0);
1017 ring_dma = cp->ring_dma;
1018 cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
1019 cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
1021 ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
1022 cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
1023 cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
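/*
 * The (ring_dma >> 16) >> 16 idiom extracts the high dword without
 * invoking undefined behaviour (or a compiler warning) when dma_addr_t
 * is only 32 bits wide, where a plain >> 32 would shift by the full
 * width of the type.
 */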
1026 * Strictly speaking, the datasheet says this should be enabled
1027 * *before* setting the descriptor addresses. But what, then, would
1028 * prevent it from doing DMA to random unconfigured addresses?
1029 * This variant appears to work fine.
1031 cpw8(Cmd, RxOn | TxOn);
1033 netdev_reset_queue(cp->dev);
1036 static void cp_enable_irq(struct cp_private *cp)
1038 cpw16_f(IntrMask, cp_intr_mask);
1041 static void cp_init_hw (struct cp_private *cp)
1043 struct net_device *dev = cp->dev;
1047 cpw8_f (Cfg9346, Cfg9346_Unlock);
1049 /* Restore our idea of the MAC address. */
1050 cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
1051 cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
1054 cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */
1056 __cp_set_rx_mode(dev);
1057 cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));
1059 cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
1060 /* Disable Wake-on-LAN. Can be turned on with ETHTOOL_SWOL */
1061 cpw8(Config3, PARMEnable);
1062 cp->wol_enabled = 0;
1064 cpw8(Config5, cpr8(Config5) & PMEStatus);
1066 cpw16(MultiIntr, 0);
1068 cpw8_f(Cfg9346, Cfg9346_Lock);
1071 static int cp_refill_rx(struct cp_private *cp)
1073 struct net_device *dev = cp->dev;
1076 for (i = 0; i < CP_RX_RING_SIZE; i++) {
1077 struct sk_buff *skb;
1080 skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz);
1084 mapping = dma_map_single(&cp->pdev->dev, skb->data,
1085 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1086 if (dma_mapping_error(&cp->pdev->dev, mapping)) {
1090 cp->rx_skb[i] = skb;
1092 cp->rx_ring[i].opts2 = 0;
1093 cp->rx_ring[i].addr = cpu_to_le64(mapping);
1094 if (i == (CP_RX_RING_SIZE - 1))
1095 cp->rx_ring[i].opts1 =
1096 cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
1098 cp->rx_ring[i].opts1 =
1099 cpu_to_le32(DescOwn | cp->rx_buf_sz);
1109 static void cp_init_rings_index (struct cp_private *cp)
1112 cp->tx_head = cp->tx_tail = 0;
1115 static int cp_init_rings (struct cp_private *cp)
1117 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1118 cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
1120 cp_init_rings_index(cp);
1122 return cp_refill_rx (cp);
1125 static int cp_alloc_rings (struct cp_private *cp)
1127 struct device *d = &cp->pdev->dev;
1131 mem = dma_alloc_coherent(d, CP_RING_BYTES, &cp->ring_dma, GFP_KERNEL);
1136 cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];
1138 rc = cp_init_rings(cp);
1140 dma_free_coherent(d, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);
1145 static void cp_clean_rings (struct cp_private *cp)
1147 struct cp_desc *desc;
1150 for (i = 0; i < CP_RX_RING_SIZE; i++) {
1151 if (cp->rx_skb[i]) {
1152 desc = cp->rx_ring + i;
1153 dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
1154 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1155 dev_kfree_skb_any(cp->rx_skb[i]);
1159 for (i = 0; i < CP_TX_RING_SIZE; i++) {
1160 if (cp->tx_skb[i]) {
1161 struct sk_buff *skb = cp->tx_skb[i];
1163 desc = cp->tx_ring + i;
1164 dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
1165 le32_to_cpu(desc->opts1) & 0xffff,
1167 if (le32_to_cpu(desc->opts1) & LastFrag)
1168 dev_kfree_skb_any(skb);
1169 cp->dev->stats.tx_dropped++;
1172 netdev_reset_queue(cp->dev);
1174 memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
1175 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1177 memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
1178 memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
1181 static void cp_free_rings (struct cp_private *cp)
1184 dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring,
1190 static int cp_open (struct net_device *dev)
1192 struct cp_private *cp = netdev_priv(dev);
1193 const int irq = cp->pdev->irq;
1196 netif_dbg(cp, ifup, dev, "enabling interface\n");
1198 rc = cp_alloc_rings(cp);
1202 napi_enable(&cp->napi);
1206 rc = request_irq(irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
1212 netif_carrier_off(dev);
1213 mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
1214 netif_start_queue(dev);
1219 napi_disable(&cp->napi);
1225 static int cp_close (struct net_device *dev)
1227 struct cp_private *cp = netdev_priv(dev);
1228 unsigned long flags;
1230 napi_disable(&cp->napi);
1232 netif_dbg(cp, ifdown, dev, "disabling interface\n");
1234 spin_lock_irqsave(&cp->lock, flags);
1236 netif_stop_queue(dev);
1237 netif_carrier_off(dev);
1241 spin_unlock_irqrestore(&cp->lock, flags);
1243 free_irq(cp->pdev->irq, dev);
1249 static void cp_tx_timeout(struct net_device *dev)
1251 struct cp_private *cp = netdev_priv(dev);
1252 unsigned long flags;
1255 netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
1256 cpr8(Cmd), cpr16(CpCmd),
1257 cpr16(IntrStatus), cpr16(IntrMask));
1259 spin_lock_irqsave(&cp->lock, flags);
1263 rc = cp_init_rings(cp);
1265 __cp_set_rx_mode(dev);
1266 cpw16_f(IntrMask, cp_norx_intr_mask);
1268 netif_wake_queue(dev);
1269 napi_schedule_irqoff(&cp->napi);
1271 spin_unlock_irqrestore(&cp->lock, flags);
1274 static int cp_change_mtu(struct net_device *dev, int new_mtu)
1276 struct cp_private *cp = netdev_priv(dev);
1278 /* check for invalid MTU, according to hardware limits */
1279 if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
1282 /* if network interface not up, no need for complexity */
1283 if (!netif_running(dev)) {
1285 cp_set_rxbufsize(cp); /* set new rx buf size */
1289 /* network IS up, close it, reset MTU, and come up again. */
1292 cp_set_rxbufsize(cp);
1293 return cp_open(dev);
1296 static const char mii_2_8139_map[8] = {
1307 static int mdio_read(struct net_device *dev, int phy_id, int location)
1309 struct cp_private *cp = netdev_priv(dev);
1311 return location < 8 && mii_2_8139_map[location] ?
1312 readw(cp->regs + mii_2_8139_map[location]) : 0;
1316 static void mdio_write(struct net_device *dev, int phy_id, int location,
1319 struct cp_private *cp = netdev_priv(dev);
1321 if (location == 0) {
1322 cpw8(Cfg9346, Cfg9346_Unlock);
1323 cpw16(BasicModeCtrl, value);
1324 cpw8(Cfg9346, Cfg9346_Lock);
1325 } else if (location < 8 && mii_2_8139_map[location])
1326 cpw16(mii_2_8139_map[location], value);
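/*
 * Example (illustrative): the internal PHY has no real MDIO bus, so
 * MII accesses are redirected to equivalent chip registers via
 * mii_2_8139_map[]; e.g. the BMSR (MII register 1) is served from
 * BasicModeStatus (0x64), and unmapped locations read back as 0.
 */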
1329 /* Set the ethtool Wake-on-LAN settings */
1330 static int netdev_set_wol (struct cp_private *cp,
1331 const struct ethtool_wolinfo *wol)
1335 options = cpr8 (Config3) & ~(LinkUp | MagicPacket);
1336 /* If WOL is being disabled, no need for complexity */
1338 if (wol->wolopts & WAKE_PHY) options |= LinkUp;
1339 if (wol->wolopts & WAKE_MAGIC) options |= MagicPacket;
1342 cpw8 (Cfg9346, Cfg9346_Unlock);
1343 cpw8 (Config3, options);
1344 cpw8 (Cfg9346, Cfg9346_Lock);
1346 options = 0; /* Paranoia setting */
1347 options = cpr8 (Config5) & ~(UWF | MWF | BWF);
1348 /* If WOL is being disabled, no need for complexity */
1350 if (wol->wolopts & WAKE_UCAST) options |= UWF;
1351 if (wol->wolopts & WAKE_BCAST) options |= BWF;
1352 if (wol->wolopts & WAKE_MCAST) options |= MWF;
1355 cpw8 (Config5, options);
1357 cp->wol_enabled = (wol->wolopts) ? 1 : 0;
1362 /* Get the ethtool Wake-on-LAN settings */
1363 static void netdev_get_wol (struct cp_private *cp,
1364 struct ethtool_wolinfo *wol)
1368 wol->wolopts = 0; /* Start from scratch */
1369 wol->supported = WAKE_PHY | WAKE_BCAST | WAKE_MAGIC |
1370 WAKE_MCAST | WAKE_UCAST;
1371 /* We don't need to go on if WOL is disabled */
1372 if (!cp->wol_enabled) return;
1374 options = cpr8 (Config3);
1375 if (options & LinkUp) wol->wolopts |= WAKE_PHY;
1376 if (options & MagicPacket) wol->wolopts |= WAKE_MAGIC;
1378 options = 0; /* Paranoia setting */
1379 options = cpr8 (Config5);
1380 if (options & UWF) wol->wolopts |= WAKE_UCAST;
1381 if (options & BWF) wol->wolopts |= WAKE_BCAST;
1382 if (options & MWF) wol->wolopts |= WAKE_MCAST;
1385 static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1387 struct cp_private *cp = netdev_priv(dev);
1389 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1390 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1391 strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
1394 static void cp_get_ringparam(struct net_device *dev,
1395 struct ethtool_ringparam *ring)
1397 ring->rx_max_pending = CP_RX_RING_SIZE;
1398 ring->tx_max_pending = CP_TX_RING_SIZE;
1399 ring->rx_pending = CP_RX_RING_SIZE;
1400 ring->tx_pending = CP_TX_RING_SIZE;
1403 static int cp_get_regs_len(struct net_device *dev)
1405 return CP_REGS_SIZE;
1408 static int cp_get_sset_count (struct net_device *dev, int sset)
1412 return CP_NUM_STATS;
1418 static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1420 struct cp_private *cp = netdev_priv(dev);
1422 unsigned long flags;
1424 spin_lock_irqsave(&cp->lock, flags);
1425 rc = mii_ethtool_gset(&cp->mii_if, cmd);
1426 spin_unlock_irqrestore(&cp->lock, flags);
1431 static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1433 struct cp_private *cp = netdev_priv(dev);
1435 unsigned long flags;
1437 spin_lock_irqsave(&cp->lock, flags);
1438 rc = mii_ethtool_sset(&cp->mii_if, cmd);
1439 spin_unlock_irqrestore(&cp->lock, flags);
1444 static int cp_nway_reset(struct net_device *dev)
1446 struct cp_private *cp = netdev_priv(dev);
1447 return mii_nway_restart(&cp->mii_if);
1450 static u32 cp_get_msglevel(struct net_device *dev)
1452 struct cp_private *cp = netdev_priv(dev);
1453 return cp->msg_enable;
1456 static void cp_set_msglevel(struct net_device *dev, u32 value)
1458 struct cp_private *cp = netdev_priv(dev);
1459 cp->msg_enable = value;
1462 static int cp_set_features(struct net_device *dev, netdev_features_t features)
1464 struct cp_private *cp = netdev_priv(dev);
1465 unsigned long flags;
1467 if (!((dev->features ^ features) & NETIF_F_RXCSUM))
1470 spin_lock_irqsave(&cp->lock, flags);
1472 if (features & NETIF_F_RXCSUM)
1473 cp->cpcmd |= RxChkSum;
1475 cp->cpcmd &= ~RxChkSum;
1477 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1478 cp->cpcmd |= RxVlanOn;
1480 cp->cpcmd &= ~RxVlanOn;
1482 cpw16_f(CpCmd, cp->cpcmd);
1483 spin_unlock_irqrestore(&cp->lock, flags);
1488 static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1491 struct cp_private *cp = netdev_priv(dev);
1492 unsigned long flags;
1494 if (regs->len < CP_REGS_SIZE)
1495 return /* -EINVAL */;
1497 regs->version = CP_REGS_VER;
1499 spin_lock_irqsave(&cp->lock, flags);
1500 memcpy_fromio(p, cp->regs, CP_REGS_SIZE);
1501 spin_unlock_irqrestore(&cp->lock, flags);
1504 static void cp_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1506 struct cp_private *cp = netdev_priv(dev);
1507 unsigned long flags;
1509 spin_lock_irqsave (&cp->lock, flags);
1510 netdev_get_wol (cp, wol);
1511 spin_unlock_irqrestore (&cp->lock, flags);
1514 static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1516 struct cp_private *cp = netdev_priv(dev);
1517 unsigned long flags;
1520 spin_lock_irqsave (&cp->lock, flags);
1521 rc = netdev_set_wol (cp, wol);
1522 spin_unlock_irqrestore (&cp->lock, flags);
1527 static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
1529 switch (stringset) {
1531 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
1539 static void cp_get_ethtool_stats (struct net_device *dev,
1540 struct ethtool_stats *estats, u64 *tmp_stats)
1542 struct cp_private *cp = netdev_priv(dev);
1543 struct cp_dma_stats *nic_stats;
1547 nic_stats = dma_alloc_coherent(&cp->pdev->dev, sizeof(*nic_stats),
1552 /* begin NIC statistics dump */
1553 cpw32(StatsAddr + 4, (u64)dma >> 32);
1554 cpw32(StatsAddr, ((u64)dma & DMA_BIT_MASK(32)) | DumpStats);
1557 for (i = 0; i < 1000; i++) {
1558 if ((cpr32(StatsAddr) & DumpStats) == 0)
1562 cpw32(StatsAddr, 0);
1563 cpw32(StatsAddr + 4, 0);
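/*
 * The dump handshake above: the low StatsAddr write carries both the
 * 32 low address bits and the DumpStats command bit; the chip clears
 * DumpStats once it has DMA'd the 64-byte stats block, which the
 * polling loop waits for before the counters are copied out below.
 */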
1567 tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok);
1568 tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok);
1569 tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err);
1570 tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err);
1571 tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo);
1572 tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align);
1573 tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col);
1574 tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol);
1575 tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys);
1576 tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast);
1577 tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast);
1578 tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
1579 tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
1580 tmp_stats[i++] = cp->cp_stats.rx_frags;
1581 BUG_ON(i != CP_NUM_STATS);
1583 dma_free_coherent(&cp->pdev->dev, sizeof(*nic_stats), nic_stats, dma);
1586 static const struct ethtool_ops cp_ethtool_ops = {
1587 .get_drvinfo = cp_get_drvinfo,
1588 .get_regs_len = cp_get_regs_len,
1589 .get_sset_count = cp_get_sset_count,
1590 .get_settings = cp_get_settings,
1591 .set_settings = cp_set_settings,
1592 .nway_reset = cp_nway_reset,
1593 .get_link = ethtool_op_get_link,
1594 .get_msglevel = cp_get_msglevel,
1595 .set_msglevel = cp_set_msglevel,
1596 .get_regs = cp_get_regs,
1597 .get_wol = cp_get_wol,
1598 .set_wol = cp_set_wol,
1599 .get_strings = cp_get_strings,
1600 .get_ethtool_stats = cp_get_ethtool_stats,
1601 .get_eeprom_len = cp_get_eeprom_len,
1602 .get_eeprom = cp_get_eeprom,
1603 .set_eeprom = cp_set_eeprom,
1604 .get_ringparam = cp_get_ringparam,
1607 static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1609 struct cp_private *cp = netdev_priv(dev);
1611 unsigned long flags;
1613 if (!netif_running(dev))
1616 spin_lock_irqsave(&cp->lock, flags);
1617 rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL);
1618 spin_unlock_irqrestore(&cp->lock, flags);
1622 static int cp_set_mac_address(struct net_device *dev, void *p)
1624 struct cp_private *cp = netdev_priv(dev);
1625 struct sockaddr *addr = p;
1627 if (!is_valid_ether_addr(addr->sa_data))
1628 return -EADDRNOTAVAIL;
1630 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1632 spin_lock_irq(&cp->lock);
1634 cpw8_f(Cfg9346, Cfg9346_Unlock);
1635 cpw32_f(MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
1636 cpw32_f(MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
1637 cpw8_f(Cfg9346, Cfg9346_Lock);
1639 spin_unlock_irq(&cp->lock);
1644 /* Serial EEPROM section. */
1646 /* EEPROM_Ctrl bits. */
1647 #define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
1648 #define EE_CS 0x08 /* EEPROM chip select. */
1649 #define EE_DATA_WRITE 0x02 /* EEPROM chip data in. */
1650 #define EE_WRITE_0 0x00
1651 #define EE_WRITE_1 0x02
1652 #define EE_DATA_READ 0x01 /* EEPROM chip data out. */
1653 #define EE_ENB (0x80 | EE_CS)
1655 /* Delay between EEPROM clock transitions.
1656 No extra delay is needed with 33 MHz PCI, but 66 MHz may change this.
1659 #define eeprom_delay() readb(ee_addr)
1661 /* The EEPROM commands include the always-set leading bit. */
1662 #define EE_EXTEND_CMD (4)
1663 #define EE_WRITE_CMD (5)
1664 #define EE_READ_CMD (6)
1665 #define EE_ERASE_CMD (7)
1667 #define EE_EWDS_ADDR (0)
1668 #define EE_WRAL_ADDR (1)
1669 #define EE_ERAL_ADDR (2)
1670 #define EE_EWEN_ADDR (3)
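/*
 * Command framing example (illustrative): for a 6-bit-address part,
 * reading word 7 shifts out (EE_READ_CMD << 6) | 7 == 0x187 MSB-first
 * over 3 + addr_len == 9 clocks: the always-set start bit, the 2-bit
 * READ opcode, then the address 000111b. read_eeprom() below builds
 * exactly this value.
 */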
1672 #define CP_EEPROM_MAGIC PCI_DEVICE_ID_REALTEK_8139
1674 static void eeprom_cmd_start(void __iomem *ee_addr)
1676 writeb (EE_ENB & ~EE_CS, ee_addr);
1677 writeb (EE_ENB, ee_addr);
1681 static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
1685 /* Shift the command bits out. */
1686 for (i = cmd_len - 1; i >= 0; i--) {
1687 int dataval = (cmd & (1 << i)) ? EE_DATA_WRITE : 0;
1688 writeb (EE_ENB | dataval, ee_addr);
1690 writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
1693 writeb (EE_ENB, ee_addr);
1697 static void eeprom_cmd_end(void __iomem *ee_addr)
1703 static void eeprom_extend_cmd(void __iomem *ee_addr, int extend_cmd,
1706 int cmd = (EE_EXTEND_CMD << addr_len) | (extend_cmd << (addr_len - 2));
1708 eeprom_cmd_start(ee_addr);
1709 eeprom_cmd(ee_addr, cmd, 3 + addr_len);
1710 eeprom_cmd_end(ee_addr);
1713 static u16 read_eeprom (void __iomem *ioaddr, int location, int addr_len)
1717 void __iomem *ee_addr = ioaddr + Cfg9346;
1718 int read_cmd = location | (EE_READ_CMD << addr_len);
1720 eeprom_cmd_start(ee_addr);
1721 eeprom_cmd(ee_addr, read_cmd, 3 + addr_len);
1723 for (i = 16; i > 0; i--) {
1724 writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
1727 (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
1729 writeb (EE_ENB, ee_addr);
1733 eeprom_cmd_end(ee_addr);
1738 static void write_eeprom(void __iomem *ioaddr, int location, u16 val,
1742 void __iomem *ee_addr = ioaddr + Cfg9346;
1743 int write_cmd = location | (EE_WRITE_CMD << addr_len);
1745 eeprom_extend_cmd(ee_addr, EE_EWEN_ADDR, addr_len);
1747 eeprom_cmd_start(ee_addr);
1748 eeprom_cmd(ee_addr, write_cmd, 3 + addr_len);
1749 eeprom_cmd(ee_addr, val, 16);
1750 eeprom_cmd_end(ee_addr);
1752 eeprom_cmd_start(ee_addr);
1753 for (i = 0; i < 20000; i++)
1754 if (readb(ee_addr) & EE_DATA_READ)
1756 eeprom_cmd_end(ee_addr);
1758 eeprom_extend_cmd(ee_addr, EE_EWDS_ADDR, addr_len);
1761 static int cp_get_eeprom_len(struct net_device *dev)
1763 struct cp_private *cp = netdev_priv(dev);
1766 spin_lock_irq(&cp->lock);
1767 size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128;
1768 spin_unlock_irq(&cp->lock);
1773 static int cp_get_eeprom(struct net_device *dev,
1774 struct ethtool_eeprom *eeprom, u8 *data)
1776 struct cp_private *cp = netdev_priv(dev);
1777 unsigned int addr_len;
1779 u32 offset = eeprom->offset >> 1;
1780 u32 len = eeprom->len;
1783 eeprom->magic = CP_EEPROM_MAGIC;
1785 spin_lock_irq(&cp->lock);
1787 addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1789 if (eeprom->offset & 1) {
1790 val = read_eeprom(cp->regs, offset, addr_len);
1791 data[i++] = (u8)(val >> 8);
1795 while (i < len - 1) {
1796 val = read_eeprom(cp->regs, offset, addr_len);
1797 data[i++] = (u8)val;
1798 data[i++] = (u8)(val >> 8);
1803 val = read_eeprom(cp->regs, offset, addr_len);
1807 spin_unlock_irq(&cp->lock);
1811 static int cp_set_eeprom(struct net_device *dev,
1812 struct ethtool_eeprom *eeprom, u8 *data)
1814 struct cp_private *cp = netdev_priv(dev);
1815 unsigned int addr_len;
1817 u32 offset = eeprom->offset >> 1;
1818 u32 len = eeprom->len;
1821 if (eeprom->magic != CP_EEPROM_MAGIC)
1824 spin_lock_irq(&cp->lock);
1826 addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1828 if (eeprom->offset & 1) {
1829 val = read_eeprom(cp->regs, offset, addr_len) & 0xff;
1830 val |= (u16)data[i++] << 8;
1831 write_eeprom(cp->regs, offset, val, addr_len);
1835 while (i < len - 1) {
1836 val = (u16)data[i++];
1837 val |= (u16)data[i++] << 8;
1838 write_eeprom(cp->regs, offset, val, addr_len);
1843 val = read_eeprom(cp->regs, offset, addr_len) & 0xff00;
1844 val |= (u16)data[i];
1845 write_eeprom(cp->regs, offset, val, addr_len);
1848 spin_unlock_irq(&cp->lock);
1852 /* Put the board into D3cold state and wait for WakeUp signal */
1853 static void cp_set_d3_state (struct cp_private *cp)
1855 pci_enable_wake(cp->pdev, PCI_D0, 1); /* Enable PME# generation */
1856 pci_set_power_state (cp->pdev, PCI_D3hot);
1859 static const struct net_device_ops cp_netdev_ops = {
1860 .ndo_open = cp_open,
1861 .ndo_stop = cp_close,
1862 .ndo_validate_addr = eth_validate_addr,
1863 .ndo_set_mac_address = cp_set_mac_address,
1864 .ndo_set_rx_mode = cp_set_rx_mode,
1865 .ndo_get_stats = cp_get_stats,
1866 .ndo_do_ioctl = cp_ioctl,
1867 .ndo_start_xmit = cp_start_xmit,
1868 .ndo_tx_timeout = cp_tx_timeout,
1869 .ndo_set_features = cp_set_features,
1870 .ndo_change_mtu = cp_change_mtu,
1872 #ifdef CONFIG_NET_POLL_CONTROLLER
1873 .ndo_poll_controller = cp_poll_controller,
1877 static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1879 struct net_device *dev;
1880 struct cp_private *cp;
1883 resource_size_t pciaddr;
1884 unsigned int addr_len, i, pci_using_dac;
1886 pr_info_once("%s", version);
1888 if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
1889 pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
1890 dev_info(&pdev->dev,
1891 "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n",
1892 pdev->vendor, pdev->device, pdev->revision);
1896 dev = alloc_etherdev(sizeof(struct cp_private));
1899 SET_NETDEV_DEV(dev, &pdev->dev);
1901 cp = netdev_priv(dev);
1904 cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug);
1905 spin_lock_init (&cp->lock);
1906 cp->mii_if.dev = dev;
1907 cp->mii_if.mdio_read = mdio_read;
1908 cp->mii_if.mdio_write = mdio_write;
1909 cp->mii_if.phy_id = CP_INTERNAL_PHY;
1910 cp->mii_if.phy_id_mask = 0x1f;
1911 cp->mii_if.reg_num_mask = 0x1f;
1912 cp_set_rxbufsize(cp);
1914 rc = pci_enable_device(pdev);
1918 rc = pci_set_mwi(pdev);
1920 goto err_out_disable;
1922 rc = pci_request_regions(pdev, DRV_NAME);
1926 pciaddr = pci_resource_start(pdev, 1);
1929 dev_err(&pdev->dev, "no MMIO resource\n");
1932 if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
1934 dev_err(&pdev->dev, "MMIO resource (%llx) too small\n",
1935 (unsigned long long)pci_resource_len(pdev, 1));
1939 /* Configure DMA attributes. */
1940 if ((sizeof(dma_addr_t) > 4) &&
1941 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1942 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
1947 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1950 "No usable DMA configuration, aborting\n");
1953 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1956 "No usable consistent DMA configuration, aborting\n");
1961 cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
1962 PCIMulRW | RxChkSum | CpRxOn | CpTxOn;
1964 dev->features |= NETIF_F_RXCSUM;
1965 dev->hw_features |= NETIF_F_RXCSUM;
1967 regs = ioremap(pciaddr, CP_REGS_SIZE);
1970 dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
1971 (unsigned long long)pci_resource_len(pdev, 1),
1972 (unsigned long long)pciaddr);
1979 /* read MAC address from EEPROM */
1980 addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
1981 for (i = 0; i < 3; i++)
1982 ((__le16 *) (dev->dev_addr))[i] =
1983 cpu_to_le16(read_eeprom (regs, i + 7, addr_len));
1985 dev->netdev_ops = &cp_netdev_ops;
1986 netif_napi_add(dev, &cp->napi, cp_rx_poll, 16);
1987 dev->ethtool_ops = &cp_ethtool_ops;
1988 dev->watchdog_timeo = TX_TIMEOUT;
1990 dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
1993 dev->features |= NETIF_F_HIGHDMA;
1995 /* disabled by default until verified */
1996 dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
1997 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
1998 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
2001 rc = register_netdev(dev);
2005 netdev_info(dev, "RTL-8139C+ at 0x%p, %pM, IRQ %d\n",
2006 regs, dev->dev_addr, pdev->irq);
2008 pci_set_drvdata(pdev, dev);
2010 /* enable busmastering and memory-write-invalidate */
2011 pci_set_master(pdev);
2013 if (cp->wol_enabled)
2014 cp_set_d3_state (cp);
2021 pci_release_regions(pdev);
2023 pci_clear_mwi(pdev);
2025 pci_disable_device(pdev);
2031 static void cp_remove_one (struct pci_dev *pdev)
2033 struct net_device *dev = pci_get_drvdata(pdev);
2034 struct cp_private *cp = netdev_priv(dev);
2036 unregister_netdev(dev);
2038 if (cp->wol_enabled)
2039 pci_set_power_state (pdev, PCI_D0);
2040 pci_release_regions(pdev);
2041 pci_clear_mwi(pdev);
2042 pci_disable_device(pdev);
2047 static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
2049 struct net_device *dev = pci_get_drvdata(pdev);
2050 struct cp_private *cp = netdev_priv(dev);
2051 unsigned long flags;
2053 if (!netif_running(dev))
2056 netif_device_detach (dev);
2057 netif_stop_queue (dev);
2059 spin_lock_irqsave (&cp->lock, flags);
2061 /* Disable Rx and Tx */
2062 cpw16 (IntrMask, 0);
2063 cpw8 (Cmd, cpr8 (Cmd) & ~(RxOn | TxOn));
2065 spin_unlock_irqrestore (&cp->lock, flags);
2067 pci_save_state(pdev);
2068 pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled);
2069 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2074 static int cp_resume (struct pci_dev *pdev)
2076 struct net_device *dev = pci_get_drvdata (pdev);
2077 struct cp_private *cp = netdev_priv(dev);
2078 unsigned long flags;
2080 if (!netif_running(dev))
2083 netif_device_attach (dev);
2085 pci_set_power_state(pdev, PCI_D0);
2086 pci_restore_state(pdev);
2087 pci_enable_wake(pdev, PCI_D0, 0);
2089 /* FIXME: sh*t may happen if the Rx ring buffer is depleted */
2090 cp_init_rings_index (cp);
2093 netif_start_queue (dev);
2095 spin_lock_irqsave (&cp->lock, flags);
2097 mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
2099 spin_unlock_irqrestore (&cp->lock, flags);
2103 #endif /* CONFIG_PM */
2105 static const struct pci_device_id cp_pci_tbl[] = {
2106 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139), },
2107 { PCI_DEVICE(PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322), },
2110 MODULE_DEVICE_TABLE(pci, cp_pci_tbl);
2112 static struct pci_driver cp_driver = {
2114 .id_table = cp_pci_tbl,
2115 .probe = cp_init_one,
2116 .remove = cp_remove_one,
2118 .resume = cp_resume,
2119 .suspend = cp_suspend,
2123 module_pci_driver(cp_driver);