1 // SPDX-License-Identifier: GPL-2.0-only
7 * Converted to DMA API, added zero-copy buffer handling, and
8 * (from the mac68k project) introduced dhd's support for 16-bit cards.
10 * (C) 1996,1998 by Thomas Bogendoerfer (tsbogend@alpha.franken.de)
12 * This driver is based on work from Andreas Busse, but most of
13 * the code is rewritten.
15 * (C) 1995 by Andreas Busse (andy@waldorf-gmbh.de)
17 * Core code included by system sonic drivers
19 * And... partially rewritten again by David Huggins-Daines in order
20 * to cope with screwed up Macintosh NICs that may or may not use
23 * (C) 1999 David Huggins-Daines <dhd@debian.org>
28 * Sources: Olivetti M700-10 Risc Personal Computer hardware handbook,
29 * National Semiconductors data sheet for the DP83932B Sonic Ethernet
30 * controller, and the files "8390.c" and "skeleton.c" in this directory.
32 * Additional sources: Nat Semi data sheet for the DP83932C and Nat Semi
33 * Application Note AN-746, the files "lance.c" and "ibmlana.c". See also
34 * the NetBSD file "sys/arch/mac68k/dev/if_sn.c".
/* Guards the one-time printing of the driver version string
 * (see sonic_msg_init()). */
static unsigned int version_printed;

/* Debug message level; -1 lets netif_msg_init() pick its default mask. */
static int sonic_debug = -1;
module_param(sonic_debug, int, 0);
MODULE_PARM_DESC(sonic_debug, "debug message level");
/*
 * Initialize the per-device message-enable mask from the sonic_debug
 * module parameter, and print the driver version string exactly once
 * across all devices.
 */
static void sonic_msg_init(struct net_device *dev)
	struct sonic_local *lp = netdev_priv(dev);

	lp->msg_enable = netif_msg_init(sonic_debug, 0);

	/* post-increment ensures the version banner prints only on the
	 * first call, no matter how many devices probe */
	if (version_printed++ == 0)
		netif_dbg(lp, drv, dev, "%s", version);
/*
 * Open/initialize the SONIC controller.
 *
 * This routine should set everything up anew at each open, even
 * registers that "should" only need to be set once at boot, so that
 * there is a non-reboot way to recover if something goes wrong.
 *
 * NOTE(review): this excerpt is missing several lines (braces, error
 * returns, the sonic_init() call) relative to the complete source;
 * comments below describe only what is visible here.
 */
static int sonic_open(struct net_device *dev)
	struct sonic_local *lp = netdev_priv(dev);

	netif_dbg(lp, ifup, dev, "%s: initializing sonic driver\n", __func__);

	spin_lock_init(&lp->lock);

	/* Allocate one skb per receive resource; the +2 leaves room for
	 * the IP-header alignment reserve applied below. */
	for (i = 0; i < SONIC_NUM_RRS; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
		/* allocation-failure unwind path */
		while(i > 0) { /* free any that were allocated successfully */
			dev_kfree_skb(lp->rx_skb[i]);
		printk(KERN_ERR "%s: couldn't allocate receive buffers\n",

		/* align IP header unless DMA requires otherwise */
		if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)

	/* Map each receive skb for DMA from the device into rx_laddr[]. */
	for (i = 0; i < SONIC_NUM_RRS; i++) {
		dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE),
						  SONIC_RBSIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(lp->device, laddr)) {
			/* mapping-failure unwind: first unmap what was mapped... */
			while(i > 0) { /* free any that were mapped successfully */
				dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
				lp->rx_laddr[i] = (dma_addr_t)0;
			/* ...then free all the allocated skbs */
			for (i = 0; i < SONIC_NUM_RRS; i++) {
				dev_kfree_skb(lp->rx_skb[i]);
			printk(KERN_ERR "%s: couldn't map rx DMA buffers\n",
		lp->rx_laddr[i] = laddr;

	/*
	 * Initialize the SONIC
	 */
	netif_start_queue(dev);

	netif_dbg(lp, ifup, dev, "%s: Initialization done\n", __func__);
/*
 * Close the SONIC device: stop the queue, quiesce the chip, and release
 * every DMA mapping and skb owned by the driver (both directions).
 */
static int sonic_close(struct net_device *dev)
	struct sonic_local *lp = netdev_priv(dev);

	netif_dbg(lp, ifdown, dev, "%s\n", __func__);

	netif_stop_queue(dev);

	/*
	 * stop the SONIC, disable interrupts
	 */
	SONIC_WRITE(SONIC_IMR, 0);		/* mask every interrupt source */
	SONIC_WRITE(SONIC_ISR, 0x7fff);		/* ack anything still pending */
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);	/* hold chip in software reset */

	/* unmap and free skbs that haven't been transmitted */
	for (i = 0; i < SONIC_NUM_TDS; i++) {
		if(lp->tx_laddr[i]) {
			dma_unmap_single(lp->device, lp->tx_laddr[i], lp->tx_len[i], DMA_TO_DEVICE);
			lp->tx_laddr[i] = (dma_addr_t)0;
		dev_kfree_skb(lp->tx_skb[i]);
		lp->tx_skb[i] = NULL;

	/* unmap and free the receive buffers */
	for (i = 0; i < SONIC_NUM_RRS; i++) {
		if(lp->rx_laddr[i]) {
			dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
			lp->rx_laddr[i] = (dma_addr_t)0;
		dev_kfree_skb(lp->rx_skb[i]);
		lp->rx_skb[i] = NULL;
/*
 * Transmit watchdog handler: the stack detected that transmission has
 * stalled. Reset the chip, drop every in-flight tx skb (releasing its
 * DMA mapping), and restart the adaptor.
 */
static void sonic_tx_timeout(struct net_device *dev)
	struct sonic_local *lp = netdev_priv(dev);
	/*
	 * put the Sonic into software-reset mode and
	 * disable all interrupts before releasing DMA buffers
	 */
	SONIC_WRITE(SONIC_IMR, 0);
	SONIC_WRITE(SONIC_ISR, 0x7fff);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
	/* We could resend the original skbs. Easier to re-initialise. */
	for (i = 0; i < SONIC_NUM_TDS; i++) {
		if(lp->tx_laddr[i]) {
			dma_unmap_single(lp->device, lp->tx_laddr[i], lp->tx_len[i], DMA_TO_DEVICE);
			lp->tx_laddr[i] = (dma_addr_t)0;
		dev_kfree_skb(lp->tx_skb[i]);
		lp->tx_skb[i] = NULL;
	/* Try to restart the adaptor. */
	/* NOTE(review): the reinit call itself is not visible in this excerpt */
	lp->stats.tx_errors++;
	netif_trans_update(dev); /* prevent tx timeout */
	netif_wake_queue(dev);
/*
 * Transmit entry point (ndo_start_xmit).
 *
 * Appends new TD during transmission thus avoiding any TX interrupts
 * until we run out of TDs.
 * This routine interacts closely with the ISR in that it may,
 *   reset the status flags of the new TD
 *   set and reset EOL flags
 * The ISR interacts with this routine in various ways. It may,
 *   test the EOL and status flags of the TDs
 * Concurrently with all of this, the SONIC is potentially writing to
 * the status flags of the TDs.
 */
static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
	struct sonic_local *lp = netdev_priv(dev);

	netif_dbg(lp, tx_queued, dev, "%s: skb=%p\n", __func__, skb);

	/* Pad runt frames up to the ethernet minimum; skb_padto() frees
	 * the skb itself on failure. */
	if (length < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))

	/*
	 * Map the packet data into the logical DMA address space
	 */
	laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
	/* mapping failed: drop the packet rather than queue a bad TD */
	pr_err_ratelimited("%s: failed to map tx DMA buffer.\n", dev->name);
	dev_kfree_skb_any(skb);

	/* lock shared with sonic_interrupt(): TD ring state below is
	 * touched from both contexts */
	spin_lock_irqsave(&lp->lock, flags);

	/* Fill in the transmit descriptor: single fragment, mark it EOL
	 * so the chip stops here until the next packet is chained on. */
	sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0);       /* clear status */
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1);   /* single fragment */
	sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_L, laddr & 0xffff);
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_H, laddr >> 16);
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_SIZE, length);
	sonic_tda_put(dev, entry, SONIC_TD_LINK,
		      sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL);

	/* remember skb/mapping so the ISR can unmap and free it later */
	lp->tx_len[entry] = length;
	lp->tx_laddr[entry] = laddr;
	lp->tx_skb[entry] = skb;

	/* clear EOL on the previous tail so the chip advances to this TD */
	sonic_tda_put(dev, lp->eol_tx, SONIC_TD_LINK,
		      sonic_tda_get(dev, lp->eol_tx, SONIC_TD_LINK) & ~SONIC_EOL);

	lp->next_tx = (entry + 1) & SONIC_TDS_MASK;
	if (lp->tx_skb[lp->next_tx] != NULL) {
		/* The ring is full, the ISR has yet to process the next TD. */
		netif_dbg(lp, tx_queued, dev, "%s: stopping queue\n", __func__);
		netif_stop_queue(dev);
		/* after this packet, wait for ISR to free up some TDAs */
	} else netif_start_queue(dev);

	netif_dbg(lp, tx_queued, dev, "%s: issuing Tx command\n", __func__);

	SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);

	spin_unlock_irqrestore(&lp->lock, flags);
/*
 * The typical workload of the driver:
 * Handle the network interface interrupts.
 *
 * Loops reading/acking ISR causes until none remain: dispatches rx,
 * reclaims completed tx descriptors, and folds the chip's 16-bit
 * hardware tally counters into the software stats on overflow.
 */
static irqreturn_t sonic_interrupt(int irq, void *dev_id)
	struct net_device *dev = dev_id;
	struct sonic_local *lp = netdev_priv(dev);

	/* The lock has two purposes. Firstly, it synchronizes sonic_interrupt()
	 * with sonic_send_packet() so that the two functions can share state.
	 * Secondly, it makes sonic_interrupt() re-entrant, as that is required
	 * by macsonic which must use two IRQs with different priority levels.
	 */
	spin_lock_irqsave(&lp->lock, flags);

	status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
	/* nothing for us: release the lock and bail */
	spin_unlock_irqrestore(&lp->lock, flags);

	SONIC_WRITE(SONIC_ISR, status); /* clear the interrupt(s) */

	if (status & SONIC_INT_PKTRX) {
		netif_dbg(lp, intr, dev, "%s: packet rx\n", __func__);
		sonic_rx(dev);	/* got packet(s) */

	if (status & SONIC_INT_TXDN) {
		int entry = lp->cur_tx;

		/* The state of a Transmit Descriptor may be inferred
		 * from { tx_skb[entry], td_status } as follows.
		 * { clear, clear } => the TD has never been used
		 * { set,   clear } => the TD was handed to SONIC
		 * { set,   set   } => the TD was handed back
		 * { clear, set   } => the TD is available for re-use
		 */

		netif_dbg(lp, intr, dev, "%s: tx done\n", __func__);

		/* walk the ring reclaiming every TD the chip handed back */
		while (lp->tx_skb[entry] != NULL) {
			/* status still 0 => chip hasn't finished this one yet */
			if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0)

			if (td_status & SONIC_TCR_PTX) {
				/* packet transmitted OK */
				lp->stats.tx_packets++;
				lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE);
				/* classify the failure from the TD status bits */
				if (td_status & (SONIC_TCR_EXD |
						 SONIC_TCR_EXC | SONIC_TCR_BCM))
					lp->stats.tx_aborted_errors++;
				/* NOTE(review): the "if (td_status &" line for the
				 * carrier-error test is missing from this excerpt */
				    (SONIC_TCR_NCRS | SONIC_TCR_CRLS))
					lp->stats.tx_carrier_errors++;
				if (td_status & SONIC_TCR_OWC)
					lp->stats.tx_window_errors++;
				if (td_status & SONIC_TCR_FU)
					lp->stats.tx_fifo_errors++;

			/* We must free the original skb */
			dev_consume_skb_irq(lp->tx_skb[entry]);
			lp->tx_skb[entry] = NULL;
			/* and unmap DMA buffer */
			dma_unmap_single(lp->device, lp->tx_laddr[entry], lp->tx_len[entry], DMA_TO_DEVICE);
			lp->tx_laddr[entry] = (dma_addr_t)0;

			/* EOL reached: the chip stops here, so stop reclaiming */
			if (sonic_tda_get(dev, entry, SONIC_TD_LINK) & SONIC_EOL) {
				entry = (entry + 1) & SONIC_TDS_MASK;

			entry = (entry + 1) & SONIC_TDS_MASK;

		if (freed_some || lp->tx_skb[entry] == NULL)
			netif_wake_queue(dev);	/* The ring is no longer full */

	/*
	 * check error conditions
	 */
	if (status & SONIC_INT_RFO) {
		netif_dbg(lp, rx_err, dev, "%s: rx fifo overrun\n",

	if (status & SONIC_INT_RDE) {
		netif_dbg(lp, rx_err, dev, "%s: rx descriptors exhausted\n",

	if (status & SONIC_INT_RBAE) {
		netif_dbg(lp, rx_err, dev, "%s: rx buffer area exceeded\n",

	/* counter overruns; all counters are 16bit wide */
	if (status & SONIC_INT_FAE)
		lp->stats.rx_frame_errors += 65536;
	if (status & SONIC_INT_CRC)
		lp->stats.rx_crc_errors += 65536;
	if (status & SONIC_INT_MP)
		lp->stats.rx_missed_errors += 65536;

	/* transmit error: only the FIFO-underrun case is reported here */
	if (status & SONIC_INT_TXER)
		if (SONIC_READ(SONIC_TCR) & SONIC_TCR_FU)
			netif_dbg(lp, tx_err, dev, "%s: tx fifo underrun\n",

	/* bus retry */
	if (status & SONIC_INT_BR) {
		printk(KERN_ERR "%s: Bus retry occurred! Device interrupt disabled.\n",
		/* ... to help debug DMA problems causing endless interrupts. */
		/* Bounce the eth interface to turn on the interrupt again. */
		SONIC_WRITE(SONIC_IMR, 0);

	/* re-read: loop until no enabled cause remains asserted */
	status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;

	spin_unlock_irqrestore(&lp->lock, flags);
/*
 * We have a good packet(s), pass it/them up the network stack.
 *
 * Zero-copy receive: for each completed descriptor, a fresh skb is
 * allocated and mapped to replace the ring buffer, and the filled skb
 * is handed to the stack. On allocation/mapping failure the packet is
 * dropped and the old buffer stays in the ring.
 */
static void sonic_rx(struct net_device *dev)
	struct sonic_local *lp = netdev_priv(dev);
	int entry = lp->cur_rx;

	/* IN_USE == 0 means the chip has handed this descriptor back */
	while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) {
		struct sk_buff *used_skb;
		struct sk_buff *new_skb;
		dma_addr_t new_laddr;

		status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
		if (status & SONIC_RCR_PRX) {
			/* Malloc up new buffer. */
			new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
			if (new_skb == NULL) {
				lp->stats.rx_dropped++;

			/* provide 16 byte IP header alignment unless DMA requires otherwise */
			if(SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
				skb_reserve(new_skb, 2);

			new_laddr = dma_map_single(lp->device, skb_put(new_skb, SONIC_RBSIZE),
						   SONIC_RBSIZE, DMA_FROM_DEVICE);
			/* mapping failed: keep the old buffer, drop this packet */
			dev_kfree_skb(new_skb);
			printk(KERN_ERR "%s: Failed to map rx buffer, dropping packet.\n", dev->name);
			lp->stats.rx_dropped++;

			/* now we have a new skb to replace it, pass the used one up the stack */
			dma_unmap_single(lp->device, lp->rx_laddr[entry], SONIC_RBSIZE, DMA_FROM_DEVICE);
			used_skb = lp->rx_skb[entry];
			pkt_len = sonic_rda_get(dev, entry, SONIC_RD_PKTLEN);
			skb_trim(used_skb, pkt_len);
			used_skb->protocol = eth_type_trans(used_skb, dev);
			/* NOTE(review): the netif_rx(used_skb) call is not
			 * visible in this excerpt */
			lp->stats.rx_packets++;
			lp->stats.rx_bytes += pkt_len;

			/* and insert the new skb */
			lp->rx_laddr[entry] = new_laddr;
			lp->rx_skb[entry] = new_skb;

			/* publish the new buffer address to the RRA (split 16/16) */
			bufadr_l = (unsigned long)new_laddr & 0xffff;
			bufadr_h = (unsigned long)new_laddr >> 16;
			sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, bufadr_l);
			sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, bufadr_h);

		/* This should only happen, if we enable accepting broken packets. */

		if (status & SONIC_RCR_LPKT) {
			/*
			 * this was the last packet out of the current receive buffer
			 * give the buffer back to the SONIC
			 */
			lp->cur_rwp += SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode);
			if (lp->cur_rwp >= lp->rra_end) lp->cur_rwp = lp->rra_laddr & 0xffff;
			SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
			if (SONIC_READ(SONIC_ISR) & SONIC_INT_RBE) {
				netif_dbg(lp, rx_err, dev, "%s: rx buffer exhausted\n",
				SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE); /* clear the flag */

			printk(KERN_ERR "%s: rx desc without RCR_LPKT. Shouldn't happen !?\n",
		/*
		 * give back the descriptor: set EOL here, clear it on the
		 * previous tail so the chip keeps a valid stopping point
		 */
		sonic_rda_put(dev, entry, SONIC_RD_LINK,
			      sonic_rda_get(dev, entry, SONIC_RD_LINK) | SONIC_EOL);
		sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1);
		sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK,
			      sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK) & ~SONIC_EOL);
		lp->cur_rx = entry = (entry + 1) & SONIC_RDS_MASK;

	/*
	 * If any worth-while packets have been received, netif_rx()
	 * has done a mark_bh(NET_BH) for us and will work on them
	 * when we get to the bottom-half routine.
	 */
/*
 * Get the current statistics.
 * This may be called with the device open or closed.
 *
 * The SONIC's tally counters are 16-bit saturating registers, so each
 * one is folded into the software stats and then reset to 0xffff.
 */
static struct net_device_stats *sonic_get_stats(struct net_device *dev)
	struct sonic_local *lp = netdev_priv(dev);

	/* read the tally counters from the SONIC and reset them */
	lp->stats.rx_crc_errors += SONIC_READ(SONIC_CRCT);
	SONIC_WRITE(SONIC_CRCT, 0xffff);
	lp->stats.rx_frame_errors += SONIC_READ(SONIC_FAET);
	SONIC_WRITE(SONIC_FAET, 0xffff);
	lp->stats.rx_missed_errors += SONIC_READ(SONIC_MPT);
	SONIC_WRITE(SONIC_MPT, 0xffff);
/*
 * Set or clear the multicast filter for this adaptor.
 *
 * Promiscuous mode sets RCR_PRO; all-multi or more than 15 groups sets
 * RCR_AMC (the CAM has 16 entries and entry 0 holds our own address);
 * otherwise the multicast addresses are loaded into the CAM.
 */
static void sonic_multicast_list(struct net_device *dev)
	struct sonic_local *lp = netdev_priv(dev);
	struct netdev_hw_addr *ha;

	/* start from the current RCR with filter mode bits cleared */
	rcr = SONIC_READ(SONIC_RCR) & ~(SONIC_RCR_PRO | SONIC_RCR_AMC);
	rcr |= SONIC_RCR_BRD;	/* accept broadcast packets */

	if (dev->flags & IFF_PROMISC) {	/* set promiscuous mode */
		rcr |= SONIC_RCR_PRO;
		/* too many groups for the 16-entry CAM: accept all multicast */
		if ((dev->flags & IFF_ALLMULTI) ||
		    (netdev_mc_count(dev) > 15)) {
			rcr |= SONIC_RCR_AMC;

		netif_dbg(lp, ifup, dev, "%s: mc_count %d\n", __func__,
			  netdev_mc_count(dev));
		sonic_set_cam_enable(dev, 1);	/* always enable our own address */
		/* load each multicast address into a CAM entry; the 'addr'
		 * assignment line is not visible in this excerpt */
		netdev_for_each_mc_addr(ha, dev) {
			sonic_cda_put(dev, i, SONIC_CD_CAP0, addr[1] << 8 | addr[0]);
			sonic_cda_put(dev, i, SONIC_CD_CAP1, addr[3] << 8 | addr[2]);
			sonic_cda_put(dev, i, SONIC_CD_CAP2, addr[5] << 8 | addr[4]);
			sonic_set_cam_enable(dev, sonic_get_cam_enable(dev) | (1 << i));

		SONIC_WRITE(SONIC_CDC, 16);
		/* issue Load CAM command */
		SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);
		SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);

	netif_dbg(lp, ifup, dev, "%s: setting RCR=%x\n", __func__, rcr);

	SONIC_WRITE(SONIC_RCR, rcr);
/*
 * Initialize the SONIC ethernet controller.
 *
 * Full hardware bring-up: software reset, program the Receive Resource
 * Area (RRA), link the receive and transmit descriptor rings, load the
 * CAM with our station address, then enable the receiver and interrupts.
 */
static int sonic_init(struct net_device *dev)
	struct sonic_local *lp = netdev_priv(dev);

	/*
	 * put the Sonic into software-reset mode and
	 * disable all interrupts
	 */
	SONIC_WRITE(SONIC_IMR, 0);
	SONIC_WRITE(SONIC_ISR, 0x7fff);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);

	/*
	 * clear software reset flag, disable receiver, clear and
	 * enable interrupts, then completely initialize the SONIC
	 */
	SONIC_WRITE(SONIC_CMD, 0);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);

	/*
	 * initialize the receive resource area
	 */
	netif_dbg(lp, ifup, dev, "%s: initialize receive resource area\n",

	/* one RRA entry per pre-mapped rx buffer; addresses split 16/16 */
	for (i = 0; i < SONIC_NUM_RRS; i++) {
		u16 bufadr_l = (unsigned long)lp->rx_laddr[i] & 0xffff;
		u16 bufadr_h = (unsigned long)lp->rx_laddr[i] >> 16;
		sonic_rra_put(dev, i, SONIC_RR_BUFADR_L, bufadr_l);
		sonic_rra_put(dev, i, SONIC_RR_BUFADR_H, bufadr_h);
		/* buffer size is expressed in 16-bit words (hence >> 1) */
		sonic_rra_put(dev, i, SONIC_RR_BUFSIZE_L, SONIC_RBSIZE >> 1);
		sonic_rra_put(dev, i, SONIC_RR_BUFSIZE_H, 0);

	/* initialize all RRA registers */
	lp->rra_end = (lp->rra_laddr + SONIC_NUM_RRS * SIZEOF_SONIC_RR *
		       SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
	lp->cur_rwp = (lp->rra_laddr + (SONIC_NUM_RRS - 1) * SIZEOF_SONIC_RR *
		       SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;

	SONIC_WRITE(SONIC_RSA, lp->rra_laddr & 0xffff);
	SONIC_WRITE(SONIC_REA, lp->rra_end);
	SONIC_WRITE(SONIC_RRP, lp->rra_laddr & 0xffff);
	SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
	SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16);
	/* End Of Buffer Count, in words; bitmode adjusts the margin */
	SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1));

	/* load the resource pointers */
	netif_dbg(lp, ifup, dev, "%s: issuing RRRA command\n", __func__);

	SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA);
	/* poll until the chip clears the RRRA command bit */
	if (SONIC_READ(SONIC_CMD) & SONIC_CR_RRRA)

	netif_dbg(lp, ifup, dev, "%s: status=%x, i=%d\n", __func__,
		  SONIC_READ(SONIC_CMD), i);

	/*
	 * Initialize the receive descriptors so that they
	 * become a circular linked list, ie. let the last
	 * descriptor point to the first again.
	 */
	netif_dbg(lp, ifup, dev, "%s: initialize receive descriptors\n",

	for (i=0; i<SONIC_NUM_RDS; i++) {
		sonic_rda_put(dev, i, SONIC_RD_STATUS, 0);
		sonic_rda_put(dev, i, SONIC_RD_PKTLEN, 0);
		sonic_rda_put(dev, i, SONIC_RD_PKTPTR_L, 0);
		sonic_rda_put(dev, i, SONIC_RD_PKTPTR_H, 0);
		sonic_rda_put(dev, i, SONIC_RD_SEQNO, 0);
		sonic_rda_put(dev, i, SONIC_RD_IN_USE, 1);
		sonic_rda_put(dev, i, SONIC_RD_LINK,
			      ((i+1) * SIZEOF_SONIC_RD * SONIC_BUS_SCALE(lp->dma_bitmode)));

	/* fix last descriptor */
	sonic_rda_put(dev, SONIC_NUM_RDS - 1, SONIC_RD_LINK,
		      (lp->rda_laddr & 0xffff) | SONIC_EOL);
	lp->eol_rx = SONIC_NUM_RDS - 1;

	SONIC_WRITE(SONIC_URDA, lp->rda_laddr >> 16);
	SONIC_WRITE(SONIC_CRDA, lp->rda_laddr & 0xffff);

	/*
	 * initialize transmit descriptors
	 */
	netif_dbg(lp, ifup, dev, "%s: initialize transmit descriptors\n",

	for (i = 0; i < SONIC_NUM_TDS; i++) {
		sonic_tda_put(dev, i, SONIC_TD_STATUS, 0);
		sonic_tda_put(dev, i, SONIC_TD_CONFIG, 0);
		sonic_tda_put(dev, i, SONIC_TD_PKTSIZE, 0);
		sonic_tda_put(dev, i, SONIC_TD_FRAG_COUNT, 0);
		sonic_tda_put(dev, i, SONIC_TD_LINK,
			      (lp->tda_laddr & 0xffff) +
			      (i + 1) * SIZEOF_SONIC_TD * SONIC_BUS_SCALE(lp->dma_bitmode));
		lp->tx_skb[i] = NULL;

	/* fix last descriptor */
	sonic_tda_put(dev, SONIC_NUM_TDS - 1, SONIC_TD_LINK,
		      (lp->tda_laddr & 0xffff));

	SONIC_WRITE(SONIC_UTDA, lp->tda_laddr >> 16);
	SONIC_WRITE(SONIC_CTDA, lp->tda_laddr & 0xffff);
	lp->cur_tx = lp->next_tx = 0;
	lp->eol_tx = SONIC_NUM_TDS - 1;

	/*
	 * put our own address to CAM desc[0]
	 */
	sonic_cda_put(dev, 0, SONIC_CD_CAP0, dev->dev_addr[1] << 8 | dev->dev_addr[0]);
	sonic_cda_put(dev, 0, SONIC_CD_CAP1, dev->dev_addr[3] << 8 | dev->dev_addr[2]);
	sonic_cda_put(dev, 0, SONIC_CD_CAP2, dev->dev_addr[5] << 8 | dev->dev_addr[4]);
	sonic_set_cam_enable(dev, 1);

	for (i = 0; i < 16; i++)
		sonic_cda_put(dev, i, SONIC_CD_ENTRY_POINTER, i);

	/*
	 * initialize CAM registers
	 */
	SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);
	SONIC_WRITE(SONIC_CDC, 16);

	/* issue Load CAM and poll for completion via the LCD interrupt bit */
	SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
	if (SONIC_READ(SONIC_ISR) & SONIC_INT_LCD)

	netif_dbg(lp, ifup, dev, "%s: CMD=%x, ISR=%x, i=%d\n", __func__,
		  SONIC_READ(SONIC_CMD), SONIC_READ(SONIC_ISR), i);

	/*
	 * enable receiver, disable loopback
	 * and enable all interrupts
	 */
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN | SONIC_CR_STP);
	SONIC_WRITE(SONIC_RCR, SONIC_RCR_DEFAULT);
	SONIC_WRITE(SONIC_TCR, SONIC_TCR_DEFAULT);
	SONIC_WRITE(SONIC_ISR, 0x7fff);
	SONIC_WRITE(SONIC_IMR, SONIC_IMR_DEFAULT);

	/* sanity check: both RXEN and STP should read back as set */
	cmd = SONIC_READ(SONIC_CMD);
	if ((cmd & SONIC_CR_RXEN) == 0 || (cmd & SONIC_CR_STP) == 0)
		printk(KERN_ERR "sonic_init: failed, status=%x\n", cmd);

	netif_dbg(lp, ifup, dev, "%s: new status=%x\n", __func__,
		  SONIC_READ(SONIC_CMD));
/* Matches the SPDX tag at the top of the file (GPL-2.0-only). */
MODULE_LICENSE("GPL");