/*
 * Broadcom BCM7xxx System Port Ethernet MAC driver
 *
 * Copyright (C) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "bcmsysport.h"

/* I/O accessors register helpers */
#define BCM_SYSPORT_IO_MACRO(name, offset) \
static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off)	\
{									\
	u32 reg = __raw_readl(priv->base + offset + off);		\
	return reg;							\
}									\
static inline void name##_writel(struct bcm_sysport_priv *priv,	\
				  u32 val, u32 off)			\
{									\
	__raw_writel(val, priv->base + offset + off);			\
}

BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rdma, SYS_PORT_RDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);

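/* For illustration, BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET) above
 * expands to the equivalent of:
 *
 *	static inline u32 umac_readl(struct bcm_sysport_priv *priv, u32 off)
 *	{
 *		return __raw_readl(priv->base + SYS_PORT_UMAC_OFFSET + off);
 *	}
 *
 *	static inline void umac_writel(struct bcm_sysport_priv *priv,
 *				       u32 val, u32 off)
 *	{
 *		__raw_writel(val, priv->base + SYS_PORT_UMAC_OFFSET + off);
 *	}
 */
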
/* L2-interrupt masking/unmasking helpers, does automatic saving of the applied
 * mask in a software copy to avoid CPU_MASK_STATUS reads in hot-paths.
 */
#define BCM_SYSPORT_INTR_L2(which)	\
static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);	\
	priv->irq##which##_mask &= ~(mask);				\
}									\
static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET);	\
	priv->irq##which##_mask |= (mask);				\
}

BCM_SYSPORT_INTR_L2(0)
BCM_SYSPORT_INTR_L2(1)

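/* Each expansion above generates a pair of helpers; e.g.
 * intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE) writes the mask into the
 * level-2 controller CPU_MASK_SET register and mirrors it into
 * priv->irq0_mask, so hot paths can consult the cached copy instead of
 * reading CPU_MASK_STATUS back over the bus.
 */
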
/* Register accesses to GISB/RBUS registers are expensive (few hundred
 * nanoseconds), so keep the check for 64-bits explicit here to save
 * one register write per-packet on 32-bits platforms.
 */
static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
				     void __iomem *d,
				     dma_addr_t addr)
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	__raw_writel(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
		     d + DESC_ADDR_HI_STATUS_LEN);
#endif
	__raw_writel(lower_32_bits(addr), d + DESC_ADDR_LO);
}

static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
					     struct dma_desc *desc,
					     unsigned int port)
{
	/* Ports are latched, so write upper address first */
	tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port));
	tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port));
}

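/* dma_desc_set_addr() and tdma_port_write_desc_addr() cooperate: the upper
 * physical address bits share one 32-bit word with the status/length field
 * (DESC_ADDR_HI_STATUS_LEN), which is why 32-bit phys_addr_t builds can skip
 * the HI write entirely, and why the HI port word is pushed before the LO
 * word latches the whole descriptor into the ring.
 */
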
/* Ethtool operations */
static int bcm_sysport_set_settings(struct net_device *dev,
				    struct ethtool_cmd *cmd)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return phy_ethtool_sset(priv->phydev, cmd);
}

static int bcm_sysport_get_settings(struct net_device *dev,
				    struct ethtool_cmd *cmd)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return phy_ethtool_gset(priv->phydev, cmd);
}

static int bcm_sysport_set_rx_csum(struct net_device *dev,
				   netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
	reg = rxchk_readl(priv, RXCHK_CONTROL);
	if (priv->rx_chk_en)
		reg |= RXCHK_EN;
	else
		reg &= ~RXCHK_EN;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (priv->rx_chk_en && priv->crc_fwd)
		reg |= RXCHK_SKIP_FCS;
	else
		reg &= ~RXCHK_SKIP_FCS;

	rxchk_writel(priv, reg, RXCHK_CONTROL);

	return 0;
}

static int bcm_sysport_set_tx_csum(struct net_device *dev,
				   netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	/* Hardware transmit checksum requires us to enable the Transmit status
	 * block prepended to the packet contents
	 */
	priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->tsb_en)
		reg |= TSB_EN;
	else
		reg &= ~TSB_EN;
	tdma_writel(priv, reg, TDMA_CONTROL);

	return 0;
}

static int bcm_sysport_set_features(struct net_device *dev,
				    netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;
	netdev_features_t wanted = dev->wanted_features;
	int ret = 0;

	if (changed & NETIF_F_RXCSUM)
		ret = bcm_sysport_set_rx_csum(dev, wanted);
	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
		ret = bcm_sysport_set_tx_csum(dev, wanted);

	return ret;
}

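/* bcm_sysport_set_features() is the ndo_set_features hook, reached for
 * instance via "ethtool -K <iface> rx on|off" (NETIF_F_RXCSUM) or
 * "ethtool -K <iface> tx on|off" (NETIF_F_IP_CSUM/NETIF_F_IPV6_CSUM).
 */
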
/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
	STAT_NETDEV(rx_packets),
	STAT_NETDEV(tx_packets),
	STAT_NETDEV(rx_bytes),
	STAT_NETDEV(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_MIB_RX("rx_control", mib.rx.cf),
	STAT_MIB_RX("rx_pause", mib.rx.pf),
	STAT_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_MIB_RX("rx_align", mib.rx.aln),
	STAT_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_MIB_RX("rx_code", mib.rx.cde),
	STAT_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_MIB_TX("tx_pause", mib.tx.pf),
	STAT_MIB_TX("tx_control", mib.tx.cf),
	STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_MIB_TX("tx_defer", mib.tx.drf),
	STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_MIB_TX("tx_frags", mib.tx.frg),
	STAT_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* RXCHK misc statistics */
	STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
	STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
		   RXCHK_OTHER_DISC_CNTR),
	/* RBUF misc statistics */
	STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
	STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
};

#define BCM_SYSPORT_STATS_LEN	ARRAY_SIZE(bcm_sysport_gstrings_stats)

static void bcm_sysport_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, "0.1", sizeof(info->version));
	strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
	info->n_stats = BCM_SYSPORT_STATS_LEN;
}

static u32 bcm_sysport_get_msglvl(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	priv->msg_enable = enable;
}

static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCM_SYSPORT_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void bcm_sysport_get_strings(struct net_device *dev,
				    u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcm_sysport_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	default:
		break;
	}
}

static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		switch (s->type) {
		case BCM_SYSPORT_STAT_NETDEV:
			continue;
		case BCM_SYSPORT_STAT_MIB_RX:
		case BCM_SYSPORT_STAT_MIB_TX:
		case BCM_SYSPORT_STAT_RUNT:
			if (s->type != BCM_SYSPORT_STAT_MIB_RX)
				offset = UMAC_MIB_STAT_OFFSET;
			val = umac_readl(priv, UMAC_MIB_START + j + offset);
			break;
		case BCM_SYSPORT_STAT_RXCHK:
			val = rxchk_readl(priv, s->reg_offset);
			/* Clear the counter once it has been read */
			rxchk_writel(priv, 0, s->reg_offset);
			break;
		case BCM_SYSPORT_STAT_RBUF:
			val = rbuf_readl(priv, s->reg_offset);
			rbuf_writel(priv, 0, s->reg_offset);
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}

	netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
}

static void bcm_sysport_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	int i;

	if (netif_running(dev))
		bcm_sysport_update_mib_counters(priv);

	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		if (s->type == BCM_SYSPORT_STAT_NETDEV)
			p = (char *)&dev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		data[i] = *(unsigned long *)p;
	}
}

static void bcm_sysport_get_wol(struct net_device *dev,
				struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE;
	wol->wolopts = priv->wolopts;

	if (!(priv->wolopts & WAKE_MAGICSECURE))
		return;

	/* Return the programmed SecureOn password */
	reg = umac_readl(priv, UMAC_PSW_MS);
	put_unaligned_be16(reg, &wol->sopass[0]);
	reg = umac_readl(priv, UMAC_PSW_LS);
	put_unaligned_be32(reg, &wol->sopass[2]);
}

static int bcm_sysport_set_wol(struct net_device *dev,
			       struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE;

	if (!device_can_wakeup(kdev))
		return -ENOTSUPP;

	if (wol->wolopts & ~supported)
		return -EINVAL;

	/* Program the SecureOn password */
	if (wol->wolopts & WAKE_MAGICSECURE) {
		umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
			    UMAC_PSW_MS);
		umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
			    UMAC_PSW_LS);
	}

	/* Flag the device and relevant IRQ as wakeup capable */
	if (wol->wolopts) {
		device_set_wakeup_enable(kdev, 1);
		enable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 0;
	} else {
		device_set_wakeup_enable(kdev, 0);
		/* Avoid unbalanced disable_irq_wake calls */
		if (!priv->wol_irq_disabled)
			disable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 1;
	}

	priv->wolopts = wol->wolopts;

	return 0;
}

static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
{
	dev_kfree_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}

static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
				 struct bcm_sysport_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;
	dma_addr_t mapping;
	int ret;

	cb->skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
	if (!cb->skb) {
		netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
		return -ENOMEM;
	}

	mapping = dma_map_single(kdev, cb->skb->data,
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		bcm_sysport_free_cb(cb);
		netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
		return ret;
	}

	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_desc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);

	priv->rx_bd_assign_index++;
	priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);
	priv->rx_bd_assign_ptr = priv->rx_bds +
		(priv->rx_bd_assign_index * DESC_SIZE);

	netif_dbg(priv, rx_status, ndev, "RX refill\n");

	return 0;
}

static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	int ret = 0;
	unsigned int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[priv->rx_bd_assign_index];
		if (cb->skb)
			continue;

		ret = bcm_sysport_rx_refill(priv, cb);
		if (ret)
			break;
	}

	return ret;
}

/* Poll the hardware for up to budget packets to process */
static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
					unsigned int budget)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;
	unsigned int processed = 0, to_process;
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int p_index;
	u16 len, status;
	struct bcm_rsb *rsb;

	/* Determine how much we should process since last call */
	p_index = rdma_readl(priv, RDMA_PROD_INDEX);
	p_index &= RDMA_PROD_INDEX_MASK;

	if (p_index < priv->rx_c_index)
		to_process = (RDMA_CONS_INDEX_MASK + 1) -
			priv->rx_c_index + p_index;
	else
		to_process = p_index - priv->rx_c_index;

	netif_dbg(priv, rx_status, ndev,
		  "p_index=%d rx_c_index=%d to_process=%d\n",
		  p_index, priv->rx_c_index, to_process);

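	/* Worked example of the wrap-around math above: with a 16-bit index
	 * space (assuming RDMA_CONS_INDEX_MASK == 0xffff), rx_c_index ==
	 * 0xfffe and p_index == 0x0005 give to_process = 0x10000 - 0xfffe +
	 * 0x0005 = 7, i.e. the free-running producer index is simply compared
	 * modulo the index space.
	 */
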
	while ((processed < to_process) &&
	       (processed < budget)) {

		cb = &priv->rx_cbs[priv->rx_read_ptr];
		skb = cb->skb;
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);

		/* Extract the Receive Status Block prepended */
		rsb = (struct bcm_rsb *)skb->data;
		len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
		status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
			  DESC_STATUS_MASK;

		processed++;
		priv->rx_read_ptr++;
		if (priv->rx_read_ptr == priv->num_rx_bds)
			priv->rx_read_ptr = 0;

		netif_dbg(priv, rx_status, ndev,
			  "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
			  p_index, priv->rx_c_index, priv->rx_read_ptr,
			  len, status);

		if (unlikely(!skb)) {
			netif_err(priv, rx_err, ndev, "out of memory!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			goto refill;
		}

		if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
			netif_err(priv, rx_status, ndev, "fragmented packet!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			bcm_sysport_free_cb(cb);
			goto refill;
		}

		if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
			netif_err(priv, rx_err, ndev, "error packet\n");
			if (status & RX_STATUS_OVFLOW)
				ndev->stats.rx_over_errors++;
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			bcm_sysport_free_cb(cb);
			goto refill;
		}

		skb_put(skb, len);

		/* Hardware validated our checksum */
		if (likely(status & DESC_L4_CSUM))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* Hardware pre-pends packets with 2bytes before Ethernet
		 * header plus we have the Receive Status Block, strip off all
		 * of this from the SKB.
		 */
		skb_pull(skb, sizeof(*rsb) + 2);
		len -= (sizeof(*rsb) + 2);

		/* UniMAC may forward CRC */
		if (priv->crc_fwd) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		skb->protocol = eth_type_trans(skb, ndev);
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;

		napi_gro_receive(&priv->napi, skb);
refill:
		bcm_sysport_rx_refill(priv, cb);
	}

	return processed;
}

static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv,
				       struct bcm_sysport_cb *cb,
				       unsigned int *bytes_compl,
				       unsigned int *pkts_compl)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;

	if (cb->skb) {
		ndev->stats.tx_bytes += cb->skb->len;
		*bytes_compl += cb->skb->len;
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 dma_unmap_len(cb, dma_len),
				 DMA_TO_DEVICE);
		ndev->stats.tx_packets++;
		(*pkts_compl)++;
		bcm_sysport_free_cb(cb);
	/* SKB fragment */
	} else if (dma_unmap_addr(cb, dma_addr)) {
		ndev->stats.tx_bytes += dma_unmap_len(cb, dma_len);
		dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
			       dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}
}

/* Reclaim queued SKBs for transmission completion, lockless version */
static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_tx_ring *ring)
{
	struct net_device *ndev = priv->netdev;
	unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct bcm_sysport_cb *cb;
	struct netdev_queue *txq;
	u32 hw_ind;

	txq = netdev_get_tx_queue(ndev, ring->index);

	/* Compute how many descriptors have been processed since last call */
	hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
	c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
	ring->p_index = (hw_ind & RING_PROD_INDEX_MASK);

	last_c_index = ring->c_index;
	num_tx_cbs = ring->size;

	c_index &= (num_tx_cbs - 1);

	if (c_index >= last_c_index)
		last_tx_cn = c_index - last_c_index;
	else
		last_tx_cn = num_tx_cbs - last_c_index + c_index;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
		  ring->index, c_index, last_tx_cn, last_c_index);

	while (last_tx_cn-- > 0) {
		cb = ring->cbs + last_c_index;
		bcm_sysport_tx_reclaim_one(priv, cb, &bytes_compl, &pkts_compl);

		ring->desc_count++;
		last_c_index++;
		last_c_index &= (num_tx_cbs - 1);
	}

	ring->c_index = c_index;

	if (netif_tx_queue_stopped(txq) && pkts_compl)
		netif_tx_wake_queue(txq);

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
		  ring->index, ring->c_index, pkts_compl, bytes_compl);

	return pkts_compl;
}

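/* In __bcm_sysport_tx_reclaim() the hardware consumer index is free-running
 * over RING_CONS_INDEX_MASK and gets folded onto the ring size
 * (c_index &= num_tx_cbs - 1). For example, with a 256-entry ring,
 * last_c_index == 250 and c_index == 4 reclaim 256 - 250 + 4 = 10 completed
 * descriptors.
 */
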
/* Locked version of the per-ring TX reclaim routine */
static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					   struct bcm_sysport_tx_ring *ring)
{
	unsigned int released;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcm_sysport_tx_reclaim(priv, ring);
	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}

static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_tx_ring *ring =
		container_of(napi, struct bcm_sysport_tx_ring, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_tx_reclaim(ring->priv, ring);

	if (work_done < budget) {
		napi_complete(napi);
		/* re-enable TX interrupt */
		intrl2_1_mask_clear(ring->priv, BIT(ring->index));
	}

	return work_done;
}

static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
{
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++)
		bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
}

static int bcm_sysport_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_priv *priv =
		container_of(napi, struct bcm_sysport_priv, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_desc_rx(priv, budget);

	priv->rx_c_index += work_done;
	priv->rx_c_index &= RDMA_CONS_INDEX_MASK;
	rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);

	if (work_done < budget) {
		napi_complete(napi);
		/* re-enable RX interrupts */
		intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
	}

	return work_done;
}

static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
	u32 reg;

	/* Stop monitoring MPD interrupt */
	intrl2_0_mask_set(priv, INTRL2_0_MPD);

	/* Clear the MagicPacket detection logic */
	reg = umac_readl(priv, UMAC_MPD_CTRL);
	reg &= ~MPD_EN;
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
}

/* RX and misc interrupt routine */
static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq0_stat == 0)) {
		netdev_warn(priv->netdev, "spurious RX interrupt\n");
		return IRQ_NONE;
	}

	if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			/* disable RX interrupts */
			intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
			__napi_schedule(&priv->napi);
		}
	}

	/* TX ring is full, perform a full reclaim since we do not know
	 * which one would trigger this interrupt
	 */
	if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
		bcm_sysport_tx_reclaim_all(priv);

	if (priv->irq0_stat & INTRL2_0_MPD) {
		netdev_info(priv->netdev, "Wake-on-LAN interrupt!\n");
		bcm_sysport_resume_from_wol(priv);
	}

	return IRQ_HANDLED;
}

/* TX interrupt service routine */
static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring;

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq1_stat == 0)) {
		netdev_warn(priv->netdev, "spurious TX interrupt\n");
		return IRQ_NONE;
	}

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		if (!(priv->irq1_stat & BIT(ring)))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_1_mask_set(priv, BIT(ring));
			__napi_schedule(&txr->napi);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
{
	struct bcm_sysport_priv *priv = dev_id;

	pm_wakeup_event(&priv->pdev->dev, 0);

	return IRQ_HANDLED;
}

static int bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *nskb;
	struct bcm_tsb *tsb;
	u32 csum_info;
	u8 ip_proto;
	u16 csum_start;
	u16 ip_ver;

	/* Re-allocate SKB if needed */
	if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
		nskb = skb_realloc_headroom(skb, sizeof(*tsb));
		if (!nskb) {
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;
			return -ENOMEM;
		}
		dev_kfree_skb(skb);
		skb = nskb;
	}

	tsb = (struct bcm_tsb *)skb_push(skb, sizeof(*tsb));
	/* Zero-out TSB by default */
	memset(tsb, 0, sizeof(*tsb));

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = htons(skb->protocol);
		switch (ip_ver) {
		case ETH_P_IP:
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case ETH_P_IPV6:
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return 0;
		}

		/* Get the checksum offset and the L4 (transport) offset */
		csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
		csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
		csum_info |= (csum_start << L4_PTR_SHIFT);

		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			csum_info |= L4_LENGTH_VALID;
			if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
				csum_info |= L4_UDP;
		} else
			csum_info = 0;

		tsb->l4_ptr_dest_map = csum_info;
	}

	return 0;
}

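/* Rough shape of the csum_info word built above (exact bit positions live in
 * bcmsysport.h): the L4_CSUM_PTR field carries the offset at which hardware
 * must store the computed checksum (csum_start + skb->csum_offset), the
 * L4_PTR field the start of the transport header, while L4_LENGTH_VALID and
 * L4_UDP qualify TCP/UDP packets so the engine patches the right header.
 */
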
static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct bcm_sysport_tx_ring *ring;
	struct bcm_sysport_cb *cb;
	struct netdev_queue *txq;
	struct dma_desc *desc;
	unsigned int skb_len;
	unsigned long flags;
	dma_addr_t mapping;
	u32 len_status;
	u16 queue;
	int ret;

	queue = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, queue);
	ring = &priv->tx_rings[queue];

	/* lock against tx reclaim in BH context and TX ring full interrupt */
	spin_lock_irqsave(&ring->lock, flags);
	if (unlikely(ring->desc_count == 0)) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "queue %d awake and ring full!\n", queue);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* Insert TSB and checksum infos */
	if (priv->tsb_en) {
		ret = bcm_sysport_insert_tsb(skb, dev);
		if (ret) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	/* The Ethernet switch we are interfaced with needs packets to be at
	 * least 64 bytes (including FCS) otherwise they will be discarded when
	 * they enter the switch port logic. When Broadcom tags are enabled, we
	 * need to make sure that packets are at least 68 bytes
	 * (including FCS and tag) because the length verification is done after
	 * the Broadcom tag is stripped off the ingress packet.
	 */
	if (skb_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	skb_len = skb->len < ETH_ZLEN + ENET_BRCM_TAG_LEN ?
			ETH_ZLEN + ENET_BRCM_TAG_LEN : skb->len;

	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
			  skb->data, skb_len);
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* Remember the SKB for future freeing */
	cb = &ring->cbs[ring->curr_desc];
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_unmap_len_set(cb, dma_len, skb_len);

	/* Fetch a descriptor entry from our pool */
	desc = ring->desc_cpu;

	desc->addr_lo = lower_32_bits(mapping);
	len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
	len_status |= (skb_len << DESC_LEN_SHIFT);
	len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
		       DESC_STATUS_SHIFT;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);

	ring->curr_desc++;
	if (ring->curr_desc == ring->size)
		ring->curr_desc = 0;
	ring->desc_count--;

	/* Ensure write completion of the descriptor status/length
	 * in DRAM before the System Port WRITE_PORT register latches
	 * the value
	 */
	wmb();
	desc->addr_status_len = len_status;
	wmb();

	/* Write this descriptor address to the RING write port */
	tdma_port_write_desc_addr(priv, desc, ring->index);

	/* Check ring space and update SW control flow */
	if (ring->desc_count == 0)
		netif_tx_stop_queue(txq);

	netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
		  ring->index, ring->desc_count, ring->curr_desc);

	ret = NETDEV_TX_OK;
out:
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}

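/* Note that the transmit path above consumes exactly one descriptor per
 * packet: the entire linear SKB is mapped with dma_map_single() and described
 * by a single addr_lo/len_status pair, which is consistent with NETIF_F_SG
 * not being advertised in bcm_sysport_probe().
 */
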
static void bcm_sysport_tx_timeout(struct net_device *dev)
{
	netdev_warn(dev, "transmit timeout!\n");

	dev->trans_start = jiffies;
	dev->stats.tx_errors++;

	netif_tx_wake_all_queues(dev);
}

/* phylib adjust link callback */
static void bcm_sysport_adj_link(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;
	unsigned int changed = 0;
	u32 cmd_bits = 0, reg;

	if (priv->old_link != phydev->link) {
		changed = 1;
		priv->old_link = phydev->link;
	}

	if (priv->old_duplex != phydev->duplex) {
		changed = 1;
		priv->old_duplex = phydev->duplex;
	}

	switch (phydev->speed) {
	case SPEED_2500:
		cmd_bits = CMD_SPEED_2500;
		break;
	case SPEED_1000:
		cmd_bits = CMD_SPEED_1000;
		break;
	case SPEED_100:
		cmd_bits = CMD_SPEED_100;
		break;
	case SPEED_10:
		cmd_bits = CMD_SPEED_10;
		break;
	default:
		break;
	}
	cmd_bits <<= CMD_SPEED_SHIFT;

	if (phydev->duplex == DUPLEX_HALF)
		cmd_bits |= CMD_HD_EN;

	if (priv->old_pause != phydev->pause) {
		changed = 1;
		priv->old_pause = phydev->pause;
	}

	if (!phydev->pause)
		cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;

	if (changed) {
		reg = umac_readl(priv, UMAC_CMD);
		reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
			CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
			CMD_TX_PAUSE_IGNORE);
		reg |= cmd_bits;
		umac_writel(priv, reg, UMAC_CMD);

		phy_print_status(priv->phydev);
	}
}

static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
				    unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	struct device *kdev = &priv->pdev->dev;
	size_t size;
	void *p = NULL;
	u32 reg;

	/* Simple descriptors partitioning for now */
	size = 256;

	/* We just need one DMA descriptor which is DMA-able, since writing to
	 * the port will allocate a new descriptor in its internal linked-list
	 */
	p = dma_zalloc_coherent(kdev, 1, &ring->desc_dma, GFP_KERNEL);
	if (!p) {
		netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
		return -ENOMEM;
	}

	ring->cbs = kzalloc(sizeof(struct bcm_sysport_cb) * size, GFP_KERNEL);
	if (!ring->cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	/* Initialize SW view of the ring */
	spin_lock_init(&ring->lock);
	ring->priv = priv;
	netif_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
	ring->index = index;
	ring->size = size;
	ring->alloc_size = ring->size;
	ring->desc_cpu = p;
	ring->desc_count = ring->size;
	ring->curr_desc = 0;

	/* Initialize HW ring */
	tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
	tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
	tdma_writel(priv, RING_IGNORE_STATUS, TDMA_DESC_RING_MAPPING(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));

	/* Program the number of descriptors as MAX_THRESHOLD and half of
	 * its size for the hysteresis trigger
	 */
	tdma_writel(priv, ring->size |
			1 << RING_HYST_THRESH_SHIFT,
			TDMA_DESC_RING_MAX_HYST(index));

	/* Enable the ring queue in the arbiter */
	reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
	reg |= (1 << index);
	tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);

	napi_enable(&ring->napi);

	netif_dbg(priv, hw, priv->netdev,
		  "TDMA cfg, size=%d, desc_cpu=%p\n",
		  ring->size, ring->desc_cpu);

	return 0;
}

static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
				     unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	struct device *kdev = &priv->pdev->dev;
	u32 reg;

	/* Caller should stop the TDMA engine */
	reg = tdma_readl(priv, TDMA_STATUS);
	if (!(reg & TDMA_DISABLED))
		netdev_warn(priv->netdev, "TDMA not stopped!\n");

	napi_disable(&ring->napi);
	netif_napi_del(&ring->napi);

	bcm_sysport_tx_reclaim(priv, ring);

	kfree(ring->cbs);
	ring->cbs = NULL;

	if (ring->desc_dma) {
		dma_free_coherent(kdev, 1, ring->desc_cpu, ring->desc_dma);
		ring->desc_dma = 0;
	}
	ring->size = 0;
	ring->alloc_size = 0;

	netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
}

/* RDMA helper */
static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = rdma_readl(priv, RDMA_CONTROL);
	if (enable)
		reg |= RDMA_EN;
	else
		reg &= ~RDMA_EN;
	rdma_writel(priv, reg, RDMA_CONTROL);

	/* Poll for RDMA disabling completion */
	do {
		reg = rdma_readl(priv, RDMA_STATUS);
		if (!!(reg & RDMA_DISABLED) == !enable)
			return 0;
		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");

	return -ETIMEDOUT;
}

/* TDMA helper */
static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = tdma_readl(priv, TDMA_CONTROL);
	if (enable)
		reg |= TDMA_EN;
	else
		reg &= ~TDMA_EN;
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Poll for TDMA disabling completion */
	do {
		reg = tdma_readl(priv, TDMA_STATUS);
		if (!!(reg & TDMA_DISABLED) == !enable)
			return 0;

		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");

	return -ETIMEDOUT;
}

static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
{
	u32 reg;
	int ret;

	/* Initialize SW view of the RX ring */
	priv->num_rx_bds = NUM_RX_DESC;
	priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
	priv->rx_bd_assign_ptr = priv->rx_bds;
	priv->rx_bd_assign_index = 0;
	priv->rx_c_index = 0;
	priv->rx_read_ptr = 0;
	priv->rx_cbs = kzalloc(priv->num_rx_bds *
				sizeof(struct bcm_sysport_cb), GFP_KERNEL);
	if (!priv->rx_cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	ret = bcm_sysport_alloc_rx_bufs(priv);
	if (ret) {
		netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
		return ret;
	}

	/* Initialize HW, ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		rdma_enable_set(priv, 0);

	rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
	rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
	rdma_writel(priv, 0, RDMA_PROD_INDEX);
	rdma_writel(priv, 0, RDMA_CONS_INDEX);
	rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
			  RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
	/* Operate the queue in ring mode */
	rdma_writel(priv, 0, RDMA_START_ADDR_HI);
	rdma_writel(priv, 0, RDMA_START_ADDR_LO);
	rdma_writel(priv, 0, RDMA_END_ADDR_HI);
	rdma_writel(priv, NUM_HW_RX_DESC_WORDS - 1, RDMA_END_ADDR_LO);

	rdma_writel(priv, 1, RDMA_MBDONE_INTR);

	netif_dbg(priv, hw, priv->netdev,
		  "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
		  priv->num_rx_bds, priv->rx_bds);

	return 0;
}

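/* RDMA runs in ring mode here: START/END_ADDR bracket the on-chip descriptor
 * words (NUM_HW_RX_DESC_WORDS) and software only moves RDMA_PROD_INDEX and
 * RDMA_CONS_INDEX, while bcm_sysport_rx_refill() rewrites each descriptor
 * address in place as buffers are recycled.
 */
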
static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	unsigned int i;
	u32 reg;

	/* Caller should ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		netdev_warn(priv->netdev, "RDMA not stopped!\n");

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		if (dma_unmap_addr(cb, dma_addr))
			dma_unmap_single(&priv->pdev->dev,
					 dma_unmap_addr(cb, dma_addr),
					 RX_BUF_LENGTH, DMA_FROM_DEVICE);
		bcm_sysport_free_cb(cb);
	}

	kfree(priv->rx_cbs);
	priv->rx_cbs = NULL;

	netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
}

static void bcm_sysport_set_rx_mode(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	reg = umac_readl(priv, UMAC_CMD);
	if (dev->flags & IFF_PROMISC)
		reg |= CMD_PROMISC;
	else
		reg &= ~CMD_PROMISC;
	umac_writel(priv, reg, UMAC_CMD);

	/* No support for ALLMULTI */
	if (dev->flags & IFF_ALLMULTI)
		return;
}

static inline void umac_enable_set(struct bcm_sysport_priv *priv,
				   u32 mask, unsigned int enable)
{
	u32 reg;

	reg = umac_readl(priv, UMAC_CMD);
	if (enable)
		reg |= mask;
	else
		reg &= ~mask;
	umac_writel(priv, reg, UMAC_CMD);

	/* UniMAC stops on a packet boundary, wait for a full-sized packet
	 * to be processed (1 msec).
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}

static inline int umac_reset(struct bcm_sysport_priv *priv)
{
	unsigned int timeout = 0;
	u32 reg;
	int ret = 0;

	umac_writel(priv, 0, UMAC_CMD);
	while (timeout++ < 1000) {
		reg = umac_readl(priv, UMAC_CMD);
		if (!(reg & CMD_SW_RESET))
			break;

		udelay(1);
	}

	if (timeout == 1000) {
		dev_err(&priv->pdev->dev,
			"timeout waiting for MAC to come out of reset\n");
		ret = -ETIMEDOUT;
	}

	return ret;
}

static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
			     unsigned char *addr)
{
	umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | addr[3], UMAC_MAC0);
	umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
}

static void topctrl_flush(struct bcm_sysport_priv *priv)
{
	topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
	mdelay(1);
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);
}

static void bcm_sysport_netif_start(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* Enable NAPI */
	napi_enable(&priv->napi);

	phy_start(priv->phydev);

	/* Enable TX interrupts for the 32 TXQs */
	intrl2_1_mask_clear(priv, 0xffffffff);

	/* Last call before we start the real business */
	netif_tx_start_all_queues(dev);
}

static void rbuf_init(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = rbuf_readl(priv, RBUF_CONTROL);
	reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
	rbuf_writel(priv, reg, RBUF_CONTROL);
}

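/* With RBUF_RSB_EN set, every ingress frame is prepended with the
 * struct bcm_rsb status block that bcm_sysport_desc_rx() parses for length,
 * SOP/EOP and checksum status; RBUF_4B_ALGN additionally requests alignment
 * of the received data.
 */
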
static int bcm_sysport_open(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret;

	/* Reset UniMAC */
	ret = umac_reset(priv);
	if (ret) {
		netdev_err(dev, "UniMAC reset failed\n");
		return ret;
	}

	/* Flush TX and RX FIFOs at TOPCTRL level */
	topctrl_flush(priv);

	/* Disable the UniMAC RX/TX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);

	/* Enable RBUF 2bytes alignment and Receive Status Block */
	rbuf_init(priv);

	/* Set maximum frame length */
	umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	/* Read CRC forward */
	priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);

	priv->phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
					0, priv->phy_interface);
	if (!priv->phydev) {
		netdev_err(dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	/* Reset house keeping link status */
	priv->old_duplex = -1;
	priv->old_link = -1;
	priv->old_pause = -1;

	/* mask all interrupts and request them */
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);

	ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
	if (ret) {
		netdev_err(dev, "failed to request RX interrupt\n");
		goto out_phy_disconnect;
	}

	ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0, dev->name, dev);
	if (ret) {
		netdev_err(dev, "failed to request TX interrupt\n");
		goto out_free_irq0;
	}

	/* Initialize both hardware and software ring */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
					i);
			goto out_free_tx_ring;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	/* Turn on RDMA */
	ret = rdma_enable_set(priv, 1);
	if (ret)
		goto out_free_rx_ring;

	/* Enable RX interrupt and TX ring full interrupt */
	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);

	/* Turn on TDMA */
	ret = tdma_enable_set(priv, 1);
	if (ret)
		goto out_clear_rx_int;

	/* Turn on UniMAC TX/RX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1);

	bcm_sysport_netif_start(dev);

	return 0;

out_clear_rx_int:
	intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_ring:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	free_irq(priv->irq1, dev);
out_free_irq0:
	free_irq(priv->irq0, dev);
out_phy_disconnect:
	phy_disconnect(priv->phydev);
	return ret;
}

static void bcm_sysport_netif_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* stop all software from updating hardware */
	netif_tx_stop_all_queues(dev);
	napi_disable(&priv->napi);
	phy_stop(priv->phydev);

	/* mask all interrupts */
	intrl2_0_mask_set(priv, 0xffffffff);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_1_mask_set(priv, 0xffffffff);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
}

static int bcm_sysport_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret;

	bcm_sysport_netif_stop(dev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling TDMA\n");
		return ret;
	}

	/* Wait for a maximum packet size to be drained */
	usleep_range(2000, 3000);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling RDMA\n");
		return ret;
	}

	/* Disable UniMAC TX */
	umac_enable_set(priv, CMD_TX_EN, 0);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	free_irq(priv->irq0, dev);
	free_irq(priv->irq1, dev);

	/* Disconnect from PHY */
	phy_disconnect(priv->phydev);

	return 0;
}

static const struct ethtool_ops bcm_sysport_ethtool_ops = {
	.get_settings		= bcm_sysport_get_settings,
	.set_settings		= bcm_sysport_set_settings,
	.get_drvinfo		= bcm_sysport_get_drvinfo,
	.get_msglevel		= bcm_sysport_get_msglvl,
	.set_msglevel		= bcm_sysport_set_msglvl,
	.get_link		= ethtool_op_get_link,
	.get_strings		= bcm_sysport_get_strings,
	.get_ethtool_stats	= bcm_sysport_get_stats,
	.get_sset_count		= bcm_sysport_get_sset_count,
	.get_wol		= bcm_sysport_get_wol,
	.set_wol		= bcm_sysport_set_wol,
};

static const struct net_device_ops bcm_sysport_netdev_ops = {
	.ndo_start_xmit		= bcm_sysport_xmit,
	.ndo_tx_timeout		= bcm_sysport_tx_timeout,
	.ndo_open		= bcm_sysport_open,
	.ndo_stop		= bcm_sysport_stop,
	.ndo_set_features	= bcm_sysport_set_features,
	.ndo_set_rx_mode	= bcm_sysport_set_rx_mode,
};

#define REV_FMT	"v%2x.%02x"

static int bcm_sysport_probe(struct platform_device *pdev)
{
	struct bcm_sysport_priv *priv;
	struct device_node *dn;
	struct net_device *dev;
	const void *macaddr;
	struct resource *r;
	u32 txq, rxq;
	int ret;

	dn = pdev->dev.of_node;
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	/* Read the Transmit/Receive Queue properties */
	if (of_property_read_u32(dn, "systemport,num-txq", &txq))
		txq = TDMA_NUM_RINGS;
	if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
		rxq = 1;

	dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
	if (!dev)
		return -ENOMEM;

	/* Initialize private members */
	priv = netdev_priv(dev);

	priv->irq0 = platform_get_irq(pdev, 0);
	priv->irq1 = platform_get_irq(pdev, 1);
	priv->wol_irq = platform_get_irq(pdev, 2);
	if (priv->irq0 <= 0 || priv->irq1 <= 0) {
		dev_err(&pdev->dev, "invalid interrupts\n");
		ret = -EINVAL;
		goto err;
	}

	priv->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto err;
	}

	priv->netdev = dev;
	priv->pdev = pdev;

	priv->phy_interface = of_get_phy_mode(dn);
	/* Default to GMII interface mode */
	if (priv->phy_interface < 0)
		priv->phy_interface = PHY_INTERFACE_MODE_GMII;

	/* In the case of a fixed PHY, the DT node associated
	 * to the PHY is the Ethernet MAC DT node.
	 */
	if (of_phy_is_fixed_link(dn)) {
		ret = of_phy_register_fixed_link(dn);
		if (ret) {
			dev_err(&pdev->dev, "failed to register fixed PHY\n");
			goto err;
		}

		priv->phy_dn = dn;
	}

	/* Initialize netdevice members */
	macaddr = of_get_mac_address(dn);
	if (!macaddr || !is_valid_ether_addr(macaddr)) {
		dev_warn(&pdev->dev, "using random Ethernet MAC\n");
		random_ether_addr(dev->dev_addr);
	} else {
		ether_addr_copy(dev->dev_addr, macaddr);
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev_set_drvdata(&pdev->dev, dev);
	dev->ethtool_ops = &bcm_sysport_ethtool_ops;
	dev->netdev_ops = &bcm_sysport_netdev_ops;
	netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);

	/* HW supported features, none enabled by default */
	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
				NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* Request the WOL interrupt and advertise suspend if available */
	priv->wol_irq_disabled = 1;
	ret = devm_request_irq(&pdev->dev, priv->wol_irq,
			       bcm_sysport_wol_isr, 0, dev->name, priv);
	if (!ret)
		device_set_wakeup_capable(&pdev->dev, 1);

	/* Set the needed headroom once and for all */
	BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
	dev->needed_headroom += sizeof(struct bcm_tsb);

	/* We are interfaced to a switch which handles the multicast
	 * filtering for us, so we do not support programming any
	 * multicast hash table in this Ethernet MAC.
	 */
	dev->flags &= ~IFF_MULTICAST;

	/* libphy will adjust the link state accordingly */
	netif_carrier_off(dev);

	ret = register_netdev(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register net_device\n");
		goto err;
	}

	priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
	dev_info(&pdev->dev,
		 "Broadcom SYSTEMPORT" REV_FMT
		 " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
		 (priv->rev >> 8) & 0xff, priv->rev & 0xff,
		 priv->base, priv->irq0, priv->irq1, txq, rxq);

	return 0;
err:
	free_netdev(dev);
	return ret;
}

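/* A matching Device Tree node would look roughly like this (the unit
 * address, reg window and interrupt specifiers below are purely
 * illustrative):
 *
 *	ethernet@f04a0000 {
 *		compatible = "brcm,systemport-v1.00";
 *		reg = <0xf04a0000 0x4650>;
 *		interrupts = <0x0 0x18 0x0>, <0x0 0x19 0x0>, <0x0 0x1a 0x0>;
 *		systemport,num-txq = <16>;
 *		systemport,num-rxq = <1>;
 *	};
 */
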
static int bcm_sysport_remove(struct platform_device *pdev)
{
	struct net_device *dev = dev_get_drvdata(&pdev->dev);

	/* Not much to do, ndo_close has been called
	 * and we use managed allocations
	 */
	unregister_netdev(dev);
	free_netdev(dev);
	dev_set_drvdata(&pdev->dev, NULL);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
{
	struct net_device *ndev = priv->netdev;
	unsigned int timeout = 1000;
	u32 reg;

	/* Password has already been programmed */
	reg = umac_readl(priv, UMAC_MPD_CTRL);
	reg |= MPD_EN;
	reg &= ~PSW_EN;
	if (priv->wolopts & WAKE_MAGICSECURE)
		reg |= PSW_EN;
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	/* Make sure RBUF entered WoL mode as result */
	do {
		reg = rbuf_readl(priv, RBUF_STATUS);
		if (reg & RBUF_WOL_MODE)
			break;

		udelay(10);
	} while (timeout-- > 0);

	/* Do not leave the UniMAC RBUF matching only MPD packets */
	if (!timeout) {
		reg = umac_readl(priv, UMAC_MPD_CTRL);
		reg &= ~MPD_EN;
		umac_writel(priv, reg, UMAC_MPD_CTRL);
		netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
		return -ETIMEDOUT;
	}

	/* UniMAC receive needs to be turned on */
	umac_enable_set(priv, CMD_RX_EN, 1);

	/* Enable the interrupt wake-up source */
	intrl2_0_mask_clear(priv, INTRL2_0_MPD);

	netif_dbg(priv, wol, ndev, "entered WOL mode\n");

	return 0;
}

static int bcm_sysport_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret = 0;
	u32 reg;

	if (!netif_running(dev))
		return 0;

	bcm_sysport_netif_stop(dev);

	phy_suspend(priv->phydev);

	netif_device_detach(dev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "RDMA timeout!\n");
		return ret;
	}

	/* Disable RXCHK if enabled */
	if (priv->rx_chk_en) {
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg &= ~RXCHK_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	/* Flush RX pipe */
	if (!priv->wolopts)
		topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		return ret;
	}

	/* Wait for a packet boundary */
	usleep_range(2000, 3000);

	umac_enable_set(priv, CMD_TX_EN, 0);

	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	/* Get prepared for Wake-on-LAN */
	if (device_may_wakeup(d) && priv->wolopts)
		ret = bcm_sysport_suspend_to_wol(priv);

	return ret;
}

static int bcm_sysport_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg;
	int ret;

	if (!netif_running(dev))
		return 0;

	/* We may have been suspended and never received a WOL event that
	 * would turn off MPD detection, take care of that now
	 */
	bcm_sysport_resume_from_wol(priv);

	/* Initialize both hardware and software ring */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
					i);
			goto out_free_tx_rings;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	netif_device_attach(dev);

	/* Enable RX interrupt and TX ring full interrupt */
	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);

	/* RX pipe enable */
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);

	ret = rdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "failed to enable RDMA\n");
		goto out_free_rx_ring;
	}

	/* Enable RXCHK */
	if (priv->rx_chk_en) {
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg |= RXCHK_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	rbuf_init(priv);

	/* Set maximum frame length */
	umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	umac_enable_set(priv, CMD_RX_EN, 1);

	/* TX pipe enable */
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);

	umac_enable_set(priv, CMD_TX_EN, 1);

	ret = tdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		goto out_free_rx_ring;
	}

	phy_resume(priv->phydev);

	bcm_sysport_netif_start(dev);

	return 0;

out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_rings:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	return ret;
}
#endif

static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
				bcm_sysport_suspend, bcm_sysport_resume);

static const struct of_device_id bcm_sysport_of_match[] = {
	{ .compatible = "brcm,systemport-v1.00" },
	{ .compatible = "brcm,systemport" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);

static struct platform_driver bcm_sysport_driver = {
	.probe	= bcm_sysport_probe,
	.remove	= bcm_sysport_remove,
	.driver =  {
		.name = "brcm-systemport",
		.owner = THIS_MODULE,
		.of_match_table = bcm_sysport_of_match,
		.pm = &bcm_sysport_pm_ops,
	},
};
module_platform_driver(bcm_sysport_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
MODULE_ALIAS("platform:brcm-systemport");
MODULE_LICENSE("GPL");