/*
 * Broadcom GENET (Gigabit Ethernet) controller driver
 *
 * Copyright (c) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt)				"bcmgenet: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <net/arp.h>

#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/phy.h>
#include <linux/platform_data/bcmgenet.h>

#include <asm/unaligned.h>

#include "bcmgenet.h"
/* Maximum number of hardware queues, downsized if needed */
#define GENET_MAX_MQ_CNT	4

/* Default highest priority queue for multi queue support */
#define GENET_Q0_PRIORITY	0

#define GENET_Q16_RX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
#define GENET_Q16_TX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)
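/* Worked example (values hedged, not taken from this file): on a part with
 * TOTAL_DESC == 256, tx_queues == 4 and tx_bds_per_q == 32, the default
 * queue is left with
 *
 *	GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128
 *
 * descriptors, matching the partitioning described in the comment above
 * bcmgenet_init_tx_queues() below.
 */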
#define RX_BUF_LENGTH		2048
#define SKB_ALIGNMENT		32

/* Tx/Rx DMA register offset, skip 256 descriptors */
#define WORDS_PER_BD(p)		(p->hw_params->words_per_bd)
#define DMA_DESC_SIZE		(WORDS_PER_BD(priv) * sizeof(u32))

#define GENET_TDMA_REG_OFF	(priv->hw_params->tdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)

#define GENET_RDMA_REG_OFF	(priv->hw_params->rdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)
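/* Layout sketch (hedged example, assuming 256 descriptors of 2 words /
 * 8 bytes each; parts with 40-bit addressing use more words per BD):
 *
 *	tdma_offset + 0x000 : TX descriptor 0
 *	...
 *	tdma_offset + 0x7f8 : TX descriptor 255
 *	GENET_TDMA_REG_OFF  : per-ring register blocks (DMA_RING_SIZE each),
 *	                      followed at +DMA_RINGS_SIZE by the common
 *	                      TDMA registers used by bcmgenet_tdma_readl()
 *
 * and likewise for the RDMA side.
 */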
static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
					     void __iomem *d, u32 value)
{
	__raw_writel(value, d + DMA_DESC_LENGTH_STATUS);
}

static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
					    void __iomem *d)
{
	return __raw_readl(d + DMA_DESC_LENGTH_STATUS);
}

static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
				    void __iomem *d,
				    dma_addr_t addr)
{
	__raw_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);

	/* Register writes to the GISB bus can take a couple hundred
	 * nanoseconds and are done for each packet; skip these expensive
	 * writes unless the platform is explicitly configured for
	 * 64-bit/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		__raw_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
#endif
}

/* Combined address + length/status setter */
static inline void dmadesc_set(struct bcmgenet_priv *priv,
			       void __iomem *d, dma_addr_t addr, u32 val)
{
	dmadesc_set_length_status(priv, d, val);
	dmadesc_set_addr(priv, d, addr);
}

static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
					  void __iomem *d)
{
	dma_addr_t addr;

	addr = __raw_readl(d + DMA_DESC_ADDRESS_LO);

	/* Register reads from the GISB bus can take a couple hundred
	 * nanoseconds and are done for each packet; skip these expensive
	 * reads unless the platform is explicitly configured for
	 * 64-bit/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		addr |= (u64)__raw_readl(d + DMA_DESC_ADDRESS_HI) << 32;
#endif
	return addr;
}
#define GENET_VER_FMT	"%1d.%1d EPHY: 0x%04x"

#define GENET_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				NETIF_MSG_LINK)
static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
	else
		return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
}

static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
	else
		bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
}

/* These macros are defined to deal with the register map change
 * between GENET1.1 and GENET2. Only the registers currently in use
 * by the driver are defined.
 */
static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
	else
		return __raw_readl(priv->base +
				   priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
	else
		__raw_writel(val, priv->base +
			     priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
	else
		return __raw_readl(priv->base +
				   priv->hw_params->tbuf_offset + TBUF_BP_MC);
}

static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
	else
		__raw_writel(val, priv->base +
			     priv->hw_params->tbuf_offset + TBUF_BP_MC);
}
/* RX/TX DMA register accessors */
enum dma_reg {
	DMA_RING_CFG = 0,
	DMA_CTRL,
	DMA_STATUS,
	DMA_SCB_BURST_SIZE,
	DMA_ARB_CTRL,
	DMA_PRIORITY_0,
	DMA_PRIORITY_1,
	DMA_PRIORITY_2,
};

static const u8 bcmgenet_dma_regs_v3plus[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x2C,
	[DMA_PRIORITY_0]	= 0x30,
	[DMA_PRIORITY_1]	= 0x34,
	[DMA_PRIORITY_2]	= 0x38,
};

static const u8 bcmgenet_dma_regs_v2[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
};

static const u8 bcmgenet_dma_regs_v1[] = {
	[DMA_CTRL]		= 0x00,
	[DMA_STATUS]		= 0x04,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
};

/* Set at runtime once bcmgenet version is known */
static const u8 *bcmgenet_dma_regs;
static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
{
	return netdev_priv(dev_get_drvdata(dev));
}

static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
			   DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
		     DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
			   DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
		     DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}
/* RDMA/TDMA ring registers and accessors
 * we merge the common fields and just prefix with T/D the registers
 * having different meaning depending on the direction
 */
enum dma_ring_reg {
	TDMA_READ_PTR = 0,
	RDMA_WRITE_PTR = TDMA_READ_PTR,
	TDMA_READ_PTR_HI,
	RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
	TDMA_CONS_INDEX,
	RDMA_PROD_INDEX = TDMA_CONS_INDEX,
	TDMA_PROD_INDEX,
	RDMA_CONS_INDEX = TDMA_PROD_INDEX,
	DMA_RING_BUF_SIZE,
	DMA_START_ADDR,
	DMA_START_ADDR_HI,
	DMA_END_ADDR,
	DMA_END_ADDR_HI,
	DMA_MBUF_DONE_THRESH,
	TDMA_FLOW_PERIOD,
	RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
	TDMA_WRITE_PTR,
	RDMA_READ_PTR = TDMA_WRITE_PTR,
	TDMA_WRITE_PTR_HI,
	RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI,
};
/* GENET v4 supports 40-bit pointer addressing;
 * for obvious reasons the LO and HI word parts
 * are contiguous, but this offsets the other
 * registers.
 */
static const u8 genet_dma_ring_regs_v4[] = {
	[TDMA_READ_PTR]			= 0x00,
	[TDMA_READ_PTR_HI]		= 0x04,
	[TDMA_CONS_INDEX]		= 0x08,
	[TDMA_PROD_INDEX]		= 0x0C,
	[DMA_RING_BUF_SIZE]		= 0x10,
	[DMA_START_ADDR]		= 0x14,
	[DMA_START_ADDR_HI]		= 0x18,
	[DMA_END_ADDR]			= 0x1C,
	[DMA_END_ADDR_HI]		= 0x20,
	[DMA_MBUF_DONE_THRESH]		= 0x24,
	[TDMA_FLOW_PERIOD]		= 0x28,
	[TDMA_WRITE_PTR]		= 0x2C,
	[TDMA_WRITE_PTR_HI]		= 0x30,
};

static const u8 genet_dma_ring_regs_v123[] = {
	[TDMA_READ_PTR]			= 0x00,
	[TDMA_CONS_INDEX]		= 0x04,
	[TDMA_PROD_INDEX]		= 0x08,
	[DMA_RING_BUF_SIZE]		= 0x0C,
	[DMA_START_ADDR]		= 0x10,
	[DMA_END_ADDR]			= 0x14,
	[DMA_MBUF_DONE_THRESH]		= 0x18,
	[TDMA_FLOW_PERIOD]		= 0x1C,
	[TDMA_WRITE_PTR]		= 0x20,
};

/* Set at runtime once GENET version is known */
static const u8 *genet_dma_ring_regs;
static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
			   (DMA_RING_SIZE * ring) +
			   genet_dma_ring_regs[r]);
}

static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
		     (DMA_RING_SIZE * ring) +
		     genet_dma_ring_regs[r]);
}

static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
			   (DMA_RING_SIZE * ring) +
			   genet_dma_ring_regs[r]);
}

static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
		     (DMA_RING_SIZE * ring) +
		     genet_dma_ring_regs[r]);
}
static int bcmgenet_get_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_gset(priv->phydev, cmd);
}

static int bcmgenet_set_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_sset(priv->phydev, cmd);
}
static int bcmgenet_set_rx_csum(struct net_device *dev,
				netdev_features_t wanted)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 rbuf_chk_ctrl;
	bool rx_csum_en;

	rx_csum_en = !!(wanted & NETIF_F_RXCSUM);

	rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);

	/* enable rx checksumming */
	if (rx_csum_en)
		rbuf_chk_ctrl |= RBUF_RXCHK_EN;
	else
		rbuf_chk_ctrl &= ~RBUF_RXCHK_EN;
	priv->desc_rxchk_en = rx_csum_en;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (rx_csum_en && priv->crc_fwd_en)
		rbuf_chk_ctrl |= RBUF_SKIP_FCS;
	else
		rbuf_chk_ctrl &= ~RBUF_SKIP_FCS;

	bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL);

	return 0;
}

static int bcmgenet_set_tx_csum(struct net_device *dev,
				netdev_features_t wanted)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	bool desc_64b_en;
	u32 tbuf_ctrl, rbuf_ctrl;

	tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv);
	rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL);

	desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));

	/* enable 64-byte descriptors in both directions (RBUF and TBUF) */
	if (desc_64b_en) {
		tbuf_ctrl |= RBUF_64B_EN;
		rbuf_ctrl |= RBUF_64B_EN;
	} else {
		tbuf_ctrl &= ~RBUF_64B_EN;
		rbuf_ctrl &= ~RBUF_64B_EN;
	}
	priv->desc_64b_en = desc_64b_en;

	bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl);
	bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL);

	return 0;
}
static int bcmgenet_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;
	netdev_features_t wanted = dev->wanted_features;
	int ret = 0;

	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
		ret = bcmgenet_set_tx_csum(dev, wanted);
	if (changed & (NETIF_F_RXCSUM))
		ret = bcmgenet_set_rx_csum(dev, wanted);

	return ret;
}

static u32 bcmgenet_get_msglevel(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	priv->msg_enable = level;
}
/* standard ethtool support functions. */
enum bcmgenet_stat_type {
	BCMGENET_STAT_NETDEV = -1,
	BCMGENET_STAT_MIB_RX,
	BCMGENET_STAT_MIB_TX,
	BCMGENET_STAT_RUNT,
	BCMGENET_STAT_MISC,
	BCMGENET_STAT_SOFT,
};

struct bcmgenet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int stat_sizeof;
	int stat_offset;
	enum bcmgenet_stat_type type;
	/* reg offset from UMAC base for misc counters */
	u16 reg_offset;
};

#define STAT_NETDEV(m) { \
	.stat_string = __stringify(m), \
	.stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
	.stat_offset = offsetof(struct net_device_stats, m), \
	.type = BCMGENET_STAT_NETDEV, \
}

#define STAT_GENET_MIB(str, m, _type) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = _type, \
}

#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)

#define STAT_GENET_MISC(str, m, offset) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = BCMGENET_STAT_MISC, \
	.reg_offset = offset, \
}

/* There is a 0xC gap between the end of RX and beginning of TX stats and then
 * between the end of TX stats and the beginning of the RX RUNT stats.
 */
#define BCMGENET_STAT_OFFSET	0xc
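/* Example of how the gap is applied (see bcmgenet_update_mib_counters()
 * below): an RX MIB counter at running byte offset j is read from
 * UMAC_MIB_START + j, while TX and RUNT counters are read from
 * UMAC_MIB_START + j + BCMGENET_STAT_OFFSET to skip the 0xC-byte hole.
 */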
/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV(rx_packets),
	STAT_NETDEV(tx_packets),
	STAT_NETDEV(rx_bytes),
	STAT_NETDEV(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
	STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
	STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
	STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
	STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
	STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
	STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
	STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
	STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* Misc UniMAC counters */
	STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
			UMAC_RBUF_OVFL_CNT),
	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
	STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
	STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
};

#define BCMGENET_STATS_LEN	ARRAY_SIZE(bcmgenet_gstrings_stats)
static void bcmgenet_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
	strlcpy(info->version, "v2.0", sizeof(info->version));
	info->n_stats = BCMGENET_STATS_LEN;
}

static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCMGENET_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
				 u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCMGENET_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcmgenet_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}
static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		switch (s->type) {
		case BCMGENET_STAT_NETDEV:
		case BCMGENET_STAT_SOFT:
			continue;
		case BCMGENET_STAT_MIB_RX:
		case BCMGENET_STAT_MIB_TX:
		case BCMGENET_STAT_RUNT:
			if (s->type != BCMGENET_STAT_MIB_RX)
				offset = BCMGENET_STAT_OFFSET;
			val = bcmgenet_umac_readl(priv,
						  UMAC_MIB_START + j + offset);
			break;
		case BCMGENET_STAT_MISC:
			val = bcmgenet_umac_readl(priv, s->reg_offset);
			/* clear if overflowed */
			if (val == ~0)
				bcmgenet_umac_writel(priv, 0, s->reg_offset);
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}
}

static void bcmgenet_get_ethtool_stats(struct net_device *dev,
				       struct ethtool_stats *stats,
				       u64 *data)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_running(dev))
		bcmgenet_update_mib_counters(priv);

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		if (s->type == BCMGENET_STAT_NETDEV)
			p = (char *)&dev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		data[i] = *(u32 *)p;
	}
}
static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
	u32 reg;

	if (enable && !priv->clk_eee_enabled) {
		clk_prepare_enable(priv->clk_eee);
		priv->clk_eee_enabled = true;
	}

	reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL);
	if (enable)
		reg |= EEE_EN;
	else
		reg &= ~EEE_EN;
	bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL);

	/* Enable EEE and switch to a 27MHz clock automatically */
	reg = __raw_readl(priv->base + off);
	if (enable)
		reg |= TBUF_EEE_EN | TBUF_PM_EN;
	else
		reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
	__raw_writel(reg, priv->base + off);

	/* Do the same thing for RBUF */
	reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL);
	if (enable)
		reg |= RBUF_EEE_EN | RBUF_PM_EN;
	else
		reg &= ~(RBUF_EEE_EN | RBUF_PM_EN);
	bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL);

	if (!enable && priv->clk_eee_enabled) {
		clk_disable_unprepare(priv->clk_eee);
		priv->clk_eee_enabled = false;
	}

	priv->eee.eee_enabled = enable;
	priv->eee.eee_active = enable;
}
static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	e->eee_enabled = p->eee_enabled;
	e->eee_active = p->eee_active;
	e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);

	return phy_ethtool_get_eee(priv->phydev, e);
}

static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;
	int ret = 0;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	p->eee_enabled = e->eee_enabled;

	if (!p->eee_enabled) {
		bcmgenet_eee_enable_set(dev, false);
	} else {
		ret = phy_init_eee(priv->phydev, 0);
		if (ret) {
			netif_err(priv, hw, dev, "EEE initialization failed\n");
			return ret;
		}

		bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
		bcmgenet_eee_enable_set(dev, true);
	}

	return phy_ethtool_set_eee(priv->phydev, e);
}

static int bcmgenet_nway_reset(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	return genphy_restart_aneg(priv->phydev);
}
/* standard ethtool support functions. */
static struct ethtool_ops bcmgenet_ethtool_ops = {
	.get_strings		= bcmgenet_get_strings,
	.get_sset_count		= bcmgenet_get_sset_count,
	.get_ethtool_stats	= bcmgenet_get_ethtool_stats,
	.get_settings		= bcmgenet_get_settings,
	.set_settings		= bcmgenet_set_settings,
	.get_drvinfo		= bcmgenet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= bcmgenet_get_msglevel,
	.set_msglevel		= bcmgenet_set_msglevel,
	.get_wol		= bcmgenet_get_wol,
	.set_wol		= bcmgenet_set_wol,
	.get_eee		= bcmgenet_get_eee,
	.set_eee		= bcmgenet_set_eee,
	.nway_reset		= bcmgenet_nway_reset,
};
/* Power down the unimac, based on mode. */
static void bcmgenet_power_down(struct bcmgenet_priv *priv,
				enum bcmgenet_power_mode mode)
{
	u32 reg;

	switch (mode) {
	case GENET_POWER_CABLE_SENSE:
		phy_detach(priv->phydev);
		break;

	case GENET_POWER_WOL_MAGIC:
		bcmgenet_wol_power_down_cfg(priv, mode);
		break;

	case GENET_POWER_PASSIVE:
		/* Power down LED */
		if (priv->hw_params->flags & GENET_HAS_EXT) {
			reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
			reg |= (EXT_PWR_DOWN_PHY |
				EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
		}
		break;
	default:
		break;
	}
}

static void bcmgenet_power_up(struct bcmgenet_priv *priv,
			      enum bcmgenet_power_mode mode)
{
	u32 reg;

	if (!(priv->hw_params->flags & GENET_HAS_EXT))
		return;

	reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);

	switch (mode) {
	case GENET_POWER_PASSIVE:
		reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY |
			 EXT_PWR_DOWN_BIAS);
		/* fallthrough */
	case GENET_POWER_CABLE_SENSE:
		/* enable APD */
		reg |= EXT_PWR_DN_EN_LD;
		break;
	case GENET_POWER_WOL_MAGIC:
		bcmgenet_wol_power_up_cfg(priv, mode);
		return;
	default:
		break;
	}

	bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);

	if (mode == GENET_POWER_PASSIVE)
		bcmgenet_mii_reset(priv->dev);
}
/* ioctl handles special commands that are not present in ethtool. */
static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int val = 0;

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (!priv->phydev)
			val = -ENODEV;
		else
			val = phy_mii_ioctl(priv->phydev, rq, cmd);
		break;

	default:
		val = -EINVAL;
		break;
	}

	return val;
}
static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
					 struct bcmgenet_tx_ring *ring)
{
	struct enet_cb *tx_cb_ptr;

	tx_cb_ptr = ring->cbs;
	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;

	/* Advancing local write pointer */
	if (ring->write_ptr == ring->end_ptr)
		ring->write_ptr = ring->cb_ptr;
	else
		ring->write_ptr++;

	return tx_cb_ptr;
}

/* Simple helper to free a control block's resources */
static void bcmgenet_free_cb(struct enet_cb *cb)
{
	dev_kfree_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}
static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_priv *priv,
						  struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(priv,
				 UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_priv *priv,
						 struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(priv,
				 UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_priv *priv,
					       struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
				 INTRL2_CPU_MASK_CLEAR);
	priv->int1_mask &= ~(1 << ring->index);
}

static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv,
						struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
				 INTRL2_CPU_MASK_SET);
	priv->int1_mask |= (1 << ring->index);
}
/* Unlocked version of the reclaim routine */
static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
					  struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct enet_cb *tx_cb_ptr;
	struct netdev_queue *txq;
	unsigned int pkts_compl = 0;
	unsigned int c_index;
	unsigned int txbds_ready;
	unsigned int txbds_processed = 0;

	/* Compute how many buffers are transmitted since last xmit call */
	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
	c_index &= DMA_C_INDEX_MASK;

	if (likely(c_index >= ring->c_index))
		txbds_ready = c_index - ring->c_index;
	else
		txbds_ready = (DMA_C_INDEX_MASK + 1) - ring->c_index + c_index;

	netif_dbg(priv, tx_done, dev,
		  "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
		  __func__, ring->index, ring->c_index, c_index, txbds_ready);

	/* Reclaim transmitted buffers */
	while (txbds_processed < txbds_ready) {
		tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr];
		if (tx_cb_ptr->skb) {
			pkts_compl++;
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += tx_cb_ptr->skb->len;
			dma_unmap_single(&dev->dev,
					 dma_unmap_addr(tx_cb_ptr, dma_addr),
					 tx_cb_ptr->skb->len,
					 DMA_TO_DEVICE);
			bcmgenet_free_cb(tx_cb_ptr);
		} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
			dev->stats.tx_bytes +=
				dma_unmap_len(tx_cb_ptr, dma_len);
			dma_unmap_page(&dev->dev,
				       dma_unmap_addr(tx_cb_ptr, dma_addr),
				       dma_unmap_len(tx_cb_ptr, dma_len),
				       DMA_TO_DEVICE);
			dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
		}

		txbds_processed++;
		if (likely(ring->clean_ptr < ring->end_ptr))
			ring->clean_ptr++;
		else
			ring->clean_ptr = ring->cb_ptr;
	}

	ring->free_bds += txbds_processed;
	ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;

	if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
		txq = netdev_get_tx_queue(dev, ring->queue);
		if (netif_tx_queue_stopped(txq))
			netif_tx_wake_queue(txq);
	}

	return pkts_compl;
}
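/* Index arithmetic sketch (hedged worked example): the hardware consumer
 * index in __bcmgenet_tx_reclaim() above is a free-running 16-bit counter,
 * so with DMA_C_INDEX_MASK == 0xFFFF a wrap such as old c_index == 0xFFF0
 * and new c_index == 0x0010 yields
 *
 *	txbds_ready = (0xFFFF + 1) - 0xFFF0 + 0x0010 = 0x20
 *
 * reclaimable BDs. The Rx path below uses the same arithmetic for
 * rxpkttoprocess.
 */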
static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
					struct bcmgenet_tx_ring *ring)
{
	unsigned int released;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcmgenet_tx_reclaim(dev, ring);
	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}

static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcmgenet_tx_ring *ring =
		container_of(napi, struct bcmgenet_tx_ring, napi);
	unsigned int work_done = 0;

	work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);

	if (work_done == 0) {
		napi_complete(napi);
		ring->int_enable(ring->priv, ring);

		return 0;
	}

	return budget;
}

static void bcmgenet_tx_reclaim_all(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_is_multiqueue(dev)) {
		for (i = 0; i < priv->hw_params->tx_queues; i++)
			bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
	}

	bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
}
/* Transmits a single SKB (either head of a fragment or a single SKB);
 * the caller must hold the ring lock
 */
static int bcmgenet_xmit_single(struct net_device *dev,
				struct sk_buff *skb,
				u16 dma_desc_flags,
				struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct enet_cb *tx_cb_ptr;
	unsigned int skb_len;
	dma_addr_t mapping;
	u32 length_status;
	int ret;

	tx_cb_ptr = bcmgenet_get_txcb(priv, ring);

	if (unlikely(!tx_cb_ptr))
		BUG();
	tx_cb_ptr->skb = skb;

	skb_len = skb_headlen(skb) < ETH_ZLEN ? ETH_ZLEN : skb_headlen(skb);
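	/* Hedged note: mapping max(skb_headlen(skb), ETH_ZLEN) bytes keeps
	 * short frames at the 60-byte Ethernet minimum; bcmgenet_xmit()
	 * already called skb_padto(skb, ETH_ZLEN), so the padding bytes are
	 * present in the buffer being mapped here.
	 */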
	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
		dev_kfree_skb(skb);
		return ret;
	}

	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
	dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len);
	length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
			(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
			DMA_TX_APPEND_CRC;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		length_status |= DMA_TX_DO_CSUM;

	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status);

	return 0;
}
/* Transmit a SKB fragment */
static int bcmgenet_xmit_frag(struct net_device *dev,
			      skb_frag_t *frag,
			      u16 dma_desc_flags,
			      struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct enet_cb *tx_cb_ptr;
	dma_addr_t mapping;
	int ret;

	tx_cb_ptr = bcmgenet_get_txcb(priv, ring);

	if (unlikely(!tx_cb_ptr))
		BUG();
	tx_cb_ptr->skb = NULL;

	mapping = skb_frag_dma_map(kdev, frag, 0,
				   skb_frag_size(frag), DMA_TO_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
			  __func__);
		return ret;
	}

	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
	dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size);

	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
		    (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
		    (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));

	return 0;
}
/* Reallocate the SKB to put enough headroom in front of it and insert
 * the transmit checksum offsets in the descriptors
 */
static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
					    struct sk_buff *skb)
{
	struct status_64 *status = NULL;
	struct sk_buff *new_skb;
	u16 offset;
	u8 ip_proto;
	u16 ip_ver;
	u32 tx_csum_info;

	if (unlikely(skb_headroom(skb) < sizeof(*status))) {
		/* If 64 byte status block enabled, must make sure skb has
		 * enough headroom for us to insert 64B status block.
		 */
		new_skb = skb_realloc_headroom(skb, sizeof(*status));
		dev_kfree_skb(skb);
		if (!new_skb) {
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;
			return NULL;
		}
		skb = new_skb;
	}

	skb_push(skb, sizeof(*status));
	status = (struct status_64 *)skb->data;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = htons(skb->protocol);
		switch (ip_ver) {
		case ETH_P_IP:
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case ETH_P_IPV6:
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return skb;
		}

		offset = skb_checksum_start_offset(skb) - sizeof(*status);
		tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
				(offset + skb->csum_offset);

		/* Set the length valid bit for TCP and UDP and just set
		 * the special UDP flag for IPv4, else just set to 0.
		 */
		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			tx_csum_info |= STATUS_TX_CSUM_LV;
			if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
				tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
		} else {
			tx_csum_info = 0;
		}

		status->tx_csum_info = tx_csum_info;
	}

	return skb;
}
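/* Worked example for bcmgenet_put_tx_csum() above (hypothetical IPv4/TCP
 * frame): with a 14-byte Ethernet header and a 20-byte IP header, the TCP
 * header starts at byte 34 of the frame, so offset == 34, and since the
 * TCP checksum field sits 16 bytes into its header,
 *
 *	tx_csum_info = (34 << STATUS_TX_CSUM_START_SHIFT) | (34 + 16)
 *
 * with STATUS_TX_CSUM_LV also set by the TCP/UDP branch.
 */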
static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_tx_ring *ring = NULL;
	struct netdev_queue *txq;
	unsigned long flags = 0;
	int nr_frags, index;
	u16 dma_desc_flags;
	int ret;
	int i;

	index = skb_get_queue_mapping(skb);
	/* Mapping strategy:
	 * queue_mapping = 0, unclassified, packet xmited through ring16
	 * queue_mapping = 1, goes to ring 0. (highest priority queue)
	 * queue_mapping = 2, goes to ring 1.
	 * queue_mapping = 3, goes to ring 2.
	 * queue_mapping = 4, goes to ring 3.
	 */
	if (index == 0)
		index = DESC_INDEX;
	else
		index -= 1;

	nr_frags = skb_shinfo(skb)->nr_frags;
	ring = &priv->tx_rings[index];
	txq = netdev_get_tx_queue(dev, ring->queue);

	spin_lock_irqsave(&ring->lock, flags);
	if (ring->free_bds <= nr_frags + 1) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "%s: tx ring %d full when queue %d awake\n",
			   __func__, index, ring->queue);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	if (skb_padto(skb, ETH_ZLEN)) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* set the SKB transmit checksum */
	if (priv->desc_64b_en) {
		skb = bcmgenet_put_tx_csum(dev, skb);
		if (!skb) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	dma_desc_flags = DMA_SOP;
	if (nr_frags == 0)
		dma_desc_flags |= DMA_EOP;

	/* Transmit single SKB or head of fragment list */
	ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring);
	if (ret) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* xmit fragments */
	for (i = 0; i < nr_frags; i++) {
		ret = bcmgenet_xmit_frag(dev,
					 &skb_shinfo(skb)->frags[i],
					 (i == nr_frags - 1) ? DMA_EOP : 0,
					 ring);
		if (ret) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	skb_tx_timestamp(skb);

	/* Decrement total BD count and advance our write pointer */
	ring->free_bds -= nr_frags + 1;
	ring->prod_index += nr_frags + 1;
	ring->prod_index &= DMA_P_INDEX_MASK;

	bcmgenet_tdma_ring_writel(priv, ring->index,
				  ring->prod_index, TDMA_PROD_INDEX);

	if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
		netif_tx_stop_queue(txq);

out:
	spin_unlock_irqrestore(&ring->lock, flags);

	return ret;
}
static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
					  struct enet_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct sk_buff *skb;
	struct sk_buff *rx_skb;
	dma_addr_t mapping;

	/* Allocate a new Rx skb */
	skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT);
	if (!skb) {
		priv->mib.alloc_rx_buff_failed++;
		netif_err(priv, rx_err, priv->dev,
			  "%s: Rx skb allocation failed\n", __func__);
		return NULL;
	}

	/* DMA-map the new Rx skb */
	mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.rx_dma_failed++;
		dev_kfree_skb_any(skb);
		netif_err(priv, rx_err, priv->dev,
			  "%s: Rx skb DMA mapping failed\n", __func__);
		return NULL;
	}

	/* Grab the current Rx skb from the ring and DMA-unmap it */
	rx_skb = cb->skb;
	if (likely(rx_skb))
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 priv->rx_buf_len, DMA_FROM_DEVICE);

	/* Put the new Rx skb on the ring */
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dmadesc_set_addr(priv, cb->bd_addr, mapping);

	/* Return the current Rx skb to caller */
	return rx_skb;
}
/* bcmgenet_desc_rx - descriptor based rx process.
 * this could be called from bottom half, or from NAPI polling method.
 */
static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
				     unsigned int index,
				     unsigned int budget)
{
	struct bcmgenet_rx_ring *ring = &priv->rx_rings[index];
	struct net_device *dev = priv->dev;
	struct enet_cb *cb;
	struct sk_buff *skb;
	u32 dma_length_status;
	unsigned long dma_flag;
	int len;
	unsigned int rxpktprocessed = 0, rxpkttoprocess;
	unsigned int p_index;
	unsigned int discards;
	unsigned int chksum_ok = 0;

	p_index = bcmgenet_rdma_ring_readl(priv, index, RDMA_PROD_INDEX);

	discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
		   DMA_P_INDEX_DISCARD_CNT_MASK;
	if (discards > ring->old_discards) {
		discards = discards - ring->old_discards;
		dev->stats.rx_missed_errors += discards;
		dev->stats.rx_errors += discards;
		ring->old_discards += discards;

		/* Clear HW register when we reach 75% of maximum 0xFFFF */
		if (ring->old_discards >= 0xC000) {
			ring->old_discards = 0;
			bcmgenet_rdma_ring_writel(priv, index, 0,
						  RDMA_PROD_INDEX);
		}
	}
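	/* Arithmetic note (worked example): the discard accumulator is
	 * cleared at ring->old_discards >= 0xC000 because
	 * 0xC000 = 3/4 * 0x10000, i.e. the 75% of the 16-bit counter range
	 * quoted in the comment above, leaving headroom before the hardware
	 * counter itself wraps.
	 */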
	p_index &= DMA_P_INDEX_MASK;

	if (likely(p_index >= ring->c_index))
		rxpkttoprocess = p_index - ring->c_index;
	else
		rxpkttoprocess = (DMA_C_INDEX_MASK + 1) - ring->c_index +
				 p_index;

	netif_dbg(priv, rx_status, dev,
		  "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);

	while ((rxpktprocessed < rxpkttoprocess) &&
	       (rxpktprocessed < budget)) {
		cb = &priv->rx_cbs[ring->read_ptr];
		skb = bcmgenet_rx_refill(priv, cb);
		if (unlikely(!skb)) {
			dev->stats.rx_dropped++;
			dev->stats.rx_errors++;
			break;
		}

		if (!priv->desc_64b_en) {
			dma_length_status =
				dmadesc_get_length_status(priv, cb->bd_addr);
		} else {
			struct status_64 *status;

			status = (struct status_64 *)skb->data;
			dma_length_status = status->length_status;
		}

		/* DMA flags and length are still valid no matter how
		 * we got the Receive Status Vector (64B RSB or register)
		 */
		dma_flag = dma_length_status & 0xffff;
		len = dma_length_status >> DMA_BUFLENGTH_SHIFT;

		netif_dbg(priv, rx_status, dev,
			  "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
			  __func__, p_index, ring->c_index,
			  ring->read_ptr, dma_length_status);
		if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
			netif_err(priv, rx_status, dev,
				  "dropping fragmented packet!\n");
			dev->stats.rx_dropped++;
			dev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		/* report errors */
		if (unlikely(dma_flag & (DMA_RX_CRC_ERROR |
					 DMA_RX_OV |
					 DMA_RX_NO |
					 DMA_RX_LG |
					 DMA_RX_RXER))) {
			netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
				  (unsigned int)dma_flag);
			if (dma_flag & DMA_RX_CRC_ERROR)
				dev->stats.rx_crc_errors++;
			if (dma_flag & DMA_RX_OV)
				dev->stats.rx_over_errors++;
			if (dma_flag & DMA_RX_NO)
				dev->stats.rx_frame_errors++;
			if (dma_flag & DMA_RX_LG)
				dev->stats.rx_length_errors++;
			dev->stats.rx_dropped++;
			dev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		} /* error packet */
		chksum_ok = (dma_flag & priv->dma_rx_chk_bit) &&
			    priv->desc_rxchk_en;

		skb_put(skb, len);
		if (priv->desc_64b_en) {
			skb_pull(skb, 64);
			len -= 64;
		}

		if (likely(chksum_ok))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* remove the hardware 2 bytes added for IP alignment */
		skb_pull(skb, 2);
		len -= 2;

		if (priv->crc_fwd_en) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		/* Finish setting up the received SKB and send it to the kernel */
		skb->protocol = eth_type_trans(skb, priv->dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
		if (dma_flag & DMA_RX_MULT)
			dev->stats.multicast++;

		napi_gro_receive(&priv->napi, skb);
		netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");

next:
		rxpktprocessed++;
		if (likely(ring->read_ptr < ring->end_ptr))
			ring->read_ptr++;
		else
			ring->read_ptr = ring->cb_ptr;

		ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK;
		bcmgenet_rdma_ring_writel(priv, index, ring->c_index, RDMA_CONS_INDEX);
	}

	return rxpktprocessed;
}
/* Assign skb to RX DMA descriptor. */
static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
				     struct bcmgenet_rx_ring *ring)
{
	struct enet_cb *cb;
	struct sk_buff *skb;
	int i;

	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);

	/* loop here for each buffer needing assign */
	for (i = 0; i < ring->size; i++) {
		cb = ring->cbs + i;
		skb = bcmgenet_rx_refill(priv, cb);
		if (skb)
			dev_kfree_skb_any(skb);
		if (!skb)
			return -ENOMEM;
	}

	return 0;
}

static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
{
	struct enet_cb *cb;
	int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];

		if (dma_unmap_addr(cb, dma_addr)) {
			dma_unmap_single(&priv->dev->dev,
					 dma_unmap_addr(cb, dma_addr),
					 priv->rx_buf_len, DMA_FROM_DEVICE);
			dma_unmap_addr_set(cb, dma_addr, 0);
		}

		if (cb->skb)
			bcmgenet_free_cb(cb);
	}
}
static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
{
	u32 reg;

	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	if (enable)
		reg |= mask;
	else
		reg &= ~mask;
	bcmgenet_umac_writel(priv, reg, UMAC_CMD);

	/* UniMAC stops on a packet boundary; wait for a full-size packet
	 * to be processed
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}

static int reset_umac(struct bcmgenet_priv *priv)
{
	struct device *kdev = &priv->pdev->dev;
	unsigned int timeout = 0;
	u32 reg;

	/* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */
	bcmgenet_rbuf_ctrl_set(priv, 0);
	udelay(10);

	/* disable MAC while updating its registers */
	bcmgenet_umac_writel(priv, 0, UMAC_CMD);

	/* issue soft reset, wait for it to complete */
	bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
	while (timeout++ < 1000) {
		reg = bcmgenet_umac_readl(priv, UMAC_CMD);
		if (!(reg & CMD_SW_RESET))
			return 0;

		udelay(1);
	}

	if (timeout == 1000) {
		dev_err(kdev,
			"timeout waiting for MAC to come out of reset\n");
		return -ETIMEDOUT;
	}

	return 0;
}
static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
{
	/* Mask all interrupts. */
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
	bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
	bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
}
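/* A minimal sketch of the INTRL2 masking pattern used above (assuming the
 * usual Broadcom INTRL2 semantics: 1s written to MASK_SET mask those
 * sources, CPU_CLEAR acks latched status, MASK_CLEAR unmasks):
 *
 *	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
 *	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
 *
 * init_umac() below then re-enables a chosen subset by writing exactly
 * that subset to INTRL2_CPU_MASK_CLEAR.
 */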
static int init_umac(struct bcmgenet_priv *priv)
{
	struct device *kdev = &priv->pdev->dev;
	int ret;
	u32 reg, cpu_mask_clear;
	unsigned int index;

	dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");

	ret = reset_umac(priv);
	if (ret)
		return ret;

	bcmgenet_umac_writel(priv, 0, UMAC_CMD);
	/* clear tx/rx counter */
	bcmgenet_umac_writel(priv,
			     MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT,
			     UMAC_MIB_CTRL);
	bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);

	bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);

	/* init rx registers, enable ip header optimization */
	reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
	reg |= RBUF_ALIGN_2B;
	bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);

	if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
		bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);

	bcmgenet_intr_disable(priv);

	cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_TXDMA_BDONE;

	dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);

	/* Monitor cable plug/unplugged event for internal PHY */
	if (phy_is_internal(priv->phydev)) {
		cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
	} else if (priv->ext_phy) {
		cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
	} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
		reg = bcmgenet_bp_mc_get(priv);
		reg |= BIT(priv->hw_params->bp_in_en_shift);

		/* bp_mask: back pressure mask */
		if (netif_is_multiqueue(priv->dev))
			reg |= priv->hw_params->bp_in_mask;
		else
			reg &= ~priv->hw_params->bp_in_mask;
		bcmgenet_bp_mc_set(priv, reg);
	}

	/* Enable MDIO interrupts on GENET v3+ */
	if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
		cpu_mask_clear |= UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR;

	bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);

	for (index = 0; index < priv->hw_params->tx_queues; index++)
		bcmgenet_intrl2_1_writel(priv, (1 << index),
					 INTRL2_CPU_MASK_CLEAR);

	/* Enable rx/tx engine. */
	dev_dbg(kdev, "done init umac\n");

	return 0;
}
/* Initialize a Tx ring along with corresponding hardware registers */
static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
				  unsigned int index, unsigned int size,
				  unsigned int start_ptr, unsigned int end_ptr)
{
	struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
	u32 words_per_bd = WORDS_PER_BD(priv);
	u32 flow_period_val = 0;

	spin_lock_init(&ring->lock);
	ring->priv = priv;
	netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
	ring->index = index;
	if (index == DESC_INDEX) {
		ring->queue = 0;
		ring->int_enable = bcmgenet_tx_ring16_int_enable;
		ring->int_disable = bcmgenet_tx_ring16_int_disable;
	} else {
		ring->queue = index + 1;
		ring->int_enable = bcmgenet_tx_ring_int_enable;
		ring->int_disable = bcmgenet_tx_ring_int_disable;
	}
	ring->cbs = priv->tx_cbs + start_ptr;
	ring->size = size;
	ring->clean_ptr = start_ptr;
	ring->c_index = 0;
	ring->free_bds = size;
	ring->write_ptr = start_ptr;
	ring->cb_ptr = start_ptr;
	ring->end_ptr = end_ptr - 1;
	ring->prod_index = 0;

	/* Set flow period for ring != 16 */
	if (index != DESC_INDEX)
		flow_period_val = ENET_MAX_MTU_SIZE << 16;

	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX);
	bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
	/* Disable rate control for now */
	bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
				  TDMA_FLOW_PERIOD);
	bcmgenet_tdma_ring_writel(priv, index,
				  ((size << DMA_RING_SIZE_SHIFT) |
				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);

	/* Set start and end address, read and write pointers */
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  DMA_START_ADDR);
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  TDMA_READ_PTR);
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  TDMA_WRITE_PTR);
	bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
				  DMA_END_ADDR);

	napi_enable(&ring->napi);
}

static void bcmgenet_fini_tx_ring(struct bcmgenet_priv *priv,
				  unsigned int index)
{
	struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];

	napi_disable(&ring->napi);
	netif_napi_del(&ring->napi);
}
/* Initialize a RDMA ring */
static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
				 unsigned int index, unsigned int size,
				 unsigned int start_ptr, unsigned int end_ptr)
{
	struct bcmgenet_rx_ring *ring = &priv->rx_rings[index];
	u32 words_per_bd = WORDS_PER_BD(priv);
	int ret;

	ring->index = index;
	ring->cbs = priv->rx_cbs + start_ptr;
	ring->size = size;
	ring->c_index = 0;
	ring->read_ptr = start_ptr;
	ring->cb_ptr = start_ptr;
	ring->end_ptr = end_ptr - 1;

	ret = bcmgenet_alloc_rx_buffers(priv, ring);
	if (ret)
		return ret;

	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
	bcmgenet_rdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
	bcmgenet_rdma_ring_writel(priv, index,
				  ((size << DMA_RING_SIZE_SHIFT) |
				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
	bcmgenet_rdma_ring_writel(priv, index,
				  (DMA_FC_THRESH_LO <<
				   DMA_XOFF_THRESHOLD_SHIFT) |
				   DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);

	/* Set start and end address, read and write pointers */
	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  DMA_START_ADDR);
	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  RDMA_READ_PTR);
	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  RDMA_WRITE_PTR);
	bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
				  DMA_END_ADDR);

	return ret;
}
/* Initialize Tx queues
 *
 * Queues 0-3 are priority-based, each one has 32 descriptors,
 * with queue 0 being the highest priority queue.
 *
 * Queue 16 is the default Tx queue with
 * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
 *
 * The transmit control block pool is then partitioned as follows:
 * - Tx queue 0 uses tx_cbs[0..31]
 * - Tx queue 1 uses tx_cbs[32..63]
 * - Tx queue 2 uses tx_cbs[64..95]
 * - Tx queue 3 uses tx_cbs[96..127]
 * - Tx queue 16 uses tx_cbs[128..255]
 */
static void bcmgenet_init_tx_queues(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 i, dma_enable;
	u32 dma_ctrl, ring_cfg;
	u32 dma_priority[3] = {0, 0, 0};

	dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
	dma_enable = dma_ctrl & DMA_EN;
	dma_ctrl &= ~DMA_EN;
	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);

	dma_ctrl = 0;
	ring_cfg = 0;

	/* Enable strict priority arbiter mode */
	bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);

	/* Initialize Tx priority queues */
	for (i = 0; i < priv->hw_params->tx_queues; i++) {
		bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q,
				      i * priv->hw_params->tx_bds_per_q,
				      (i + 1) * priv->hw_params->tx_bds_per_q);
		ring_cfg |= (1 << i);
		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
		dma_priority[DMA_PRIO_REG_INDEX(i)] |=
			((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
	}

	/* Initialize Tx default queue 16 */
	bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT,
			      priv->hw_params->tx_queues *
			      priv->hw_params->tx_bds_per_q,
			      TOTAL_DESC);
	ring_cfg |= (1 << DESC_INDEX);
	dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
	dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
		((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
		 DMA_PRIO_REG_SHIFT(DESC_INDEX));
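	/* Example (assuming GENET_Q0_PRIORITY == 0, as defined above): the
	 * four priority queues get arbiter priorities 0..3 and the default
	 * queue 16 gets priority 4; per the comment above
	 * bcmgenet_init_tx_queues(), queue 0 is the highest priority under
	 * the strict-priority arbiter programmed above.
	 */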
	/* Set Tx queue priorities */
	bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
	bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
	bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);

	/* Enable Tx queues */
	bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG);

	/* Enable Tx DMA */
	if (dma_enable)
		dma_ctrl |= DMA_EN;
	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
}
/* Initialize Rx queues
 *
 * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be
 * used to direct traffic to these queues.
 *
 * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors.
 */
static int bcmgenet_init_rx_queues(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 i;
	u32 dma_enable;
	u32 dma_ctrl;
	u32 ring_cfg;
	int ret;

	dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL);
	dma_enable = dma_ctrl & DMA_EN;
	dma_ctrl &= ~DMA_EN;
	bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);

	dma_ctrl = 0;
	ring_cfg = 0;

	/* Initialize Rx priority queues */
	for (i = 0; i < priv->hw_params->rx_queues; i++) {
		ret = bcmgenet_init_rx_ring(priv, i,
					    priv->hw_params->rx_bds_per_q,
					    i * priv->hw_params->rx_bds_per_q,
					    (i + 1) *
					    priv->hw_params->rx_bds_per_q);
		if (ret)
			return ret;

		ring_cfg |= (1 << i);
		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
	}

	/* Initialize Rx default queue 16 */
	ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT,
				    priv->hw_params->rx_queues *
				    priv->hw_params->rx_bds_per_q,
				    TOTAL_DESC);
	if (ret)
		return ret;

	ring_cfg |= (1 << DESC_INDEX);
	dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));

	/* Enable rings */
	bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG);

	/* Configure ring as descriptor ring and re-enable DMA if enabled */
	if (dma_enable)
		dma_ctrl |= DMA_EN;
	bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);

	return 0;
}
static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
{
	int ret = 0;
	int timeout = 0;
	u32 reg;

	/* Disable TDMA to stop adding more frames to TX DMA */
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg &= ~DMA_EN;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);

	/* Check TDMA status register to confirm TDMA is disabled */
	while (timeout++ < DMA_TIMEOUT_VAL) {
		reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
		if (reg & DMA_DISABLED)
			break;

		udelay(1);
	}

	if (timeout == DMA_TIMEOUT_VAL) {
		netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
		ret = -ETIMEDOUT;
	}

	/* Wait 10ms for packet drain in both tx and rx dma */
	usleep_range(10000, 20000);

	/* Disable RDMA */
	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg &= ~DMA_EN;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	timeout = 0;
	/* Check RDMA status register to confirm RDMA is disabled */
	while (timeout++ < DMA_TIMEOUT_VAL) {
		reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
		if (reg & DMA_DISABLED)
			break;

		udelay(1);
	}

	if (timeout == DMA_TIMEOUT_VAL) {
		netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
		ret = -ETIMEDOUT;
	}

	return ret;
}
static void __bcmgenet_fini_dma(struct bcmgenet_priv *priv)
{
	int i;

	bcmgenet_dma_teardown(priv);

	for (i = 0; i < priv->num_tx_bds; i++) {
		if (priv->tx_cbs[i].skb != NULL) {
			dev_kfree_skb(priv->tx_cbs[i].skb);
			priv->tx_cbs[i].skb = NULL;
		}
	}

	bcmgenet_free_rx_buffers(priv);
	kfree(priv->rx_cbs);
	kfree(priv->tx_cbs);
}

static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
{
	int i;

	bcmgenet_fini_tx_ring(priv, DESC_INDEX);

	for (i = 0; i < priv->hw_params->tx_queues; i++)
		bcmgenet_fini_tx_ring(priv, i);

	__bcmgenet_fini_dma(priv);
}
/* init_edma: Initialize DMA control registers */
static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
{
	int ret;
	unsigned int i;
	struct enet_cb *cb;

	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);

	/* Init rDma */
	bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);

	/* Initialize common Rx ring structures */
	priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
	priv->num_rx_bds = TOTAL_DESC;
	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
			       GFP_KERNEL);
	if (!priv->rx_cbs)
		return -ENOMEM;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = priv->rx_cbs + i;
		cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE;
	}

	/* Initialize Rx queues */
	ret = bcmgenet_init_rx_queues(priv->dev);
	if (ret) {
		netdev_err(priv->dev, "failed to initialize Rx queues\n");
		bcmgenet_free_rx_buffers(priv);
		kfree(priv->rx_cbs);
		return ret;
	}

	/* Init tDma */
	bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);

	/* Initialize common TX ring structures */
	priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
	priv->num_tx_bds = TOTAL_DESC;
	priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
			       GFP_KERNEL);
	if (!priv->tx_cbs) {
		__bcmgenet_fini_dma(priv);
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_tx_bds; i++) {
		cb = priv->tx_cbs + i;
		cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE;
	}

	/* Initialize Tx queues */
	bcmgenet_init_tx_queues(priv->dev);

	return 0;
}
/* NAPI polling method */
static int bcmgenet_poll(struct napi_struct *napi, int budget)
{
	struct bcmgenet_priv *priv = container_of(napi,
			struct bcmgenet_priv, napi);
	unsigned int work_done;

	work_done = bcmgenet_desc_rx(priv, DESC_INDEX, budget);

	if (work_done < budget) {
		napi_complete(napi);
		bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE,
					 INTRL2_CPU_MASK_CLEAR);
	}

	return work_done;
}
/* Interrupt bottom half */
static void bcmgenet_irq_task(struct work_struct *work)
{
	struct bcmgenet_priv *priv = container_of(
			work, struct bcmgenet_priv, bcmgenet_irq_work);

	netif_dbg(priv, intr, priv->dev, "%s\n", __func__);

	if (priv->irq0_stat & UMAC_IRQ_MPD_R) {
		priv->irq0_stat &= ~UMAC_IRQ_MPD_R;
		netif_dbg(priv, wol, priv->dev,
			  "magic packet detected, waking up\n");
		bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
	}

	/* Link UP/DOWN event */
	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
	    (priv->irq0_stat & (UMAC_IRQ_LINK_UP | UMAC_IRQ_LINK_DOWN))) {
		phy_mac_interrupt(priv->phydev,
				  priv->irq0_stat & UMAC_IRQ_LINK_UP);
		priv->irq0_stat &= ~(UMAC_IRQ_LINK_UP | UMAC_IRQ_LINK_DOWN);
	}
}
/* bcmgenet_isr1: interrupt handler for the ring buffers. */
static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;
	struct bcmgenet_tx_ring *ring;
	unsigned int index;

	/* Save irq status for bottom-half processing. */
	priv->irq1_stat =
		bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
		~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
	/* clear interrupts */
	bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);

	netif_dbg(priv, intr, priv->dev,
		  "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);

	/* Check the MBDONE interrupts.
	 * packet is done, reclaim descriptors
	 */
	for (index = 0; index < priv->hw_params->tx_queues; index++) {
		if (!(priv->irq1_stat & BIT(index)))
			continue;

		ring = &priv->tx_rings[index];

		if (likely(napi_schedule_prep(&ring->napi))) {
			ring->int_disable(priv, ring);
			__napi_schedule(&ring->napi);
		}
	}

	return IRQ_HANDLED;
}
/* bcmgenet_isr0: Handle various interrupts. */
static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;

	/* Save irq status for bottom-half processing. */
	priv->irq0_stat =
		bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
		~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
	/* clear interrupts */
	bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	netif_dbg(priv, intr, priv->dev,
		  "IRQ=0x%x\n", priv->irq0_stat);

	if (priv->irq0_stat & (UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE)) {
		/* We use NAPI (software interrupt throttling) if Rx
		 * Descriptor throttling is not used.
		 * Disable the interrupt; it will be re-enabled in the
		 * poll method.
		 */
		if (likely(napi_schedule_prep(&priv->napi))) {
			bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE,
						 INTRL2_CPU_MASK_SET);
			__napi_schedule(&priv->napi);
		}
	}
	if (priv->irq0_stat &
	    (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
		struct bcmgenet_tx_ring *ring = &priv->tx_rings[DESC_INDEX];

		if (likely(napi_schedule_prep(&ring->napi))) {
			ring->int_disable(priv, ring);
			__napi_schedule(&ring->napi);
		}
	}
	if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
			       UMAC_IRQ_PHY_DET_F |
			       UMAC_IRQ_LINK_UP |
			       UMAC_IRQ_LINK_DOWN |
			       UMAC_IRQ_MPD_R)) {
		/* all other interested interrupts handled in bottom half */
		schedule_work(&priv->bcmgenet_irq_work);
	}

	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
	    priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
		priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
		wake_up(&priv->wq);
	}

	return IRQ_HANDLED;
}
static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;

	pm_wakeup_event(&priv->pdev->dev, 0);

	return IRQ_HANDLED;
}

static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
{
	u32 reg;

	reg = bcmgenet_rbuf_ctrl_get(priv);
	reg |= BIT(1);
	bcmgenet_rbuf_ctrl_set(priv, reg);
	udelay(10);

	reg &= ~BIT(1);
	bcmgenet_rbuf_ctrl_set(priv, reg);
	udelay(10);
}

static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
				 unsigned char *addr)
{
	bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | addr[3], UMAC_MAC0);
	bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
}

/* Returns a reusable dma control register value */
static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
{
	u32 reg;
	u32 dma_ctrl;

	/* disable DMA */
	dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);

	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
	udelay(10);
	bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);

	return dma_ctrl;
}
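
/* Counterpart to bcmgenet_dma_disable(): ORs the saved control bits
 * (ring 16 buffer enable plus DMA_EN) back into both the RX and TX
 * DMA_CTRL registers.
 */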
static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
{
	u32 reg;

	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg |= dma_ctrl;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg |= dma_ctrl;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
}
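
/* Bring-up order matters here: NAPI is enabled before the MAC RX/TX
 * enables so received packets can be polled immediately, and the PHY
 * is started last, once the queues are able to carry traffic.
 */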
static void bcmgenet_netif_start(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Start the network engine */
	napi_enable(&priv->napi);

	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);

	if (phy_is_internal(priv->phydev))
		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);

	netif_tx_start_all_queues(dev);

	phy_start(priv->phydev);
}

static int bcmgenet_open(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned long dma_ctrl;
	u32 reg;
	int ret;

	netif_dbg(priv, ifup, dev, "bcmgenet_open\n");

	/* Turn on the clock */
	if (!IS_ERR(priv->clk))
		clk_prepare_enable(priv->clk);

	/* take MAC out of reset */
	bcmgenet_umac_reset(priv);

	ret = init_umac(priv);
	if (ret)
		goto err_clk_disable;

	/* disable ethernet MAC while updating its registers */
	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);

	/* Make sure we reflect the value of CRC_CMD_FWD */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);

	bcmgenet_set_hw_addr(priv, dev->dev_addr);

	if (phy_is_internal(priv->phydev)) {
		reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
		reg |= EXT_ENERGY_DET_MASK;
		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
	}

	/* Disable RX/TX DMA and flush TX queues */
	dma_ctrl = bcmgenet_dma_disable(priv);

	/* Reinitialize TDMA and RDMA and SW housekeeping */
	ret = bcmgenet_init_dma(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize DMA\n");
		goto err_clk_disable;
	}

	/* Always enable ring 16 - descriptor ring */
	bcmgenet_enable_dma(priv, dma_ctrl);

	ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
			  dev->name, priv);
	if (ret < 0) {
		netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
		goto err_fini_dma;
	}

	ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
			  dev->name, priv);
	if (ret < 0) {
		netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
		goto err_irq0;
	}

	/* Re-configure the port multiplexer towards the PHY device */
	bcmgenet_mii_config(priv->dev, false);

	phy_connect_direct(dev, priv->phydev, bcmgenet_mii_setup,
			   priv->phy_interface);

	bcmgenet_netif_start(dev);

	return 0;

err_irq0:
	free_irq(priv->irq0, dev);
err_fini_dma:
	bcmgenet_fini_dma(priv);
err_clk_disable:
	if (!IS_ERR(priv->clk))
		clk_disable_unprepare(priv->clk);
	return ret;
}

static void bcmgenet_netif_stop(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	netif_tx_stop_all_queues(dev);
	napi_disable(&priv->napi);
	phy_stop(priv->phydev);

	bcmgenet_intr_disable(priv);

	/* Wait for pending work items to complete. Since interrupts are
	 * disabled, no new work will be scheduled.
	 */
	cancel_work_sync(&priv->bcmgenet_irq_work);

	priv->old_link = -1;
	priv->old_speed = -1;
	priv->old_duplex = -1;
	priv->old_pause = -1;
}

static int bcmgenet_close(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int ret;

	netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");

	bcmgenet_netif_stop(dev);

	/* Really kill the PHY state machine and disconnect from it */
	phy_disconnect(priv->phydev);

	/* Disable MAC receive */
	umac_enable_set(priv, CMD_RX_EN, false);

	ret = bcmgenet_dma_teardown(priv);
	if (ret)
		return ret;

	/* Disable MAC transmit. TX DMA must be disabled before this. */
	umac_enable_set(priv, CMD_TX_EN, false);

	/* tx reclaim */
	bcmgenet_tx_reclaim_all(dev);
	bcmgenet_fini_dma(priv);

	free_irq(priv->irq0, priv);
	free_irq(priv->irq1, priv);

	if (phy_is_internal(priv->phydev))
		bcmgenet_power_down(priv, GENET_POWER_PASSIVE);

	if (!IS_ERR(priv->clk))
		clk_disable_unprepare(priv->clk);

	return 0;
}

static void bcmgenet_timeout(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");

	dev->trans_start = jiffies;

	dev->stats.tx_errors++;

	netif_tx_wake_all_queues(dev);
}

#define MAX_MC_COUNT	16

static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
					 unsigned char *addr,
					 int *i,
					 int *mc)
{
	u32 reg;

	bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
			     UMAC_MDF_ADDR + (*i * 4));
	bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
			     addr[4] << 8 | addr[5],
			     UMAC_MDF_ADDR + ((*i + 1) * 4));
	reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL);
	reg |= (1 << (MAX_MC_COUNT - *mc));
	bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
	*i += 2;
	(*mc)++;
}
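
/* Each MDF entry occupies two UMAC_MDF_ADDR words: the first carries
 * the top two bytes of the address in its low 16 bits, the second the
 * remaining four bytes. Entry N is enabled through bit
 * (MAX_MC_COUNT - N) of UMAC_MDF_CTRL.
 */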

static void bcmgenet_set_rx_mode(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int i;
	int mc;
	u32 reg;

	netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);

	/* Promiscuous mode */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	if (dev->flags & IFF_PROMISC) {
		reg |= CMD_PROMISC;
		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
		bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
		return;
	} else {
		reg &= ~CMD_PROMISC;
		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
	}

	/* UniMac doesn't support ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		netdev_warn(dev, "ALLMULTI is not supported\n");
		return;
	}

	/* update MDF filter */
	i = 0;
	mc = 0;
	/* Broadcast */
	bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc);
	/* my own address */
	bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc);
	/* Unicast list */
	if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc))
		return;

	if (!netdev_uc_empty(dev))
		netdev_for_each_uc_addr(ha, dev)
			bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
	/* Multicast */
	if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc))
		return;

	netdev_for_each_mc_addr(ha, dev)
		bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
}
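
/* The broadcast and device MAC addresses always occupy the first two
 * MDF entries; the unicast and multicast lists are programmed only if
 * they fit in the remaining entries and are skipped otherwise.
 */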

/* Set the hardware MAC address. */
static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	/* Setting the MAC address at the hardware level is not possible
	 * without disabling the UniMAC RX/TX enable bits.
	 */
	if (netif_running(dev))
		return -EBUSY;

	ether_addr_copy(dev->dev_addr, addr->sa_data);

	return 0;
}
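
/* The new address only reaches the hardware on the next bcmgenet_open(),
 * which programs it through bcmgenet_set_hw_addr().
 */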

static const struct net_device_ops bcmgenet_netdev_ops = {
	.ndo_open		= bcmgenet_open,
	.ndo_stop		= bcmgenet_close,
	.ndo_start_xmit		= bcmgenet_xmit,
	.ndo_tx_timeout		= bcmgenet_timeout,
	.ndo_set_rx_mode	= bcmgenet_set_rx_mode,
	.ndo_set_mac_address	= bcmgenet_set_mac_addr,
	.ndo_do_ioctl		= bcmgenet_ioctl,
	.ndo_set_features	= bcmgenet_set_features,
};

/* Array of GENET hardware parameters/characteristics */
static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
	[GENET_V1] = {
		.tx_queues = 0,
		.tx_bds_per_q = 0,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 16,
		.bp_in_mask = 0xffff,
		.hfb_filter_cnt = 16,
		.qtag_mask = 0x1F,
		.hfb_offset = 0x1000,
		.rdma_offset = 0x2000,
		.tdma_offset = 0x3000,
		.words_per_bd = 2,
	},
	[GENET_V2] = {
		.tx_queues = 4,
		.tx_bds_per_q = 32,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 16,
		.bp_in_mask = 0xffff,
		.hfb_filter_cnt = 16,
		.qtag_mask = 0x1F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x1000,
		.hfb_reg_offset = 0x2000,
		.rdma_offset = 0x3000,
		.tdma_offset = 0x4000,
		.words_per_bd = 2,
		.flags = GENET_HAS_EXT,
	},
	[GENET_V3] = {
		.tx_queues = 4,
		.tx_bds_per_q = 32,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 17,
		.bp_in_mask = 0x1ffff,
		.hfb_filter_cnt = 48,
		.qtag_mask = 0x3F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x8000,
		.hfb_reg_offset = 0xfc00,
		.rdma_offset = 0x10000,
		.tdma_offset = 0x11000,
		.words_per_bd = 2,
		.flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR,
	},
	[GENET_V4] = {
		.tx_queues = 4,
		.tx_bds_per_q = 32,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 17,
		.bp_in_mask = 0x1ffff,
		.hfb_filter_cnt = 48,
		.qtag_mask = 0x3F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x8000,
		.hfb_reg_offset = 0xfc00,
		.rdma_offset = 0x2000,
		.tdma_offset = 0x4000,
		.words_per_bd = 3,
		.flags = GENET_HAS_40BITS | GENET_HAS_EXT | GENET_HAS_MDIO_INTR,
	},
};

/* Infer hardware parameters from the detected GENET version */
static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
{
	struct bcmgenet_hw_params *params;
	u32 reg;
	u8 major;
	u16 gphy_rev;

	if (GENET_IS_V4(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
		genet_dma_ring_regs = genet_dma_ring_regs_v4;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
		priv->version = GENET_V4;
	} else if (GENET_IS_V3(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
		priv->version = GENET_V3;
	} else if (GENET_IS_V2(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
		priv->version = GENET_V2;
	} else if (GENET_IS_V1(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
		priv->version = GENET_V1;
	}

	/* enum genet_version starts at 1 */
	priv->hw_params = &bcmgenet_hw_params[priv->version];
	params = priv->hw_params;

	/* Read GENET HW version */
	reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
	major = (reg >> 24 & 0x0f);
	if (major == 5)
		major = 4;
	else if (major == 0)
		major = 1;
	if (major != priv->version) {
		dev_err(&priv->pdev->dev,
			"GENET version mismatch, got: %d, configured for: %d\n",
			major, priv->version);
	}

	/* Print the GENET core version */
	dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
		 major, (reg >> 16) & 0x0f, reg & 0xffff);

	/* Store the integrated PHY revision for the MDIO probing function
	 * to pass this information to the PHY driver. The PHY driver expects
	 * to find the PHY major revision in bits 15:8 while the GENET register
	 * stores that information in bits 7:0, account for that.
	 *
	 * On newer chips, starting with PHY revision G0, a new scheme is
	 * deployed similar to the Starfighter 2 switch with GPHY major
	 * revision in bits 15:8 and patch level in bits 7:0. Major revision 0
	 * is reserved as well as special value 0x01ff, we have a small
	 * heuristic to check for the new GPHY revision and re-arrange things
	 * so the GPHY driver is happy.
	 */
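	/* Example (illustrative values): a raw revision of 0x0060 matches
	 * the old scheme (bits 7:4 non-zero) and is stored as 0x6000, while
	 * 0x1001 already carries the major revision in bits 15:8 and is
	 * kept as-is.
	 */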
	gphy_rev = reg & 0xffff;

	/* This is the good old scheme, just GPHY major, no minor nor patch */
	if ((gphy_rev & 0xf0) != 0)
		priv->gphy_rev = gphy_rev << 8;

	/* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */
	else if ((gphy_rev & 0xff00) != 0)
		priv->gphy_rev = gphy_rev;

	/* This is reserved so should require special treatment */
	else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
		pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
		return;
	}

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (!(params->flags & GENET_HAS_40BITS))
		pr_warn("GENET does not support 40-bits PA\n");
#endif
	pr_debug("Configuration for version: %d\n"
		 "TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n"
		 "BP << en: %2d, BP msk: 0x%05x\n"
		 "HFB count: %2d, QTAQ msk: 0x%05x\n"
		 "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
		 "RDMA: 0x%05x, TDMA: 0x%05x\n"
		 "Words/BD: %d\n",
		 priv->version,
		 params->tx_queues, params->tx_bds_per_q,
		 params->rx_queues, params->rx_bds_per_q,
		 params->bp_in_en_shift, params->bp_in_mask,
		 params->hfb_filter_cnt, params->qtag_mask,
		 params->tbuf_offset, params->hfb_offset,
		 params->hfb_reg_offset,
		 params->rdma_offset, params->tdma_offset,
		 params->words_per_bd);
}

static const struct of_device_id bcmgenet_match[] = {
	{ .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 },
	{ .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
	{ .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
	{ .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
	{ },
};

static int bcmgenet_probe(struct platform_device *pdev)
{
	struct bcmgenet_platform_data *pd = pdev->dev.platform_data;
	struct device_node *dn = pdev->dev.of_node;
	const struct of_device_id *of_id = NULL;
	struct bcmgenet_priv *priv;
	struct net_device *dev;
	const void *macaddr;
	struct resource *r;
	int err = -EIO;

	/* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
	dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
				 GENET_MAX_MQ_CNT + 1);
	if (!dev) {
		dev_err(&pdev->dev, "can't allocate net device\n");
		return -ENOMEM;
	}

	if (dn) {
		of_id = of_match_node(bcmgenet_match, dn);
		if (!of_id)
			return -EINVAL;
	}

	priv = netdev_priv(dev);
	priv->irq0 = platform_get_irq(pdev, 0);
	priv->irq1 = platform_get_irq(pdev, 1);
	priv->wol_irq = platform_get_irq(pdev, 2);
	if (!priv->irq0 || !priv->irq1) {
		dev_err(&pdev->dev, "can't find IRQs\n");
		err = -EINVAL;
		goto err;
	}

	if (dn) {
		macaddr = of_get_mac_address(dn);
		if (!macaddr) {
			dev_err(&pdev->dev, "can't find MAC address\n");
			err = -EINVAL;
			goto err;
		}
	} else {
		macaddr = pd->mac_address;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(priv->base)) {
		err = PTR_ERR(priv->base);
		goto err;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev_set_drvdata(&pdev->dev, dev);
	ether_addr_copy(dev->dev_addr, macaddr);
	dev->watchdog_timeo = 2 * HZ;
	dev->ethtool_ops = &bcmgenet_ethtool_ops;
	dev->netdev_ops = &bcmgenet_netdev_ops;
	netif_napi_add(dev, &priv->napi, bcmgenet_poll, 64);

	priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);

	/* Set hardware features */
	dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
		NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;

	/* Request the WOL interrupt and advertise suspend if available */
	priv->wol_irq_disabled = true;
	err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0,
			       dev->name, priv);
	if (!err)
		device_set_wakeup_capable(&pdev->dev, 1);

	/* Set the needed headroom to account for any possible
	 * features enabling/disabling at runtime
	 */
	dev->needed_headroom += 64;

	netdev_boot_setup_check(dev);

	priv->dev = dev;
	priv->pdev = pdev;
	if (of_id)
		priv->version = (enum bcmgenet_version)of_id->data;
	else
		priv->version = pd->genet_version;

	priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
	if (IS_ERR(priv->clk))
		dev_warn(&priv->pdev->dev, "failed to get enet clock\n");

	if (!IS_ERR(priv->clk))
		clk_prepare_enable(priv->clk);

	bcmgenet_set_hw_params(priv);

	/* Mii wait queue */
	init_waitqueue_head(&priv->wq);
	/* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
	priv->rx_buf_len = RX_BUF_LENGTH;
	INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);

	priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
	if (IS_ERR(priv->clk_wol))
		dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");

	priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee");
	if (IS_ERR(priv->clk_eee)) {
		dev_warn(&priv->pdev->dev, "failed to get enet-eee clock\n");
		priv->clk_eee = NULL;
	}

	err = reset_umac(priv);
	if (err)
		goto err_clk_disable;

	err = bcmgenet_mii_init(dev);
	if (err)
		goto err_clk_disable;

	/* Set up the number of real queues + 1 (GENET_V1 has 0 hardware
	 * queues, just the ring 16 descriptor-based TX queue).
	 */
	netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
	netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);

	/* libphy will determine the link state */
	netif_carrier_off(dev);

	/* Turn off the main clock, WOL clock is handled separately */
	if (!IS_ERR(priv->clk))
		clk_disable_unprepare(priv->clk);

	err = register_netdev(dev);
	if (err)
		goto err;

	return err;

err_clk_disable:
	if (!IS_ERR(priv->clk))
		clk_disable_unprepare(priv->clk);
err:
	free_netdev(dev);
	return err;
}

static int bcmgenet_remove(struct platform_device *pdev)
{
	struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);

	dev_set_drvdata(&pdev->dev, NULL);
	unregister_netdev(priv->dev);
	bcmgenet_mii_exit(priv->dev);
	free_netdev(priv->dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int bcmgenet_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int ret;

	if (!netif_running(dev))
		return 0;

	bcmgenet_netif_stop(dev);

	phy_suspend(priv->phydev);

	netif_device_detach(dev);

	/* Disable MAC receive */
	umac_enable_set(priv, CMD_RX_EN, false);

	ret = bcmgenet_dma_teardown(priv);
	if (ret)
		return ret;

	/* Disable MAC transmit. TX DMA must be disabled before this. */
	umac_enable_set(priv, CMD_TX_EN, false);

	/* tx reclaim */
	bcmgenet_tx_reclaim_all(dev);
	bcmgenet_fini_dma(priv);

	/* Prepare the device for Wake-on-LAN and switch to the slow clock */
	if (device_may_wakeup(d) && priv->wolopts) {
		bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
		clk_prepare_enable(priv->clk_wol);
	}

	/* Turn off the clocks */
	clk_disable_unprepare(priv->clk);

	return 0;
}

static int bcmgenet_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned long dma_ctrl;
	int ret;
	u32 reg;

	if (!netif_running(dev))
		return 0;

	/* Turn on the clock */
	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	bcmgenet_umac_reset(priv);

	ret = init_umac(priv);
	if (ret)
		goto out_clk_disable;

	/* From WOL-enabled suspend, switch to regular clock */
	if (priv->wolopts)
		clk_disable_unprepare(priv->clk_wol);

	phy_init_hw(priv->phydev);
	/* Speed settings must be restored */
	bcmgenet_mii_config(priv->dev, false);

	/* disable ethernet MAC while updating its registers */
	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);

	bcmgenet_set_hw_addr(priv, dev->dev_addr);

	if (phy_is_internal(priv->phydev)) {
		reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
		reg |= EXT_ENERGY_DET_MASK;
		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
	}

	if (priv->wolopts)
		bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);

	/* Disable RX/TX DMA and flush TX queues */
	dma_ctrl = bcmgenet_dma_disable(priv);

	/* Reinitialize TDMA and RDMA and SW housekeeping */
	ret = bcmgenet_init_dma(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize DMA\n");
		goto out_clk_disable;
	}

	/* Always enable ring 16 - descriptor ring */
	bcmgenet_enable_dma(priv, dma_ctrl);

	netif_device_attach(dev);

	phy_resume(priv->phydev);

	if (priv->eee.eee_enabled)
		bcmgenet_eee_enable_set(dev, true);

	bcmgenet_netif_start(dev);

	return 0;

out_clk_disable:
	clk_disable_unprepare(priv->clk);
	return ret;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume);

static struct platform_driver bcmgenet_driver = {
	.probe	= bcmgenet_probe,
	.remove	= bcmgenet_remove,
	.driver	= {
		.name	= "bcmgenet",
		.of_match_table = bcmgenet_match,
		.pm	= &bcmgenet_pm_ops,
	},
};
module_platform_driver(bcmgenet_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
MODULE_ALIAS("platform:bcmgenet");
MODULE_LICENSE("GPL");