// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
 */

#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>
#include <linux/interrupt.h>
#include <linux/pinctrl/devinfo.h>
#include <linux/phylink.h>

#include "mtk_eth_soc.h"

static int mtk_msg_level = -1;
module_param_named(msg_level, mtk_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");

#define MTK_ETHTOOL_STAT(x) { #x, \
			     offsetof(struct mtk_hw_stats, x) / sizeof(u64) }

/* strings used by ethtool */
static const struct mtk_ethtool_stats {
	char str[ETH_GSTRING_LEN];
	u32 offset;
} mtk_ethtool_stats[] = {
	MTK_ETHTOOL_STAT(tx_bytes),
	MTK_ETHTOOL_STAT(tx_packets),
	MTK_ETHTOOL_STAT(tx_skip),
	MTK_ETHTOOL_STAT(tx_collisions),
	MTK_ETHTOOL_STAT(rx_bytes),
	MTK_ETHTOOL_STAT(rx_packets),
	MTK_ETHTOOL_STAT(rx_overflow),
	MTK_ETHTOOL_STAT(rx_fcs_errors),
	MTK_ETHTOOL_STAT(rx_short_errors),
	MTK_ETHTOOL_STAT(rx_long_errors),
	MTK_ETHTOOL_STAT(rx_checksum_errors),
	MTK_ETHTOOL_STAT(rx_flow_control_packets),
};

static const char * const mtk_clks_source_name[] = {
	"ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
	"sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
	"sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
	"sgmii_ck", "eth2pll",
};

void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
	__raw_writel(val, eth->base + reg);
}

u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
{
	return __raw_readl(eth->base + reg);
}

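/* PHY registers are reached indirectly through the MTK_PHY_IAC register:
 * one write latches the opcode, PHY address and register number, and the
 * PHY_IAC_ACCESS bit stays set while the hardware clocks the frame out on
 * the MDIO lines. Every transaction below is therefore bracketed by
 * busy-waiting on that bit.
 */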
static int mtk_mdio_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
			return 0;
		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
			break;
		usleep_range(10, 20);
	}

	dev_err(eth->dev, "mdio: MDIO timeout\n");
	return -1;
}

static u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
			   u32 phy_register, u32 write_data)
{
	if (mtk_mdio_busy_wait(eth))
		return -1;

	write_data &= 0xffff;

	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
		(phy_register << PHY_IAC_REG_SHIFT) |
		(phy_addr << PHY_IAC_ADDR_SHIFT) | write_data,
		MTK_PHY_IAC);

	if (mtk_mdio_busy_wait(eth))
		return -1;

	return 0;
}

static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
{
	u32 d;

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
		(phy_reg << PHY_IAC_REG_SHIFT) |
		(phy_addr << PHY_IAC_ADDR_SHIFT),
		MTK_PHY_IAC);

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;

	return d;
}

static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
			  int phy_reg, u16 val)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
}

static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_read(eth, phy_addr, phy_reg);
}

static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
				     phy_interface_t interface)
{
	u32 val;

	/* Check DDR memory type.
	 * Currently TRGMII mode with DDR2 memory is not supported.
	 */
	regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
	if (interface == PHY_INTERFACE_MODE_TRGMII &&
	    val & SYSCFG_DRAM_TYPE_DDR2) {
		dev_err(eth->dev,
			"TRGMII mode with DDR2 memory is not supported!\n");
		return -EOPNOTSUPP;
	}

	val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
		ETHSYS_TRGMII_MT7621_DDR_PLL : 0;

	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
			   ETHSYS_TRGMII_MT7621_MASK, val);

	return 0;
}

static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed)
{
	u32 val;
	int ret;

	val = (speed == SPEED_1000) ?
		INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
	mtk_w32(eth, val, INTF_MODE);

	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
			   ETHSYS_TRGMII_CLK_SEL362_5,
			   ETHSYS_TRGMII_CLK_SEL362_5);

	val = (speed == SPEED_1000) ? 250000000 : 500000000;
	ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
	if (ret)
		dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);

	val = (speed == SPEED_1000) ?
		RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_RCK_CTRL);

	val = (speed == SPEED_1000) ?
		TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_TCK_CTRL);
}

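/* phylink mac_config callback: routes the GMAC through the right SoC mux
 * path for the requested PHY interface, programs the ge_mode field in
 * ETHSYS_SYSCFG0, brings up the SGMII block when needed, and finally
 * derives a new MAC_MCR value (speed, duplex, pause) that is only written
 * back when it differs from the current one.
 */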
static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
			   const struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	struct mtk_eth *eth = mac->hw;
	u32 mcr_cur, mcr_new, sid;
	int val, ge_mode, err;

	/* MT76x8 has no hardware settings for the MAC */
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
	    mac->interface != state->interface) {
		/* Setup soc pin functions */
		switch (state->interface) {
		case PHY_INTERFACE_MODE_TRGMII:
			if (mac->id)
				goto err_phy;
			if (!MTK_HAS_CAPS(mac->hw->soc->caps,
					  MTK_GMAC1_TRGMII))
				goto err_phy;
			fallthrough;
		case PHY_INTERFACE_MODE_RGMII_TXID:
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_MII:
		case PHY_INTERFACE_MODE_REVMII:
		case PHY_INTERFACE_MODE_RMII:
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
				err = mtk_gmac_rgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_1000BASEX:
		case PHY_INTERFACE_MODE_2500BASEX:
		case PHY_INTERFACE_MODE_SGMII:
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
				err = mtk_gmac_sgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_GMII:
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
				err = mtk_gmac_gephy_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		default:
			goto err_phy;
		}

		/* Setup clock for 1st gmac */
		if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
		    !phy_interface_mode_is_8023z(state->interface) &&
		    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
			if (MTK_HAS_CAPS(mac->hw->soc->caps,
					 MTK_TRGMII_MT7621_CLK)) {
				if (mt7621_gmac0_rgmii_adjust(mac->hw,
							      state->interface))
					goto err_phy;
			} else {
				if (state->interface !=
				    PHY_INTERFACE_MODE_TRGMII)
					mtk_gmac0_rgmii_adjust(mac->hw,
							       state->speed);
			}
		}

		ge_mode = 0;
		switch (state->interface) {
		case PHY_INTERFACE_MODE_MII:
		case PHY_INTERFACE_MODE_GMII:
			ge_mode = 1;
			break;
		case PHY_INTERFACE_MODE_REVMII:
			ge_mode = 2;
			break;
		case PHY_INTERFACE_MODE_RMII:
			if (mac->id)
				goto err_phy;
			ge_mode = 3;
			break;
		default:
			break;
		}

		/* put the gmac into the right mode */
		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
		val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
		regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);

		mac->interface = state->interface;
	}

	/* SGMII */
	if (state->interface == PHY_INTERFACE_MODE_SGMII ||
	    phy_interface_mode_is_8023z(state->interface)) {
		/* The path GMAC to SGMII will be enabled once the SGMIISYS is
		 * being setup done.
		 */
		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);

		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
				   SYSCFG0_SGMII_MASK,
				   ~(u32)SYSCFG0_SGMII_MASK);

		/* Decide how GMAC and SGMIISYS be mapped */
		sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
		       0 : mac->id;

		/* Setup SGMIISYS with the determined property */
		if (state->interface != PHY_INTERFACE_MODE_SGMII)
			err = mtk_sgmii_setup_mode_force(eth->sgmii, sid,
							 state);
		else if (phylink_autoneg_inband(mode))
			err = mtk_sgmii_setup_mode_an(eth->sgmii, sid);

		if (err)
			goto init_err;

		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
				   SYSCFG0_SGMII_MASK, val);
	} else if (phylink_autoneg_inband(mode)) {
		dev_err(eth->dev,
			"In-band mode not supported in non SGMII mode!\n");
		return;
	}

	/* Setup gmac */
	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
	mcr_new = mcr_cur;
	mcr_new &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
		     MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
		     MAC_MCR_FORCE_RX_FC);
	mcr_new |= MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
		   MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;

	switch (state->speed) {
	case SPEED_2500:
	case SPEED_1000:
		mcr_new |= MAC_MCR_SPEED_1000;
		break;
	case SPEED_100:
		mcr_new |= MAC_MCR_SPEED_100;
		break;
	}
	if (state->duplex == DUPLEX_FULL) {
		mcr_new |= MAC_MCR_FORCE_DPX;
		if (state->pause & MLO_PAUSE_TX)
			mcr_new |= MAC_MCR_FORCE_TX_FC;
		if (state->pause & MLO_PAUSE_RX)
			mcr_new |= MAC_MCR_FORCE_RX_FC;
	}

	/* Only update control register when needed! */
	if (mcr_new != mcr_cur)
		mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));

	return;

err_phy:
	dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
		mac->id, phy_modes(state->interface));
	return;

init_err:
	dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
		mac->id, phy_modes(state->interface), err);
}

static void mtk_mac_pcs_get_state(struct phylink_config *config,
				  struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));

	state->link = (pmsr & MAC_MSR_LINK);
	state->duplex = (pmsr & MAC_MSR_DPX) >> 1;

	switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) {
	case 0:
		state->speed = SPEED_10;
		break;
	case MAC_MSR_SPEED_100:
		state->speed = SPEED_100;
		break;
	case MAC_MSR_SPEED_1000:
		state->speed = SPEED_1000;
		break;
	default:
		state->speed = SPEED_UNKNOWN;
		break;
	}

	state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
	if (pmsr & MAC_MSR_RX_FC)
		state->pause |= MLO_PAUSE_RX;
	if (pmsr & MAC_MSR_TX_FC)
		state->pause |= MLO_PAUSE_TX;
}

static void mtk_mac_an_restart(struct phylink_config *config)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);

	mtk_sgmii_restart_an(mac->hw, mac->id);
}

static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
			      phy_interface_t interface)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));

	mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}

static void mtk_mac_link_up(struct phylink_config *config,
			    struct phy_device *phy,
			    unsigned int mode, phy_interface_t interface,
			    int speed, int duplex, bool tx_pause, bool rx_pause)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));

	mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}

static void mtk_validate(struct phylink_config *config,
			 unsigned long *supported,
			 struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	if (state->interface != PHY_INTERFACE_MODE_NA &&
	    state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_GMII &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII) &&
	      phy_interface_mode_is_rgmii(state->interface)) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) &&
	      !mac->id && state->interface == PHY_INTERFACE_MODE_TRGMII) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII) &&
	      (state->interface == PHY_INTERFACE_MODE_SGMII ||
	       phy_interface_mode_is_8023z(state->interface)))) {
		linkmode_zero(supported);
		return;
	}

	phylink_set_port_modes(mask);
	phylink_set(mask, Autoneg);

	switch (state->interface) {
	case PHY_INTERFACE_MODE_TRGMII:
		phylink_set(mask, 1000baseT_Full);
		break;
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		phylink_set(mask, 1000baseX_Full);
		phylink_set(mask, 2500baseX_Full);
		break;
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		phylink_set(mask, 1000baseT_Half);
		fallthrough;
	case PHY_INTERFACE_MODE_SGMII:
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
		fallthrough;
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_RMII:
	case PHY_INTERFACE_MODE_REVMII:
	case PHY_INTERFACE_MODE_NA:
	default:
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 10baseT_Full);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 100baseT_Full);
		break;
	}

	if (state->interface == PHY_INTERFACE_MODE_NA) {
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseX_Full);
			phylink_set(mask, 2500baseX_Full);
		}
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseT_Half);
			phylink_set(mask, 1000baseX_Full);
		}
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GEPHY)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseT_Half);
		}
	}

	phylink_set(mask, Pause);
	phylink_set(mask, Asym_Pause);

	linkmode_and(supported, supported, mask);
	linkmode_and(state->advertising, state->advertising, mask);

	/* We can only operate at 2500BaseX or 1000BaseX. If requested
	 * to advertise both, only report advertising at 2500BaseX.
	 */
	phylink_helper_basex_speed(state);
}

static const struct phylink_mac_ops mtk_phylink_ops = {
	.validate = mtk_validate,
	.mac_pcs_get_state = mtk_mac_pcs_get_state,
	.mac_an_restart = mtk_mac_an_restart,
	.mac_config = mtk_mac_config,
	.mac_link_down = mtk_mac_link_down,
	.mac_link_up = mtk_mac_link_up,
};

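/* mtk_mdio_init() expects the MDIO controller to be described as an
 * "mdio-bus" child of the ethernet node. A minimal, hypothetical device
 * tree fragment (node and PHY names are illustrative only) would be:
 *
 *	&eth {
 *		mdio-bus {
 *			#address-cells = <1>;
 *			#size-cells = <0>;
 *
 *			phy0: ethernet-phy@0 {
 *				reg = <0>;
 *			};
 *		};
 *	};
 *
 * of_mdiobus_register() then creates one PHY device per child node.
 */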
static int mtk_mdio_init(struct mtk_eth *eth)
{
	struct device_node *mii_np;
	int ret;

	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
	if (!mii_np) {
		dev_err(eth->dev, "no %s child node found", "mdio-bus");
		return -ENODEV;
	}

	if (!of_device_is_available(mii_np)) {
		ret = -ENODEV;
		goto err_put_node;
	}

	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
	if (!eth->mii_bus) {
		ret = -ENOMEM;
		goto err_put_node;
	}

	eth->mii_bus->name = "mdio";
	eth->mii_bus->read = mtk_mdio_read;
	eth->mii_bus->write = mtk_mdio_write;
	eth->mii_bus->priv = eth;
	eth->mii_bus->parent = eth->dev;

	snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
	ret = of_mdiobus_register(eth->mii_bus, mii_np);

err_put_node:
	of_node_put(mii_np);
	return ret;
}

static void mtk_mdio_cleanup(struct mtk_eth *eth)
{
	if (!eth->mii_bus)
		return;

	mdiobus_unregister(eth->mii_bus);
}

static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->tx_irq_lock, flags);
	val = mtk_r32(eth, eth->tx_int_mask_reg);
	mtk_w32(eth, val & ~mask, eth->tx_int_mask_reg);
	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}

static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->tx_irq_lock, flags);
	val = mtk_r32(eth, eth->tx_int_mask_reg);
	mtk_w32(eth, val | mask, eth->tx_int_mask_reg);
	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}

static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->rx_irq_lock, flags);
	val = mtk_r32(eth, MTK_PDMA_INT_MASK);
	mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}

static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->rx_irq_lock, flags);
	val = mtk_r32(eth, MTK_PDMA_INT_MASK);
	mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}

static int mtk_set_mac_address(struct net_device *dev, void *p)
{
	int ret = eth_mac_addr(dev, p);
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	const char *macaddr = dev->dev_addr;

	if (ret)
		return ret;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	spin_lock_bh(&mac->hw->page_lock);
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
			MT7628_SDM_MAC_ADRH);
		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
			(macaddr[4] << 8) | macaddr[5],
			MT7628_SDM_MAC_ADRL);
	} else {
		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
			MTK_GDMA_MAC_ADRH(mac->id));
		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
			(macaddr[4] << 8) | macaddr[5],
			MTK_GDMA_MAC_ADRL(mac->id));
	}
	spin_unlock_bh(&mac->hw->page_lock);

	return 0;
}

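/* The GDMA MIB block exposes 32-bit counters at fixed offsets from the
 * per-MAC counter base; rx_bytes and tx_bytes additionally have a high
 * word at base + 0x04 and base + 0x34. The accumulated 64-bit software
 * copies are published under hw_stats->syncp so that readers such as
 * mtk_get_stats64() and mtk_get_ethtool_stats() can fetch a consistent
 * snapshot with u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq().
 */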
void mtk_stats_update_mac(struct mtk_mac *mac)
{
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int base = MTK_GDM1_TX_GBCNT;
	u64 stats;

	base += hw_stats->reg_offset;

	u64_stats_update_begin(&hw_stats->syncp);

	hw_stats->rx_bytes += mtk_r32(mac->hw, base);
	stats = mtk_r32(mac->hw, base + 0x04);
	if (stats)
		hw_stats->rx_bytes += (stats << 32);
	hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
	hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
	hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
	hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
	hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
	hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
	hw_stats->rx_flow_control_packets +=
					mtk_r32(mac->hw, base + 0x24);
	hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
	hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
	hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
	stats = mtk_r32(mac->hw, base + 0x34);
	if (stats)
		hw_stats->tx_bytes += (stats << 32);
	hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
	u64_stats_update_end(&hw_stats->syncp);
}

static void mtk_stats_update(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
			continue;
		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
			mtk_stats_update_mac(eth->mac[i]);
			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
		}
	}
}

static void mtk_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *storage)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int start;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock_bh(&hw_stats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock_bh(&hw_stats->stats_lock);
		}
	}

	do {
		start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
		storage->rx_packets = hw_stats->rx_packets;
		storage->tx_packets = hw_stats->tx_packets;
		storage->rx_bytes = hw_stats->rx_bytes;
		storage->tx_bytes = hw_stats->tx_bytes;
		storage->collisions = hw_stats->tx_collisions;
		storage->rx_length_errors = hw_stats->rx_short_errors +
					    hw_stats->rx_long_errors;
		storage->rx_over_errors = hw_stats->rx_overflow;
		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
		storage->rx_errors = hw_stats->rx_checksum_errors;
		storage->tx_aborted_errors = hw_stats->tx_skip;
	} while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));

	storage->tx_errors = dev->stats.tx_errors;
	storage->rx_dropped = dev->stats.rx_dropped;
	storage->tx_dropped = dev->stats.tx_dropped;
}

static inline int mtk_max_frag_size(int mtu)
{
	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
		mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;

	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static inline int mtk_max_buf_size(int frag_size)
{
	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	WARN_ON(buf_size < MTK_MAX_RX_LENGTH);

	return buf_size;
}

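/* A worked example of the sizing math above: the fragment allocated per RX
 * slot is SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) plus the aligned
 * skb_shared_info tail that build_skb() needs; mtk_max_buf_size() then
 * recovers how many packet bytes fit by subtracting NET_SKB_PAD,
 * NET_IP_ALIGN and that same tail. The WARN_ON() guards the invariant that
 * the result never drops below MTK_MAX_RX_LENGTH.
 */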
static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
				   struct mtk_rx_dma *dma_rxd)
{
	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
}

/* the qdma core needs scratch memory to be setup */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
	dma_addr_t phy_ring_tail;
	int cnt = MTK_DMA_SIZE;
	dma_addr_t dma_addr;
	int i;

	eth->scratch_ring = dma_alloc_coherent(eth->dev,
					       cnt * sizeof(struct mtk_tx_dma),
					       &eth->phy_scratch_ring,
					       GFP_ATOMIC);
	if (unlikely(!eth->scratch_ring))
		return -ENOMEM;

	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
				    GFP_KERNEL);
	if (unlikely(!eth->scratch_head))
		return -ENOMEM;

	dma_addr = dma_map_single(eth->dev,
				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
		return -ENOMEM;

	phy_ring_tail = eth->phy_scratch_ring +
			(sizeof(struct mtk_tx_dma) * (cnt - 1));

	for (i = 0; i < cnt; i++) {
		eth->scratch_ring[i].txd1 =
					(dma_addr + (i * MTK_QDMA_PAGE_SIZE));
		if (i < cnt - 1)
			eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
				((i + 1) * sizeof(struct mtk_tx_dma)));
		eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
	}

	mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
	mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
	mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);

	return 0;
}

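/* QDMA descriptors carry physical ring addresses in txd2, so walking the
 * ring means translating between the DMA address the hardware reports and
 * the CPU-visible coherent mapping. The helpers below do that arithmetic
 * for the TX ring and its shadow PDMA ring.
 */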
static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
{
	void *ret = ring->dma;

	return ret + (desc - ring->phys);
}

static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
						    struct mtk_tx_dma *txd)
{
	int idx = txd - ring->dma;

	return &ring->buf[idx];
}

static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
				       struct mtk_tx_dma *dma)
{
	return ring->dma_pdma - ring->dma + dma;
}

static int txd_to_idx(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma)
{
	return ((void *)dma - (void *)ring->dma) / sizeof(*dma);
}

static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
{
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
			dma_unmap_single(eth->dev,
					 dma_unmap_addr(tx_buf, dma_addr0),
					 dma_unmap_len(tx_buf, dma_len0),
					 DMA_TO_DEVICE);
		} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
			dma_unmap_page(eth->dev,
				       dma_unmap_addr(tx_buf, dma_addr0),
				       dma_unmap_len(tx_buf, dma_len0),
				       DMA_TO_DEVICE);
		}
	} else {
		if (dma_unmap_len(tx_buf, dma_len0)) {
			dma_unmap_page(eth->dev,
				       dma_unmap_addr(tx_buf, dma_addr0),
				       dma_unmap_len(tx_buf, dma_len0),
				       DMA_TO_DEVICE);
		}

		if (dma_unmap_len(tx_buf, dma_len1)) {
			dma_unmap_page(eth->dev,
				       dma_unmap_addr(tx_buf, dma_addr1),
				       dma_unmap_len(tx_buf, dma_len1),
				       DMA_TO_DEVICE);
		}
	}

	tx_buf->flags = 0;
	if (tx_buf->skb &&
	    (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
		dev_kfree_skb_any(tx_buf->skb);
	tx_buf->skb = NULL;
}

static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
			 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
			 size_t size, int idx)
{
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
		dma_unmap_len_set(tx_buf, dma_len0, size);
	} else {
		if (idx & 1) {
			txd->txd3 = mapped_addr;
			txd->txd2 |= TX_DMA_PLEN1(size);
			dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len1, size);
		} else {
			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
			txd->txd1 = mapped_addr;
			txd->txd2 = TX_DMA_PLEN0(size);
			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len0, size);
		}
	}
}

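/* On QDMA hardware each buffer gets its own descriptor and the unmap info
 * lives in dma_addr0/dma_len0. On PDMA-only parts (see mtk_tx_alloc()) one
 * descriptor holds two buffers, so setup_tx_buf() stashes even-indexed
 * buffers in txd1/dma_addr0 and odd-indexed ones in txd3/dma_addr1, and
 * mtk_tx_map() advances to a fresh descriptor only every other buffer.
 */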
static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
		      int tx_num, struct mtk_tx_ring *ring, bool gso)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma *itxd, *txd;
	struct mtk_tx_dma *itxd_pdma, *txd_pdma;
	struct mtk_tx_buf *itx_buf, *tx_buf;
	dma_addr_t mapped_addr;
	unsigned int nr_frags;
	int i, n_desc = 1;
	u32 txd4 = 0, fport;
	int k = 0;

	itxd = ring->next_free;
	itxd_pdma = qdma_to_pdma(ring, itxd);
	if (itxd == ring->last_free)
		return -ENOMEM;

	/* set the forward port */
	fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
	txd4 |= fport;

	itx_buf = mtk_desc_to_tx_buf(ring, itxd);
	memset(itx_buf, 0, sizeof(*itx_buf));

	if (gso)
		txd4 |= TX_DMA_TSO;

	/* TX Checksum offload */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		txd4 |= TX_DMA_CHKSUM;

	/* VLAN header offload */
	if (skb_vlan_tag_present(skb))
		txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);

	mapped_addr = dma_map_single(eth->dev, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
		return -ENOMEM;

	WRITE_ONCE(itxd->txd1, mapped_addr);
	itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
	itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
			  MTK_TX_FLAGS_FPORT1;
	setup_tx_buf(eth, itx_buf, itxd_pdma, mapped_addr, skb_headlen(skb),
		     k++);

	/* TX SG offload */
	txd = itxd;
	txd_pdma = qdma_to_pdma(ring, txd);
	nr_frags = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		unsigned int offset = 0;
		int frag_size = skb_frag_size(frag);

		while (frag_size) {
			bool last_frag = false;
			unsigned int frag_map_size;
			bool new_desc = true;

			if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) ||
			    (i & 0x1)) {
				txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
				txd_pdma = qdma_to_pdma(ring, txd);
				if (txd == ring->last_free)
					goto err_dma;

				n_desc++;
			} else {
				new_desc = false;
			}

			frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
			mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
						       frag_map_size,
						       DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
				goto err_dma;

			if (i == nr_frags - 1 &&
			    (frag_size - frag_map_size) == 0)
				last_frag = true;

			WRITE_ONCE(txd->txd1, mapped_addr);
			WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
					       TX_DMA_PLEN0(frag_map_size) |
					       last_frag * TX_DMA_LS0));
			WRITE_ONCE(txd->txd4, fport);

			tx_buf = mtk_desc_to_tx_buf(ring, txd);
			if (new_desc)
				memset(tx_buf, 0, sizeof(*tx_buf));
			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
			tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
					 MTK_TX_FLAGS_FPORT1;

			setup_tx_buf(eth, tx_buf, txd_pdma, mapped_addr,
				     frag_map_size, k++);

			frag_size -= frag_map_size;
			offset += frag_map_size;
		}
	}

	/* store skb to cleanup */
	itx_buf->skb = skb;

	WRITE_ONCE(itxd->txd4, txd4);
	WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
				(!nr_frags * TX_DMA_LS0)));
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		if (k & 0x1)
			txd_pdma->txd2 |= TX_DMA_LS0;
		else
			txd_pdma->txd2 |= TX_DMA_LS1;
	}

	netdev_sent_queue(dev, skb->len);
	skb_tx_timestamp(skb);

	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
	atomic_sub(n_desc, &ring->free_count);

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
		    !netdev_xmit_more())
			mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
	} else {
		int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd),
					     ring->dma_size);
		mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
	}

	return 0;

err_dma:
	do {
		tx_buf = mtk_desc_to_tx_buf(ring, itxd);

		/* unmap dma */
		mtk_tx_unmap(eth, tx_buf);

		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
		if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
			itxd_pdma->txd2 = TX_DMA_DESP2_DEF;

		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
		itxd_pdma = qdma_to_pdma(ring, itxd);
	} while (itxd != txd);

	return -ENOMEM;
}

static inline int mtk_cal_txd_req(struct sk_buff *skb)
{
	int i, nfrags;
	skb_frag_t *frag;

	nfrags = 1;
	if (skb_is_gso(skb)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			nfrags += DIV_ROUND_UP(skb_frag_size(frag),
					       MTK_TX_DMA_BUF_LEN);
		}
	} else {
		nfrags += skb_shinfo(skb)->nr_frags;
	}

	return nfrags;
}

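/* For example, a non-GSO skb with a linear head and two page fragments
 * needs 1 + 2 descriptors, while a GSO skb counts each fragment in
 * MTK_TX_DMA_BUF_LEN sized slices, matching the way mtk_tx_map() splits
 * oversized fragments. mtk_start_xmit() uses this estimate to decide
 * whether the ring has room before mapping anything.
 */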
static int mtk_queue_stopped(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		if (netif_queue_stopped(eth->netdev[i]))
			return 1;
	}

	return 0;
}

static void mtk_wake_queue(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		netif_wake_queue(eth->netdev[i]);
	}
}

static void mtk_stop_queue(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		netif_stop_queue(eth->netdev[i]);
	}
}

static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct net_device_stats *stats = &dev->stats;
	bool gso = false;
	int tx_num;

	/* normally we can rely on the stack not calling this more than once,
	 * however we have 2 queues running on the same ring so we need to lock
	 * the ring access
	 */
	spin_lock(&eth->page_lock);

	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
		goto drop;

	tx_num = mtk_cal_txd_req(skb);
	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
		mtk_stop_queue(eth);
		netif_err(eth, tx_queued, dev,
			  "Tx Ring full when queue awake!\n");
		spin_unlock(&eth->page_lock);
		return NETDEV_TX_BUSY;
	}

	/* TSO: fill MSS info in tcp checksum field */
	if (skb_is_gso(skb)) {
		if (skb_cow_head(skb, 0)) {
			netif_warn(eth, tx_err, dev,
				   "GSO expand head fail.\n");
			goto drop;
		}

		if (skb_shinfo(skb)->gso_type &
				(SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
			gso = true;
			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
		}
	}

	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
		goto drop;

	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
		mtk_stop_queue(eth);

	spin_unlock(&eth->page_lock);

	return NETDEV_TX_OK;

drop:
	spin_unlock(&eth->page_lock);
	stats->tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
{
	int i;
	struct mtk_rx_ring *ring;
	int idx;

	if (!eth->hwlro)
		return &eth->rx_ring[0];

	for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
		ring = &eth->rx_ring[i];
		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
		if (ring->dma[idx].rxd2 & RX_DMA_DONE) {
			ring->calc_idx_update = true;
			return ring;
		}
	}

	return NULL;
}

static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
{
	struct mtk_rx_ring *ring;
	int i;

	if (!eth->hwlro) {
		ring = &eth->rx_ring[0];
		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
	} else {
		for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
			ring = &eth->rx_ring[i];
			if (ring->calc_idx_update) {
				ring->calc_idx_update = false;
				mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
			}
		}
	}
}

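/* RX completion swaps buffers rather than copying: a replacement fragment
 * is allocated and DMA-mapped first, the full one is handed to the stack
 * via build_skb()/napi_gro_receive(), and only then is the descriptor
 * republished with the new buffer. If any allocation fails the old buffer
 * is recycled through release_desc and the packet is counted as dropped.
 */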
static int mtk_poll_rx(struct napi_struct *napi, int budget,
		       struct mtk_eth *eth)
{
	struct mtk_rx_ring *ring;
	int idx;
	struct sk_buff *skb;
	u8 *data, *new_data;
	struct mtk_rx_dma *rxd, trxd;
	int done = 0;

	while (done < budget) {
		struct net_device *netdev;
		unsigned int pktlen;
		dma_addr_t dma_addr;
		int mac;

		ring = mtk_get_rx_ring(eth);
		if (unlikely(!ring))
			goto rx_done;

		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
		rxd = &ring->dma[idx];
		data = ring->data[idx];

		mtk_rx_get_desc(&trxd, rxd);
		if (!(trxd.rxd2 & RX_DMA_DONE))
			break;

		/* find out which mac the packet come from. values start at 1 */
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
			mac = 0;
		} else {
			mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
				RX_DMA_FPORT_MASK;
			mac--;
		}

		if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
			     !eth->netdev[mac]))
			goto release_desc;

		netdev = eth->netdev[mac];

		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
			goto release_desc;

		/* alloc new buffer */
		new_data = napi_alloc_frag(ring->frag_size);
		if (unlikely(!new_data)) {
			netdev->stats.rx_dropped++;
			goto release_desc;
		}
		dma_addr = dma_map_single(eth->dev,
					  new_data + NET_SKB_PAD +
					  eth->ip_align,
					  ring->buf_size,
					  DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
			skb_free_frag(new_data);
			netdev->stats.rx_dropped++;
			goto release_desc;
		}

		/* receive data */
		skb = build_skb(data, ring->frag_size);
		if (unlikely(!skb)) {
			skb_free_frag(new_data);
			netdev->stats.rx_dropped++;
			goto release_desc;
		}
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

		dma_unmap_single(eth->dev, trxd.rxd1,
				 ring->buf_size, DMA_FROM_DEVICE);
		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
		skb->dev = netdev;
		skb_put(skb, pktlen);
		if (trxd.rxd4 & eth->rx_dma_l4_valid)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);
		skb->protocol = eth_type_trans(skb, netdev);

		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
		    RX_DMA_VID(trxd.rxd3))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       RX_DMA_VID(trxd.rxd3));
		skb_record_rx_queue(skb, 0);
		napi_gro_receive(napi, skb);

		ring->data[idx] = new_data;
		rxd->rxd1 = (unsigned int)dma_addr;

release_desc:
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
			rxd->rxd2 = RX_DMA_LSO;
		else
			rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);

		ring->calc_idx = idx;

		done++;
	}

rx_done:
	if (done) {
		/* make sure that all changes to the dma ring are flushed before
		 * we continue
		 */
		wmb();
		mtk_update_rx_cpu_idx(eth);
	}

	return done;
}

static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
			    unsigned int *done, unsigned int *bytes)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct mtk_tx_dma *desc;
	struct sk_buff *skb;
	struct mtk_tx_buf *tx_buf;
	u32 cpu, dma;

	cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
	dma = mtk_r32(eth, MTK_QTX_DRX_PTR);

	desc = mtk_qdma_phys_to_virt(ring, cpu);

	while ((cpu != dma) && budget) {
		u32 next_cpu = desc->txd2;
		int mac = 0;

		desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
			break;

		tx_buf = mtk_desc_to_tx_buf(ring, desc);
		if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
			mac = 1;

		skb = tx_buf->skb;
		if (!skb)
			break;

		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
			bytes[mac] += skb->len;
			done[mac]++;
			budget--;
		}
		mtk_tx_unmap(eth, tx_buf);

		ring->last_free = desc;
		atomic_inc(&ring->free_count);

		cpu = next_cpu;
	}

	mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);

	return budget;
}

static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
			    unsigned int *done, unsigned int *bytes)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct mtk_tx_dma *desc;
	struct sk_buff *skb;
	struct mtk_tx_buf *tx_buf;
	u32 cpu, dma;

	cpu = ring->cpu_idx;
	dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);

	while ((cpu != dma) && budget) {
		tx_buf = &ring->buf[cpu];
		skb = tx_buf->skb;
		if (!skb)
			break;

		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
			bytes[0] += skb->len;
			done[0]++;
			budget--;
		}

		mtk_tx_unmap(eth, tx_buf);

		desc = &ring->dma[cpu];
		ring->last_free = desc;
		atomic_inc(&ring->free_count);

		cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
	}

	ring->cpu_idx = cpu;

	return budget;
}

static int mtk_poll_tx(struct mtk_eth *eth, int budget)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	unsigned int done[MTK_MAX_DEVS];
	unsigned int bytes[MTK_MAX_DEVS];
	int total = 0, i;

	memset(done, 0, sizeof(done));
	memset(bytes, 0, sizeof(bytes));

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		budget = mtk_poll_tx_qdma(eth, budget, done, bytes);
	else
		budget = mtk_poll_tx_pdma(eth, budget, done, bytes);

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i] || !done[i])
			continue;
		netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
		total += done[i];
	}

	if (mtk_queue_stopped(eth) &&
	    (atomic_read(&ring->free_count) > ring->thresh))
		mtk_wake_queue(eth);

	return total;
}

static void mtk_handle_status_irq(struct mtk_eth *eth)
{
	u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);

	if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
		mtk_stats_update(eth);
		mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
			MTK_INT_STATUS2);
	}
}

static int mtk_napi_tx(struct napi_struct *napi, int budget)
{
	struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
	u32 status, mask;
	int tx_done = 0;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		mtk_handle_status_irq(eth);
	mtk_w32(eth, MTK_TX_DONE_INT, eth->tx_int_status_reg);
	tx_done = mtk_poll_tx(eth, budget);

	if (unlikely(netif_msg_intr(eth))) {
		status = mtk_r32(eth, eth->tx_int_status_reg);
		mask = mtk_r32(eth, eth->tx_int_mask_reg);
		dev_info(eth->dev,
			 "done tx %d, intr 0x%08x/0x%x\n",
			 tx_done, status, mask);
	}

	if (tx_done == budget)
		return budget;

	status = mtk_r32(eth, eth->tx_int_status_reg);
	if (status & MTK_TX_DONE_INT)
		return budget;

	napi_complete(napi);
	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);

	return tx_done;
}

static int mtk_napi_rx(struct napi_struct *napi, int budget)
{
	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
	u32 status, mask;
	int rx_done = 0;
	int remain_budget = budget;

	mtk_handle_status_irq(eth);

poll_again:
	mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
	rx_done = mtk_poll_rx(napi, remain_budget, eth);

	if (unlikely(netif_msg_intr(eth))) {
		status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
		mask = mtk_r32(eth, MTK_PDMA_INT_MASK);
		dev_info(eth->dev,
			 "done rx %d, intr 0x%08x/0x%x\n",
			 rx_done, status, mask);
	}
	if (rx_done == remain_budget)
		return budget;

	status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
	if (status & MTK_RX_DONE_INT) {
		remain_budget -= rx_done;
		goto poll_again;
	}
	napi_complete(napi);
	mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);

	return rx_done + budget - remain_budget;
}

static int mtk_tx_alloc(struct mtk_eth *eth)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i, sz = sizeof(*ring->dma);

	ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
			    GFP_KERNEL);
	if (!ring->buf)
		goto no_tx_mem;

	ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
				       &ring->phys, GFP_ATOMIC);
	if (!ring->dma)
		goto no_tx_mem;

	for (i = 0; i < MTK_DMA_SIZE; i++) {
		int next = (i + 1) % MTK_DMA_SIZE;
		u32 next_ptr = ring->phys + next * sz;

		ring->dma[i].txd2 = next_ptr;
		ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
	}

	/* On MT7688 (PDMA only) this driver uses the ring->dma structs
	 * only as the framework. The real HW descriptors are the PDMA
	 * descriptors in ring->dma_pdma.
	 */
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
						    &ring->phys_pdma,
						    GFP_ATOMIC);
		if (!ring->dma_pdma)
			goto no_tx_mem;

		for (i = 0; i < MTK_DMA_SIZE; i++) {
			ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
			ring->dma_pdma[i].txd4 = 0;
		}
	}

	ring->dma_size = MTK_DMA_SIZE;
	atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
	ring->next_free = &ring->dma[0];
	ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
	ring->thresh = MAX_SKB_FRAGS;

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
		mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
		mtk_w32(eth,
			ring->phys + ((MTK_DMA_SIZE - 1) * sz),
			MTK_QTX_CRX_PTR);
		mtk_w32(eth,
			ring->phys + ((MTK_DMA_SIZE - 1) * sz),
			MTK_QTX_DRX_PTR);
		mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
			MTK_QTX_CFG(0));
	} else {
		mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
		mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
		mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
		mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX);
	}

	return 0;

no_tx_mem:
	return -ENOMEM;
}

static void mtk_tx_clean(struct mtk_eth *eth)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i;

	if (ring->buf) {
		for (i = 0; i < MTK_DMA_SIZE; i++)
			mtk_tx_unmap(eth, &ring->buf[i]);
		kfree(ring->buf);
		ring->buf = NULL;
	}

	if (ring->dma) {
		dma_free_coherent(eth->dev,
				  MTK_DMA_SIZE * sizeof(*ring->dma),
				  ring->dma,
				  ring->phys);
		ring->dma = NULL;
	}

	if (ring->dma_pdma) {
		dma_free_coherent(eth->dev,
				  MTK_DMA_SIZE * sizeof(*ring->dma_pdma),
				  ring->dma_pdma,
				  ring->phys_pdma);
		ring->dma_pdma = NULL;
	}
}

static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
{
	struct mtk_rx_ring *ring;
	int rx_data_len, rx_dma_size;
	int i;
	u32 offset = 0;

	if (rx_flag == MTK_RX_FLAGS_QDMA) {
		if (ring_no)
			return -EINVAL;
		ring = &eth->rx_ring_qdma;
		offset = 0x1000;
	} else {
		ring = &eth->rx_ring[ring_no];
	}

	if (rx_flag == MTK_RX_FLAGS_HWLRO) {
		rx_data_len = MTK_MAX_LRO_RX_LENGTH;
		rx_dma_size = MTK_HW_LRO_DMA_SIZE;
	} else {
		rx_data_len = ETH_DATA_LEN;
		rx_dma_size = MTK_DMA_SIZE;
	}

	ring->frag_size = mtk_max_frag_size(rx_data_len);
	ring->buf_size = mtk_max_buf_size(ring->frag_size);
	ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
			     GFP_KERNEL);
	if (!ring->data)
		return -ENOMEM;

	for (i = 0; i < rx_dma_size; i++) {
		ring->data[i] = netdev_alloc_frag(ring->frag_size);
		if (!ring->data[i])
			return -ENOMEM;
	}

	ring->dma = dma_alloc_coherent(eth->dev,
				       rx_dma_size * sizeof(*ring->dma),
				       &ring->phys, GFP_ATOMIC);
	if (!ring->dma)
		return -ENOMEM;

	for (i = 0; i < rx_dma_size; i++) {
		dma_addr_t dma_addr = dma_map_single(eth->dev,
				ring->data[i] + NET_SKB_PAD + eth->ip_align,
				ring->buf_size,
				DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
			return -ENOMEM;
		ring->dma[i].rxd1 = (unsigned int)dma_addr;

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
			ring->dma[i].rxd2 = RX_DMA_LSO;
		else
			ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
	}
	ring->dma_size = rx_dma_size;
	ring->calc_idx_update = false;
	ring->calc_idx = rx_dma_size - 1;
	ring->crx_idx_reg = MTK_PRX_CRX_IDX_CFG(ring_no);
	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no) + offset);
	mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no) + offset);
	mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg + offset);
	mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX + offset);

	return 0;
}

static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
{
	int i;

	if (ring->data && ring->dma) {
		for (i = 0; i < ring->dma_size; i++) {
			if (!ring->data[i])
				continue;
			if (!ring->dma[i].rxd1)
				continue;
			dma_unmap_single(eth->dev,
					 ring->dma[i].rxd1,
					 ring->buf_size,
					 DMA_FROM_DEVICE);
			skb_free_frag(ring->data[i]);
		}
		kfree(ring->data);
		ring->data = NULL;
	}

	if (ring->dma) {
		dma_free_coherent(eth->dev,
				  ring->dma_size * sizeof(*ring->dma),
				  ring->dma,
				  ring->phys);
		ring->dma = NULL;
	}
}

static int mtk_hwlro_rx_init(struct mtk_eth *eth)
{
	int i;
	u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
	u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;

	/* set LRO rings to auto-learn modes */
	ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;

	/* validate LRO ring */
	ring_ctrl_dw2 |= MTK_RING_VLD;

	/* set AGE timer (unit: 20us) */
	ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
	ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;

	/* set max AGG timer (unit: 20us) */
	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;

	/* set max LRO AGG count */
	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
	ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;

	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
		mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
		mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
		mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
	}

	/* IPv4 checksum update enable */
	lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;

	/* switch priority comparison to packet count mode */
	lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;

	/* bandwidth threshold setting */
	mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);

	/* auto-learn score delta setting */
	mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);

	/* set refresh timer for altering flows to 1 sec. (unit: 20us) */
	mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
		MTK_PDMA_LRO_ALT_REFRESH_TIMER);

	/* set HW LRO mode & the max aggregation count for rx packets */
	lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);

	/* the minimal remaining room of SDL0 in RXD for lro aggregation */
	lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;

	/* enable HW LRO */
	lro_ctrl_dw0 |= MTK_LRO_EN;

	mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
	mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);

	return 0;
}

static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
{
	u32 val;
	int i;

	/* relinquish lro rings, flush aggregated packets */
	mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);

	/* wait for relinquishments done */
	for (i = 0; i < 10; i++) {
		val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
		if (val & MTK_LRO_RING_RELINQUISH_DONE) {
			msleep(20);
			continue;
		}
		break;
	}

	/* invalidate lro rings */
	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
		mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));

	/* disable HW LRO */
	mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
}

static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
{
	u32 reg_val;

	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));

	/* invalidate the IP setting */
	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));

	mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));

	/* validate the IP setting */
	mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
}

static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
{
	u32 reg_val;

	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));

	/* invalidate the IP setting */
	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));

	mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
}

static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
{
	int cnt = 0;
	int i;

	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
		if (mac->hwlro_ip[i])
			cnt++;
	}

	return cnt;
}

static int mtk_hwlro_add_ipaddr(struct net_device *dev,
				struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int hwlro_idx;

	if ((fsp->flow_type != TCP_V4_FLOW) ||
	    (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
	    (fsp->location > 1))
		return -EINVAL;

	mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;

	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);

	mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);

	return 0;
}

static int mtk_hwlro_del_ipaddr(struct net_device *dev,
				struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int hwlro_idx;

	if (fsp->location > 1)
		return -EINVAL;

	mac->hwlro_ip[fsp->location] = 0;
	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;

	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);

	mtk_hwlro_inval_ipaddr(eth, hwlro_idx);

	return 0;
}

static void mtk_hwlro_netdev_disable(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int i, hwlro_idx;

	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
		mac->hwlro_ip[i] = 0;
		hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;

		mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
	}

	mac->hwlro_ip_cnt = 0;
}

static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
				    struct ethtool_rxnfc *cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;

	/* only tcp dst ipv4 is meaningful, others are meaningless */
	fsp->flow_type = TCP_V4_FLOW;
	fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
	fsp->m_u.tcp_ip4_spec.ip4dst = 0;

	fsp->h_u.tcp_ip4_spec.ip4src = 0;
	fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
	fsp->h_u.tcp_ip4_spec.psrc = 0;
	fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
	fsp->h_u.tcp_ip4_spec.pdst = 0;
	fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
	fsp->h_u.tcp_ip4_spec.tos = 0;
	fsp->m_u.tcp_ip4_spec.tos = 0xff;

	return 0;
}

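/* These rules are driven through the standard ethtool RX classification
 * interface. From userspace that is, for example (command shape shown for
 * illustration only):
 *
 *	ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.10 loc 0
 *
 * Only the TCPv4 destination IP and a location of 0 or 1 are honoured;
 * everything else in the flow spec is rejected on insert or reported back
 * as don't-care by mtk_hwlro_get_fdir_entry().
 */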
static int mtk_hwlro_get_fdir_all(struct net_device *dev,
				  struct ethtool_rxnfc *cmd,
				  u32 *rule_locs)
{
	struct mtk_mac *mac = netdev_priv(dev);
	int cnt = 0;
	int i;

	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
		if (mac->hwlro_ip[i]) {
			rule_locs[cnt] = i;
			cnt++;
		}
	}

	cmd->rule_cnt = cnt;

	return 0;
}

static netdev_features_t mtk_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	if (!(features & NETIF_F_LRO)) {
		struct mtk_mac *mac = netdev_priv(dev);
		int ip_cnt = mtk_hwlro_get_ip_cnt(mac);

		if (ip_cnt) {
			netdev_info(dev, "RX flow is programmed, LRO should keep on\n");

			features |= NETIF_F_LRO;
		}
	}

	return features;
}

static int mtk_set_features(struct net_device *dev, netdev_features_t features)
{
	int err = 0;

	if (!((dev->features ^ features) & NETIF_F_LRO))
		return 0;

	if (!(features & NETIF_F_LRO))
		mtk_hwlro_netdev_disable(dev);

	return err;
}

/* wait for DMA to finish whatever it is doing before we start using it again */
static int mtk_dma_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
			if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
			      (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
				return 0;
		} else {
			if (!(mtk_r32(eth, MTK_PDMA_GLO_CFG) &
			      (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
				return 0;
		}

		if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
			break;
	}

	dev_err(eth->dev, "DMA init timeout\n");
	return -1;
}

static int mtk_dma_init(struct mtk_eth *eth)
{
	int err;
	int i;

	if (mtk_dma_busy_wait(eth))
		return -EBUSY;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		/* QDMA needs scratch memory for internal reordering of the
		 * descriptors
		 */
		err = mtk_init_fq_dma(eth);
		if (err)
			return err;
	}

	err = mtk_tx_alloc(eth);
	if (err)
		return err;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
		if (err)
			return err;
	}

	err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
	if (err)
		return err;

	if (eth->hwlro) {
		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
			err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
			if (err)
				return err;
		}
		err = mtk_hwlro_rx_init(eth);
		if (err)
			return err;
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		/* Enable random early drop and set drop threshold
		 * automatically
		 */
		mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
			FC_THRES_MIN, MTK_QDMA_FC_THRES);
		mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
	}

	return 0;
}

static void mtk_dma_free(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++)
		if (eth->netdev[i])
			netdev_reset_queue(eth->netdev[i]);
	if (eth->scratch_ring) {
		dma_free_coherent(eth->dev,
				  MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
				  eth->scratch_ring,
				  eth->phy_scratch_ring);
		eth->scratch_ring = NULL;
		eth->phy_scratch_ring = 0;
	}
	mtk_tx_clean(eth);
	mtk_rx_clean(eth, &eth->rx_ring[0]);
	mtk_rx_clean(eth, &eth->rx_ring_qdma);

	if (eth->hwlro) {
		mtk_hwlro_rx_uninit(eth);
		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
			mtk_rx_clean(eth, &eth->rx_ring[i]);
	}

	kfree(eth->scratch_head);
}

static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	eth->netdev[mac->id]->stats.tx_errors++;
	netif_err(eth, tx_err, dev,
		  "transmit timed out\n");
	schedule_work(&eth->pending_work);
}

static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;

	if (likely(napi_schedule_prep(&eth->rx_napi))) {
		__napi_schedule(&eth->rx_napi);
		mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
	}

	return IRQ_HANDLED;
}

static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;

	if (likely(napi_schedule_prep(&eth->tx_napi))) {
		__napi_schedule(&eth->tx_napi);
		mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
	}

	return IRQ_HANDLED;
}

static irqreturn_t mtk_handle_irq(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;

	if (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT) {
		if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT)
			mtk_handle_irq_rx(irq, _eth);
	}
	if (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) {
		if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
			mtk_handle_irq_tx(irq, _eth);
	}

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mtk_poll_controller(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
	mtk_handle_irq_rx(eth->irq[2], dev);
	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
	mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
}
#endif

static int mtk_start_dma(struct mtk_eth *eth)
{
	u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
	int err;

	err = mtk_dma_init(eth);
	if (err) {
		mtk_dma_free(eth);
		return err;
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		mtk_w32(eth,
			MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
			MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO |
			MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
			MTK_RX_BT_32DWORDS,
			MTK_QDMA_GLO_CFG);

		mtk_w32(eth,
			MTK_RX_DMA_EN | rx_2b_offset |
			MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
			MTK_PDMA_GLO_CFG);
	} else {
		mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
			MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
			MTK_PDMA_GLO_CFG);
	}

	return 0;
}

static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
{
	int i;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
		return;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));

		/* default setup the forward port to send frame to PDMA */
		val &= ~0xffff;

		/* Enable RX checksum */
		val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;

		val |= config;

		mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
	}
	/* Reset and enable PSE */
	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
	mtk_w32(eth, 0, MTK_RST_GL);
}

static int mtk_open(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int err;

	err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
	if (err) {
		netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
			   err);
		return err;
	}

	/* we run 2 netdevs on the same dma ring so we only bring it up once */
	if (!refcount_read(&eth->dma_refcnt)) {
		int err = mtk_start_dma(eth);

		if (err)
			return err;

		mtk_gdm_config(eth, MTK_GDMA_TO_PDMA);

		napi_enable(&eth->tx_napi);
		napi_enable(&eth->rx_napi);
		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
		mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
		refcount_set(&eth->dma_refcnt, 1);
	} else {
		refcount_inc(&eth->dma_refcnt);
	}

	phylink_start(mac->phylink);
	netif_start_queue(dev);

	return 0;
}

static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
{
	u32 val;
	int i;

	/* stop the dma engine */
	spin_lock_bh(&eth->page_lock);
	val = mtk_r32(eth, glo_cfg);
	mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
		glo_cfg);
	spin_unlock_bh(&eth->page_lock);

	/* wait for dma stop */
	for (i = 0; i < 10; i++) {
		val = mtk_r32(eth, glo_cfg);
		if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
			msleep(20);
			continue;
		}
		break;
	}
}

static int mtk_stop(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	phylink_stop(mac->phylink);

	netif_tx_disable(dev);

	phylink_disconnect_phy(mac->phylink);

	/* only shutdown DMA if this is the last user */
	if (!refcount_dec_and_test(&eth->dma_refcnt))
		return 0;

	mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);

	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
	napi_disable(&eth->tx_napi);
	napi_disable(&eth->rx_napi);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
	mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);

	mtk_dma_free(eth);

	return 0;
}

static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
{
	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
			   reset_bits,
			   reset_bits);

	usleep_range(1000, 1100);
	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
			   reset_bits,
			   ~reset_bits);
	mdelay(10);
}

static void mtk_clk_disable(struct mtk_eth *eth)
{
	int clk;

	for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
		clk_disable_unprepare(eth->clks[clk]);
}

static int mtk_clk_enable(struct mtk_eth *eth)
{
	int clk, ret;

	for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
		ret = clk_prepare_enable(eth->clks[clk]);
		if (ret)
			goto err_disable_clks;
	}

	return 0;

err_disable_clks:
	while (--clk >= 0)
		clk_disable_unprepare(eth->clks[clk]);

	return ret;
}

static int mtk_hw_init(struct mtk_eth *eth)
{
	int i, val, ret;

	if (test_and_set_bit(MTK_HW_INIT, &eth->state))
		return 0;

	pm_runtime_enable(eth->dev);
	pm_runtime_get_sync(eth->dev);

	ret = mtk_clk_enable(eth);
	if (ret)
		goto err_disable_pm;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		ret = device_reset(eth->dev);
		if (ret) {
			dev_err(eth->dev, "MAC reset failed!\n");
			goto err_disable_pm;
		}

		/* enable interrupt delay for RX */
		mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);

		/* disable delay and normal interrupt */
		mtk_tx_irq_disable(eth, ~0);
		mtk_rx_irq_disable(eth, ~0);

		return 0;
	}

	/* Non-MT7628 handling... */
	ethsys_reset(eth, RSTCTRL_FE);
	ethsys_reset(eth, RSTCTRL_PPE);

	if (eth->pctl) {
		/* Set GE2 driving and slew rate */
		regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);

		/* set GE2 TDSEL */
		regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);

		/* set GE2 TUNE */
		regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
	}

	/* Set linkdown as the default for each GMAC. Its own MCR would be set
	 * up with the more appropriate value when mtk_mac_config call is being
	 * invoked.
	 */
	for (i = 0; i < MTK_MAC_COUNT; i++)
		mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));

	/* Indicates CDM to parse the MTK special tag from CPU
	 * which also is working out for untag packets.
	 */
	val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
	mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);

	/* Enable RX VLan Offloading */
	mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);

	/* enable interrupt delay for RX */
	mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);

	/* disable delay and normal interrupt */
	mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
	mtk_tx_irq_disable(eth, ~0);
	mtk_rx_irq_disable(eth, ~0);

	/* FE int grouping */
	mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
	mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
	mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
	mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);

	return 0;

err_disable_pm:
	pm_runtime_put_sync(eth->dev);
	pm_runtime_disable(eth->dev);

	return ret;
}

static int mtk_hw_deinit(struct mtk_eth *eth)
{
	if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
		return 0;

	mtk_clk_disable(eth);

	pm_runtime_put_sync(eth->dev);
	pm_runtime_disable(eth->dev);

	return 0;
}

static int __init mtk_init(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	const char *mac_addr;

	mac_addr = of_get_mac_address(mac->of_node);
	if (!IS_ERR(mac_addr))
		ether_addr_copy(dev->dev_addr, mac_addr);

	/* If the mac address is invalid, use random mac address */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		eth_hw_addr_random(dev);
		dev_err(eth->dev, "generated random MAC address %pM\n",
			dev->dev_addr);
	}

	return 0;
}

static void mtk_uninit(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	phylink_disconnect_phy(mac->phylink);
	mtk_tx_irq_disable(eth, ~0);
	mtk_rx_irq_disable(eth, ~0);
}

static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return phylink_mii_ioctl(mac->phylink, ifr, cmd);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static void mtk_pending_work(struct work_struct *work)
{
	struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
	int err, i;
	unsigned long restart = 0;

	rtnl_lock();

	dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);

	while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
		cpu_relax();

	dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__);
	/* stop all devices to make sure that dma is properly shut down */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		mtk_stop(eth->netdev[i]);
		__set_bit(i, &restart);
	}
	dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__);

	/* restart underlying hardware such as power, clock, pin mux
	 * and the connected phy
	 */
	mtk_hw_deinit(eth);

	if (eth->dev->pins)
		pinctrl_select_state(eth->dev->pins->p,
				     eth->dev->pins->default_state);
	mtk_hw_init(eth);

	/* restart DMA and enable IRQs */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!test_bit(i, &restart))
			continue;
		err = mtk_open(eth->netdev[i]);
		if (err) {
			netif_alert(eth, ifup, eth->netdev[i],
				    "Driver up/down cycle failed, closing device.\n");
			dev_close(eth->netdev[i]);
		}
	}

	dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);

	clear_bit_unlock(MTK_RESETTING, &eth->state);

	rtnl_unlock();
}

static int mtk_free_dev(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		free_netdev(eth->netdev[i]);
	}

	return 0;
}

static int mtk_unreg_dev(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		unregister_netdev(eth->netdev[i]);
	}

	return 0;
}

static int mtk_cleanup(struct mtk_eth *eth)
{
	mtk_unreg_dev(eth);
	mtk_free_dev(eth);
	cancel_work_sync(&eth->pending_work);

	return 0;
}

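/* ethtool handlers. Link state and autonegotiation are delegated to
 * phylink; handlers that touch the hardware return -EBUSY while a reset
 * is in progress.
 */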
static int mtk_get_link_ksettings(struct net_device *ndev,
				  struct ethtool_link_ksettings *cmd)
{
	struct mtk_mac *mac = netdev_priv(ndev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	return phylink_ethtool_ksettings_get(mac->phylink, cmd);
}

static int mtk_set_link_ksettings(struct net_device *ndev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct mtk_mac *mac = netdev_priv(ndev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	return phylink_ethtool_ksettings_set(mac->phylink, cmd);
}

static void mtk_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	struct mtk_mac *mac = netdev_priv(dev);

	strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
	strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
	info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
}

static u32 mtk_get_msglevel(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	return mac->hw->msg_enable;
}

static void mtk_set_msglevel(struct net_device *dev, u32 value)
{
	struct mtk_mac *mac = netdev_priv(dev);

	mac->hw->msg_enable = value;
}

static int mtk_nway_reset(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	if (!mac->phylink)
		return -ENOTSUPP;

	return phylink_ethtool_nway_reset(mac->phylink);
}

static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
			memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int mtk_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(mtk_ethtool_stats);
	default:
		return -EOPNOTSUPP;
	}
}

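/* Copy the hardware counters into the ethtool buffer. The
 * u64_stats_fetch_begin_irq/retry pair keeps the snapshot consistent on
 * 32-bit systems where a writer may update the counters concurrently.
 */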
static void mtk_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hwstats = mac->hw_stats;
	u64 *data_src, *data_dst;
	unsigned int start;
	int i;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock_bh(&hwstats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock_bh(&hwstats->stats_lock);
		}
	}

	data_src = (u64 *)hwstats;

	do {
		data_dst = data;
		start = u64_stats_fetch_begin_irq(&hwstats->syncp);

		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
			*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
	} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
}

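/* RX flow classification is only available with hardware LRO; the rules
 * steer flows by destination IP address.
 */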
static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			 u32 *rule_locs)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		if (dev->hw_features & NETIF_F_LRO) {
			cmd->data = MTK_MAX_RX_RING_NUM;
			ret = 0;
		}
		break;
	case ETHTOOL_GRXCLSRLCNT:
		if (dev->hw_features & NETIF_F_LRO) {
			struct mtk_mac *mac = netdev_priv(dev);

			cmd->rule_cnt = mac->hwlro_ip_cnt;
			ret = 0;
		}
		break;
	case ETHTOOL_GRXCLSRULE:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_get_fdir_entry(dev, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_get_fdir_all(dev, cmd,
						     rule_locs);
		break;
	default:
		break;
	}

	return ret;
}

static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_add_ipaddr(dev, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_del_ipaddr(dev, cmd);
		break;
	default:
		break;
	}

	return ret;
}

static const struct ethtool_ops mtk_ethtool_ops = {
	.get_link_ksettings	= mtk_get_link_ksettings,
	.set_link_ksettings	= mtk_set_link_ksettings,
	.get_drvinfo		= mtk_get_drvinfo,
	.get_msglevel		= mtk_get_msglevel,
	.set_msglevel		= mtk_set_msglevel,
	.nway_reset		= mtk_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_strings		= mtk_get_strings,
	.get_sset_count		= mtk_get_sset_count,
	.get_ethtool_stats	= mtk_get_ethtool_stats,
	.get_rxnfc		= mtk_get_rxnfc,
	.set_rxnfc		= mtk_set_rxnfc,
};

static const struct net_device_ops mtk_netdev_ops = {
	.ndo_init		= mtk_init,
	.ndo_uninit		= mtk_uninit,
	.ndo_open		= mtk_open,
	.ndo_stop		= mtk_stop,
	.ndo_start_xmit		= mtk_start_xmit,
	.ndo_set_mac_address	= mtk_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= mtk_do_ioctl,
	.ndo_tx_timeout		= mtk_tx_timeout,
	.ndo_get_stats64        = mtk_get_stats64,
	.ndo_fix_features	= mtk_fix_features,
	.ndo_set_features	= mtk_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mtk_poll_controller,
#endif
};

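/* Instantiate one MAC from a "mediatek,eth-mac" DT node: allocate the
 * netdev with its private mtk_mac, set up the counter block and create
 * the phylink instance.
 */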
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
{
	const __be32 *_id = of_get_property(np, "reg", NULL);
	phy_interface_t phy_mode;
	struct phylink *phylink;
	struct mtk_mac *mac;
	int id, err;

	if (!_id) {
		dev_err(eth->dev, "missing mac id\n");
		return -EINVAL;
	}

	id = be32_to_cpup(_id);
	if (id >= MTK_MAC_COUNT) {
		dev_err(eth->dev, "%d is not a valid mac id\n", id);
		return -EINVAL;
	}

	if (eth->netdev[id]) {
		dev_err(eth->dev, "duplicate mac id found: %d\n", id);
		return -EINVAL;
	}

	eth->netdev[id] = alloc_etherdev(sizeof(*mac));
	if (!eth->netdev[id]) {
		dev_err(eth->dev, "alloc_etherdev failed\n");
		return -ENOMEM;
	}
	mac = netdev_priv(eth->netdev[id]);
	eth->mac[id] = mac;
	mac->id = id;
	mac->hw = eth;
	mac->of_node = np;

	memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
	mac->hwlro_ip_cnt = 0;

	mac->hw_stats = devm_kzalloc(eth->dev,
				     sizeof(*mac->hw_stats),
				     GFP_KERNEL);
	if (!mac->hw_stats) {
		dev_err(eth->dev, "failed to allocate counter memory\n");
		err = -ENOMEM;
		goto free_netdev;
	}
	spin_lock_init(&mac->hw_stats->stats_lock);
	u64_stats_init(&mac->hw_stats->syncp);
	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;

	/* phylink create */
	err = of_get_phy_mode(np, &phy_mode);
	if (err) {
		dev_err(eth->dev, "incorrect phy-mode\n");
		goto free_netdev;
	}

	/* mac config is not set */
	mac->interface = PHY_INTERFACE_MODE_NA;
	mac->mode = MLO_AN_PHY;
	mac->speed = SPEED_UNKNOWN;

	mac->phylink_config.dev = &eth->netdev[id]->dev;
	mac->phylink_config.type = PHYLINK_NETDEV;

	phylink = phylink_create(&mac->phylink_config,
				 of_fwnode_handle(mac->of_node),
				 phy_mode, &mtk_phylink_ops);
	if (IS_ERR(phylink)) {
		err = PTR_ERR(phylink);
		goto free_netdev;
	}

	mac->phylink = phylink;

	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
	eth->netdev[id]->watchdog_timeo = 5 * HZ;
	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
	eth->netdev[id]->base_addr = (unsigned long)eth->base;

	eth->netdev[id]->hw_features = eth->soc->hw_features;
	if (eth->hwlro)
		eth->netdev[id]->hw_features |= NETIF_F_LRO;

	eth->netdev[id]->vlan_features = eth->soc->hw_features &
		~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
	eth->netdev[id]->features |= eth->soc->hw_features;
	eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;

	eth->netdev[id]->irq = eth->irq[0];
	eth->netdev[id]->dev.of_node = np;

	return 0;

free_netdev:
	free_netdev(eth->netdev[id]);
	return err;
}

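/* Probe: map the frame engine registers, acquire the regmaps, IRQs and
 * clocks described by the SoC match data, initialize the hardware and
 * register one netdev per available MAC node.
 */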
static int mtk_probe(struct platform_device *pdev)
{
	struct device_node *mac_np;
	struct mtk_eth *eth;
	int err, i;

	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
	if (!eth)
		return -ENOMEM;

	eth->soc = of_device_get_match_data(&pdev->dev);

	eth->dev = &pdev->dev;
	eth->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(eth->base))
		return PTR_ERR(eth->base);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		eth->tx_int_mask_reg = MTK_QDMA_INT_MASK;
		eth->tx_int_status_reg = MTK_QDMA_INT_STATUS;
	} else {
		eth->tx_int_mask_reg = MTK_PDMA_INT_MASK;
		eth->tx_int_status_reg = MTK_PDMA_INT_STATUS;
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
		eth->ip_align = NET_IP_ALIGN;
	} else {
		eth->rx_dma_l4_valid = RX_DMA_L4_VALID;
	}

	spin_lock_init(&eth->page_lock);
	spin_lock_init(&eth->tx_irq_lock);
	spin_lock_init(&eth->rx_irq_lock);

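	/* Look up the syscon regmaps shared with other IP blocks; which
	 * handles are required depends on the SoC capability flags.
	 */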
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							      "mediatek,ethsys");
		if (IS_ERR(eth->ethsys)) {
			dev_err(&pdev->dev, "no ethsys regmap found\n");
			return PTR_ERR(eth->ethsys);
		}
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
		eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							     "mediatek,infracfg");
		if (IS_ERR(eth->infra)) {
			dev_err(&pdev->dev, "no infracfg regmap found\n");
			return PTR_ERR(eth->infra);
		}
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
		eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
					  GFP_KERNEL);
		if (!eth->sgmii)
			return -ENOMEM;

		err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node,
				     eth->soc->ana_rgc3);

		if (err)
			return err;
	}

	if (eth->soc->required_pctl) {
		eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							    "mediatek,pctl");
		if (IS_ERR(eth->pctl)) {
			dev_err(&pdev->dev, "no pctl regmap found\n");
			return PTR_ERR(eth->pctl);
		}
	}

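	/* The frame engine exposes up to three interrupt lines; SoCs with
	 * MTK_SHARED_INT deliver TX and RX events on line 0 only.
	 */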
	for (i = 0; i < 3; i++) {
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
			eth->irq[i] = eth->irq[0];
		else
			eth->irq[i] = platform_get_irq(pdev, i);
		if (eth->irq[i] < 0) {
			dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
			return -ENXIO;
		}
	}

	for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
		eth->clks[i] = devm_clk_get(eth->dev,
					    mtk_clks_source_name[i]);
		if (IS_ERR(eth->clks[i])) {
			if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
				return -EPROBE_DEFER;
			if (eth->soc->required_clks & BIT(i)) {
				dev_err(&pdev->dev, "clock %s not found\n",
					mtk_clks_source_name[i]);
				return -EINVAL;
			}
			eth->clks[i] = NULL;
		}
	}

	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
	INIT_WORK(&eth->pending_work, mtk_pending_work);

	err = mtk_hw_init(eth);
	if (err)
		return err;

	eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);

	for_each_child_of_node(pdev->dev.of_node, mac_np) {
		if (!of_device_is_compatible(mac_np,
					     "mediatek,eth-mac"))
			continue;

		if (!of_device_is_available(mac_np))
			continue;

		err = mtk_add_mac(eth, mac_np);
		if (err) {
			of_node_put(mac_np);
			goto err_deinit_hw;
		}
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
		err = devm_request_irq(eth->dev, eth->irq[0],
				       mtk_handle_irq, 0,
				       dev_name(eth->dev), eth);
	} else {
		err = devm_request_irq(eth->dev, eth->irq[1],
				       mtk_handle_irq_tx, 0,
				       dev_name(eth->dev), eth);
		if (err)
			goto err_free_dev;

		err = devm_request_irq(eth->dev, eth->irq[2],
				       mtk_handle_irq_rx, 0,
				       dev_name(eth->dev), eth);
	}
	if (err)
		goto err_free_dev;

	/* No MT7628/88 support yet */
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		err = mtk_mdio_init(eth);
		if (err)
			goto err_free_dev;
	}

	for (i = 0; i < MTK_MAX_DEVS; i++) {
		if (!eth->netdev[i])
			continue;

		err = register_netdev(eth->netdev[i]);
		if (err) {
			dev_err(eth->dev, "error bringing up device\n");
			goto err_deinit_mdio;
		}
		netif_info(eth, probe, eth->netdev[i],
			   "mediatek frame engine at 0x%08lx, irq %d\n",
			   eth->netdev[i]->base_addr, eth->irq[0]);
	}

	/* we run 2 devices on the same DMA ring so we need a dummy device
	 * for NAPI to work
	 */
	init_dummy_netdev(&eth->dummy_dev);
	netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
		       MTK_NAPI_WEIGHT);
	netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
		       MTK_NAPI_WEIGHT);

	platform_set_drvdata(pdev, eth);

	return 0;

err_deinit_mdio:
	mtk_mdio_cleanup(eth);
err_free_dev:
	mtk_free_dev(eth);
err_deinit_hw:
	mtk_hw_deinit(eth);

	return err;
}

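/* Remove: stop DMA on all registered netdevs before tearing down NAPI,
 * the netdevs and the MDIO bus.
 */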
static int mtk_remove(struct platform_device *pdev)
{
	struct mtk_eth *eth = platform_get_drvdata(pdev);
	struct mtk_mac *mac;
	int i;

	/* stop all devices to make sure that dma is properly shut down */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		mtk_stop(eth->netdev[i]);
		mac = netdev_priv(eth->netdev[i]);
		phylink_disconnect_phy(mac->phylink);
	}

	mtk_hw_deinit(eth);

	netif_napi_del(&eth->tx_napi);
	netif_napi_del(&eth->rx_napi);
	mtk_cleanup(eth);
	mtk_mdio_cleanup(eth);

	return 0;
}

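/* Per-SoC configuration: capability bits, supported hardware offloads,
 * the clocks that must be present and whether a pin-control regmap is
 * required.
 */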
static const struct mtk_soc_data mt2701_data = {
	.caps = MT7623_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7623_CLKS_BITMAP,
	.required_pctl = true,
};

static const struct mtk_soc_data mt7621_data = {
	.caps = MT7621_CAPS,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7621_CLKS_BITMAP,
	.required_pctl = false,
};

static const struct mtk_soc_data mt7622_data = {
	.ana_rgc3 = 0x2028,
	.caps = MT7622_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7622_CLKS_BITMAP,
	.required_pctl = false,
};

static const struct mtk_soc_data mt7623_data = {
	.caps = MT7623_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7623_CLKS_BITMAP,
	.required_pctl = true,
};

static const struct mtk_soc_data mt7629_data = {
	.ana_rgc3 = 0x128,
	.caps = MT7629_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7629_CLKS_BITMAP,
	.required_pctl = false,
};

static const struct mtk_soc_data rt5350_data = {
	.caps = MT7628_CAPS,
	.hw_features = MTK_HW_FEATURES_MT7628,
	.required_clks = MT7628_CLKS_BITMAP,
	.required_pctl = false,
};

const struct of_device_id of_mtk_match[] = {
	{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
	{ .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
	{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
	{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
	{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
	{ .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
	{},
};
MODULE_DEVICE_TABLE(of, of_mtk_match);

static struct platform_driver mtk_driver = {
	.probe = mtk_probe,
	.remove = mtk_remove,
	.driver = {
		.name = "mtk_soc_eth",
		.of_match_table = of_mtk_match,
	},
};

module_platform_driver(mtk_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");