1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4 ST Ethernet IPs are built around a Synopsys IP Core.
6 Copyright(C) 2007-2011 STMicroelectronics Ltd
9 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
11 Documentation available at:
12 http://www.stlinux.com
14 https://bugzilla.stlinux.com/
15 *******************************************************************************/
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/prefetch.h>
32 #include <linux/pinctrl/consumer.h>
33 #ifdef CONFIG_DEBUG_FS
34 #include <linux/debugfs.h>
35 #include <linux/seq_file.h>
36 #endif /* CONFIG_DEBUG_FS */
37 #include <linux/net_tstamp.h>
38 #include <linux/phylink.h>
39 #include <linux/udp.h>
40 #include <net/pkt_cls.h>
41 #include "stmmac_ptp.h"
43 #include <linux/reset.h>
44 #include <linux/of_mdio.h>
45 #include "dwmac1000.h"
49 #define STMMAC_ALIGN(x) __ALIGN_KERNEL(x, SMP_CACHE_BYTES)
50 #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
52 /* Module parameters */
54 static int watchdog = TX_TIMEO;
55 module_param(watchdog, int, 0644);
56 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
58 static int debug = -1;
59 module_param(debug, int, 0644);
60 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
62 static int phyaddr = -1;
63 module_param(phyaddr, int, 0444);
64 MODULE_PARM_DESC(phyaddr, "Physical device address");
66 #define STMMAC_TX_THRESH (DMA_TX_SIZE / 4)
67 #define STMMAC_RX_THRESH (DMA_RX_SIZE / 4)
69 static int flow_ctrl = FLOW_AUTO;
70 module_param(flow_ctrl, int, 0644);
71 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
73 static int pause = PAUSE_TIME;
74 module_param(pause, int, 0644);
75 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
78 static int tc = TC_DEFAULT;
79 module_param(tc, int, 0644);
80 MODULE_PARM_DESC(tc, "DMA threshold control value");
82 #define DEFAULT_BUFSIZE 1536
83 static int buf_sz = DEFAULT_BUFSIZE;
84 module_param(buf_sz, int, 0644);
85 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
87 #define STMMAC_RX_COPYBREAK 256
89 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
90 NETIF_MSG_LINK | NETIF_MSG_IFUP |
91 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
93 #define STMMAC_DEFAULT_LPI_TIMER 1000
94 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
95 module_param(eee_timer, int, 0644);
96 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
97 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
99 /* By default the driver will use the ring mode to manage tx and rx descriptors,
100 * but allow user to force to use the chain instead of the ring
102 static unsigned int chain_mode;
103 module_param(chain_mode, int, 0444);
104 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
106 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
108 #ifdef CONFIG_DEBUG_FS
109 static void stmmac_init_fs(struct net_device *dev);
110 static void stmmac_exit_fs(struct net_device *dev);
113 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
116 * stmmac_verify_args - verify the driver parameters.
117 * Description: it checks the driver parameters and set a default in case of
120 static void stmmac_verify_args(void)
122 if (unlikely(watchdog < 0))
124 if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
125 buf_sz = DEFAULT_BUFSIZE;
126 if (unlikely(flow_ctrl > 1))
127 flow_ctrl = FLOW_AUTO;
128 else if (likely(flow_ctrl < 0))
129 flow_ctrl = FLOW_OFF;
130 if (unlikely((pause < 0) || (pause > 0xffff)))
133 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
137 * stmmac_disable_all_queues - Disable all queues
138 * @priv: driver private structure
140 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
142 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
143 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
144 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
147 for (queue = 0; queue < maxq; queue++) {
148 struct stmmac_channel *ch = &priv->channel[queue];
150 if (queue < rx_queues_cnt)
151 napi_disable(&ch->rx_napi);
152 if (queue < tx_queues_cnt)
153 napi_disable(&ch->tx_napi);
158 * stmmac_enable_all_queues - Enable all queues
159 * @priv: driver private structure
161 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
163 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
164 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
165 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
168 for (queue = 0; queue < maxq; queue++) {
169 struct stmmac_channel *ch = &priv->channel[queue];
171 if (queue < rx_queues_cnt)
172 napi_enable(&ch->rx_napi);
173 if (queue < tx_queues_cnt)
174 napi_enable(&ch->tx_napi);
179 * stmmac_stop_all_queues - Stop all queues
180 * @priv: driver private structure
182 static void stmmac_stop_all_queues(struct stmmac_priv *priv)
184 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
187 for (queue = 0; queue < tx_queues_cnt; queue++)
188 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
192 * stmmac_start_all_queues - Start all queues
193 * @priv: driver private structure
195 static void stmmac_start_all_queues(struct stmmac_priv *priv)
197 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
200 for (queue = 0; queue < tx_queues_cnt; queue++)
201 netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
204 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
206 if (!test_bit(STMMAC_DOWN, &priv->state) &&
207 !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
208 queue_work(priv->wq, &priv->service_task);
211 static void stmmac_global_err(struct stmmac_priv *priv)
213 netif_carrier_off(priv->dev);
214 set_bit(STMMAC_RESET_REQUESTED, &priv->state);
215 stmmac_service_event_schedule(priv);
219 * stmmac_clk_csr_set - dynamically set the MDC clock
220 * @priv: driver private structure
221 * Description: this is to dynamically set the MDC clock according to the csr
224 * If a specific clk_csr value is passed from the platform
225 * this means that the CSR Clock Range selection cannot be
226 * changed at run-time and it is fixed (as reported in the driver
227 * documentation). Viceversa the driver will try to set the MDC
228 * clock dynamically according to the actual clock input.
230 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
234 clk_rate = clk_get_rate(priv->plat->stmmac_clk);
236 /* Platform provided default clk_csr would be assumed valid
237 * for all other cases except for the below mentioned ones.
238 * For values higher than the IEEE 802.3 specified frequency
239 * we can not estimate the proper divider as it is not known
240 * the frequency of clk_csr_i. So we do not change the default
243 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
244 if (clk_rate < CSR_F_35M)
245 priv->clk_csr = STMMAC_CSR_20_35M;
246 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
247 priv->clk_csr = STMMAC_CSR_35_60M;
248 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
249 priv->clk_csr = STMMAC_CSR_60_100M;
250 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
251 priv->clk_csr = STMMAC_CSR_100_150M;
252 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
253 priv->clk_csr = STMMAC_CSR_150_250M;
254 else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
255 priv->clk_csr = STMMAC_CSR_250_300M;
258 if (priv->plat->has_sun8i) {
259 if (clk_rate > 160000000)
260 priv->clk_csr = 0x03;
261 else if (clk_rate > 80000000)
262 priv->clk_csr = 0x02;
263 else if (clk_rate > 40000000)
264 priv->clk_csr = 0x01;
269 if (priv->plat->has_xgmac) {
270 if (clk_rate > 400000000)
272 else if (clk_rate > 350000000)
274 else if (clk_rate > 300000000)
276 else if (clk_rate > 250000000)
278 else if (clk_rate > 150000000)
285 static void print_pkt(unsigned char *buf, int len)
287 pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
288 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
291 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
293 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
296 if (tx_q->dirty_tx > tx_q->cur_tx)
297 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
299 avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
305 * stmmac_rx_dirty - Get RX queue dirty
306 * @priv: driver private structure
307 * @queue: RX queue index
309 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
311 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
314 if (rx_q->dirty_rx <= rx_q->cur_rx)
315 dirty = rx_q->cur_rx - rx_q->dirty_rx;
317 dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
323 * stmmac_enable_eee_mode - check and enter in LPI mode
324 * @priv: driver private structure
325 * Description: this function is to verify and enter in LPI mode in case of
328 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
330 u32 tx_cnt = priv->plat->tx_queues_to_use;
333 /* check if all TX queues have the work finished */
334 for (queue = 0; queue < tx_cnt; queue++) {
335 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
337 if (tx_q->dirty_tx != tx_q->cur_tx)
338 return; /* still unfinished work */
341 /* Check and enter in LPI mode */
342 if (!priv->tx_path_in_lpi_mode)
343 stmmac_set_eee_mode(priv, priv->hw,
344 priv->plat->en_tx_lpi_clockgating);
348 * stmmac_disable_eee_mode - disable and exit from LPI mode
349 * @priv: driver private structure
350 * Description: this function is to exit and disable EEE in case of
351 * LPI state is true. This is called by the xmit.
353 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
355 stmmac_reset_eee_mode(priv, priv->hw);
356 del_timer_sync(&priv->eee_ctrl_timer);
357 priv->tx_path_in_lpi_mode = false;
361 * stmmac_eee_ctrl_timer - EEE TX SW timer.
364 * if there is no data transfer and if we are not in LPI state,
365 * then MAC Transmitter can be moved to LPI state.
367 static void stmmac_eee_ctrl_timer(struct timer_list *t)
369 struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
371 stmmac_enable_eee_mode(priv);
372 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
376 * stmmac_eee_init - init EEE
377 * @priv: driver private structure
379 * if the GMAC supports the EEE (from the HW cap reg) and the phy device
380 * can also manage EEE, this function enable the LPI state and start related
383 bool stmmac_eee_init(struct stmmac_priv *priv)
385 int tx_lpi_timer = priv->tx_lpi_timer;
387 /* Using PCS we cannot dial with the phy registers at this stage
388 * so we do not support extra feature like EEE.
390 if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
391 (priv->hw->pcs == STMMAC_PCS_TBI) ||
392 (priv->hw->pcs == STMMAC_PCS_RTBI))
395 /* Check if MAC core supports the EEE feature. */
396 if (!priv->dma_cap.eee)
399 mutex_lock(&priv->lock);
401 /* Check if it needs to be deactivated */
402 if (!priv->eee_active) {
403 if (priv->eee_enabled) {
404 netdev_dbg(priv->dev, "disable EEE\n");
405 del_timer_sync(&priv->eee_ctrl_timer);
406 stmmac_set_eee_timer(priv, priv->hw, 0, tx_lpi_timer);
408 mutex_unlock(&priv->lock);
412 if (priv->eee_active && !priv->eee_enabled) {
413 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
414 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
415 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
419 mutex_unlock(&priv->lock);
420 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
424 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
425 * @priv: driver private structure
426 * @p : descriptor pointer
427 * @skb : the socket buffer
429 * This function will read timestamp from the descriptor & pass it to stack.
430 * and also perform some sanity checks.
432 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
433 struct dma_desc *p, struct sk_buff *skb)
435 struct skb_shared_hwtstamps shhwtstamp;
439 if (!priv->hwts_tx_en)
442 /* exit if skb doesn't support hw tstamp */
443 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
446 /* check tx tstamp status */
447 if (stmmac_get_tx_timestamp_status(priv, p)) {
448 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
450 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
455 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
456 shhwtstamp.hwtstamp = ns_to_ktime(ns);
458 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
459 /* pass tstamp to stack */
460 skb_tstamp_tx(skb, &shhwtstamp);
464 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
465 * @priv: driver private structure
466 * @p : descriptor pointer
467 * @np : next descriptor pointer
468 * @skb : the socket buffer
470 * This function will read received packet's timestamp from the descriptor
471 * and pass it to stack. It also perform some sanity checks.
473 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
474 struct dma_desc *np, struct sk_buff *skb)
476 struct skb_shared_hwtstamps *shhwtstamp = NULL;
477 struct dma_desc *desc = p;
480 if (!priv->hwts_rx_en)
482 /* For GMAC4, the valid timestamp is from CTX next desc. */
483 if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
486 /* Check if timestamp is available */
487 if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
488 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
489 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
490 shhwtstamp = skb_hwtstamps(skb);
491 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
492 shhwtstamp->hwtstamp = ns_to_ktime(ns);
494 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
499 * stmmac_hwtstamp_set - control hardware timestamping.
500 * @dev: device pointer.
501 * @ifr: An IOCTL specific structure, that can contain a pointer to
502 * a proprietary structure used to pass information to the driver.
504 * This function configures the MAC to enable/disable both outgoing(TX)
505 * and incoming(RX) packets time stamping based on user input.
507 * 0 on success and an appropriate -ve integer on failure.
509 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
511 struct stmmac_priv *priv = netdev_priv(dev);
512 struct hwtstamp_config config;
513 struct timespec64 now;
517 u32 ptp_over_ipv4_udp = 0;
518 u32 ptp_over_ipv6_udp = 0;
519 u32 ptp_over_ethernet = 0;
520 u32 snap_type_sel = 0;
521 u32 ts_master_en = 0;
527 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
529 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
530 netdev_alert(priv->dev, "No support for HW time stamping\n");
531 priv->hwts_tx_en = 0;
532 priv->hwts_rx_en = 0;
537 if (copy_from_user(&config, ifr->ifr_data,
541 netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
542 __func__, config.flags, config.tx_type, config.rx_filter);
544 /* reserved for future extensions */
548 if (config.tx_type != HWTSTAMP_TX_OFF &&
549 config.tx_type != HWTSTAMP_TX_ON)
553 switch (config.rx_filter) {
554 case HWTSTAMP_FILTER_NONE:
555 /* time stamp no incoming packet at all */
556 config.rx_filter = HWTSTAMP_FILTER_NONE;
559 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
560 /* PTP v1, UDP, any kind of event packet */
561 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
562 /* 'xmac' hardware can support Sync, Pdelay_Req and
563 * Pdelay_resp by setting bit14 and bits17/16 to 01
564 * This leaves Delay_Req timestamps out.
565 * Enable all events *and* general purpose message
568 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
569 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
570 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
573 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
574 /* PTP v1, UDP, Sync packet */
575 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
576 /* take time stamp for SYNC messages only */
577 ts_event_en = PTP_TCR_TSEVNTENA;
579 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
580 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
583 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
584 /* PTP v1, UDP, Delay_req packet */
585 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
586 /* take time stamp for Delay_Req messages only */
587 ts_master_en = PTP_TCR_TSMSTRENA;
588 ts_event_en = PTP_TCR_TSEVNTENA;
590 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
591 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
594 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
595 /* PTP v2, UDP, any kind of event packet */
596 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
597 ptp_v2 = PTP_TCR_TSVER2ENA;
598 /* take time stamp for all event messages */
599 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
601 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
602 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
605 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
606 /* PTP v2, UDP, Sync packet */
607 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
608 ptp_v2 = PTP_TCR_TSVER2ENA;
609 /* take time stamp for SYNC messages only */
610 ts_event_en = PTP_TCR_TSEVNTENA;
612 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
613 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
616 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
617 /* PTP v2, UDP, Delay_req packet */
618 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
619 ptp_v2 = PTP_TCR_TSVER2ENA;
620 /* take time stamp for Delay_Req messages only */
621 ts_master_en = PTP_TCR_TSMSTRENA;
622 ts_event_en = PTP_TCR_TSEVNTENA;
624 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
625 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
628 case HWTSTAMP_FILTER_PTP_V2_EVENT:
629 /* PTP v2/802.AS1 any layer, any kind of event packet */
630 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
631 ptp_v2 = PTP_TCR_TSVER2ENA;
632 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
633 ts_event_en = PTP_TCR_TSEVNTENA;
634 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
635 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
636 ptp_over_ethernet = PTP_TCR_TSIPENA;
639 case HWTSTAMP_FILTER_PTP_V2_SYNC:
640 /* PTP v2/802.AS1, any layer, Sync packet */
641 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
642 ptp_v2 = PTP_TCR_TSVER2ENA;
643 /* take time stamp for SYNC messages only */
644 ts_event_en = PTP_TCR_TSEVNTENA;
646 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
647 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
648 ptp_over_ethernet = PTP_TCR_TSIPENA;
651 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
652 /* PTP v2/802.AS1, any layer, Delay_req packet */
653 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
654 ptp_v2 = PTP_TCR_TSVER2ENA;
655 /* take time stamp for Delay_Req messages only */
656 ts_master_en = PTP_TCR_TSMSTRENA;
657 ts_event_en = PTP_TCR_TSEVNTENA;
659 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
660 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
661 ptp_over_ethernet = PTP_TCR_TSIPENA;
664 case HWTSTAMP_FILTER_NTP_ALL:
665 case HWTSTAMP_FILTER_ALL:
666 /* time stamp any incoming packet */
667 config.rx_filter = HWTSTAMP_FILTER_ALL;
668 tstamp_all = PTP_TCR_TSENALL;
675 switch (config.rx_filter) {
676 case HWTSTAMP_FILTER_NONE:
677 config.rx_filter = HWTSTAMP_FILTER_NONE;
680 /* PTP v1, UDP, any kind of event packet */
681 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
685 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
686 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
688 if (!priv->hwts_tx_en && !priv->hwts_rx_en)
689 stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
691 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
692 tstamp_all | ptp_v2 | ptp_over_ethernet |
693 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
694 ts_master_en | snap_type_sel);
695 stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
697 /* program Sub Second Increment reg */
698 stmmac_config_sub_second_increment(priv,
699 priv->ptpaddr, priv->plat->clk_ptp_rate,
701 temp = div_u64(1000000000ULL, sec_inc);
703 /* Store sub second increment and flags for later use */
704 priv->sub_second_inc = sec_inc;
705 priv->systime_flags = value;
707 /* calculate default added value:
709 * addend = (2^32)/freq_div_ratio;
710 * where, freq_div_ratio = 1e9ns/sec_inc
712 temp = (u64)(temp << 32);
713 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
714 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
716 /* initialize system time */
717 ktime_get_real_ts64(&now);
719 /* lower 32 bits of tv_sec are safe until y2106 */
720 stmmac_init_systime(priv, priv->ptpaddr,
721 (u32)now.tv_sec, now.tv_nsec);
724 memcpy(&priv->tstamp_config, &config, sizeof(config));
726 return copy_to_user(ifr->ifr_data, &config,
727 sizeof(config)) ? -EFAULT : 0;
731 * stmmac_hwtstamp_get - read hardware timestamping.
732 * @dev: device pointer.
733 * @ifr: An IOCTL specific structure, that can contain a pointer to
734 * a proprietary structure used to pass information to the driver.
736 * This function obtain the current hardware timestamping settings
739 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
741 struct stmmac_priv *priv = netdev_priv(dev);
742 struct hwtstamp_config *config = &priv->tstamp_config;
744 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
747 return copy_to_user(ifr->ifr_data, config,
748 sizeof(*config)) ? -EFAULT : 0;
752 * stmmac_init_ptp - init PTP
753 * @priv: driver private structure
754 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
755 * This is done by looking at the HW cap. register.
756 * This function also registers the ptp driver.
758 static int stmmac_init_ptp(struct stmmac_priv *priv)
760 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
762 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
766 /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
767 if (xmac && priv->dma_cap.atime_stamp)
769 /* Dwmac 3.x core with extend_desc can support adv_ts */
770 else if (priv->extend_desc && priv->dma_cap.atime_stamp)
773 if (priv->dma_cap.time_stamp)
774 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
777 netdev_info(priv->dev,
778 "IEEE 1588-2008 Advanced Timestamp supported\n");
780 priv->hwts_tx_en = 0;
781 priv->hwts_rx_en = 0;
783 stmmac_ptp_register(priv);
788 static void stmmac_release_ptp(struct stmmac_priv *priv)
790 if (priv->plat->clk_ptp_ref)
791 clk_disable_unprepare(priv->plat->clk_ptp_ref);
792 stmmac_ptp_unregister(priv);
796 * stmmac_mac_flow_ctrl - Configure flow control in all queues
797 * @priv: driver private structure
798 * Description: It is used for configuring the flow control in all queues
800 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
802 u32 tx_cnt = priv->plat->tx_queues_to_use;
804 stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
805 priv->pause, tx_cnt);
808 static void stmmac_validate(struct phylink_config *config,
809 unsigned long *supported,
810 struct phylink_link_state *state)
812 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
813 __ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
814 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
815 int tx_cnt = priv->plat->tx_queues_to_use;
816 int max_speed = priv->plat->max_speed;
818 phylink_set(mac_supported, 10baseT_Half);
819 phylink_set(mac_supported, 10baseT_Full);
820 phylink_set(mac_supported, 100baseT_Half);
821 phylink_set(mac_supported, 100baseT_Full);
822 phylink_set(mac_supported, 1000baseT_Half);
823 phylink_set(mac_supported, 1000baseT_Full);
824 phylink_set(mac_supported, 1000baseKX_Full);
826 phylink_set(mac_supported, Autoneg);
827 phylink_set(mac_supported, Pause);
828 phylink_set(mac_supported, Asym_Pause);
829 phylink_set_port_modes(mac_supported);
831 /* Cut down 1G if asked to */
832 if ((max_speed > 0) && (max_speed < 1000)) {
833 phylink_set(mask, 1000baseT_Full);
834 phylink_set(mask, 1000baseX_Full);
835 } else if (priv->plat->has_xgmac) {
836 if (!max_speed || (max_speed >= 2500)) {
837 phylink_set(mac_supported, 2500baseT_Full);
838 phylink_set(mac_supported, 2500baseX_Full);
840 if (!max_speed || (max_speed >= 5000)) {
841 phylink_set(mac_supported, 5000baseT_Full);
843 if (!max_speed || (max_speed >= 10000)) {
844 phylink_set(mac_supported, 10000baseSR_Full);
845 phylink_set(mac_supported, 10000baseLR_Full);
846 phylink_set(mac_supported, 10000baseER_Full);
847 phylink_set(mac_supported, 10000baseLRM_Full);
848 phylink_set(mac_supported, 10000baseT_Full);
849 phylink_set(mac_supported, 10000baseKX4_Full);
850 phylink_set(mac_supported, 10000baseKR_Full);
854 /* Half-Duplex can only work with single queue */
856 phylink_set(mask, 10baseT_Half);
857 phylink_set(mask, 100baseT_Half);
858 phylink_set(mask, 1000baseT_Half);
861 bitmap_and(supported, supported, mac_supported,
862 __ETHTOOL_LINK_MODE_MASK_NBITS);
863 bitmap_andnot(supported, supported, mask,
864 __ETHTOOL_LINK_MODE_MASK_NBITS);
865 bitmap_and(state->advertising, state->advertising, mac_supported,
866 __ETHTOOL_LINK_MODE_MASK_NBITS);
867 bitmap_andnot(state->advertising, state->advertising, mask,
868 __ETHTOOL_LINK_MODE_MASK_NBITS);
871 static void stmmac_mac_pcs_get_state(struct phylink_config *config,
872 struct phylink_link_state *state)
877 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
878 const struct phylink_link_state *state)
880 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
883 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
884 ctrl &= ~priv->hw->link.speed_mask;
886 if (state->interface == PHY_INTERFACE_MODE_USXGMII) {
887 switch (state->speed) {
889 ctrl |= priv->hw->link.xgmii.speed10000;
892 ctrl |= priv->hw->link.xgmii.speed5000;
895 ctrl |= priv->hw->link.xgmii.speed2500;
901 switch (state->speed) {
903 ctrl |= priv->hw->link.speed2500;
906 ctrl |= priv->hw->link.speed1000;
909 ctrl |= priv->hw->link.speed100;
912 ctrl |= priv->hw->link.speed10;
919 priv->speed = state->speed;
921 if (priv->plat->fix_mac_speed)
922 priv->plat->fix_mac_speed(priv->plat->bsp_priv, state->speed);
925 ctrl &= ~priv->hw->link.duplex;
927 ctrl |= priv->hw->link.duplex;
929 /* Flow Control operation */
931 stmmac_mac_flow_ctrl(priv, state->duplex);
933 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
936 static void stmmac_mac_an_restart(struct phylink_config *config)
941 static void stmmac_mac_link_down(struct phylink_config *config,
942 unsigned int mode, phy_interface_t interface)
944 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
946 stmmac_mac_set(priv, priv->ioaddr, false);
947 priv->eee_active = false;
948 stmmac_eee_init(priv);
949 stmmac_set_eee_pls(priv, priv->hw, false);
952 static void stmmac_mac_link_up(struct phylink_config *config,
953 unsigned int mode, phy_interface_t interface,
954 struct phy_device *phy)
956 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
958 stmmac_mac_set(priv, priv->ioaddr, true);
959 if (phy && priv->dma_cap.eee) {
960 priv->eee_active = phy_init_eee(phy, 1) >= 0;
961 priv->eee_enabled = stmmac_eee_init(priv);
962 stmmac_set_eee_pls(priv, priv->hw, true);
966 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
967 .validate = stmmac_validate,
968 .mac_pcs_get_state = stmmac_mac_pcs_get_state,
969 .mac_config = stmmac_mac_config,
970 .mac_an_restart = stmmac_mac_an_restart,
971 .mac_link_down = stmmac_mac_link_down,
972 .mac_link_up = stmmac_mac_link_up,
976 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
977 * @priv: driver private structure
978 * Description: this is to verify if the HW supports the PCS.
979 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
980 * configured for the TBI, RTBI, or SGMII PHY interface.
982 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
984 int interface = priv->plat->interface;
986 if (priv->dma_cap.pcs) {
987 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
988 (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
989 (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
990 (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
991 netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
992 priv->hw->pcs = STMMAC_PCS_RGMII;
993 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
994 netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
995 priv->hw->pcs = STMMAC_PCS_SGMII;
1001 * stmmac_init_phy - PHY initialization
1002 * @dev: net device structure
1003 * Description: it initializes the driver's PHY state, and attaches the PHY
1004 * to the mac driver.
1008 static int stmmac_init_phy(struct net_device *dev)
1010 struct stmmac_priv *priv = netdev_priv(dev);
1011 struct device_node *node;
1014 node = priv->plat->phylink_node;
1017 ret = phylink_of_phy_connect(priv->phylink, node, 0);
1019 /* Some DT bindings do not set-up the PHY handle. Let's try to
1023 int addr = priv->plat->phy_addr;
1024 struct phy_device *phydev;
1026 phydev = mdiobus_get_phy(priv->mii, addr);
1028 netdev_err(priv->dev, "no phy at addr %d\n", addr);
1032 ret = phylink_connect_phy(priv->phylink, phydev);
1038 static int stmmac_phy_setup(struct stmmac_priv *priv)
1040 struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
1041 int mode = priv->plat->phy_interface;
1042 struct phylink *phylink;
1044 priv->phylink_config.dev = &priv->dev->dev;
1045 priv->phylink_config.type = PHYLINK_NETDEV;
1047 phylink = phylink_create(&priv->phylink_config, fwnode,
1048 mode, &stmmac_phylink_mac_ops);
1049 if (IS_ERR(phylink))
1050 return PTR_ERR(phylink);
1052 priv->phylink = phylink;
1056 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1058 u32 rx_cnt = priv->plat->rx_queues_to_use;
1062 /* Display RX rings */
1063 for (queue = 0; queue < rx_cnt; queue++) {
1064 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1066 pr_info("\tRX Queue %u rings\n", queue);
1068 if (priv->extend_desc)
1069 head_rx = (void *)rx_q->dma_erx;
1071 head_rx = (void *)rx_q->dma_rx;
1073 /* Display RX ring */
1074 stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
1078 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1080 u32 tx_cnt = priv->plat->tx_queues_to_use;
1084 /* Display TX rings */
1085 for (queue = 0; queue < tx_cnt; queue++) {
1086 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1088 pr_info("\tTX Queue %d rings\n", queue);
1090 if (priv->extend_desc)
1091 head_tx = (void *)tx_q->dma_etx;
1093 head_tx = (void *)tx_q->dma_tx;
1095 stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
1099 static void stmmac_display_rings(struct stmmac_priv *priv)
1101 /* Display RX ring */
1102 stmmac_display_rx_rings(priv);
1104 /* Display TX ring */
1105 stmmac_display_tx_rings(priv);
1108 static int stmmac_set_bfsize(int mtu, int bufsize)
1112 if (mtu >= BUF_SIZE_4KiB)
1113 ret = BUF_SIZE_8KiB;
1114 else if (mtu >= BUF_SIZE_2KiB)
1115 ret = BUF_SIZE_4KiB;
1116 else if (mtu > DEFAULT_BUFSIZE)
1117 ret = BUF_SIZE_2KiB;
1119 ret = DEFAULT_BUFSIZE;
1125 * stmmac_clear_rx_descriptors - clear RX descriptors
1126 * @priv: driver private structure
1127 * @queue: RX queue index
1128 * Description: this function is called to clear the RX descriptors
1129 * in case of both basic and extended descriptors are used.
1131 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1133 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1136 /* Clear the RX descriptors */
1137 for (i = 0; i < DMA_RX_SIZE; i++)
1138 if (priv->extend_desc)
1139 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1140 priv->use_riwt, priv->mode,
1141 (i == DMA_RX_SIZE - 1),
1144 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1145 priv->use_riwt, priv->mode,
1146 (i == DMA_RX_SIZE - 1),
1151 * stmmac_clear_tx_descriptors - clear tx descriptors
1152 * @priv: driver private structure
1153 * @queue: TX queue index.
1154 * Description: this function is called to clear the TX descriptors
1155 * in case of both basic and extended descriptors are used.
1157 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1159 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1162 /* Clear the TX descriptors */
1163 for (i = 0; i < DMA_TX_SIZE; i++)
1164 if (priv->extend_desc)
1165 stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1166 priv->mode, (i == DMA_TX_SIZE - 1));
1168 stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1169 priv->mode, (i == DMA_TX_SIZE - 1));
1173 * stmmac_clear_descriptors - clear descriptors
1174 * @priv: driver private structure
1175 * Description: this function is called to clear the TX and RX descriptors
1176 * in case of both basic and extended descriptors are used.
1178 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1180 u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1181 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1184 /* Clear the RX descriptors */
1185 for (queue = 0; queue < rx_queue_cnt; queue++)
1186 stmmac_clear_rx_descriptors(priv, queue);
1188 /* Clear the TX descriptors */
1189 for (queue = 0; queue < tx_queue_cnt; queue++)
1190 stmmac_clear_tx_descriptors(priv, queue);
1194 * stmmac_init_rx_buffers - init the RX descriptor buffer.
1195 * @priv: driver private structure
1196 * @p: descriptor pointer
1197 * @i: descriptor index
1199 * @queue: RX queue index
1200 * Description: this function is called to allocate a receive buffer, perform
1201 * the DMA mapping and init the descriptor.
1203 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1204 int i, gfp_t flags, u32 queue)
1206 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1207 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1209 buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
1214 buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
1218 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1219 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
1221 buf->sec_page = NULL;
1224 buf->addr = page_pool_get_dma_addr(buf->page);
1225 stmmac_set_desc_addr(priv, p, buf->addr);
1226 if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1227 stmmac_init_desc3(priv, p);
1233 * stmmac_free_rx_buffer - free RX dma buffers
1234 * @priv: private structure
1235 * @queue: RX queue index
1238 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1240 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1241 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1244 page_pool_put_page(rx_q->page_pool, buf->page, false);
1248 page_pool_put_page(rx_q->page_pool, buf->sec_page, false);
1249 buf->sec_page = NULL;
1253 * stmmac_free_tx_buffer - free RX dma buffers
1254 * @priv: private structure
1255 * @queue: RX queue index
1258 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1260 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1262 if (tx_q->tx_skbuff_dma[i].buf) {
1263 if (tx_q->tx_skbuff_dma[i].map_as_page)
1264 dma_unmap_page(priv->device,
1265 tx_q->tx_skbuff_dma[i].buf,
1266 tx_q->tx_skbuff_dma[i].len,
1269 dma_unmap_single(priv->device,
1270 tx_q->tx_skbuff_dma[i].buf,
1271 tx_q->tx_skbuff_dma[i].len,
1275 if (tx_q->tx_skbuff[i]) {
1276 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1277 tx_q->tx_skbuff[i] = NULL;
1278 tx_q->tx_skbuff_dma[i].buf = 0;
1279 tx_q->tx_skbuff_dma[i].map_as_page = false;
1284 * init_dma_rx_desc_rings - init the RX descriptor rings
1285 * @dev: net device structure
1287 * Description: this function initializes the DMA RX descriptors
1288 * and allocates the socket buffers. It supports the chained and ring
1291 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1293 struct stmmac_priv *priv = netdev_priv(dev);
1294 u32 rx_count = priv->plat->rx_queues_to_use;
1300 bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
1304 if (bfsize < BUF_SIZE_16KiB)
1305 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1307 priv->dma_buf_sz = bfsize;
1309 /* RX INITIALIZATION */
1310 netif_dbg(priv, probe, priv->dev,
1311 "SKB addresses:\nskb\t\tskb data\tdma data\n");
1313 for (queue = 0; queue < rx_count; queue++) {
1314 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1316 netif_dbg(priv, probe, priv->dev,
1317 "(%s) dma_rx_phy=0x%08x\n", __func__,
1318 (u32)rx_q->dma_rx_phy);
1320 stmmac_clear_rx_descriptors(priv, queue);
1322 for (i = 0; i < DMA_RX_SIZE; i++) {
1325 if (priv->extend_desc)
1326 p = &((rx_q->dma_erx + i)->basic);
1328 p = rx_q->dma_rx + i;
1330 ret = stmmac_init_rx_buffers(priv, p, i, flags,
1333 goto err_init_rx_buffers;
1337 rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1339 /* Setup the chained descriptor addresses */
1340 if (priv->mode == STMMAC_CHAIN_MODE) {
1341 if (priv->extend_desc)
1342 stmmac_mode_init(priv, rx_q->dma_erx,
1343 rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
1345 stmmac_mode_init(priv, rx_q->dma_rx,
1346 rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
1354 err_init_rx_buffers:
1355 while (queue >= 0) {
1357 stmmac_free_rx_buffer(priv, queue, i);
1370 * init_dma_tx_desc_rings - init the TX descriptor rings
1371 * @dev: net device structure.
1372 * Description: this function initializes the DMA TX descriptors
1373 * and allocates the socket buffers. It supports the chained and ring
1376 static int init_dma_tx_desc_rings(struct net_device *dev)
1378 struct stmmac_priv *priv = netdev_priv(dev);
1379 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1383 for (queue = 0; queue < tx_queue_cnt; queue++) {
1384 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1386 netif_dbg(priv, probe, priv->dev,
1387 "(%s) dma_tx_phy=0x%08x\n", __func__,
1388 (u32)tx_q->dma_tx_phy);
1390 /* Setup the chained descriptor addresses */
1391 if (priv->mode == STMMAC_CHAIN_MODE) {
1392 if (priv->extend_desc)
1393 stmmac_mode_init(priv, tx_q->dma_etx,
1394 tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
1396 stmmac_mode_init(priv, tx_q->dma_tx,
1397 tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
1400 for (i = 0; i < DMA_TX_SIZE; i++) {
1402 if (priv->extend_desc)
1403 p = &((tx_q->dma_etx + i)->basic);
1405 p = tx_q->dma_tx + i;
1407 stmmac_clear_desc(priv, p);
1409 tx_q->tx_skbuff_dma[i].buf = 0;
1410 tx_q->tx_skbuff_dma[i].map_as_page = false;
1411 tx_q->tx_skbuff_dma[i].len = 0;
1412 tx_q->tx_skbuff_dma[i].last_segment = false;
1413 tx_q->tx_skbuff[i] = NULL;
1420 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1427 * init_dma_desc_rings - init the RX/TX descriptor rings
1428 * @dev: net device structure
1430 * Description: this function initializes the DMA RX/TX descriptors
1431 * and allocates the socket buffers. It supports the chained and ring
1434 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1436 struct stmmac_priv *priv = netdev_priv(dev);
1439 ret = init_dma_rx_desc_rings(dev, flags);
1443 ret = init_dma_tx_desc_rings(dev);
1445 stmmac_clear_descriptors(priv);
1447 if (netif_msg_hw(priv))
1448 stmmac_display_rings(priv);
1454 * dma_free_rx_skbufs - free RX dma buffers
1455 * @priv: private structure
1456 * @queue: RX queue index
1458 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1462 for (i = 0; i < DMA_RX_SIZE; i++)
1463 stmmac_free_rx_buffer(priv, queue, i);
1467 * dma_free_tx_skbufs - free TX dma buffers
1468 * @priv: private structure
1469 * @queue: TX queue index
1471 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1475 for (i = 0; i < DMA_TX_SIZE; i++)
1476 stmmac_free_tx_buffer(priv, queue, i);
1480 * free_dma_rx_desc_resources - free RX dma desc resources
1481 * @priv: private structure
1483 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1485 u32 rx_count = priv->plat->rx_queues_to_use;
1488 /* Free RX queue resources */
1489 for (queue = 0; queue < rx_count; queue++) {
1490 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1492 /* Release the DMA RX socket buffers */
1493 dma_free_rx_skbufs(priv, queue);
1495 /* Free DMA regions of consistent memory previously allocated */
1496 if (!priv->extend_desc)
1497 dma_free_coherent(priv->device,
1498 DMA_RX_SIZE * sizeof(struct dma_desc),
1499 rx_q->dma_rx, rx_q->dma_rx_phy);
1501 dma_free_coherent(priv->device, DMA_RX_SIZE *
1502 sizeof(struct dma_extended_desc),
1503 rx_q->dma_erx, rx_q->dma_rx_phy);
1505 kfree(rx_q->buf_pool);
1506 if (rx_q->page_pool)
1507 page_pool_destroy(rx_q->page_pool);
1512 * free_dma_tx_desc_resources - free TX dma desc resources
1513 * @priv: private structure
1515 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1517 u32 tx_count = priv->plat->tx_queues_to_use;
1520 /* Free TX queue resources */
1521 for (queue = 0; queue < tx_count; queue++) {
1522 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1524 /* Release the DMA TX socket buffers */
1525 dma_free_tx_skbufs(priv, queue);
1527 /* Free DMA regions of consistent memory previously allocated */
1528 if (!priv->extend_desc)
1529 dma_free_coherent(priv->device,
1530 DMA_TX_SIZE * sizeof(struct dma_desc),
1531 tx_q->dma_tx, tx_q->dma_tx_phy);
1533 dma_free_coherent(priv->device, DMA_TX_SIZE *
1534 sizeof(struct dma_extended_desc),
1535 tx_q->dma_etx, tx_q->dma_tx_phy);
1537 kfree(tx_q->tx_skbuff_dma);
1538 kfree(tx_q->tx_skbuff);
1543 * alloc_dma_rx_desc_resources - alloc RX resources.
1544 * @priv: private structure
1545 * Description: according to which descriptor can be used (extend or basic)
1546 * this function allocates the resources for TX and RX paths. In case of
1547 * reception, for example, it pre-allocated the RX socket buffer in order to
1548 * allow zero-copy mechanism.
1550 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1552 u32 rx_count = priv->plat->rx_queues_to_use;
1556 /* RX queues buffers and DMA */
1557 for (queue = 0; queue < rx_count; queue++) {
1558 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1559 struct page_pool_params pp_params = { 0 };
1560 unsigned int num_pages;
1562 rx_q->queue_index = queue;
1563 rx_q->priv_data = priv;
1565 pp_params.flags = PP_FLAG_DMA_MAP;
1566 pp_params.pool_size = DMA_RX_SIZE;
1567 num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
1568 pp_params.order = ilog2(num_pages);
1569 pp_params.nid = dev_to_node(priv->device);
1570 pp_params.dev = priv->device;
1571 pp_params.dma_dir = DMA_FROM_DEVICE;
1573 rx_q->page_pool = page_pool_create(&pp_params);
1574 if (IS_ERR(rx_q->page_pool)) {
1575 ret = PTR_ERR(rx_q->page_pool);
1576 rx_q->page_pool = NULL;
1580 rx_q->buf_pool = kcalloc(DMA_RX_SIZE, sizeof(*rx_q->buf_pool),
1582 if (!rx_q->buf_pool)
1585 if (priv->extend_desc) {
1586 rx_q->dma_erx = dma_alloc_coherent(priv->device,
1587 DMA_RX_SIZE * sizeof(struct dma_extended_desc),
1594 rx_q->dma_rx = dma_alloc_coherent(priv->device,
1595 DMA_RX_SIZE * sizeof(struct dma_desc),
1606 free_dma_rx_desc_resources(priv);
1612 * alloc_dma_tx_desc_resources - alloc TX resources.
1613 * @priv: private structure
1614 * Description: according to which descriptor can be used (extend or basic)
1615 * this function allocates the resources for TX and RX paths. In case of
1616 * reception, for example, it pre-allocated the RX socket buffer in order to
1617 * allow zero-copy mechanism.
1619 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1621 u32 tx_count = priv->plat->tx_queues_to_use;
1625 /* TX queues buffers and DMA */
1626 for (queue = 0; queue < tx_count; queue++) {
1627 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1629 tx_q->queue_index = queue;
1630 tx_q->priv_data = priv;
1632 tx_q->tx_skbuff_dma = kcalloc(DMA_TX_SIZE,
1633 sizeof(*tx_q->tx_skbuff_dma),
1635 if (!tx_q->tx_skbuff_dma)
1638 tx_q->tx_skbuff = kcalloc(DMA_TX_SIZE,
1639 sizeof(struct sk_buff *),
1641 if (!tx_q->tx_skbuff)
1644 if (priv->extend_desc) {
1645 tx_q->dma_etx = dma_alloc_coherent(priv->device,
1646 DMA_TX_SIZE * sizeof(struct dma_extended_desc),
1652 tx_q->dma_tx = dma_alloc_coherent(priv->device,
1653 DMA_TX_SIZE * sizeof(struct dma_desc),
1664 free_dma_tx_desc_resources(priv);
1670 * alloc_dma_desc_resources - alloc TX/RX resources.
1671 * @priv: private structure
1672 * Description: according to which descriptor can be used (extend or basic)
1673 * this function allocates the resources for TX and RX paths. In case of
1674 * reception, for example, it pre-allocated the RX socket buffer in order to
1675 * allow zero-copy mechanism.
1677 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1680 int ret = alloc_dma_rx_desc_resources(priv);
1685 ret = alloc_dma_tx_desc_resources(priv);
1691 * free_dma_desc_resources - free dma desc resources
1692 * @priv: private structure
1694 static void free_dma_desc_resources(struct stmmac_priv *priv)
1696 /* Release the DMA RX socket buffers */
1697 free_dma_rx_desc_resources(priv);
1699 /* Release the DMA TX socket buffers */
1700 free_dma_tx_desc_resources(priv);
1704 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
1705 * @priv: driver private structure
1706 * Description: It is used for enabling the rx queues in the MAC
1708 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1710 u32 rx_queues_count = priv->plat->rx_queues_to_use;
1714 for (queue = 0; queue < rx_queues_count; queue++) {
1715 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1716 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
1721 * stmmac_start_rx_dma - start RX DMA channel
1722 * @priv: driver private structure
1723 * @chan: RX channel index
1725 * This starts a RX DMA channel
1727 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1729 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1730 stmmac_start_rx(priv, priv->ioaddr, chan);
1734 * stmmac_start_tx_dma - start TX DMA channel
1735 * @priv: driver private structure
1736 * @chan: TX channel index
1738 * This starts a TX DMA channel
1740 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1742 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1743 stmmac_start_tx(priv, priv->ioaddr, chan);
1747 * stmmac_stop_rx_dma - stop RX DMA channel
1748 * @priv: driver private structure
1749 * @chan: RX channel index
1751 * This stops a RX DMA channel
1753 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1755 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1756 stmmac_stop_rx(priv, priv->ioaddr, chan);
1760 * stmmac_stop_tx_dma - stop TX DMA channel
1761 * @priv: driver private structure
1762 * @chan: TX channel index
1764 * This stops a TX DMA channel
1766 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1768 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1769 stmmac_stop_tx(priv, priv->ioaddr, chan);
1773 * stmmac_start_all_dma - start all RX and TX DMA channels
1774 * @priv: driver private structure
1776 * This starts all the RX and TX DMA channels
1778 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1780 u32 rx_channels_count = priv->plat->rx_queues_to_use;
1781 u32 tx_channels_count = priv->plat->tx_queues_to_use;
1784 for (chan = 0; chan < rx_channels_count; chan++)
1785 stmmac_start_rx_dma(priv, chan);
1787 for (chan = 0; chan < tx_channels_count; chan++)
1788 stmmac_start_tx_dma(priv, chan);
1792 * stmmac_stop_all_dma - stop all RX and TX DMA channels
1793 * @priv: driver private structure
1795 * This stops the RX and TX DMA channels
1797 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1799 u32 rx_channels_count = priv->plat->rx_queues_to_use;
1800 u32 tx_channels_count = priv->plat->tx_queues_to_use;
1803 for (chan = 0; chan < rx_channels_count; chan++)
1804 stmmac_stop_rx_dma(priv, chan);
1806 for (chan = 0; chan < tx_channels_count; chan++)
1807 stmmac_stop_tx_dma(priv, chan);
1811 * stmmac_dma_operation_mode - HW DMA operation mode
1812 * @priv: driver private structure
1813 * Description: it is used for configuring the DMA operation mode register in
1814 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1816 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1818 u32 rx_channels_count = priv->plat->rx_queues_to_use;
1819 u32 tx_channels_count = priv->plat->tx_queues_to_use;
1820 int rxfifosz = priv->plat->rx_fifo_size;
1821 int txfifosz = priv->plat->tx_fifo_size;
1828 rxfifosz = priv->dma_cap.rx_fifo_size;
1830 txfifosz = priv->dma_cap.tx_fifo_size;
1832 /* Adjust for real per queue fifo size */
1833 rxfifosz /= rx_channels_count;
1834 txfifosz /= tx_channels_count;
1836 if (priv->plat->force_thresh_dma_mode) {
1839 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1841 * In case of GMAC, SF mode can be enabled
1842 * to perform the TX COE in HW. This depends on:
1843 * 1) TX COE if actually supported
1844 * 2) There is no bugged Jumbo frame support
1845 * that needs to not insert csum in the TDES.
1847 txmode = SF_DMA_MODE;
1848 rxmode = SF_DMA_MODE;
1849 priv->xstats.threshold = SF_DMA_MODE;
1852 rxmode = SF_DMA_MODE;
1855 /* configure all channels */
1856 for (chan = 0; chan < rx_channels_count; chan++) {
1857 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1859 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1861 stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
1865 for (chan = 0; chan < tx_channels_count; chan++) {
1866 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1868 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1874 * stmmac_tx_clean - to manage the transmission completion
1875 * @priv: driver private structure
1876 * @queue: TX queue index
1877 * Description: it reclaims the transmit resources after transmission completes.
1879 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
1881 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1882 unsigned int bytes_compl = 0, pkts_compl = 0;
1883 unsigned int entry, count = 0;
1885 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
1887 priv->xstats.tx_clean++;
1889 entry = tx_q->dirty_tx;
1890 while ((entry != tx_q->cur_tx) && (count < budget)) {
1891 struct sk_buff *skb = tx_q->tx_skbuff[entry];
1895 if (priv->extend_desc)
1896 p = (struct dma_desc *)(tx_q->dma_etx + entry);
1898 p = tx_q->dma_tx + entry;
1900 status = stmmac_tx_status(priv, &priv->dev->stats,
1901 &priv->xstats, p, priv->ioaddr);
1902 /* Check if the descriptor is owned by the DMA */
1903 if (unlikely(status & tx_dma_own))
1908 /* Make sure descriptor fields are read after reading
1913 /* Just consider the last segment and ...*/
1914 if (likely(!(status & tx_not_ls))) {
1915 /* ... verify the status error condition */
1916 if (unlikely(status & tx_err)) {
1917 priv->dev->stats.tx_errors++;
1919 priv->dev->stats.tx_packets++;
1920 priv->xstats.tx_pkt_n++;
1922 stmmac_get_tx_hwtstamp(priv, p, skb);
1925 if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1926 if (tx_q->tx_skbuff_dma[entry].map_as_page)
1927 dma_unmap_page(priv->device,
1928 tx_q->tx_skbuff_dma[entry].buf,
1929 tx_q->tx_skbuff_dma[entry].len,
1932 dma_unmap_single(priv->device,
1933 tx_q->tx_skbuff_dma[entry].buf,
1934 tx_q->tx_skbuff_dma[entry].len,
1936 tx_q->tx_skbuff_dma[entry].buf = 0;
1937 tx_q->tx_skbuff_dma[entry].len = 0;
1938 tx_q->tx_skbuff_dma[entry].map_as_page = false;
1941 stmmac_clean_desc3(priv, tx_q, p);
1943 tx_q->tx_skbuff_dma[entry].last_segment = false;
1944 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1946 if (likely(skb != NULL)) {
1948 bytes_compl += skb->len;
1949 dev_consume_skb_any(skb);
1950 tx_q->tx_skbuff[entry] = NULL;
1953 stmmac_release_tx_desc(priv, p, priv->mode);
1955 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1957 tx_q->dirty_tx = entry;
1959 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1960 pkts_compl, bytes_compl);
1962 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1964 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1966 netif_dbg(priv, tx_done, priv->dev,
1967 "%s: restart transmit\n", __func__);
1968 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1971 if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1972 stmmac_enable_eee_mode(priv);
1973 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1976 /* We still have pending packets, let's call for a new scheduling */
1977 if (tx_q->dirty_tx != tx_q->cur_tx)
1978 mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
1980 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
1986 * stmmac_tx_err - to manage the tx error
1987 * @priv: driver private structure
1988 * @chan: channel index
1989 * Description: it cleans the descriptors and restarts the transmission
1990 * in case of transmission errors.
1992 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1994 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1997 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1999 stmmac_stop_tx_dma(priv, chan);
2000 dma_free_tx_skbufs(priv, chan);
2001 for (i = 0; i < DMA_TX_SIZE; i++)
2002 if (priv->extend_desc)
2003 stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
2004 priv->mode, (i == DMA_TX_SIZE - 1));
2006 stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
2007 priv->mode, (i == DMA_TX_SIZE - 1));
2011 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
2012 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2013 tx_q->dma_tx_phy, chan);
2014 stmmac_start_tx_dma(priv, chan);
2016 priv->dev->stats.tx_errors++;
2017 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2021 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2022 * @priv: driver private structure
2023 * @txmode: TX operating mode
2024 * @rxmode: RX operating mode
2025 * @chan: channel index
2026 * Description: it is used for configuring of the DMA operation mode in
2027 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2030 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2031 u32 rxmode, u32 chan)
2033 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2034 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2035 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2036 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2037 int rxfifosz = priv->plat->rx_fifo_size;
2038 int txfifosz = priv->plat->tx_fifo_size;
2041 rxfifosz = priv->dma_cap.rx_fifo_size;
2043 txfifosz = priv->dma_cap.tx_fifo_size;
2045 /* Adjust for real per queue fifo size */
2046 rxfifosz /= rx_channels_count;
2047 txfifosz /= tx_channels_count;
2049 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2050 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2053 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2057 ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2058 priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2059 if (ret && (ret != -EINVAL)) {
2060 stmmac_global_err(priv);
2067 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
2069 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2070 &priv->xstats, chan);
2071 struct stmmac_channel *ch = &priv->channel[chan];
2073 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2074 if (napi_schedule_prep(&ch->rx_napi)) {
2075 stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2076 __napi_schedule_irqoff(&ch->rx_napi);
2077 status |= handle_tx;
2081 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use))
2082 napi_schedule_irqoff(&ch->tx_napi);
2088 * stmmac_dma_interrupt - DMA ISR
2089 * @priv: driver private structure
2090 * Description: this is the DMA ISR. It is called by the main ISR.
2091 * It calls the dwmac dma routine and schedule poll method in case of some
2094 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2096 u32 tx_channel_count = priv->plat->tx_queues_to_use;
2097 u32 rx_channel_count = priv->plat->rx_queues_to_use;
2098 u32 channels_to_check = tx_channel_count > rx_channel_count ?
2099 tx_channel_count : rx_channel_count;
2101 int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2103 /* Make sure we never check beyond our status buffer. */
2104 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2105 channels_to_check = ARRAY_SIZE(status);
2107 for (chan = 0; chan < channels_to_check; chan++)
2108 status[chan] = stmmac_napi_check(priv, chan);
2110 for (chan = 0; chan < tx_channel_count; chan++) {
2111 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2112 /* Try to bump up the dma threshold on this failure */
2113 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2116 if (priv->plat->force_thresh_dma_mode)
2117 stmmac_set_dma_operation_mode(priv,
2122 stmmac_set_dma_operation_mode(priv,
2126 priv->xstats.threshold = tc;
2128 } else if (unlikely(status[chan] == tx_hard_error)) {
2129 stmmac_tx_err(priv, chan);
2135 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2136 * @priv: driver private structure
2137 * Description: this masks the MMC irq, in fact, the counters are managed in SW.
2139 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2141 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2142 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2144 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2146 if (priv->dma_cap.rmon) {
2147 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2148 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2150 netdev_info(priv->dev, "No MAC Management Counters available\n");
2154 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2155 * @priv: driver private structure
2157 * new GMAC chip generations have a new register to indicate the
2158 * presence of the optional feature/functions.
2159 * This can be also used to override the value passed through the
2160 * platform and necessary for old MAC10/100 and GMAC chips.
2162 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2164 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2168 * stmmac_check_ether_addr - check if the MAC addr is valid
2169 * @priv: driver private structure
2171 * it is to verify if the MAC address is valid, in case of failures it
2172 * generates a random MAC address
2174 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2176 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2177 stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2178 if (!is_valid_ether_addr(priv->dev->dev_addr))
2179 eth_hw_addr_random(priv->dev);
2180 dev_info(priv->device, "device MAC address %pM\n",
2181 priv->dev->dev_addr);
2186 * stmmac_init_dma_engine - DMA init.
2187 * @priv: driver private structure
2189 * It inits the DMA invoking the specific MAC/GMAC callback.
2190 * Some DMA parameters can be passed from the platform;
2191 * in case of these are not passed a default is kept for the MAC or GMAC.
2193 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2195 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2196 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2197 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2198 struct stmmac_rx_queue *rx_q;
2199 struct stmmac_tx_queue *tx_q;
2204 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2205 dev_err(priv->device, "Invalid DMA configuration\n");
2209 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2212 ret = stmmac_reset(priv, priv->ioaddr);
2214 dev_err(priv->device, "Failed to reset the dma\n");
2218 /* DMA Configuration */
2219 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2221 if (priv->plat->axi)
2222 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2224 /* DMA CSR Channel configuration */
2225 for (chan = 0; chan < dma_csr_ch; chan++)
2226 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2228 /* DMA RX Channel Configuration */
2229 for (chan = 0; chan < rx_channels_count; chan++) {
2230 rx_q = &priv->rx_queue[chan];
2232 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2233 rx_q->dma_rx_phy, chan);
2235 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2236 (DMA_RX_SIZE * sizeof(struct dma_desc));
2237 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2238 rx_q->rx_tail_addr, chan);
2241 /* DMA TX Channel Configuration */
2242 for (chan = 0; chan < tx_channels_count; chan++) {
2243 tx_q = &priv->tx_queue[chan];
2245 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2246 tx_q->dma_tx_phy, chan);
2248 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2249 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2250 tx_q->tx_tail_addr, chan);
2256 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2258 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2260 mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
2264 * stmmac_tx_timer - mitigation sw timer for tx.
2265 * @data: data pointer
2267 * This is the timer handler to directly invoke the stmmac_tx_clean.
2269 static void stmmac_tx_timer(struct timer_list *t)
2271 struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
2272 struct stmmac_priv *priv = tx_q->priv_data;
2273 struct stmmac_channel *ch;
2275 ch = &priv->channel[tx_q->queue_index];
2278 * If NAPI is already running we can miss some events. Let's rearm
2279 * the timer and try again.
2281 if (likely(napi_schedule_prep(&ch->tx_napi)))
2282 __napi_schedule(&ch->tx_napi);
2284 mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
2288 * stmmac_init_coalesce - init mitigation options.
2289 * @priv: driver private structure
2291 * This inits the coalesce parameters: i.e. timer rate,
2292 * timer handler and default threshold used for enabling the
2293 * interrupt on completion bit.
2295 static void stmmac_init_coalesce(struct stmmac_priv *priv)
2297 u32 tx_channel_count = priv->plat->tx_queues_to_use;
2300 priv->tx_coal_frames = STMMAC_TX_FRAMES;
2301 priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2302 priv->rx_coal_frames = STMMAC_RX_FRAMES;
2304 for (chan = 0; chan < tx_channel_count; chan++) {
2305 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2307 timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
2311 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2313 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2314 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2317 /* set TX ring length */
2318 for (chan = 0; chan < tx_channels_count; chan++)
2319 stmmac_set_tx_ring_len(priv, priv->ioaddr,
2320 (DMA_TX_SIZE - 1), chan);
2322 /* set RX ring length */
2323 for (chan = 0; chan < rx_channels_count; chan++)
2324 stmmac_set_rx_ring_len(priv, priv->ioaddr,
2325 (DMA_RX_SIZE - 1), chan);
2329 * stmmac_set_tx_queue_weight - Set TX queue weight
2330 * @priv: driver private structure
2331 * Description: It is used for setting TX queues weight
2333 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2335 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2339 for (queue = 0; queue < tx_queues_count; queue++) {
2340 weight = priv->plat->tx_queues_cfg[queue].weight;
2341 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2346 * stmmac_configure_cbs - Configure CBS in TX queue
2347 * @priv: driver private structure
2348 * Description: It is used for configuring CBS in AVB TX queues
2350 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2352 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2356 /* queue 0 is reserved for legacy traffic */
2357 for (queue = 1; queue < tx_queues_count; queue++) {
2358 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2359 if (mode_to_use == MTL_QUEUE_DCB)
2362 stmmac_config_cbs(priv, priv->hw,
2363 priv->plat->tx_queues_cfg[queue].send_slope,
2364 priv->plat->tx_queues_cfg[queue].idle_slope,
2365 priv->plat->tx_queues_cfg[queue].high_credit,
2366 priv->plat->tx_queues_cfg[queue].low_credit,
2372 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2373 * @priv: driver private structure
2374 * Description: It is used for mapping RX queues to RX dma channels
2376 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2378 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2382 for (queue = 0; queue < rx_queues_count; queue++) {
2383 chan = priv->plat->rx_queues_cfg[queue].chan;
2384 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2389 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2390 * @priv: driver private structure
2391 * Description: It is used for configuring the RX Queue Priority
2393 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2395 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2399 for (queue = 0; queue < rx_queues_count; queue++) {
2400 if (!priv->plat->rx_queues_cfg[queue].use_prio)
2403 prio = priv->plat->rx_queues_cfg[queue].prio;
2404 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2409 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2410 * @priv: driver private structure
2411 * Description: It is used for configuring the TX Queue Priority
2413 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2415 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2419 for (queue = 0; queue < tx_queues_count; queue++) {
2420 if (!priv->plat->tx_queues_cfg[queue].use_prio)
2423 prio = priv->plat->tx_queues_cfg[queue].prio;
2424 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2429 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2430 * @priv: driver private structure
2431 * Description: It is used for configuring the RX queue routing
2433 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2435 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2439 for (queue = 0; queue < rx_queues_count; queue++) {
2440 /* no specific packet type routing specified for the queue */
2441 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2444 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2445 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2449 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
2451 if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
2452 priv->rss.enable = false;
2456 if (priv->dev->features & NETIF_F_RXHASH)
2457 priv->rss.enable = true;
2459 priv->rss.enable = false;
2461 stmmac_rss_configure(priv, priv->hw, &priv->rss,
2462 priv->plat->rx_queues_to_use);
2466 * stmmac_mtl_configuration - Configure MTL
2467 * @priv: driver private structure
2468 * Description: It is used for configurring MTL
2470 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2472 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2473 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2475 if (tx_queues_count > 1)
2476 stmmac_set_tx_queue_weight(priv);
2478 /* Configure MTL RX algorithms */
2479 if (rx_queues_count > 1)
2480 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2481 priv->plat->rx_sched_algorithm);
2483 /* Configure MTL TX algorithms */
2484 if (tx_queues_count > 1)
2485 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2486 priv->plat->tx_sched_algorithm);
2488 /* Configure CBS in AVB TX queues */
2489 if (tx_queues_count > 1)
2490 stmmac_configure_cbs(priv);
2492 /* Map RX MTL to DMA channels */
2493 stmmac_rx_queue_dma_chan_map(priv);
2495 /* Enable MAC RX Queues */
2496 stmmac_mac_enable_rx_queues(priv);
2498 /* Set RX priorities */
2499 if (rx_queues_count > 1)
2500 stmmac_mac_config_rx_queues_prio(priv);
2502 /* Set TX priorities */
2503 if (tx_queues_count > 1)
2504 stmmac_mac_config_tx_queues_prio(priv);
2506 /* Set RX routing */
2507 if (rx_queues_count > 1)
2508 stmmac_mac_config_rx_queues_routing(priv);
2510 /* Receive Side Scaling */
2511 if (rx_queues_count > 1)
2512 stmmac_mac_config_rss(priv);
2515 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
2517 if (priv->dma_cap.asp) {
2518 netdev_info(priv->dev, "Enabling Safety Features\n");
2519 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
2521 netdev_info(priv->dev, "No Safety Features support found\n");
2526 * stmmac_hw_setup - setup mac in a usable state.
2527 * @dev : pointer to the device structure.
2529 * this is the main function to setup the HW in a usable state because the
2530 * dma engine is reset, the core registers are configured (e.g. AXI,
2531 * Checksum features, timers). The DMA is ready to start receiving and
2534 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2537 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2539 struct stmmac_priv *priv = netdev_priv(dev);
2540 u32 rx_cnt = priv->plat->rx_queues_to_use;
2541 u32 tx_cnt = priv->plat->tx_queues_to_use;
2545 /* DMA initialization and SW reset */
2546 ret = stmmac_init_dma_engine(priv);
2548 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2553 /* Copy the MAC addr into the HW */
2554 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2556 /* PS and related bits will be programmed according to the speed */
2557 if (priv->hw->pcs) {
2558 int speed = priv->plat->mac_port_sel_speed;
2560 if ((speed == SPEED_10) || (speed == SPEED_100) ||
2561 (speed == SPEED_1000)) {
2562 priv->hw->ps = speed;
2564 dev_warn(priv->device, "invalid port speed\n");
2569 /* Initialize the MAC Core */
2570 stmmac_core_init(priv, priv->hw, dev);
2573 stmmac_mtl_configuration(priv);
2575 /* Initialize Safety Features */
2576 stmmac_safety_feat_configuration(priv);
2578 ret = stmmac_rx_ipc(priv, priv->hw);
2580 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2581 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2582 priv->hw->rx_csum = 0;
2585 /* Enable the MAC Rx/Tx */
2586 stmmac_mac_set(priv, priv->ioaddr, true);
2588 /* Set the HW DMA mode and the COE */
2589 stmmac_dma_operation_mode(priv);
2591 stmmac_mmc_setup(priv);
2594 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2596 netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2598 ret = stmmac_init_ptp(priv);
2599 if (ret == -EOPNOTSUPP)
2600 netdev_warn(priv->dev, "PTP not supported by HW\n");
2602 netdev_warn(priv->dev, "PTP init failed\n");
2605 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2607 if (priv->use_riwt) {
2609 priv->rx_riwt = DEF_DMA_RIWT;
2611 ret = stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt);
2615 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
2617 /* set TX and RX rings length */
2618 stmmac_set_rings_length(priv);
2622 for (chan = 0; chan < tx_cnt; chan++)
2623 stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2626 /* Enable Split Header */
2627 if (priv->sph && priv->hw->rx_csum) {
2628 for (chan = 0; chan < rx_cnt; chan++)
2629 stmmac_enable_sph(priv, priv->ioaddr, 1, chan);
2632 /* VLAN Tag Insertion */
2633 if (priv->dma_cap.vlins)
2634 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
2636 /* Start the ball rolling... */
2637 stmmac_start_all_dma(priv);
2642 static void stmmac_hw_teardown(struct net_device *dev)
2644 struct stmmac_priv *priv = netdev_priv(dev);
2646 clk_disable_unprepare(priv->plat->clk_ptp_ref);
2650 * stmmac_open - open entry point of the driver
2651 * @dev : pointer to the device structure.
2653 * This function is the open entry point of the driver.
2655 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2658 static int stmmac_open(struct net_device *dev)
2660 struct stmmac_priv *priv = netdev_priv(dev);
2664 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2665 priv->hw->pcs != STMMAC_PCS_TBI &&
2666 priv->hw->pcs != STMMAC_PCS_RTBI) {
2667 ret = stmmac_init_phy(dev);
2669 netdev_err(priv->dev,
2670 "%s: Cannot attach to PHY (error: %d)\n",
2676 /* Extra statistics */
2677 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2678 priv->xstats.threshold = tc;
2680 priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2681 priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2683 ret = alloc_dma_desc_resources(priv);
2685 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2687 goto dma_desc_error;
2690 ret = init_dma_desc_rings(dev, GFP_KERNEL);
2692 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2697 ret = stmmac_hw_setup(dev, true);
2699 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2703 stmmac_init_coalesce(priv);
2705 phylink_start(priv->phylink);
2707 /* Request the IRQ lines */
2708 ret = request_irq(dev->irq, stmmac_interrupt,
2709 IRQF_SHARED, dev->name, dev);
2710 if (unlikely(ret < 0)) {
2711 netdev_err(priv->dev,
2712 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2713 __func__, dev->irq, ret);
2717 /* Request the Wake IRQ in case of another line is used for WoL */
2718 if (priv->wol_irq != dev->irq) {
2719 ret = request_irq(priv->wol_irq, stmmac_interrupt,
2720 IRQF_SHARED, dev->name, dev);
2721 if (unlikely(ret < 0)) {
2722 netdev_err(priv->dev,
2723 "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2724 __func__, priv->wol_irq, ret);
2729 /* Request the IRQ lines */
2730 if (priv->lpi_irq > 0) {
2731 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2733 if (unlikely(ret < 0)) {
2734 netdev_err(priv->dev,
2735 "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2736 __func__, priv->lpi_irq, ret);
2741 stmmac_enable_all_queues(priv);
2742 stmmac_start_all_queues(priv);
2747 if (priv->wol_irq != dev->irq)
2748 free_irq(priv->wol_irq, dev);
2750 free_irq(dev->irq, dev);
2752 phylink_stop(priv->phylink);
2754 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2755 del_timer_sync(&priv->tx_queue[chan].txtimer);
2757 stmmac_hw_teardown(dev);
2759 free_dma_desc_resources(priv);
2761 phylink_disconnect_phy(priv->phylink);
2766 * stmmac_release - close entry point of the driver
2767 * @dev : device pointer.
2769 * This is the stop entry point of the driver.
2771 static int stmmac_release(struct net_device *dev)
2773 struct stmmac_priv *priv = netdev_priv(dev);
2776 if (priv->eee_enabled)
2777 del_timer_sync(&priv->eee_ctrl_timer);
2779 /* Stop and disconnect the PHY */
2780 phylink_stop(priv->phylink);
2781 phylink_disconnect_phy(priv->phylink);
2783 stmmac_stop_all_queues(priv);
2785 stmmac_disable_all_queues(priv);
2787 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2788 del_timer_sync(&priv->tx_queue[chan].txtimer);
2790 /* Free the IRQ lines */
2791 free_irq(dev->irq, dev);
2792 if (priv->wol_irq != dev->irq)
2793 free_irq(priv->wol_irq, dev);
2794 if (priv->lpi_irq > 0)
2795 free_irq(priv->lpi_irq, dev);
2797 /* Stop TX/RX DMA and clear the descriptors */
2798 stmmac_stop_all_dma(priv);
2800 /* Release and free the Rx/Tx resources */
2801 free_dma_desc_resources(priv);
2803 /* Disable the MAC Rx/Tx */
2804 stmmac_mac_set(priv, priv->ioaddr, false);
2806 netif_carrier_off(dev);
2808 stmmac_release_ptp(priv);
2813 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
2814 struct stmmac_tx_queue *tx_q)
2816 u16 tag = 0x0, inner_tag = 0x0;
2817 u32 inner_type = 0x0;
2820 if (!priv->dma_cap.vlins)
2822 if (!skb_vlan_tag_present(skb))
2824 if (skb->vlan_proto == htons(ETH_P_8021AD)) {
2825 inner_tag = skb_vlan_tag_get(skb);
2826 inner_type = STMMAC_VLAN_INSERT;
2829 tag = skb_vlan_tag_get(skb);
2831 p = tx_q->dma_tx + tx_q->cur_tx;
2832 if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
2835 stmmac_set_tx_owner(priv, p);
2836 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2841 * stmmac_tso_allocator - close entry point of the driver
2842 * @priv: driver private structure
2843 * @des: buffer start address
2844 * @total_len: total length to fill in descriptors
2845 * @last_segmant: condition for the last descriptor
2846 * @queue: TX queue index
2848 * This function fills descriptor and request new descriptors according to
2849 * buffer length to fill
2851 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
2852 int total_len, bool last_segment, u32 queue)
2854 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2855 struct dma_desc *desc;
2859 tmp_len = total_len;
2861 while (tmp_len > 0) {
2862 dma_addr_t curr_addr;
2864 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2865 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2866 desc = tx_q->dma_tx + tx_q->cur_tx;
2868 curr_addr = des + (total_len - tmp_len);
2869 if (priv->dma_cap.addr64 <= 32)
2870 desc->des0 = cpu_to_le32(curr_addr);
2872 stmmac_set_desc_addr(priv, desc, curr_addr);
2874 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2875 TSO_MAX_BUFF_SIZE : tmp_len;
2877 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
2879 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2882 tmp_len -= TSO_MAX_BUFF_SIZE;
2887 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2888 * @skb : the socket buffer
2889 * @dev : device pointer
2890 * Description: this is the transmit function that is called on TSO frames
2891 * (support available on GMAC4 and newer chips).
2892 * Diagram below show the ring programming in case of TSO frames:
2896 * | DES0 |---> buffer1 = L2/L3/L4 header
2897 * | DES1 |---> TCP Payload (can continue on next descr...)
2898 * | DES2 |---> buffer 1 and 2 len
2899 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2905 * | DES0 | --| Split TCP Payload on Buffers 1 and 2
2907 * | DES2 | --> buffer 1 and 2 len
2911 * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
2913 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2915 struct dma_desc *desc, *first, *mss_desc = NULL;
2916 struct stmmac_priv *priv = netdev_priv(dev);
2917 int nfrags = skb_shinfo(skb)->nr_frags;
2918 u32 queue = skb_get_queue_mapping(skb);
2919 unsigned int first_entry, tx_packets;
2920 int tmp_pay_len = 0, first_tx;
2921 struct stmmac_tx_queue *tx_q;
2922 u8 proto_hdr_len, hdr;
2923 bool has_vlan, set_ic;
2928 tx_q = &priv->tx_queue[queue];
2929 first_tx = tx_q->cur_tx;
2931 /* Compute header lengths */
2932 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
2933 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
2934 hdr = sizeof(struct udphdr);
2936 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2937 hdr = tcp_hdrlen(skb);
2940 /* Desc availability based on threshold should be enough safe */
2941 if (unlikely(stmmac_tx_avail(priv, queue) <
2942 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2943 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2944 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2946 /* This is a hard error, log it. */
2947 netdev_err(priv->dev,
2948 "%s: Tx Ring full when queue awake\n",
2951 return NETDEV_TX_BUSY;
2954 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2956 mss = skb_shinfo(skb)->gso_size;
2958 /* set new MSS value if needed */
2959 if (mss != tx_q->mss) {
2960 mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2961 stmmac_set_mss(priv, mss_desc, mss);
2963 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2964 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2967 if (netif_msg_tx_queued(priv)) {
2968 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2969 __func__, hdr, proto_hdr_len, pay_len, mss);
2970 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2974 /* Check if VLAN can be inserted by HW */
2975 has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
2977 first_entry = tx_q->cur_tx;
2978 WARN_ON(tx_q->tx_skbuff[first_entry]);
2980 desc = tx_q->dma_tx + first_entry;
2984 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
2986 /* first descriptor: fill Headers on Buf1 */
2987 des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2989 if (dma_mapping_error(priv->device, des))
2992 tx_q->tx_skbuff_dma[first_entry].buf = des;
2993 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2995 if (priv->dma_cap.addr64 <= 32) {
2996 first->des0 = cpu_to_le32(des);
2998 /* Fill start of payload in buff2 of first descriptor */
3000 first->des1 = cpu_to_le32(des + proto_hdr_len);
3002 /* If needed take extra descriptors to fill the remaining payload */
3003 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
3005 stmmac_set_desc_addr(priv, first, des);
3006 tmp_pay_len = pay_len;
3007 des += proto_hdr_len;
3011 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
3013 /* Prepare fragments */
3014 for (i = 0; i < nfrags; i++) {
3015 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3017 des = skb_frag_dma_map(priv->device, frag, 0,
3018 skb_frag_size(frag),
3020 if (dma_mapping_error(priv->device, des))
3023 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
3024 (i == nfrags - 1), queue);
3026 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
3027 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
3028 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
3031 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
3033 /* Only the last descriptor gets to point to the skb. */
3034 tx_q->tx_skbuff[tx_q->cur_tx] = skb;
3036 /* Manage tx mitigation */
3037 tx_packets = (tx_q->cur_tx + 1) - first_tx;
3038 tx_q->tx_count_frames += tx_packets;
3040 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
3042 else if (!priv->tx_coal_frames)
3044 else if (tx_packets > priv->tx_coal_frames)
3046 else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
3052 desc = &tx_q->dma_tx[tx_q->cur_tx];
3053 tx_q->tx_count_frames = 0;
3054 stmmac_set_tx_ic(priv, desc);
3055 priv->xstats.tx_set_ic_bit++;
3057 stmmac_tx_timer_arm(priv, queue);
3060 /* We've used all descriptors we need for this skb, however,
3061 * advance cur_tx so that it references a fresh descriptor.
3062 * ndo_start_xmit will fill this descriptor the next time it's
3063 * called and stmmac_tx_clean may clean up to this descriptor.
3065 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
3067 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3068 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3070 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3073 dev->stats.tx_bytes += skb->len;
3074 priv->xstats.tx_tso_frames++;
3075 priv->xstats.tx_tso_nfrags += nfrags;
3077 if (priv->sarc_type)
3078 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
3080 skb_tx_timestamp(skb);
3082 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3083 priv->hwts_tx_en)) {
3084 /* declare that device is doing timestamping */
3085 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3086 stmmac_enable_tx_timestamp(priv, first);
3089 /* Complete the first descriptor before granting the DMA */
3090 stmmac_prepare_tso_tx_desc(priv, first, 1,
3093 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
3094 hdr / 4, (skb->len - proto_hdr_len));
3096 /* If context desc is used to change MSS */
3098 /* Make sure that first descriptor has been completely
3099 * written, including its own bit. This is because MSS is
3100 * actually before first descriptor, so we need to make
3101 * sure that MSS's own bit is the last thing written.
3104 stmmac_set_tx_owner(priv, mss_desc);
3107 /* The own bit must be the latest setting done when prepare the
3108 * descriptor and then barrier is needed to make sure that
3109 * all is coherent before granting the DMA engine.
3113 if (netif_msg_pktdata(priv)) {
3114 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
3115 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3116 tx_q->cur_tx, first, nfrags);
3118 stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
3120 pr_info(">>> frame to be transmitted: ");
3121 print_pkt(skb->data, skb_headlen(skb));
3124 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3126 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3127 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3129 return NETDEV_TX_OK;
3132 dev_err(priv->device, "Tx dma map failed\n");
3134 priv->dev->stats.tx_dropped++;
3135 return NETDEV_TX_OK;
3139 * stmmac_xmit - Tx entry point of the driver
3140 * @skb : the socket buffer
3141 * @dev : device pointer
3142 * Description : this is the tx entry point of the driver.
3143 * It programs the chain or the ring and supports oversized frames
3146 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3148 unsigned int first_entry, tx_packets, enh_desc;
3149 struct stmmac_priv *priv = netdev_priv(dev);
3150 unsigned int nopaged_len = skb_headlen(skb);
3151 int i, csum_insertion = 0, is_jumbo = 0;
3152 u32 queue = skb_get_queue_mapping(skb);
3153 int nfrags = skb_shinfo(skb)->nr_frags;
3154 int gso = skb_shinfo(skb)->gso_type;
3155 struct dma_desc *desc, *first;
3156 struct stmmac_tx_queue *tx_q;
3157 bool has_vlan, set_ic;
3158 int entry, first_tx;
3161 tx_q = &priv->tx_queue[queue];
3162 first_tx = tx_q->cur_tx;
3164 if (priv->tx_path_in_lpi_mode)
3165 stmmac_disable_eee_mode(priv);
3167 /* Manage oversized TCP frames for GMAC4 device */
3168 if (skb_is_gso(skb) && priv->tso) {
3169 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3170 return stmmac_tso_xmit(skb, dev);
3171 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
3172 return stmmac_tso_xmit(skb, dev);
3175 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3176 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3177 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3179 /* This is a hard error, log it. */
3180 netdev_err(priv->dev,
3181 "%s: Tx Ring full when queue awake\n",
3184 return NETDEV_TX_BUSY;
3187 /* Check if VLAN can be inserted by HW */
3188 has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
3190 entry = tx_q->cur_tx;
3191 first_entry = entry;
3192 WARN_ON(tx_q->tx_skbuff[first_entry]);
3194 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3196 if (likely(priv->extend_desc))
3197 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3199 desc = tx_q->dma_tx + entry;
3204 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
3206 enh_desc = priv->plat->enh_desc;
3207 /* To program the descriptors according to the size of the frame */
3209 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
3211 if (unlikely(is_jumbo)) {
3212 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
3213 if (unlikely(entry < 0) && (entry != -EINVAL))
3217 for (i = 0; i < nfrags; i++) {
3218 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3219 int len = skb_frag_size(frag);
3220 bool last_segment = (i == (nfrags - 1));
3222 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3223 WARN_ON(tx_q->tx_skbuff[entry]);
3225 if (likely(priv->extend_desc))
3226 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3228 desc = tx_q->dma_tx + entry;
3230 des = skb_frag_dma_map(priv->device, frag, 0, len,
3232 if (dma_mapping_error(priv->device, des))
3233 goto dma_map_err; /* should reuse desc w/o issues */
3235 tx_q->tx_skbuff_dma[entry].buf = des;
3237 stmmac_set_desc_addr(priv, desc, des);
3239 tx_q->tx_skbuff_dma[entry].map_as_page = true;
3240 tx_q->tx_skbuff_dma[entry].len = len;
3241 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3243 /* Prepare the descriptor and set the own bit too */
3244 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
3245 priv->mode, 1, last_segment, skb->len);
3248 /* Only the last descriptor gets to point to the skb. */
3249 tx_q->tx_skbuff[entry] = skb;
3251 /* According to the coalesce parameter the IC bit for the latest
3252 * segment is reset and the timer re-started to clean the tx status.
3253 * This approach takes care about the fragments: desc is the first
3254 * element in case of no SG.
3256 tx_packets = (entry + 1) - first_tx;
3257 tx_q->tx_count_frames += tx_packets;
3259 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
3261 else if (!priv->tx_coal_frames)
3263 else if (tx_packets > priv->tx_coal_frames)
3265 else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
3271 if (likely(priv->extend_desc))
3272 desc = &tx_q->dma_etx[entry].basic;
3274 desc = &tx_q->dma_tx[entry];
3276 tx_q->tx_count_frames = 0;
3277 stmmac_set_tx_ic(priv, desc);
3278 priv->xstats.tx_set_ic_bit++;
3280 stmmac_tx_timer_arm(priv, queue);
3283 /* We've used all descriptors we need for this skb, however,
3284 * advance cur_tx so that it references a fresh descriptor.
3285 * ndo_start_xmit will fill this descriptor the next time it's
3286 * called and stmmac_tx_clean may clean up to this descriptor.
3288 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3289 tx_q->cur_tx = entry;
3291 if (netif_msg_pktdata(priv)) {
3294 netdev_dbg(priv->dev,
3295 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3296 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3297 entry, first, nfrags);
3299 if (priv->extend_desc)
3300 tx_head = (void *)tx_q->dma_etx;
3302 tx_head = (void *)tx_q->dma_tx;
3304 stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
3306 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3307 print_pkt(skb->data, skb->len);
3310 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3311 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3313 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3316 dev->stats.tx_bytes += skb->len;
3318 if (priv->sarc_type)
3319 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
3321 skb_tx_timestamp(skb);
3323 /* Ready to fill the first descriptor and set the OWN bit w/o any
3324 * problems because all the descriptors are actually ready to be
3325 * passed to the DMA engine.
3327 if (likely(!is_jumbo)) {
3328 bool last_segment = (nfrags == 0);
3330 des = dma_map_single(priv->device, skb->data,
3331 nopaged_len, DMA_TO_DEVICE);
3332 if (dma_mapping_error(priv->device, des))
3335 tx_q->tx_skbuff_dma[first_entry].buf = des;
3337 stmmac_set_desc_addr(priv, first, des);
3339 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3340 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3342 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3343 priv->hwts_tx_en)) {
3344 /* declare that device is doing timestamping */
3345 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3346 stmmac_enable_tx_timestamp(priv, first);
3349 /* Prepare the first descriptor setting the OWN bit too */
3350 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3351 csum_insertion, priv->mode, 1, last_segment,
3354 stmmac_set_tx_owner(priv, first);
3357 /* The own bit must be the latest setting done when prepare the
3358 * descriptor and then barrier is needed to make sure that
3359 * all is coherent before granting the DMA engine.
3363 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3365 stmmac_enable_dma_transmission(priv, priv->ioaddr);
3367 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3368 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3370 return NETDEV_TX_OK;
3373 netdev_err(priv->dev, "Tx DMA map failed\n");
3375 priv->dev->stats.tx_dropped++;
3376 return NETDEV_TX_OK;
3379 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3381 struct vlan_ethhdr *veth;
3385 veth = (struct vlan_ethhdr *)skb->data;
3386 vlan_proto = veth->h_vlan_proto;
3388 if ((vlan_proto == htons(ETH_P_8021Q) &&
3389 dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3390 (vlan_proto == htons(ETH_P_8021AD) &&
3391 dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
3392 /* pop the vlan tag */
3393 vlanid = ntohs(veth->h_vlan_TCI);
3394 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
3395 skb_pull(skb, VLAN_HLEN);
3396 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
3401 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3403 if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3410 * stmmac_rx_refill - refill used skb preallocated buffers
3411 * @priv: driver private structure
3412 * @queue: RX queue index
3413 * Description : this is to reallocate the skb for the reception process
3414 * that is based on zero-copy.
3416 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3418 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3419 int len, dirty = stmmac_rx_dirty(priv, queue);
3420 unsigned int entry = rx_q->dirty_rx;
3422 len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
3424 while (dirty-- > 0) {
3425 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
3429 if (priv->extend_desc)
3430 p = (struct dma_desc *)(rx_q->dma_erx + entry);
3432 p = rx_q->dma_rx + entry;
3435 buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
3440 if (priv->sph && !buf->sec_page) {
3441 buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
3445 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
3447 dma_sync_single_for_device(priv->device, buf->sec_addr,
3448 len, DMA_FROM_DEVICE);
3451 buf->addr = page_pool_get_dma_addr(buf->page);
3453 /* Sync whole allocation to device. This will invalidate old
3456 dma_sync_single_for_device(priv->device, buf->addr, len,
3459 stmmac_set_desc_addr(priv, p, buf->addr);
3460 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
3461 stmmac_refill_desc3(priv, rx_q, p);
3463 rx_q->rx_count_frames++;
3464 rx_q->rx_count_frames += priv->rx_coal_frames;
3465 if (rx_q->rx_count_frames > priv->rx_coal_frames)
3466 rx_q->rx_count_frames = 0;
3468 use_rx_wd = !priv->rx_coal_frames;
3469 use_rx_wd |= rx_q->rx_count_frames > 0;
3470 if (!priv->use_riwt)
3474 stmmac_set_rx_owner(priv, p, use_rx_wd);
3476 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3478 rx_q->dirty_rx = entry;
3479 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3480 (rx_q->dirty_rx * sizeof(struct dma_desc));
3481 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
3484 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
3486 int status, unsigned int len)
3488 int ret, coe = priv->hw->rx_csum;
3489 unsigned int plen = 0, hlen = 0;
3491 /* Not first descriptor, buffer is always zero */
3492 if (priv->sph && len)
3495 /* First descriptor, get split header length */
3496 ret = stmmac_get_rx_header_len(priv, p, &hlen);
3497 if (priv->sph && hlen) {
3498 priv->xstats.rx_split_hdr_pkt_n++;
3502 /* First descriptor, not last descriptor and not split header */
3503 if (status & rx_not_ls)
3504 return priv->dma_buf_sz;
3506 plen = stmmac_get_rx_frame_len(priv, p, coe);
3508 /* First descriptor and last descriptor and not split header */
3509 return min_t(unsigned int, priv->dma_buf_sz, plen);
3512 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
3514 int status, unsigned int len)
3516 int coe = priv->hw->rx_csum;
3517 unsigned int plen = 0;
3519 /* Not split header, buffer is not available */
3523 /* Not last descriptor */
3524 if (status & rx_not_ls)
3525 return priv->dma_buf_sz;
3527 plen = stmmac_get_rx_frame_len(priv, p, coe);
3529 /* Last descriptor */
3534 * stmmac_rx - manage the receive process
3535 * @priv: driver private structure
3536 * @limit: napi bugget
3537 * @queue: RX queue index.
3538 * Description : this the function called by the napi poll method.
3539 * It gets all the frames inside the ring.
3541 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3543 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3544 struct stmmac_channel *ch = &priv->channel[queue];
3545 unsigned int count = 0, error = 0, len = 0;
3546 int status = 0, coe = priv->hw->rx_csum;
3547 unsigned int next_entry = rx_q->cur_rx;
3548 struct sk_buff *skb = NULL;
3550 if (netif_msg_rx_status(priv)) {
3553 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3554 if (priv->extend_desc)
3555 rx_head = (void *)rx_q->dma_erx;
3557 rx_head = (void *)rx_q->dma_rx;
3559 stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
3561 while (count < limit) {
3562 unsigned int buf1_len = 0, buf2_len = 0;
3563 enum pkt_hash_types hash_type;
3564 struct stmmac_rx_buffer *buf;
3565 struct dma_desc *np, *p;
3569 if (!count && rx_q->state_saved) {
3570 skb = rx_q->state.skb;
3571 error = rx_q->state.error;
3572 len = rx_q->state.len;
3574 rx_q->state_saved = false;
3587 buf = &rx_q->buf_pool[entry];
3589 if (priv->extend_desc)
3590 p = (struct dma_desc *)(rx_q->dma_erx + entry);
3592 p = rx_q->dma_rx + entry;
3594 /* read the status of the incoming frame */
3595 status = stmmac_rx_status(priv, &priv->dev->stats,
3597 /* check if managed by the DMA otherwise go ahead */
3598 if (unlikely(status & dma_own))
3601 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3602 next_entry = rx_q->cur_rx;
3604 if (priv->extend_desc)
3605 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3607 np = rx_q->dma_rx + next_entry;
3611 if (priv->extend_desc)
3612 stmmac_rx_extended_status(priv, &priv->dev->stats,
3613 &priv->xstats, rx_q->dma_erx + entry);
3614 if (unlikely(status == discard_frame)) {
3615 page_pool_recycle_direct(rx_q->page_pool, buf->page);
3618 if (!priv->hwts_rx_en)
3619 priv->dev->stats.rx_errors++;
3622 if (unlikely(error && (status & rx_not_ls)))
3624 if (unlikely(error)) {
3631 /* Buffer is good. Go on. */
3633 prefetch(page_address(buf->page));
3635 prefetch(page_address(buf->sec_page));
3637 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
3639 buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
3642 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3643 * Type frames (LLC/LLC-SNAP)
3645 * llc_snap is never checked in GMAC >= 4, so this ACS
3646 * feature is always disabled and packets need to be
3647 * stripped manually.
3649 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3650 unlikely(status != llc_snap)) {
3652 buf2_len -= ETH_FCS_LEN;
3654 buf1_len -= ETH_FCS_LEN;
3660 skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
3662 priv->dev->stats.rx_dropped++;
3667 dma_sync_single_for_cpu(priv->device, buf->addr,
3668 buf1_len, DMA_FROM_DEVICE);
3669 skb_copy_to_linear_data(skb, page_address(buf->page),
3671 skb_put(skb, buf1_len);
3673 /* Data payload copied into SKB, page ready for recycle */
3674 page_pool_recycle_direct(rx_q->page_pool, buf->page);
3676 } else if (buf1_len) {
3677 dma_sync_single_for_cpu(priv->device, buf->addr,
3678 buf1_len, DMA_FROM_DEVICE);
3679 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
3680 buf->page, 0, buf1_len,
3683 /* Data payload appended into SKB */
3684 page_pool_release_page(rx_q->page_pool, buf->page);
3689 dma_sync_single_for_cpu(priv->device, buf->sec_addr,
3690 buf2_len, DMA_FROM_DEVICE);
3691 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
3692 buf->sec_page, 0, buf2_len,
3695 /* Data payload appended into SKB */
3696 page_pool_release_page(rx_q->page_pool, buf->sec_page);
3697 buf->sec_page = NULL;
3701 if (likely(status & rx_not_ls))
3706 /* Got entire packet into SKB. Finish it. */
3708 stmmac_get_rx_hwtstamp(priv, p, np, skb);
3709 stmmac_rx_vlan(priv->dev, skb);
3710 skb->protocol = eth_type_trans(skb, priv->dev);
3713 skb_checksum_none_assert(skb);
3715 skb->ip_summed = CHECKSUM_UNNECESSARY;
3717 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
3718 skb_set_hash(skb, hash, hash_type);
3720 skb_record_rx_queue(skb, queue);
3721 napi_gro_receive(&ch->rx_napi, skb);
3724 priv->dev->stats.rx_packets++;
3725 priv->dev->stats.rx_bytes += len;
3729 if (status & rx_not_ls || skb) {
3730 rx_q->state_saved = true;
3731 rx_q->state.skb = skb;
3732 rx_q->state.error = error;
3733 rx_q->state.len = len;
3736 stmmac_rx_refill(priv, queue);
3738 priv->xstats.rx_pkt_n += count;
3743 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
3745 struct stmmac_channel *ch =
3746 container_of(napi, struct stmmac_channel, rx_napi);
3747 struct stmmac_priv *priv = ch->priv_data;
3748 u32 chan = ch->index;
3751 priv->xstats.napi_poll++;
3753 work_done = stmmac_rx(priv, budget, chan);
3754 if (work_done < budget && napi_complete_done(napi, work_done))
3755 stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3759 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
3761 struct stmmac_channel *ch =
3762 container_of(napi, struct stmmac_channel, tx_napi);
3763 struct stmmac_priv *priv = ch->priv_data;
3764 struct stmmac_tx_queue *tx_q;
3765 u32 chan = ch->index;
3768 priv->xstats.napi_poll++;
3770 work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan);
3771 work_done = min(work_done, budget);
3773 if (work_done < budget)
3774 napi_complete_done(napi, work_done);
3776 /* Force transmission restart */
3777 tx_q = &priv->tx_queue[chan];
3778 if (tx_q->cur_tx != tx_q->dirty_tx) {
3779 stmmac_enable_dma_transmission(priv, priv->ioaddr);
3780 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr,
3789 * @dev : Pointer to net device structure
3790 * Description: this function is called when a packet transmission fails to
3791 * complete within a reasonable time. The driver will mark the error in the
3792 * netdev structure and arrange for the device to be reset to a sane state
3793 * in order to transmit a new packet.
3795 static void stmmac_tx_timeout(struct net_device *dev)
3797 struct stmmac_priv *priv = netdev_priv(dev);
3799 stmmac_global_err(priv);
3803 * stmmac_set_rx_mode - entry point for multicast addressing
3804 * @dev : pointer to the device structure
3806 * This function is a driver entry point which gets called by the kernel
3807 * whenever multicast addresses must be enabled/disabled.
3811 static void stmmac_set_rx_mode(struct net_device *dev)
3813 struct stmmac_priv *priv = netdev_priv(dev);
3815 stmmac_set_filter(priv, priv->hw, dev);
3819 * stmmac_change_mtu - entry point to change MTU size for the device.
3820 * @dev : device pointer.
3821 * @new_mtu : the new MTU size for the device.
3822 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
3823 * to drive packet transmission. Ethernet has an MTU of 1500 octets
3824 * (ETH_DATA_LEN). This value can be changed with ifconfig.
3826 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3829 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3831 struct stmmac_priv *priv = netdev_priv(dev);
3833 if (netif_running(dev)) {
3834 netdev_err(priv->dev, "must be stopped to change its MTU\n");
3840 netdev_update_features(dev);
3845 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3846 netdev_features_t features)
3848 struct stmmac_priv *priv = netdev_priv(dev);
3850 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3851 features &= ~NETIF_F_RXCSUM;
3853 if (!priv->plat->tx_coe)
3854 features &= ~NETIF_F_CSUM_MASK;
3856 /* Some GMAC devices have a bugged Jumbo frame support that
3857 * needs to have the Tx COE disabled for oversized frames
3858 * (due to limited buffer sizes). In this case we disable
3859 * the TX csum insertion in the TDES and not use SF.
3861 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3862 features &= ~NETIF_F_CSUM_MASK;
3864 /* Disable tso if asked by ethtool */
3865 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3866 if (features & NETIF_F_TSO)
3875 static int stmmac_set_features(struct net_device *netdev,
3876 netdev_features_t features)
3878 struct stmmac_priv *priv = netdev_priv(netdev);
3882 /* Keep the COE Type in case of csum is supporting */
3883 if (features & NETIF_F_RXCSUM)
3884 priv->hw->rx_csum = priv->plat->rx_coe;
3886 priv->hw->rx_csum = 0;
3887 /* No check needed because rx_coe has been set before and it will be
3888 * fixed in case of issue.
3890 stmmac_rx_ipc(priv, priv->hw);
3892 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3893 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
3894 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3900 * stmmac_interrupt - main ISR
3901 * @irq: interrupt number.
3902 * @dev_id: to pass the net device pointer.
3903 * Description: this is the main driver interrupt service routine.
3905 * o DMA service routine (to manage incoming frame reception and transmission
3907 * o Core interrupts to manage: remote wake-up, management counter, LPI
3910 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3912 struct net_device *dev = (struct net_device *)dev_id;
3913 struct stmmac_priv *priv = netdev_priv(dev);
3914 u32 rx_cnt = priv->plat->rx_queues_to_use;
3915 u32 tx_cnt = priv->plat->tx_queues_to_use;
3920 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
3921 queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3924 pm_wakeup_event(priv->device, 0);
3926 if (unlikely(!dev)) {
3927 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
3931 /* Check if adapter is up */
3932 if (test_bit(STMMAC_DOWN, &priv->state))
3934 /* Check if a fatal error happened */
3935 if (stmmac_safety_feat_interrupt(priv))
3938 /* To handle GMAC own interrupts */
3939 if ((priv->plat->has_gmac) || xmac) {
3940 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
3943 if (unlikely(status)) {
3944 /* For LPI we need to save the tx status */
3945 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3946 priv->tx_path_in_lpi_mode = true;
3947 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3948 priv->tx_path_in_lpi_mode = false;
3951 for (queue = 0; queue < queues_count; queue++) {
3952 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3954 mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
3956 if (mtl_status != -EINVAL)
3957 status |= mtl_status;
3959 if (status & CORE_IRQ_MTL_RX_OVERFLOW)
3960 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3965 /* PCS link status */
3966 if (priv->hw->pcs) {
3967 if (priv->xstats.pcs_link)
3968 netif_carrier_on(dev);
3970 netif_carrier_off(dev);
3974 /* To handle DMA interrupts */
3975 stmmac_dma_interrupt(priv);
3980 #ifdef CONFIG_NET_POLL_CONTROLLER
3981 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3982 * to allow network I/O with interrupts disabled.
3984 static void stmmac_poll_controller(struct net_device *dev)
3986 disable_irq(dev->irq);
3987 stmmac_interrupt(dev->irq, dev);
3988 enable_irq(dev->irq);
3993 * stmmac_ioctl - Entry point for the Ioctl
3994 * @dev: Device pointer.
3995 * @rq: An IOCTL specefic structure, that can contain a pointer to
3996 * a proprietary structure used to pass information to the driver.
3997 * @cmd: IOCTL command
3999 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
4001 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4003 struct stmmac_priv *priv = netdev_priv (dev);
4004 int ret = -EOPNOTSUPP;
4006 if (!netif_running(dev))
4013 ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
4016 ret = stmmac_hwtstamp_set(dev, rq);
4019 ret = stmmac_hwtstamp_get(dev, rq);
4028 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
4031 struct stmmac_priv *priv = cb_priv;
4032 int ret = -EOPNOTSUPP;
4034 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
4037 stmmac_disable_all_queues(priv);
4040 case TC_SETUP_CLSU32:
4041 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
4043 case TC_SETUP_CLSFLOWER:
4044 ret = stmmac_tc_setup_cls(priv, priv, type_data);
4050 stmmac_enable_all_queues(priv);
4054 static LIST_HEAD(stmmac_block_cb_list);
4056 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
4059 struct stmmac_priv *priv = netdev_priv(ndev);
4062 case TC_SETUP_BLOCK:
4063 return flow_block_cb_setup_simple(type_data,
4064 &stmmac_block_cb_list,
4065 stmmac_setup_tc_block_cb,
4067 case TC_SETUP_QDISC_CBS:
4068 return stmmac_tc_setup_cbs(priv, priv, type_data);
4074 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
4075 struct net_device *sb_dev)
4077 int gso = skb_shinfo(skb)->gso_type;
4079 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
4081 * There is no way to determine the number of TSO/USO
4082 * capable Queues. Let's use always the Queue 0
4083 * because if TSO/USO is supported then at least this
4084 * one will be capable.
4089 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
4092 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
4094 struct stmmac_priv *priv = netdev_priv(ndev);
4097 ret = eth_mac_addr(ndev, addr);
4101 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
4106 #ifdef CONFIG_DEBUG_FS
4107 static struct dentry *stmmac_fs_dir;
4109 static void sysfs_display_ring(void *head, int size, int extend_desc,
4110 struct seq_file *seq)
4113 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
4114 struct dma_desc *p = (struct dma_desc *)head;
4116 for (i = 0; i < size; i++) {
4118 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
4119 i, (unsigned int)virt_to_phys(ep),
4120 le32_to_cpu(ep->basic.des0),
4121 le32_to_cpu(ep->basic.des1),
4122 le32_to_cpu(ep->basic.des2),
4123 le32_to_cpu(ep->basic.des3));
4126 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
4127 i, (unsigned int)virt_to_phys(p),
4128 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
4129 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
4132 seq_printf(seq, "\n");
4136 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
4138 struct net_device *dev = seq->private;
4139 struct stmmac_priv *priv = netdev_priv(dev);
4140 u32 rx_count = priv->plat->rx_queues_to_use;
4141 u32 tx_count = priv->plat->tx_queues_to_use;
4144 if ((dev->flags & IFF_UP) == 0)
4147 for (queue = 0; queue < rx_count; queue++) {
4148 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4150 seq_printf(seq, "RX Queue %d:\n", queue);
4152 if (priv->extend_desc) {
4153 seq_printf(seq, "Extended descriptor ring:\n");
4154 sysfs_display_ring((void *)rx_q->dma_erx,
4155 DMA_RX_SIZE, 1, seq);
4157 seq_printf(seq, "Descriptor ring:\n");
4158 sysfs_display_ring((void *)rx_q->dma_rx,
4159 DMA_RX_SIZE, 0, seq);
4163 for (queue = 0; queue < tx_count; queue++) {
4164 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4166 seq_printf(seq, "TX Queue %d:\n", queue);
4168 if (priv->extend_desc) {
4169 seq_printf(seq, "Extended descriptor ring:\n");
4170 sysfs_display_ring((void *)tx_q->dma_etx,
4171 DMA_TX_SIZE, 1, seq);
4173 seq_printf(seq, "Descriptor ring:\n");
4174 sysfs_display_ring((void *)tx_q->dma_tx,
4175 DMA_TX_SIZE, 0, seq);
4181 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
4183 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
4185 struct net_device *dev = seq->private;
4186 struct stmmac_priv *priv = netdev_priv(dev);
4188 if (!priv->hw_cap_support) {
4189 seq_printf(seq, "DMA HW features not supported\n");
4193 seq_printf(seq, "==============================\n");
4194 seq_printf(seq, "\tDMA HW features\n");
4195 seq_printf(seq, "==============================\n");
4197 seq_printf(seq, "\t10/100 Mbps: %s\n",
4198 (priv->dma_cap.mbps_10_100) ? "Y" : "N");
4199 seq_printf(seq, "\t1000 Mbps: %s\n",
4200 (priv->dma_cap.mbps_1000) ? "Y" : "N");
4201 seq_printf(seq, "\tHalf duplex: %s\n",
4202 (priv->dma_cap.half_duplex) ? "Y" : "N");
4203 seq_printf(seq, "\tHash Filter: %s\n",
4204 (priv->dma_cap.hash_filter) ? "Y" : "N");
4205 seq_printf(seq, "\tMultiple MAC address registers: %s\n",
4206 (priv->dma_cap.multi_addr) ? "Y" : "N");
4207 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
4208 (priv->dma_cap.pcs) ? "Y" : "N");
4209 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
4210 (priv->dma_cap.sma_mdio) ? "Y" : "N");
4211 seq_printf(seq, "\tPMT Remote wake up: %s\n",
4212 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
4213 seq_printf(seq, "\tPMT Magic Frame: %s\n",
4214 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
4215 seq_printf(seq, "\tRMON module: %s\n",
4216 (priv->dma_cap.rmon) ? "Y" : "N");
4217 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
4218 (priv->dma_cap.time_stamp) ? "Y" : "N");
4219 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
4220 (priv->dma_cap.atime_stamp) ? "Y" : "N");
4221 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
4222 (priv->dma_cap.eee) ? "Y" : "N");
4223 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
4224 seq_printf(seq, "\tChecksum Offload in TX: %s\n",
4225 (priv->dma_cap.tx_coe) ? "Y" : "N");
4226 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4227 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
4228 (priv->dma_cap.rx_coe) ? "Y" : "N");
4230 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
4231 (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
4232 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
4233 (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
4235 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
4236 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
4237 seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
4238 priv->dma_cap.number_rx_channel);
4239 seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
4240 priv->dma_cap.number_tx_channel);
4241 seq_printf(seq, "\tEnhanced descriptors: %s\n",
4242 (priv->dma_cap.enh_desc) ? "Y" : "N");
4246 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
4248 static void stmmac_init_fs(struct net_device *dev)
4250 struct stmmac_priv *priv = netdev_priv(dev);
4252 /* Create per netdev entries */
4253 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
4255 /* Entry to report DMA RX/TX rings */
4256 debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
4257 &stmmac_rings_status_fops);
4259 /* Entry to report the DMA HW features */
4260 debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
4261 &stmmac_dma_cap_fops);
4264 static void stmmac_exit_fs(struct net_device *dev)
4266 struct stmmac_priv *priv = netdev_priv(dev);
4268 debugfs_remove_recursive(priv->dbgfs_dir);
4270 #endif /* CONFIG_DEBUG_FS */
4272 static u32 stmmac_vid_crc32_le(__le16 vid_le)
4274 unsigned char *data = (unsigned char *)&vid_le;
4275 unsigned char data_byte = 0;
4280 bits = get_bitmask_order(VLAN_VID_MASK);
4281 for (i = 0; i < bits; i++) {
4283 data_byte = data[i / 8];
4285 temp = ((crc & 1) ^ data_byte) & 1;
4296 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
4303 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
4304 __le16 vid_le = cpu_to_le16(vid);
4305 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
4310 if (!priv->dma_cap.vlhash) {
4311 if (count > 2) /* VID = 0 always passes filter */
4314 pmatch = cpu_to_le16(vid);
4318 return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
4321 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
4323 struct stmmac_priv *priv = netdev_priv(ndev);
4324 bool is_double = false;
4327 if (be16_to_cpu(proto) == ETH_P_8021AD)
4330 set_bit(vid, priv->active_vlans);
4331 ret = stmmac_vlan_update(priv, is_double);
4333 clear_bit(vid, priv->active_vlans);
4340 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
4342 struct stmmac_priv *priv = netdev_priv(ndev);
4343 bool is_double = false;
4345 if (be16_to_cpu(proto) == ETH_P_8021AD)
4348 clear_bit(vid, priv->active_vlans);
4349 return stmmac_vlan_update(priv, is_double);
4352 static const struct net_device_ops stmmac_netdev_ops = {
4353 .ndo_open = stmmac_open,
4354 .ndo_start_xmit = stmmac_xmit,
4355 .ndo_stop = stmmac_release,
4356 .ndo_change_mtu = stmmac_change_mtu,
4357 .ndo_fix_features = stmmac_fix_features,
4358 .ndo_set_features = stmmac_set_features,
4359 .ndo_set_rx_mode = stmmac_set_rx_mode,
4360 .ndo_tx_timeout = stmmac_tx_timeout,
4361 .ndo_do_ioctl = stmmac_ioctl,
4362 .ndo_setup_tc = stmmac_setup_tc,
4363 .ndo_select_queue = stmmac_select_queue,
4364 #ifdef CONFIG_NET_POLL_CONTROLLER
4365 .ndo_poll_controller = stmmac_poll_controller,
4367 .ndo_set_mac_address = stmmac_set_mac_address,
4368 .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
4369 .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
4372 static void stmmac_reset_subtask(struct stmmac_priv *priv)
4374 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
4376 if (test_bit(STMMAC_DOWN, &priv->state))
4379 netdev_err(priv->dev, "Reset adapter.\n");
4382 netif_trans_update(priv->dev);
4383 while (test_and_set_bit(STMMAC_RESETING, &priv->state))
4384 usleep_range(1000, 2000);
4386 set_bit(STMMAC_DOWN, &priv->state);
4387 dev_close(priv->dev);
4388 dev_open(priv->dev, NULL);
4389 clear_bit(STMMAC_DOWN, &priv->state);
4390 clear_bit(STMMAC_RESETING, &priv->state);
4394 static void stmmac_service_task(struct work_struct *work)
4396 struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
4399 stmmac_reset_subtask(priv);
4400 clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
4404 * stmmac_hw_init - Init the MAC device
4405 * @priv: driver private structure
4406 * Description: this function is to configure the MAC device according to
4407 * some platform parameters or the HW capability register. It prepares the
4408 * driver to use either ring or chain modes and to setup either enhanced or
4409 * normal descriptors.
4411 static int stmmac_hw_init(struct stmmac_priv *priv)
4415 /* dwmac-sun8i only work in chain mode */
4416 if (priv->plat->has_sun8i)
4418 priv->chain_mode = chain_mode;
4420 /* Initialize HW Interface */
4421 ret = stmmac_hwif_init(priv);
4425 /* Get the HW capability (new GMAC newer than 3.50a) */
4426 priv->hw_cap_support = stmmac_get_hw_features(priv);
4427 if (priv->hw_cap_support) {
4428 dev_info(priv->device, "DMA HW capability register supported\n");
4430 /* We can override some gmac/dma configuration fields: e.g.
4431 * enh_desc, tx_coe (e.g. that are passed through the
4432 * platform) with the values from the HW capability
4433 * register (if supported).
4435 priv->plat->enh_desc = priv->dma_cap.enh_desc;
4436 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4437 priv->hw->pmt = priv->plat->pmt;
4438 if (priv->dma_cap.hash_tb_sz) {
4439 priv->hw->multicast_filter_bins =
4440 (BIT(priv->dma_cap.hash_tb_sz) << 5);
4441 priv->hw->mcast_bits_log2 =
4442 ilog2(priv->hw->multicast_filter_bins);
4445 /* TXCOE doesn't work in thresh DMA mode */
4446 if (priv->plat->force_thresh_dma_mode)
4447 priv->plat->tx_coe = 0;
4449 priv->plat->tx_coe = priv->dma_cap.tx_coe;
4451 /* In case of GMAC4 rx_coe is from HW cap register. */
4452 priv->plat->rx_coe = priv->dma_cap.rx_coe;
4454 if (priv->dma_cap.rx_coe_type2)
4455 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4456 else if (priv->dma_cap.rx_coe_type1)
4457 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4460 dev_info(priv->device, "No HW DMA feature register supported\n");
4463 if (priv->plat->rx_coe) {
4464 priv->hw->rx_csum = priv->plat->rx_coe;
4465 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4466 if (priv->synopsys_id < DWMAC_CORE_4_00)
4467 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4469 if (priv->plat->tx_coe)
4470 dev_info(priv->device, "TX Checksum insertion supported\n");
4472 if (priv->plat->pmt) {
4473 dev_info(priv->device, "Wake-Up On Lan supported\n");
4474 device_set_wakeup_capable(priv->device, 1);
4477 if (priv->dma_cap.tsoen)
4478 dev_info(priv->device, "TSO supported\n");
4480 /* Run HW quirks, if any */
4481 if (priv->hwif_quirks) {
4482 ret = priv->hwif_quirks(priv);
4487 /* Rx Watchdog is available in the COREs newer than the 3.40.
4488 * In some case, for example on bugged HW this feature
4489 * has to be disable and this can be done by passing the
4490 * riwt_off field from the platform.
4492 if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
4493 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
4495 dev_info(priv->device,
4496 "Enable RX Mitigation via HW Watchdog Timer\n");
4504 * @device: device pointer
4505 * @plat_dat: platform data pointer
4506 * @res: stmmac resource pointer
4507 * Description: this is the main probe function used to
4508 * call the alloc_etherdev, allocate the priv structure.
4510 * returns 0 on success, otherwise errno.
4512 int stmmac_dvr_probe(struct device *device,
4513 struct plat_stmmacenet_data *plat_dat,
4514 struct stmmac_resources *res)
4516 struct net_device *ndev = NULL;
4517 struct stmmac_priv *priv;
4518 u32 queue, rxq, maxq;
4521 ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
4522 MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
4526 SET_NETDEV_DEV(ndev, device);
4528 priv = netdev_priv(ndev);
4529 priv->device = device;
4532 stmmac_set_ethtool_ops(ndev);
4533 priv->pause = pause;
4534 priv->plat = plat_dat;
4535 priv->ioaddr = res->addr;
4536 priv->dev->base_addr = (unsigned long)res->addr;
4538 priv->dev->irq = res->irq;
4539 priv->wol_irq = res->wol_irq;
4540 priv->lpi_irq = res->lpi_irq;
4542 if (!IS_ERR_OR_NULL(res->mac))
4543 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4545 dev_set_drvdata(device, priv->dev);
4547 /* Verify driver arguments */
4548 stmmac_verify_args();
4550 /* Allocate workqueue */
4551 priv->wq = create_singlethread_workqueue("stmmac_wq");
4553 dev_err(priv->device, "failed to create workqueue\n");
4557 INIT_WORK(&priv->service_task, stmmac_service_task);
4559 /* Override with kernel parameters if supplied XXX CRS XXX
4560 * this needs to have multiple instances
4562 if ((phyaddr >= 0) && (phyaddr <= 31))
4563 priv->plat->phy_addr = phyaddr;
4565 if (priv->plat->stmmac_rst) {
4566 ret = reset_control_assert(priv->plat->stmmac_rst);
4567 reset_control_deassert(priv->plat->stmmac_rst);
4568 /* Some reset controllers have only reset callback instead of
4569 * assert + deassert callbacks pair.
4571 if (ret == -ENOTSUPP)
4572 reset_control_reset(priv->plat->stmmac_rst);
4575 /* Init MAC and get the capabilities */
4576 ret = stmmac_hw_init(priv);
4580 stmmac_check_ether_addr(priv);
4582 /* Configure real RX and TX queues */
4583 netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4584 netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4586 ndev->netdev_ops = &stmmac_netdev_ops;
4588 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4591 ret = stmmac_tc_init(priv, priv);
4593 ndev->hw_features |= NETIF_F_HW_TC;
4596 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4597 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4598 if (priv->plat->has_gmac4)
4599 ndev->hw_features |= NETIF_F_GSO_UDP_L4;
4601 dev_info(priv->device, "TSO feature enabled\n");
4604 if (priv->dma_cap.sphen) {
4605 ndev->hw_features |= NETIF_F_GRO;
4607 dev_info(priv->device, "SPH feature enabled\n");
4610 if (priv->dma_cap.addr64) {
4611 ret = dma_set_mask_and_coherent(device,
4612 DMA_BIT_MASK(priv->dma_cap.addr64));
4614 dev_info(priv->device, "Using %d bits DMA width\n",
4615 priv->dma_cap.addr64);
4618 * If more than 32 bits can be addressed, make sure to
4619 * enable enhanced addressing mode.
4621 if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
4622 priv->plat->dma_cfg->eame = true;
4624 ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
4626 dev_err(priv->device, "Failed to set DMA Mask\n");
4630 priv->dma_cap.addr64 = 32;
4634 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4635 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4636 #ifdef STMMAC_VLAN_TAG_USED
4637 /* Both mac100 and gmac support receive VLAN tag detection */
4638 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
4639 if (priv->dma_cap.vlhash) {
4640 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4641 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
4643 if (priv->dma_cap.vlins) {
4644 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
4645 if (priv->dma_cap.dvlan)
4646 ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
4649 priv->msg_enable = netif_msg_init(debug, default_msg_level);
4651 /* Initialize RSS */
4652 rxq = priv->plat->rx_queues_to_use;
4653 netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
4654 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
4655 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
4657 if (priv->dma_cap.rssen && priv->plat->rss_en)
4658 ndev->features |= NETIF_F_RXHASH;
4660 /* MTU range: 46 - hw-specific max */
4661 ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4662 if (priv->plat->has_xgmac)
4663 ndev->max_mtu = XGMAC_JUMBO_LEN;
4664 else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4665 ndev->max_mtu = JUMBO_LEN;
4667 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4668 /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
4669 * as well as plat->maxmtu < ndev->min_mtu which is a invalid range.
4671 if ((priv->plat->maxmtu < ndev->max_mtu) &&
4672 (priv->plat->maxmtu >= ndev->min_mtu))
4673 ndev->max_mtu = priv->plat->maxmtu;
4674 else if (priv->plat->maxmtu < ndev->min_mtu)
4675 dev_warn(priv->device,
4676 "%s: warning: maxmtu having invalid value (%d)\n",
4677 __func__, priv->plat->maxmtu);
4680 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
4682 /* Setup channels NAPI */
4683 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
4685 for (queue = 0; queue < maxq; queue++) {
4686 struct stmmac_channel *ch = &priv->channel[queue];
4688 ch->priv_data = priv;
4691 if (queue < priv->plat->rx_queues_to_use) {
4692 netif_napi_add(ndev, &ch->rx_napi, stmmac_napi_poll_rx,
4695 if (queue < priv->plat->tx_queues_to_use) {
4696 netif_tx_napi_add(ndev, &ch->tx_napi,
4697 stmmac_napi_poll_tx,
4702 mutex_init(&priv->lock);
4704 /* If a specific clk_csr value is passed from the platform
4705 * this means that the CSR Clock Range selection cannot be
4706 * changed at run-time and it is fixed. Viceversa the driver'll try to
4707 * set the MDC clock dynamically according to the csr actual
4710 if (priv->plat->clk_csr >= 0)
4711 priv->clk_csr = priv->plat->clk_csr;
4713 stmmac_clk_csr_set(priv);
4715 stmmac_check_pcs_mode(priv);
4717 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4718 priv->hw->pcs != STMMAC_PCS_TBI &&
4719 priv->hw->pcs != STMMAC_PCS_RTBI) {
4720 /* MDIO bus Registration */
4721 ret = stmmac_mdio_register(ndev);
4723 dev_err(priv->device,
4724 "%s: MDIO bus (id: %d) registration failed",
4725 __func__, priv->plat->bus_id);
4726 goto error_mdio_register;
4730 ret = stmmac_phy_setup(priv);
4732 netdev_err(ndev, "failed to setup phy (%d)\n", ret);
4733 goto error_phy_setup;
4736 ret = register_netdev(ndev);
4738 dev_err(priv->device, "%s: ERROR %i registering the device\n",
4740 goto error_netdev_register;
4743 #ifdef CONFIG_DEBUG_FS
4744 stmmac_init_fs(ndev);
4749 error_netdev_register:
4750 phylink_destroy(priv->phylink);
4752 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4753 priv->hw->pcs != STMMAC_PCS_TBI &&
4754 priv->hw->pcs != STMMAC_PCS_RTBI)
4755 stmmac_mdio_unregister(ndev);
4756 error_mdio_register:
4757 for (queue = 0; queue < maxq; queue++) {
4758 struct stmmac_channel *ch = &priv->channel[queue];
4760 if (queue < priv->plat->rx_queues_to_use)
4761 netif_napi_del(&ch->rx_napi);
4762 if (queue < priv->plat->tx_queues_to_use)
4763 netif_napi_del(&ch->tx_napi);
4766 destroy_workqueue(priv->wq);
4770 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
4774 * @dev: device pointer
4775 * Description: this function resets the TX/RX processes, disables the MAC RX/TX
4776 * changes the link status, releases the DMA descriptor rings.
4778 int stmmac_dvr_remove(struct device *dev)
4780 struct net_device *ndev = dev_get_drvdata(dev);
4781 struct stmmac_priv *priv = netdev_priv(ndev);
4783 netdev_info(priv->dev, "%s: removing driver", __func__);
4785 #ifdef CONFIG_DEBUG_FS
4786 stmmac_exit_fs(ndev);
4788 stmmac_stop_all_dma(priv);
4790 stmmac_mac_set(priv, priv->ioaddr, false);
4791 netif_carrier_off(ndev);
4792 unregister_netdev(ndev);
4793 phylink_destroy(priv->phylink);
4794 if (priv->plat->stmmac_rst)
4795 reset_control_assert(priv->plat->stmmac_rst);
4796 clk_disable_unprepare(priv->plat->pclk);
4797 clk_disable_unprepare(priv->plat->stmmac_clk);
4798 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4799 priv->hw->pcs != STMMAC_PCS_TBI &&
4800 priv->hw->pcs != STMMAC_PCS_RTBI)
4801 stmmac_mdio_unregister(ndev);
4802 destroy_workqueue(priv->wq);
4803 mutex_destroy(&priv->lock);
4807 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
4810 * stmmac_suspend - suspend callback
4811 * @dev: device pointer
4812 * Description: this is the function to suspend the device and it is called
4813 * by the platform driver to stop the network queue, release the resources,
4814 * program the PMT register (for WoL), clean and release driver resources.
4816 int stmmac_suspend(struct device *dev)
4818 struct net_device *ndev = dev_get_drvdata(dev);
4819 struct stmmac_priv *priv = netdev_priv(ndev);
4821 if (!ndev || !netif_running(ndev))
4824 phylink_mac_change(priv->phylink, false);
4826 mutex_lock(&priv->lock);
4828 netif_device_detach(ndev);
4829 stmmac_stop_all_queues(priv);
4831 stmmac_disable_all_queues(priv);
4833 /* Stop TX/RX DMA */
4834 stmmac_stop_all_dma(priv);
4836 /* Enable Power down mode by programming the PMT regs */
4837 if (device_may_wakeup(priv->device)) {
4838 stmmac_pmt(priv, priv->hw, priv->wolopts);
4841 mutex_unlock(&priv->lock);
4843 phylink_stop(priv->phylink);
4845 mutex_lock(&priv->lock);
4847 stmmac_mac_set(priv, priv->ioaddr, false);
4848 pinctrl_pm_select_sleep_state(priv->device);
4849 /* Disable clock in case of PWM is off */
4850 if (priv->plat->clk_ptp_ref)
4851 clk_disable_unprepare(priv->plat->clk_ptp_ref);
4852 clk_disable_unprepare(priv->plat->pclk);
4853 clk_disable_unprepare(priv->plat->stmmac_clk);
4855 mutex_unlock(&priv->lock);
4857 priv->speed = SPEED_UNKNOWN;
4860 EXPORT_SYMBOL_GPL(stmmac_suspend);
4863 * stmmac_reset_queues_param - reset queue parameters
4864 * @dev: device pointer
4866 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4868 u32 rx_cnt = priv->plat->rx_queues_to_use;
4869 u32 tx_cnt = priv->plat->tx_queues_to_use;
4872 for (queue = 0; queue < rx_cnt; queue++) {
4873 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4879 for (queue = 0; queue < tx_cnt; queue++) {
4880 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4889 * stmmac_resume - resume callback
4890 * @dev: device pointer
4891 * Description: when resume this function is invoked to setup the DMA and CORE
4892 * in a usable state.
4894 int stmmac_resume(struct device *dev)
4896 struct net_device *ndev = dev_get_drvdata(dev);
4897 struct stmmac_priv *priv = netdev_priv(ndev);
4899 if (!netif_running(ndev))
4902 /* Power Down bit, into the PM register, is cleared
4903 * automatically as soon as a magic packet or a Wake-up frame
4904 * is received. Anyway, it's better to manually clear
4905 * this bit because it can generate problems while resuming
4906 * from another devices (e.g. serial console).
4908 if (device_may_wakeup(priv->device)) {
4909 mutex_lock(&priv->lock);
4910 stmmac_pmt(priv, priv->hw, 0);
4911 mutex_unlock(&priv->lock);
4914 pinctrl_pm_select_default_state(priv->device);
4915 /* enable the clk previously disabled */
4916 clk_prepare_enable(priv->plat->stmmac_clk);
4917 clk_prepare_enable(priv->plat->pclk);
4918 if (priv->plat->clk_ptp_ref)
4919 clk_prepare_enable(priv->plat->clk_ptp_ref);
4920 /* reset the phy so that it's ready */
4922 stmmac_mdio_reset(priv->mii);
4925 netif_device_attach(ndev);
4927 mutex_lock(&priv->lock);
4929 stmmac_reset_queues_param(priv);
4931 stmmac_clear_descriptors(priv);
4933 stmmac_hw_setup(ndev, false);
4934 stmmac_init_coalesce(priv);
4935 stmmac_set_rx_mode(ndev);
4937 stmmac_enable_all_queues(priv);
4939 stmmac_start_all_queues(priv);
4941 mutex_unlock(&priv->lock);
4943 if (!device_may_wakeup(priv->device)) {
4945 phylink_start(priv->phylink);
4949 phylink_mac_change(priv->phylink, true);
4953 EXPORT_SYMBOL_GPL(stmmac_resume);
4956 static int __init stmmac_cmdline_opt(char *str)
4962 while ((opt = strsep(&str, ",")) != NULL) {
4963 if (!strncmp(opt, "debug:", 6)) {
4964 if (kstrtoint(opt + 6, 0, &debug))
4966 } else if (!strncmp(opt, "phyaddr:", 8)) {
4967 if (kstrtoint(opt + 8, 0, &phyaddr))
4969 } else if (!strncmp(opt, "buf_sz:", 7)) {
4970 if (kstrtoint(opt + 7, 0, &buf_sz))
4972 } else if (!strncmp(opt, "tc:", 3)) {
4973 if (kstrtoint(opt + 3, 0, &tc))
4975 } else if (!strncmp(opt, "watchdog:", 9)) {
4976 if (kstrtoint(opt + 9, 0, &watchdog))
4978 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
4979 if (kstrtoint(opt + 10, 0, &flow_ctrl))
4981 } else if (!strncmp(opt, "pause:", 6)) {
4982 if (kstrtoint(opt + 6, 0, &pause))
4984 } else if (!strncmp(opt, "eee_timer:", 10)) {
4985 if (kstrtoint(opt + 10, 0, &eee_timer))
4987 } else if (!strncmp(opt, "chain_mode:", 11)) {
4988 if (kstrtoint(opt + 11, 0, &chain_mode))
4995 pr_err("%s: ERROR broken module parameter conversion", __func__);
4999 __setup("stmmaceth=", stmmac_cmdline_opt);
5002 static int __init stmmac_init(void)
5004 #ifdef CONFIG_DEBUG_FS
5005 /* Create debugfs main directory if it doesn't exist yet */
5007 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
5013 static void __exit stmmac_exit(void)
5015 #ifdef CONFIG_DEBUG_FS
5016 debugfs_remove_recursive(stmmac_fs_dir);
5020 module_init(stmmac_init)
5021 module_exit(stmmac_exit)
5023 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
5024 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
5025 MODULE_LICENSE("GPL");