
net: stmmac: Disable ACS Feature for GMAC >= 4
[uclinux-h8/linux.git] drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1 /*******************************************************************************
2   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3   ST Ethernet IPs are built around a Synopsys IP Core.
4
5         Copyright(C) 2007-2011 STMicroelectronics Ltd
6
7   This program is free software; you can redistribute it and/or modify it
8   under the terms and conditions of the GNU General Public License,
9   version 2, as published by the Free Software Foundation.
10
11   This program is distributed in the hope it will be useful, but WITHOUT
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14   more details.
15
16   The full GNU General Public License is included in this distribution in
17   the file called "COPYING".
18
19   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20
21   Documentation available at:
22         http://www.stlinux.com
23   Support available at:
24         https://bugzilla.stlinux.com/
25 *******************************************************************************/
26
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include "stmmac_ptp.h"
49 #include "stmmac.h"
50 #include <linux/reset.h>
51 #include <linux/of_mdio.h>
52 #include "dwmac1000.h"
53
54 #define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
55 #define TSO_MAX_BUFF_SIZE       (SZ_16K - 1)
56
57 /* Module parameters */
58 #define TX_TIMEO        5000
59 static int watchdog = TX_TIMEO;
60 module_param(watchdog, int, 0644);
61 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
62
63 static int debug = -1;
64 module_param(debug, int, 0644);
65 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
66
67 static int phyaddr = -1;
68 module_param(phyaddr, int, 0444);
69 MODULE_PARM_DESC(phyaddr, "Physical device address");
70
71 #define STMMAC_TX_THRESH        (DMA_TX_SIZE / 4)
72 #define STMMAC_RX_THRESH        (DMA_RX_SIZE / 4)
73
74 static int flow_ctrl = FLOW_OFF;
75 module_param(flow_ctrl, int, 0644);
76 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
77
78 static int pause = PAUSE_TIME;
79 module_param(pause, int, 0644);
80 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
81
82 #define TC_DEFAULT 64
83 static int tc = TC_DEFAULT;
84 module_param(tc, int, 0644);
85 MODULE_PARM_DESC(tc, "DMA threshold control value");
86
87 #define DEFAULT_BUFSIZE 1536
88 static int buf_sz = DEFAULT_BUFSIZE;
89 module_param(buf_sz, int, 0644);
90 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
91
92 #define STMMAC_RX_COPYBREAK     256
93
94 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
95                                       NETIF_MSG_LINK | NETIF_MSG_IFUP |
96                                       NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
97
98 #define STMMAC_DEFAULT_LPI_TIMER        1000
99 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
100 module_param(eee_timer, int, 0644);
101 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
102 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
103
104 /* By default the driver uses ring mode to manage tx and rx descriptors,
105  * but the user can force use of chain mode instead of ring mode.
106  */
107 static unsigned int chain_mode;
108 module_param(chain_mode, int, 0444);
109 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
110
111 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
112
113 #ifdef CONFIG_DEBUG_FS
114 static int stmmac_init_fs(struct net_device *dev);
115 static void stmmac_exit_fs(struct net_device *dev);
116 #endif
117
118 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
119
120 /**
121  * stmmac_verify_args - verify the driver parameters.
122  * Description: it checks the driver parameters and sets a default in case
123  * of errors.
124  */
125 static void stmmac_verify_args(void)
126 {
127         if (unlikely(watchdog < 0))
128                 watchdog = TX_TIMEO;
129         if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
130                 buf_sz = DEFAULT_BUFSIZE;
131         if (unlikely(flow_ctrl > 1))
132                 flow_ctrl = FLOW_AUTO;
133         else if (likely(flow_ctrl < 0))
134                 flow_ctrl = FLOW_OFF;
135         if (unlikely((pause < 0) || (pause > 0xffff)))
136                 pause = PAUSE_TIME;
137         if (eee_timer < 0)
138                 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
139 }
140
141 /**
142  * stmmac_disable_all_queues - Disable all queues
143  * @priv: driver private structure
144  */
145 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
146 {
147         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
148         u32 queue;
149
150         for (queue = 0; queue < rx_queues_cnt; queue++) {
151                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
152
153                 napi_disable(&rx_q->napi);
154         }
155 }
156
157 /**
158  * stmmac_enable_all_queues - Enable all queues
159  * @priv: driver private structure
160  */
161 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
162 {
163         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
164         u32 queue;
165
166         for (queue = 0; queue < rx_queues_cnt; queue++) {
167                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
168
169                 napi_enable(&rx_q->napi);
170         }
171 }
172
173 /**
174  * stmmac_stop_all_queues - Stop all queues
175  * @priv: driver private structure
176  */
177 static void stmmac_stop_all_queues(struct stmmac_priv *priv)
178 {
179         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
180         u32 queue;
181
182         for (queue = 0; queue < tx_queues_cnt; queue++)
183                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
184 }
185
186 /**
187  * stmmac_start_all_queues - Start all queues
188  * @priv: driver private structure
189  */
190 static void stmmac_start_all_queues(struct stmmac_priv *priv)
191 {
192         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
193         u32 queue;
194
195         for (queue = 0; queue < tx_queues_cnt; queue++)
196                 netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
197 }
198
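/**
 * stmmac_service_event_schedule - schedule the service task
 * @priv: driver private structure
 * Description: queue the service work unless the interface is going down or
 * the task is already scheduled.
 */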
199 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
200 {
201         if (!test_bit(STMMAC_DOWN, &priv->state) &&
202             !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
203                 queue_work(priv->wq, &priv->service_task);
204 }
205
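/**
 * stmmac_global_err - handle a fatal error
 * @priv: driver private structure
 * Description: take the carrier down, flag that a reset is required and
 * schedule the service task to perform it.
 */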
206 static void stmmac_global_err(struct stmmac_priv *priv)
207 {
208         netif_carrier_off(priv->dev);
209         set_bit(STMMAC_RESET_REQUESTED, &priv->state);
210         stmmac_service_event_schedule(priv);
211 }
212
213 /**
214  * stmmac_clk_csr_set - dynamically set the MDC clock
215  * @priv: driver private structure
216  * Description: this is to dynamically set the MDC clock according to the csr
217  * clock input.
218  * Note:
219  *      If a specific clk_csr value is passed from the platform
220  *      this means that the CSR Clock Range selection cannot be
221  *      changed at run-time and it is fixed (as reported in the driver
222  *      documentation). Otherwise, the driver will try to set the MDC
223  *      clock dynamically according to the actual clock input.
224  */
225 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
226 {
227         u32 clk_rate;
228
229         clk_rate = clk_get_rate(priv->plat->stmmac_clk);
230
231         /* The platform-provided default clk_csr is assumed valid for all
232          * cases except the ones handled below.
233          * For values higher than the IEEE 802.3 specified frequency range
234          * we cannot estimate the proper divider, since the frequency of
235          * clk_csr_i is unknown, so we do not change the default
236          * divider.
237          */
238         if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
239                 if (clk_rate < CSR_F_35M)
240                         priv->clk_csr = STMMAC_CSR_20_35M;
241                 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
242                         priv->clk_csr = STMMAC_CSR_35_60M;
243                 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
244                         priv->clk_csr = STMMAC_CSR_60_100M;
245                 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
246                         priv->clk_csr = STMMAC_CSR_100_150M;
247                 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
248                         priv->clk_csr = STMMAC_CSR_150_250M;
249                 else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
250                         priv->clk_csr = STMMAC_CSR_250_300M;
251         }
252
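        /* The dwmac-sun8i variant selects its MDC divider from raw
         * clock-rate thresholds, overriding the value computed above.
         */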
253         if (priv->plat->has_sun8i) {
254                 if (clk_rate > 160000000)
255                         priv->clk_csr = 0x03;
256                 else if (clk_rate > 80000000)
257                         priv->clk_csr = 0x02;
258                 else if (clk_rate > 40000000)
259                         priv->clk_csr = 0x01;
260                 else
261                         priv->clk_csr = 0;
262         }
263 }
264
265 static void print_pkt(unsigned char *buf, int len)
266 {
267         pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
268         print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
269 }
270
271 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
272 {
273         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
274         u32 avail;
275
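        /* One slot is always kept unused so that cur_tx never catches up
         * with dirty_tx when the ring wraps around.
         */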
276         if (tx_q->dirty_tx > tx_q->cur_tx)
277                 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
278         else
279                 avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
280
281         return avail;
282 }
283
284 /**
285  * stmmac_rx_dirty - Get RX queue dirty
286  * @priv: driver private structure
287  * @queue: RX queue index
288  */
289 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
290 {
291         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
292         u32 dirty;
293
294         if (rx_q->dirty_rx <= rx_q->cur_rx)
295                 dirty = rx_q->cur_rx - rx_q->dirty_rx;
296         else
297                 dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
298
299         return dirty;
300 }
301
302 /**
303  * stmmac_hw_fix_mac_speed - callback for speed selection
304  * @priv: driver private structure
305  * Description: on some platforms (e.g. ST), some HW system configuration
306  * registers have to be set according to the link speed negotiated.
307  */
308 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
309 {
310         struct net_device *ndev = priv->dev;
311         struct phy_device *phydev = ndev->phydev;
312
313         if (likely(priv->plat->fix_mac_speed))
314                 priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
315 }
316
317 /**
318  * stmmac_enable_eee_mode - check and enter in LPI mode
319  * @priv: driver private structure
320  * Description: this function checks whether all TX queues are idle and, if
321  * so, enters LPI mode when EEE is enabled.
322  */
323 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
324 {
325         u32 tx_cnt = priv->plat->tx_queues_to_use;
326         u32 queue;
327
328         /* check if all TX queues have the work finished */
329         for (queue = 0; queue < tx_cnt; queue++) {
330                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
331
332                 if (tx_q->dirty_tx != tx_q->cur_tx)
333                         return; /* still unfinished work */
334         }
335
336         /* Check and enter in LPI mode */
337         if (!priv->tx_path_in_lpi_mode)
338                 priv->hw->mac->set_eee_mode(priv->hw,
339                                             priv->plat->en_tx_lpi_clockgating);
340 }
341
342 /**
343  * stmmac_disable_eee_mode - disable and exit from LPI mode
344  * @priv: driver private structure
345  * Description: this function exits and disables EEE when the LPI state
346  * is active. It is called from the xmit path.
347  */
348 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
349 {
350         priv->hw->mac->reset_eee_mode(priv->hw);
351         del_timer_sync(&priv->eee_ctrl_timer);
352         priv->tx_path_in_lpi_mode = false;
353 }
354
355 /**
356  * stmmac_eee_ctrl_timer - EEE TX SW timer.
357  * @t : timer_list entry
358  * Description:
359  *  if there is no data transfer and we are not already in the LPI state,
360  *  then the MAC transmitter can be moved to the LPI state.
361  */
362 static void stmmac_eee_ctrl_timer(struct timer_list *t)
363 {
364         struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
365
366         stmmac_enable_eee_mode(priv);
367         mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
368 }
369
370 /**
371  * stmmac_eee_init - init EEE
372  * @priv: driver private structure
373  * Description:
374  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
375  *  can also manage EEE, this function enables the LPI state and starts the
376  *  related timer.
377  */
378 bool stmmac_eee_init(struct stmmac_priv *priv)
379 {
380         struct net_device *ndev = priv->dev;
381         int interface = priv->plat->interface;
382         unsigned long flags;
383         bool ret = false;
384
385         if ((interface != PHY_INTERFACE_MODE_MII) &&
386             (interface != PHY_INTERFACE_MODE_GMII) &&
387             !phy_interface_mode_is_rgmii(interface))
388                 goto out;
389
390         /* When using the PCS we cannot access the phy registers at this
391          * stage, so we do not support extra features like EEE.
392          */
393         if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
394             (priv->hw->pcs == STMMAC_PCS_TBI) ||
395             (priv->hw->pcs == STMMAC_PCS_RTBI))
396                 goto out;
397
398         /* MAC core supports the EEE feature. */
399         if (priv->dma_cap.eee) {
400                 int tx_lpi_timer = priv->tx_lpi_timer;
401
402                 /* Check if the PHY supports EEE */
403                 if (phy_init_eee(ndev->phydev, 1)) {
404                         /* Handle the case where EEE can no longer be
405                          * supported at run-time (for example because the
406                          * link partner caps have changed).
407                          * In that case the driver disables its own timers.
408                          */
409                         spin_lock_irqsave(&priv->lock, flags);
410                         if (priv->eee_active) {
411                                 netdev_dbg(priv->dev, "disable EEE\n");
412                                 del_timer_sync(&priv->eee_ctrl_timer);
413                                 priv->hw->mac->set_eee_timer(priv->hw, 0,
414                                                              tx_lpi_timer);
415                         }
416                         priv->eee_active = 0;
417                         spin_unlock_irqrestore(&priv->lock, flags);
418                         goto out;
419                 }
420                 /* Activate the EEE and start timers */
421                 spin_lock_irqsave(&priv->lock, flags);
422                 if (!priv->eee_active) {
423                         priv->eee_active = 1;
424                         timer_setup(&priv->eee_ctrl_timer,
425                                     stmmac_eee_ctrl_timer, 0);
426                         mod_timer(&priv->eee_ctrl_timer,
427                                   STMMAC_LPI_T(eee_timer));
428
429                         priv->hw->mac->set_eee_timer(priv->hw,
430                                                      STMMAC_DEFAULT_LIT_LS,
431                                                      tx_lpi_timer);
432                 }
433                 /* Set HW EEE according to the speed */
434                 priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);
435
436                 ret = true;
437                 spin_unlock_irqrestore(&priv->lock, flags);
438
439                 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
440         }
441 out:
442         return ret;
443 }
444
445 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
446  * @priv: driver private structure
447  * @p : descriptor pointer
448  * @skb : the socket buffer
449  * Description :
450  * This function reads the timestamp from the descriptor, passes it to the
451  * stack and also performs some sanity checks.
452  */
453 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
454                                    struct dma_desc *p, struct sk_buff *skb)
455 {
456         struct skb_shared_hwtstamps shhwtstamp;
457         u64 ns;
458
459         if (!priv->hwts_tx_en)
460                 return;
461
462         /* exit if skb doesn't support hw tstamp */
463         if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
464                 return;
465
466         /* check tx tstamp status */
467         if (priv->hw->desc->get_tx_timestamp_status(p)) {
468                 /* get the valid tstamp */
469                 ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
470
471                 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
472                 shhwtstamp.hwtstamp = ns_to_ktime(ns);
473
474                 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
475                 /* pass tstamp to stack */
476                 skb_tstamp_tx(skb, &shhwtstamp);
477         }
478
479         return;
480 }
481
482 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
483  * @priv: driver private structure
484  * @p : descriptor pointer
485  * @np : next descriptor pointer
486  * @skb : the socket buffer
487  * Description :
488  * This function reads the received packet's timestamp from the descriptor
489  * and passes it to the stack. It also performs some sanity checks.
490  */
491 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
492                                    struct dma_desc *np, struct sk_buff *skb)
493 {
494         struct skb_shared_hwtstamps *shhwtstamp = NULL;
495         struct dma_desc *desc = p;
496         u64 ns;
497
498         if (!priv->hwts_rx_en)
499                 return;
500         /* For GMAC4, the valid timestamp is from CTX next desc. */
501         if (priv->plat->has_gmac4)
502                 desc = np;
503
504         /* Check if timestamp is available */
505         if (priv->hw->desc->get_rx_timestamp_status(p, np, priv->adv_ts)) {
506                 ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
507                 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
508                 shhwtstamp = skb_hwtstamps(skb);
509                 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
510                 shhwtstamp->hwtstamp = ns_to_ktime(ns);
511         } else  {
512                 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
513         }
514 }
515
516 /**
517  *  stmmac_hwtstamp_ioctl - control hardware timestamping.
518  *  @dev: device pointer.
519  *  @ifr: An IOCTL specific structure, that can contain a pointer to
520  *  a proprietary structure used to pass information to the driver.
521  *  Description:
522  *  This function configures the MAC to enable/disable both outgoing (TX)
523  *  and incoming (RX) packet timestamping based on user input.
524  *  Return Value:
525  *  0 on success and an appropriate -ve integer on failure.
526  */
527 static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
528 {
529         struct stmmac_priv *priv = netdev_priv(dev);
530         struct hwtstamp_config config;
531         struct timespec64 now;
532         u64 temp = 0;
533         u32 ptp_v2 = 0;
534         u32 tstamp_all = 0;
535         u32 ptp_over_ipv4_udp = 0;
536         u32 ptp_over_ipv6_udp = 0;
537         u32 ptp_over_ethernet = 0;
538         u32 snap_type_sel = 0;
539         u32 ts_master_en = 0;
540         u32 ts_event_en = 0;
541         u32 value = 0;
542         u32 sec_inc;
543
544         if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
545                 netdev_alert(priv->dev, "No support for HW time stamping\n");
546                 priv->hwts_tx_en = 0;
547                 priv->hwts_rx_en = 0;
548
549                 return -EOPNOTSUPP;
550         }
551
552         if (copy_from_user(&config, ifr->ifr_data,
553                            sizeof(struct hwtstamp_config)))
554                 return -EFAULT;
555
556         netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
557                    __func__, config.flags, config.tx_type, config.rx_filter);
558
559         /* reserved for future extensions */
560         if (config.flags)
561                 return -EINVAL;
562
563         if (config.tx_type != HWTSTAMP_TX_OFF &&
564             config.tx_type != HWTSTAMP_TX_ON)
565                 return -ERANGE;
566
567         if (priv->adv_ts) {
568                 switch (config.rx_filter) {
569                 case HWTSTAMP_FILTER_NONE:
570                         /* time stamp no incoming packet at all */
571                         config.rx_filter = HWTSTAMP_FILTER_NONE;
572                         break;
573
574                 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
575                         /* PTP v1, UDP, any kind of event packet */
576                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
577                         /* take time stamp for all event messages */
578                         if (priv->plat->has_gmac4)
579                                 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
580                         else
581                                 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
582
583                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
584                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
585                         break;
586
587                 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
588                         /* PTP v1, UDP, Sync packet */
589                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
590                         /* take time stamp for SYNC messages only */
591                         ts_event_en = PTP_TCR_TSEVNTENA;
592
593                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
594                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
595                         break;
596
597                 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
598                         /* PTP v1, UDP, Delay_req packet */
599                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
600                         /* take time stamp for Delay_Req messages only */
601                         ts_master_en = PTP_TCR_TSMSTRENA;
602                         ts_event_en = PTP_TCR_TSEVNTENA;
603
604                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
605                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
606                         break;
607
608                 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
609                         /* PTP v2, UDP, any kind of event packet */
610                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
611                         ptp_v2 = PTP_TCR_TSVER2ENA;
612                         /* take time stamp for all event messages */
613                         if (priv->plat->has_gmac4)
614                                 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
615                         else
616                                 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
617
618                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
619                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
620                         break;
621
622                 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
623                         /* PTP v2, UDP, Sync packet */
624                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
625                         ptp_v2 = PTP_TCR_TSVER2ENA;
626                         /* take time stamp for SYNC messages only */
627                         ts_event_en = PTP_TCR_TSEVNTENA;
628
629                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
630                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
631                         break;
632
633                 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
634                         /* PTP v2, UDP, Delay_req packet */
635                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
636                         ptp_v2 = PTP_TCR_TSVER2ENA;
637                         /* take time stamp for Delay_Req messages only */
638                         ts_master_en = PTP_TCR_TSMSTRENA;
639                         ts_event_en = PTP_TCR_TSEVNTENA;
640
641                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
642                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
643                         break;
644
645                 case HWTSTAMP_FILTER_PTP_V2_EVENT:
646                         /* PTP v2/802.1AS, any layer, any kind of event packet */
647                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
648                         ptp_v2 = PTP_TCR_TSVER2ENA;
649                         /* take time stamp for all event messages */
650                         if (priv->plat->has_gmac4)
651                                 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
652                         else
653                                 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
654
655                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
656                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
657                         ptp_over_ethernet = PTP_TCR_TSIPENA;
658                         break;
659
660                 case HWTSTAMP_FILTER_PTP_V2_SYNC:
661                         /* PTP v2/802.1AS, any layer, Sync packet */
662                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
663                         ptp_v2 = PTP_TCR_TSVER2ENA;
664                         /* take time stamp for SYNC messages only */
665                         ts_event_en = PTP_TCR_TSEVNTENA;
666
667                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
668                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
669                         ptp_over_ethernet = PTP_TCR_TSIPENA;
670                         break;
671
672                 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
673                         /* PTP v2/802.1AS, any layer, Delay_req packet */
674                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
675                         ptp_v2 = PTP_TCR_TSVER2ENA;
676                         /* take time stamp for Delay_Req messages only */
677                         ts_master_en = PTP_TCR_TSMSTRENA;
678                         ts_event_en = PTP_TCR_TSEVNTENA;
679
680                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
681                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
682                         ptp_over_ethernet = PTP_TCR_TSIPENA;
683                         break;
684
685                 case HWTSTAMP_FILTER_NTP_ALL:
686                 case HWTSTAMP_FILTER_ALL:
687                         /* time stamp any incoming packet */
688                         config.rx_filter = HWTSTAMP_FILTER_ALL;
689                         tstamp_all = PTP_TCR_TSENALL;
690                         break;
691
692                 default:
693                         return -ERANGE;
694                 }
695         } else {
696                 switch (config.rx_filter) {
697                 case HWTSTAMP_FILTER_NONE:
698                         config.rx_filter = HWTSTAMP_FILTER_NONE;
699                         break;
700                 default:
701                         /* PTP v1, UDP, any kind of event packet */
702                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
703                         break;
704                 }
705         }
706         priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
707         priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
708
709         if (!priv->hwts_tx_en && !priv->hwts_rx_en)
710                 priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
711         else {
712                 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
713                          tstamp_all | ptp_v2 | ptp_over_ethernet |
714                          ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
715                          ts_master_en | snap_type_sel);
716                 priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);
717
718                 /* program Sub Second Increment reg */
719                 sec_inc = priv->hw->ptp->config_sub_second_increment(
720                         priv->ptpaddr, priv->plat->clk_ptp_rate,
721                         priv->plat->has_gmac4);
722                 temp = div_u64(1000000000ULL, sec_inc);
723
724                 /* calculate default added value:
725                  * formula is :
726                  * addend = (2^32)/freq_div_ratio;
727                  * where, freq_div_ratio = 1e9ns/sec_inc
728                  */
729                 temp = (u64)(temp << 32);
730                 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
731                 priv->hw->ptp->config_addend(priv->ptpaddr,
732                                              priv->default_addend);
733
734                 /* initialize system time */
735                 ktime_get_real_ts64(&now);
736
737                 /* lower 32 bits of tv_sec are safe until y2106 */
738                 priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
739                                             now.tv_nsec);
740         }
741
742         return copy_to_user(ifr->ifr_data, &config,
743                             sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
744 }
745
746 /**
747  * stmmac_init_ptp - init PTP
748  * @priv: driver private structure
749  * Description: this verifies whether the HW supports PTPv1 or PTPv2 by
750  * looking at the HW capability register.
751  * This function also registers the ptp driver.
752  */
753 static int stmmac_init_ptp(struct stmmac_priv *priv)
754 {
755         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
756                 return -EOPNOTSUPP;
757
758         priv->adv_ts = 0;
759         /* Check if adv_ts can be enabled for dwmac 4.x core */
760         if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
761                 priv->adv_ts = 1;
762         /* Dwmac 3.x core with extend_desc can support adv_ts */
763         else if (priv->extend_desc && priv->dma_cap.atime_stamp)
764                 priv->adv_ts = 1;
765
766         if (priv->dma_cap.time_stamp)
767                 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
768
769         if (priv->adv_ts)
770                 netdev_info(priv->dev,
771                             "IEEE 1588-2008 Advanced Timestamp supported\n");
772
773         priv->hw->ptp = &stmmac_ptp;
774         priv->hwts_tx_en = 0;
775         priv->hwts_rx_en = 0;
776
777         stmmac_ptp_register(priv);
778
779         return 0;
780 }
781
782 static void stmmac_release_ptp(struct stmmac_priv *priv)
783 {
784         if (priv->plat->clk_ptp_ref)
785                 clk_disable_unprepare(priv->plat->clk_ptp_ref);
786         stmmac_ptp_unregister(priv);
787 }
788
789 /**
790  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
791  *  @priv: driver private structure
792  *  Description: It is used for configuring the flow control in all queues
793  */
794 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
795 {
796         u32 tx_cnt = priv->plat->tx_queues_to_use;
797
798         priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
799                                  priv->pause, tx_cnt);
800 }
801
802 /**
803  * stmmac_adjust_link - adjusts the link parameters
804  * @dev: net device structure
805  * Description: this is the helper called by the physical abstraction layer
806  * drivers to communicate the phy link status. According to the speed and
807  * duplex, this driver can invoke registered glue-logic as well.
808  * It also invokes the EEE initialization because the link can switch
809  * between different networks (that are EEE capable).
810  */
811 static void stmmac_adjust_link(struct net_device *dev)
812 {
813         struct stmmac_priv *priv = netdev_priv(dev);
814         struct phy_device *phydev = dev->phydev;
815         unsigned long flags;
816         bool new_state = false;
817
818         if (!phydev)
819                 return;
820
821         spin_lock_irqsave(&priv->lock, flags);
822
823         if (phydev->link) {
824                 u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
825
826                 /* Now we make sure that we can be in full duplex mode.
827                  * If not, we operate in half-duplex mode. */
828                 if (phydev->duplex != priv->oldduplex) {
829                         new_state = true;
830                         if (!phydev->duplex)
831                                 ctrl &= ~priv->hw->link.duplex;
832                         else
833                                 ctrl |= priv->hw->link.duplex;
834                         priv->oldduplex = phydev->duplex;
835                 }
836                 /* Flow Control operation */
837                 if (phydev->pause)
838                         stmmac_mac_flow_ctrl(priv, phydev->duplex);
839
840                 if (phydev->speed != priv->speed) {
841                         new_state = true;
842                         ctrl &= ~priv->hw->link.speed_mask;
843                         switch (phydev->speed) {
844                         case SPEED_1000:
845                                 ctrl |= priv->hw->link.speed1000;
846                                 break;
847                         case SPEED_100:
848                                 ctrl |= priv->hw->link.speed100;
849                                 break;
850                         case SPEED_10:
851                                 ctrl |= priv->hw->link.speed10;
852                                 break;
853                         default:
854                                 netif_warn(priv, link, priv->dev,
855                                            "broken speed: %d\n", phydev->speed);
856                                 phydev->speed = SPEED_UNKNOWN;
857                                 break;
858                         }
859                         if (phydev->speed != SPEED_UNKNOWN)
860                                 stmmac_hw_fix_mac_speed(priv);
861                         priv->speed = phydev->speed;
862                 }
863
864                 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
865
866                 if (!priv->oldlink) {
867                         new_state = true;
868                         priv->oldlink = true;
869                 }
870         } else if (priv->oldlink) {
871                 new_state = true;
872                 priv->oldlink = false;
873                 priv->speed = SPEED_UNKNOWN;
874                 priv->oldduplex = DUPLEX_UNKNOWN;
875         }
876
877         if (new_state && netif_msg_link(priv))
878                 phy_print_status(phydev);
879
880         spin_unlock_irqrestore(&priv->lock, flags);
881
882         if (phydev->is_pseudo_fixed_link)
883                 /* Stop the PHY layer from calling the adjust_link hook in
884                  * case a switch is attached to the stmmac driver.
885                  */
886                 phydev->irq = PHY_IGNORE_INTERRUPT;
887         else
888                 /* At this stage, init the EEE if supported.
889                  * Never called in case of fixed_link.
890                  */
891                 priv->eee_enabled = stmmac_eee_init(priv);
892 }
893
894 /**
895  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
896  * @priv: driver private structure
897  * Description: this verifies whether the HW supports the Physical Coding
898  * Sublayer (PCS), an interface that can be used when the MAC is
899  * configured for the TBI, RTBI, or SGMII PHY interface.
900  */
901 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
902 {
903         int interface = priv->plat->interface;
904
905         if (priv->dma_cap.pcs) {
906                 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
907                     (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
908                     (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
909                     (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
910                         netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
911                         priv->hw->pcs = STMMAC_PCS_RGMII;
912                 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
913                         netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
914                         priv->hw->pcs = STMMAC_PCS_SGMII;
915                 }
916         }
917 }
918
919 /**
920  * stmmac_init_phy - PHY initialization
921  * @dev: net device structure
922  * Description: it initializes the driver's PHY state, and attaches the PHY
923  * to the mac driver.
924  *  Return value:
925  *  0 on success
926  */
927 static int stmmac_init_phy(struct net_device *dev)
928 {
929         struct stmmac_priv *priv = netdev_priv(dev);
930         struct phy_device *phydev;
931         char phy_id_fmt[MII_BUS_ID_SIZE + 3];
932         char bus_id[MII_BUS_ID_SIZE];
933         int interface = priv->plat->interface;
934         int max_speed = priv->plat->max_speed;
935         priv->oldlink = false;
936         priv->speed = SPEED_UNKNOWN;
937         priv->oldduplex = DUPLEX_UNKNOWN;
938
939         if (priv->plat->phy_node) {
940                 phydev = of_phy_connect(dev, priv->plat->phy_node,
941                                         &stmmac_adjust_link, 0, interface);
942         } else {
943                 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
944                          priv->plat->bus_id);
945
946                 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
947                          priv->plat->phy_addr);
948                 netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
949                            phy_id_fmt);
950
951                 phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
952                                      interface);
953         }
954
955         if (IS_ERR_OR_NULL(phydev)) {
956                 netdev_err(priv->dev, "Could not attach to PHY\n");
957                 if (!phydev)
958                         return -ENODEV;
959
960                 return PTR_ERR(phydev);
961         }
962
963         /* Stop Advertising 1000BASE Capability if interface is not GMII */
964         if ((interface == PHY_INTERFACE_MODE_MII) ||
965             (interface == PHY_INTERFACE_MODE_RMII) ||
966                 (max_speed < 1000 && max_speed > 0))
967                 phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
968                                          SUPPORTED_1000baseT_Full);
969
970         /*
971          * Broken HW is sometimes missing the pull-up resistor on the
972          * MDIO line, which results in reads to non-existent devices returning
973          * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
974          * device as well.
975          * Note: phydev->phy_id is the result of reading the UID PHY registers.
976          */
977         if (!priv->plat->phy_node && phydev->phy_id == 0) {
978                 phy_disconnect(phydev);
979                 return -ENODEV;
980         }
981
982         /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
983          * subsequent PHY polling; make sure we force a link transition if
984          * we have an UP/DOWN/UP transition.
985          */
986         if (phydev->is_pseudo_fixed_link)
987                 phydev->irq = PHY_POLL;
988
989         phy_attached_info(phydev);
990         return 0;
991 }
992
993 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
994 {
995         u32 rx_cnt = priv->plat->rx_queues_to_use;
996         void *head_rx;
997         u32 queue;
998
999         /* Display RX rings */
1000         for (queue = 0; queue < rx_cnt; queue++) {
1001                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1002
1003                 pr_info("\tRX Queue %u rings\n", queue);
1004
1005                 if (priv->extend_desc)
1006                         head_rx = (void *)rx_q->dma_erx;
1007                 else
1008                         head_rx = (void *)rx_q->dma_rx;
1009
1010                 /* Display RX ring */
1011                 priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
1012         }
1013 }
1014
1015 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1016 {
1017         u32 tx_cnt = priv->plat->tx_queues_to_use;
1018         void *head_tx;
1019         u32 queue;
1020
1021         /* Display TX rings */
1022         for (queue = 0; queue < tx_cnt; queue++) {
1023                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1024
1025                 pr_info("\tTX Queue %d rings\n", queue);
1026
1027                 if (priv->extend_desc)
1028                         head_tx = (void *)tx_q->dma_etx;
1029                 else
1030                         head_tx = (void *)tx_q->dma_tx;
1031
1032                 priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
1033         }
1034 }
1035
1036 static void stmmac_display_rings(struct stmmac_priv *priv)
1037 {
1038         /* Display RX ring */
1039         stmmac_display_rx_rings(priv);
1040
1041         /* Display TX ring */
1042         stmmac_display_tx_rings(priv);
1043 }
1044
1045 static int stmmac_set_bfsize(int mtu, int bufsize)
1046 {
1047         int ret = bufsize;
1048
1049         if (mtu >= BUF_SIZE_4KiB)
1050                 ret = BUF_SIZE_8KiB;
1051         else if (mtu >= BUF_SIZE_2KiB)
1052                 ret = BUF_SIZE_4KiB;
1053         else if (mtu > DEFAULT_BUFSIZE)
1054                 ret = BUF_SIZE_2KiB;
1055         else
1056                 ret = DEFAULT_BUFSIZE;
1057
1058         return ret;
1059 }
1060
1061 /**
1062  * stmmac_clear_rx_descriptors - clear RX descriptors
1063  * @priv: driver private structure
1064  * @queue: RX queue index
1065  * Description: this function is called to clear the RX descriptors
1066  * whether basic or extended descriptors are used.
1067  */
1068 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1069 {
1070         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1071         int i;
1072
1073         /* Clear the RX descriptors */
1074         for (i = 0; i < DMA_RX_SIZE; i++)
1075                 if (priv->extend_desc)
1076                         priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic,
1077                                                      priv->use_riwt, priv->mode,
1078                                                      (i == DMA_RX_SIZE - 1));
1079                 else
1080                         priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i],
1081                                                      priv->use_riwt, priv->mode,
1082                                                      (i == DMA_RX_SIZE - 1));
1083 }
1084
1085 /**
1086  * stmmac_clear_tx_descriptors - clear tx descriptors
1087  * @priv: driver private structure
1088  * @queue: TX queue index.
1089  * Description: this function is called to clear the TX descriptors
1090  * whether basic or extended descriptors are used.
1091  */
1092 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1093 {
1094         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1095         int i;
1096
1097         /* Clear the TX descriptors */
1098         for (i = 0; i < DMA_TX_SIZE; i++)
1099                 if (priv->extend_desc)
1100                         priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1101                                                      priv->mode,
1102                                                      (i == DMA_TX_SIZE - 1));
1103                 else
1104                         priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1105                                                      priv->mode,
1106                                                      (i == DMA_TX_SIZE - 1));
1107 }
1108
1109 /**
1110  * stmmac_clear_descriptors - clear descriptors
1111  * @priv: driver private structure
1112  * Description: this function is called to clear the TX and RX descriptors
1113  * whether basic or extended descriptors are used.
1114  */
1115 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1116 {
1117         u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1118         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1119         u32 queue;
1120
1121         /* Clear the RX descriptors */
1122         for (queue = 0; queue < rx_queue_cnt; queue++)
1123                 stmmac_clear_rx_descriptors(priv, queue);
1124
1125         /* Clear the TX descriptors */
1126         for (queue = 0; queue < tx_queue_cnt; queue++)
1127                 stmmac_clear_tx_descriptors(priv, queue);
1128 }
1129
1130 /**
1131  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1132  * @priv: driver private structure
1133  * @p: descriptor pointer
1134  * @i: descriptor index
1135  * @flags: gfp flag
1136  * @queue: RX queue index
1137  * Description: this function is called to allocate a receive buffer, perform
1138  * the DMA mapping and init the descriptor.
1139  */
1140 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1141                                   int i, gfp_t flags, u32 queue)
1142 {
1143         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1144         struct sk_buff *skb;
1145
1146         skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1147         if (!skb) {
1148                 netdev_err(priv->dev,
1149                            "%s: Rx init fails; skb is NULL\n", __func__);
1150                 return -ENOMEM;
1151         }
1152         rx_q->rx_skbuff[i] = skb;
1153         rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1154                                                 priv->dma_buf_sz,
1155                                                 DMA_FROM_DEVICE);
1156         if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1157                 netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1158                 dev_kfree_skb_any(skb);
1159                 return -EINVAL;
1160         }
1161
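        /* GMAC4 and newer cores take the buffer address in des0, older
         * cores in des2.
         */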
1162         if (priv->synopsys_id >= DWMAC_CORE_4_00)
1163                 p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1164         else
1165                 p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1166
1167         if ((priv->hw->mode->init_desc3) &&
1168             (priv->dma_buf_sz == BUF_SIZE_16KiB))
1169                 priv->hw->mode->init_desc3(p);
1170
1171         return 0;
1172 }
1173
1174 /**
1175  * stmmac_free_rx_buffer - free a single RX dma buffer
1176  * @priv: private structure
1177  * @queue: RX queue index
1178  * @i: buffer index.
1179  */
1180 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1181 {
1182         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1183
1184         if (rx_q->rx_skbuff[i]) {
1185                 dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1186                                  priv->dma_buf_sz, DMA_FROM_DEVICE);
1187                 dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1188         }
1189         rx_q->rx_skbuff[i] = NULL;
1190 }
1191
1192 /**
1193  * stmmac_free_tx_buffer - free a single TX dma buffer
1194  * @priv: private structure
1195  * @queue: TX queue index
1196  * @i: buffer index.
1197  */
1198 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1199 {
1200         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1201
1202         if (tx_q->tx_skbuff_dma[i].buf) {
1203                 if (tx_q->tx_skbuff_dma[i].map_as_page)
1204                         dma_unmap_page(priv->device,
1205                                        tx_q->tx_skbuff_dma[i].buf,
1206                                        tx_q->tx_skbuff_dma[i].len,
1207                                        DMA_TO_DEVICE);
1208                 else
1209                         dma_unmap_single(priv->device,
1210                                          tx_q->tx_skbuff_dma[i].buf,
1211                                          tx_q->tx_skbuff_dma[i].len,
1212                                          DMA_TO_DEVICE);
1213         }
1214
1215         if (tx_q->tx_skbuff[i]) {
1216                 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1217                 tx_q->tx_skbuff[i] = NULL;
1218                 tx_q->tx_skbuff_dma[i].buf = 0;
1219                 tx_q->tx_skbuff_dma[i].map_as_page = false;
1220         }
1221 }
1222
1223 /**
1224  * init_dma_rx_desc_rings - init the RX descriptor rings
1225  * @dev: net device structure
1226  * @flags: gfp flag.
1227  * Description: this function initializes the DMA RX descriptors
1228  * and allocates the socket buffers. It supports the chained and ring
1229  * modes.
1230  */
1231 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1232 {
1233         struct stmmac_priv *priv = netdev_priv(dev);
1234         u32 rx_count = priv->plat->rx_queues_to_use;
1235         unsigned int bfsize = 0;
1236         int ret = -ENOMEM;
1237         int queue;
1238         int i;
1239
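        /* Pick the RX DMA buffer size: use the ring-mode 16 KiB helper when
         * available, otherwise derive it from the MTU.
         */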
1240         if (priv->hw->mode->set_16kib_bfsize)
1241                 bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
1242
1243         if (bfsize < BUF_SIZE_16KiB)
1244                 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1245
1246         priv->dma_buf_sz = bfsize;
1247
1248         /* RX INITIALIZATION */
1249         netif_dbg(priv, probe, priv->dev,
1250                   "SKB addresses:\nskb\t\tskb data\tdma data\n");
1251
1252         for (queue = 0; queue < rx_count; queue++) {
1253                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1254
1255                 netif_dbg(priv, probe, priv->dev,
1256                           "(%s) dma_rx_phy=0x%08x\n", __func__,
1257                           (u32)rx_q->dma_rx_phy);
1258
1259                 for (i = 0; i < DMA_RX_SIZE; i++) {
1260                         struct dma_desc *p;
1261
1262                         if (priv->extend_desc)
1263                                 p = &((rx_q->dma_erx + i)->basic);
1264                         else
1265                                 p = rx_q->dma_rx + i;
1266
1267                         ret = stmmac_init_rx_buffers(priv, p, i, flags,
1268                                                      queue);
1269                         if (ret)
1270                                 goto err_init_rx_buffers;
1271
1272                         netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1273                                   rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1274                                   (unsigned int)rx_q->rx_skbuff_dma[i]);
1275                 }
1276
1277                 rx_q->cur_rx = 0;
1278                 rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1279
1280                 stmmac_clear_rx_descriptors(priv, queue);
1281
1282                 /* Setup the chained descriptor addresses */
1283                 if (priv->mode == STMMAC_CHAIN_MODE) {
1284                         if (priv->extend_desc)
1285                                 priv->hw->mode->init(rx_q->dma_erx,
1286                                                      rx_q->dma_rx_phy,
1287                                                      DMA_RX_SIZE, 1);
1288                         else
1289                                 priv->hw->mode->init(rx_q->dma_rx,
1290                                                      rx_q->dma_rx_phy,
1291                                                      DMA_RX_SIZE, 0);
1292                 }
1293         }
1294
1295         buf_sz = bfsize;
1296
1297         return 0;
1298
1299 err_init_rx_buffers:
1300         while (queue >= 0) {
1301                 while (--i >= 0)
1302                         stmmac_free_rx_buffer(priv, queue, i);
1303
1304                 if (queue == 0)
1305                         break;
1306
1307                 i = DMA_RX_SIZE;
1308                 queue--;
1309         }
1310
1311         return ret;
1312 }
1313
1314 /**
1315  * init_dma_tx_desc_rings - init the TX descriptor rings
1316  * @dev: net device structure.
1317  * Description: this function initializes the DMA TX descriptors
1318  * and allocates the socket buffers. It supports the chained and ring
1319  * modes.
1320  */
1321 static int init_dma_tx_desc_rings(struct net_device *dev)
1322 {
1323         struct stmmac_priv *priv = netdev_priv(dev);
1324         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1325         u32 queue;
1326         int i;
1327
1328         for (queue = 0; queue < tx_queue_cnt; queue++) {
1329                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1330
1331                 netif_dbg(priv, probe, priv->dev,
1332                           "(%s) dma_tx_phy=0x%08x\n", __func__,
1333                          (u32)tx_q->dma_tx_phy);
1334
1335                 /* Setup the chained descriptor addresses */
1336                 if (priv->mode == STMMAC_CHAIN_MODE) {
1337                         if (priv->extend_desc)
1338                                 priv->hw->mode->init(tx_q->dma_etx,
1339                                                      tx_q->dma_tx_phy,
1340                                                      DMA_TX_SIZE, 1);
1341                         else
1342                                 priv->hw->mode->init(tx_q->dma_tx,
1343                                                      tx_q->dma_tx_phy,
1344                                                      DMA_TX_SIZE, 0);
1345                 }
1346
1347                 for (i = 0; i < DMA_TX_SIZE; i++) {
1348                         struct dma_desc *p;
1349                         if (priv->extend_desc)
1350                                 p = &((tx_q->dma_etx + i)->basic);
1351                         else
1352                                 p = tx_q->dma_tx + i;
1353
1354                         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1355                                 p->des0 = 0;
1356                                 p->des1 = 0;
1357                                 p->des2 = 0;
1358                                 p->des3 = 0;
1359                         } else {
1360                                 p->des2 = 0;
1361                         }
1362
1363                         tx_q->tx_skbuff_dma[i].buf = 0;
1364                         tx_q->tx_skbuff_dma[i].map_as_page = false;
1365                         tx_q->tx_skbuff_dma[i].len = 0;
1366                         tx_q->tx_skbuff_dma[i].last_segment = false;
1367                         tx_q->tx_skbuff[i] = NULL;
1368                 }
1369
1370                 tx_q->dirty_tx = 0;
1371                 tx_q->cur_tx = 0;
1372                 tx_q->mss = 0;
1373
1374                 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1375         }
1376
1377         return 0;
1378 }
1379
1380 /**
1381  * init_dma_desc_rings - init the RX/TX descriptor rings
1382  * @dev: net device structure
1383  * @flags: gfp flag.
1384  * Description: this function initializes the DMA RX/TX descriptors
1385  * and allocates the socket buffers. It supports the chained and ring
1386  * modes.
1387  */
1388 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1389 {
1390         struct stmmac_priv *priv = netdev_priv(dev);
1391         int ret;
1392
1393         ret = init_dma_rx_desc_rings(dev, flags);
1394         if (ret)
1395                 return ret;
1396
1397         ret = init_dma_tx_desc_rings(dev);
1398
1399         stmmac_clear_descriptors(priv);
1400
1401         if (netif_msg_hw(priv))
1402                 stmmac_display_rings(priv);
1403
1404         return ret;
1405 }
1406
1407 /**
1408  * dma_free_rx_skbufs - free RX dma buffers
1409  * @priv: private structure
1410  * @queue: RX queue index
1411  */
1412 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1413 {
1414         int i;
1415
1416         for (i = 0; i < DMA_RX_SIZE; i++)
1417                 stmmac_free_rx_buffer(priv, queue, i);
1418 }
1419
1420 /**
1421  * dma_free_tx_skbufs - free TX dma buffers
1422  * @priv: private structure
1423  * @queue: TX queue index
1424  */
1425 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1426 {
1427         int i;
1428
1429         for (i = 0; i < DMA_TX_SIZE; i++)
1430                 stmmac_free_tx_buffer(priv, queue, i);
1431 }
1432
1433 /**
1434  * free_dma_rx_desc_resources - free RX dma desc resources
1435  * @priv: private structure
1436  */
1437 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1438 {
1439         u32 rx_count = priv->plat->rx_queues_to_use;
1440         u32 queue;
1441
1442         /* Free RX queue resources */
1443         for (queue = 0; queue < rx_count; queue++) {
1444                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1445
1446                 /* Release the DMA RX socket buffers */
1447                 dma_free_rx_skbufs(priv, queue);
1448
1449                 /* Free DMA regions of consistent memory previously allocated */
1450                 if (!priv->extend_desc)
1451                         dma_free_coherent(priv->device,
1452                                           DMA_RX_SIZE * sizeof(struct dma_desc),
1453                                           rx_q->dma_rx, rx_q->dma_rx_phy);
1454                 else
1455                         dma_free_coherent(priv->device, DMA_RX_SIZE *
1456                                           sizeof(struct dma_extended_desc),
1457                                           rx_q->dma_erx, rx_q->dma_rx_phy);
1458
1459                 kfree(rx_q->rx_skbuff_dma);
1460                 kfree(rx_q->rx_skbuff);
1461         }
1462 }
1463
1464 /**
1465  * free_dma_tx_desc_resources - free TX dma desc resources
1466  * @priv: private structure
1467  */
1468 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1469 {
1470         u32 tx_count = priv->plat->tx_queues_to_use;
1471         u32 queue;
1472
1473         /* Free TX queue resources */
1474         for (queue = 0; queue < tx_count; queue++) {
1475                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1476
1477                 /* Release the DMA TX socket buffers */
1478                 dma_free_tx_skbufs(priv, queue);
1479
1480                 /* Free DMA regions of consistent memory previously allocated */
1481                 if (!priv->extend_desc)
1482                         dma_free_coherent(priv->device,
1483                                           DMA_TX_SIZE * sizeof(struct dma_desc),
1484                                           tx_q->dma_tx, tx_q->dma_tx_phy);
1485                 else
1486                         dma_free_coherent(priv->device, DMA_TX_SIZE *
1487                                           sizeof(struct dma_extended_desc),
1488                                           tx_q->dma_etx, tx_q->dma_tx_phy);
1489
1490                 kfree(tx_q->tx_skbuff_dma);
1491                 kfree(tx_q->tx_skbuff);
1492         }
1493 }
1494
1495 /**
1496  * alloc_dma_rx_desc_resources - alloc RX resources.
1497  * @priv: private structure
1498  * Description: according to which descriptor can be used (extended or basic)
1499  * this function allocates the resources for the RX path only. It also
1500  * pre-allocates the RX socket buffers in order to allow the zero-copy
1501  * mechanism.
1502  */
1503 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1504 {
1505         u32 rx_count = priv->plat->rx_queues_to_use;
1506         int ret = -ENOMEM;
1507         u32 queue;
1508
1509         /* RX queues buffers and DMA */
1510         for (queue = 0; queue < rx_count; queue++) {
1511                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1512
1513                 rx_q->queue_index = queue;
1514                 rx_q->priv_data = priv;
1515
1516                 rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1517                                                     sizeof(dma_addr_t),
1518                                                     GFP_KERNEL);
1519                 if (!rx_q->rx_skbuff_dma)
1520                         goto err_dma;
1521
1522                 rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1523                                                 sizeof(struct sk_buff *),
1524                                                 GFP_KERNEL);
1525                 if (!rx_q->rx_skbuff)
1526                         goto err_dma;
1527
1528                 if (priv->extend_desc) {
1529                         rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1530                                                             DMA_RX_SIZE *
1531                                                             sizeof(struct
1532                                                             dma_extended_desc),
1533                                                             &rx_q->dma_rx_phy,
1534                                                             GFP_KERNEL);
1535                         if (!rx_q->dma_erx)
1536                                 goto err_dma;
1537
1538                 } else {
1539                         rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1540                                                            DMA_RX_SIZE *
1541                                                            sizeof(struct
1542                                                            dma_desc),
1543                                                            &rx_q->dma_rx_phy,
1544                                                            GFP_KERNEL);
1545                         if (!rx_q->dma_rx)
1546                                 goto err_dma;
1547                 }
1548         }
1549
1550         return 0;
1551
1552 err_dma:
1553         free_dma_rx_desc_resources(priv);
1554
1555         return ret;
1556 }
1557
1558 /**
1559  * alloc_dma_tx_desc_resources - alloc TX resources.
1560  * @priv: private structure
1561  * Description: according to which descriptor can be used (extended or basic)
1562  * this function allocates the resources for the TX path only: the TX
1563  * descriptor ring and the per-descriptor bookkeeping (skb pointers and
1564  * DMA mapping state).
1565  */
1566 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1567 {
1568         u32 tx_count = priv->plat->tx_queues_to_use;
1569         int ret = -ENOMEM;
1570         u32 queue;
1571
1572         /* TX queues buffers and DMA */
1573         for (queue = 0; queue < tx_count; queue++) {
1574                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1575
1576                 tx_q->queue_index = queue;
1577                 tx_q->priv_data = priv;
1578
1579                 tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1580                                                     sizeof(*tx_q->tx_skbuff_dma),
1581                                                     GFP_KERNEL);
1582                 if (!tx_q->tx_skbuff_dma)
1583                         goto err_dma;
1584
1585                 tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1586                                                 sizeof(struct sk_buff *),
1587                                                 GFP_KERNEL);
1588                 if (!tx_q->tx_skbuff)
1589                         goto err_dma;
1590
1591                 if (priv->extend_desc) {
1592                         tx_q->dma_etx = dma_zalloc_coherent(priv->device,
1593                                                             DMA_TX_SIZE *
1594                                                             sizeof(struct
1595                                                             dma_extended_desc),
1596                                                             &tx_q->dma_tx_phy,
1597                                                             GFP_KERNEL);
1598                         if (!tx_q->dma_etx)
1599                                 goto err_dma;
1600                 } else {
1601                         tx_q->dma_tx = dma_zalloc_coherent(priv->device,
1602                                                            DMA_TX_SIZE *
1603                                                            sizeof(struct
1604                                                                   dma_desc),
1605                                                            &tx_q->dma_tx_phy,
1606                                                            GFP_KERNEL);
1607                         if (!tx_q->dma_tx)
1608                                 goto err_dma;
1609                 }
1610         }
1611
1612         return 0;
1613
1614 err_dma:
1615         free_dma_tx_desc_resources(priv);
1616
1617         return ret;
1618 }
1619
1620 /**
1621  * alloc_dma_desc_resources - alloc TX/RX resources.
1622  * @priv: private structure
1623  * Description: according to which descriptor can be used (extended or basic)
1624  * this function allocates the resources for the TX and RX paths. In case of
1625  * reception, for example, it pre-allocates the RX socket buffers in order to
1626  * allow the zero-copy mechanism.
1627  */
1628 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1629 {
1630         /* RX Allocation */
1631         int ret = alloc_dma_rx_desc_resources(priv);
1632
1633         if (ret)
1634                 return ret;
1635
1636         ret = alloc_dma_tx_desc_resources(priv);
1637
1638         return ret;
1639 }
1640
1641 /**
1642  * free_dma_desc_resources - free dma desc resources
1643  * @priv: private structure
1644  */
1645 static void free_dma_desc_resources(struct stmmac_priv *priv)
1646 {
1647         /* Release the DMA RX socket buffers */
1648         free_dma_rx_desc_resources(priv);
1649
1650         /* Release the DMA TX socket buffers */
1651         free_dma_tx_desc_resources(priv);
1652 }
1653
1654 /**
1655  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1656  *  @priv: driver private structure
1657  *  Description: It is used for enabling the rx queues in the MAC
1658  */
1659 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1660 {
1661         u32 rx_queues_count = priv->plat->rx_queues_to_use;
1662         int queue;
1663         u8 mode;
1664
1665         for (queue = 0; queue < rx_queues_count; queue++) {
1666                 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1667                 priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
1668         }
1669 }
1670
1671 /**
1672  * stmmac_start_rx_dma - start RX DMA channel
1673  * @priv: driver private structure
1674  * @chan: RX channel index
1675  * Description:
1676  * This starts a RX DMA channel
1677  */
1678 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1679 {
1680         netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1681         priv->hw->dma->start_rx(priv->ioaddr, chan);
1682 }
1683
1684 /**
1685  * stmmac_start_tx_dma - start TX DMA channel
1686  * @priv: driver private structure
1687  * @chan: TX channel index
1688  * Description:
1689  * This starts a TX DMA channel
1690  */
1691 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1692 {
1693         netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1694         priv->hw->dma->start_tx(priv->ioaddr, chan);
1695 }
1696
1697 /**
1698  * stmmac_stop_rx_dma - stop RX DMA channel
1699  * @priv: driver private structure
1700  * @chan: RX channel index
1701  * Description:
1702  * This stops a RX DMA channel
1703  */
1704 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1705 {
1706         netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1707         priv->hw->dma->stop_rx(priv->ioaddr, chan);
1708 }
1709
1710 /**
1711  * stmmac_stop_tx_dma - stop TX DMA channel
1712  * @priv: driver private structure
1713  * @chan: TX channel index
1714  * Description:
1715  * This stops a TX DMA channel
1716  */
1717 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1718 {
1719         netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1720         priv->hw->dma->stop_tx(priv->ioaddr, chan);
1721 }
1722
1723 /**
1724  * stmmac_start_all_dma - start all RX and TX DMA channels
1725  * @priv: driver private structure
1726  * Description:
1727  * This starts all the RX and TX DMA channels
1728  */
1729 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1730 {
1731         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1732         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1733         u32 chan = 0;
1734
1735         for (chan = 0; chan < rx_channels_count; chan++)
1736                 stmmac_start_rx_dma(priv, chan);
1737
1738         for (chan = 0; chan < tx_channels_count; chan++)
1739                 stmmac_start_tx_dma(priv, chan);
1740 }
1741
1742 /**
1743  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1744  * @priv: driver private structure
1745  * Description:
1746  * This stops the RX and TX DMA channels
1747  */
1748 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1749 {
1750         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1751         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1752         u32 chan = 0;
1753
1754         for (chan = 0; chan < rx_channels_count; chan++)
1755                 stmmac_stop_rx_dma(priv, chan);
1756
1757         for (chan = 0; chan < tx_channels_count; chan++)
1758                 stmmac_stop_tx_dma(priv, chan);
1759 }
1760
1761 /**
1762  *  stmmac_dma_operation_mode - HW DMA operation mode
1763  *  @priv: driver private structure
1764  *  Description: it is used for configuring the DMA operation mode register in
1765  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1766  */
1767 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1768 {
1769         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1770         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1771         int rxfifosz = priv->plat->rx_fifo_size;
1772         int txfifosz = priv->plat->tx_fifo_size;
1773         u32 txmode = 0;
1774         u32 rxmode = 0;
1775         u32 chan = 0;
1776         u8 qmode = 0;
1777
1778         if (rxfifosz == 0)
1779                 rxfifosz = priv->dma_cap.rx_fifo_size;
1780         if (txfifosz == 0)
1781                 txfifosz = priv->dma_cap.tx_fifo_size;
1782
1783         /* Adjust for real per queue fifo size */
1784         rxfifosz /= rx_channels_count;
1785         txfifosz /= tx_channels_count;
1786
1787         if (priv->plat->force_thresh_dma_mode) {
1788                 txmode = tc;
1789                 rxmode = tc;
1790         } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1791                 /*
1792                  * In case of GMAC, SF mode can be enabled
1793                  * to perform the TX COE in HW. This depends on:
1794                  * 1) TX COE if actually supported
1795                  * 2) There is no bugged Jumbo frame support
1796                  *    that needs to not insert csum in the TDES.
1797                  */
1798                 txmode = SF_DMA_MODE;
1799                 rxmode = SF_DMA_MODE;
1800                 priv->xstats.threshold = SF_DMA_MODE;
1801         } else {
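                /* Default: TX in threshold (cut-through) mode using tc,
                 * RX in store-and-forward mode.
                 */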
1802                 txmode = tc;
1803                 rxmode = SF_DMA_MODE;
1804         }
1805
1806         /* configure all channels */
1807         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1808                 for (chan = 0; chan < rx_channels_count; chan++) {
1809                         qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1810
1811                         priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1812                                                    rxfifosz, qmode);
1813                 }
1814
1815                 for (chan = 0; chan < tx_channels_count; chan++) {
1816                         qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1817
1818                         priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan,
1819                                                    txfifosz, qmode);
1820                 }
1821         } else {
1822                 priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1823                                         rxfifosz);
1824         }
1825 }
1826
1827 /**
1828  * stmmac_tx_clean - to manage the transmission completion
1829  * @priv: driver private structure
1830  * @queue: TX queue index
1831  * Description: it reclaims the transmit resources after transmission completes.
1832  */
1833 static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1834 {
1835         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1836         unsigned int bytes_compl = 0, pkts_compl = 0;
1837         unsigned int entry;
1838
1839         netif_tx_lock(priv->dev);
1840
1841         priv->xstats.tx_clean++;
1842
1843         entry = tx_q->dirty_tx;
1844         while (entry != tx_q->cur_tx) {
1845                 struct sk_buff *skb = tx_q->tx_skbuff[entry];
1846                 struct dma_desc *p;
1847                 int status;
1848
1849                 if (priv->extend_desc)
1850                         p = (struct dma_desc *)(tx_q->dma_etx + entry);
1851                 else
1852                         p = tx_q->dma_tx + entry;
1853
1854                 status = priv->hw->desc->tx_status(&priv->dev->stats,
1855                                                       &priv->xstats, p,
1856                                                       priv->ioaddr);
1857                 /* Check if the descriptor is owned by the DMA */
1858                 if (unlikely(status & tx_dma_own))
1859                         break;
1860
1861                 /* Make sure descriptor fields are read after reading
1862                  * the own bit.
1863                  */
1864                 dma_rmb();
1865
1866                 /* Just consider the last segment and ...*/
1867                 if (likely(!(status & tx_not_ls))) {
1868                         /* ... verify the status error condition */
1869                         if (unlikely(status & tx_err)) {
1870                                 priv->dev->stats.tx_errors++;
1871                         } else {
1872                                 priv->dev->stats.tx_packets++;
1873                                 priv->xstats.tx_pkt_n++;
1874                         }
1875                         stmmac_get_tx_hwtstamp(priv, p, skb);
1876                 }
1877
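                /* Unmap the buffer with the helper matching how it was
                 * mapped (page fragment vs. single/linear mapping).
                 */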
1878                 if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1879                         if (tx_q->tx_skbuff_dma[entry].map_as_page)
1880                                 dma_unmap_page(priv->device,
1881                                                tx_q->tx_skbuff_dma[entry].buf,
1882                                                tx_q->tx_skbuff_dma[entry].len,
1883                                                DMA_TO_DEVICE);
1884                         else
1885                                 dma_unmap_single(priv->device,
1886                                                  tx_q->tx_skbuff_dma[entry].buf,
1887                                                  tx_q->tx_skbuff_dma[entry].len,
1888                                                  DMA_TO_DEVICE);
1889                         tx_q->tx_skbuff_dma[entry].buf = 0;
1890                         tx_q->tx_skbuff_dma[entry].len = 0;
1891                         tx_q->tx_skbuff_dma[entry].map_as_page = false;
1892                 }
1893
1894                 if (priv->hw->mode->clean_desc3)
1895                         priv->hw->mode->clean_desc3(tx_q, p);
1896
1897                 tx_q->tx_skbuff_dma[entry].last_segment = false;
1898                 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1899
1900                 if (likely(skb != NULL)) {
1901                         pkts_compl++;
1902                         bytes_compl += skb->len;
1903                         dev_consume_skb_any(skb);
1904                         tx_q->tx_skbuff[entry] = NULL;
1905                 }
1906
1907                 priv->hw->desc->release_tx_desc(p, priv->mode);
1908
1909                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1910         }
1911         tx_q->dirty_tx = entry;
1912
1913         netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1914                                   pkts_compl, bytes_compl);
1915
1916         if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1917                                                                 queue))) &&
1918             stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1919
1920                 netif_dbg(priv, tx_done, priv->dev,
1921                           "%s: restart transmit\n", __func__);
1922                 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1923         }
1924
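        /* With some TX work reclaimed, give the EEE logic a chance to
         * enter LPI and re-arm its control timer.
         */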
1925         if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1926                 stmmac_enable_eee_mode(priv);
1927                 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1928         }
1929         netif_tx_unlock(priv->dev);
1930 }
1931
1932 static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan)
1933 {
1934         priv->hw->dma->enable_dma_irq(priv->ioaddr, chan);
1935 }
1936
1937 static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan)
1938 {
1939         priv->hw->dma->disable_dma_irq(priv->ioaddr, chan);
1940 }
1941
1942 /**
1943  * stmmac_tx_err - to manage the tx error
1944  * @priv: driver private structure
1945  * @chan: channel index
1946  * Description: it cleans the descriptors and restarts the transmission
1947  * in case of transmission errors.
1948  */
1949 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1950 {
1951         struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1952         int i;
1953
1954         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1955
1956         stmmac_stop_tx_dma(priv, chan);
1957         dma_free_tx_skbufs(priv, chan);
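        /* Re-initialize every TX descriptor; the last one is flagged as
         * the end of the ring.
         */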
1958         for (i = 0; i < DMA_TX_SIZE; i++)
1959                 if (priv->extend_desc)
1960                         priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1961                                                      priv->mode,
1962                                                      (i == DMA_TX_SIZE - 1));
1963                 else
1964                         priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1965                                                      priv->mode,
1966                                                      (i == DMA_TX_SIZE - 1));
1967         tx_q->dirty_tx = 0;
1968         tx_q->cur_tx = 0;
1969         tx_q->mss = 0;
1970         netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1971         stmmac_start_tx_dma(priv, chan);
1972
1973         priv->dev->stats.tx_errors++;
1974         netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1975 }
1976
1977 /**
1978  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1979  *  @priv: driver private structure
1980  *  @txmode: TX operating mode
1981  *  @rxmode: RX operating mode
1982  *  @chan: channel index
1983  *  Description: it is used for configuring the DMA operation mode at
1984  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1985  *  mode.
1986  */
1987 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1988                                           u32 rxmode, u32 chan)
1989 {
1990         u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1991         u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1992         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1993         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1994         int rxfifosz = priv->plat->rx_fifo_size;
1995         int txfifosz = priv->plat->tx_fifo_size;
1996
1997         if (rxfifosz == 0)
1998                 rxfifosz = priv->dma_cap.rx_fifo_size;
1999         if (txfifosz == 0)
2000                 txfifosz = priv->dma_cap.tx_fifo_size;
2001
2002         /* Adjust for real per queue fifo size */
2003         rxfifosz /= rx_channels_count;
2004         txfifosz /= tx_channels_count;
2005
2006         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2007                 priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
2008                                            rxfifosz, rxqmode);
2009                 priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan,
2010                                            txfifosz, txqmode);
2011         } else {
2012                 priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
2013                                         rxfifosz);
2014         }
2015 }
2016
2017 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2018 {
2019         bool ret = false;
2020
2021         /* Safety features are only available in cores >= 5.10 */
2022         if (priv->synopsys_id < DWMAC_CORE_5_10)
2023                 return ret;
2024         if (priv->hw->mac->safety_feat_irq_status)
2025                 ret = priv->hw->mac->safety_feat_irq_status(priv->dev,
2026                                 priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2027
2028         if (ret)
2029                 stmmac_global_err(priv);
2030         return ret;
2031 }
2032
2033 /**
2034  * stmmac_dma_interrupt - DMA ISR
2035  * @priv: driver private structure
2036  * Description: this is the DMA ISR. It is called by the main ISR.
2037  * It calls the dwmac dma routine and schedule poll method in case of some
2038  * It calls the dwmac dma routine and schedules the poll method in case
2039  * some work can be done.
2040 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2041 {
2042         u32 tx_channel_count = priv->plat->tx_queues_to_use;
2043         u32 rx_channel_count = priv->plat->rx_queues_to_use;
2044         u32 channels_to_check = tx_channel_count > rx_channel_count ?
2045                                 tx_channel_count : rx_channel_count;
2046         u32 chan;
2047         bool poll_scheduled = false;
2048         int status[channels_to_check];
2049
2050         /* Each DMA channel can be used for rx and tx simultaneously, yet
2051          * napi_struct is embedded in struct stmmac_rx_queue rather than in a
2052          * stmmac_channel struct.
2053          * Because of this, stmmac_poll currently checks (and possibly wakes)
2054          * all tx queues rather than just a single tx queue.
2055          */
2056         for (chan = 0; chan < channels_to_check; chan++)
2057                 status[chan] = priv->hw->dma->dma_interrupt(priv->ioaddr,
2058                                                             &priv->xstats,
2059                                                             chan);
2060
2061         for (chan = 0; chan < rx_channel_count; chan++) {
2062                 if (likely(status[chan] & handle_rx)) {
2063                         struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2064
2065                         if (likely(napi_schedule_prep(&rx_q->napi))) {
2066                                 stmmac_disable_dma_irq(priv, chan);
2067                                 __napi_schedule(&rx_q->napi);
2068                                 poll_scheduled = true;
2069                         }
2070                 }
2071         }
2072
2073         /* If we scheduled poll, we already know that tx queues will be checked.
2074          * If we didn't schedule poll, see if any DMA channel (used by tx) has a
2075          * completed transmission, if so, call stmmac_poll (once).
2076          */
2077         if (!poll_scheduled) {
2078                 for (chan = 0; chan < tx_channel_count; chan++) {
2079                         if (status[chan] & handle_tx) {
2080                                 /* It doesn't matter what rx queue we choose
2081                                  * here. We use 0 since it always exists.
2082                                  */
2083                                 struct stmmac_rx_queue *rx_q =
2084                                         &priv->rx_queue[0];
2085
2086                                 if (likely(napi_schedule_prep(&rx_q->napi))) {
2087                                         stmmac_disable_dma_irq(priv, chan);
2088                                         __napi_schedule(&rx_q->napi);
2089                                 }
2090                                 break;
2091                         }
2092                 }
2093         }
2094
2095         for (chan = 0; chan < tx_channel_count; chan++) {
2096                 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2097                         /* Try to bump up the dma threshold on this failure */
2098                         if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2099                             (tc <= 256)) {
2100                                 tc += 64;
2101                                 if (priv->plat->force_thresh_dma_mode)
2102                                         stmmac_set_dma_operation_mode(priv,
2103                                                                       tc,
2104                                                                       tc,
2105                                                                       chan);
2106                                 else
2107                                         stmmac_set_dma_operation_mode(priv,
2108                                                                     tc,
2109                                                                     SF_DMA_MODE,
2110                                                                     chan);
2111                                 priv->xstats.threshold = tc;
2112                         }
2113                 } else if (unlikely(status[chan] == tx_hard_error)) {
2114                         stmmac_tx_err(priv, chan);
2115                 }
2116         }
2117 }
2118
2119 /**
2120  * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
2121  * @priv: driver private structure
2122  * Description: this masks the MMC irq; in fact, the counters are managed in SW.
2123  */
2124 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2125 {
2126         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2127                             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2128
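        /* The PTP and MMC register blocks sit at different offsets on
         * GMAC4 and newer cores.
         */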
2129         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2130                 priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
2131                 priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
2132         } else {
2133                 priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
2134                 priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
2135         }
2136
2137         dwmac_mmc_intr_all_mask(priv->mmcaddr);
2138
2139         if (priv->dma_cap.rmon) {
2140                 dwmac_mmc_ctrl(priv->mmcaddr, mode);
2141                 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2142         } else
2143                 netdev_info(priv->dev, "No MAC Management Counters available\n");
2144 }
2145
2146 /**
2147  * stmmac_selec_desc_mode - select among normal/alternate/extended descriptors
2148  * @priv: driver private structure
2149  * Description: select the Enhanced/Alternate or Normal descriptors.
2150  * In case of Enhanced/Alternate, it checks if the extended descriptors are
2151  * supported by the HW capability register.
2152  */
2153 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
2154 {
2155         if (priv->plat->enh_desc) {
2156                 dev_info(priv->device, "Enhanced/Alternate descriptors\n");
2157
2158                 /* GMAC older than 3.50 has no extended descriptors */
2159                 if (priv->synopsys_id >= DWMAC_CORE_3_50) {
2160                         dev_info(priv->device, "Enabled extended descriptors\n");
2161                         priv->extend_desc = 1;
2162                 } else
2163                         dev_warn(priv->device, "Extended descriptors not supported\n");
2164
2165                 priv->hw->desc = &enh_desc_ops;
2166         } else {
2167                 dev_info(priv->device, "Normal descriptors\n");
2168                 priv->hw->desc = &ndesc_ops;
2169         }
2170 }
2171
2172 /**
2173  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2174  * @priv: driver private structure
2175  * Description:
2176  *  new GMAC chip generations have a dedicated register to indicate the
2177  *  presence of the optional features/functions.
2178  *  This can also be used to override the values passed through the
2179  *  platform, which is necessary for old MAC10/100 and GMAC chips.
2180  */
2181 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2182 {
2183         u32 ret = 0;
2184
2185         if (priv->hw->dma->get_hw_feature) {
2186                 priv->hw->dma->get_hw_feature(priv->ioaddr,
2187                                               &priv->dma_cap);
2188                 ret = 1;
2189         }
2190
2191         return ret;
2192 }
2193
2194 /**
2195  * stmmac_check_ether_addr - check if the MAC addr is valid
2196  * @priv: driver private structure
2197  * Description:
2198  * it verifies that the MAC address is valid; in case of failure it
2199  * generates a random MAC address.
2200  */
2201 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2202 {
2203         if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2204                 priv->hw->mac->get_umac_addr(priv->hw,
2205                                              priv->dev->dev_addr, 0);
2206                 if (!is_valid_ether_addr(priv->dev->dev_addr))
2207                         eth_hw_addr_random(priv->dev);
2208                 netdev_info(priv->dev, "device MAC address %pM\n",
2209                             priv->dev->dev_addr);
2210         }
2211 }
2212
2213 /**
2214  * stmmac_init_dma_engine - DMA init.
2215  * @priv: driver private structure
2216  * Description:
2217  * It inits the DMA invoking the specific MAC/GMAC callback.
2218  * Some DMA parameters can be passed from the platform;
2219  * in case these are not passed, a default is kept for the MAC or GMAC.
2220  */
2221 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2222 {
2223         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2224         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2225         struct stmmac_rx_queue *rx_q;
2226         struct stmmac_tx_queue *tx_q;
2227         u32 dummy_dma_rx_phy = 0;
2228         u32 dummy_dma_tx_phy = 0;
2229         u32 chan = 0;
2230         int atds = 0;
2231         int ret = 0;
2232
2233         if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2234                 dev_err(priv->device, "Invalid DMA configuration\n");
2235                 return -EINVAL;
2236         }
2237
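        /* ATDS (alternate descriptor size) is only set when extended
         * descriptors are used in ring mode.
         */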
2238         if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2239                 atds = 1;
2240
2241         ret = priv->hw->dma->reset(priv->ioaddr);
2242         if (ret) {
2243                 dev_err(priv->device, "Failed to reset the dma\n");
2244                 return ret;
2245         }
2246
2247         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2248                 /* DMA Configuration */
2249                 priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2250                                     dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
2251
2252                 /* DMA RX Channel Configuration */
2253                 for (chan = 0; chan < rx_channels_count; chan++) {
2254                         rx_q = &priv->rx_queue[chan];
2255
2256                         priv->hw->dma->init_rx_chan(priv->ioaddr,
2257                                                     priv->plat->dma_cfg,
2258                                                     rx_q->dma_rx_phy, chan);
2259
2260                         rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2261                                     (DMA_RX_SIZE * sizeof(struct dma_desc));
2262                         priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
2263                                                        rx_q->rx_tail_addr,
2264                                                        chan);
2265                 }
2266
2267                 /* DMA TX Channel Configuration */
2268                 for (chan = 0; chan < tx_channels_count; chan++) {
2269                         tx_q = &priv->tx_queue[chan];
2270
2271                         priv->hw->dma->init_chan(priv->ioaddr,
2272                                                  priv->plat->dma_cfg,
2273                                                  chan);
2274
2275                         priv->hw->dma->init_tx_chan(priv->ioaddr,
2276                                                     priv->plat->dma_cfg,
2277                                                     tx_q->dma_tx_phy, chan);
2278
2279                         tx_q->tx_tail_addr = tx_q->dma_tx_phy +
2280                                     (DMA_TX_SIZE * sizeof(struct dma_desc));
2281                         priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
2282                                                        tx_q->tx_tail_addr,
2283                                                        chan);
2284                 }
2285         } else {
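                /* Pre-GMAC4 cores have a single DMA channel: program it
                 * with the rings of queue 0 (chan is still 0 here).
                 */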
2286                 rx_q = &priv->rx_queue[chan];
2287                 tx_q = &priv->tx_queue[chan];
2288                 priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2289                                     tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
2290         }
2291
2292         if (priv->plat->axi && priv->hw->dma->axi)
2293                 priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
2294
2295         return ret;
2296 }
2297
2298 /**
2299  * stmmac_tx_timer - mitigation sw timer for tx.
2300  * @t: timer handle used to retrieve the driver private structure
2301  * Description:
2302  * This is the timer handler to directly invoke the stmmac_tx_clean.
2303  */
2304 static void stmmac_tx_timer(struct timer_list *t)
2305 {
2306         struct stmmac_priv *priv = from_timer(priv, t, txtimer);
2307         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2308         u32 queue;
2309
2310         /* let's scan all the tx queues */
2311         for (queue = 0; queue < tx_queues_count; queue++)
2312                 stmmac_tx_clean(priv, queue);
2313 }
2314
2315 /**
2316  * stmmac_init_tx_coalesce - init tx mitigation options.
2317  * @priv: driver private structure
2318  * Description:
2319  * This inits the transmit coalesce parameters: i.e. timer rate,
2320  * timer handler and default threshold used for enabling the
2321  * interrupt on completion bit.
2322  */
2323 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2324 {
2325         priv->tx_coal_frames = STMMAC_TX_FRAMES;
2326         priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2327         timer_setup(&priv->txtimer, stmmac_tx_timer, 0);
2328         priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
2329         add_timer(&priv->txtimer);
2330 }
2331
2332 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2333 {
2334         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2335         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2336         u32 chan;
2337
2338         /* set TX ring length */
2339         if (priv->hw->dma->set_tx_ring_len) {
2340                 for (chan = 0; chan < tx_channels_count; chan++)
2341                         priv->hw->dma->set_tx_ring_len(priv->ioaddr,
2342                                                        (DMA_TX_SIZE - 1), chan);
2343         }
2344
2345         /* set RX ring length */
2346         if (priv->hw->dma->set_rx_ring_len) {
2347                 for (chan = 0; chan < rx_channels_count; chan++)
2348                         priv->hw->dma->set_rx_ring_len(priv->ioaddr,
2349                                                        (DMA_RX_SIZE - 1), chan);
2350         }
2351 }
2352
2353 /**
2354  *  stmmac_set_tx_queue_weight - Set TX queue weight
2355  *  @priv: driver private structure
2356  *  Description: It is used for setting the TX queue weights
2357  */
2358 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2359 {
2360         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2361         u32 weight;
2362         u32 queue;
2363
2364         for (queue = 0; queue < tx_queues_count; queue++) {
2365                 weight = priv->plat->tx_queues_cfg[queue].weight;
2366                 priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
2367         }
2368 }
2369
2370 /**
2371  *  stmmac_configure_cbs - Configure CBS in TX queue
2372  *  @priv: driver private structure
2373  *  Description: It is used for configuring CBS in AVB TX queues
2374  */
2375 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2376 {
2377         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2378         u32 mode_to_use;
2379         u32 queue;
2380
2381         /* queue 0 is reserved for legacy traffic */
2382         for (queue = 1; queue < tx_queues_count; queue++) {
2383                 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2384                 if (mode_to_use == MTL_QUEUE_DCB)
2385                         continue;
2386
2387                 priv->hw->mac->config_cbs(priv->hw,
2388                                 priv->plat->tx_queues_cfg[queue].send_slope,
2389                                 priv->plat->tx_queues_cfg[queue].idle_slope,
2390                                 priv->plat->tx_queues_cfg[queue].high_credit,
2391                                 priv->plat->tx_queues_cfg[queue].low_credit,
2392                                 queue);
2393         }
2394 }
2395
2396 /**
2397  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2398  *  @priv: driver private structure
2399  *  Description: It is used for mapping RX queues to RX dma channels
2400  */
2401 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2402 {
2403         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2404         u32 queue;
2405         u32 chan;
2406
2407         for (queue = 0; queue < rx_queues_count; queue++) {
2408                 chan = priv->plat->rx_queues_cfg[queue].chan;
2409                 priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
2410         }
2411 }
2412
2413 /**
2414  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2415  *  @priv: driver private structure
2416  *  Description: It is used for configuring the RX Queue Priority
2417  */
2418 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2419 {
2420         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2421         u32 queue;
2422         u32 prio;
2423
2424         for (queue = 0; queue < rx_queues_count; queue++) {
2425                 if (!priv->plat->rx_queues_cfg[queue].use_prio)
2426                         continue;
2427
2428                 prio = priv->plat->rx_queues_cfg[queue].prio;
2429                 priv->hw->mac->rx_queue_prio(priv->hw, prio, queue);
2430         }
2431 }
2432
2433 /**
2434  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2435  *  @priv: driver private structure
2436  *  Description: It is used for configuring the TX Queue Priority
2437  */
2438 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2439 {
2440         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2441         u32 queue;
2442         u32 prio;
2443
2444         for (queue = 0; queue < tx_queues_count; queue++) {
2445                 if (!priv->plat->tx_queues_cfg[queue].use_prio)
2446                         continue;
2447
2448                 prio = priv->plat->tx_queues_cfg[queue].prio;
2449                 priv->hw->mac->tx_queue_prio(priv->hw, prio, queue);
2450         }
2451 }
2452
2453 /**
2454  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2455  *  @priv: driver private structure
2456  *  Description: It is used for configuring the RX queue routing
2457  */
2458 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2459 {
2460         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2461         u32 queue;
2462         u8 packet;
2463
2464         for (queue = 0; queue < rx_queues_count; queue++) {
2465                 /* no specific packet type routing specified for the queue */
2466                 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2467                         continue;
2468
2469                 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2470                 priv->hw->mac->rx_queue_routing(priv->hw, packet, queue);
2471         }
2472 }
2473
2474 /**
2475  *  stmmac_mtl_configuration - Configure MTL
2476  *  @priv: driver private structure
2477  *  Description: It is used for configuring the MTL
2478  */
2479 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2480 {
2481         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2482         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2483
2484         if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
2485                 stmmac_set_tx_queue_weight(priv);
2486
2487         /* Configure MTL RX algorithms */
2488         if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
2489                 priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
2490                                                 priv->plat->rx_sched_algorithm);
2491
2492         /* Configure MTL TX algorithms */
2493         if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
2494                 priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
2495                                                 priv->plat->tx_sched_algorithm);
2496
2497         /* Configure CBS in AVB TX queues */
2498         if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
2499                 stmmac_configure_cbs(priv);
2500
2501         /* Map RX MTL to DMA channels */
2502         if (priv->hw->mac->map_mtl_to_dma)
2503                 stmmac_rx_queue_dma_chan_map(priv);
2504
2505         /* Enable MAC RX Queues */
2506         if (priv->hw->mac->rx_queue_enable)
2507                 stmmac_mac_enable_rx_queues(priv);
2508
2509         /* Set RX priorities */
2510         if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio)
2511                 stmmac_mac_config_rx_queues_prio(priv);
2512
2513         /* Set TX priorities */
2514         if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio)
2515                 stmmac_mac_config_tx_queues_prio(priv);
2516
2517         /* Set RX routing */
2518         if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing)
2519                 stmmac_mac_config_rx_queues_routing(priv);
2520 }
2521
2522 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
2523 {
2524         if (priv->hw->mac->safety_feat_config && priv->dma_cap.asp) {
2525                 netdev_info(priv->dev, "Enabling Safety Features\n");
2526                 priv->hw->mac->safety_feat_config(priv->ioaddr,
2527                                 priv->dma_cap.asp);
2528         } else {
2529                 netdev_info(priv->dev, "No Safety Features support found\n");
2530         }
2531 }
2532
2533 /**
2534  * stmmac_hw_setup - setup mac in a usable state.
2535  *  @dev : pointer to the device structure.
2536  *  Description:
2537  *  this is the main function to set up the HW in a usable state: the
2538  *  DMA engine is reset, the core registers are configured (e.g. AXI,
2539  *  checksum features, timers), and the DMA is ready to start receiving
2540  *  and transmitting.
2541  *  Return value:
2542  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2543  *  file on failure.
2544  */
2545 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2546 {
2547         struct stmmac_priv *priv = netdev_priv(dev);
2548         u32 rx_cnt = priv->plat->rx_queues_to_use;
2549         u32 tx_cnt = priv->plat->tx_queues_to_use;
2550         u32 chan;
2551         int ret;
2552
2553         /* DMA initialization and SW reset */
2554         ret = stmmac_init_dma_engine(priv);
2555         if (ret < 0) {
2556                 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2557                            __func__);
2558                 return ret;
2559         }
2560
2561         /* Copy the MAC addr into the HW  */
2562         priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
2563
2564         /* PS and related bits will be programmed according to the speed */
2565         if (priv->hw->pcs) {
2566                 int speed = priv->plat->mac_port_sel_speed;
2567
2568                 if ((speed == SPEED_10) || (speed == SPEED_100) ||
2569                     (speed == SPEED_1000)) {
2570                         priv->hw->ps = speed;
2571                 } else {
2572                         dev_warn(priv->device, "invalid port speed\n");
2573                         priv->hw->ps = 0;
2574                 }
2575         }
2576
2577         /* Initialize the MAC Core */
2578         priv->hw->mac->core_init(priv->hw, dev);
2579
2580         /* Initialize MTL */
2581         if (priv->synopsys_id >= DWMAC_CORE_4_00)
2582                 stmmac_mtl_configuration(priv);
2583
2584         /* Initialize Safety Features */
2585         if (priv->synopsys_id >= DWMAC_CORE_5_10)
2586                 stmmac_safety_feat_configuration(priv);
2587
2588         ret = priv->hw->mac->rx_ipc(priv->hw);
2589         if (!ret) {
2590                 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2591                 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2592                 priv->hw->rx_csum = 0;
2593         }
2594
2595         /* Enable the MAC Rx/Tx */
2596         priv->hw->mac->set_mac(priv->ioaddr, true);
2597
2598         /* Set the HW DMA mode and the COE */
2599         stmmac_dma_operation_mode(priv);
2600
2601         stmmac_mmc_setup(priv);
2602
2603         if (init_ptp) {
2604                 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2605                 if (ret < 0)
2606                         netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2607
2608                 ret = stmmac_init_ptp(priv);
2609                 if (ret == -EOPNOTSUPP)
2610                         netdev_warn(priv->dev, "PTP not supported by HW\n");
2611                 else if (ret)
2612                         netdev_warn(priv->dev, "PTP init failed\n");
2613         }
2614
2615 #ifdef CONFIG_DEBUG_FS
2616         ret = stmmac_init_fs(dev);
2617         if (ret < 0)
2618                 netdev_warn(priv->dev, "%s: failed debugFS registration\n",
2619                             __func__);
2620 #endif
2621         /* Start the ball rolling... */
2622         stmmac_start_all_dma(priv);
2623
2624         priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2625
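        /* If the RX interrupt watchdog (coalescing timer) is in use,
         * program it to its maximum value.
         */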
2626         if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
2627                 priv->rx_riwt = MAX_DMA_RIWT;
2628                 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2629         }
2630
2631         if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
2632                 priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
2633
2634         /* set TX and RX rings length */
2635         stmmac_set_rings_length(priv);
2636
2637         /* Enable TSO */
2638         if (priv->tso) {
2639                 for (chan = 0; chan < tx_cnt; chan++)
2640                         priv->hw->dma->enable_tso(priv->ioaddr, 1, chan);
2641         }
2642
2643         return 0;
2644 }
2645
2646 static void stmmac_hw_teardown(struct net_device *dev)
2647 {
2648         struct stmmac_priv *priv = netdev_priv(dev);
2649
2650         clk_disable_unprepare(priv->plat->clk_ptp_ref);
2651 }
2652
2653 /**
2654  *  stmmac_open - open entry point of the driver
2655  *  @dev : pointer to the device structure.
2656  *  Description:
2657  *  This function is the open entry point of the driver.
2658  *  Return value:
2659  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2660  *  file on failure.
2661  */
2662 static int stmmac_open(struct net_device *dev)
2663 {
2664         struct stmmac_priv *priv = netdev_priv(dev);
2665         int ret;
2666
2667         stmmac_check_ether_addr(priv);
2668
2669         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2670             priv->hw->pcs != STMMAC_PCS_TBI &&
2671             priv->hw->pcs != STMMAC_PCS_RTBI) {
2672                 ret = stmmac_init_phy(dev);
2673                 if (ret) {
2674                         netdev_err(priv->dev,
2675                                    "%s: Cannot attach to PHY (error: %d)\n",
2676                                    __func__, ret);
2677                         return ret;
2678                 }
2679         }
2680
2681         /* Extra statistics */
2682         memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2683         priv->xstats.threshold = tc;
2684
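        /* DMA buffer size comes from the buf_sz module parameter,
         * cache-line aligned by STMMAC_ALIGN().
         */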
2685         priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2686         priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2687
2688         ret = alloc_dma_desc_resources(priv);
2689         if (ret < 0) {
2690                 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2691                            __func__);
2692                 goto dma_desc_error;
2693         }
2694
2695         ret = init_dma_desc_rings(dev, GFP_KERNEL);
2696         if (ret < 0) {
2697                 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2698                            __func__);
2699                 goto init_error;
2700         }
2701
2702         ret = stmmac_hw_setup(dev, true);
2703         if (ret < 0) {
2704                 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2705                 goto init_error;
2706         }
2707
2708         stmmac_init_tx_coalesce(priv);
2709
2710         if (dev->phydev)
2711                 phy_start(dev->phydev);
2712
2713         /* Request the IRQ lines */
2714         ret = request_irq(dev->irq, stmmac_interrupt,
2715                           IRQF_SHARED, dev->name, dev);
2716         if (unlikely(ret < 0)) {
2717                 netdev_err(priv->dev,
2718                            "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2719                            __func__, dev->irq, ret);
2720                 goto irq_error;
2721         }
2722
2723         /* Request the Wake IRQ in case another line is used for WoL */
2724         if (priv->wol_irq != dev->irq) {
2725                 ret = request_irq(priv->wol_irq, stmmac_interrupt,
2726                                   IRQF_SHARED, dev->name, dev);
2727                 if (unlikely(ret < 0)) {
2728                         netdev_err(priv->dev,
2729                                    "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2730                                    __func__, priv->wol_irq, ret);
2731                         goto wolirq_error;
2732                 }
2733         }
2734
2735         /* Request the LPI IRQ in case another line is used for it */
2736         if (priv->lpi_irq > 0) {
2737                 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2738                                   dev->name, dev);
2739                 if (unlikely(ret < 0)) {
2740                         netdev_err(priv->dev,
2741                                    "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2742                                    __func__, priv->lpi_irq, ret);
2743                         goto lpiirq_error;
2744                 }
2745         }
2746
2747         stmmac_enable_all_queues(priv);
2748         stmmac_start_all_queues(priv);
2749
2750         return 0;
2751
2752 lpiirq_error:
2753         if (priv->wol_irq != dev->irq)
2754                 free_irq(priv->wol_irq, dev);
2755 wolirq_error:
2756         free_irq(dev->irq, dev);
2757 irq_error:
2758         if (dev->phydev)
2759                 phy_stop(dev->phydev);
2760
2761         del_timer_sync(&priv->txtimer);
2762         stmmac_hw_teardown(dev);
2763 init_error:
2764         free_dma_desc_resources(priv);
2765 dma_desc_error:
2766         if (dev->phydev)
2767                 phy_disconnect(dev->phydev);
2768
2769         return ret;
2770 }
2771
2772 /**
2773  *  stmmac_release - close entry point of the driver
2774  *  @dev : device pointer.
2775  *  Description:
2776  *  This is the stop entry point of the driver.
2777  */
2778 static int stmmac_release(struct net_device *dev)
2779 {
2780         struct stmmac_priv *priv = netdev_priv(dev);
2781
2782         if (priv->eee_enabled)
2783                 del_timer_sync(&priv->eee_ctrl_timer);
2784
2785         /* Stop and disconnect the PHY */
2786         if (dev->phydev) {
2787                 phy_stop(dev->phydev);
2788                 phy_disconnect(dev->phydev);
2789         }
2790
2791         stmmac_stop_all_queues(priv);
2792
2793         stmmac_disable_all_queues(priv);
2794
2795         del_timer_sync(&priv->txtimer);
2796
2797         /* Free the IRQ lines */
2798         free_irq(dev->irq, dev);
2799         if (priv->wol_irq != dev->irq)
2800                 free_irq(priv->wol_irq, dev);
2801         if (priv->lpi_irq > 0)
2802                 free_irq(priv->lpi_irq, dev);
2803
2804         /* Stop TX/RX DMA and clear the descriptors */
2805         stmmac_stop_all_dma(priv);
2806
2807         /* Release and free the Rx/Tx resources */
2808         free_dma_desc_resources(priv);
2809
2810         /* Disable the MAC Rx/Tx */
2811         priv->hw->mac->set_mac(priv->ioaddr, false);
2812
2813         netif_carrier_off(dev);
2814
2815 #ifdef CONFIG_DEBUG_FS
2816         stmmac_exit_fs(dev);
2817 #endif
2818
2819         stmmac_release_ptp(priv);
2820
2821         return 0;
2822 }
2823
2824 /**
2825  *  stmmac_tso_allocator - allocate and fill TX descriptors for a TSO payload
2826  *  @priv: driver private structure
2827  *  @des: buffer start address
2828  *  @total_len: total length to fill in descriptors
2829  *  @last_segment: condition for the last descriptor
2830  *  @queue: TX queue index
2831  *  Description:
2832  *  This function fills the descriptors and requests new descriptors
2833  *  according to the buffer length to fill.
2834  */
2835 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2836                                  int total_len, bool last_segment, u32 queue)
2837 {
2838         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2839         struct dma_desc *desc;
2840         u32 buff_size;
2841         int tmp_len;
2842
2843         tmp_len = total_len;
2844
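        /* Split the payload into TSO_MAX_BUFF_SIZE chunks: each chunk gets
         * its own descriptor, and only the chunk that closes the last
         * segment carries the Last Segment flag.
         */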
2845         while (tmp_len > 0) {
2846                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2847                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2848                 desc = tx_q->dma_tx + tx_q->cur_tx;
2849
2850                 desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2851                 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2852                             TSO_MAX_BUFF_SIZE : tmp_len;
2853
2854                 priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
2855                         0, 1,
2856                         (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2857                         0, 0);
2858
2859                 tmp_len -= TSO_MAX_BUFF_SIZE;
2860         }
2861 }
2862
2863 /**
2864  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2865  *  @skb : the socket buffer
2866  *  @dev : device pointer
2867  *  Description: this is the transmit function that is called on TSO frames
2868  *  (support available on GMAC4 and newer chips).
2869  *  The diagram below shows the ring programming in case of TSO frames:
2870  *
2871  *  First Descriptor
2872  *   --------
2873  *   | DES0 |---> buffer1 = L2/L3/L4 header
2874  *   | DES1 |---> TCP Payload (can continue on next descr...)
2875  *   | DES2 |---> buffer 1 and 2 len
2876  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2877  *   --------
2878  *      |
2879  *     ...
2880  *      |
2881  *   --------
2882  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2883  *   | DES1 | --|
2884  *   | DES2 | --> buffer 1 and 2 len
2885  *   | DES3 |
2886  *   --------
2887  *
2888  * The MSS is fixed while TSO is enabled, so the TDES3 ctx field is only programmed when it changes.
2889  */
2890 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2891 {
2892         struct dma_desc *desc, *first, *mss_desc = NULL;
2893         struct stmmac_priv *priv = netdev_priv(dev);
2894         int nfrags = skb_shinfo(skb)->nr_frags;
2895         u32 queue = skb_get_queue_mapping(skb);
2896         unsigned int first_entry, des;
2897         struct stmmac_tx_queue *tx_q;
2898         int tmp_pay_len = 0;
2899         u32 pay_len, mss;
2900         u8 proto_hdr_len;
2901         int i;
2902
2903         tx_q = &priv->tx_queue[queue];
2904
2905         /* Compute header lengths */
2906         proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2907
2908         /* Descriptor availability based on the threshold should be safe enough */
2909         if (unlikely(stmmac_tx_avail(priv, queue) <
2910                 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2911                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2912                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2913                                                                 queue));
2914                         /* This is a hard error, log it. */
2915                         netdev_err(priv->dev,
2916                                    "%s: Tx Ring full when queue awake\n",
2917                                    __func__);
2918                 }
2919                 return NETDEV_TX_BUSY;
2920         }
2921
2922         pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2923
2924         mss = skb_shinfo(skb)->gso_size;
2925
2926         /* set new MSS value if needed */
2927         if (mss != tx_q->mss) {
2928                 mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2929                 priv->hw->desc->set_mss(mss_desc, mss);
2930                 tx_q->mss = mss;
2931                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2932                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2933         }
2934
2935         if (netif_msg_tx_queued(priv)) {
2936                 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2937                         __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2938                 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2939                         skb->data_len);
2940         }
2941
2942         first_entry = tx_q->cur_tx;
2943         WARN_ON(tx_q->tx_skbuff[first_entry]);
2944
2945         desc = tx_q->dma_tx + first_entry;
2946         first = desc;
2947
2948         /* first descriptor: fill Headers on Buf1 */
2949         des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2950                              DMA_TO_DEVICE);
2951         if (dma_mapping_error(priv->device, des))
2952                 goto dma_map_err;
2953
2954         tx_q->tx_skbuff_dma[first_entry].buf = des;
2955         tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2956
2957         first->des0 = cpu_to_le32(des);
2958
2959         /* Fill start of payload in buff2 of first descriptor */
2960         if (pay_len)
2961                 first->des1 = cpu_to_le32(des + proto_hdr_len);
2962
2963         /* If needed take extra descriptors to fill the remaining payload */
2964         tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2965
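        /* The first descriptor already carries the headers (buffer 1) and
         * the start of the linear payload (buffer 2), so only the payload
         * exceeding TSO_MAX_BUFF_SIZE is handed to stmmac_tso_allocator();
         * for a typical TSO skb the linear area holds just the headers and
         * this call is a no-op.
         */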
2966         stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2967
2968         /* Prepare fragments */
2969         for (i = 0; i < nfrags; i++) {
2970                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2971
2972                 des = skb_frag_dma_map(priv->device, frag, 0,
2973                                        skb_frag_size(frag),
2974                                        DMA_TO_DEVICE);
2975                 if (dma_mapping_error(priv->device, des))
2976                         goto dma_map_err;
2977
2978                 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2979                                      (i == nfrags - 1), queue);
2980
2981                 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2982                 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2983                 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2984         }
2985
2986         tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2987
2988         /* Only the last descriptor gets to point to the skb. */
2989         tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2990
2991         /* We've used all descriptors we need for this skb, however,
2992          * advance cur_tx so that it references a fresh descriptor.
2993          * ndo_start_xmit will fill this descriptor the next time it's
2994          * called and stmmac_tx_clean may clean up to this descriptor.
2995          */
2996         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2997
2998         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2999                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3000                           __func__);
3001                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3002         }
3003
3004         dev->stats.tx_bytes += skb->len;
3005         priv->xstats.tx_tso_frames++;
3006         priv->xstats.tx_tso_nfrags += nfrags;
3007
3008         /* Manage tx mitigation */
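        /* A completion interrupt is requested only once every tx_coal_frames
         * frames; in between, the tx coalescing timer drives the TX reclaim.
         */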
3009         priv->tx_count_frames += nfrags + 1;
3010         if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
3011                 mod_timer(&priv->txtimer,
3012                           STMMAC_COAL_TIMER(priv->tx_coal_timer));
3013         } else {
3014                 priv->tx_count_frames = 0;
3015                 priv->hw->desc->set_tx_ic(desc);
3016                 priv->xstats.tx_set_ic_bit++;
3017         }
3018
3019         skb_tx_timestamp(skb);
3020
3021         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3022                      priv->hwts_tx_en)) {
3023                 /* declare that device is doing timestamping */
3024                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3025                 priv->hw->desc->enable_tx_timestamp(first);
3026         }
3027
3028         /* Complete the first descriptor before granting the DMA */
3029         priv->hw->desc->prepare_tso_tx_desc(first, 1,
3030                         proto_hdr_len,
3031                         pay_len,
3032                         1, tx_q->tx_skbuff_dma[first_entry].last_segment,
3033                         tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
3034
3035         /* If context desc is used to change MSS */
3036         if (mss_desc) {
3037                 /* Make sure that the first descriptor has been completely
3038                  * written, including its OWN bit. This is because the MSS
3039                  * descriptor actually precedes the first descriptor, so its
3040                  * OWN bit must be the last thing written.
3041                  */
3042                 dma_wmb();
3043                 priv->hw->desc->set_tx_owner(mss_desc);
3044         }
3045
3046         /* The OWN bit must be the last setting done when preparing the
3047          * descriptor, and a barrier is needed to make sure that
3048          * everything is coherent before granting the DMA engine.
3049          */
3050         wmb();
3051
3052         if (netif_msg_pktdata(priv)) {
3053                 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
3054                         __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3055                         tx_q->cur_tx, first, nfrags);
3056
3057                 priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE,
3058                                              0);
3059
3060                 pr_info(">>> frame to be transmitted: ");
3061                 print_pkt(skb->data, skb_headlen(skb));
3062         }
3063
3064         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3065
3066         priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
3067                                        queue);
3068
3069         return NETDEV_TX_OK;
3070
3071 dma_map_err:
3072         dev_err(priv->device, "Tx dma map failed\n");
3073         dev_kfree_skb(skb);
3074         priv->dev->stats.tx_dropped++;
3075         return NETDEV_TX_OK;
3076 }
3077
3078 /**
3079  *  stmmac_xmit - Tx entry point of the driver
3080  *  @skb : the socket buffer
3081  *  @dev : device pointer
3082  *  Description : this is the tx entry point of the driver.
3083  *  It programs the chain or the ring and supports oversized frames
3084  *  and SG feature.
3085  */
3086 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3087 {
3088         struct stmmac_priv *priv = netdev_priv(dev);
3089         unsigned int nopaged_len = skb_headlen(skb);
3090         int i, csum_insertion = 0, is_jumbo = 0;
3091         u32 queue = skb_get_queue_mapping(skb);
3092         int nfrags = skb_shinfo(skb)->nr_frags;
3093         int entry;
3094         unsigned int first_entry;
3095         struct dma_desc *desc, *first;
3096         struct stmmac_tx_queue *tx_q;
3097         unsigned int enh_desc;
3098         unsigned int des;
3099
3100         tx_q = &priv->tx_queue[queue];
3101
3102         /* Manage oversized TCP frames for GMAC4 device */
3103         if (skb_is_gso(skb) && priv->tso) {
3104                 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3105                         return stmmac_tso_xmit(skb, dev);
3106         }
3107
3108         if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3109                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3110                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3111                                                                 queue));
3112                         /* This is a hard error, log it. */
3113                         netdev_err(priv->dev,
3114                                    "%s: Tx Ring full when queue awake\n",
3115                                    __func__);
3116                 }
3117                 return NETDEV_TX_BUSY;
3118         }
3119
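        /* Make sure the TX path has left the EEE low-power (LPI) state
         * before new descriptors are queued to the DMA.
         */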
3120         if (priv->tx_path_in_lpi_mode)
3121                 stmmac_disable_eee_mode(priv);
3122
3123         entry = tx_q->cur_tx;
3124         first_entry = entry;
3125         WARN_ON(tx_q->tx_skbuff[first_entry]);
3126
3127         csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3128
3129         if (likely(priv->extend_desc))
3130                 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3131         else
3132                 desc = tx_q->dma_tx + entry;
3133
3134         first = desc;
3135
3136         enh_desc = priv->plat->enh_desc;
3137         /* To program the descriptors according to the size of the frame */
3138         if (enh_desc)
3139                 is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
3140
3141         if (unlikely(is_jumbo) && likely(priv->synopsys_id <
3142                                          DWMAC_CORE_4_00)) {
3143                 entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion);
3144                 if (unlikely(entry < 0))
3145                         goto dma_map_err;
3146         }
3147
3148         for (i = 0; i < nfrags; i++) {
3149                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3150                 int len = skb_frag_size(frag);
3151                 bool last_segment = (i == (nfrags - 1));
3152
3153                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3154                 WARN_ON(tx_q->tx_skbuff[entry]);
3155
3156                 if (likely(priv->extend_desc))
3157                         desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3158                 else
3159                         desc = tx_q->dma_tx + entry;
3160
3161                 des = skb_frag_dma_map(priv->device, frag, 0, len,
3162                                        DMA_TO_DEVICE);
3163                 if (dma_mapping_error(priv->device, des))
3164                         goto dma_map_err; /* should reuse desc w/o issues */
3165
3166                 tx_q->tx_skbuff_dma[entry].buf = des;
3167                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3168                         desc->des0 = cpu_to_le32(des);
3169                 else
3170                         desc->des2 = cpu_to_le32(des);
3171
3172                 tx_q->tx_skbuff_dma[entry].map_as_page = true;
3173                 tx_q->tx_skbuff_dma[entry].len = len;
3174                 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3175
3176                 /* Prepare the descriptor and set the own bit too */
3177                 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
3178                                                 priv->mode, 1, last_segment,
3179                                                 skb->len);
3180         }
3181
3182         /* Only the last descriptor gets to point to the skb. */
3183         tx_q->tx_skbuff[entry] = skb;
3184
3185         /* We've used all descriptors we need for this skb, however,
3186          * advance cur_tx so that it references a fresh descriptor.
3187          * ndo_start_xmit will fill this descriptor the next time it's
3188          * called and stmmac_tx_clean may clean up to this descriptor.
3189          */
3190         entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3191         tx_q->cur_tx = entry;
3192
3193         if (netif_msg_pktdata(priv)) {
3194                 void *tx_head;
3195
3196                 netdev_dbg(priv->dev,
3197                            "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3198                            __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3199                            entry, first, nfrags);
3200
3201                 if (priv->extend_desc)
3202                         tx_head = (void *)tx_q->dma_etx;
3203                 else
3204                         tx_head = (void *)tx_q->dma_tx;
3205
3206                 priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
3207
3208                 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3209                 print_pkt(skb->data, skb->len);
3210         }
3211
3212         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3213                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3214                           __func__);
3215                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3216         }
3217
3218         dev->stats.tx_bytes += skb->len;
3219
3220         /* According to the coalesce parameter, the IC bit for the latest
3221          * segment is reset and the timer is re-started to clean the tx status.
3222          * This approach takes care of the fragments: desc is the first
3223          * element in case of no SG.
3224          */
3225         priv->tx_count_frames += nfrags + 1;
3226         if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
3227                 mod_timer(&priv->txtimer,
3228                           STMMAC_COAL_TIMER(priv->tx_coal_timer));
3229         } else {
3230                 priv->tx_count_frames = 0;
3231                 priv->hw->desc->set_tx_ic(desc);
3232                 priv->xstats.tx_set_ic_bit++;
3233         }
3234
3235         skb_tx_timestamp(skb);
3236
3237         /* Ready to fill the first descriptor and set the OWN bit w/o any
3238          * problems because all the descriptors are actually ready to be
3239          * passed to the DMA engine.
3240          */
3241         if (likely(!is_jumbo)) {
3242                 bool last_segment = (nfrags == 0);
3243
3244                 des = dma_map_single(priv->device, skb->data,
3245                                      nopaged_len, DMA_TO_DEVICE);
3246                 if (dma_mapping_error(priv->device, des))
3247                         goto dma_map_err;
3248
3249                 tx_q->tx_skbuff_dma[first_entry].buf = des;
3250                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3251                         first->des0 = cpu_to_le32(des);
3252                 else
3253                         first->des2 = cpu_to_le32(des);
3254
3255                 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3256                 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3257
3258                 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3259                              priv->hwts_tx_en)) {
3260                         /* declare that device is doing timestamping */
3261                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3262                         priv->hw->desc->enable_tx_timestamp(first);
3263                 }
3264
3265                 /* Prepare the first descriptor setting the OWN bit too */
3266                 priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
3267                                                 csum_insertion, priv->mode, 1,
3268                                                 last_segment, skb->len);
3269
3270                 /* The OWN bit must be the last setting done when preparing the
3271                  * descriptor, and a barrier is needed to make sure that
3272                  * everything is coherent before granting the DMA engine.
3273                  */
3274                 wmb();
3275         }
3276
3277         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3278
3279         if (priv->synopsys_id < DWMAC_CORE_4_00)
3280                 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
3281         else
3282                 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
3283                                                queue);
3284
3285         return NETDEV_TX_OK;
3286
3287 dma_map_err:
3288         netdev_err(priv->dev, "Tx DMA map failed\n");
3289         dev_kfree_skb(skb);
3290         priv->dev->stats.tx_dropped++;
3291         return NETDEV_TX_OK;
3292 }
3293
3294 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3295 {
3296         struct ethhdr *ehdr;
3297         u16 vlanid;
3298
3299         if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
3300             NETIF_F_HW_VLAN_CTAG_RX &&
3301             !__vlan_get_tag(skb, &vlanid)) {
3302                 /* pop the vlan tag */
3303                 ehdr = (struct ethhdr *)skb->data;
3304                 memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
3305                 skb_pull(skb, VLAN_HLEN);
3306                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
3307         }
3308 }
3309
3310
3311 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3312 {
3313         if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3314                 return 0;
3315
3316         return 1;
3317 }
3318
3319 /**
3320  * stmmac_rx_refill - refill used skb preallocated buffers
3321  * @priv: driver private structure
3322  * @queue: RX queue index
3323  * Description : this reallocates the skbs for the reception process,
3324  * which is based on zero-copy.
3325  */
3326 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3327 {
3328         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3329         int dirty = stmmac_rx_dirty(priv, queue);
3330         unsigned int entry = rx_q->dirty_rx;
3331
3332         int bfsize = priv->dma_buf_sz;
3333
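        /* Walk the dirty entries: map a fresh skb into every slot whose
         * buffer was handed to the stack, then give the descriptor back to
         * the DMA by setting its OWN bit.
         */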
3334         while (dirty-- > 0) {
3335                 struct dma_desc *p;
3336
3337                 if (priv->extend_desc)
3338                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3339                 else
3340                         p = rx_q->dma_rx + entry;
3341
3342                 if (likely(!rx_q->rx_skbuff[entry])) {
3343                         struct sk_buff *skb;
3344
3345                         skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3346                         if (unlikely(!skb)) {
3347                                 /* so for a while no zero-copy! */
3348                                 rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3349                                 if (unlikely(net_ratelimit()))
3350                                         dev_err(priv->device,
3351                                                 "fail to alloc skb entry %d\n",
3352                                                 entry);
3353                                 break;
3354                         }
3355
3356                         rx_q->rx_skbuff[entry] = skb;
3357                         rx_q->rx_skbuff_dma[entry] =
3358                             dma_map_single(priv->device, skb->data, bfsize,
3359                                            DMA_FROM_DEVICE);
3360                         if (dma_mapping_error(priv->device,
3361                                               rx_q->rx_skbuff_dma[entry])) {
3362                                 netdev_err(priv->dev, "Rx DMA map failed\n");
3363                                 dev_kfree_skb(skb);
3364                                 break;
3365                         }
3366
3367                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
3368                                 p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3369                                 p->des1 = 0;
3370                         } else {
3371                                 p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3372                         }
3373                         if (priv->hw->mode->refill_desc3)
3374                                 priv->hw->mode->refill_desc3(rx_q, p);
3375
3376                         if (rx_q->rx_zeroc_thresh > 0)
3377                                 rx_q->rx_zeroc_thresh--;
3378
3379                         netif_dbg(priv, rx_status, priv->dev,
3380                                   "refill entry #%d\n", entry);
3381                 }
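                /* Order the buffer address writes above against the
                 * ownership transfer below, so the DMA never sees a
                 * descriptor it owns with a stale buffer pointer.
                 */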
3382                 dma_wmb();
3383
3384                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3385                         priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
3386                 else
3387                         priv->hw->desc->set_rx_owner(p);
3388
3389                 dma_wmb();
3390
3391                 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3392         }
3393         rx_q->dirty_rx = entry;
3394 }
3395
3396 /**
3397  * stmmac_rx - manage the receive process
3398  * @priv: driver private structure
3399  * @limit: napi budget
3400  * @queue: RX queue index.
3401  * Description :  this is the function called by the napi poll method.
3402  * It gets all the frames inside the ring.
3403  */
3404 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3405 {
3406         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3407         unsigned int entry = rx_q->cur_rx;
3408         int coe = priv->hw->rx_csum;
3409         unsigned int next_entry;
3410         unsigned int count = 0;
3411
3412         if (netif_msg_rx_status(priv)) {
3413                 void *rx_head;
3414
3415                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3416                 if (priv->extend_desc)
3417                         rx_head = (void *)rx_q->dma_erx;
3418                 else
3419                         rx_head = (void *)rx_q->dma_rx;
3420
3421                 priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
3422         }
3423         while (count < limit) {
3424                 int status;
3425                 struct dma_desc *p;
3426                 struct dma_desc *np;
3427
3428                 if (priv->extend_desc)
3429                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3430                 else
3431                         p = rx_q->dma_rx + entry;
3432
3433                 /* read the status of the incoming frame */
3434                 status = priv->hw->desc->rx_status(&priv->dev->stats,
3435                                                    &priv->xstats, p);
3436                 /* check if managed by the DMA otherwise go ahead */
3437                 if (unlikely(status & dma_own))
3438                         break;
3439
3440                 count++;
3441
3442                 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3443                 next_entry = rx_q->cur_rx;
3444
3445                 if (priv->extend_desc)
3446                         np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3447                 else
3448                         np = rx_q->dma_rx + next_entry;
3449
3450                 prefetch(np);
3451
3452                 if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
3453                         priv->hw->desc->rx_extended_status(&priv->dev->stats,
3454                                                            &priv->xstats,
3455                                                            rx_q->dma_erx +
3456                                                            entry);
3457                 if (unlikely(status == discard_frame)) {
3458                         priv->dev->stats.rx_errors++;
3459                         if (priv->hwts_rx_en && !priv->extend_desc) {
3460                                 /* DESC2 & DESC3 will be overwritten by device
3461                                  * with timestamp value, hence reinitialize
3462                                  * them in stmmac_rx_refill() function so that
3463                                  * device can reuse it.
3464                                  */
3465                                 dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3466                                 rx_q->rx_skbuff[entry] = NULL;
3467                                 dma_unmap_single(priv->device,
3468                                                  rx_q->rx_skbuff_dma[entry],
3469                                                  priv->dma_buf_sz,
3470                                                  DMA_FROM_DEVICE);
3471                         }
3472                 } else {
3473                         struct sk_buff *skb;
3474                         int frame_len;
3475                         unsigned int des;
3476
3477                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3478                                 des = le32_to_cpu(p->des0);
3479                         else
3480                                 des = le32_to_cpu(p->des2);
3481
3482                         frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
3483
3484                         /*  If frame length is greater than skb buffer size
3485                          *  (preallocated during init) then the packet is
3486                          *  ignored
3487                          */
3488                         if (frame_len > priv->dma_buf_sz) {
3489                                 netdev_err(priv->dev,
3490                                            "len %d larger than size (%d)\n",
3491                                            frame_len, priv->dma_buf_sz);
3492                                 priv->dev->stats.rx_length_errors++;
3493                                 break;
3494                         }
3495
3496                         /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3497                          * Type frames (LLC/LLC-SNAP)
3498                          *
3499                          * llc_snap is never checked in GMAC >= 4, so this ACS
3500                          * feature is always disabled and packets need to be
3501                          * stripped manually.
3502                          */
3503                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3504                             unlikely(status != llc_snap))
3505                                 frame_len -= ETH_FCS_LEN;
3506
3507                         if (netif_msg_rx_status(priv)) {
3508                                 netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3509                                            p, entry, des);
3510                                 netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3511                                            frame_len, status);
3512                         }
3513
3514                         /* Zero-copy is always used for all sizes
3515                          * in case of GMAC4 because the used
3516                          * descriptors always need to be refilled.
3517                          */
3518                         if (unlikely(!priv->plat->has_gmac4 &&
3519                                      ((frame_len < priv->rx_copybreak) ||
3520                                      stmmac_rx_threshold_count(rx_q)))) {
3521                                 skb = netdev_alloc_skb_ip_align(priv->dev,
3522                                                                 frame_len);
3523                                 if (unlikely(!skb)) {
3524                                         if (net_ratelimit())
3525                                                 dev_warn(priv->device,
3526                                                          "packet dropped\n");
3527                                         priv->dev->stats.rx_dropped++;
3528                                         break;
3529                                 }
3530
3531                                 dma_sync_single_for_cpu(priv->device,
3532                                                         rx_q->rx_skbuff_dma
3533                                                         [entry], frame_len,
3534                                                         DMA_FROM_DEVICE);
3535                                 skb_copy_to_linear_data(skb,
3536                                                         rx_q->
3537                                                         rx_skbuff[entry]->data,
3538                                                         frame_len);
3539
3540                                 skb_put(skb, frame_len);
3541                                 dma_sync_single_for_device(priv->device,
3542                                                            rx_q->rx_skbuff_dma
3543                                                            [entry], frame_len,
3544                                                            DMA_FROM_DEVICE);
3545                         } else {
3546                                 skb = rx_q->rx_skbuff[entry];
3547                                 if (unlikely(!skb)) {
3548                                         netdev_err(priv->dev,
3549                                                    "%s: Inconsistent Rx chain\n",
3550                                                    priv->dev->name);
3551                                         priv->dev->stats.rx_dropped++;
3552                                         break;
3553                                 }
3554                                 prefetch(skb->data - NET_IP_ALIGN);
3555                                 rx_q->rx_skbuff[entry] = NULL;
3556                                 rx_q->rx_zeroc_thresh++;
3557
3558                                 skb_put(skb, frame_len);
3559                                 dma_unmap_single(priv->device,
3560                                                  rx_q->rx_skbuff_dma[entry],
3561                                                  priv->dma_buf_sz,
3562                                                  DMA_FROM_DEVICE);
3563                         }
3564
3565                         if (netif_msg_pktdata(priv)) {
3566                                 netdev_dbg(priv->dev, "frame received (%dbytes)",
3567                                            frame_len);
3568                                 print_pkt(skb->data, frame_len);
3569                         }
3570
3571                         stmmac_get_rx_hwtstamp(priv, p, np, skb);
3572
3573                         stmmac_rx_vlan(priv->dev, skb);
3574
3575                         skb->protocol = eth_type_trans(skb, priv->dev);
3576
3577                         if (unlikely(!coe))
3578                                 skb_checksum_none_assert(skb);
3579                         else
3580                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
3581
3582                         napi_gro_receive(&rx_q->napi, skb);
3583
3584                         priv->dev->stats.rx_packets++;
3585                         priv->dev->stats.rx_bytes += frame_len;
3586                 }
3587                 entry = next_entry;
3588         }
3589
3590         stmmac_rx_refill(priv, queue);
3591
3592         priv->xstats.rx_pkt_n += count;
3593
3594         return count;
3595 }
3596
3597 /**
3598  *  stmmac_poll - stmmac poll method (NAPI)
3599  *  @napi : pointer to the napi structure.
3600  *  @budget : maximum number of packets that the current CPU can receive from
3601  *            all interfaces.
3602  *  Description :
3603  *  To look at the incoming frames and clear the tx resources.
3604  */
3605 static int stmmac_poll(struct napi_struct *napi, int budget)
3606 {
3607         struct stmmac_rx_queue *rx_q =
3608                 container_of(napi, struct stmmac_rx_queue, napi);
3609         struct stmmac_priv *priv = rx_q->priv_data;
3610         u32 tx_count = priv->plat->tx_queues_to_use;
3611         u32 chan = rx_q->queue_index;
3612         int work_done = 0;
3613         u32 queue;
3614
3615         priv->xstats.napi_poll++;
3616
3617         /* check all the queues */
3618         for (queue = 0; queue < tx_count; queue++)
3619                 stmmac_tx_clean(priv, queue);
3620
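        /* RX is processed only for the queue owning this NAPI instance;
         * when the budget is not exhausted, NAPI is completed and the DMA
         * interrupt for this channel is re-enabled.
         */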
3621         work_done = stmmac_rx(priv, budget, rx_q->queue_index);
3622         if (work_done < budget) {
3623                 napi_complete_done(napi, work_done);
3624                 stmmac_enable_dma_irq(priv, chan);
3625         }
3626         return work_done;
3627 }
3628
3629 /**
3630  *  stmmac_tx_timeout
3631  *  @dev : Pointer to net device structure
3632  *  Description: this function is called when a packet transmission fails to
3633  *   complete within a reasonable time. The driver will mark the error in the
3634  *   netdev structure and arrange for the device to be reset to a sane state
3635  *   in order to transmit a new packet.
3636  */
3637 static void stmmac_tx_timeout(struct net_device *dev)
3638 {
3639         struct stmmac_priv *priv = netdev_priv(dev);
3640
3641         stmmac_global_err(priv);
3642 }
3643
3644 /**
3645  *  stmmac_set_rx_mode - entry point for multicast addressing
3646  *  @dev : pointer to the device structure
3647  *  Description:
3648  *  This function is a driver entry point which gets called by the kernel
3649  *  whenever multicast addresses must be enabled/disabled.
3650  *  Return value:
3651  *  void.
3652  */
3653 static void stmmac_set_rx_mode(struct net_device *dev)
3654 {
3655         struct stmmac_priv *priv = netdev_priv(dev);
3656
3657         priv->hw->mac->set_filter(priv->hw, dev);
3658 }
3659
3660 /**
3661  *  stmmac_change_mtu - entry point to change MTU size for the device.
3662  *  @dev : device pointer.
3663  *  @new_mtu : the new MTU size for the device.
3664  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
3665  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
3666  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
3667  *  Return value:
3668  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3669  *  file on failure.
3670  */
3671 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3672 {
3673         struct stmmac_priv *priv = netdev_priv(dev);
3674
3675         if (netif_running(dev)) {
3676                 netdev_err(priv->dev, "must be stopped to change its MTU\n");
3677                 return -EBUSY;
3678         }
3679
3680         dev->mtu = new_mtu;
3681
3682         netdev_update_features(dev);
3683
3684         return 0;
3685 }
3686
3687 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3688                                              netdev_features_t features)
3689 {
3690         struct stmmac_priv *priv = netdev_priv(dev);
3691
3692         if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3693                 features &= ~NETIF_F_RXCSUM;
3694
3695         if (!priv->plat->tx_coe)
3696                 features &= ~NETIF_F_CSUM_MASK;
3697
3698         /* Some GMAC devices have buggy Jumbo frame support that
3699          * needs to have the Tx COE disabled for oversized frames
3700          * (due to limited buffer sizes). In this case we disable
3701          * the TX csum insertion in the TDES and do not use SF.
3702          */
3703         if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3704                 features &= ~NETIF_F_CSUM_MASK;
3705
3706         /* Disable tso if asked by ethtool */
3707         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3708                 if (features & NETIF_F_TSO)
3709                         priv->tso = true;
3710                 else
3711                         priv->tso = false;
3712         }
3713
3714         return features;
3715 }
3716
3717 static int stmmac_set_features(struct net_device *netdev,
3718                                netdev_features_t features)
3719 {
3720         struct stmmac_priv *priv = netdev_priv(netdev);
3721
3722         /* Keep the COE Type in case checksum offload is supported */
3723         if (features & NETIF_F_RXCSUM)
3724                 priv->hw->rx_csum = priv->plat->rx_coe;
3725         else
3726                 priv->hw->rx_csum = 0;
3727         /* No check needed because rx_coe has been set before and it will be
3728          * fixed in case of issue.
3729          */
3730         priv->hw->mac->rx_ipc(priv->hw);
3731
3732         return 0;
3733 }
3734
3735 /**
3736  *  stmmac_interrupt - main ISR
3737  *  @irq: interrupt number.
3738  *  @dev_id: to pass the net device pointer.
3739  *  Description: this is the main driver interrupt service routine.
3740  *  It can call:
3741  *  o DMA service routine (to manage incoming frame reception and transmission
3742  *    status)
3743  *  o Core interrupts to manage: remote wake-up, management counter, LPI
3744  *    interrupts.
3745  */
3746 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3747 {
3748         struct net_device *dev = (struct net_device *)dev_id;
3749         struct stmmac_priv *priv = netdev_priv(dev);
3750         u32 rx_cnt = priv->plat->rx_queues_to_use;
3751         u32 tx_cnt = priv->plat->tx_queues_to_use;
3752         u32 queues_count;
3753         u32 queue;
3754
3755         queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3756
3757         if (priv->irq_wake)
3758                 pm_wakeup_event(priv->device, 0);
3759
3760         if (unlikely(!dev)) {
3761                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
3762                 return IRQ_NONE;
3763         }
3764
3765         /* Check if adapter is up */
3766         if (test_bit(STMMAC_DOWN, &priv->state))
3767                 return IRQ_HANDLED;
3768         /* Check if a fatal error happened */
3769         if (stmmac_safety_feat_interrupt(priv))
3770                 return IRQ_HANDLED;
3771
3772         /* To handle GMAC own interrupts */
3773         if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
3774                 int status = priv->hw->mac->host_irq_status(priv->hw,
3775                                                             &priv->xstats);
3776
3777                 if (unlikely(status)) {
3778                         /* For LPI we need to save the tx status */
3779                         if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3780                                 priv->tx_path_in_lpi_mode = true;
3781                         if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3782                                 priv->tx_path_in_lpi_mode = false;
3783                 }
3784
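                /* GMAC4 cores report per-queue MTL events: on an RX FIFO
                 * overflow the RX tail pointer is rewritten so the DMA can
                 * resume fetching free descriptors for that queue.
                 */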
3785                 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3786                         for (queue = 0; queue < queues_count; queue++) {
3787                                 struct stmmac_rx_queue *rx_q =
3788                                 &priv->rx_queue[queue];
3789
3790                                 status |=
3791                                 priv->hw->mac->host_mtl_irq_status(priv->hw,
3792                                                                    queue);
3793
3794                                 if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
3795                                     priv->hw->dma->set_rx_tail_ptr)
3796                                         priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
3797                                                                 rx_q->rx_tail_addr,
3798                                                                 queue);
3799                         }
3800                 }
3801
3802                 /* PCS link status */
3803                 if (priv->hw->pcs) {
3804                         if (priv->xstats.pcs_link)
3805                                 netif_carrier_on(dev);
3806                         else
3807                                 netif_carrier_off(dev);
3808                 }
3809         }
3810
3811         /* To handle DMA interrupts */
3812         stmmac_dma_interrupt(priv);
3813
3814         return IRQ_HANDLED;
3815 }
3816
3817 #ifdef CONFIG_NET_POLL_CONTROLLER
3818 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3819  * to allow network I/O with interrupts disabled.
3820  */
3821 static void stmmac_poll_controller(struct net_device *dev)
3822 {
3823         disable_irq(dev->irq);
3824         stmmac_interrupt(dev->irq, dev);
3825         enable_irq(dev->irq);
3826 }
3827 #endif
3828
3829 /**
3830  *  stmmac_ioctl - Entry point for the Ioctl
3831  *  @dev: Device pointer.
3832  *  @rq: An IOCTL-specific structure that can contain a pointer to
3833  *  a proprietary structure used to pass information to the driver.
3834  *  @cmd: IOCTL command
3835  *  Description:
3836  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3837  */
3838 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3839 {
3840         int ret = -EOPNOTSUPP;
3841
3842         if (!netif_running(dev))
3843                 return -EINVAL;
3844
3845         switch (cmd) {
3846         case SIOCGMIIPHY:
3847         case SIOCGMIIREG:
3848         case SIOCSMIIREG:
3849                 if (!dev->phydev)
3850                         return -EINVAL;
3851                 ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3852                 break;
3853         case SIOCSHWTSTAMP:
3854                 ret = stmmac_hwtstamp_ioctl(dev, rq);
3855                 break;
3856         default:
3857                 break;
3858         }
3859
3860         return ret;
3861 }
3862
3863 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3864 {
3865         struct stmmac_priv *priv = netdev_priv(ndev);
3866         int ret = 0;
3867
3868         ret = eth_mac_addr(ndev, addr);
3869         if (ret)
3870                 return ret;
3871
3872         priv->hw->mac->set_umac_addr(priv->hw, ndev->dev_addr, 0);
3873
3874         return ret;
3875 }
3876
3877 #ifdef CONFIG_DEBUG_FS
3878 static struct dentry *stmmac_fs_dir;
3879
3880 static void sysfs_display_ring(void *head, int size, int extend_desc,
3881                                struct seq_file *seq)
3882 {
3883         int i;
3884         struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3885         struct dma_desc *p = (struct dma_desc *)head;
3886
3887         for (i = 0; i < size; i++) {
3888                 if (extend_desc) {
3889                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3890                                    i, (unsigned int)virt_to_phys(ep),
3891                                    le32_to_cpu(ep->basic.des0),
3892                                    le32_to_cpu(ep->basic.des1),
3893                                    le32_to_cpu(ep->basic.des2),
3894                                    le32_to_cpu(ep->basic.des3));
3895                         ep++;
3896                 } else {
3897                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3898                                    i, (unsigned int)virt_to_phys(p),
3899                                    le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3900                                    le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3901                         p++;
3902                 }
3903                 seq_printf(seq, "\n");
3904         }
3905 }
3906
3907 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3908 {
3909         struct net_device *dev = seq->private;
3910         struct stmmac_priv *priv = netdev_priv(dev);
3911         u32 rx_count = priv->plat->rx_queues_to_use;
3912         u32 tx_count = priv->plat->tx_queues_to_use;
3913         u32 queue;
3914
3915         for (queue = 0; queue < rx_count; queue++) {
3916                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3917
3918                 seq_printf(seq, "RX Queue %d:\n", queue);
3919
3920                 if (priv->extend_desc) {
3921                         seq_printf(seq, "Extended descriptor ring:\n");
3922                         sysfs_display_ring((void *)rx_q->dma_erx,
3923                                            DMA_RX_SIZE, 1, seq);
3924                 } else {
3925                         seq_printf(seq, "Descriptor ring:\n");
3926                         sysfs_display_ring((void *)rx_q->dma_rx,
3927                                            DMA_RX_SIZE, 0, seq);
3928                 }
3929         }
3930
3931         for (queue = 0; queue < tx_count; queue++) {
3932                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3933
3934                 seq_printf(seq, "TX Queue %d:\n", queue);
3935
3936                 if (priv->extend_desc) {
3937                         seq_printf(seq, "Extended descriptor ring:\n");
3938                         sysfs_display_ring((void *)tx_q->dma_etx,
3939                                            DMA_TX_SIZE, 1, seq);
3940                 } else {
3941                         seq_printf(seq, "Descriptor ring:\n");
3942                         sysfs_display_ring((void *)tx_q->dma_tx,
3943                                            DMA_TX_SIZE, 0, seq);
3944                 }
3945         }
3946
3947         return 0;
3948 }
3949
3950 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3951 {
3952         return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3953 }
3954
3955 /* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
3956
3957 static const struct file_operations stmmac_rings_status_fops = {
3958         .owner = THIS_MODULE,
3959         .open = stmmac_sysfs_ring_open,
3960         .read = seq_read,
3961         .llseek = seq_lseek,
3962         .release = single_release,
3963 };
3964
3965 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3966 {
3967         struct net_device *dev = seq->private;
3968         struct stmmac_priv *priv = netdev_priv(dev);
3969
3970         if (!priv->hw_cap_support) {
3971                 seq_printf(seq, "DMA HW features not supported\n");
3972                 return 0;
3973         }
3974
3975         seq_printf(seq, "==============================\n");
3976         seq_printf(seq, "\tDMA HW features\n");
3977         seq_printf(seq, "==============================\n");
3978
3979         seq_printf(seq, "\t10/100 Mbps: %s\n",
3980                    (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3981         seq_printf(seq, "\t1000 Mbps: %s\n",
3982                    (priv->dma_cap.mbps_1000) ? "Y" : "N");
3983         seq_printf(seq, "\tHalf duplex: %s\n",
3984                    (priv->dma_cap.half_duplex) ? "Y" : "N");
3985         seq_printf(seq, "\tHash Filter: %s\n",
3986                    (priv->dma_cap.hash_filter) ? "Y" : "N");
3987         seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3988                    (priv->dma_cap.multi_addr) ? "Y" : "N");
3989         seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3990                    (priv->dma_cap.pcs) ? "Y" : "N");
3991         seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3992                    (priv->dma_cap.sma_mdio) ? "Y" : "N");
3993         seq_printf(seq, "\tPMT Remote wake up: %s\n",
3994                    (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3995         seq_printf(seq, "\tPMT Magic Frame: %s\n",
3996                    (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3997         seq_printf(seq, "\tRMON module: %s\n",
3998                    (priv->dma_cap.rmon) ? "Y" : "N");
3999         seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
4000                    (priv->dma_cap.time_stamp) ? "Y" : "N");
4001         seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
4002                    (priv->dma_cap.atime_stamp) ? "Y" : "N");
4003         seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
4004                    (priv->dma_cap.eee) ? "Y" : "N");
4005         seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
4006         seq_printf(seq, "\tChecksum Offload in TX: %s\n",
4007                    (priv->dma_cap.tx_coe) ? "Y" : "N");
4008         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4009                 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
4010                            (priv->dma_cap.rx_coe) ? "Y" : "N");
4011         } else {
4012                 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
4013                            (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
4014                 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
4015                            (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
4016         }
4017         seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
4018                    (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
4019         seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
4020                    priv->dma_cap.number_rx_channel);
4021         seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
4022                    priv->dma_cap.number_tx_channel);
4023         seq_printf(seq, "\tEnhanced descriptors: %s\n",
4024                    (priv->dma_cap.enh_desc) ? "Y" : "N");
4025
4026         return 0;
4027 }
4028
4029 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
4030 {
4031         return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
4032 }
4033
4034 static const struct file_operations stmmac_dma_cap_fops = {
4035         .owner = THIS_MODULE,
4036         .open = stmmac_sysfs_dma_cap_open,
4037         .read = seq_read,
4038         .llseek = seq_lseek,
4039         .release = single_release,
4040 };
4041
4042 static int stmmac_init_fs(struct net_device *dev)
4043 {
4044         struct stmmac_priv *priv = netdev_priv(dev);
4045
4046         /* Create per netdev entries */
4047         priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
4048
4049         if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
4050                 netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
4051
4052                 return -ENOMEM;
4053         }
4054
4055         /* Entry to report DMA RX/TX rings */
4056         priv->dbgfs_rings_status =
4057                 debugfs_create_file("descriptors_status", 0444,
4058                                     priv->dbgfs_dir, dev,
4059                                     &stmmac_rings_status_fops);
4060
4061         if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
4062                 netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
4063                 debugfs_remove_recursive(priv->dbgfs_dir);
4064
4065                 return -ENOMEM;
4066         }
4067
4068         /* Entry to report the DMA HW features */
4069         priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
4070                                                   priv->dbgfs_dir,
4071                                                   dev, &stmmac_dma_cap_fops);
4072
4073         if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
4074                 netdev_err(priv->dev, "ERROR creating stmmac DMA capability debugfs file\n");
4075                 debugfs_remove_recursive(priv->dbgfs_dir);
4076
4077                 return -ENOMEM;
4078         }
4079
4080         return 0;
4081 }
4082
4083 static void stmmac_exit_fs(struct net_device *dev)
4084 {
4085         struct stmmac_priv *priv = netdev_priv(dev);
4086
4087         debugfs_remove_recursive(priv->dbgfs_dir);
4088 }
4089 #endif /* CONFIG_DEBUG_FS */
4090
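/* net_device callbacks registered with the networking core */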
4091 static const struct net_device_ops stmmac_netdev_ops = {
4092         .ndo_open = stmmac_open,
4093         .ndo_start_xmit = stmmac_xmit,
4094         .ndo_stop = stmmac_release,
4095         .ndo_change_mtu = stmmac_change_mtu,
4096         .ndo_fix_features = stmmac_fix_features,
4097         .ndo_set_features = stmmac_set_features,
4098         .ndo_set_rx_mode = stmmac_set_rx_mode,
4099         .ndo_tx_timeout = stmmac_tx_timeout,
4100         .ndo_do_ioctl = stmmac_ioctl,
4101 #ifdef CONFIG_NET_POLL_CONTROLLER
4102         .ndo_poll_controller = stmmac_poll_controller,
4103 #endif
4104         .ndo_set_mac_address = stmmac_set_mac_address,
4105 };
4106
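/**
 * stmmac_reset_subtask - restart the interface after a requested reset
 * @priv: driver private structure
 * Description: if STMMAC_RESET_REQUESTED is set and the interface is not
 * already going down, close and re-open the device under the rtnl lock.
 */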
4107 static void stmmac_reset_subtask(struct stmmac_priv *priv)
4108 {
4109         if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
4110                 return;
4111         if (test_bit(STMMAC_DOWN, &priv->state))
4112                 return;
4113
4114         netdev_err(priv->dev, "Reset adapter.\n");
4115
4116         rtnl_lock();
4117         netif_trans_update(priv->dev);
4118         while (test_and_set_bit(STMMAC_RESETING, &priv->state))
4119                 usleep_range(1000, 2000);
4120
4121         set_bit(STMMAC_DOWN, &priv->state);
4122         dev_close(priv->dev);
4123         dev_open(priv->dev);
4124         clear_bit(STMMAC_DOWN, &priv->state);
4125         clear_bit(STMMAC_RESETING, &priv->state);
4126         rtnl_unlock();
4127 }
4128
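/**
 * stmmac_service_task - deferred driver maintenance work
 * @work: work_struct embedded in the driver private structure
 * Description: currently this only runs the reset subtask; the
 * STMMAC_SERVICE_SCHED flag is then cleared so the task can be
 * scheduled again.
 */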
4129 static void stmmac_service_task(struct work_struct *work)
4130 {
4131         struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
4132                         service_task);
4133
4134         stmmac_reset_subtask(priv);
4135         clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
4136 }
4137
4138 /**
4139  *  stmmac_hw_init - Init the MAC device
4140  *  @priv: driver private structure
4141  *  Description: this function configures the MAC device according to
4142  *  some platform parameters or the HW capability register. It prepares the
4143  *  driver to use either ring or chain mode and to set up either enhanced or
4144  *  normal descriptors.
4145  */
4146 static int stmmac_hw_init(struct stmmac_priv *priv)
4147 {
4148         struct mac_device_info *mac;
4149
4150         /* Identify the MAC HW device */
4151         if (priv->plat->setup) {
4152                 mac = priv->plat->setup(priv);
4153         } else if (priv->plat->has_gmac) {
4154                 priv->dev->priv_flags |= IFF_UNICAST_FLT;
4155                 mac = dwmac1000_setup(priv->ioaddr,
4156                                       priv->plat->multicast_filter_bins,
4157                                       priv->plat->unicast_filter_entries,
4158                                       &priv->synopsys_id);
4159         } else if (priv->plat->has_gmac4) {
4160                 priv->dev->priv_flags |= IFF_UNICAST_FLT;
4161                 mac = dwmac4_setup(priv->ioaddr,
4162                                    priv->plat->multicast_filter_bins,
4163                                    priv->plat->unicast_filter_entries,
4164                                    &priv->synopsys_id);
4165         } else {
4166                 mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
4167         }
4168         if (!mac)
4169                 return -ENOMEM;
4170
4171         priv->hw = mac;
4172
4173         /* dwmac-sun8i only works in chain mode */
4174         if (priv->plat->has_sun8i)
4175                 chain_mode = 1;
4176
4177         /* To use the chained or ring mode */
4178         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4179                 priv->hw->mode = &dwmac4_ring_mode_ops;
4180         } else {
4181                 if (chain_mode) {
4182                         priv->hw->mode = &chain_mode_ops;
4183                         dev_info(priv->device, "Chain mode enabled\n");
4184                         priv->mode = STMMAC_CHAIN_MODE;
4185                 } else {
4186                         priv->hw->mode = &ring_mode_ops;
4187                         dev_info(priv->device, "Ring mode enabled\n");
4188                         priv->mode = STMMAC_RING_MODE;
4189                 }
4190         }
4191
4192         /* Get the HW capability (GMAC cores newer than 3.50a) */
4193         priv->hw_cap_support = stmmac_get_hw_features(priv);
4194         if (priv->hw_cap_support) {
4195                 dev_info(priv->device, "DMA HW capability register supported\n");
4196
4197                 /* We can override some gmac/dma configuration fields
4198                  * (e.g. enh_desc, tx_coe) that are passed through the
4199                  * platform with the values from the HW capability
4200                  * register (if supported).
4201                  */
4202                 priv->plat->enh_desc = priv->dma_cap.enh_desc;
4203                 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4204                 priv->hw->pmt = priv->plat->pmt;
4205
4206                 /* TXCOE doesn't work in thresh DMA mode */
4207                 if (priv->plat->force_thresh_dma_mode)
4208                         priv->plat->tx_coe = 0;
4209                 else
4210                         priv->plat->tx_coe = priv->dma_cap.tx_coe;
4211
4212                 /* For GMAC4, rx_coe comes from the HW capability register. */
4213                 priv->plat->rx_coe = priv->dma_cap.rx_coe;
4214
4215                 if (priv->dma_cap.rx_coe_type2)
4216                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4217                 else if (priv->dma_cap.rx_coe_type1)
4218                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4219
4220         } else {
4221                 dev_info(priv->device, "No HW DMA feature register supported\n");
4222         }
4223
4224         /* To use alternate (extended), normal or GMAC4 descriptor structures */
4225         if (priv->synopsys_id >= DWMAC_CORE_4_00)
4226                 priv->hw->desc = &dwmac4_desc_ops;
4227         else
4228                 stmmac_selec_desc_mode(priv);
4229
4230         if (priv->plat->rx_coe) {
4231                 priv->hw->rx_csum = priv->plat->rx_coe;
4232                 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4233                 if (priv->synopsys_id < DWMAC_CORE_4_00)
4234                         dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4235         }
4236         if (priv->plat->tx_coe)
4237                 dev_info(priv->device, "TX Checksum insertion supported\n");
4238
4239         if (priv->plat->pmt) {
4240                 dev_info(priv->device, "Wake-Up On LAN supported\n");
4241                 device_set_wakeup_capable(priv->device, 1);
4242         }
4243
4244         if (priv->dma_cap.tsoen)
4245                 dev_info(priv->device, "TSO supported\n");
4246
4247         return 0;
4248 }
4249
4250 /**
4251  * stmmac_dvr_probe
4252  * @device: device pointer
4253  * @plat_dat: platform data pointer
4254  * @res: stmmac resource pointer
4255  * Description: this is the main probe function used to
4256  * call alloc_etherdev and allocate the private structure.
4257  * Return:
4258  * 0 on success, otherwise a negative errno.
4259  */
4260 int stmmac_dvr_probe(struct device *device,
4261                      struct plat_stmmacenet_data *plat_dat,
4262                      struct stmmac_resources *res)
4263 {
4264         struct net_device *ndev = NULL;
4265         struct stmmac_priv *priv;
4266         int ret = 0;
4267         u32 queue;
4268
4269         ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4270                                   MTL_MAX_TX_QUEUES,
4271                                   MTL_MAX_RX_QUEUES);
4272         if (!ndev)
4273                 return -ENOMEM;
4274
4275         SET_NETDEV_DEV(ndev, device);
4276
4277         priv = netdev_priv(ndev);
4278         priv->device = device;
4279         priv->dev = ndev;
4280
4281         stmmac_set_ethtool_ops(ndev);
4282         priv->pause = pause;
4283         priv->plat = plat_dat;
4284         priv->ioaddr = res->addr;
4285         priv->dev->base_addr = (unsigned long)res->addr;
4286
4287         priv->dev->irq = res->irq;
4288         priv->wol_irq = res->wol_irq;
4289         priv->lpi_irq = res->lpi_irq;
4290
4291         if (res->mac)
4292                 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4293
4294         dev_set_drvdata(device, priv->dev);
4295
4296         /* Verify driver arguments */
4297         stmmac_verify_args();
4298
4299         /* Allocate workqueue */
4300         priv->wq = create_singlethread_workqueue("stmmac_wq");
4301         if (!priv->wq) {
4302                 dev_err(priv->device, "failed to create workqueue\n");
                     ret = -ENOMEM;
4303                 goto error_wq;
4304         }
4305
4306         INIT_WORK(&priv->service_task, stmmac_service_task);
4307
4308         /* Override with kernel parameters if supplied.
4309          * XXX CRS XXX: this needs to have multiple instances.
4310          */
4311         if ((phyaddr >= 0) && (phyaddr <= 31))
4312                 priv->plat->phy_addr = phyaddr;
4313
4314         if (priv->plat->stmmac_rst) {
4315                 ret = reset_control_assert(priv->plat->stmmac_rst);
4316                 reset_control_deassert(priv->plat->stmmac_rst);
4317                 /* Some reset controllers provide only a reset callback
4318                  * instead of the assert + deassert callback pair.
4319                  */
4320                 if (ret == -ENOTSUPP)
4321                         reset_control_reset(priv->plat->stmmac_rst);
4322         }
4323
4324         /* Init MAC and get the capabilities */
4325         ret = stmmac_hw_init(priv);
4326         if (ret)
4327                 goto error_hw_init;
4328
4329         /* Configure real RX and TX queues */
4330         netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4331         netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4332
4333         ndev->netdev_ops = &stmmac_netdev_ops;
4334
4335         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4336                             NETIF_F_RXCSUM;
4337
4338         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4339                 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4340                 priv->tso = true;
4341                 dev_info(priv->device, "TSO feature enabled\n");
4342         }
4343         ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4344         ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4345 #ifdef STMMAC_VLAN_TAG_USED
4346         /* Both mac100 and gmac support receive VLAN tag detection */
4347         ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
4348 #endif
4349         priv->msg_enable = netif_msg_init(debug, default_msg_level);
4350
4351         /* MTU range: 46 - hw-specific max */
4352         ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4353         if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4354                 ndev->max_mtu = JUMBO_LEN;
4355         else
4356                 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4357         /* Do not overwrite ndev->max_mtu if plat->maxmtu is greater than
4358          * ndev->max_mtu or lower than ndev->min_mtu, which is an invalid range.
4359          */
4360         if ((priv->plat->maxmtu < ndev->max_mtu) &&
4361             (priv->plat->maxmtu >= ndev->min_mtu))
4362                 ndev->max_mtu = priv->plat->maxmtu;
4363         else if (priv->plat->maxmtu < ndev->min_mtu)
4364                 dev_warn(priv->device,
4365                          "%s: warning: maxmtu having invalid value (%d)\n",
4366                          __func__, priv->plat->maxmtu);
4367
4368         if (flow_ctrl)
4369                 priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
4370
4371         /* Rx Watchdog is available in the COREs newer than 3.40.
4372          * In some cases, for example on buggy HW, this feature
4373          * has to be disabled; this can be done by passing the
4374          * riwt_off field from the platform.
4375          */
4376         if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
4377                 priv->use_riwt = 1;
4378                 dev_info(priv->device,
4379                          "Enable RX Mitigation via HW Watchdog Timer\n");
4380         }
4381
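        /* Register one NAPI context per RX queue; the polling weight
         * grows with the number of RX queues in use.
         */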
4382         for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4383                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4384
4385                 netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
4386                                (8 * priv->plat->rx_queues_to_use));
4387         }
4388
4389         spin_lock_init(&priv->lock);
4390
4391         /* If a specific clk_csr value is passed from the platform,
4392          * the CSR Clock Range selection cannot be changed at run-time
4393          * and is fixed. Otherwise, the driver will try to set the
4394          * MDC clock dynamically according to the actual CSR clock
4395          * input.
4396          */
4397         if (!priv->plat->clk_csr)
4398                 stmmac_clk_csr_set(priv);
4399         else
4400                 priv->clk_csr = priv->plat->clk_csr;
4401
4402         stmmac_check_pcs_mode(priv);
4403
4404         if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
4405             priv->hw->pcs != STMMAC_PCS_TBI &&
4406             priv->hw->pcs != STMMAC_PCS_RTBI) {
4407                 /* MDIO bus Registration */
4408                 ret = stmmac_mdio_register(ndev);
4409                 if (ret < 0) {
4410                         dev_err(priv->device,
4411                                 "%s: MDIO bus (id: %d) registration failed\n",
4412                                 __func__, priv->plat->bus_id);
4413                         goto error_mdio_register;
4414                 }
4415         }
4416
4417         ret = register_netdev(ndev);
4418         if (ret) {
4419                 dev_err(priv->device, "%s: ERROR %i registering the device\n",
4420                         __func__, ret);
4421                 goto error_netdev_register;
4422         }
4423
4424         return ret;
4425
4426 error_netdev_register:
4427         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4428             priv->hw->pcs != STMMAC_PCS_TBI &&
4429             priv->hw->pcs != STMMAC_PCS_RTBI)
4430                 stmmac_mdio_unregister(ndev);
4431 error_mdio_register:
4432         for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4433                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4434
4435                 netif_napi_del(&rx_q->napi);
4436         }
4437 error_hw_init:
4438         destroy_workqueue(priv->wq);
4439 error_wq:
4440         free_netdev(ndev);
4441
4442         return ret;
4443 }
4444 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
4445
4446 /**
4447  * stmmac_dvr_remove
4448  * @dev: device pointer
4449  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
4450  * changes the link status and releases the DMA descriptor rings.
4451  */
4452 int stmmac_dvr_remove(struct device *dev)
4453 {
4454         struct net_device *ndev = dev_get_drvdata(dev);
4455         struct stmmac_priv *priv = netdev_priv(ndev);
4456
4457         netdev_info(priv->dev, "%s: removing driver\n", __func__);
4458
4459         stmmac_stop_all_dma(priv);
4460
4461         priv->hw->mac->set_mac(priv->ioaddr, false);
4462         netif_carrier_off(ndev);
4463         unregister_netdev(ndev);
4464         if (priv->plat->stmmac_rst)
4465                 reset_control_assert(priv->plat->stmmac_rst);
4466         clk_disable_unprepare(priv->plat->pclk);
4467         clk_disable_unprepare(priv->plat->stmmac_clk);
4468         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4469             priv->hw->pcs != STMMAC_PCS_TBI &&
4470             priv->hw->pcs != STMMAC_PCS_RTBI)
4471                 stmmac_mdio_unregister(ndev);
4472         destroy_workqueue(priv->wq);
4473         free_netdev(ndev);
4474
4475         return 0;
4476 }
4477 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
4478
4479 /**
4480  * stmmac_suspend - suspend callback
4481  * @dev: device pointer
4482  * Description: this is the function to suspend the device; it is called
4483  * by the platform driver to stop the network queues, release the resources,
4484  * program the PMT register (for WoL) and clean up driver resources.
4485  */
4486 int stmmac_suspend(struct device *dev)
4487 {
4488         struct net_device *ndev = dev_get_drvdata(dev);
4489         struct stmmac_priv *priv = netdev_priv(ndev);
4490         unsigned long flags;
4491
4492         if (!ndev || !netif_running(ndev))
4493                 return 0;
4494
4495         if (ndev->phydev)
4496                 phy_stop(ndev->phydev);
4497
4498         spin_lock_irqsave(&priv->lock, flags);
4499
4500         netif_device_detach(ndev);
4501         stmmac_stop_all_queues(priv);
4502
4503         stmmac_disable_all_queues(priv);
4504
4505         /* Stop TX/RX DMA */
4506         stmmac_stop_all_dma(priv);
4507
4508         /* Enable Power down mode by programming the PMT regs */
4509         if (device_may_wakeup(priv->device)) {
4510                 priv->hw->mac->pmt(priv->hw, priv->wolopts);
4511                 priv->irq_wake = 1;
4512         } else {
4513                 priv->hw->mac->set_mac(priv->ioaddr, false);
4514                 pinctrl_pm_select_sleep_state(priv->device);
4515                 /* Disable clocks; the device is not a wake-up source */
4516                 clk_disable(priv->plat->pclk);
4517                 clk_disable(priv->plat->stmmac_clk);
4518         }
4519         spin_unlock_irqrestore(&priv->lock, flags);
4520
4521         priv->oldlink = false;
4522         priv->speed = SPEED_UNKNOWN;
4523         priv->oldduplex = DUPLEX_UNKNOWN;
4524         return 0;
4525 }
4526 EXPORT_SYMBOL_GPL(stmmac_suspend);
4527
4528 /**
4529  * stmmac_reset_queues_param - reset queue parameters
4530  * @priv: driver private structure
4531  */
4532 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4533 {
4534         u32 rx_cnt = priv->plat->rx_queues_to_use;
4535         u32 tx_cnt = priv->plat->tx_queues_to_use;
4536         u32 queue;
4537
4538         for (queue = 0; queue < rx_cnt; queue++) {
4539                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4540
4541                 rx_q->cur_rx = 0;
4542                 rx_q->dirty_rx = 0;
4543         }
4544
4545         for (queue = 0; queue < tx_cnt; queue++) {
4546                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4547
4548                 tx_q->cur_tx = 0;
4549                 tx_q->dirty_tx = 0;
4550                 tx_q->mss = 0;
4551         }
4552 }
4553
4554 /**
4555  * stmmac_resume - resume callback
4556  * @dev: device pointer
4557  * Description: when resuming, this function is invoked to set up the DMA and CORE
4558  * in a usable state.
4559  */
4560 int stmmac_resume(struct device *dev)
4561 {
4562         struct net_device *ndev = dev_get_drvdata(dev);
4563         struct stmmac_priv *priv = netdev_priv(ndev);
4564         unsigned long flags;
4565
4566         if (!netif_running(ndev))
4567                 return 0;
4568
4569         /* The Power Down bit in the PM register is cleared
4570          * automatically as soon as a magic packet or a Wake-up frame
4571          * is received. Anyway, it's better to manually clear
4572          * this bit because it can generate problems while resuming
4573          * from other devices (e.g. a serial console).
4574          */
4575         if (device_may_wakeup(priv->device)) {
4576                 spin_lock_irqsave(&priv->lock, flags);
4577                 priv->hw->mac->pmt(priv->hw, 0);
4578                 spin_unlock_irqrestore(&priv->lock, flags);
4579                 priv->irq_wake = 0;
4580         } else {
4581                 pinctrl_pm_select_default_state(priv->device);
4582                 /* enable the clk previously disabled */
4583                 clk_enable(priv->plat->stmmac_clk);
4584                 clk_enable(priv->plat->pclk);
4585                 /* reset the phy so that it's ready */
4586                 if (priv->mii)
4587                         stmmac_mdio_reset(priv->mii);
4588         }
4589
4590         netif_device_attach(ndev);
4591
4592         spin_lock_irqsave(&priv->lock, flags);
4593
4594         stmmac_reset_queues_param(priv);
4595
4596         stmmac_clear_descriptors(priv);
4597
4598         stmmac_hw_setup(ndev, false);
4599         stmmac_init_tx_coalesce(priv);
4600         stmmac_set_rx_mode(ndev);
4601
4602         stmmac_enable_all_queues(priv);
4603
4604         stmmac_start_all_queues(priv);
4605
4606         spin_unlock_irqrestore(&priv->lock, flags);
4607
4608         if (ndev->phydev)
4609                 phy_start(ndev->phydev);
4610
4611         return 0;
4612 }
4613 EXPORT_SYMBOL_GPL(stmmac_resume);
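/* stmmac_suspend()/stmmac_resume() are exported so the bus glue drivers
 * (platform/PCI variants) can hook them into their dev_pm_ops, typically
 * via the SET_SYSTEM_SLEEP_PM_OPS() helpers.
 */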
4614
4615 #ifndef MODULE
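/* Parse the "stmmaceth=" kernel command line (built-in driver only):
 * each comma-separated "name:value" pair overrides the corresponding
 * module parameter defined above.
 */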
4616 static int __init stmmac_cmdline_opt(char *str)
4617 {
4618         char *opt;
4619
4620         if (!str || !*str)
4621                 return -EINVAL;
4622         while ((opt = strsep(&str, ",")) != NULL) {
4623                 if (!strncmp(opt, "debug:", 6)) {
4624                         if (kstrtoint(opt + 6, 0, &debug))
4625                                 goto err;
4626                 } else if (!strncmp(opt, "phyaddr:", 8)) {
4627                         if (kstrtoint(opt + 8, 0, &phyaddr))
4628                                 goto err;
4629                 } else if (!strncmp(opt, "buf_sz:", 7)) {
4630                         if (kstrtoint(opt + 7, 0, &buf_sz))
4631                                 goto err;
4632                 } else if (!strncmp(opt, "tc:", 3)) {
4633                         if (kstrtoint(opt + 3, 0, &tc))
4634                                 goto err;
4635                 } else if (!strncmp(opt, "watchdog:", 9)) {
4636                         if (kstrtoint(opt + 9, 0, &watchdog))
4637                                 goto err;
4638                 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
4639                         if (kstrtoint(opt + 10, 0, &flow_ctrl))
4640                                 goto err;
4641                 } else if (!strncmp(opt, "pause:", 6)) {
4642                         if (kstrtoint(opt + 6, 0, &pause))
4643                                 goto err;
4644                 } else if (!strncmp(opt, "eee_timer:", 10)) {
4645                         if (kstrtoint(opt + 10, 0, &eee_timer))
4646                                 goto err;
4647                 } else if (!strncmp(opt, "chain_mode:", 11)) {
4648                         if (kstrtoint(opt + 11, 0, &chain_mode))
4649                                 goto err;
4650                 }
4651         }
4652         return 0;
4653
4654 err:
4655         pr_err("%s: ERROR broken module parameter conversion\n", __func__);
4656         return -EINVAL;
4657 }
4658
4659 __setup("stmmaceth=", stmmac_cmdline_opt);
4660 #endif /* MODULE */
4661
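/* Module entry point: only creates the common debugfs root directory
 * (shared by all stmmac devices) when CONFIG_DEBUG_FS is enabled.
 */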
4662 static int __init stmmac_init(void)
4663 {
4664 #ifdef CONFIG_DEBUG_FS
4665         /* Create debugfs main directory if it doesn't exist yet */
4666         if (!stmmac_fs_dir) {
4667                 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4668
4669                 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4670                         pr_err("ERROR %s, debugfs create directory failed\n",
4671                                STMMAC_RESOURCE_NAME);
4672
4673                         return -ENOMEM;
4674                 }
4675         }
4676 #endif
4677
4678         return 0;
4679 }
4680
4681 static void __exit stmmac_exit(void)
4682 {
4683 #ifdef CONFIG_DEBUG_FS
4684         debugfs_remove_recursive(stmmac_fs_dir);
4685 #endif
4686 }
4687
4688 module_init(stmmac_init)
4689 module_exit(stmmac_exit)
4690
4691 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4692 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4693 MODULE_LICENSE("GPL");