1 /*******************************************************************************
2   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3   ST Ethernet IPs are built around a Synopsys IP Core.
4
5         Copyright(C) 2007-2011 STMicroelectronics Ltd
6
7   This program is free software; you can redistribute it and/or modify it
8   under the terms and conditions of the GNU General Public License,
9   version 2, as published by the Free Software Foundation.
10
11   This program is distributed in the hope it will be useful, but WITHOUT
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14   more details.
15
16   The full GNU General Public License is included in this distribution in
17   the file called "COPYING".
18
19   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20
21   Documentation available at:
22         http://www.stlinux.com
23   Support available at:
24         https://bugzilla.stlinux.com/
25 *******************************************************************************/
26
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include <net/pkt_cls.h>
49 #include "stmmac_ptp.h"
50 #include "stmmac.h"
51 #include <linux/reset.h>
52 #include <linux/of_mdio.h>
53 #include "dwmac1000.h"
54 #include "dwxgmac2.h"
55 #include "hwif.h"
56
57 #define STMMAC_ALIGN(x)         __ALIGN_KERNEL(x, SMP_CACHE_BYTES)
58 #define TSO_MAX_BUFF_SIZE       (SZ_16K - 1)
59
60 /* Module parameters */
61 #define TX_TIMEO        5000
62 static int watchdog = TX_TIMEO;
63 module_param(watchdog, int, 0644);
64 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
65
66 static int debug = -1;
67 module_param(debug, int, 0644);
68 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
69
70 static int phyaddr = -1;
71 module_param(phyaddr, int, 0444);
72 MODULE_PARM_DESC(phyaddr, "Physical device address");
73
74 #define STMMAC_TX_THRESH        (DMA_TX_SIZE / 4)
75 #define STMMAC_RX_THRESH        (DMA_RX_SIZE / 4)
76
77 static int flow_ctrl = FLOW_OFF;
78 module_param(flow_ctrl, int, 0644);
79 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
80
81 static int pause = PAUSE_TIME;
82 module_param(pause, int, 0644);
83 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
84
85 #define TC_DEFAULT 64
86 static int tc = TC_DEFAULT;
87 module_param(tc, int, 0644);
88 MODULE_PARM_DESC(tc, "DMA threshold control value");
89
90 #define DEFAULT_BUFSIZE 1536
91 static int buf_sz = DEFAULT_BUFSIZE;
92 module_param(buf_sz, int, 0644);
93 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
94
95 #define STMMAC_RX_COPYBREAK     256
96
97 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
98                                       NETIF_MSG_LINK | NETIF_MSG_IFUP |
99                                       NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
100
101 #define STMMAC_DEFAULT_LPI_TIMER        1000
102 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
103 module_param(eee_timer, int, 0644);
104 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
105 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
106
107 /* By default the driver will use the ring mode to manage tx and rx descriptors,
108  * but allow the user to force the use of chain mode instead of ring mode.
109  */
110 static unsigned int chain_mode;
111 module_param(chain_mode, int, 0444);
112 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
113
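/* These module parameters can be set at load time, e.g.
 * "modprobe stmmac eee_timer=2000 chain_mode=1" (hypothetical values), or on
 * the kernel command line as "stmmac.eee_timer=2000" when the driver is
 * built in.
 */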
114 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
115
116 #ifdef CONFIG_DEBUG_FS
117 static int stmmac_init_fs(struct net_device *dev);
118 static void stmmac_exit_fs(struct net_device *dev);
119 #endif
120
121 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
122
123 /**
124  * stmmac_verify_args - verify the driver parameters.
125  * Description: it checks the driver parameters and sets a default in case of
126  * errors.
127  */
128 static void stmmac_verify_args(void)
129 {
130         if (unlikely(watchdog < 0))
131                 watchdog = TX_TIMEO;
132         if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
133                 buf_sz = DEFAULT_BUFSIZE;
134         if (unlikely(flow_ctrl > 1))
135                 flow_ctrl = FLOW_AUTO;
136         else if (likely(flow_ctrl < 0))
137                 flow_ctrl = FLOW_OFF;
138         if (unlikely((pause < 0) || (pause > 0xffff)))
139                 pause = PAUSE_TIME;
140         if (eee_timer < 0)
141                 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
142 }
143
144 /**
145  * stmmac_disable_all_queues - Disable all queues
146  * @priv: driver private structure
147  */
148 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
149 {
150         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
151         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
152         u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
153         u32 queue;
154
155         for (queue = 0; queue < maxq; queue++) {
156                 struct stmmac_channel *ch = &priv->channel[queue];
157
158                 if (queue < rx_queues_cnt)
159                         napi_disable(&ch->rx_napi);
160                 if (queue < tx_queues_cnt)
161                         napi_disable(&ch->tx_napi);
162         }
163 }
164
165 /**
166  * stmmac_enable_all_queues - Enable all queues
167  * @priv: driver private structure
168  */
169 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
170 {
171         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
172         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
173         u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
174         u32 queue;
175
176         for (queue = 0; queue < maxq; queue++) {
177                 struct stmmac_channel *ch = &priv->channel[queue];
178
179                 if (queue < rx_queues_cnt)
180                         napi_enable(&ch->rx_napi);
181                 if (queue < tx_queues_cnt)
182                         napi_enable(&ch->tx_napi);
183         }
184 }
185
186 /**
187  * stmmac_stop_all_queues - Stop all queues
188  * @priv: driver private structure
189  */
190 static void stmmac_stop_all_queues(struct stmmac_priv *priv)
191 {
192         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
193         u32 queue;
194
195         for (queue = 0; queue < tx_queues_cnt; queue++)
196                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
197 }
198
199 /**
200  * stmmac_start_all_queues - Start all queues
201  * @priv: driver private structure
202  */
203 static void stmmac_start_all_queues(struct stmmac_priv *priv)
204 {
205         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
206         u32 queue;
207
208         for (queue = 0; queue < tx_queues_cnt; queue++)
209                 netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
210 }
211
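/**
 * stmmac_service_event_schedule - schedule the service task
 * @priv: driver private structure
 * Description: queue the service work item unless the interface is going
 * down or the task has already been scheduled.
 */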
212 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
213 {
214         if (!test_bit(STMMAC_DOWN, &priv->state) &&
215             !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
216                 queue_work(priv->wq, &priv->service_task);
217 }
218
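/**
 * stmmac_global_err - handle a fatal, device-wide error
 * @priv: driver private structure
 * Description: take the carrier down, flag that a reset is required and
 * let the service task perform the actual recovery.
 */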
219 static void stmmac_global_err(struct stmmac_priv *priv)
220 {
221         netif_carrier_off(priv->dev);
222         set_bit(STMMAC_RESET_REQUESTED, &priv->state);
223         stmmac_service_event_schedule(priv);
224 }
225
226 /**
227  * stmmac_clk_csr_set - dynamically set the MDC clock
228  * @priv: driver private structure
229  * Description: this is to dynamically set the MDC clock according to the csr
230  * clock input.
231  * Note:
232  *      If a specific clk_csr value is passed from the platform
233  *      this means that the CSR Clock Range selection cannot be
234  *      changed at run-time and it is fixed (as reported in the driver
235  *      documentation). Otherwise, the driver will try to set the MDC
236  *      clock dynamically according to the actual clock input.
237  */
238 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
239 {
240         u32 clk_rate;
241
242         clk_rate = clk_get_rate(priv->plat->stmmac_clk);
243
244         /* The platform-provided default clk_csr is assumed valid for
245          * all cases except those mentioned below.
246          * For values higher than the IEEE 802.3 specified frequency
247          * range we cannot estimate the proper divider, as the
248          * frequency of clk_csr_i is not known; so we do not change
249          * the default divider.
250          */
251         if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
252                 if (clk_rate < CSR_F_35M)
253                         priv->clk_csr = STMMAC_CSR_20_35M;
254                 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
255                         priv->clk_csr = STMMAC_CSR_35_60M;
256                 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
257                         priv->clk_csr = STMMAC_CSR_60_100M;
258                 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
259                         priv->clk_csr = STMMAC_CSR_100_150M;
260                 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
261                         priv->clk_csr = STMMAC_CSR_150_250M;
262                 else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
263                         priv->clk_csr = STMMAC_CSR_250_300M;
264         }
265
266         if (priv->plat->has_sun8i) {
267                 if (clk_rate > 160000000)
268                         priv->clk_csr = 0x03;
269                 else if (clk_rate > 80000000)
270                         priv->clk_csr = 0x02;
271                 else if (clk_rate > 40000000)
272                         priv->clk_csr = 0x01;
273                 else
274                         priv->clk_csr = 0;
275         }
276
277         if (priv->plat->has_xgmac) {
278                 if (clk_rate > 400000000)
279                         priv->clk_csr = 0x5;
280                 else if (clk_rate > 350000000)
281                         priv->clk_csr = 0x4;
282                 else if (clk_rate > 300000000)
283                         priv->clk_csr = 0x3;
284                 else if (clk_rate > 250000000)
285                         priv->clk_csr = 0x2;
286                 else if (clk_rate > 150000000)
287                         priv->clk_csr = 0x1;
288                 else
289                         priv->clk_csr = 0x0;
290         }
291 }
292
293 static void print_pkt(unsigned char *buf, int len)
294 {
295         pr_debug("len = %d bytes, buf addr: 0x%p\n", len, buf);
296         print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
297 }
298
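/**
 * stmmac_tx_avail - get the number of free TX descriptors
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: one descriptor is always kept unused (hence the "- 1") so
 * that a completely full ring can be told apart from an empty one.
 */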
299 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
300 {
301         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
302         u32 avail;
303
304         if (tx_q->dirty_tx > tx_q->cur_tx)
305                 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
306         else
307                 avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
308
309         return avail;
310 }
311
312 /**
313  * stmmac_rx_dirty - Get RX queue dirty
314  * @priv: driver private structure
315  * @queue: RX queue index
316  */
317 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
318 {
319         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
320         u32 dirty;
321
322         if (rx_q->dirty_rx <= rx_q->cur_rx)
323                 dirty = rx_q->cur_rx - rx_q->dirty_rx;
324         else
325                 dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
326
327         return dirty;
328 }
329
330 /**
331  * stmmac_hw_fix_mac_speed - callback for speed selection
332  * @priv: driver private structure
333  * Description: on some platforms (e.g. ST), some HW system configuration
334  * registers have to be set according to the link speed negotiated.
335  */
336 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
337 {
338         struct net_device *ndev = priv->dev;
339         struct phy_device *phydev = ndev->phydev;
340
341         if (likely(priv->plat->fix_mac_speed))
342                 priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
343 }
344
345 /**
346  * stmmac_enable_eee_mode - check and enter in LPI mode
347  * @priv: driver private structure
348  * Description: this function checks whether it is possible to enter LPI
349  * mode, and does so when EEE is in use.
350  */
351 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
352 {
353         u32 tx_cnt = priv->plat->tx_queues_to_use;
354         u32 queue;
355
356         /* check if all TX queues have the work finished */
357         for (queue = 0; queue < tx_cnt; queue++) {
358                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
359
360                 if (tx_q->dirty_tx != tx_q->cur_tx)
361                         return; /* still unfinished work */
362         }
363
364         /* Check and enter in LPI mode */
365         if (!priv->tx_path_in_lpi_mode)
366                 stmmac_set_eee_mode(priv, priv->hw,
367                                 priv->plat->en_tx_lpi_clockgating);
368 }
369
370 /**
371  * stmmac_disable_eee_mode - disable and exit from LPI mode
372  * @priv: driver private structure
373  * Description: this function exits and disables EEE when the LPI state
374  * is active. It is called from the xmit path.
375  */
376 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
377 {
378         stmmac_reset_eee_mode(priv, priv->hw);
379         del_timer_sync(&priv->eee_ctrl_timer);
380         priv->tx_path_in_lpi_mode = false;
381 }
382
383 /**
384  * stmmac_eee_ctrl_timer - EEE TX SW timer.
385  * @t: timer_list hook
386  * Description:
387  *  if there is no data transfer and if we are not in LPI state,
388  *  then the MAC transmitter can be moved to LPI state.
389  */
390 static void stmmac_eee_ctrl_timer(struct timer_list *t)
391 {
392         struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
393
394         stmmac_enable_eee_mode(priv);
395         mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
396 }
397
398 /**
399  * stmmac_eee_init - init EEE
400  * @priv: driver private structure
401  * Description:
402  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
403  *  can also manage EEE, this function enables the LPI state and starts
404  *  the related timer.
405  */
406 bool stmmac_eee_init(struct stmmac_priv *priv)
407 {
408         struct net_device *ndev = priv->dev;
409         int interface = priv->plat->interface;
410         bool ret = false;
411
412         if ((interface != PHY_INTERFACE_MODE_MII) &&
413             (interface != PHY_INTERFACE_MODE_GMII) &&
414             !phy_interface_mode_is_rgmii(interface))
415                 goto out;
416
417         /* When using PCS we cannot deal with the PHY registers at this
418          * stage, so we do not support extra features like EEE.
419          */
420         if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
421             (priv->hw->pcs == STMMAC_PCS_TBI) ||
422             (priv->hw->pcs == STMMAC_PCS_RTBI))
423                 goto out;
424
425         /* MAC core supports the EEE feature. */
426         if (priv->dma_cap.eee) {
427                 int tx_lpi_timer = priv->tx_lpi_timer;
428
429                 /* Check if the PHY supports EEE */
430                 if (phy_init_eee(ndev->phydev, 1)) {
431                         /* Handle the case where EEE can no longer be
432                          * supported at run-time (for example because the
433                          * link partner caps have changed).
434                          * In that case the driver disables its own timers.
435                          */
436                         mutex_lock(&priv->lock);
437                         if (priv->eee_active) {
438                                 netdev_dbg(priv->dev, "disable EEE\n");
439                                 del_timer_sync(&priv->eee_ctrl_timer);
440                                 stmmac_set_eee_timer(priv, priv->hw, 0,
441                                                 tx_lpi_timer);
442                         }
443                         priv->eee_active = 0;
444                         mutex_unlock(&priv->lock);
445                         goto out;
446                 }
447                 /* Activate the EEE and start timers */
448                 mutex_lock(&priv->lock);
449                 if (!priv->eee_active) {
450                         priv->eee_active = 1;
451                         timer_setup(&priv->eee_ctrl_timer,
452                                     stmmac_eee_ctrl_timer, 0);
453                         mod_timer(&priv->eee_ctrl_timer,
454                                   STMMAC_LPI_T(eee_timer));
455
456                         stmmac_set_eee_timer(priv, priv->hw,
457                                         STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
458                 }
459                 /* Set HW EEE according to the speed */
460                 stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);
461
462                 ret = true;
463                 mutex_unlock(&priv->lock);
464
465                 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
466         }
467 out:
468         return ret;
469 }
470
471 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
472  * @priv: driver private structure
473  * @p : descriptor pointer
474  * @skb : the socket buffer
475  * Description :
476  * This function reads the timestamp from the descriptor, performs some
477  * sanity checks and passes it to the stack.
478  */
479 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
480                                    struct dma_desc *p, struct sk_buff *skb)
481 {
482         struct skb_shared_hwtstamps shhwtstamp;
483         u64 ns = 0;
484
485         if (!priv->hwts_tx_en)
486                 return;
487
488         /* exit if skb doesn't support hw tstamp */
489         if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
490                 return;
491
492         /* check tx tstamp status */
493         if (stmmac_get_tx_timestamp_status(priv, p)) {
494                 /* get the valid tstamp */
495                 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
496
497                 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
498                 shhwtstamp.hwtstamp = ns_to_ktime(ns);
499
500                 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
501                 /* pass tstamp to stack */
502                 skb_tstamp_tx(skb, &shhwtstamp);
503         }
504
505         return;
506 }
507
508 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
509  * @priv: driver private structure
510  * @p : descriptor pointer
511  * @np : next descriptor pointer
512  * @skb : the socket buffer
513  * Description :
514  * This function reads the received packet's timestamp from the descriptor
515  * and passes it to the stack. It also performs some sanity checks.
516  */
517 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
518                                    struct dma_desc *np, struct sk_buff *skb)
519 {
520         struct skb_shared_hwtstamps *shhwtstamp = NULL;
521         struct dma_desc *desc = p;
522         u64 ns = 0;
523
524         if (!priv->hwts_rx_en)
525                 return;
526         /* For GMAC4, the valid timestamp is from CTX next desc. */
527         if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
528                 desc = np;
529
530         /* Check if timestamp is available */
531         if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
532                 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
533                 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
534                 shhwtstamp = skb_hwtstamps(skb);
535                 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
536                 shhwtstamp->hwtstamp = ns_to_ktime(ns);
537         } else  {
538                 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
539         }
540 }
541
542 /**
543  *  stmmac_hwtstamp_set - control hardware timestamping.
544  *  @dev: device pointer.
545  *  @ifr: An IOCTL-specific structure that can contain a pointer to
546  *  a proprietary structure used to pass information to the driver.
547  *  Description:
548  *  This function configures the MAC to enable/disable both outgoing(TX)
549  *  and incoming(RX) packets time stamping based on user input.
550  *  Return Value:
551  *  0 on success and an appropriate -ve integer on failure.
552  */
553 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
554 {
555         struct stmmac_priv *priv = netdev_priv(dev);
556         struct hwtstamp_config config;
557         struct timespec64 now;
558         u64 temp = 0;
559         u32 ptp_v2 = 0;
560         u32 tstamp_all = 0;
561         u32 ptp_over_ipv4_udp = 0;
562         u32 ptp_over_ipv6_udp = 0;
563         u32 ptp_over_ethernet = 0;
564         u32 snap_type_sel = 0;
565         u32 ts_master_en = 0;
566         u32 ts_event_en = 0;
567         u32 sec_inc = 0;
568         u32 value = 0;
569         bool xmac;
570
571         xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
572
573         if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
574                 netdev_alert(priv->dev, "No support for HW time stamping\n");
575                 priv->hwts_tx_en = 0;
576                 priv->hwts_rx_en = 0;
577
578                 return -EOPNOTSUPP;
579         }
580
581         if (copy_from_user(&config, ifr->ifr_data,
582                            sizeof(config)))
583                 return -EFAULT;
584
585         netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
586                    __func__, config.flags, config.tx_type, config.rx_filter);
587
588         /* reserved for future extensions */
589         if (config.flags)
590                 return -EINVAL;
591
592         if (config.tx_type != HWTSTAMP_TX_OFF &&
593             config.tx_type != HWTSTAMP_TX_ON)
594                 return -ERANGE;
595
596         if (priv->adv_ts) {
597                 switch (config.rx_filter) {
598                 case HWTSTAMP_FILTER_NONE:
599                         /* time stamp no incoming packet at all */
600                         config.rx_filter = HWTSTAMP_FILTER_NONE;
601                         break;
602
603                 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
604                         /* PTP v1, UDP, any kind of event packet */
605                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
606                         /* 'xmac' hardware can support Sync, Pdelay_Req and
607                          * Pdelay_resp by setting bit 14 and bits 17/16 to 01.
608                          * This leaves Delay_Req timestamps out.
609                          * Enable all events *and* general purpose message
610                          * timestamping
611                          */
612                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
613                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
614                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
615                         break;
616
617                 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
618                         /* PTP v1, UDP, Sync packet */
619                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
620                         /* take time stamp for SYNC messages only */
621                         ts_event_en = PTP_TCR_TSEVNTENA;
622
623                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
624                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
625                         break;
626
627                 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
628                         /* PTP v1, UDP, Delay_req packet */
629                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
630                         /* take time stamp for Delay_Req messages only */
631                         ts_master_en = PTP_TCR_TSMSTRENA;
632                         ts_event_en = PTP_TCR_TSEVNTENA;
633
634                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
635                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
636                         break;
637
638                 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
639                         /* PTP v2, UDP, any kind of event packet */
640                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
641                         ptp_v2 = PTP_TCR_TSVER2ENA;
642                         /* take time stamp for all event messages */
643                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
644
645                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
646                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
647                         break;
648
649                 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
650                         /* PTP v2, UDP, Sync packet */
651                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
652                         ptp_v2 = PTP_TCR_TSVER2ENA;
653                         /* take time stamp for SYNC messages only */
654                         ts_event_en = PTP_TCR_TSEVNTENA;
655
656                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
657                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
658                         break;
659
660                 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
661                         /* PTP v2, UDP, Delay_req packet */
662                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
663                         ptp_v2 = PTP_TCR_TSVER2ENA;
664                         /* take time stamp for Delay_Req messages only */
665                         ts_master_en = PTP_TCR_TSMSTRENA;
666                         ts_event_en = PTP_TCR_TSEVNTENA;
667
668                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
669                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
670                         break;
671
672                 case HWTSTAMP_FILTER_PTP_V2_EVENT:
673                         /* PTP v2/802.1AS, any layer, any kind of event packet */
674                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
675                         ptp_v2 = PTP_TCR_TSVER2ENA;
676                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
677                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
678                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
679                         ptp_over_ethernet = PTP_TCR_TSIPENA;
680                         break;
681
682                 case HWTSTAMP_FILTER_PTP_V2_SYNC:
683                         /* PTP v2/802.1AS, any layer, Sync packet */
684                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
685                         ptp_v2 = PTP_TCR_TSVER2ENA;
686                         /* take time stamp for SYNC messages only */
687                         ts_event_en = PTP_TCR_TSEVNTENA;
688
689                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
690                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
691                         ptp_over_ethernet = PTP_TCR_TSIPENA;
692                         break;
693
694                 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
695                         /* PTP v2/802.1AS, any layer, Delay_req packet */
696                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
697                         ptp_v2 = PTP_TCR_TSVER2ENA;
698                         /* take time stamp for Delay_Req messages only */
699                         ts_master_en = PTP_TCR_TSMSTRENA;
700                         ts_event_en = PTP_TCR_TSEVNTENA;
701
702                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
703                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
704                         ptp_over_ethernet = PTP_TCR_TSIPENA;
705                         break;
706
707                 case HWTSTAMP_FILTER_NTP_ALL:
708                 case HWTSTAMP_FILTER_ALL:
709                         /* time stamp any incoming packet */
710                         config.rx_filter = HWTSTAMP_FILTER_ALL;
711                         tstamp_all = PTP_TCR_TSENALL;
712                         break;
713
714                 default:
715                         return -ERANGE;
716                 }
717         } else {
718                 switch (config.rx_filter) {
719                 case HWTSTAMP_FILTER_NONE:
720                         config.rx_filter = HWTSTAMP_FILTER_NONE;
721                         break;
722                 default:
723                         /* PTP v1, UDP, any kind of event packet */
724                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
725                         break;
726                 }
727         }
728         priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
729         priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
730
731         if (!priv->hwts_tx_en && !priv->hwts_rx_en)
732                 stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
733         else {
734                 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
735                          tstamp_all | ptp_v2 | ptp_over_ethernet |
736                          ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
737                          ts_master_en | snap_type_sel);
738                 stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
739
740                 /* program Sub Second Increment reg */
741                 stmmac_config_sub_second_increment(priv,
742                                 priv->ptpaddr, priv->plat->clk_ptp_rate,
743                                 xmac, &sec_inc);
744                 temp = div_u64(1000000000ULL, sec_inc);
745
746                 /* Store sub second increment and flags for later use */
747                 priv->sub_second_inc = sec_inc;
748                 priv->systime_flags = value;
749
750                 /* calculate default added value:
751                  * formula is :
752                  * addend = (2^32)/freq_div_ratio;
753                  * where, freq_div_ratio = 1e9ns/sec_inc
754                  */
755                 temp = (u64)(temp << 32);
756                 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
757                 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
758
759                 /* initialize system time */
760                 ktime_get_real_ts64(&now);
761
762                 /* lower 32 bits of tv_sec are safe until y2106 */
763                 stmmac_init_systime(priv, priv->ptpaddr,
764                                 (u32)now.tv_sec, now.tv_nsec);
765         }
766
767         memcpy(&priv->tstamp_config, &config, sizeof(config));
768
769         return copy_to_user(ifr->ifr_data, &config,
770                             sizeof(config)) ? -EFAULT : 0;
771 }
772
773 /**
774  *  stmmac_hwtstamp_get - read hardware timestamping.
775  *  @dev: device pointer.
776  *  @ifr: An IOCTL-specific structure that can contain a pointer to
777  *  a proprietary structure used to pass information to the driver.
778  *  Description:
779  *  This function obtains the current hardware timestamping settings
780  *  as requested.
781  */
782 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
783 {
784         struct stmmac_priv *priv = netdev_priv(dev);
785         struct hwtstamp_config *config = &priv->tstamp_config;
786
787         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
788                 return -EOPNOTSUPP;
789
790         return copy_to_user(ifr->ifr_data, config,
791                             sizeof(*config)) ? -EFAULT : 0;
792 }
793
794 /**
795  * stmmac_init_ptp - init PTP
796  * @priv: driver private structure
797  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
798  * This is done by looking at the HW cap. register.
799  * This function also registers the ptp driver.
800  */
801 static int stmmac_init_ptp(struct stmmac_priv *priv)
802 {
803         bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
804
805         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
806                 return -EOPNOTSUPP;
807
808         priv->adv_ts = 0;
809         /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
810         if (xmac && priv->dma_cap.atime_stamp)
811                 priv->adv_ts = 1;
812         /* Dwmac 3.x core with extend_desc can support adv_ts */
813         else if (priv->extend_desc && priv->dma_cap.atime_stamp)
814                 priv->adv_ts = 1;
815
816         if (priv->dma_cap.time_stamp)
817                 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
818
819         if (priv->adv_ts)
820                 netdev_info(priv->dev,
821                             "IEEE 1588-2008 Advanced Timestamp supported\n");
822
823         priv->hwts_tx_en = 0;
824         priv->hwts_rx_en = 0;
825
826         stmmac_ptp_register(priv);
827
828         return 0;
829 }
830
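/**
 * stmmac_release_ptp - release PTP resources
 * @priv: driver private structure
 * Description: disable the PTP reference clock, if one was provided by the
 * platform, and unregister the PTP clock driver.
 */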
831 static void stmmac_release_ptp(struct stmmac_priv *priv)
832 {
833         if (priv->plat->clk_ptp_ref)
834                 clk_disable_unprepare(priv->plat->clk_ptp_ref);
835         stmmac_ptp_unregister(priv);
836 }
837
838 /**
839  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
840  *  @priv: driver private structure
841  *  Description: It is used for configuring the flow control in all queues
842  */
843 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
844 {
845         u32 tx_cnt = priv->plat->tx_queues_to_use;
846
847         stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
848                         priv->pause, tx_cnt);
849 }
850
851 /**
852  * stmmac_adjust_link - adjusts the link parameters
853  * @dev: net device structure
854  * Description: this is the helper called by the physical abstraction layer
855  * drivers to communicate the PHY link status. According to the speed and
856  * duplex, this driver can invoke the registered glue-logic as well.
857  * It also invokes the EEE initialization because the link may be switched
858  * between different networks (that are EEE capable).
859  */
860 static void stmmac_adjust_link(struct net_device *dev)
861 {
862         struct stmmac_priv *priv = netdev_priv(dev);
863         struct phy_device *phydev = dev->phydev;
864         bool new_state = false;
865
866         if (!phydev)
867                 return;
868
869         mutex_lock(&priv->lock);
870
871         if (phydev->link) {
872                 u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
873
874                 /* Now we make sure that we can be in full duplex mode.
875                  * If not, we operate in half-duplex mode. */
876                 if (phydev->duplex != priv->oldduplex) {
877                         new_state = true;
878                         if (!phydev->duplex)
879                                 ctrl &= ~priv->hw->link.duplex;
880                         else
881                                 ctrl |= priv->hw->link.duplex;
882                         priv->oldduplex = phydev->duplex;
883                 }
884                 /* Flow Control operation */
885                 if (phydev->pause)
886                         stmmac_mac_flow_ctrl(priv, phydev->duplex);
887
888                 if (phydev->speed != priv->speed) {
889                         new_state = true;
890                         ctrl &= ~priv->hw->link.speed_mask;
891                         switch (phydev->speed) {
892                         case SPEED_1000:
893                                 ctrl |= priv->hw->link.speed1000;
894                                 break;
895                         case SPEED_100:
896                                 ctrl |= priv->hw->link.speed100;
897                                 break;
898                         case SPEED_10:
899                                 ctrl |= priv->hw->link.speed10;
900                                 break;
901                         default:
902                                 netif_warn(priv, link, priv->dev,
903                                            "broken speed: %d\n", phydev->speed);
904                                 phydev->speed = SPEED_UNKNOWN;
905                                 break;
906                         }
907                         if (phydev->speed != SPEED_UNKNOWN)
908                                 stmmac_hw_fix_mac_speed(priv);
909                         priv->speed = phydev->speed;
910                 }
911
912                 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
913
914                 if (!priv->oldlink) {
915                         new_state = true;
916                         priv->oldlink = true;
917                 }
918         } else if (priv->oldlink) {
919                 new_state = true;
920                 priv->oldlink = false;
921                 priv->speed = SPEED_UNKNOWN;
922                 priv->oldduplex = DUPLEX_UNKNOWN;
923         }
924
925         if (new_state && netif_msg_link(priv))
926                 phy_print_status(phydev);
927
928         mutex_unlock(&priv->lock);
929
930         if (phydev->is_pseudo_fixed_link)
931                 /* Stop the PHY layer from calling the adjust_link hook in
932                  * case a switch is attached to the stmmac driver.
933                  */
934                 phydev->irq = PHY_IGNORE_INTERRUPT;
935         else
936                 /* At this stage, init the EEE if supported.
937                  * Never called in case of fixed_link.
938                  */
939                 priv->eee_enabled = stmmac_eee_init(priv);
940 }
941
942 /**
943  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
944  * @priv: driver private structure
945  * Description: this is to verify if the HW supports the Physical Coding
946  * Sublayer (PCS), an interface that can be used when the MAC is
947  * configured for the TBI, RTBI, or SGMII PHY interface.
948  */
949 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
950 {
951         int interface = priv->plat->interface;
952
953         if (priv->dma_cap.pcs) {
954                 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
955                     (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
956                     (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
957                     (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
958                         netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
959                         priv->hw->pcs = STMMAC_PCS_RGMII;
960                 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
961                         netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
962                         priv->hw->pcs = STMMAC_PCS_SGMII;
963                 }
964         }
965 }
966
967 /**
968  * stmmac_init_phy - PHY initialization
969  * @dev: net device structure
970  * Description: it initializes the driver's PHY state, and attaches the PHY
971  * to the mac driver.
972  *  Return value:
973  *  0 on success
974  */
975 static int stmmac_init_phy(struct net_device *dev)
976 {
977         struct stmmac_priv *priv = netdev_priv(dev);
978         u32 tx_cnt = priv->plat->tx_queues_to_use;
979         struct phy_device *phydev;
980         char phy_id_fmt[MII_BUS_ID_SIZE + 3];
981         char bus_id[MII_BUS_ID_SIZE];
982         int interface = priv->plat->interface;
983         int max_speed = priv->plat->max_speed;
984         priv->oldlink = false;
985         priv->speed = SPEED_UNKNOWN;
986         priv->oldduplex = DUPLEX_UNKNOWN;
987
988         if (priv->plat->phy_node) {
989                 phydev = of_phy_connect(dev, priv->plat->phy_node,
990                                         &stmmac_adjust_link, 0, interface);
991         } else {
992                 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
993                          priv->plat->bus_id);
994
995                 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
996                          priv->plat->phy_addr);
997                 netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
998                            phy_id_fmt);
999
1000                 phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
1001                                      interface);
1002         }
1003
1004         if (IS_ERR_OR_NULL(phydev)) {
1005                 netdev_err(priv->dev, "Could not attach to PHY\n");
1006                 if (!phydev)
1007                         return -ENODEV;
1008
1009                 return PTR_ERR(phydev);
1010         }
1011
1012         /* Stop Advertising 1000BASE Capability if interface is not GMII */
1013         if ((interface == PHY_INTERFACE_MODE_MII) ||
1014             (interface == PHY_INTERFACE_MODE_RMII) ||
1015                 (max_speed < 1000 && max_speed > 0))
1016                 phy_set_max_speed(phydev, SPEED_100);
1017
1018         /*
1019          * Half-duplex mode is not supported with multiqueue;
1020          * half-duplex can only work with a single queue.
1021          */
1022         if (tx_cnt > 1) {
1023                 phy_remove_link_mode(phydev,
1024                                      ETHTOOL_LINK_MODE_10baseT_Half_BIT);
1025                 phy_remove_link_mode(phydev,
1026                                      ETHTOOL_LINK_MODE_100baseT_Half_BIT);
1027                 phy_remove_link_mode(phydev,
1028                                      ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
1029         }
1030
1031         /*
1032          * Broken HW is sometimes missing the pull-up resistor on the
1033          * MDIO line, which results in reads to non-existent devices returning
1034          * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
1035          * device as well.
1036          * Note: phydev->phy_id is the result of reading the UID PHY registers.
1037          */
1038         if (!priv->plat->phy_node && phydev->phy_id == 0) {
1039                 phy_disconnect(phydev);
1040                 return -ENODEV;
1041         }
1042
1043         /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
1044          * subsequent PHY polling; make sure we force a link transition if
1045          * we have an UP/DOWN/UP transition
1046          */
1047         if (phydev->is_pseudo_fixed_link)
1048                 phydev->irq = PHY_POLL;
1049
1050         phy_attached_info(phydev);
1051         return 0;
1052 }
1053
1054 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1055 {
1056         u32 rx_cnt = priv->plat->rx_queues_to_use;
1057         void *head_rx;
1058         u32 queue;
1059
1060         /* Display RX rings */
1061         for (queue = 0; queue < rx_cnt; queue++) {
1062                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1063
1064                 pr_info("\tRX Queue %u rings\n", queue);
1065
1066                 if (priv->extend_desc)
1067                         head_rx = (void *)rx_q->dma_erx;
1068                 else
1069                         head_rx = (void *)rx_q->dma_rx;
1070
1071                 /* Display RX ring */
1072                 stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
1073         }
1074 }
1075
1076 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1077 {
1078         u32 tx_cnt = priv->plat->tx_queues_to_use;
1079         void *head_tx;
1080         u32 queue;
1081
1082         /* Display TX rings */
1083         for (queue = 0; queue < tx_cnt; queue++) {
1084                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1085
1086                 pr_info("\tTX Queue %d rings\n", queue);
1087
1088                 if (priv->extend_desc)
1089                         head_tx = (void *)tx_q->dma_etx;
1090                 else
1091                         head_tx = (void *)tx_q->dma_tx;
1092
1093                 stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
1094         }
1095 }
1096
1097 static void stmmac_display_rings(struct stmmac_priv *priv)
1098 {
1099         /* Display RX ring */
1100         stmmac_display_rx_rings(priv);
1101
1102         /* Display TX ring */
1103         stmmac_display_tx_rings(priv);
1104 }
1105
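/**
 * stmmac_set_bfsize - select the DMA buffer size from the MTU
 * @mtu: interface MTU
 * @bufsize: current buffer size (used only as the initial value of the result)
 * Description: round the required buffer size up to 2KiB, 4KiB or 8KiB
 * depending on the MTU, falling back to DEFAULT_BUFSIZE for small MTUs.
 */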
1106 static int stmmac_set_bfsize(int mtu, int bufsize)
1107 {
1108         int ret = bufsize;
1109
1110         if (mtu >= BUF_SIZE_4KiB)
1111                 ret = BUF_SIZE_8KiB;
1112         else if (mtu >= BUF_SIZE_2KiB)
1113                 ret = BUF_SIZE_4KiB;
1114         else if (mtu > DEFAULT_BUFSIZE)
1115                 ret = BUF_SIZE_2KiB;
1116         else
1117                 ret = DEFAULT_BUFSIZE;
1118
1119         return ret;
1120 }
1121
1122 /**
1123  * stmmac_clear_rx_descriptors - clear RX descriptors
1124  * @priv: driver private structure
1125  * @queue: RX queue index
1126  * Description: this function is called to clear the RX descriptors
1127  * whether basic or extended descriptors are in use.
1128  */
1129 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1130 {
1131         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1132         int i;
1133
1134         /* Clear the RX descriptors */
1135         for (i = 0; i < DMA_RX_SIZE; i++)
1136                 if (priv->extend_desc)
1137                         stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1138                                         priv->use_riwt, priv->mode,
1139                                         (i == DMA_RX_SIZE - 1));
1140                 else
1141                         stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1142                                         priv->use_riwt, priv->mode,
1143                                         (i == DMA_RX_SIZE - 1));
1144 }
1145
1146 /**
1147  * stmmac_clear_tx_descriptors - clear tx descriptors
1148  * @priv: driver private structure
1149  * @queue: TX queue index.
1150  * Description: this function is called to clear the TX descriptors
1151  * whether basic or extended descriptors are in use.
1152  */
1153 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1154 {
1155         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1156         int i;
1157
1158         /* Clear the TX descriptors */
1159         for (i = 0; i < DMA_TX_SIZE; i++)
1160                 if (priv->extend_desc)
1161                         stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1162                                         priv->mode, (i == DMA_TX_SIZE - 1));
1163                 else
1164                         stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1165                                         priv->mode, (i == DMA_TX_SIZE - 1));
1166 }
1167
1168 /**
1169  * stmmac_clear_descriptors - clear descriptors
1170  * @priv: driver private structure
1171  * Description: this function is called to clear the TX and RX descriptors
1172  * whether basic or extended descriptors are in use.
1173  */
1174 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1175 {
1176         u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1177         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1178         u32 queue;
1179
1180         /* Clear the RX descriptors */
1181         for (queue = 0; queue < rx_queue_cnt; queue++)
1182                 stmmac_clear_rx_descriptors(priv, queue);
1183
1184         /* Clear the TX descriptors */
1185         for (queue = 0; queue < tx_queue_cnt; queue++)
1186                 stmmac_clear_tx_descriptors(priv, queue);
1187 }
1188
1189 /**
1190  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1191  * @priv: driver private structure
1192  * @p: descriptor pointer
1193  * @i: descriptor index
1194  * @flags: gfp flag
1195  * @queue: RX queue index
1196  * Description: this function is called to allocate a receive buffer, perform
1197  * the DMA mapping and init the descriptor.
1198  */
1199 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1200                                   int i, gfp_t flags, u32 queue)
1201 {
1202         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1203         struct sk_buff *skb;
1204
1205         skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1206         if (!skb) {
1207                 netdev_err(priv->dev,
1208                            "%s: Rx init fails; skb is NULL\n", __func__);
1209                 return -ENOMEM;
1210         }
1211         rx_q->rx_skbuff[i] = skb;
1212         rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1213                                                 priv->dma_buf_sz,
1214                                                 DMA_FROM_DEVICE);
1215         if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1216                 netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1217                 dev_kfree_skb_any(skb);
1218                 return -EINVAL;
1219         }
1220
1221         stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);
1222
1223         if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1224                 stmmac_init_desc3(priv, p);
1225
1226         return 0;
1227 }
1228
1229 /**
1230  * stmmac_free_rx_buffer - free RX dma buffers
1231  * @priv: private structure
1232  * @queue: RX queue index
1233  * @i: buffer index.
1234  */
1235 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1236 {
1237         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1238
1239         if (rx_q->rx_skbuff[i]) {
1240                 dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1241                                  priv->dma_buf_sz, DMA_FROM_DEVICE);
1242                 dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1243         }
1244         rx_q->rx_skbuff[i] = NULL;
1245 }
1246
1247 /**
1248  * stmmac_free_tx_buffer - free TX dma buffers
1249  * @priv: private structure
1250  * @queue: TX queue index
1251  * @i: buffer index.
1252  */
1253 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1254 {
1255         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1256
1257         if (tx_q->tx_skbuff_dma[i].buf) {
1258                 if (tx_q->tx_skbuff_dma[i].map_as_page)
1259                         dma_unmap_page(priv->device,
1260                                        tx_q->tx_skbuff_dma[i].buf,
1261                                        tx_q->tx_skbuff_dma[i].len,
1262                                        DMA_TO_DEVICE);
1263                 else
1264                         dma_unmap_single(priv->device,
1265                                          tx_q->tx_skbuff_dma[i].buf,
1266                                          tx_q->tx_skbuff_dma[i].len,
1267                                          DMA_TO_DEVICE);
1268         }
1269
1270         if (tx_q->tx_skbuff[i]) {
1271                 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1272                 tx_q->tx_skbuff[i] = NULL;
1273                 tx_q->tx_skbuff_dma[i].buf = 0;
1274                 tx_q->tx_skbuff_dma[i].map_as_page = false;
1275         }
1276 }
1277
1278 /**
1279  * init_dma_rx_desc_rings - init the RX descriptor rings
1280  * @dev: net device structure
1281  * @flags: gfp flag.
1282  * Description: this function initializes the DMA RX descriptors
1283  * and allocates the socket buffers. It supports the chained and ring
1284  * modes.
1285  */
1286 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1287 {
1288         struct stmmac_priv *priv = netdev_priv(dev);
1289         u32 rx_count = priv->plat->rx_queues_to_use;
1290         int ret = -ENOMEM;
1291         int bfsize = 0;
1292         int queue;
1293         int i;
1294
1295         bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
1296         if (bfsize < 0)
1297                 bfsize = 0;
1298
1299         if (bfsize < BUF_SIZE_16KiB)
1300                 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1301
1302         priv->dma_buf_sz = bfsize;
1303
1304         /* RX INITIALIZATION */
1305         netif_dbg(priv, probe, priv->dev,
1306                   "SKB addresses:\nskb\t\tskb data\tdma data\n");
1307
1308         for (queue = 0; queue < rx_count; queue++) {
1309                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1310
1311                 netif_dbg(priv, probe, priv->dev,
1312                           "(%s) dma_rx_phy=0x%08x\n", __func__,
1313                           (u32)rx_q->dma_rx_phy);
1314
1315                 for (i = 0; i < DMA_RX_SIZE; i++) {
1316                         struct dma_desc *p;
1317
1318                         if (priv->extend_desc)
1319                                 p = &((rx_q->dma_erx + i)->basic);
1320                         else
1321                                 p = rx_q->dma_rx + i;
1322
1323                         ret = stmmac_init_rx_buffers(priv, p, i, flags,
1324                                                      queue);
1325                         if (ret)
1326                                 goto err_init_rx_buffers;
1327
1328                         netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1329                                   rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1330                                   (unsigned int)rx_q->rx_skbuff_dma[i]);
1331                 }
1332
1333                 rx_q->cur_rx = 0;
1334                 rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1335
1336                 stmmac_clear_rx_descriptors(priv, queue);
1337
1338                 /* Setup the chained descriptor addresses */
1339                 if (priv->mode == STMMAC_CHAIN_MODE) {
1340                         if (priv->extend_desc)
1341                                 stmmac_mode_init(priv, rx_q->dma_erx,
1342                                                 rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
1343                         else
1344                                 stmmac_mode_init(priv, rx_q->dma_rx,
1345                                                 rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
1346                 }
1347         }
1348
1349         buf_sz = bfsize;
1350
1351         return 0;
1352
1353 err_init_rx_buffers:
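        /* Unwind: release the buffers allocated so far, starting from the
         * partially-initialized queue and walking back to queue 0.
         */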
1354         while (queue >= 0) {
1355                 while (--i >= 0)
1356                         stmmac_free_rx_buffer(priv, queue, i);
1357
1358                 if (queue == 0)
1359                         break;
1360
1361                 i = DMA_RX_SIZE;
1362                 queue--;
1363         }
1364
1365         return ret;
1366 }
1367
1368 /**
1369  * init_dma_tx_desc_rings - init the TX descriptor rings
1370  * @dev: net device structure.
1371  * Description: this function initializes the DMA TX descriptors
1372  * and allocates the socket buffers. It supports the chained and ring
1373  * modes.
1374  */
1375 static int init_dma_tx_desc_rings(struct net_device *dev)
1376 {
1377         struct stmmac_priv *priv = netdev_priv(dev);
1378         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1379         u32 queue;
1380         int i;
1381
1382         for (queue = 0; queue < tx_queue_cnt; queue++) {
1383                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1384
1385                 netif_dbg(priv, probe, priv->dev,
1386                           "(%s) dma_tx_phy=0x%08x\n", __func__,
1387                          (u32)tx_q->dma_tx_phy);
1388
1389                 /* Setup the chained descriptor addresses */
1390                 if (priv->mode == STMMAC_CHAIN_MODE) {
1391                         if (priv->extend_desc)
1392                                 stmmac_mode_init(priv, tx_q->dma_etx,
1393                                                 tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
1394                         else
1395                                 stmmac_mode_init(priv, tx_q->dma_tx,
1396                                                 tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
1397                 }
1398
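                /* Clear each TX descriptor and reset the per-entry buffer
                 * bookkeeping before the ring is used.
                 */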
1399                 for (i = 0; i < DMA_TX_SIZE; i++) {
1400                         struct dma_desc *p;
1401                         if (priv->extend_desc)
1402                                 p = &((tx_q->dma_etx + i)->basic);
1403                         else
1404                                 p = tx_q->dma_tx + i;
1405
1406                         stmmac_clear_desc(priv, p);
1407
1408                         tx_q->tx_skbuff_dma[i].buf = 0;
1409                         tx_q->tx_skbuff_dma[i].map_as_page = false;
1410                         tx_q->tx_skbuff_dma[i].len = 0;
1411                         tx_q->tx_skbuff_dma[i].last_segment = false;
1412                         tx_q->tx_skbuff[i] = NULL;
1413                 }
1414
1415                 tx_q->dirty_tx = 0;
1416                 tx_q->cur_tx = 0;
1417                 tx_q->mss = 0;
1418
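                /* Also reset the BQL (byte queue limits) state for this queue */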
1419                 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1420         }
1421
1422         return 0;
1423 }
1424
1425 /**
1426  * init_dma_desc_rings - init the RX/TX descriptor rings
1427  * @dev: net device structure
1428  * @flags: gfp flag.
1429  * Description: this function initializes the DMA RX/TX descriptors
1430  * and allocates the socket buffers. It supports the chained and ring
1431  * modes.
1432  */
1433 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1434 {
1435         struct stmmac_priv *priv = netdev_priv(dev);
1436         int ret;
1437
1438         ret = init_dma_rx_desc_rings(dev, flags);
1439         if (ret)
1440                 return ret;
1441
1442         ret = init_dma_tx_desc_rings(dev);
1443
1444         stmmac_clear_descriptors(priv);
1445
1446         if (netif_msg_hw(priv))
1447                 stmmac_display_rings(priv);
1448
1449         return ret;
1450 }
1451
1452 /**
1453  * dma_free_rx_skbufs - free RX dma buffers
1454  * @priv: private structure
1455  * @queue: RX queue index
1456  */
1457 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1458 {
1459         int i;
1460
1461         for (i = 0; i < DMA_RX_SIZE; i++)
1462                 stmmac_free_rx_buffer(priv, queue, i);
1463 }
1464
1465 /**
1466  * dma_free_tx_skbufs - free TX dma buffers
1467  * @priv: private structure
1468  * @queue: TX queue index
1469  */
1470 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1471 {
1472         int i;
1473
1474         for (i = 0; i < DMA_TX_SIZE; i++)
1475                 stmmac_free_tx_buffer(priv, queue, i);
1476 }
1477
1478 /**
1479  * free_dma_rx_desc_resources - free RX dma desc resources
1480  * @priv: private structure
1481  */
1482 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1483 {
1484         u32 rx_count = priv->plat->rx_queues_to_use;
1485         u32 queue;
1486
1487         /* Free RX queue resources */
1488         for (queue = 0; queue < rx_count; queue++) {
1489                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1490
1491                 /* Release the DMA RX socket buffers */
1492                 dma_free_rx_skbufs(priv, queue);
1493
1494                 /* Free DMA regions of consistent memory previously allocated */
1495                 if (!priv->extend_desc)
1496                         dma_free_coherent(priv->device,
1497                                           DMA_RX_SIZE * sizeof(struct dma_desc),
1498                                           rx_q->dma_rx, rx_q->dma_rx_phy);
1499                 else
1500                         dma_free_coherent(priv->device, DMA_RX_SIZE *
1501                                           sizeof(struct dma_extended_desc),
1502                                           rx_q->dma_erx, rx_q->dma_rx_phy);
1503
1504                 kfree(rx_q->rx_skbuff_dma);
1505                 kfree(rx_q->rx_skbuff);
1506         }
1507 }
1508
1509 /**
1510  * free_dma_tx_desc_resources - free TX dma desc resources
1511  * @priv: private structure
1512  */
1513 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1514 {
1515         u32 tx_count = priv->plat->tx_queues_to_use;
1516         u32 queue;
1517
1518         /* Free TX queue resources */
1519         for (queue = 0; queue < tx_count; queue++) {
1520                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1521
1522                 /* Release the DMA TX socket buffers */
1523                 dma_free_tx_skbufs(priv, queue);
1524
1525                 /* Free DMA regions of consistent memory previously allocated */
1526                 if (!priv->extend_desc)
1527                         dma_free_coherent(priv->device,
1528                                           DMA_TX_SIZE * sizeof(struct dma_desc),
1529                                           tx_q->dma_tx, tx_q->dma_tx_phy);
1530                 else
1531                         dma_free_coherent(priv->device, DMA_TX_SIZE *
1532                                           sizeof(struct dma_extended_desc),
1533                                           tx_q->dma_etx, tx_q->dma_tx_phy);
1534
1535                 kfree(tx_q->tx_skbuff_dma);
1536                 kfree(tx_q->tx_skbuff);
1537         }
1538 }
1539
1540 /**
1541  * alloc_dma_rx_desc_resources - alloc RX resources.
1542  * @priv: private structure
1543  * Description: according to which descriptor can be used (extended or basic)
1544  * this function allocates the descriptor rings and the buffer bookkeeping
1545  * arrays for the RX path. The RX socket buffers themselves are pre-allocated
1546  * when the rings are initialized, to allow the zero-copy mechanism.
1547  */
1548 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1549 {
1550         u32 rx_count = priv->plat->rx_queues_to_use;
1551         int ret = -ENOMEM;
1552         u32 queue;
1553
1554         /* RX queues buffers and DMA */
1555         for (queue = 0; queue < rx_count; queue++) {
1556                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1557
1558                 rx_q->queue_index = queue;
1559                 rx_q->priv_data = priv;
1560
1561                 rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1562                                                     sizeof(dma_addr_t),
1563                                                     GFP_KERNEL);
1564                 if (!rx_q->rx_skbuff_dma)
1565                         goto err_dma;
1566
1567                 rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1568                                                 sizeof(struct sk_buff *),
1569                                                 GFP_KERNEL);
1570                 if (!rx_q->rx_skbuff)
1571                         goto err_dma;
1572
1573                 if (priv->extend_desc) {
1574                         rx_q->dma_erx = dma_alloc_coherent(priv->device,
1575                                                            DMA_RX_SIZE * sizeof(struct dma_extended_desc),
1576                                                            &rx_q->dma_rx_phy,
1577                                                            GFP_KERNEL);
1578                         if (!rx_q->dma_erx)
1579                                 goto err_dma;
1580
1581                 } else {
1582                         rx_q->dma_rx = dma_alloc_coherent(priv->device,
1583                                                           DMA_RX_SIZE * sizeof(struct dma_desc),
1584                                                           &rx_q->dma_rx_phy,
1585                                                           GFP_KERNEL);
1586                         if (!rx_q->dma_rx)
1587                                 goto err_dma;
1588                 }
1589         }
1590
1591         return 0;
1592
1593 err_dma:
1594         free_dma_rx_desc_resources(priv);
1595
1596         return ret;
1597 }
1598
1599 /**
1600  * alloc_dma_tx_desc_resources - alloc TX resources.
1601  * @priv: private structure
1602  * Description: according to which descriptor can be used (extended or
1603  * basic) this function allocates the descriptor rings and the buffer
1604  * bookkeeping arrays (skb pointers and DMA addresses) for the TX
1605  * path.
1606  */
1607 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1608 {
1609         u32 tx_count = priv->plat->tx_queues_to_use;
1610         int ret = -ENOMEM;
1611         u32 queue;
1612
1613         /* TX queues buffers and DMA */
1614         for (queue = 0; queue < tx_count; queue++) {
1615                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1616
1617                 tx_q->queue_index = queue;
1618                 tx_q->priv_data = priv;
1619
1620                 tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1621                                                     sizeof(*tx_q->tx_skbuff_dma),
1622                                                     GFP_KERNEL);
1623                 if (!tx_q->tx_skbuff_dma)
1624                         goto err_dma;
1625
1626                 tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1627                                                 sizeof(struct sk_buff *),
1628                                                 GFP_KERNEL);
1629                 if (!tx_q->tx_skbuff)
1630                         goto err_dma;
1631
1632                 if (priv->extend_desc) {
1633                         tx_q->dma_etx = dma_alloc_coherent(priv->device,
1634                                                            DMA_TX_SIZE * sizeof(struct dma_extended_desc),
1635                                                            &tx_q->dma_tx_phy,
1636                                                            GFP_KERNEL);
1637                         if (!tx_q->dma_etx)
1638                                 goto err_dma;
1639                 } else {
1640                         tx_q->dma_tx = dma_alloc_coherent(priv->device,
1641                                                           DMA_TX_SIZE * sizeof(struct dma_desc),
1642                                                           &tx_q->dma_tx_phy,
1643                                                           GFP_KERNEL);
1644                         if (!tx_q->dma_tx)
1645                                 goto err_dma;
1646                 }
1647         }
1648
1649         return 0;
1650
1651 err_dma:
1652         free_dma_tx_desc_resources(priv);
1653
1654         return ret;
1655 }
1656
1657 /**
1658  * alloc_dma_desc_resources - alloc TX/RX resources.
1659  * @priv: private structure
1660  * Description: according to which descriptor can be used (extended or basic)
1661  * this function allocates the resources for the TX and RX paths. For
1662  * reception, the RX socket buffers are later pre-allocated at ring-init time
1663  * in order to allow the zero-copy mechanism.
1664  */
1665 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1666 {
1667         /* RX Allocation */
1668         int ret = alloc_dma_rx_desc_resources(priv);
1669
1670         if (ret)
1671                 return ret;
1672
1673         ret = alloc_dma_tx_desc_resources(priv);
1674
1675         return ret;
1676 }
1677
1678 /**
1679  * free_dma_desc_resources - free dma desc resources
1680  * @priv: private structure
1681  */
1682 static void free_dma_desc_resources(struct stmmac_priv *priv)
1683 {
1684         /* Release the DMA RX socket buffers */
1685         free_dma_rx_desc_resources(priv);
1686
1687         /* Release the DMA TX socket buffers */
1688         free_dma_tx_desc_resources(priv);
1689 }
1690
1691 /**
1692  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1693  *  @priv: driver private structure
1694  *  Description: It is used for enabling the rx queues in the MAC
1695  */
1696 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1697 {
1698         u32 rx_queues_count = priv->plat->rx_queues_to_use;
1699         int queue;
1700         u8 mode;
1701
1702         for (queue = 0; queue < rx_queues_count; queue++) {
1703                 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1704                 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
1705         }
1706 }
1707
1708 /**
1709  * stmmac_start_rx_dma - start RX DMA channel
1710  * @priv: driver private structure
1711  * @chan: RX channel index
1712  * Description:
1713  * This starts a RX DMA channel
1714  */
1715 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1716 {
1717         netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1718         stmmac_start_rx(priv, priv->ioaddr, chan);
1719 }
1720
1721 /**
1722  * stmmac_start_tx_dma - start TX DMA channel
1723  * @priv: driver private structure
1724  * @chan: TX channel index
1725  * Description:
1726  * This starts a TX DMA channel
1727  */
1728 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1729 {
1730         netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1731         stmmac_start_tx(priv, priv->ioaddr, chan);
1732 }
1733
1734 /**
1735  * stmmac_stop_rx_dma - stop RX DMA channel
1736  * @priv: driver private structure
1737  * @chan: RX channel index
1738  * Description:
1739  * This stops a RX DMA channel
1740  */
1741 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1742 {
1743         netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1744         stmmac_stop_rx(priv, priv->ioaddr, chan);
1745 }
1746
1747 /**
1748  * stmmac_stop_tx_dma - stop TX DMA channel
1749  * @priv: driver private structure
1750  * @chan: TX channel index
1751  * Description:
1752  * This stops a TX DMA channel
1753  */
1754 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1755 {
1756         netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1757         stmmac_stop_tx(priv, priv->ioaddr, chan);
1758 }
1759
1760 /**
1761  * stmmac_start_all_dma - start all RX and TX DMA channels
1762  * @priv: driver private structure
1763  * Description:
1764  * This starts all the RX and TX DMA channels
1765  */
1766 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1767 {
1768         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1769         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1770         u32 chan = 0;
1771
1772         for (chan = 0; chan < rx_channels_count; chan++)
1773                 stmmac_start_rx_dma(priv, chan);
1774
1775         for (chan = 0; chan < tx_channels_count; chan++)
1776                 stmmac_start_tx_dma(priv, chan);
1777 }
1778
1779 /**
1780  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1781  * @priv: driver private structure
1782  * Description:
1783  * This stops the RX and TX DMA channels
1784  */
1785 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1786 {
1787         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1788         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1789         u32 chan = 0;
1790
1791         for (chan = 0; chan < rx_channels_count; chan++)
1792                 stmmac_stop_rx_dma(priv, chan);
1793
1794         for (chan = 0; chan < tx_channels_count; chan++)
1795                 stmmac_stop_tx_dma(priv, chan);
1796 }
1797
1798 /**
1799  *  stmmac_dma_operation_mode - HW DMA operation mode
1800  *  @priv: driver private structure
1801  *  Description: it is used for configuring the DMA operation mode register in
1802  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1803  */
1804 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1805 {
1806         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1807         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1808         int rxfifosz = priv->plat->rx_fifo_size;
1809         int txfifosz = priv->plat->tx_fifo_size;
1810         u32 txmode = 0;
1811         u32 rxmode = 0;
1812         u32 chan = 0;
1813         u8 qmode = 0;
1814
1815         if (rxfifosz == 0)
1816                 rxfifosz = priv->dma_cap.rx_fifo_size;
1817         if (txfifosz == 0)
1818                 txfifosz = priv->dma_cap.tx_fifo_size;
1819
1820         /* Adjust for real per queue fifo size */
1821         rxfifosz /= rx_channels_count;
1822         txfifosz /= tx_channels_count;
1823
1824         if (priv->plat->force_thresh_dma_mode) {
1825                 txmode = tc;
1826                 rxmode = tc;
1827         } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1828                 /*
1829                  * In case of GMAC, SF mode can be enabled
1830                  * to perform the TX COE in HW. This depends on:
1831                  * 1) TX COE being actually supported
1832                  * 2) there being no buggy Jumbo frame support
1833                  *    that requires the csum not to be inserted in the TDES.
1834                  */
1835                 txmode = SF_DMA_MODE;
1836                 rxmode = SF_DMA_MODE;
1837                 priv->xstats.threshold = SF_DMA_MODE;
1838         } else {
1839                 txmode = tc;
1840                 rxmode = SF_DMA_MODE;
1841         }
1842
1843         /* configure all channels */
1844         for (chan = 0; chan < rx_channels_count; chan++) {
1845                 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1846
1847                 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1848                                 rxfifosz, qmode);
1849                 stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
1850                                 chan);
1851         }
1852
1853         for (chan = 0; chan < tx_channels_count; chan++) {
1854                 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1855
1856                 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1857                                 txfifosz, qmode);
1858         }
1859 }
1860
1861 /**
1862  * stmmac_tx_clean - to manage the transmission completion
1863  * @priv: driver private structure
      * @budget: napi budget limiting the amount of work that can be done
1864  * @queue: TX queue index
1865  * Description: it reclaims the transmit resources after transmission completes.
1866  */
1867 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
1868 {
1869         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1870         unsigned int bytes_compl = 0, pkts_compl = 0;
1871         unsigned int entry, count = 0;
1872
1873         __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
1874
1875         priv->xstats.tx_clean++;
1876
1877         entry = tx_q->dirty_tx;
1878         while ((entry != tx_q->cur_tx) && (count < budget)) {
1879                 struct sk_buff *skb = tx_q->tx_skbuff[entry];
1880                 struct dma_desc *p;
1881                 int status;
1882
1883                 if (priv->extend_desc)
1884                         p = (struct dma_desc *)(tx_q->dma_etx + entry);
1885                 else
1886                         p = tx_q->dma_tx + entry;
1887
1888                 status = stmmac_tx_status(priv, &priv->dev->stats,
1889                                 &priv->xstats, p, priv->ioaddr);
1890                 /* Check if the descriptor is owned by the DMA */
1891                 if (unlikely(status & tx_dma_own))
1892                         break;
1893
1894                 count++;
1895
1896                 /* Make sure descriptor fields are read after reading
1897                  * the own bit.
1898                  */
1899                 dma_rmb();
1900
1901                 /* Just consider the last segment and ...*/
1902                 if (likely(!(status & tx_not_ls))) {
1903                         /* ... verify the status error condition */
1904                         if (unlikely(status & tx_err)) {
1905                                 priv->dev->stats.tx_errors++;
1906                         } else {
1907                                 priv->dev->stats.tx_packets++;
1908                                 priv->xstats.tx_pkt_n++;
1909                         }
1910                         stmmac_get_tx_hwtstamp(priv, p, skb);
1911                 }
1912
1913                 if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1914                         if (tx_q->tx_skbuff_dma[entry].map_as_page)
1915                                 dma_unmap_page(priv->device,
1916                                                tx_q->tx_skbuff_dma[entry].buf,
1917                                                tx_q->tx_skbuff_dma[entry].len,
1918                                                DMA_TO_DEVICE);
1919                         else
1920                                 dma_unmap_single(priv->device,
1921                                                  tx_q->tx_skbuff_dma[entry].buf,
1922                                                  tx_q->tx_skbuff_dma[entry].len,
1923                                                  DMA_TO_DEVICE);
1924                         tx_q->tx_skbuff_dma[entry].buf = 0;
1925                         tx_q->tx_skbuff_dma[entry].len = 0;
1926                         tx_q->tx_skbuff_dma[entry].map_as_page = false;
1927                 }
1928
1929                 stmmac_clean_desc3(priv, tx_q, p);
1930
1931                 tx_q->tx_skbuff_dma[entry].last_segment = false;
1932                 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1933
1934                 if (likely(skb != NULL)) {
1935                         pkts_compl++;
1936                         bytes_compl += skb->len;
1937                         dev_consume_skb_any(skb);
1938                         tx_q->tx_skbuff[entry] = NULL;
1939                 }
1940
1941                 stmmac_release_tx_desc(priv, p, priv->mode);
1942
1943                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1944         }
1945         tx_q->dirty_tx = entry;
1946
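        /* Report the completed packets/bytes to the BQL layer */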
1947         netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1948                                   pkts_compl, bytes_compl);
1949
1950         if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1951                                                                 queue))) &&
1952             stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1953
1954                 netif_dbg(priv, tx_done, priv->dev,
1955                           "%s: restart transmit\n", __func__);
1956                 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1957         }
1958
1959         if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1960                 stmmac_enable_eee_mode(priv);
1961                 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1962         }
1963
1964         /* We still have pending packets, let's call for a new scheduling */
1965         if (tx_q->dirty_tx != tx_q->cur_tx)
1966                 mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
1967
1968         __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
1969
1970         return count;
1971 }
1972
1973 /**
1974  * stmmac_tx_err - to manage the tx error
1975  * @priv: driver private structure
1976  * @chan: channel index
1977  * Description: it cleans the descriptors and restarts the transmission
1978  * in case of transmission errors.
1979  */
1980 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1981 {
1982         struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1983         int i;
1984
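        /* Stop the queue, re-initialize the ring to a clean state and
         * restart the DMA before waking the queue again.
         */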
1985         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1986
1987         stmmac_stop_tx_dma(priv, chan);
1988         dma_free_tx_skbufs(priv, chan);
1989         for (i = 0; i < DMA_TX_SIZE; i++)
1990                 if (priv->extend_desc)
1991                         stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1992                                         priv->mode, (i == DMA_TX_SIZE - 1));
1993                 else
1994                         stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1995                                         priv->mode, (i == DMA_TX_SIZE - 1));
1996         tx_q->dirty_tx = 0;
1997         tx_q->cur_tx = 0;
1998         tx_q->mss = 0;
1999         netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
2000         stmmac_start_tx_dma(priv, chan);
2001
2002         priv->dev->stats.tx_errors++;
2003         netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2004 }
2005
2006 /**
2007  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2008  *  @priv: driver private structure
2009  *  @txmode: TX operating mode
2010  *  @rxmode: RX operating mode
2011  *  @chan: channel index
2012  *  Description: it is used for configuring the DMA operation mode at
2013  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2014  *  mode.
2015  */
2016 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2017                                           u32 rxmode, u32 chan)
2018 {
2019         u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2020         u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2021         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2022         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2023         int rxfifosz = priv->plat->rx_fifo_size;
2024         int txfifosz = priv->plat->tx_fifo_size;
2025
2026         if (rxfifosz == 0)
2027                 rxfifosz = priv->dma_cap.rx_fifo_size;
2028         if (txfifosz == 0)
2029                 txfifosz = priv->dma_cap.tx_fifo_size;
2030
2031         /* Adjust for real per queue fifo size */
2032         rxfifosz /= rx_channels_count;
2033         txfifosz /= tx_channels_count;
2034
2035         stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2036         stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2037 }
2038
2039 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2040 {
2041         int ret;
2042
2043         ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2044                         priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2045         if (ret && (ret != -EINVAL)) {
2046                 stmmac_global_err(priv);
2047                 return true;
2048         }
2049
2050         return false;
2051 }
2052
2053 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
2054 {
2055         int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2056                                                  &priv->xstats, chan);
2057         struct stmmac_channel *ch = &priv->channel[chan];
2058
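        /* Mask further DMA interrupts for this channel and defer the
         * pending work to the RX/TX NAPI contexts.
         */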
2059         if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2060                 stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2061                 napi_schedule_irqoff(&ch->rx_napi);
2062         }
2063
2064         if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2065                 stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2066                 napi_schedule_irqoff(&ch->tx_napi);
2067         }
2068
2069         return status;
2070 }
2071
2072 /**
2073  * stmmac_dma_interrupt - DMA ISR
2074  * @priv: driver private structure
2075  * Description: this is the DMA ISR. It is called by the main ISR.
2076  * It calls the dwmac dma routine and schedules the poll method when there
2077  * is work to be done.
2078  */
2079 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2080 {
2081         u32 tx_channel_count = priv->plat->tx_queues_to_use;
2082         u32 rx_channel_count = priv->plat->rx_queues_to_use;
2083         u32 channels_to_check = tx_channel_count > rx_channel_count ?
2084                                 tx_channel_count : rx_channel_count;
2085         u32 chan;
2086         int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2087
2088         /* Make sure we never check beyond our status buffer. */
2089         if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2090                 channels_to_check = ARRAY_SIZE(status);
2091
2092         for (chan = 0; chan < channels_to_check; chan++)
2093                 status[chan] = stmmac_napi_check(priv, chan);
2094
2095         for (chan = 0; chan < tx_channel_count; chan++) {
2096                 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2097                         /* Try to bump up the dma threshold on this failure */
2098                         if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2099                             (tc <= 256)) {
2100                                 tc += 64;
2101                                 if (priv->plat->force_thresh_dma_mode)
2102                                         stmmac_set_dma_operation_mode(priv,
2103                                                                       tc,
2104                                                                       tc,
2105                                                                       chan);
2106                                 else
2107                                         stmmac_set_dma_operation_mode(priv,
2108                                                                     tc,
2109                                                                     SF_DMA_MODE,
2110                                                                     chan);
2111                                 priv->xstats.threshold = tc;
2112                         }
2113                 } else if (unlikely(status[chan] == tx_hard_error)) {
2114                         stmmac_tx_err(priv, chan);
2115                 }
2116         }
2117 }
2118
2119 /**
2120  * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
2121  * @priv: driver private structure
2122  * Description: this masks the MMC irq since the counters are managed in SW.
2123  */
2124 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2125 {
2126         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2127                             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2128
2129         dwmac_mmc_intr_all_mask(priv->mmcaddr);
2130
2131         if (priv->dma_cap.rmon) {
2132                 dwmac_mmc_ctrl(priv->mmcaddr, mode);
2133                 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2134         } else
2135                 netdev_info(priv->dev, "No MAC Management Counters available\n");
2136 }
2137
2138 /**
2139  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2140  * @priv: driver private structure
2141  * Description:
2142  *  new GMAC chip generations have a new register to indicate the
2143  *  presence of the optional features/functions.
2144  *  This can also be used to override the value passed through the
2145  *  platform, which is necessary for old MAC10/100 and GMAC chips.
2146  */
2147 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2148 {
2149         return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2150 }
2151
2152 /**
2153  * stmmac_check_ether_addr - check if the MAC addr is valid
2154  * @priv: driver private structure
2155  * Description:
2156  * it verifies that the MAC address is valid; if it is not, a random
2157  * MAC address is generated.
2158  */
2159 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2160 {
2161         if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2162                 stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2163                 if (!is_valid_ether_addr(priv->dev->dev_addr))
2164                         eth_hw_addr_random(priv->dev);
2165                 netdev_info(priv->dev, "device MAC address %pM\n",
2166                             priv->dev->dev_addr);
2167         }
2168 }
2169
2170 /**
2171  * stmmac_init_dma_engine - DMA init.
2172  * @priv: driver private structure
2173  * Description:
2174  * It initializes the DMA by invoking the specific MAC/GMAC callback.
2175  * Some DMA parameters can be passed from the platform;
2176  * if they are not passed, a default is kept for the MAC or GMAC.
2177  */
2178 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2179 {
2180         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2181         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2182         u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2183         struct stmmac_rx_queue *rx_q;
2184         struct stmmac_tx_queue *tx_q;
2185         u32 chan = 0;
2186         int atds = 0;
2187         int ret = 0;
2188
2189         if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2190                 dev_err(priv->device, "Invalid DMA configuration\n");
2191                 return -EINVAL;
2192         }
2193
2194         if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2195                 atds = 1;
2196
2197         ret = stmmac_reset(priv, priv->ioaddr);
2198         if (ret) {
2199                 dev_err(priv->device, "Failed to reset the dma\n");
2200                 return ret;
2201         }
2202
2203         /* DMA Configuration */
2204         stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2205
2206         if (priv->plat->axi)
2207                 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2208
2209         /* DMA RX Channel Configuration */
2210         for (chan = 0; chan < rx_channels_count; chan++) {
2211                 rx_q = &priv->rx_queue[chan];
2212
2213                 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2214                                     rx_q->dma_rx_phy, chan);
2215
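                /* The RX tail pointer initially points just past the last
                 * descriptor, so the whole ring is available to the DMA.
                 */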
2216                 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2217                             (DMA_RX_SIZE * sizeof(struct dma_desc));
2218                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2219                                        rx_q->rx_tail_addr, chan);
2220         }
2221
2222         /* DMA TX Channel Configuration */
2223         for (chan = 0; chan < tx_channels_count; chan++) {
2224                 tx_q = &priv->tx_queue[chan];
2225
2226                 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2227                                     tx_q->dma_tx_phy, chan);
2228
2229                 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2230                 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2231                                        tx_q->tx_tail_addr, chan);
2232         }
2233
2234         /* DMA CSR Channel configuration */
2235         for (chan = 0; chan < dma_csr_ch; chan++)
2236                 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2237
2238         return ret;
2239 }
2240
2241 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2242 {
2243         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2244
2245         mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
2246 }
2247
2248 /**
2249  * stmmac_tx_timer - mitigation sw timer for tx.
2250  * @t: pointer to the per-queue TX coalescing timer
2251  * Description:
2252  * This is the timer handler used to schedule the TX NAPI context, which runs stmmac_tx_clean.
2253  */
2254 static void stmmac_tx_timer(struct timer_list *t)
2255 {
2256         struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
2257         struct stmmac_priv *priv = tx_q->priv_data;
2258         struct stmmac_channel *ch;
2259
2260         ch = &priv->channel[tx_q->queue_index];
2261
2262         /*
2263          * If NAPI is already running we can miss some events. Let's rearm
2264          * the timer and try again.
2265          */
2266         if (likely(napi_schedule_prep(&ch->tx_napi)))
2267                 __napi_schedule(&ch->tx_napi);
2268         else
2269                 mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
2270 }
2271
2272 /**
2273  * stmmac_init_tx_coalesce - init tx mitigation options.
2274  * @priv: driver private structure
2275  * Description:
2276  * This inits the transmit coalesce parameters: i.e. timer rate,
2277  * timer handler and default threshold used for enabling the
2278  * interrupt on completion bit.
2279  */
2280 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2281 {
2282         u32 tx_channel_count = priv->plat->tx_queues_to_use;
2283         u32 chan;
2284
2285         priv->tx_coal_frames = STMMAC_TX_FRAMES;
2286         priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2287
2288         for (chan = 0; chan < tx_channel_count; chan++) {
2289                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2290
2291                 timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
2292         }
2293 }
2294
2295 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2296 {
2297         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2298         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2299         u32 chan;
2300
2301         /* set TX ring length */
2302         for (chan = 0; chan < tx_channels_count; chan++)
2303                 stmmac_set_tx_ring_len(priv, priv->ioaddr,
2304                                 (DMA_TX_SIZE - 1), chan);
2305
2306         /* set RX ring length */
2307         for (chan = 0; chan < rx_channels_count; chan++)
2308                 stmmac_set_rx_ring_len(priv, priv->ioaddr,
2309                                 (DMA_RX_SIZE - 1), chan);
2310 }
2311
2312 /**
2313  *  stmmac_set_tx_queue_weight - Set TX queue weight
2314  *  @priv: driver private structure
2315  *  Description: It is used for setting the TX queue weights
2316  */
2317 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2318 {
2319         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2320         u32 weight;
2321         u32 queue;
2322
2323         for (queue = 0; queue < tx_queues_count; queue++) {
2324                 weight = priv->plat->tx_queues_cfg[queue].weight;
2325                 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2326         }
2327 }
2328
2329 /**
2330  *  stmmac_configure_cbs - Configure CBS in TX queue
2331  *  @priv: driver private structure
2332  *  Description: It is used for configuring CBS in AVB TX queues
2333  */
2334 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2335 {
2336         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2337         u32 mode_to_use;
2338         u32 queue;
2339
2340         /* queue 0 is reserved for legacy traffic */
2341         for (queue = 1; queue < tx_queues_count; queue++) {
2342                 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2343                 if (mode_to_use == MTL_QUEUE_DCB)
2344                         continue;
2345
2346                 stmmac_config_cbs(priv, priv->hw,
2347                                 priv->plat->tx_queues_cfg[queue].send_slope,
2348                                 priv->plat->tx_queues_cfg[queue].idle_slope,
2349                                 priv->plat->tx_queues_cfg[queue].high_credit,
2350                                 priv->plat->tx_queues_cfg[queue].low_credit,
2351                                 queue);
2352         }
2353 }
2354
2355 /**
2356  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2357  *  @priv: driver private structure
2358  *  Description: It is used for mapping RX queues to RX dma channels
2359  */
2360 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2361 {
2362         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2363         u32 queue;
2364         u32 chan;
2365
2366         for (queue = 0; queue < rx_queues_count; queue++) {
2367                 chan = priv->plat->rx_queues_cfg[queue].chan;
2368                 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2369         }
2370 }
2371
2372 /**
2373  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2374  *  @priv: driver private structure
2375  *  Description: It is used for configuring the RX Queue Priority
2376  */
2377 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2378 {
2379         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2380         u32 queue;
2381         u32 prio;
2382
2383         for (queue = 0; queue < rx_queues_count; queue++) {
2384                 if (!priv->plat->rx_queues_cfg[queue].use_prio)
2385                         continue;
2386
2387                 prio = priv->plat->rx_queues_cfg[queue].prio;
2388                 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2389         }
2390 }
2391
2392 /**
2393  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2394  *  @priv: driver private structure
2395  *  Description: It is used for configuring the TX Queue Priority
2396  */
2397 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2398 {
2399         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2400         u32 queue;
2401         u32 prio;
2402
2403         for (queue = 0; queue < tx_queues_count; queue++) {
2404                 if (!priv->plat->tx_queues_cfg[queue].use_prio)
2405                         continue;
2406
2407                 prio = priv->plat->tx_queues_cfg[queue].prio;
2408                 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2409         }
2410 }
2411
2412 /**
2413  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2414  *  @priv: driver private structure
2415  *  Description: It is used for configuring the RX queue routing
2416  */
2417 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2418 {
2419         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2420         u32 queue;
2421         u8 packet;
2422
2423         for (queue = 0; queue < rx_queues_count; queue++) {
2424                 /* no specific packet type routing specified for the queue */
2425                 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2426                         continue;
2427
2428                 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2429                 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2430         }
2431 }
2432
2433 /**
2434  *  stmmac_mtl_configuration - Configure MTL
2435  *  @priv: driver private structure
2436  *  Description: It is used for configuring the MTL
2437  */
2438 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2439 {
2440         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2441         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2442
2443         if (tx_queues_count > 1)
2444                 stmmac_set_tx_queue_weight(priv);
2445
2446         /* Configure MTL RX algorithms */
2447         if (rx_queues_count > 1)
2448                 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2449                                 priv->plat->rx_sched_algorithm);
2450
2451         /* Configure MTL TX algorithms */
2452         if (tx_queues_count > 1)
2453                 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2454                                 priv->plat->tx_sched_algorithm);
2455
2456         /* Configure CBS in AVB TX queues */
2457         if (tx_queues_count > 1)
2458                 stmmac_configure_cbs(priv);
2459
2460         /* Map RX MTL to DMA channels */
2461         stmmac_rx_queue_dma_chan_map(priv);
2462
2463         /* Enable MAC RX Queues */
2464         stmmac_mac_enable_rx_queues(priv);
2465
2466         /* Set RX priorities */
2467         if (rx_queues_count > 1)
2468                 stmmac_mac_config_rx_queues_prio(priv);
2469
2470         /* Set TX priorities */
2471         if (tx_queues_count > 1)
2472                 stmmac_mac_config_tx_queues_prio(priv);
2473
2474         /* Set RX routing */
2475         if (rx_queues_count > 1)
2476                 stmmac_mac_config_rx_queues_routing(priv);
2477 }
2478
2479 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
2480 {
2481         if (priv->dma_cap.asp) {
2482                 netdev_info(priv->dev, "Enabling Safety Features\n");
2483                 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
2484         } else {
2485                 netdev_info(priv->dev, "No Safety Features support found\n");
2486         }
2487 }
2488
2489 /**
2490  * stmmac_hw_setup - setup mac in a usable state.
2491  *  @dev : pointer to the device structure.
      *  @init_ptp: also initialize the PTP support when set.
2492  *  Description:
2493  *  this is the main function to set up the HW in a usable state: the
2494  *  dma engine is reset, the core registers are configured (e.g. AXI,
2495  *  checksum features, timers) and the DMA is ready to start receiving
2496  *  and transmitting.
2497  *  Return value:
2498  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2499  *  file on failure.
2500  */
2501 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2502 {
2503         struct stmmac_priv *priv = netdev_priv(dev);
2504         u32 rx_cnt = priv->plat->rx_queues_to_use;
2505         u32 tx_cnt = priv->plat->tx_queues_to_use;
2506         u32 chan;
2507         int ret;
2508
2509         /* DMA initialization and SW reset */
2510         ret = stmmac_init_dma_engine(priv);
2511         if (ret < 0) {
2512                 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2513                            __func__);
2514                 return ret;
2515         }
2516
2517         /* Copy the MAC addr into the HW  */
2518         stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2519
2520         /* PS and related bits will be programmed according to the speed */
2521         if (priv->hw->pcs) {
2522                 int speed = priv->plat->mac_port_sel_speed;
2523
2524                 if ((speed == SPEED_10) || (speed == SPEED_100) ||
2525                     (speed == SPEED_1000)) {
2526                         priv->hw->ps = speed;
2527                 } else {
2528                         dev_warn(priv->device, "invalid port speed\n");
2529                         priv->hw->ps = 0;
2530                 }
2531         }
2532
2533         /* Initialize the MAC Core */
2534         stmmac_core_init(priv, priv->hw, dev);
2535
2536         /* Initialize MTL */
2537         stmmac_mtl_configuration(priv);
2538
2539         /* Initialize Safety Features */
2540         stmmac_safety_feat_configuration(priv);
2541
2542         ret = stmmac_rx_ipc(priv, priv->hw);
2543         if (!ret) {
2544                 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2545                 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2546                 priv->hw->rx_csum = 0;
2547         }
2548
2549         /* Enable the MAC Rx/Tx */
2550         stmmac_mac_set(priv, priv->ioaddr, true);
2551
2552         /* Set the HW DMA mode and the COE */
2553         stmmac_dma_operation_mode(priv);
2554
2555         stmmac_mmc_setup(priv);
2556
2557         if (init_ptp) {
2558                 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2559                 if (ret < 0)
2560                         netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2561
2562                 ret = stmmac_init_ptp(priv);
2563                 if (ret == -EOPNOTSUPP)
2564                         netdev_warn(priv->dev, "PTP not supported by HW\n");
2565                 else if (ret)
2566                         netdev_warn(priv->dev, "PTP init failed\n");
2567         }
2568
2569         priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2570
2571         if (priv->use_riwt) {
2572                 ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2573                 if (!ret)
2574                         priv->rx_riwt = MAX_DMA_RIWT;
2575         }
2576
2577         if (priv->hw->pcs)
2578                 stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
2579
2580         /* set TX and RX rings length */
2581         stmmac_set_rings_length(priv);
2582
2583         /* Enable TSO */
2584         if (priv->tso) {
2585                 for (chan = 0; chan < tx_cnt; chan++)
2586                         stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2587         }
2588
2589         /* Start the ball rolling... */
2590         stmmac_start_all_dma(priv);
2591
2592         return 0;
2593 }
2594
2595 static void stmmac_hw_teardown(struct net_device *dev)
2596 {
2597         struct stmmac_priv *priv = netdev_priv(dev);
2598
2599         clk_disable_unprepare(priv->plat->clk_ptp_ref);
2600 }
2601
2602 /**
2603  *  stmmac_open - open entry point of the driver
2604  *  @dev : pointer to the device structure.
2605  *  Description:
2606  *  This function is the open entry point of the driver.
2607  *  Return value:
2608  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2609  *  file on failure.
2610  */
2611 static int stmmac_open(struct net_device *dev)
2612 {
2613         struct stmmac_priv *priv = netdev_priv(dev);
2614         u32 chan;
2615         int ret;
2616
2617         stmmac_check_ether_addr(priv);
2618
2619         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2620             priv->hw->pcs != STMMAC_PCS_TBI &&
2621             priv->hw->pcs != STMMAC_PCS_RTBI) {
2622                 ret = stmmac_init_phy(dev);
2623                 if (ret) {
2624                         netdev_err(priv->dev,
2625                                    "%s: Cannot attach to PHY (error: %d)\n",
2626                                    __func__, ret);
2627                         return ret;
2628                 }
2629         }
2630
2631         /* Extra statistics */
2632         memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2633         priv->xstats.threshold = tc;
2634
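        /* Round the requested DMA buffer size up to a cacheline multiple */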
2635         priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2636         priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2637
2638         ret = alloc_dma_desc_resources(priv);
2639         if (ret < 0) {
2640                 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2641                            __func__);
2642                 goto dma_desc_error;
2643         }
2644
2645         ret = init_dma_desc_rings(dev, GFP_KERNEL);
2646         if (ret < 0) {
2647                 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2648                            __func__);
2649                 goto init_error;
2650         }
2651
2652         ret = stmmac_hw_setup(dev, true);
2653         if (ret < 0) {
2654                 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2655                 goto init_error;
2656         }
2657
2658         stmmac_init_tx_coalesce(priv);
2659
2660         if (dev->phydev)
2661                 phy_start(dev->phydev);
2662
2663         /* Request the IRQ lines */
2664         ret = request_irq(dev->irq, stmmac_interrupt,
2665                           IRQF_SHARED, dev->name, dev);
2666         if (unlikely(ret < 0)) {
2667                 netdev_err(priv->dev,
2668                            "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2669                            __func__, dev->irq, ret);
2670                 goto irq_error;
2671         }
2672
2673         /* Request the Wake IRQ in case another line is used for WoL */
2674         if (priv->wol_irq != dev->irq) {
2675                 ret = request_irq(priv->wol_irq, stmmac_interrupt,
2676                                   IRQF_SHARED, dev->name, dev);
2677                 if (unlikely(ret < 0)) {
2678                         netdev_err(priv->dev,
2679                                    "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2680                                    __func__, priv->wol_irq, ret);
2681                         goto wolirq_error;
2682                 }
2683         }
2684
2685         /* Request the LPI IRQ in case a separate line is used for it */
2686         if (priv->lpi_irq > 0) {
2687                 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2688                                   dev->name, dev);
2689                 if (unlikely(ret < 0)) {
2690                         netdev_err(priv->dev,
2691                                    "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2692                                    __func__, priv->lpi_irq, ret);
2693                         goto lpiirq_error;
2694                 }
2695         }
2696
2697         stmmac_enable_all_queues(priv);
2698         stmmac_start_all_queues(priv);
2699
2700         return 0;
2701
2702 lpiirq_error:
2703         if (priv->wol_irq != dev->irq)
2704                 free_irq(priv->wol_irq, dev);
2705 wolirq_error:
2706         free_irq(dev->irq, dev);
2707 irq_error:
2708         if (dev->phydev)
2709                 phy_stop(dev->phydev);
2710
2711         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2712                 del_timer_sync(&priv->tx_queue[chan].txtimer);
2713
2714         stmmac_hw_teardown(dev);
2715 init_error:
2716         free_dma_desc_resources(priv);
2717 dma_desc_error:
2718         if (dev->phydev)
2719                 phy_disconnect(dev->phydev);
2720
2721         return ret;
2722 }
2723
2724 /**
2725  *  stmmac_release - close entry point of the driver
2726  *  @dev : device pointer.
2727  *  Description:
2728  *  This is the stop entry point of the driver.
2729  */
2730 static int stmmac_release(struct net_device *dev)
2731 {
2732         struct stmmac_priv *priv = netdev_priv(dev);
2733         u32 chan;
2734
2735         if (priv->eee_enabled)
2736                 del_timer_sync(&priv->eee_ctrl_timer);
2737
2738         /* Stop and disconnect the PHY */
2739         if (dev->phydev) {
2740                 phy_stop(dev->phydev);
2741                 phy_disconnect(dev->phydev);
2742         }
2743
2744         stmmac_stop_all_queues(priv);
2745
2746         stmmac_disable_all_queues(priv);
2747
2748         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2749                 del_timer_sync(&priv->tx_queue[chan].txtimer);
2750
2751         /* Free the IRQ lines */
2752         free_irq(dev->irq, dev);
2753         if (priv->wol_irq != dev->irq)
2754                 free_irq(priv->wol_irq, dev);
2755         if (priv->lpi_irq > 0)
2756                 free_irq(priv->lpi_irq, dev);
2757
2758         /* Stop TX/RX DMA and clear the descriptors */
2759         stmmac_stop_all_dma(priv);
2760
2761         /* Release and free the Rx/Tx resources */
2762         free_dma_desc_resources(priv);
2763
2764         /* Disable the MAC Rx/Tx */
2765         stmmac_mac_set(priv, priv->ioaddr, false);
2766
2767         netif_carrier_off(dev);
2768
2769         stmmac_release_ptp(priv);
2770
2771         return 0;
2772 }
2773
2774 /**
2775  *  stmmac_tso_allocator - fill the TSO payload descriptors
2776  *  @priv: driver private structure
2777  *  @des: buffer start address
2778  *  @total_len: total length to fill in descriptors
2779  *  @last_segment: condition for the last descriptor
2780  *  @queue: TX queue index
2781  *  Description:
2782  *  This function fills the descriptors, taking new ones as needed,
2783  *  according to the buffer length to fill.
2784  */
2785 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2786                                  int total_len, bool last_segment, u32 queue)
2787 {
2788         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2789         struct dma_desc *desc;
2790         u32 buff_size;
2791         int tmp_len;
2792
2793         tmp_len = total_len;
2794
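        /* Consume the payload in chunks of at most TSO_MAX_BUFF_SIZE bytes,
         * taking one descriptor per chunk.
         */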
2795         while (tmp_len > 0) {
2796                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2797                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2798                 desc = tx_q->dma_tx + tx_q->cur_tx;
2799
2800                 desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2801                 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2802                             TSO_MAX_BUFF_SIZE : tmp_len;
2803
2804                 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
2805                                 0, 1,
2806                                 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2807                                 0, 0);
2808
2809                 tmp_len -= TSO_MAX_BUFF_SIZE;
2810         }
2811 }
2812
2813 /**
2814  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2815  *  @skb : the socket buffer
2816  *  @dev : device pointer
2817  *  Description: this is the transmit function that is called on TSO frames
2818  *  (support available on GMAC4 and newer chips).
2819  *  The diagram below shows the ring programming in case of TSO frames:
2820  *
2821  *  First Descriptor
2822  *   --------
2823  *   | DES0 |---> buffer1 = L2/L3/L4 header
2824  *   | DES1 |---> TCP Payload (can continue on next descr...)
2825  *   | DES2 |---> buffer 1 and 2 len
2826  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2827  *   --------
2828  *      |
2829  *     ...
2830  *      |
2831  *   --------
2832  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2833  *   | DES1 | --|
2834  *   | DES2 | --> buffer 1 and 2 len
2835  *   | DES3 |
2836  *   --------
2837  *
2838  * The MSS is fixed while TSO is enabled, so the TDES3 ctx field only needs reprogramming when it changes.
2839  */
2840 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2841 {
2842         struct dma_desc *desc, *first, *mss_desc = NULL;
2843         struct stmmac_priv *priv = netdev_priv(dev);
2844         int nfrags = skb_shinfo(skb)->nr_frags;
2845         u32 queue = skb_get_queue_mapping(skb);
2846         unsigned int first_entry, des;
2847         struct stmmac_tx_queue *tx_q;
2848         int tmp_pay_len = 0;
2849         u32 pay_len, mss;
2850         u8 proto_hdr_len;
2851         int i;
2852
2853         tx_q = &priv->tx_queue[queue];
2854
2855         /* Compute header lengths */
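        /* For example, an untagged IPv4/TCP frame without IP or TCP options
         * would typically give 14 (Ethernet) + 20 (IP) = 34 bytes of
         * transport offset plus a 20 byte TCP header, i.e. proto_hdr_len = 54.
         */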
2856         proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2857
2858         /* Desc availability based on the threshold should be safe enough */
2859         if (unlikely(stmmac_tx_avail(priv, queue) <
2860                 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2861                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2862                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2863                                                                 queue));
2864                         /* This is a hard error, log it. */
2865                         netdev_err(priv->dev,
2866                                    "%s: Tx Ring full when queue awake\n",
2867                                    __func__);
2868                 }
2869                 return NETDEV_TX_BUSY;
2870         }
2871
2872         pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2873
2874         mss = skb_shinfo(skb)->gso_size;
2875
2876         /* set new MSS value if needed */
2877         if (mss != tx_q->mss) {
2878                 mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2879                 stmmac_set_mss(priv, mss_desc, mss);
2880                 tx_q->mss = mss;
2881                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2882                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2883         }
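        /* Note: changing the MSS consumes one extra ring slot for a context
         * descriptor; its OWN bit is intentionally set last, see the
         * mss_desc handling further down.
         */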
2884
2885         if (netif_msg_tx_queued(priv)) {
2886                 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2887                         __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2888                 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2889                         skb->data_len);
2890         }
2891
2892         first_entry = tx_q->cur_tx;
2893         WARN_ON(tx_q->tx_skbuff[first_entry]);
2894
2895         desc = tx_q->dma_tx + first_entry;
2896         first = desc;
2897
2898         /* first descriptor: fill Headers on Buf1 */
2899         des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2900                              DMA_TO_DEVICE);
2901         if (dma_mapping_error(priv->device, des))
2902                 goto dma_map_err;
2903
2904         tx_q->tx_skbuff_dma[first_entry].buf = des;
2905         tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2906
2907         first->des0 = cpu_to_le32(des);
2908
2909         /* Fill start of payload in buff2 of first descriptor */
2910         if (pay_len)
2911                 first->des1 = cpu_to_le32(des + proto_hdr_len);
2912
2913         /* If needed take extra descriptors to fill the remaining payload */
2914         tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2915
2916         stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
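        /* If the whole linear payload already fits in the first descriptor's
         * second buffer, tmp_pay_len is zero or negative and the call above
         * simply does nothing.
         */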
2917
2918         /* Prepare fragments */
2919         for (i = 0; i < nfrags; i++) {
2920                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2921
2922                 des = skb_frag_dma_map(priv->device, frag, 0,
2923                                        skb_frag_size(frag),
2924                                        DMA_TO_DEVICE);
2925                 if (dma_mapping_error(priv->device, des))
2926                         goto dma_map_err;
2927
2928                 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2929                                      (i == nfrags - 1), queue);
2930
2931                 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2932                 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2933                 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2934         }
2935
2936         tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2937
2938         /* Only the last descriptor gets to point to the skb. */
2939         tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2940
2941         /* We've used all descriptors we need for this skb, however,
2942          * advance cur_tx so that it references a fresh descriptor.
2943          * ndo_start_xmit will fill this descriptor the next time it's
2944          * called and stmmac_tx_clean may clean up to this descriptor.
2945          */
2946         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2947
2948         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2949                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2950                           __func__);
2951                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2952         }
2953
2954         dev->stats.tx_bytes += skb->len;
2955         priv->xstats.tx_tso_frames++;
2956         priv->xstats.tx_tso_nfrags += nfrags;
2957
2958         /* Manage tx mitigation */
2959         tx_q->tx_count_frames += nfrags + 1;
2960         if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
2961                 stmmac_set_tx_ic(priv, desc);
2962                 priv->xstats.tx_set_ic_bit++;
2963                 tx_q->tx_count_frames = 0;
2964         } else {
2965                 stmmac_tx_timer_arm(priv, queue);
2966         }
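        /* Illustratively, if tx_coal_frames were, say, 25, an interrupt on
         * completion would be requested roughly once every 25 queued frames;
         * otherwise completions are reaped when the per-queue tx timer fires.
         */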
2967
2968         skb_tx_timestamp(skb);
2969
2970         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2971                      priv->hwts_tx_en)) {
2972                 /* declare that device is doing timestamping */
2973                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2974                 stmmac_enable_tx_timestamp(priv, first);
2975         }
2976
2977         /* Complete the first descriptor before granting the DMA */
2978         stmmac_prepare_tso_tx_desc(priv, first, 1,
2979                         proto_hdr_len,
2980                         pay_len,
2981                         1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2982                         tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2983
2984         /* If context desc is used to change MSS */
2985         if (mss_desc) {
2986                 /* Make sure that first descriptor has been completely
2987                  * written, including its own bit. This is because MSS is
2988                  * actually before first descriptor, so we need to make
2989                  * sure that MSS's own bit is the last thing written.
2990                  */
2991                 dma_wmb();
2992                 stmmac_set_tx_owner(priv, mss_desc);
2993         }
2994
2995         /* The own bit must be the last thing written when preparing the
2996          * descriptor, and a barrier is then needed to make sure that
2997          * everything is coherent before granting the DMA engine.
2998          */
2999         wmb();
3000
3001         if (netif_msg_pktdata(priv)) {
3002                 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
3003                         __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3004                         tx_q->cur_tx, first, nfrags);
3005
3006                 stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
3007
3008                 pr_info(">>> frame to be transmitted: ");
3009                 print_pkt(skb->data, skb_headlen(skb));
3010         }
3011
3012         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3013
3014         tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3015         stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3016
3017         return NETDEV_TX_OK;
3018
3019 dma_map_err:
3020         dev_err(priv->device, "Tx dma map failed\n");
3021         dev_kfree_skb(skb);
3022         priv->dev->stats.tx_dropped++;
3023         return NETDEV_TX_OK;
3024 }
3025
3026 /**
3027  *  stmmac_xmit - Tx entry point of the driver
3028  *  @skb : the socket buffer
3029  *  @dev : device pointer
3030  *  Description : this is the tx entry point of the driver.
3031  *  It programs the chain or the ring and supports oversized frames
3032  *  and SG feature.
3033  */
3034 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3035 {
3036         struct stmmac_priv *priv = netdev_priv(dev);
3037         unsigned int nopaged_len = skb_headlen(skb);
3038         int i, csum_insertion = 0, is_jumbo = 0;
3039         u32 queue = skb_get_queue_mapping(skb);
3040         int nfrags = skb_shinfo(skb)->nr_frags;
3041         int entry;
3042         unsigned int first_entry;
3043         struct dma_desc *desc, *first;
3044         struct stmmac_tx_queue *tx_q;
3045         unsigned int enh_desc;
3046         unsigned int des;
3047
3048         tx_q = &priv->tx_queue[queue];
3049
3050         if (priv->tx_path_in_lpi_mode)
3051                 stmmac_disable_eee_mode(priv);
3052
3053         /* Manage oversized TCP frames for GMAC4 device */
3054         if (skb_is_gso(skb) && priv->tso) {
3055                 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
3056                         /*
3057                          * There is no way to determine the number of TSO
3058                          * capable Queues. Let's always use Queue 0
3059                          * because if TSO is supported then at least this
3060                          * one will be capable.
3061                          */
3062                         skb_set_queue_mapping(skb, 0);
3063
3064                         return stmmac_tso_xmit(skb, dev);
3065                 }
3066         }
3067
3068         if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3069                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3070                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3071                                                                 queue));
3072                         /* This is a hard error, log it. */
3073                         netdev_err(priv->dev,
3074                                    "%s: Tx Ring full when queue awake\n",
3075                                    __func__);
3076                 }
3077                 return NETDEV_TX_BUSY;
3078         }
3079
3080         entry = tx_q->cur_tx;
3081         first_entry = entry;
3082         WARN_ON(tx_q->tx_skbuff[first_entry]);
3083
3084         csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3085
3086         if (likely(priv->extend_desc))
3087                 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3088         else
3089                 desc = tx_q->dma_tx + entry;
3090
3091         first = desc;
3092
3093         enh_desc = priv->plat->enh_desc;
3094         /* To program the descriptors according to the size of the frame */
3095         if (enh_desc)
3096                 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
3097
3098         if (unlikely(is_jumbo)) {
3099                 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
3100                 if (unlikely(entry < 0) && (entry != -EINVAL))
3101                         goto dma_map_err;
3102         }
3103
3104         for (i = 0; i < nfrags; i++) {
3105                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3106                 int len = skb_frag_size(frag);
3107                 bool last_segment = (i == (nfrags - 1));
3108
3109                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3110                 WARN_ON(tx_q->tx_skbuff[entry]);
3111
3112                 if (likely(priv->extend_desc))
3113                         desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3114                 else
3115                         desc = tx_q->dma_tx + entry;
3116
3117                 des = skb_frag_dma_map(priv->device, frag, 0, len,
3118                                        DMA_TO_DEVICE);
3119                 if (dma_mapping_error(priv->device, des))
3120                         goto dma_map_err; /* should reuse desc w/o issues */
3121
3122                 tx_q->tx_skbuff_dma[entry].buf = des;
3123
3124                 stmmac_set_desc_addr(priv, desc, des);
3125
3126                 tx_q->tx_skbuff_dma[entry].map_as_page = true;
3127                 tx_q->tx_skbuff_dma[entry].len = len;
3128                 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3129
3130                 /* Prepare the descriptor and set the own bit too */
3131                 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
3132                                 priv->mode, 1, last_segment, skb->len);
3133         }
3134
3135         /* Only the last descriptor gets to point to the skb. */
3136         tx_q->tx_skbuff[entry] = skb;
3137
3138         /* We've used all descriptors we need for this skb, however,
3139          * advance cur_tx so that it references a fresh descriptor.
3140          * ndo_start_xmit will fill this descriptor the next time it's
3141          * called and stmmac_tx_clean may clean up to this descriptor.
3142          */
3143         entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3144         tx_q->cur_tx = entry;
3145
3146         if (netif_msg_pktdata(priv)) {
3147                 void *tx_head;
3148
3149                 netdev_dbg(priv->dev,
3150                            "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3151                            __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3152                            entry, first, nfrags);
3153
3154                 if (priv->extend_desc)
3155                         tx_head = (void *)tx_q->dma_etx;
3156                 else
3157                         tx_head = (void *)tx_q->dma_tx;
3158
3159                 stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
3160
3161                 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3162                 print_pkt(skb->data, skb->len);
3163         }
3164
3165         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3166                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3167                           __func__);
3168                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3169         }
3170
3171         dev->stats.tx_bytes += skb->len;
3172
3173         /* According to the coalesce parameter, the IC bit for the latest
3174          * segment is reset and the timer is restarted to clean the tx status.
3175          * This approach takes care of the fragments: desc is the first
3176          * element in the case of no SG.
3177          */
3178         tx_q->tx_count_frames += nfrags + 1;
3179         if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
3180                 stmmac_set_tx_ic(priv, desc);
3181                 priv->xstats.tx_set_ic_bit++;
3182                 tx_q->tx_count_frames = 0;
3183         } else {
3184                 stmmac_tx_timer_arm(priv, queue);
3185         }
3186
3187         skb_tx_timestamp(skb);
3188
3189         /* Ready to fill the first descriptor and set the OWN bit w/o any
3190          * problems because all the descriptors are actually ready to be
3191          * passed to the DMA engine.
3192          */
3193         if (likely(!is_jumbo)) {
3194                 bool last_segment = (nfrags == 0);
3195
3196                 des = dma_map_single(priv->device, skb->data,
3197                                      nopaged_len, DMA_TO_DEVICE);
3198                 if (dma_mapping_error(priv->device, des))
3199                         goto dma_map_err;
3200
3201                 tx_q->tx_skbuff_dma[first_entry].buf = des;
3202
3203                 stmmac_set_desc_addr(priv, first, des);
3204
3205                 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3206                 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3207
3208                 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3209                              priv->hwts_tx_en)) {
3210                         /* declare that device is doing timestamping */
3211                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3212                         stmmac_enable_tx_timestamp(priv, first);
3213                 }
3214
3215                 /* Prepare the first descriptor setting the OWN bit too */
3216                 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3217                                 csum_insertion, priv->mode, 1, last_segment,
3218                                 skb->len);
3219
3220                 /* The own bit must be the last thing written when preparing the
3221                  * descriptor, and a barrier is then needed to make sure that
3222                  * everything is coherent before granting the DMA engine.
3223                  */
3224                 wmb();
3225         }
3226
3227         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3228
3229         stmmac_enable_dma_transmission(priv, priv->ioaddr);
3230
3231         tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3232         stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3233
3234         return NETDEV_TX_OK;
3235
3236 dma_map_err:
3237         netdev_err(priv->dev, "Tx DMA map failed\n");
3238         dev_kfree_skb(skb);
3239         priv->dev->stats.tx_dropped++;
3240         return NETDEV_TX_OK;
3241 }
3242
3243 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3244 {
3245         struct vlan_ethhdr *veth;
3246         __be16 vlan_proto;
3247         u16 vlanid;
3248
3249         veth = (struct vlan_ethhdr *)skb->data;
3250         vlan_proto = veth->h_vlan_proto;
3251
3252         if ((vlan_proto == htons(ETH_P_8021Q) &&
3253              dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3254             (vlan_proto == htons(ETH_P_8021AD) &&
3255              dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
3256                 /* pop the vlan tag */
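                /* The 802.1Q tag sits between the source MAC address and the
                 * EtherType, so moving the two MAC addresses forward by
                 * VLAN_HLEN and then pulling VLAN_HLEN bytes leaves an
                 * untagged frame; the extracted TCI is handed to the stack
                 * through the hwaccel helper below.
                 */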
3257                 vlanid = ntohs(veth->h_vlan_TCI);
3258                 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
3259                 skb_pull(skb, VLAN_HLEN);
3260                 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
3261         }
3262 }
3263
3264
3265 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3266 {
3267         if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3268                 return 0;
3269
3270         return 1;
3271 }
3272
3273 /**
3274  * stmmac_rx_refill - refill used skb preallocated buffers
3275  * @priv: driver private structure
3276  * @queue: RX queue index
3277  * Description : this is to reallocate the skb for the reception process
3278  * that is based on zero-copy.
3279  */
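/* In this scheme the ring is tracked with two indexes: cur_rx (the next entry
 * the driver will process) and dirty_rx (the next entry to be re-armed);
 * stmmac_rx_dirty() reports how many entries sit between the two and
 * therefore need a fresh skb and the OWN bit handed back to the hardware.
 */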
3280 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3281 {
3282         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3283         int dirty = stmmac_rx_dirty(priv, queue);
3284         unsigned int entry = rx_q->dirty_rx;
3285
3286         int bfsize = priv->dma_buf_sz;
3287
3288         while (dirty-- > 0) {
3289                 struct dma_desc *p;
3290
3291                 if (priv->extend_desc)
3292                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3293                 else
3294                         p = rx_q->dma_rx + entry;
3295
3296                 if (likely(!rx_q->rx_skbuff[entry])) {
3297                         struct sk_buff *skb;
3298
3299                         skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3300                         if (unlikely(!skb)) {
3301                                 /* so for a while no zero-copy! */
3302                                 rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3303                                 if (unlikely(net_ratelimit()))
3304                                         dev_err(priv->device,
3305                                                 "fail to alloc skb entry %d\n",
3306                                                 entry);
3307                                 break;
3308                         }
3309
3310                         rx_q->rx_skbuff[entry] = skb;
3311                         rx_q->rx_skbuff_dma[entry] =
3312                             dma_map_single(priv->device, skb->data, bfsize,
3313                                            DMA_FROM_DEVICE);
3314                         if (dma_mapping_error(priv->device,
3315                                               rx_q->rx_skbuff_dma[entry])) {
3316                                 netdev_err(priv->dev, "Rx DMA map failed\n");
3317                                 dev_kfree_skb(skb);
3318                                 break;
3319                         }
3320
3321                         stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
3322                         stmmac_refill_desc3(priv, rx_q, p);
3323
3324                         if (rx_q->rx_zeroc_thresh > 0)
3325                                 rx_q->rx_zeroc_thresh--;
3326
3327                         netif_dbg(priv, rx_status, priv->dev,
3328                                   "refill entry #%d\n", entry);
3329                 }
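                /* Publish the refreshed buffer address before handing the
                 * OWN bit back to the hardware; the barriers below are meant
                 * to enforce that ordering on weakly ordered platforms.
                 */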
3330                 dma_wmb();
3331
3332                 stmmac_set_rx_owner(priv, p, priv->use_riwt);
3333
3334                 dma_wmb();
3335
3336                 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3337         }
3338         rx_q->dirty_rx = entry;
3339 }
3340
3341 /**
3342  * stmmac_rx - manage the receive process
3343  * @priv: driver private structure
3344  * @limit: napi budget
3345  * @queue: RX queue index.
3346  * Description : this is the function called by the napi poll method.
3347  * It gets all the frames inside the ring.
3348  */
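/* Rough per-frame flow, as implemented below: read the descriptor status and
 * stop at the first entry still owned by the DMA; on error, drop the frame
 * (and unmap the buffer in the timestamping case); otherwise either copy
 * small frames into a new skb (copybreak) or detach the preallocated one,
 * then fix up timestamp, VLAN and checksum state and hand the skb to
 * napi_gro_receive().
 */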
3349 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3350 {
3351         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3352         struct stmmac_channel *ch = &priv->channel[queue];
3353         unsigned int entry = rx_q->cur_rx;
3354         int coe = priv->hw->rx_csum;
3355         unsigned int next_entry;
3356         unsigned int count = 0;
3357         bool xmac;
3358
3359         xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
3360
3361         if (netif_msg_rx_status(priv)) {
3362                 void *rx_head;
3363
3364                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3365                 if (priv->extend_desc)
3366                         rx_head = (void *)rx_q->dma_erx;
3367                 else
3368                         rx_head = (void *)rx_q->dma_rx;
3369
3370                 stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
3371         }
3372         while (count < limit) {
3373                 int status;
3374                 struct dma_desc *p;
3375                 struct dma_desc *np;
3376
3377                 if (priv->extend_desc)
3378                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3379                 else
3380                         p = rx_q->dma_rx + entry;
3381
3382                 /* read the status of the incoming frame */
3383                 status = stmmac_rx_status(priv, &priv->dev->stats,
3384                                 &priv->xstats, p);
3385                 /* check if managed by the DMA otherwise go ahead */
3386                 if (unlikely(status & dma_own))
3387                         break;
3388
3389                 count++;
3390
3391                 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3392                 next_entry = rx_q->cur_rx;
3393
3394                 if (priv->extend_desc)
3395                         np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3396                 else
3397                         np = rx_q->dma_rx + next_entry;
3398
3399                 prefetch(np);
3400
3401                 if (priv->extend_desc)
3402                         stmmac_rx_extended_status(priv, &priv->dev->stats,
3403                                         &priv->xstats, rx_q->dma_erx + entry);
3404                 if (unlikely(status == discard_frame)) {
3405                         priv->dev->stats.rx_errors++;
3406                         if (priv->hwts_rx_en && !priv->extend_desc) {
3407                                 /* DESC2 & DESC3 will be overwritten by the
3408                                  * device with the timestamp value, hence
3409                                  * reinitialize them in stmmac_rx_refill() so
3410                                  * that the device can reuse them.
3411                                  */
3412                                 dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3413                                 rx_q->rx_skbuff[entry] = NULL;
3414                                 dma_unmap_single(priv->device,
3415                                                  rx_q->rx_skbuff_dma[entry],
3416                                                  priv->dma_buf_sz,
3417                                                  DMA_FROM_DEVICE);
3418                         }
3419                 } else {
3420                         struct sk_buff *skb;
3421                         int frame_len;
3422                         unsigned int des;
3423
3424                         stmmac_get_desc_addr(priv, p, &des);
3425                         frame_len = stmmac_get_rx_frame_len(priv, p, coe);
3426
3427                         /*  If frame length is greater than skb buffer size
3428                          *  (preallocated during init) then the packet is
3429                          *  ignored
3430                          */
3431                         if (frame_len > priv->dma_buf_sz) {
3432                                 netdev_err(priv->dev,
3433                                            "len %d larger than size (%d)\n",
3434                                            frame_len, priv->dma_buf_sz);
3435                                 priv->dev->stats.rx_length_errors++;
3436                                 break;
3437                         }
3438
3439                         /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3440                          * Type frames (LLC/LLC-SNAP)
3441                          *
3442                          * llc_snap is never checked in GMAC >= 4, so this ACS
3443                          * feature is always disabled and packets need to be
3444                          * stripped manually.
3445                          */
3446                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3447                             unlikely(status != llc_snap))
3448                                 frame_len -= ETH_FCS_LEN;
3449
3450                         if (netif_msg_rx_status(priv)) {
3451                                 netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3452                                            p, entry, des);
3453                                 netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3454                                            frame_len, status);
3455                         }
3456
3457                         /* In the case of GMAC4, zero-copy is always used,
3458                          * whatever the size, because the used descriptors
3459                          * always need to be refilled.
3460                          */
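                        /* For small frames (below priv->rx_copybreak) on the
                         * older cores, or while recovering from an skb
                         * allocation failure, the payload is instead copied
                         * into a freshly allocated skb so that the original
                         * DMA buffer can stay mapped and be reused as-is.
                         */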
3461                         if (unlikely(!xmac &&
3462                                      ((frame_len < priv->rx_copybreak) ||
3463                                      stmmac_rx_threshold_count(rx_q)))) {
3464                                 skb = netdev_alloc_skb_ip_align(priv->dev,
3465                                                                 frame_len);
3466                                 if (unlikely(!skb)) {
3467                                         if (net_ratelimit())
3468                                                 dev_warn(priv->device,
3469                                                          "packet dropped\n");
3470                                         priv->dev->stats.rx_dropped++;
3471                                         break;
3472                                 }
3473
3474                                 dma_sync_single_for_cpu(priv->device,
3475                                                         rx_q->rx_skbuff_dma
3476                                                         [entry], frame_len,
3477                                                         DMA_FROM_DEVICE);
3478                                 skb_copy_to_linear_data(skb,
3479                                                         rx_q->
3480                                                         rx_skbuff[entry]->data,
3481                                                         frame_len);
3482
3483                                 skb_put(skb, frame_len);
3484                                 dma_sync_single_for_device(priv->device,
3485                                                            rx_q->rx_skbuff_dma
3486                                                            [entry], frame_len,
3487                                                            DMA_FROM_DEVICE);
3488                         } else {
3489                                 skb = rx_q->rx_skbuff[entry];
3490                                 if (unlikely(!skb)) {
3491                                         netdev_err(priv->dev,
3492                                                    "%s: Inconsistent Rx chain\n",
3493                                                    priv->dev->name);
3494                                         priv->dev->stats.rx_dropped++;
3495                                         break;
3496                                 }
3497                                 prefetch(skb->data - NET_IP_ALIGN);
3498                                 rx_q->rx_skbuff[entry] = NULL;
3499                                 rx_q->rx_zeroc_thresh++;
3500
3501                                 skb_put(skb, frame_len);
3502                                 dma_unmap_single(priv->device,
3503                                                  rx_q->rx_skbuff_dma[entry],
3504                                                  priv->dma_buf_sz,
3505                                                  DMA_FROM_DEVICE);
3506                         }
3507
3508                         if (netif_msg_pktdata(priv)) {
3509                                 netdev_dbg(priv->dev, "frame received (%dbytes)",
3510                                            frame_len);
3511                                 print_pkt(skb->data, frame_len);
3512                         }
3513
3514                         stmmac_get_rx_hwtstamp(priv, p, np, skb);
3515
3516                         stmmac_rx_vlan(priv->dev, skb);
3517
3518                         skb->protocol = eth_type_trans(skb, priv->dev);
3519
3520                         if (unlikely(!coe))
3521                                 skb_checksum_none_assert(skb);
3522                         else
3523                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
3524
3525                         napi_gro_receive(&ch->rx_napi, skb);
3526
3527                         priv->dev->stats.rx_packets++;
3528                         priv->dev->stats.rx_bytes += frame_len;
3529                 }
3530                 entry = next_entry;
3531         }
3532
3533         stmmac_rx_refill(priv, queue);
3534
3535         priv->xstats.rx_pkt_n += count;
3536
3537         return count;
3538 }
3539
3540 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
3541 {
3542         struct stmmac_channel *ch =
3543                 container_of(napi, struct stmmac_channel, rx_napi);
3544         struct stmmac_priv *priv = ch->priv_data;
3545         u32 chan = ch->index;
3546         int work_done;
3547
3548         priv->xstats.napi_poll++;
3549
3550         work_done = stmmac_rx(priv, budget, chan);
3551         if (work_done < budget && napi_complete_done(napi, work_done))
3552                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3553         return work_done;
3554 }
3555
3556 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
3557 {
3558         struct stmmac_channel *ch =
3559                 container_of(napi, struct stmmac_channel, tx_napi);
3560         struct stmmac_priv *priv = ch->priv_data;
3561         struct stmmac_tx_queue *tx_q;
3562         u32 chan = ch->index;
3563         int work_done;
3564
3565         priv->xstats.napi_poll++;
3566
3567         work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan);
3568         work_done = min(work_done, budget);
3569
3570         if (work_done < budget && napi_complete_done(napi, work_done))
3571                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3572
3573         /* Force transmission restart */
3574         tx_q = &priv->tx_queue[chan];
3575         if (tx_q->cur_tx != tx_q->dirty_tx) {
3576                 stmmac_enable_dma_transmission(priv, priv->ioaddr);
3577                 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr,
3578                                        chan);
3579         }
3580
3581         return work_done;
3582 }
3583
3584 /**
3585  *  stmmac_tx_timeout
3586  *  @dev : Pointer to net device structure
3587  *  Description: this function is called when a packet transmission fails to
3588  *   complete within a reasonable time. The driver will mark the error in the
3589  *   netdev structure and arrange for the device to be reset to a sane state
3590  *   in order to transmit a new packet.
3591  */
3592 static void stmmac_tx_timeout(struct net_device *dev)
3593 {
3594         struct stmmac_priv *priv = netdev_priv(dev);
3595
3596         stmmac_global_err(priv);
3597 }
3598
3599 /**
3600  *  stmmac_set_rx_mode - entry point for multicast addressing
3601  *  @dev : pointer to the device structure
3602  *  Description:
3603  *  This function is a driver entry point which gets called by the kernel
3604  *  whenever multicast addresses must be enabled/disabled.
3605  *  Return value:
3606  *  void.
3607  */
3608 static void stmmac_set_rx_mode(struct net_device *dev)
3609 {
3610         struct stmmac_priv *priv = netdev_priv(dev);
3611
3612         stmmac_set_filter(priv, priv->hw, dev);
3613 }
3614
3615 /**
3616  *  stmmac_change_mtu - entry point to change MTU size for the device.
3617  *  @dev : device pointer.
3618  *  @new_mtu : the new MTU size for the device.
3619  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
3620  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
3621  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
3622  *  Return value:
3623  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3624  *  file on failure.
3625  */
3626 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3627 {
3628         struct stmmac_priv *priv = netdev_priv(dev);
3629
3630         if (netif_running(dev)) {
3631                 netdev_err(priv->dev, "must be stopped to change its MTU\n");
3632                 return -EBUSY;
3633         }
3634
3635         dev->mtu = new_mtu;
3636
3637         netdev_update_features(dev);
3638
3639         return 0;
3640 }
3641
3642 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3643                                              netdev_features_t features)
3644 {
3645         struct stmmac_priv *priv = netdev_priv(dev);
3646
3647         if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3648                 features &= ~NETIF_F_RXCSUM;
3649
3650         if (!priv->plat->tx_coe)
3651                 features &= ~NETIF_F_CSUM_MASK;
3652
3653         /* Some GMAC devices have buggy Jumbo frame support that
3654          * requires the Tx COE to be disabled for oversized frames
3655          * (due to limited buffer sizes). In this case we disable
3656          * the TX csum insertion in the TDES and do not use SF.
3657          */
3658         if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3659                 features &= ~NETIF_F_CSUM_MASK;
3660
3661         /* Disable tso if asked by ethtool */
3662         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3663                 if (features & NETIF_F_TSO)
3664                         priv->tso = true;
3665                 else
3666                         priv->tso = false;
3667         }
3668
3669         return features;
3670 }
3671
3672 static int stmmac_set_features(struct net_device *netdev,
3673                                netdev_features_t features)
3674 {
3675         struct stmmac_priv *priv = netdev_priv(netdev);
3676
3677         /* Keep the COE Type if checksum offload is supported */
3678         if (features & NETIF_F_RXCSUM)
3679                 priv->hw->rx_csum = priv->plat->rx_coe;
3680         else
3681                 priv->hw->rx_csum = 0;
3682         /* No check is needed because rx_coe has been set earlier and will
3683          * be fixed up if there is an issue.
3684          */
3685         stmmac_rx_ipc(priv, priv->hw);
3686
3687         return 0;
3688 }
3689
3690 /**
3691  *  stmmac_interrupt - main ISR
3692  *  @irq: interrupt number.
3693  *  @dev_id: to pass the net device pointer.
3694  *  Description: this is the main driver interrupt service routine.
3695  *  It can call:
3696  *  o DMA service routine (to manage incoming frame reception and transmission
3697  *    status)
3698  *  o Core interrupts to manage: remote wake-up, management counter, LPI
3699  *    interrupts.
3700  */
3701 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3702 {
3703         struct net_device *dev = (struct net_device *)dev_id;
3704         struct stmmac_priv *priv = netdev_priv(dev);
3705         u32 rx_cnt = priv->plat->rx_queues_to_use;
3706         u32 tx_cnt = priv->plat->tx_queues_to_use;
3707         u32 queues_count;
3708         u32 queue;
3709         bool xmac;
3710
3711         xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
3712         queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3713
3714         if (priv->irq_wake)
3715                 pm_wakeup_event(priv->device, 0);
3716
3717         if (unlikely(!dev)) {
3718                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
3719                 return IRQ_NONE;
3720         }
3721
3722         /* Check if adapter is up */
3723         if (test_bit(STMMAC_DOWN, &priv->state))
3724                 return IRQ_HANDLED;
3725         /* Check if a fatal error happened */
3726         if (stmmac_safety_feat_interrupt(priv))
3727                 return IRQ_HANDLED;
3728
3729         /* To handle GMAC own interrupts */
3730         if ((priv->plat->has_gmac) || xmac) {
3731                 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
3732                 int mtl_status;
3733
3734                 if (unlikely(status)) {
3735                         /* For LPI we need to save the tx status */
3736                         if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3737                                 priv->tx_path_in_lpi_mode = true;
3738                         if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3739                                 priv->tx_path_in_lpi_mode = false;
3740                 }
3741
3742                 for (queue = 0; queue < queues_count; queue++) {
3743                         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3744
3745                         mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
3746                                                                 queue);
3747                         if (mtl_status != -EINVAL)
3748                                 status |= mtl_status;
3749
3750                         if (status & CORE_IRQ_MTL_RX_OVERFLOW)
3751                                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3752                                                        rx_q->rx_tail_addr,
3753                                                        queue);
3754                 }
3755
3756                 /* PCS link status */
3757                 if (priv->hw->pcs) {
3758                         if (priv->xstats.pcs_link)
3759                                 netif_carrier_on(dev);
3760                         else
3761                                 netif_carrier_off(dev);
3762                 }
3763         }
3764
3765         /* To handle DMA interrupts */
3766         stmmac_dma_interrupt(priv);
3767
3768         return IRQ_HANDLED;
3769 }
3770
3771 #ifdef CONFIG_NET_POLL_CONTROLLER
3772 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3773  * to allow network I/O with interrupts disabled.
3774  */
3775 static void stmmac_poll_controller(struct net_device *dev)
3776 {
3777         disable_irq(dev->irq);
3778         stmmac_interrupt(dev->irq, dev);
3779         enable_irq(dev->irq);
3780 }
3781 #endif
3782
3783 /**
3784  *  stmmac_ioctl - Entry point for the Ioctl
3785  *  @dev: Device pointer.
3786  *  @rq: An IOCTL-specific structure that can contain a pointer to
3787  *  a proprietary structure used to pass information to the driver.
3788  *  @cmd: IOCTL command
3789  *  Description:
3790  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3791  */
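/* For instance, a userspace PTP daemon would typically enable hardware
 * timestamping through SIOCSHWTSTAMP, which is routed below to
 * stmmac_hwtstamp_set().
 */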
3792 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3793 {
3794         int ret = -EOPNOTSUPP;
3795
3796         if (!netif_running(dev))
3797                 return -EINVAL;
3798
3799         switch (cmd) {
3800         case SIOCGMIIPHY:
3801         case SIOCGMIIREG:
3802         case SIOCSMIIREG:
3803                 if (!dev->phydev)
3804                         return -EINVAL;
3805                 ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3806                 break;
3807         case SIOCSHWTSTAMP:
3808                 ret = stmmac_hwtstamp_set(dev, rq);
3809                 break;
3810         case SIOCGHWTSTAMP:
3811                 ret = stmmac_hwtstamp_get(dev, rq);
3812                 break;
3813         default:
3814                 break;
3815         }
3816
3817         return ret;
3818 }
3819
3820 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3821                                     void *cb_priv)
3822 {
3823         struct stmmac_priv *priv = cb_priv;
3824         int ret = -EOPNOTSUPP;
3825
3826         stmmac_disable_all_queues(priv);
3827
3828         switch (type) {
3829         case TC_SETUP_CLSU32:
3830                 if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
3831                         ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
3832                 break;
3833         default:
3834                 break;
3835         }
3836
3837         stmmac_enable_all_queues(priv);
3838         return ret;
3839 }
3840
3841 static int stmmac_setup_tc_block(struct stmmac_priv *priv,
3842                                  struct tc_block_offload *f)
3843 {
3844         if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
3845                 return -EOPNOTSUPP;
3846
3847         switch (f->command) {
3848         case TC_BLOCK_BIND:
3849                 return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
3850                                 priv, priv, f->extack);
3851         case TC_BLOCK_UNBIND:
3852                 tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
3853                 return 0;
3854         default:
3855                 return -EOPNOTSUPP;
3856         }
3857 }
3858
3859 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
3860                            void *type_data)
3861 {
3862         struct stmmac_priv *priv = netdev_priv(ndev);
3863
3864         switch (type) {
3865         case TC_SETUP_BLOCK:
3866                 return stmmac_setup_tc_block(priv, type_data);
3867         case TC_SETUP_QDISC_CBS:
3868                 return stmmac_tc_setup_cbs(priv, priv, type_data);
3869         default:
3870                 return -EOPNOTSUPP;
3871         }
3872 }
3873
3874 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3875 {
3876         struct stmmac_priv *priv = netdev_priv(ndev);
3877         int ret = 0;
3878
3879         ret = eth_mac_addr(ndev, addr);
3880         if (ret)
3881                 return ret;
3882
3883         stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
3884
3885         return ret;
3886 }
3887
3888 #ifdef CONFIG_DEBUG_FS
3889 static struct dentry *stmmac_fs_dir;
3890
3891 static void sysfs_display_ring(void *head, int size, int extend_desc,
3892                                struct seq_file *seq)
3893 {
3894         int i;
3895         struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3896         struct dma_desc *p = (struct dma_desc *)head;
3897
3898         for (i = 0; i < size; i++) {
3899                 if (extend_desc) {
3900                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3901                                    i, (unsigned int)virt_to_phys(ep),
3902                                    le32_to_cpu(ep->basic.des0),
3903                                    le32_to_cpu(ep->basic.des1),
3904                                    le32_to_cpu(ep->basic.des2),
3905                                    le32_to_cpu(ep->basic.des3));
3906                         ep++;
3907                 } else {
3908                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3909                                    i, (unsigned int)virt_to_phys(p),
3910                                    le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3911                                    le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3912                         p++;
3913                 }
3914                 seq_printf(seq, "\n");
3915         }
3916 }
3917
3918 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
3919 {
3920         struct net_device *dev = seq->private;
3921         struct stmmac_priv *priv = netdev_priv(dev);
3922         u32 rx_count = priv->plat->rx_queues_to_use;
3923         u32 tx_count = priv->plat->tx_queues_to_use;
3924         u32 queue;
3925
3926         if ((dev->flags & IFF_UP) == 0)
3927                 return 0;
3928
3929         for (queue = 0; queue < rx_count; queue++) {
3930                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3931
3932                 seq_printf(seq, "RX Queue %d:\n", queue);
3933
3934                 if (priv->extend_desc) {
3935                         seq_printf(seq, "Extended descriptor ring:\n");
3936                         sysfs_display_ring((void *)rx_q->dma_erx,
3937                                            DMA_RX_SIZE, 1, seq);
3938                 } else {
3939                         seq_printf(seq, "Descriptor ring:\n");
3940                         sysfs_display_ring((void *)rx_q->dma_rx,
3941                                            DMA_RX_SIZE, 0, seq);
3942                 }
3943         }
3944
3945         for (queue = 0; queue < tx_count; queue++) {
3946                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3947
3948                 seq_printf(seq, "TX Queue %d:\n", queue);
3949
3950                 if (priv->extend_desc) {
3951                         seq_printf(seq, "Extended descriptor ring:\n");
3952                         sysfs_display_ring((void *)tx_q->dma_etx,
3953                                            DMA_TX_SIZE, 1, seq);
3954                 } else {
3955                         seq_printf(seq, "Descriptor ring:\n");
3956                         sysfs_display_ring((void *)tx_q->dma_tx,
3957                                            DMA_TX_SIZE, 0, seq);
3958                 }
3959         }
3960
3961         return 0;
3962 }
3963 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
3964
3965 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
3966 {
3967         struct net_device *dev = seq->private;
3968         struct stmmac_priv *priv = netdev_priv(dev);
3969
3970         if (!priv->hw_cap_support) {
3971                 seq_printf(seq, "DMA HW features not supported\n");
3972                 return 0;
3973         }
3974
3975         seq_printf(seq, "==============================\n");
3976         seq_printf(seq, "\tDMA HW features\n");
3977         seq_printf(seq, "==============================\n");
3978
3979         seq_printf(seq, "\t10/100 Mbps: %s\n",
3980                    (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3981         seq_printf(seq, "\t1000 Mbps: %s\n",
3982                    (priv->dma_cap.mbps_1000) ? "Y" : "N");
3983         seq_printf(seq, "\tHalf duplex: %s\n",
3984                    (priv->dma_cap.half_duplex) ? "Y" : "N");
3985         seq_printf(seq, "\tHash Filter: %s\n",
3986                    (priv->dma_cap.hash_filter) ? "Y" : "N");
3987         seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3988                    (priv->dma_cap.multi_addr) ? "Y" : "N");
3989         seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3990                    (priv->dma_cap.pcs) ? "Y" : "N");
3991         seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3992                    (priv->dma_cap.sma_mdio) ? "Y" : "N");
3993         seq_printf(seq, "\tPMT Remote wake up: %s\n",
3994                    (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3995         seq_printf(seq, "\tPMT Magic Frame: %s\n",
3996                    (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3997         seq_printf(seq, "\tRMON module: %s\n",
3998                    (priv->dma_cap.rmon) ? "Y" : "N");
3999         seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
4000                    (priv->dma_cap.time_stamp) ? "Y" : "N");
4001         seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
4002                    (priv->dma_cap.atime_stamp) ? "Y" : "N");
4003         seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
4004                    (priv->dma_cap.eee) ? "Y" : "N");
4005         seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
4006         seq_printf(seq, "\tChecksum Offload in TX: %s\n",
4007                    (priv->dma_cap.tx_coe) ? "Y" : "N");
4008         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4009                 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
4010                            (priv->dma_cap.rx_coe) ? "Y" : "N");
4011         } else {
4012                 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
4013                            (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
4014                 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
4015                            (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
4016         }
4017         seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
4018                    (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
4019         seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
4020                    priv->dma_cap.number_rx_channel);
4021         seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
4022                    priv->dma_cap.number_tx_channel);
4023         seq_printf(seq, "\tEnhanced descriptors: %s\n",
4024                    (priv->dma_cap.enh_desc) ? "Y" : "N");
4025
4026         return 0;
4027 }
4028 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
4029
4030 static int stmmac_init_fs(struct net_device *dev)
4031 {
4032         struct stmmac_priv *priv = netdev_priv(dev);
4033
4034         /* Create per netdev entries */
4035         priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
4036
4037         if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
4038                 netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
4039
4040                 return -ENOMEM;
4041         }
4042
4043         /* Entry to report DMA RX/TX rings */
4044         priv->dbgfs_rings_status =
4045                 debugfs_create_file("descriptors_status", 0444,
4046                                     priv->dbgfs_dir, dev,
4047                                     &stmmac_rings_status_fops);
4048
4049         if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
4050                 netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
4051                 debugfs_remove_recursive(priv->dbgfs_dir);
4052
4053                 return -ENOMEM;
4054         }
4055
4056         /* Entry to report the DMA HW features */
4057         priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
4058                                                   priv->dbgfs_dir,
4059                                                   dev, &stmmac_dma_cap_fops);
4060
4061         if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
4062                 netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
4063                 debugfs_remove_recursive(priv->dbgfs_dir);
4064
4065                 return -ENOMEM;
4066         }
4067
4068         return 0;
4069 }
4070
4071 static void stmmac_exit_fs(struct net_device *dev)
4072 {
4073         struct stmmac_priv *priv = netdev_priv(dev);
4074
4075         debugfs_remove_recursive(priv->dbgfs_dir);
4076 }
4077 #endif /* CONFIG_DEBUG_FS */
4078
4079 static const struct net_device_ops stmmac_netdev_ops = {
4080         .ndo_open = stmmac_open,
4081         .ndo_start_xmit = stmmac_xmit,
4082         .ndo_stop = stmmac_release,
4083         .ndo_change_mtu = stmmac_change_mtu,
4084         .ndo_fix_features = stmmac_fix_features,
4085         .ndo_set_features = stmmac_set_features,
4086         .ndo_set_rx_mode = stmmac_set_rx_mode,
4087         .ndo_tx_timeout = stmmac_tx_timeout,
4088         .ndo_do_ioctl = stmmac_ioctl,
4089         .ndo_setup_tc = stmmac_setup_tc,
4090 #ifdef CONFIG_NET_POLL_CONTROLLER
4091         .ndo_poll_controller = stmmac_poll_controller,
4092 #endif
4093         .ndo_set_mac_address = stmmac_set_mac_address,
4094 };
4095
4096 static void stmmac_reset_subtask(struct stmmac_priv *priv)
4097 {
4098         if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
4099                 return;
4100         if (test_bit(STMMAC_DOWN, &priv->state))
4101                 return;
4102
4103         netdev_err(priv->dev, "Reset adapter.\n");
4104
4105         rtnl_lock();
4106         netif_trans_update(priv->dev);
4107         while (test_and_set_bit(STMMAC_RESETING, &priv->state))
4108                 usleep_range(1000, 2000);
4109
4110         set_bit(STMMAC_DOWN, &priv->state);
4111         dev_close(priv->dev);
4112         dev_open(priv->dev, NULL);
4113         clear_bit(STMMAC_DOWN, &priv->state);
4114         clear_bit(STMMAC_RESETING, &priv->state);
4115         rtnl_unlock();
4116 }
4117
4118 static void stmmac_service_task(struct work_struct *work)
4119 {
4120         struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
4121                         service_task);
4122
4123         stmmac_reset_subtask(priv);
4124         clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
4125 }
4126
4127 /**
4128  *  stmmac_hw_init - Init the MAC device
4129  *  @priv: driver private structure
4130  *  Description: this function is to configure the MAC device according to
4131  *  some platform parameters or the HW capability register. It prepares the
4132  *  driver to use either ring or chain modes and to setup either enhanced or
4133  *  normal descriptors.
4134  */
4135 static int stmmac_hw_init(struct stmmac_priv *priv)
4136 {
4137         int ret;
4138
4139         /* dwmac-sun8i only works in chain mode */
4140         if (priv->plat->has_sun8i)
4141                 chain_mode = 1;
4142         priv->chain_mode = chain_mode;
4143
4144         /* Initialize HW Interface */
4145         ret = stmmac_hwif_init(priv);
4146         if (ret)
4147                 return ret;
4148
4149         /* Get the HW capability (available on GMAC cores newer than 3.50a) */
4150         priv->hw_cap_support = stmmac_get_hw_features(priv);
4151         if (priv->hw_cap_support) {
4152                 dev_info(priv->device, "DMA HW capability register supported\n");
4153
4154                 /* Some GMAC/DMA configuration fields passed in from the
4155                  * platform (e.g. enh_desc, tx_coe) can be overridden with
4156                  * the values read from the HW capability register, when
4157                  * supported.
4158                  */
4159                 priv->plat->enh_desc = priv->dma_cap.enh_desc;
4160                 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4161                 priv->hw->pmt = priv->plat->pmt;
4162
4163                 /* TXCOE doesn't work in thresh DMA mode */
4164                 if (priv->plat->force_thresh_dma_mode)
4165                         priv->plat->tx_coe = 0;
4166                 else
4167                         priv->plat->tx_coe = priv->dma_cap.tx_coe;
4168
4169                 /* For GMAC4, rx_coe comes from the HW capability register. */
4170                 priv->plat->rx_coe = priv->dma_cap.rx_coe;
4171
4172                 if (priv->dma_cap.rx_coe_type2)
4173                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4174                 else if (priv->dma_cap.rx_coe_type1)
4175                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4176
4177         } else {
4178                 dev_info(priv->device, "No HW DMA feature register supported\n");
4179         }
4180
4181         if (priv->plat->rx_coe) {
4182                 priv->hw->rx_csum = priv->plat->rx_coe;
4183                 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4184                 if (priv->synopsys_id < DWMAC_CORE_4_00)
4185                         dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4186         }
4187         if (priv->plat->tx_coe)
4188                 dev_info(priv->device, "TX Checksum insertion supported\n");
4189
4190         if (priv->plat->pmt) {
4191                 dev_info(priv->device, "Wake-Up On LAN supported\n");
4192                 device_set_wakeup_capable(priv->device, 1);
4193         }
4194
4195         if (priv->dma_cap.tsoen)
4196                 dev_info(priv->device, "TSO supported\n");
4197
4198         /* Run HW quirks, if any */
4199         if (priv->hwif_quirks) {
4200                 ret = priv->hwif_quirks(priv);
4201                 if (ret)
4202                         return ret;
4203         }
4204
4205         /* The Rx Watchdog is available on cores newer than 3.40.
4206          * In some cases, for example on buggy hardware, this feature
4207          * has to be disabled; this can be done by setting the
4208          * riwt_off field in the platform data.
4209          */
4210         if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
4211             (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
4212                 priv->use_riwt = 1;
4213                 dev_info(priv->device,
4214                          "Enable RX Mitigation via HW Watchdog Timer\n");
4215         }
4216
4217         return 0;
4218 }
4219
4220 /**
4221  * stmmac_dvr_probe
4222  * @device: device pointer
4223  * @plat_dat: platform data pointer
4224  * @res: stmmac resource pointer
4225  * Description: this is the main probe function; it allocates the
4226  * net_device via alloc_etherdev_mqs and initializes the private structure.
4227  * Return:
4228  * returns 0 on success, otherwise errno.
4229  */
4230 int stmmac_dvr_probe(struct device *device,
4231                      struct plat_stmmacenet_data *plat_dat,
4232                      struct stmmac_resources *res)
4233 {
4234         struct net_device *ndev = NULL;
4235         struct stmmac_priv *priv;
4236         u32 queue, maxq;
4237         int ret = 0;
4238
4239         ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4240                                   MTL_MAX_TX_QUEUES,
4241                                   MTL_MAX_RX_QUEUES);
4242         if (!ndev)
4243                 return -ENOMEM;
4244
4245         SET_NETDEV_DEV(ndev, device);
4246
4247         priv = netdev_priv(ndev);
4248         priv->device = device;
4249         priv->dev = ndev;
4250
4251         stmmac_set_ethtool_ops(ndev);
4252         priv->pause = pause;
4253         priv->plat = plat_dat;
4254         priv->ioaddr = res->addr;
4255         priv->dev->base_addr = (unsigned long)res->addr;
4256
4257         priv->dev->irq = res->irq;
4258         priv->wol_irq = res->wol_irq;
4259         priv->lpi_irq = res->lpi_irq;
4260
4261         if (res->mac)
4262                 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4263
4264         dev_set_drvdata(device, priv->dev);
4265
4266         /* Verify driver arguments */
4267         stmmac_verify_args();
4268
4269         /* Allocate workqueue */
4270         priv->wq = create_singlethread_workqueue("stmmac_wq");
4271         if (!priv->wq) {
4272                 dev_err(priv->device, "failed to create workqueue\n");
4273                 ret = -ENOMEM;
4274                 goto error_wq;
4275         }
4276
4277         INIT_WORK(&priv->service_task, stmmac_service_task);
4278
4279         /* Override with kernel parameters if supplied XXX CRS XXX
4280          * this needs to have multiple instances
4281          */
4282         if ((phyaddr >= 0) && (phyaddr <= 31))
4283                 priv->plat->phy_addr = phyaddr;
4284
4285         if (priv->plat->stmmac_rst) {
4286                 ret = reset_control_assert(priv->plat->stmmac_rst);
4287                 reset_control_deassert(priv->plat->stmmac_rst);
4288                 /* Some reset controllers have only a reset callback instead
4289                  * of an assert + deassert callback pair.
4290                  */
4291                 if (ret == -ENOTSUPP)
4292                         reset_control_reset(priv->plat->stmmac_rst);
4293         }
4294
4295         /* Init MAC and get the capabilities */
4296         ret = stmmac_hw_init(priv);
4297         if (ret)
4298                 goto error_hw_init;
4299
4300         /* Configure real RX and TX queues */
4301         netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4302         netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4303
4304         ndev->netdev_ops = &stmmac_netdev_ops;
4305
4306         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4307                             NETIF_F_RXCSUM;
4308
4309         ret = stmmac_tc_init(priv, priv);
4310         if (!ret)
4311                 ndev->hw_features |= NETIF_F_HW_TC;
4313
4314         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4315                 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4316                 priv->tso = true;
4317                 dev_info(priv->device, "TSO feature enabled\n");
4318         }
4319         ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4320         ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4321 #ifdef STMMAC_VLAN_TAG_USED
4322         /* Both mac100 and gmac support receive VLAN tag detection */
4323         ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
4324 #endif
4325         priv->msg_enable = netif_msg_init(debug, default_msg_level);
4326
4327         /* MTU range: 46 - hw-specific max */
4328         ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4329         if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4330                 ndev->max_mtu = JUMBO_LEN;
4331         else if (priv->plat->has_xgmac)
4332                 ndev->max_mtu = XGMAC_JUMBO_LEN;
4333         else
4334                 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4335         /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
4336          * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
4337          */
4338         if ((priv->plat->maxmtu < ndev->max_mtu) &&
4339             (priv->plat->maxmtu >= ndev->min_mtu))
4340                 ndev->max_mtu = priv->plat->maxmtu;
4341         else if (priv->plat->maxmtu < ndev->min_mtu)
4342                 dev_warn(priv->device,
4343                          "%s: warning: maxmtu has an invalid value (%d)\n",
4344                          __func__, priv->plat->maxmtu);
4345
4346         if (flow_ctrl)
4347                 priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
4348
4349         /* Setup channels NAPI */
4350         maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
4351
4352         for (queue = 0; queue < maxq; queue++) {
4353                 struct stmmac_channel *ch = &priv->channel[queue];
4354
4355                 ch->priv_data = priv;
4356                 ch->index = queue;
4357
4358                 if (queue < priv->plat->rx_queues_to_use) {
4359                         netif_napi_add(ndev, &ch->rx_napi, stmmac_napi_poll_rx,
4360                                        NAPI_POLL_WEIGHT);
4361                 }
4362                 if (queue < priv->plat->tx_queues_to_use) {
4363                         netif_napi_add(ndev, &ch->tx_napi, stmmac_napi_poll_tx,
4364                                        NAPI_POLL_WEIGHT);
4365                 }
4366         }
4367
4368         mutex_init(&priv->lock);
4369
4370         /* If a specific clk_csr value is passed from the platform,
4371          * the CSR Clock Range selection is fixed and cannot be
4372          * changed at run-time. Otherwise, the driver will try to
4373          * set the MDC clock dynamically according to the actual
4374          * CSR clock input.
4375          */
4376         if (!priv->plat->clk_csr)
4377                 stmmac_clk_csr_set(priv);
4378         else
4379                 priv->clk_csr = priv->plat->clk_csr;
4380
4381         stmmac_check_pcs_mode(priv);
4382
4383         if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
4384             priv->hw->pcs != STMMAC_PCS_TBI &&
4385             priv->hw->pcs != STMMAC_PCS_RTBI) {
4386                 /* MDIO bus Registration */
4387                 ret = stmmac_mdio_register(ndev);
4388                 if (ret < 0) {
4389                         dev_err(priv->device,
4390                                 "%s: MDIO bus (id: %d) registration failed",
4391                                 __func__, priv->plat->bus_id);
4392                         goto error_mdio_register;
4393                 }
4394         }
4395
4396         ret = register_netdev(ndev);
4397         if (ret) {
4398                 dev_err(priv->device, "%s: ERROR %i registering the device\n",
4399                         __func__, ret);
4400                 goto error_netdev_register;
4401         }
4402
4403 #ifdef CONFIG_DEBUG_FS
4404         ret = stmmac_init_fs(ndev);
4405         if (ret < 0)
4406                 netdev_warn(priv->dev, "%s: failed debugFS registration\n",
4407                             __func__);
4408 #endif
4409
4410         return ret;
4411
4412 error_netdev_register:
4413         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4414             priv->hw->pcs != STMMAC_PCS_TBI &&
4415             priv->hw->pcs != STMMAC_PCS_RTBI)
4416                 stmmac_mdio_unregister(ndev);
4417 error_mdio_register:
4418         for (queue = 0; queue < maxq; queue++) {
4419                 struct stmmac_channel *ch = &priv->channel[queue];
4420
4421                 if (queue < priv->plat->rx_queues_to_use)
4422                         netif_napi_del(&ch->rx_napi);
4423                 if (queue < priv->plat->tx_queues_to_use)
4424                         netif_napi_del(&ch->tx_napi);
4425         }
4426 error_hw_init:
4427         destroy_workqueue(priv->wq);
4428 error_wq:
4429         free_netdev(ndev);
4430
4431         return ret;
4432 }
4433 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
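/* Typical usage (a sketch, assuming the stmmac_platform.c helpers): a glue
 * driver fills a struct stmmac_resources and a plat_stmmacenet_data, e.g.
 * via stmmac_get_platform_resources() and stmmac_probe_config_dt(), and
 * then calls stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res).
 */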
4434
4435 /**
4436  * stmmac_dvr_remove
4437  * @dev: device pointer
4438  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
4439  * changes the link status and releases the DMA descriptor rings.
4440  */
4441 int stmmac_dvr_remove(struct device *dev)
4442 {
4443         struct net_device *ndev = dev_get_drvdata(dev);
4444         struct stmmac_priv *priv = netdev_priv(ndev);
4445
4446         netdev_info(priv->dev, "%s: removing driver\n", __func__);
4447
4448 #ifdef CONFIG_DEBUG_FS
4449         stmmac_exit_fs(ndev);
4450 #endif
4451         stmmac_stop_all_dma(priv);
4452
4453         stmmac_mac_set(priv, priv->ioaddr, false);
4454         netif_carrier_off(ndev);
4455         unregister_netdev(ndev);
4456         if (priv->plat->stmmac_rst)
4457                 reset_control_assert(priv->plat->stmmac_rst);
4458         clk_disable_unprepare(priv->plat->pclk);
4459         clk_disable_unprepare(priv->plat->stmmac_clk);
4460         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4461             priv->hw->pcs != STMMAC_PCS_TBI &&
4462             priv->hw->pcs != STMMAC_PCS_RTBI)
4463                 stmmac_mdio_unregister(ndev);
4464         destroy_workqueue(priv->wq);
4465         mutex_destroy(&priv->lock);
4466         free_netdev(ndev);
4467
4468         return 0;
4469 }
4470 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
4471
4472 /**
4473  * stmmac_suspend - suspend callback
4474  * @dev: device pointer
4475  * Description: this function suspends the device. It is called by the
4476  * platform driver to stop the network queues, release the resources,
4477  * and program the PMT register (for WoL).
4478  */
4479 int stmmac_suspend(struct device *dev)
4480 {
4481         struct net_device *ndev = dev_get_drvdata(dev);
4482         struct stmmac_priv *priv = netdev_priv(ndev);
4483
4484         if (!ndev || !netif_running(ndev))
4485                 return 0;
4486
4487         if (ndev->phydev)
4488                 phy_stop(ndev->phydev);
4489
4490         mutex_lock(&priv->lock);
4491
4492         netif_device_detach(ndev);
4493         stmmac_stop_all_queues(priv);
4494
4495         stmmac_disable_all_queues(priv);
4496
4497         /* Stop TX/RX DMA */
4498         stmmac_stop_all_dma(priv);
4499
4500         /* Enable Power down mode by programming the PMT regs */
4501         if (device_may_wakeup(priv->device)) {
4502                 stmmac_pmt(priv, priv->hw, priv->wolopts);
4503                 priv->irq_wake = 1;
4504         } else {
4505                 stmmac_mac_set(priv, priv->ioaddr, false);
4506                 pinctrl_pm_select_sleep_state(priv->device);
4507                 /* Disable clocks since wakeup is not enabled */
4508                 clk_disable(priv->plat->pclk);
4509                 clk_disable(priv->plat->stmmac_clk);
4510         }
4511         mutex_unlock(&priv->lock);
4512
4513         priv->oldlink = false;
4514         priv->speed = SPEED_UNKNOWN;
4515         priv->oldduplex = DUPLEX_UNKNOWN;
4516         return 0;
4517 }
4518 EXPORT_SYMBOL_GPL(stmmac_suspend);
4519
4520 /**
4521  * stmmac_reset_queues_param - reset queue parameters
4522  * @dev: device pointer
4523  * @priv: driver private structure
4524 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4525 {
4526         u32 rx_cnt = priv->plat->rx_queues_to_use;
4527         u32 tx_cnt = priv->plat->tx_queues_to_use;
4528         u32 queue;
4529
4530         for (queue = 0; queue < rx_cnt; queue++) {
4531                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4532
4533                 rx_q->cur_rx = 0;
4534                 rx_q->dirty_rx = 0;
4535         }
4536
4537         for (queue = 0; queue < tx_cnt; queue++) {
4538                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4539
4540                 tx_q->cur_tx = 0;
4541                 tx_q->dirty_tx = 0;
4542                 tx_q->mss = 0;
4543         }
4544 }
4545
4546 /**
4547  * stmmac_resume - resume callback
4548  * @dev: device pointer
4549  * Description: on resume this function is invoked to set up the DMA and CORE
4550  * in a usable state.
4551  */
4552 int stmmac_resume(struct device *dev)
4553 {
4554         struct net_device *ndev = dev_get_drvdata(dev);
4555         struct stmmac_priv *priv = netdev_priv(ndev);
4556
4557         if (!netif_running(ndev))
4558                 return 0;
4559
4560         /* The Power Down bit in the PMT register is cleared
4561          * automatically as soon as a magic packet or a Wake-up frame
4562          * is received. Nevertheless, it's better to clear this bit
4563          * manually because it can cause problems when resuming
4564          * from another device (e.g. serial console).
4565          */
4566         if (device_may_wakeup(priv->device)) {
4567                 mutex_lock(&priv->lock);
4568                 stmmac_pmt(priv, priv->hw, 0);
4569                 mutex_unlock(&priv->lock);
4570                 priv->irq_wake = 0;
4571         } else {
4572                 pinctrl_pm_select_default_state(priv->device);
4573                 /* Enable the clocks that were previously disabled */
4574                 clk_enable(priv->plat->stmmac_clk);
4575                 clk_enable(priv->plat->pclk);
4576                 /* reset the phy so that it's ready */
4577                 if (priv->mii)
4578                         stmmac_mdio_reset(priv->mii);
4579         }
4580
4581         netif_device_attach(ndev);
4582
4583         mutex_lock(&priv->lock);
4584
4585         stmmac_reset_queues_param(priv);
4586
4587         stmmac_clear_descriptors(priv);
4588
4589         stmmac_hw_setup(ndev, false);
4590         stmmac_init_tx_coalesce(priv);
4591         stmmac_set_rx_mode(ndev);
4592
4593         stmmac_enable_all_queues(priv);
4594
4595         stmmac_start_all_queues(priv);
4596
4597         mutex_unlock(&priv->lock);
4598
4599         if (ndev->phydev)
4600                 phy_start(ndev->phydev);
4601
4602         return 0;
4603 }
4604 EXPORT_SYMBOL_GPL(stmmac_resume);
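/* stmmac_suspend()/stmmac_resume() are not used directly in this file; the
 * platform and PCI glue drivers are expected to wire them into their
 * dev_pm_ops (e.g. stmmac_pltfr_pm_ops in stmmac_platform.c).
 */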
4605
4606 #ifndef MODULE
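/* Parse the built-in "stmmaceth=" command line, e.g. (illustrative values):
 *	stmmaceth=debug:16,phyaddr:1,watchdog:4000,buf_sz:2048
 * Each recognized option updates the corresponding module parameter
 * declared at the top of this file.
 */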
4607 static int __init stmmac_cmdline_opt(char *str)
4608 {
4609         char *opt;
4610
4611         if (!str || !*str)
4612                 return -EINVAL;
4613         while ((opt = strsep(&str, ",")) != NULL) {
4614                 if (!strncmp(opt, "debug:", 6)) {
4615                         if (kstrtoint(opt + 6, 0, &debug))
4616                                 goto err;
4617                 } else if (!strncmp(opt, "phyaddr:", 8)) {
4618                         if (kstrtoint(opt + 8, 0, &phyaddr))
4619                                 goto err;
4620                 } else if (!strncmp(opt, "buf_sz:", 7)) {
4621                         if (kstrtoint(opt + 7, 0, &buf_sz))
4622                                 goto err;
4623                 } else if (!strncmp(opt, "tc:", 3)) {
4624                         if (kstrtoint(opt + 3, 0, &tc))
4625                                 goto err;
4626                 } else if (!strncmp(opt, "watchdog:", 9)) {
4627                         if (kstrtoint(opt + 9, 0, &watchdog))
4628                                 goto err;
4629                 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
4630                         if (kstrtoint(opt + 10, 0, &flow_ctrl))
4631                                 goto err;
4632                 } else if (!strncmp(opt, "pause:", 6)) {
4633                         if (kstrtoint(opt + 6, 0, &pause))
4634                                 goto err;
4635                 } else if (!strncmp(opt, "eee_timer:", 10)) {
4636                         if (kstrtoint(opt + 10, 0, &eee_timer))
4637                                 goto err;
4638                 } else if (!strncmp(opt, "chain_mode:", 11)) {
4639                         if (kstrtoint(opt + 11, 0, &chain_mode))
4640                                 goto err;
4641                 }
4642         }
4643         return 0;
4644
4645 err:
4646         pr_err("%s: ERROR broken module parameter conversion", __func__);
4647         return -EINVAL;
4648 }
4649
4650 __setup("stmmaceth=", stmmac_cmdline_opt);
4651 #endif /* MODULE */
4652
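/* Module init only prepares the shared debugfs root directory; the actual
 * devices are probed later via stmmac_dvr_probe() from the bus glue drivers.
 */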
4653 static int __init stmmac_init(void)
4654 {
4655 #ifdef CONFIG_DEBUG_FS
4656         /* Create debugfs main directory if it doesn't exist yet */
4657         if (!stmmac_fs_dir) {
4658                 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4659
4660                 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4661                         pr_err("ERROR %s, debugfs create directory failed\n",
4662                                STMMAC_RESOURCE_NAME);
4663
4664                         return -ENOMEM;
4665                 }
4666         }
4667 #endif
4668
4669         return 0;
4670 }
4671
4672 static void __exit stmmac_exit(void)
4673 {
4674 #ifdef CONFIG_DEBUG_FS
4675         debugfs_remove_recursive(stmmac_fs_dir);
4676 #endif
4677 }
4678
4679 module_init(stmmac_init)
4680 module_exit(stmmac_exit)
4681
4682 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4683 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4684 MODULE_LICENSE("GPL");