drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1 /*******************************************************************************
2   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3   ST Ethernet IPs are built around a Synopsys IP Core.
4
5         Copyright(C) 2007-2011 STMicroelectronics Ltd
6
7   This program is free software; you can redistribute it and/or modify it
8   under the terms and conditions of the GNU General Public License,
9   version 2, as published by the Free Software Foundation.
10
11   This program is distributed in the hope it will be useful, but WITHOUT
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14   more details.
15
16   The full GNU General Public License is included in this distribution in
17   the file called "COPYING".
18
19   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20
21   Documentation available at:
22         http://www.stlinux.com
23   Support available at:
24         https://bugzilla.stlinux.com/
25 *******************************************************************************/
26
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include <net/pkt_cls.h>
49 #include "stmmac_ptp.h"
50 #include "stmmac.h"
51 #include <linux/reset.h>
52 #include <linux/of_mdio.h>
53 #include "dwmac1000.h"
54 #include "dwxgmac2.h"
55 #include "hwif.h"
56
57 #define STMMAC_ALIGN(x)         __ALIGN_KERNEL(x, SMP_CACHE_BYTES)
58 #define TSO_MAX_BUFF_SIZE       (SZ_16K - 1)
59
60 /* Module parameters */
61 #define TX_TIMEO        5000
62 static int watchdog = TX_TIMEO;
63 module_param(watchdog, int, 0644);
64 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
65
66 static int debug = -1;
67 module_param(debug, int, 0644);
68 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
69
70 static int phyaddr = -1;
71 module_param(phyaddr, int, 0444);
72 MODULE_PARM_DESC(phyaddr, "Physical device address");
73
74 #define STMMAC_TX_THRESH        (DMA_TX_SIZE / 4)
75 #define STMMAC_RX_THRESH        (DMA_RX_SIZE / 4)
76
77 static int flow_ctrl = FLOW_OFF;
78 module_param(flow_ctrl, int, 0644);
79 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
80
81 static int pause = PAUSE_TIME;
82 module_param(pause, int, 0644);
83 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
84
85 #define TC_DEFAULT 64
86 static int tc = TC_DEFAULT;
87 module_param(tc, int, 0644);
88 MODULE_PARM_DESC(tc, "DMA threshold control value");
89
90 #define DEFAULT_BUFSIZE 1536
91 static int buf_sz = DEFAULT_BUFSIZE;
92 module_param(buf_sz, int, 0644);
93 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
94
95 #define STMMAC_RX_COPYBREAK     256
96
97 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
98                                       NETIF_MSG_LINK | NETIF_MSG_IFUP |
99                                       NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
100
101 #define STMMAC_DEFAULT_LPI_TIMER        1000
102 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
103 module_param(eee_timer, int, 0644);
104 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
105 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
106
107 /* By default the driver uses ring mode to manage the tx and rx descriptors,
108  * but the user can force the use of chain mode instead of ring mode.
109  */
110 static unsigned int chain_mode;
111 module_param(chain_mode, int, 0444);
112 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
113
114 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
115
116 #ifdef CONFIG_DEBUG_FS
117 static int stmmac_init_fs(struct net_device *dev);
118 static void stmmac_exit_fs(struct net_device *dev);
119 #endif
120
121 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
122
123 /**
124  * stmmac_verify_args - verify the driver parameters.
125  * Description: it checks the driver parameters and sets a default in case of
126  * errors.
127  */
128 static void stmmac_verify_args(void)
129 {
130         if (unlikely(watchdog < 0))
131                 watchdog = TX_TIMEO;
132         if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
133                 buf_sz = DEFAULT_BUFSIZE;
134         if (unlikely(flow_ctrl > 1))
135                 flow_ctrl = FLOW_AUTO;
136         else if (likely(flow_ctrl < 0))
137                 flow_ctrl = FLOW_OFF;
138         if (unlikely((pause < 0) || (pause > 0xffff)))
139                 pause = PAUSE_TIME;
140         if (eee_timer < 0)
141                 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
142 }
143
144 /**
145  * stmmac_disable_all_queues - Disable all queues
146  * @priv: driver private structure
147  */
148 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
149 {
150         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
151         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
152         u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
153         u32 queue;
154
155         for (queue = 0; queue < maxq; queue++) {
156                 struct stmmac_channel *ch = &priv->channel[queue];
157
158                 napi_disable(&ch->napi);
159         }
160 }
161
162 /**
163  * stmmac_enable_all_queues - Enable all queues
164  * @priv: driver private structure
165  */
166 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
167 {
168         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
169         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
170         u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
171         u32 queue;
172
173         for (queue = 0; queue < maxq; queue++) {
174                 struct stmmac_channel *ch = &priv->channel[queue];
175
176                 napi_enable(&ch->napi);
177         }
178 }
179
180 /**
181  * stmmac_stop_all_queues - Stop all queues
182  * @priv: driver private structure
183  */
184 static void stmmac_stop_all_queues(struct stmmac_priv *priv)
185 {
186         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
187         u32 queue;
188
189         for (queue = 0; queue < tx_queues_cnt; queue++)
190                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
191 }
192
193 /**
194  * stmmac_start_all_queues - Start all queues
195  * @priv: driver private structure
196  */
197 static void stmmac_start_all_queues(struct stmmac_priv *priv)
198 {
199         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
200         u32 queue;
201
202         for (queue = 0; queue < tx_queues_cnt; queue++)
203                 netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
204 }
205
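/* Schedule the service task (on priv->wq) unless the interface is going
 * down or the task has already been scheduled.
 */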
206 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
207 {
208         if (!test_bit(STMMAC_DOWN, &priv->state) &&
209             !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
210                 queue_work(priv->wq, &priv->service_task);
211 }
212
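/* Handle a fatal error: take the carrier down and request a reset through
 * the service task.
 */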
213 static void stmmac_global_err(struct stmmac_priv *priv)
214 {
215         netif_carrier_off(priv->dev);
216         set_bit(STMMAC_RESET_REQUESTED, &priv->state);
217         stmmac_service_event_schedule(priv);
218 }
219
220 /**
221  * stmmac_clk_csr_set - dynamically set the MDC clock
222  * @priv: driver private structure
223  * Description: this is to dynamically set the MDC clock according to the csr
224  * clock input.
225  * Note:
226  *      If a specific clk_csr value is passed from the platform
227  *      this means that the CSR Clock Range selection cannot be
228  *      changed at run-time and it is fixed (as reported in the driver
229  *      documentation). Otherwise, the driver will try to set the MDC
230  *      clock dynamically according to the actual clock input.
231  */
232 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
233 {
234         u32 clk_rate;
235
236         clk_rate = clk_get_rate(priv->plat->stmmac_clk);
237
238         /* The platform-provided default clk_csr is assumed valid for all
239          * cases except the ones mentioned below.
240          * For values higher than the IEEE 802.3 specified frequency
241          * we cannot estimate the proper divider, as the frequency of
242          * clk_csr_i is not known. So we do not change the default
243          * divider.
244          */
245         if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
246                 if (clk_rate < CSR_F_35M)
247                         priv->clk_csr = STMMAC_CSR_20_35M;
248                 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
249                         priv->clk_csr = STMMAC_CSR_35_60M;
250                 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
251                         priv->clk_csr = STMMAC_CSR_60_100M;
252                 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
253                         priv->clk_csr = STMMAC_CSR_100_150M;
254                 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
255                         priv->clk_csr = STMMAC_CSR_150_250M;
256                 else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
257                         priv->clk_csr = STMMAC_CSR_250_300M;
258         }
259
260         if (priv->plat->has_sun8i) {
261                 if (clk_rate > 160000000)
262                         priv->clk_csr = 0x03;
263                 else if (clk_rate > 80000000)
264                         priv->clk_csr = 0x02;
265                 else if (clk_rate > 40000000)
266                         priv->clk_csr = 0x01;
267                 else
268                         priv->clk_csr = 0;
269         }
270
271         if (priv->plat->has_xgmac) {
272                 if (clk_rate > 400000000)
273                         priv->clk_csr = 0x5;
274                 else if (clk_rate > 350000000)
275                         priv->clk_csr = 0x4;
276                 else if (clk_rate > 300000000)
277                         priv->clk_csr = 0x3;
278                 else if (clk_rate > 250000000)
279                         priv->clk_csr = 0x2;
280                 else if (clk_rate > 150000000)
281                         priv->clk_csr = 0x1;
282                 else
283                         priv->clk_csr = 0x0;
284         }
285 }
286
287 static void print_pkt(unsigned char *buf, int len)
288 {
289         pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
290         print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
291 }
292
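/* stmmac_tx_avail - return the number of free descriptors in the TX ring
 * @priv: driver private structure
 * @queue: TX queue index
 */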
293 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
294 {
295         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
296         u32 avail;
297
298         if (tx_q->dirty_tx > tx_q->cur_tx)
299                 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
300         else
301                 avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
302
303         return avail;
304 }
305
306 /**
307  * stmmac_rx_dirty - Get RX queue dirty
308  * @priv: driver private structure
309  * @queue: RX queue index
310  */
311 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
312 {
313         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
314         u32 dirty;
315
316         if (rx_q->dirty_rx <= rx_q->cur_rx)
317                 dirty = rx_q->cur_rx - rx_q->dirty_rx;
318         else
319                 dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
320
321         return dirty;
322 }
323
324 /**
325  * stmmac_hw_fix_mac_speed - callback for speed selection
326  * @priv: driver private structure
327  * Description: on some platforms (e.g. ST), some HW system configuration
328  * registers have to be set according to the link speed negotiated.
329  */
330 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
331 {
332         struct net_device *ndev = priv->dev;
333         struct phy_device *phydev = ndev->phydev;
334
335         if (likely(priv->plat->fix_mac_speed))
336                 priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
337 }
338
339 /**
340  * stmmac_enable_eee_mode - check and enter LPI mode
341  * @priv: driver private structure
342  * Description: this function checks that all TX queues have finished their
343  * work and, if so, puts the MAC transmitter into LPI mode (used for EEE).
344  */
345 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
346 {
347         u32 tx_cnt = priv->plat->tx_queues_to_use;
348         u32 queue;
349
350         /* check if all TX queues have the work finished */
351         for (queue = 0; queue < tx_cnt; queue++) {
352                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
353
354                 if (tx_q->dirty_tx != tx_q->cur_tx)
355                         return; /* still unfinished work */
356         }
357
358         /* Check and enter in LPI mode */
359         if (!priv->tx_path_in_lpi_mode)
360                 stmmac_set_eee_mode(priv, priv->hw,
361                                 priv->plat->en_tx_lpi_clockgating);
362 }
363
364 /**
365  * stmmac_disable_eee_mode - disable and exit from LPI mode
366  * @priv: driver private structure
367  * Description: this function exits and disables EEE if the TX path is in
368  * the LPI state. It is called from the xmit path.
369  */
370 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
371 {
372         stmmac_reset_eee_mode(priv, priv->hw);
373         del_timer_sync(&priv->eee_ctrl_timer);
374         priv->tx_path_in_lpi_mode = false;
375 }
376
377 /**
378  * stmmac_eee_ctrl_timer - EEE TX SW timer.
379  * @t: pointer to the EEE control timer
380  * Description:
381  *  if there is no data transfer and if we are not in LPI state,
382  *  then MAC Transmitter can be moved to LPI state.
383  */
384 static void stmmac_eee_ctrl_timer(struct timer_list *t)
385 {
386         struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
387
388         stmmac_enable_eee_mode(priv);
389         mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
390 }
391
392 /**
393  * stmmac_eee_init - init EEE
394  * @priv: driver private structure
395  * Description:
396  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
397  *  can also manage EEE, this function enables the LPI state and starts the
398  *  related timer.
399  */
400 bool stmmac_eee_init(struct stmmac_priv *priv)
401 {
402         struct net_device *ndev = priv->dev;
403         int interface = priv->plat->interface;
404         bool ret = false;
405
406         if ((interface != PHY_INTERFACE_MODE_MII) &&
407             (interface != PHY_INTERFACE_MODE_GMII) &&
408             !phy_interface_mode_is_rgmii(interface))
409                 goto out;
410
411         /* When using PCS we cannot deal with the PHY registers at this stage,
412          * so we do not support extra features like EEE.
413          */
414         if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
415             (priv->hw->pcs == STMMAC_PCS_TBI) ||
416             (priv->hw->pcs == STMMAC_PCS_RTBI))
417                 goto out;
418
419         /* MAC core supports the EEE feature. */
420         if (priv->dma_cap.eee) {
421                 int tx_lpi_timer = priv->tx_lpi_timer;
422
423                 /* Check if the PHY supports EEE */
424                 if (phy_init_eee(ndev->phydev, 1)) {
425                         /* To manage at run-time if the EEE cannot be supported
426                          * anymore (for example because the lp caps have been
427                          * changed).
428                          * In that case the driver disables its own timers.
429                          */
430                         mutex_lock(&priv->lock);
431                         if (priv->eee_active) {
432                                 netdev_dbg(priv->dev, "disable EEE\n");
433                                 del_timer_sync(&priv->eee_ctrl_timer);
434                                 stmmac_set_eee_timer(priv, priv->hw, 0,
435                                                 tx_lpi_timer);
436                         }
437                         priv->eee_active = 0;
438                         mutex_unlock(&priv->lock);
439                         goto out;
440                 }
441                 /* Activate the EEE and start timers */
442                 mutex_lock(&priv->lock);
443                 if (!priv->eee_active) {
444                         priv->eee_active = 1;
445                         timer_setup(&priv->eee_ctrl_timer,
446                                     stmmac_eee_ctrl_timer, 0);
447                         mod_timer(&priv->eee_ctrl_timer,
448                                   STMMAC_LPI_T(eee_timer));
449
450                         stmmac_set_eee_timer(priv, priv->hw,
451                                         STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
452                 }
453                 /* Set HW EEE according to the speed */
454                 stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);
455
456                 ret = true;
457                 mutex_unlock(&priv->lock);
458
459                 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
460         }
461 out:
462         return ret;
463 }
464
465 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
466  * @priv: driver private structure
467  * @p : descriptor pointer
468  * @skb : the socket buffer
469  * Description :
470  * This function reads the timestamp from the descriptor, performs some
471  * sanity checks and passes it to the stack.
472  */
473 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
474                                    struct dma_desc *p, struct sk_buff *skb)
475 {
476         struct skb_shared_hwtstamps shhwtstamp;
477         u64 ns;
478
479         if (!priv->hwts_tx_en)
480                 return;
481
482         /* exit if skb doesn't support hw tstamp */
483         if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
484                 return;
485
486         /* check tx tstamp status */
487         if (stmmac_get_tx_timestamp_status(priv, p)) {
488                 /* get the valid tstamp */
489                 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
490
491                 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
492                 shhwtstamp.hwtstamp = ns_to_ktime(ns);
493
494                 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
495                 /* pass tstamp to stack */
496                 skb_tstamp_tx(skb, &shhwtstamp);
497         }
498
499         return;
500 }
501
502 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
503  * @priv: driver private structure
504  * @p : descriptor pointer
505  * @np : next descriptor pointer
506  * @skb : the socket buffer
507  * Description :
508  * This function will read received packet's timestamp from the descriptor
509  * and pass it to the stack. It also performs some sanity checks.
510  */
511 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
512                                    struct dma_desc *np, struct sk_buff *skb)
513 {
514         struct skb_shared_hwtstamps *shhwtstamp = NULL;
515         struct dma_desc *desc = p;
516         u64 ns;
517
518         if (!priv->hwts_rx_en)
519                 return;
520         /* For GMAC4, the valid timestamp is from CTX next desc. */
521         if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
522                 desc = np;
523
524         /* Check if timestamp is available */
525         if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
526                 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
527                 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
528                 shhwtstamp = skb_hwtstamps(skb);
529                 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
530                 shhwtstamp->hwtstamp = ns_to_ktime(ns);
531         } else  {
532                 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
533         }
534 }
535
536 /**
537  *  stmmac_hwtstamp_ioctl - control hardware timestamping.
538  *  @dev: device pointer.
539  *  @ifr: An IOCTL specific structure, that can contain a pointer to
540  *  a proprietary structure used to pass information to the driver.
541  *  Description:
542  *  This function configures the MAC to enable/disable both outgoing(TX)
543  *  and incoming(RX) packets time stamping based on user input.
544  *  Return Value:
545  *  0 on success and an appropriate -ve integer on failure.
546  */
547 static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
548 {
549         struct stmmac_priv *priv = netdev_priv(dev);
550         struct hwtstamp_config config;
551         struct timespec64 now;
552         u64 temp = 0;
553         u32 ptp_v2 = 0;
554         u32 tstamp_all = 0;
555         u32 ptp_over_ipv4_udp = 0;
556         u32 ptp_over_ipv6_udp = 0;
557         u32 ptp_over_ethernet = 0;
558         u32 snap_type_sel = 0;
559         u32 ts_master_en = 0;
560         u32 ts_event_en = 0;
561         u32 value = 0;
562         u32 sec_inc;
563         bool xmac;
564
565         xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
566
567         if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
568                 netdev_alert(priv->dev, "No support for HW time stamping\n");
569                 priv->hwts_tx_en = 0;
570                 priv->hwts_rx_en = 0;
571
572                 return -EOPNOTSUPP;
573         }
574
575         if (copy_from_user(&config, ifr->ifr_data,
576                            sizeof(struct hwtstamp_config)))
577                 return -EFAULT;
578
579         netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
580                    __func__, config.flags, config.tx_type, config.rx_filter);
581
582         /* reserved for future extensions */
583         if (config.flags)
584                 return -EINVAL;
585
586         if (config.tx_type != HWTSTAMP_TX_OFF &&
587             config.tx_type != HWTSTAMP_TX_ON)
588                 return -ERANGE;
589
590         if (priv->adv_ts) {
591                 switch (config.rx_filter) {
592                 case HWTSTAMP_FILTER_NONE:
593                         /* do not time stamp any incoming packet */
594                         config.rx_filter = HWTSTAMP_FILTER_NONE;
595                         break;
596
597                 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
598                         /* PTP v1, UDP, any kind of event packet */
599                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
600                         /* take time stamp for all event messages */
601                         if (xmac)
602                                 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
603                         else
604                                 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
605
606                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
607                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
608                         break;
609
610                 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
611                         /* PTP v1, UDP, Sync packet */
612                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
613                         /* take time stamp for SYNC messages only */
614                         ts_event_en = PTP_TCR_TSEVNTENA;
615
616                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
617                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
618                         break;
619
620                 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
621                         /* PTP v1, UDP, Delay_req packet */
622                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
623                         /* take time stamp for Delay_Req messages only */
624                         ts_master_en = PTP_TCR_TSMSTRENA;
625                         ts_event_en = PTP_TCR_TSEVNTENA;
626
627                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
628                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
629                         break;
630
631                 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
632                         /* PTP v2, UDP, any kind of event packet */
633                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
634                         ptp_v2 = PTP_TCR_TSVER2ENA;
635                         /* take time stamp for all event messages */
636                         if (xmac)
637                                 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
638                         else
639                                 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
640
641                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
642                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
643                         break;
644
645                 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
646                         /* PTP v2, UDP, Sync packet */
647                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
648                         ptp_v2 = PTP_TCR_TSVER2ENA;
649                         /* take time stamp for SYNC messages only */
650                         ts_event_en = PTP_TCR_TSEVNTENA;
651
652                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
653                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
654                         break;
655
656                 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
657                         /* PTP v2, UDP, Delay_req packet */
658                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
659                         ptp_v2 = PTP_TCR_TSVER2ENA;
660                         /* take time stamp for Delay_Req messages only */
661                         ts_master_en = PTP_TCR_TSMSTRENA;
662                         ts_event_en = PTP_TCR_TSEVNTENA;
663
664                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
665                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
666                         break;
667
668                 case HWTSTAMP_FILTER_PTP_V2_EVENT:
669                         /* PTP v2/802.1AS, any layer, any kind of event packet */
670                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
671                         ptp_v2 = PTP_TCR_TSVER2ENA;
672                         /* take time stamp for all event messages */
673                         if (xmac)
674                                 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
675                         else
676                                 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
677
678                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
679                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
680                         ptp_over_ethernet = PTP_TCR_TSIPENA;
681                         break;
682
683                 case HWTSTAMP_FILTER_PTP_V2_SYNC:
684                         /* PTP v2/802.1AS, any layer, Sync packet */
685                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
686                         ptp_v2 = PTP_TCR_TSVER2ENA;
687                         /* take time stamp for SYNC messages only */
688                         ts_event_en = PTP_TCR_TSEVNTENA;
689
690                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
691                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
692                         ptp_over_ethernet = PTP_TCR_TSIPENA;
693                         break;
694
695                 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
696                         /* PTP v2/802.1AS, any layer, Delay_req packet */
697                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
698                         ptp_v2 = PTP_TCR_TSVER2ENA;
699                         /* take time stamp for Delay_Req messages only */
700                         ts_master_en = PTP_TCR_TSMSTRENA;
701                         ts_event_en = PTP_TCR_TSEVNTENA;
702
703                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
704                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
705                         ptp_over_ethernet = PTP_TCR_TSIPENA;
706                         break;
707
708                 case HWTSTAMP_FILTER_NTP_ALL:
709                 case HWTSTAMP_FILTER_ALL:
710                         /* time stamp any incoming packet */
711                         config.rx_filter = HWTSTAMP_FILTER_ALL;
712                         tstamp_all = PTP_TCR_TSENALL;
713                         break;
714
715                 default:
716                         return -ERANGE;
717                 }
718         } else {
719                 switch (config.rx_filter) {
720                 case HWTSTAMP_FILTER_NONE:
721                         config.rx_filter = HWTSTAMP_FILTER_NONE;
722                         break;
723                 default:
724                         /* PTP v1, UDP, any kind of event packet */
725                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
726                         break;
727                 }
728         }
729         priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
730         priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
731
732         if (!priv->hwts_tx_en && !priv->hwts_rx_en)
733                 stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
734         else {
735                 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
736                          tstamp_all | ptp_v2 | ptp_over_ethernet |
737                          ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
738                          ts_master_en | snap_type_sel);
739                 stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
740
741                 /* program Sub Second Increment reg */
742                 stmmac_config_sub_second_increment(priv,
743                                 priv->ptpaddr, priv->plat->clk_ptp_rate,
744                                 xmac, &sec_inc);
745                 temp = div_u64(1000000000ULL, sec_inc);
746
747                 /* Store sub second increment and flags for later use */
748                 priv->sub_second_inc = sec_inc;
749                 priv->systime_flags = value;
750
751                 /* calculate default added value:
752                  * formula is :
753                  * addend = (2^32)/freq_div_ratio;
754                  * where, freq_div_ratio = 1e9ns/sec_inc
755                  */
756                 temp = (u64)(temp << 32);
757                 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
758                 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
759
760                 /* initialize system time */
761                 ktime_get_real_ts64(&now);
762
763                 /* lower 32 bits of tv_sec are safe until y2106 */
764                 stmmac_init_systime(priv, priv->ptpaddr,
765                                 (u32)now.tv_sec, now.tv_nsec);
766         }
767
768         return copy_to_user(ifr->ifr_data, &config,
769                             sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
770 }
771
772 /**
773  * stmmac_init_ptp - init PTP
774  * @priv: driver private structure
775  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
776  * This is done by looking at the HW cap. register.
777  * This function also registers the ptp driver.
778  */
779 static int stmmac_init_ptp(struct stmmac_priv *priv)
780 {
781         bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
782
783         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
784                 return -EOPNOTSUPP;
785
786         priv->adv_ts = 0;
787         /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
788         if (xmac && priv->dma_cap.atime_stamp)
789                 priv->adv_ts = 1;
790         /* Dwmac 3.x core with extend_desc can support adv_ts */
791         else if (priv->extend_desc && priv->dma_cap.atime_stamp)
792                 priv->adv_ts = 1;
793
794         if (priv->dma_cap.time_stamp)
795                 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
796
797         if (priv->adv_ts)
798                 netdev_info(priv->dev,
799                             "IEEE 1588-2008 Advanced Timestamp supported\n");
800
801         priv->hwts_tx_en = 0;
802         priv->hwts_rx_en = 0;
803
804         stmmac_ptp_register(priv);
805
806         return 0;
807 }
808
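/* stmmac_release_ptp - disable the optional PTP reference clock and
 * unregister the PTP clock driver (reverse of stmmac_init_ptp).
 */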
809 static void stmmac_release_ptp(struct stmmac_priv *priv)
810 {
811         if (priv->plat->clk_ptp_ref)
812                 clk_disable_unprepare(priv->plat->clk_ptp_ref);
813         stmmac_ptp_unregister(priv);
814 }
815
816 /**
817  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
818  *  @priv: driver private structure
819  *  Description: It is used for configuring the flow control in all queues
820  */
821 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
822 {
823         u32 tx_cnt = priv->plat->tx_queues_to_use;
824
825         stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
826                         priv->pause, tx_cnt);
827 }
828
829 /**
830  * stmmac_adjust_link - adjusts the link parameters
831  * @dev: net device structure
832  * Description: this is the helper called by the physical abstraction layer
833  * drivers to communicate the phy link status. According to the speed and
834  * duplex, this driver can invoke registered glue-logic as well.
835  * It also invokes the EEE initialization because the link may have switched
836  * to a different (EEE-capable) network.
837  */
838 static void stmmac_adjust_link(struct net_device *dev)
839 {
840         struct stmmac_priv *priv = netdev_priv(dev);
841         struct phy_device *phydev = dev->phydev;
842         bool new_state = false;
843
844         if (!phydev)
845                 return;
846
847         mutex_lock(&priv->lock);
848
849         if (phydev->link) {
850                 u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
851
852                 /* Now we make sure that we can be in full duplex mode.
853                  * If not, we operate in half-duplex mode. */
854                 if (phydev->duplex != priv->oldduplex) {
855                         new_state = true;
856                         if (!phydev->duplex)
857                                 ctrl &= ~priv->hw->link.duplex;
858                         else
859                                 ctrl |= priv->hw->link.duplex;
860                         priv->oldduplex = phydev->duplex;
861                 }
862                 /* Flow Control operation */
863                 if (phydev->pause)
864                         stmmac_mac_flow_ctrl(priv, phydev->duplex);
865
866                 if (phydev->speed != priv->speed) {
867                         new_state = true;
868                         ctrl &= ~priv->hw->link.speed_mask;
869                         switch (phydev->speed) {
870                         case SPEED_1000:
871                                 ctrl |= priv->hw->link.speed1000;
872                                 break;
873                         case SPEED_100:
874                                 ctrl |= priv->hw->link.speed100;
875                                 break;
876                         case SPEED_10:
877                                 ctrl |= priv->hw->link.speed10;
878                                 break;
879                         default:
880                                 netif_warn(priv, link, priv->dev,
881                                            "broken speed: %d\n", phydev->speed);
882                                 phydev->speed = SPEED_UNKNOWN;
883                                 break;
884                         }
885                         if (phydev->speed != SPEED_UNKNOWN)
886                                 stmmac_hw_fix_mac_speed(priv);
887                         priv->speed = phydev->speed;
888                 }
889
890                 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
891
892                 if (!priv->oldlink) {
893                         new_state = true;
894                         priv->oldlink = true;
895                 }
896         } else if (priv->oldlink) {
897                 new_state = true;
898                 priv->oldlink = false;
899                 priv->speed = SPEED_UNKNOWN;
900                 priv->oldduplex = DUPLEX_UNKNOWN;
901         }
902
903         if (new_state && netif_msg_link(priv))
904                 phy_print_status(phydev);
905
906         mutex_unlock(&priv->lock);
907
908         if (phydev->is_pseudo_fixed_link)
909                 /* Stop the PHY layer from calling the link adjustment hook in
910                  * case a switch is attached to the stmmac driver.
911                  */
912                 phydev->irq = PHY_IGNORE_INTERRUPT;
913         else
914                 /* At this stage, init the EEE if supported.
915                  * Never called in case of fixed_link.
916                  */
917                 priv->eee_enabled = stmmac_eee_init(priv);
918 }
919
920 /**
921  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
922  * @priv: driver private structure
923  * Description: this verifies whether the HW supports the Physical Coding
924  * Sublayer (PCS), an interface that can be used when the MAC is
925  * configured for the TBI, RTBI, or SGMII PHY interface.
926  */
927 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
928 {
929         int interface = priv->plat->interface;
930
931         if (priv->dma_cap.pcs) {
932                 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
933                     (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
934                     (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
935                     (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
936                         netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
937                         priv->hw->pcs = STMMAC_PCS_RGMII;
938                 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
939                         netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
940                         priv->hw->pcs = STMMAC_PCS_SGMII;
941                 }
942         }
943 }
944
945 /**
946  * stmmac_init_phy - PHY initialization
947  * @dev: net device structure
948  * Description: it initializes the driver's PHY state, and attaches the PHY
949  * to the mac driver.
950  *  Return value:
951  *  0 on success
952  */
953 static int stmmac_init_phy(struct net_device *dev)
954 {
955         struct stmmac_priv *priv = netdev_priv(dev);
956         u32 tx_cnt = priv->plat->tx_queues_to_use;
957         struct phy_device *phydev;
958         char phy_id_fmt[MII_BUS_ID_SIZE + 3];
959         char bus_id[MII_BUS_ID_SIZE];
960         int interface = priv->plat->interface;
961         int max_speed = priv->plat->max_speed;
962         priv->oldlink = false;
963         priv->speed = SPEED_UNKNOWN;
964         priv->oldduplex = DUPLEX_UNKNOWN;
965
966         if (priv->plat->phy_node) {
967                 phydev = of_phy_connect(dev, priv->plat->phy_node,
968                                         &stmmac_adjust_link, 0, interface);
969         } else {
970                 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
971                          priv->plat->bus_id);
972
973                 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
974                          priv->plat->phy_addr);
975                 netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
976                            phy_id_fmt);
977
978                 phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
979                                      interface);
980         }
981
982         if (IS_ERR_OR_NULL(phydev)) {
983                 netdev_err(priv->dev, "Could not attach to PHY\n");
984                 if (!phydev)
985                         return -ENODEV;
986
987                 return PTR_ERR(phydev);
988         }
989
990         /* Stop Advertising 1000BASE Capability if interface is not GMII */
991         if ((interface == PHY_INTERFACE_MODE_MII) ||
992             (interface == PHY_INTERFACE_MODE_RMII) ||
993                 (max_speed < 1000 && max_speed > 0))
994                 phy_set_max_speed(phydev, SPEED_100);
995
996         /*
997          * Half-duplex mode is not supported with multiqueue;
998          * half-duplex can only work with a single queue.
999          */
1000         if (tx_cnt > 1) {
1001                 phy_remove_link_mode(phydev,
1002                                      ETHTOOL_LINK_MODE_10baseT_Half_BIT);
1003                 phy_remove_link_mode(phydev,
1004                                      ETHTOOL_LINK_MODE_100baseT_Half_BIT);
1005                 phy_remove_link_mode(phydev,
1006                                      ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
1007         }
1008
1009         /*
1010          * Broken HW is sometimes missing the pull-up resistor on the
1011          * MDIO line, which results in reads to non-existent devices returning
1012          * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
1013          * device as well.
1014          * Note: phydev->phy_id is the result of reading the UID PHY registers.
1015          */
1016         if (!priv->plat->phy_node && phydev->phy_id == 0) {
1017                 phy_disconnect(phydev);
1018                 return -ENODEV;
1019         }
1020
1021         /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
1022          * subsequent PHY polling; make sure we force a link transition if
1023          * we have a UP/DOWN/UP transition
1024          */
1025         if (phydev->is_pseudo_fixed_link)
1026                 phydev->irq = PHY_POLL;
1027
1028         phy_attached_info(phydev);
1029         return 0;
1030 }
1031
1032 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1033 {
1034         u32 rx_cnt = priv->plat->rx_queues_to_use;
1035         void *head_rx;
1036         u32 queue;
1037
1038         /* Display RX rings */
1039         for (queue = 0; queue < rx_cnt; queue++) {
1040                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1041
1042                 pr_info("\tRX Queue %u rings\n", queue);
1043
1044                 if (priv->extend_desc)
1045                         head_rx = (void *)rx_q->dma_erx;
1046                 else
1047                         head_rx = (void *)rx_q->dma_rx;
1048
1049                 /* Display RX ring */
1050                 stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
1051         }
1052 }
1053
1054 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1055 {
1056         u32 tx_cnt = priv->plat->tx_queues_to_use;
1057         void *head_tx;
1058         u32 queue;
1059
1060         /* Display TX rings */
1061         for (queue = 0; queue < tx_cnt; queue++) {
1062                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1063
1064                 pr_info("\tTX Queue %u rings\n", queue);
1065
1066                 if (priv->extend_desc)
1067                         head_tx = (void *)tx_q->dma_etx;
1068                 else
1069                         head_tx = (void *)tx_q->dma_tx;
1070
1071                 stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
1072         }
1073 }
1074
1075 static void stmmac_display_rings(struct stmmac_priv *priv)
1076 {
1077         /* Display RX ring */
1078         stmmac_display_rx_rings(priv);
1079
1080         /* Display TX ring */
1081         stmmac_display_tx_rings(priv);
1082 }
1083
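/* stmmac_set_bfsize - map the MTU to a DMA buffer size: 8KiB for MTU >= 4KiB,
 * 4KiB for MTU >= 2KiB, 2KiB for MTU > DEFAULT_BUFSIZE, otherwise
 * DEFAULT_BUFSIZE.
 */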
1084 static int stmmac_set_bfsize(int mtu, int bufsize)
1085 {
1086         int ret = bufsize;
1087
1088         if (mtu >= BUF_SIZE_4KiB)
1089                 ret = BUF_SIZE_8KiB;
1090         else if (mtu >= BUF_SIZE_2KiB)
1091                 ret = BUF_SIZE_4KiB;
1092         else if (mtu > DEFAULT_BUFSIZE)
1093                 ret = BUF_SIZE_2KiB;
1094         else
1095                 ret = DEFAULT_BUFSIZE;
1096
1097         return ret;
1098 }
1099
1100 /**
1101  * stmmac_clear_rx_descriptors - clear RX descriptors
1102  * @priv: driver private structure
1103  * @queue: RX queue index
1104  * Description: this function is called to clear the RX descriptors
1105  * whether basic or extended descriptors are used.
1106  */
1107 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1108 {
1109         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1110         int i;
1111
1112         /* Clear the RX descriptors */
1113         for (i = 0; i < DMA_RX_SIZE; i++)
1114                 if (priv->extend_desc)
1115                         stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1116                                         priv->use_riwt, priv->mode,
1117                                         (i == DMA_RX_SIZE - 1));
1118                 else
1119                         stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1120                                         priv->use_riwt, priv->mode,
1121                                         (i == DMA_RX_SIZE - 1));
1122 }
1123
1124 /**
1125  * stmmac_clear_tx_descriptors - clear tx descriptors
1126  * @priv: driver private structure
1127  * @queue: TX queue index.
1128  * Description: this function is called to clear the TX descriptors
1129  * whether basic or extended descriptors are used.
1130  */
1131 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1132 {
1133         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1134         int i;
1135
1136         /* Clear the TX descriptors */
1137         for (i = 0; i < DMA_TX_SIZE; i++)
1138                 if (priv->extend_desc)
1139                         stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1140                                         priv->mode, (i == DMA_TX_SIZE - 1));
1141                 else
1142                         stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1143                                         priv->mode, (i == DMA_TX_SIZE - 1));
1144 }
1145
1146 /**
1147  * stmmac_clear_descriptors - clear descriptors
1148  * @priv: driver private structure
1149  * Description: this function is called to clear the TX and RX descriptors
1150  * whether basic or extended descriptors are used.
1151  */
1152 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1153 {
1154         u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1155         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1156         u32 queue;
1157
1158         /* Clear the RX descriptors */
1159         for (queue = 0; queue < rx_queue_cnt; queue++)
1160                 stmmac_clear_rx_descriptors(priv, queue);
1161
1162         /* Clear the TX descriptors */
1163         for (queue = 0; queue < tx_queue_cnt; queue++)
1164                 stmmac_clear_tx_descriptors(priv, queue);
1165 }
1166
1167 /**
1168  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1169  * @priv: driver private structure
1170  * @p: descriptor pointer
1171  * @i: descriptor index
1172  * @flags: gfp flag
1173  * @queue: RX queue index
1174  * Description: this function is called to allocate a receive buffer, perform
1175  * the DMA mapping and init the descriptor.
1176  */
1177 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1178                                   int i, gfp_t flags, u32 queue)
1179 {
1180         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1181         struct sk_buff *skb;
1182
1183         skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1184         if (!skb) {
1185                 netdev_err(priv->dev,
1186                            "%s: Rx init fails; skb is NULL\n", __func__);
1187                 return -ENOMEM;
1188         }
1189         rx_q->rx_skbuff[i] = skb;
1190         rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1191                                                 priv->dma_buf_sz,
1192                                                 DMA_FROM_DEVICE);
1193         if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1194                 netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1195                 dev_kfree_skb_any(skb);
1196                 return -EINVAL;
1197         }
1198
1199         stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);
1200
1201         if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1202                 stmmac_init_desc3(priv, p);
1203
1204         return 0;
1205 }
1206
1207 /**
1208  * stmmac_free_rx_buffer - free RX dma buffers
1209  * @priv: private structure
1210  * @queue: RX queue index
1211  * @i: buffer index.
1212  */
1213 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1214 {
1215         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1216
1217         if (rx_q->rx_skbuff[i]) {
1218                 dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1219                                  priv->dma_buf_sz, DMA_FROM_DEVICE);
1220                 dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1221         }
1222         rx_q->rx_skbuff[i] = NULL;
1223 }
1224
1225 /**
1226  * stmmac_free_tx_buffer - free TX dma buffers
1227  * @priv: private structure
1228  * @queue: TX queue index
1229  * @i: buffer index.
1230  */
1231 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1232 {
1233         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1234
1235         if (tx_q->tx_skbuff_dma[i].buf) {
1236                 if (tx_q->tx_skbuff_dma[i].map_as_page)
1237                         dma_unmap_page(priv->device,
1238                                        tx_q->tx_skbuff_dma[i].buf,
1239                                        tx_q->tx_skbuff_dma[i].len,
1240                                        DMA_TO_DEVICE);
1241                 else
1242                         dma_unmap_single(priv->device,
1243                                          tx_q->tx_skbuff_dma[i].buf,
1244                                          tx_q->tx_skbuff_dma[i].len,
1245                                          DMA_TO_DEVICE);
1246         }
1247
1248         if (tx_q->tx_skbuff[i]) {
1249                 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1250                 tx_q->tx_skbuff[i] = NULL;
1251                 tx_q->tx_skbuff_dma[i].buf = 0;
1252                 tx_q->tx_skbuff_dma[i].map_as_page = false;
1253         }
1254 }
1255
1256 /**
1257  * init_dma_rx_desc_rings - init the RX descriptor rings
1258  * @dev: net device structure
1259  * @flags: gfp flag.
1260  * Description: this function initializes the DMA RX descriptors
1261  * and allocates the socket buffers. It supports the chained and ring
1262  * modes.
1263  */
1264 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1265 {
1266         struct stmmac_priv *priv = netdev_priv(dev);
1267         u32 rx_count = priv->plat->rx_queues_to_use;
1268         int ret = -ENOMEM;
1269         int bfsize = 0;
1270         int queue;
1271         int i;
1272
1273         bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
1274         if (bfsize < 0)
1275                 bfsize = 0;
1276
1277         if (bfsize < BUF_SIZE_16KiB)
1278                 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1279
1280         priv->dma_buf_sz = bfsize;
1281
1282         /* RX INITIALIZATION */
1283         netif_dbg(priv, probe, priv->dev,
1284                   "SKB addresses:\nskb\t\tskb data\tdma data\n");
1285
1286         for (queue = 0; queue < rx_count; queue++) {
1287                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1288
1289                 netif_dbg(priv, probe, priv->dev,
1290                           "(%s) dma_rx_phy=0x%08x\n", __func__,
1291                           (u32)rx_q->dma_rx_phy);
1292
1293                 for (i = 0; i < DMA_RX_SIZE; i++) {
1294                         struct dma_desc *p;
1295
1296                         if (priv->extend_desc)
1297                                 p = &((rx_q->dma_erx + i)->basic);
1298                         else
1299                                 p = rx_q->dma_rx + i;
1300
1301                         ret = stmmac_init_rx_buffers(priv, p, i, flags,
1302                                                      queue);
1303                         if (ret)
1304                                 goto err_init_rx_buffers;
1305
1306                         netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1307                                   rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1308                                   (unsigned int)rx_q->rx_skbuff_dma[i]);
1309                 }
1310
1311                 rx_q->cur_rx = 0;
1312                 rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1313
1314                 stmmac_clear_rx_descriptors(priv, queue);
1315
1316                 /* Setup the chained descriptor addresses */
1317                 if (priv->mode == STMMAC_CHAIN_MODE) {
1318                         if (priv->extend_desc)
1319                                 stmmac_mode_init(priv, rx_q->dma_erx,
1320                                                 rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
1321                         else
1322                                 stmmac_mode_init(priv, rx_q->dma_rx,
1323                                                 rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
1324                 }
1325         }
1326
1327         buf_sz = bfsize;
1328
1329         return 0;
1330
1331 err_init_rx_buffers:
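        /* Unwind: free the RX buffers allocated so far, walking the queues
         * backwards from the one that failed.
         */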
1332         while (queue >= 0) {
1333                 while (--i >= 0)
1334                         stmmac_free_rx_buffer(priv, queue, i);
1335
1336                 if (queue == 0)
1337                         break;
1338
1339                 i = DMA_RX_SIZE;
1340                 queue--;
1341         }
1342
1343         return ret;
1344 }
1345
1346 /**
1347  * init_dma_tx_desc_rings - init the TX descriptor rings
1348  * @dev: net device structure.
1349  * Description: this function initializes the DMA TX descriptors
1350  * and allocates the socket buffers. It supports the chained and ring
1351  * modes.
1352  */
1353 static int init_dma_tx_desc_rings(struct net_device *dev)
1354 {
1355         struct stmmac_priv *priv = netdev_priv(dev);
1356         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1357         u32 queue;
1358         int i;
1359
1360         for (queue = 0; queue < tx_queue_cnt; queue++) {
1361                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1362
1363                 netif_dbg(priv, probe, priv->dev,
1364                           "(%s) dma_tx_phy=0x%08x\n", __func__,
1365                          (u32)tx_q->dma_tx_phy);
1366
1367                 /* Setup the chained descriptor addresses */
1368                 if (priv->mode == STMMAC_CHAIN_MODE) {
1369                         if (priv->extend_desc)
1370                                 stmmac_mode_init(priv, tx_q->dma_etx,
1371                                                 tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
1372                         else
1373                                 stmmac_mode_init(priv, tx_q->dma_tx,
1374                                                 tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
1375                 }
1376
1377                 for (i = 0; i < DMA_TX_SIZE; i++) {
1378                         struct dma_desc *p;
1379                         if (priv->extend_desc)
1380                                 p = &((tx_q->dma_etx + i)->basic);
1381                         else
1382                                 p = tx_q->dma_tx + i;
1383
1384                         stmmac_clear_desc(priv, p);
1385
1386                         tx_q->tx_skbuff_dma[i].buf = 0;
1387                         tx_q->tx_skbuff_dma[i].map_as_page = false;
1388                         tx_q->tx_skbuff_dma[i].len = 0;
1389                         tx_q->tx_skbuff_dma[i].last_segment = false;
1390                         tx_q->tx_skbuff[i] = NULL;
1391                 }
1392
1393                 tx_q->dirty_tx = 0;
1394                 tx_q->cur_tx = 0;
1395                 tx_q->mss = 0;
1396
1397                 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1398         }
1399
1400         return 0;
1401 }
1402
1403 /**
1404  * init_dma_desc_rings - init the RX/TX descriptor rings
1405  * @dev: net device structure
1406  * @flags: gfp flag.
1407  * Description: this function initializes the DMA RX/TX descriptors
1408  * and allocates the socket buffers. It supports the chained and ring
1409  * modes.
1410  */
1411 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1412 {
1413         struct stmmac_priv *priv = netdev_priv(dev);
1414         int ret;
1415
1416         ret = init_dma_rx_desc_rings(dev, flags);
1417         if (ret)
1418                 return ret;
1419
1420         ret = init_dma_tx_desc_rings(dev);
1421
1422         stmmac_clear_descriptors(priv);
1423
1424         if (netif_msg_hw(priv))
1425                 stmmac_display_rings(priv);
1426
1427         return ret;
1428 }
1429
1430 /**
1431  * dma_free_rx_skbufs - free RX dma buffers
1432  * @priv: private structure
1433  * @queue: RX queue index
1434  */
1435 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1436 {
1437         int i;
1438
1439         for (i = 0; i < DMA_RX_SIZE; i++)
1440                 stmmac_free_rx_buffer(priv, queue, i);
1441 }
1442
1443 /**
1444  * dma_free_tx_skbufs - free TX dma buffers
1445  * @priv: private structure
1446  * @queue: TX queue index
1447  */
1448 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1449 {
1450         int i;
1451
1452         for (i = 0; i < DMA_TX_SIZE; i++)
1453                 stmmac_free_tx_buffer(priv, queue, i);
1454 }
1455
1456 /**
1457  * free_dma_rx_desc_resources - free RX dma desc resources
1458  * @priv: private structure
1459  */
1460 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1461 {
1462         u32 rx_count = priv->plat->rx_queues_to_use;
1463         u32 queue;
1464
1465         /* Free RX queue resources */
1466         for (queue = 0; queue < rx_count; queue++) {
1467                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1468
1469                 /* Release the DMA RX socket buffers */
1470                 dma_free_rx_skbufs(priv, queue);
1471
1472                 /* Free DMA regions of consistent memory previously allocated */
1473                 if (!priv->extend_desc)
1474                         dma_free_coherent(priv->device,
1475                                           DMA_RX_SIZE * sizeof(struct dma_desc),
1476                                           rx_q->dma_rx, rx_q->dma_rx_phy);
1477                 else
1478                         dma_free_coherent(priv->device, DMA_RX_SIZE *
1479                                           sizeof(struct dma_extended_desc),
1480                                           rx_q->dma_erx, rx_q->dma_rx_phy);
1481
1482                 kfree(rx_q->rx_skbuff_dma);
1483                 kfree(rx_q->rx_skbuff);
1484         }
1485 }
1486
1487 /**
1488  * free_dma_tx_desc_resources - free TX dma desc resources
1489  * @priv: private structure
1490  */
1491 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1492 {
1493         u32 tx_count = priv->plat->tx_queues_to_use;
1494         u32 queue;
1495
1496         /* Free TX queue resources */
1497         for (queue = 0; queue < tx_count; queue++) {
1498                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1499
1500                 /* Release the DMA TX socket buffers */
1501                 dma_free_tx_skbufs(priv, queue);
1502
1503                 /* Free DMA regions of consistent memory previously allocated */
1504                 if (!priv->extend_desc)
1505                         dma_free_coherent(priv->device,
1506                                           DMA_TX_SIZE * sizeof(struct dma_desc),
1507                                           tx_q->dma_tx, tx_q->dma_tx_phy);
1508                 else
1509                         dma_free_coherent(priv->device, DMA_TX_SIZE *
1510                                           sizeof(struct dma_extended_desc),
1511                                           tx_q->dma_etx, tx_q->dma_tx_phy);
1512
1513                 kfree(tx_q->tx_skbuff_dma);
1514                 kfree(tx_q->tx_skbuff);
1515         }
1516 }
1517
1518 /**
1519  * alloc_dma_rx_desc_resources - alloc RX resources.
1520  * @priv: private structure
1521  * Description: according to which descriptor type is in use (extended or
1522  * basic), this function allocates the resources for the RX path: the
1523  * descriptor rings and the per-entry buffer bookkeeping needed to allow
1524  * the zero-copy mechanism.
1525  */
1526 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1527 {
1528         u32 rx_count = priv->plat->rx_queues_to_use;
1529         int ret = -ENOMEM;
1530         u32 queue;
1531
1532         /* RX queues buffers and DMA */
1533         for (queue = 0; queue < rx_count; queue++) {
1534                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1535
1536                 rx_q->queue_index = queue;
1537                 rx_q->priv_data = priv;
1538
1539                 rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1540                                                     sizeof(dma_addr_t),
1541                                                     GFP_KERNEL);
1542                 if (!rx_q->rx_skbuff_dma)
1543                         goto err_dma;
1544
1545                 rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1546                                                 sizeof(struct sk_buff *),
1547                                                 GFP_KERNEL);
1548                 if (!rx_q->rx_skbuff)
1549                         goto err_dma;
1550
1551                 if (priv->extend_desc) {
1552                         rx_q->dma_erx = dma_alloc_coherent(priv->device,
1553                                                            DMA_RX_SIZE * sizeof(struct dma_extended_desc),
1554                                                            &rx_q->dma_rx_phy,
1555                                                            GFP_KERNEL);
1556                         if (!rx_q->dma_erx)
1557                                 goto err_dma;
1558
1559                 } else {
1560                         rx_q->dma_rx = dma_alloc_coherent(priv->device,
1561                                                           DMA_RX_SIZE * sizeof(struct dma_desc),
1562                                                           &rx_q->dma_rx_phy,
1563                                                           GFP_KERNEL);
1564                         if (!rx_q->dma_rx)
1565                                 goto err_dma;
1566                 }
1567         }
1568
1569         return 0;
1570
1571 err_dma:
1572         free_dma_rx_desc_resources(priv);
1573
1574         return ret;
1575 }
1576
1577 /**
1578  * alloc_dma_tx_desc_resources - alloc TX resources.
1579  * @priv: private structure
1580  * Description: according to which descriptor type is in use (extended or
1581  * basic), this function allocates the resources for the TX path: the
1582  * descriptor rings and the per-entry bookkeeping used when socket buffers
1583  * are mapped at transmit time.
1584  */
1585 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1586 {
1587         u32 tx_count = priv->plat->tx_queues_to_use;
1588         int ret = -ENOMEM;
1589         u32 queue;
1590
1591         /* TX queues buffers and DMA */
1592         for (queue = 0; queue < tx_count; queue++) {
1593                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1594
1595                 tx_q->queue_index = queue;
1596                 tx_q->priv_data = priv;
1597
1598                 tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1599                                                     sizeof(*tx_q->tx_skbuff_dma),
1600                                                     GFP_KERNEL);
1601                 if (!tx_q->tx_skbuff_dma)
1602                         goto err_dma;
1603
1604                 tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1605                                                 sizeof(struct sk_buff *),
1606                                                 GFP_KERNEL);
1607                 if (!tx_q->tx_skbuff)
1608                         goto err_dma;
1609
1610                 if (priv->extend_desc) {
1611                         tx_q->dma_etx = dma_alloc_coherent(priv->device,
1612                                                            DMA_TX_SIZE * sizeof(struct dma_extended_desc),
1613                                                            &tx_q->dma_tx_phy,
1614                                                            GFP_KERNEL);
1615                         if (!tx_q->dma_etx)
1616                                 goto err_dma;
1617                 } else {
1618                         tx_q->dma_tx = dma_alloc_coherent(priv->device,
1619                                                           DMA_TX_SIZE * sizeof(struct dma_desc),
1620                                                           &tx_q->dma_tx_phy,
1621                                                           GFP_KERNEL);
1622                         if (!tx_q->dma_tx)
1623                                 goto err_dma;
1624                 }
1625         }
1626
1627         return 0;
1628
1629 err_dma:
1630         free_dma_tx_desc_resources(priv);
1631
1632         return ret;
1633 }
1634
1635 /**
1636  * alloc_dma_desc_resources - alloc TX/RX resources.
1637  * @priv: private structure
1638  * Description: according to which descriptor type is in use (extended or
1639  * basic), this function allocates the resources for the TX and RX paths.
1640  * In case of reception, for example, it pre-allocates the RX socket
1641  * buffers in order to allow the zero-copy mechanism.
1642  */
1643 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1644 {
1645         /* RX Allocation */
1646         int ret = alloc_dma_rx_desc_resources(priv);
1647
1648         if (ret)
1649                 return ret;
1650
1651         ret = alloc_dma_tx_desc_resources(priv);
1652
1653         return ret;
1654 }
1655
1656 /**
1657  * free_dma_desc_resources - free dma desc resources
1658  * @priv: private structure
1659  */
1660 static void free_dma_desc_resources(struct stmmac_priv *priv)
1661 {
1662         /* Release the DMA RX socket buffers */
1663         free_dma_rx_desc_resources(priv);
1664
1665         /* Release the DMA TX socket buffers */
1666         free_dma_tx_desc_resources(priv);
1667 }
1668
1669 /**
1670  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1671  *  @priv: driver private structure
1672  *  Description: It is used for enabling the rx queues in the MAC
1673  */
1674 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1675 {
1676         u32 rx_queues_count = priv->plat->rx_queues_to_use;
1677         int queue;
1678         u8 mode;
1679
1680         for (queue = 0; queue < rx_queues_count; queue++) {
1681                 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1682                 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
1683         }
1684 }
1685
1686 /**
1687  * stmmac_start_rx_dma - start RX DMA channel
1688  * @priv: driver private structure
1689  * @chan: RX channel index
1690  * Description:
1691  * This starts a RX DMA channel
1692  */
1693 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1694 {
1695         netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1696         stmmac_start_rx(priv, priv->ioaddr, chan);
1697 }
1698
1699 /**
1700  * stmmac_start_tx_dma - start TX DMA channel
1701  * @priv: driver private structure
1702  * @chan: TX channel index
1703  * Description:
1704  * This starts a TX DMA channel
1705  */
1706 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1707 {
1708         netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1709         stmmac_start_tx(priv, priv->ioaddr, chan);
1710 }
1711
1712 /**
1713  * stmmac_stop_rx_dma - stop RX DMA channel
1714  * @priv: driver private structure
1715  * @chan: RX channel index
1716  * Description:
1717  * This stops a RX DMA channel
1718  */
1719 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1720 {
1721         netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1722         stmmac_stop_rx(priv, priv->ioaddr, chan);
1723 }
1724
1725 /**
1726  * stmmac_stop_tx_dma - stop TX DMA channel
1727  * @priv: driver private structure
1728  * @chan: TX channel index
1729  * Description:
1730  * This stops a TX DMA channel
1731  */
1732 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1733 {
1734         netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1735         stmmac_stop_tx(priv, priv->ioaddr, chan);
1736 }
1737
1738 /**
1739  * stmmac_start_all_dma - start all RX and TX DMA channels
1740  * @priv: driver private structure
1741  * Description:
1742  * This starts all the RX and TX DMA channels
1743  */
1744 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1745 {
1746         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1747         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1748         u32 chan = 0;
1749
1750         for (chan = 0; chan < rx_channels_count; chan++)
1751                 stmmac_start_rx_dma(priv, chan);
1752
1753         for (chan = 0; chan < tx_channels_count; chan++)
1754                 stmmac_start_tx_dma(priv, chan);
1755 }
1756
1757 /**
1758  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1759  * @priv: driver private structure
1760  * Description:
1761  * This stops the RX and TX DMA channels
1762  */
1763 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1764 {
1765         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1766         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1767         u32 chan = 0;
1768
1769         for (chan = 0; chan < rx_channels_count; chan++)
1770                 stmmac_stop_rx_dma(priv, chan);
1771
1772         for (chan = 0; chan < tx_channels_count; chan++)
1773                 stmmac_stop_tx_dma(priv, chan);
1774 }
1775
1776 /**
1777  *  stmmac_dma_operation_mode - HW DMA operation mode
1778  *  @priv: driver private structure
1779  *  Description: it is used for configuring the DMA operation mode register in
1780  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1781  */
1782 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1783 {
1784         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1785         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1786         int rxfifosz = priv->plat->rx_fifo_size;
1787         int txfifosz = priv->plat->tx_fifo_size;
1788         u32 txmode = 0;
1789         u32 rxmode = 0;
1790         u32 chan = 0;
1791         u8 qmode = 0;
1792
1793         if (rxfifosz == 0)
1794                 rxfifosz = priv->dma_cap.rx_fifo_size;
1795         if (txfifosz == 0)
1796                 txfifosz = priv->dma_cap.tx_fifo_size;
1797
1798         /* Adjust for real per queue fifo size */
1799         rxfifosz /= rx_channels_count;
1800         txfifosz /= tx_channels_count;
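        /* For example, with a 16384 byte RX FIFO shared by four channels,
         * each channel is programmed below with 4096 bytes of FIFO.
         */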
1801
1802         if (priv->plat->force_thresh_dma_mode) {
1803                 txmode = tc;
1804                 rxmode = tc;
1805         } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1806                 /*
1807                  * In case of GMAC, SF mode can be enabled
1808                  * to perform the TX COE in HW. This depends on:
1809                  * 1) TX COE if actually supported
1810                  * 2) There is no bugged Jumbo frame support
1811                  *    that requires not inserting the csum in the TDES.
1812                  */
1813                 txmode = SF_DMA_MODE;
1814                 rxmode = SF_DMA_MODE;
1815                 priv->xstats.threshold = SF_DMA_MODE;
1816         } else {
1817                 txmode = tc;
1818                 rxmode = SF_DMA_MODE;
1819         }
1820
1821         /* configure all channels */
1822         for (chan = 0; chan < rx_channels_count; chan++) {
1823                 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1824
1825                 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1826                                 rxfifosz, qmode);
1827                 stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
1828                                 chan);
1829         }
1830
1831         for (chan = 0; chan < tx_channels_count; chan++) {
1832                 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1833
1834                 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1835                                 txfifosz, qmode);
1836         }
1837 }
1838
1839 /**
1840  * stmmac_tx_clean - to manage the transmission completion
1841  * @priv: driver private structure
1842  * @queue: TX queue index
1843  * Description: it reclaims the transmit resources after transmission completes, handling at most @budget entries.
1844  */
1845 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
1846 {
1847         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1848         unsigned int bytes_compl = 0, pkts_compl = 0;
1849         unsigned int entry, count = 0;
1850
1851         __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
1852
1853         priv->xstats.tx_clean++;
1854
1855         entry = tx_q->dirty_tx;
1856         while ((entry != tx_q->cur_tx) && (count < budget)) {
1857                 struct sk_buff *skb = tx_q->tx_skbuff[entry];
1858                 struct dma_desc *p;
1859                 int status;
1860
1861                 if (priv->extend_desc)
1862                         p = (struct dma_desc *)(tx_q->dma_etx + entry);
1863                 else
1864                         p = tx_q->dma_tx + entry;
1865
1866                 status = stmmac_tx_status(priv, &priv->dev->stats,
1867                                 &priv->xstats, p, priv->ioaddr);
1868                 /* Check if the descriptor is owned by the DMA */
1869                 if (unlikely(status & tx_dma_own))
1870                         break;
1871
1872                 count++;
1873
1874                 /* Make sure descriptor fields are read after reading
1875                  * the own bit.
1876                  */
1877                 dma_rmb();
1878
1879                 /* Just consider the last segment and ...*/
1880                 if (likely(!(status & tx_not_ls))) {
1881                         /* ... verify the status error condition */
1882                         if (unlikely(status & tx_err)) {
1883                                 priv->dev->stats.tx_errors++;
1884                         } else {
1885                                 priv->dev->stats.tx_packets++;
1886                                 priv->xstats.tx_pkt_n++;
1887                         }
1888                         stmmac_get_tx_hwtstamp(priv, p, skb);
1889                 }
1890
1891                 if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1892                         if (tx_q->tx_skbuff_dma[entry].map_as_page)
1893                                 dma_unmap_page(priv->device,
1894                                                tx_q->tx_skbuff_dma[entry].buf,
1895                                                tx_q->tx_skbuff_dma[entry].len,
1896                                                DMA_TO_DEVICE);
1897                         else
1898                                 dma_unmap_single(priv->device,
1899                                                  tx_q->tx_skbuff_dma[entry].buf,
1900                                                  tx_q->tx_skbuff_dma[entry].len,
1901                                                  DMA_TO_DEVICE);
1902                         tx_q->tx_skbuff_dma[entry].buf = 0;
1903                         tx_q->tx_skbuff_dma[entry].len = 0;
1904                         tx_q->tx_skbuff_dma[entry].map_as_page = false;
1905                 }
1906
1907                 stmmac_clean_desc3(priv, tx_q, p);
1908
1909                 tx_q->tx_skbuff_dma[entry].last_segment = false;
1910                 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1911
1912                 if (likely(skb != NULL)) {
1913                         pkts_compl++;
1914                         bytes_compl += skb->len;
1915                         dev_consume_skb_any(skb);
1916                         tx_q->tx_skbuff[entry] = NULL;
1917                 }
1918
1919                 stmmac_release_tx_desc(priv, p, priv->mode);
1920
1921                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1922         }
1923         tx_q->dirty_tx = entry;
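        /* dirty_tx now chases cur_tx: it points at the next entry to reclaim,
         * with indices advancing modulo DMA_TX_SIZE via STMMAC_GET_ENTRY().
         */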
1924
1925         netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1926                                   pkts_compl, bytes_compl);
1927
1928         if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1929                                                                 queue))) &&
1930             stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1931
1932                 netif_dbg(priv, tx_done, priv->dev,
1933                           "%s: restart transmit\n", __func__);
1934                 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1935         }
1936
1937         if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1938                 stmmac_enable_eee_mode(priv);
1939                 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1940         }
1941
1942         __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
1943
1944         return count;
1945 }
1946
1947 /**
1948  * stmmac_tx_err - to manage the tx error
1949  * @priv: driver private structure
1950  * @chan: channel index
1951  * Description: it cleans the descriptors and restarts the transmission
1952  * in case of transmission errors.
1953  */
1954 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1955 {
1956         struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1957         int i;
1958
1959         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1960
1961         stmmac_stop_tx_dma(priv, chan);
1962         dma_free_tx_skbufs(priv, chan);
1963         for (i = 0; i < DMA_TX_SIZE; i++)
1964                 if (priv->extend_desc)
1965                         stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1966                                         priv->mode, (i == DMA_TX_SIZE - 1));
1967                 else
1968                         stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1969                                         priv->mode, (i == DMA_TX_SIZE - 1));
1970         tx_q->dirty_tx = 0;
1971         tx_q->cur_tx = 0;
1972         tx_q->mss = 0;
1973         netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1974         stmmac_start_tx_dma(priv, chan);
1975
1976         priv->dev->stats.tx_errors++;
1977         netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1978 }
1979
1980 /**
1981  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1982  *  @priv: driver private structure
1983  *  @txmode: TX operating mode
1984  *  @rxmode: RX operating mode
1985  *  @chan: channel index
1986  *  Description: it is used for configuring the DMA operation mode at
1987  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1988  *  mode.
1989  */
1990 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1991                                           u32 rxmode, u32 chan)
1992 {
1993         u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1994         u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1995         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1996         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1997         int rxfifosz = priv->plat->rx_fifo_size;
1998         int txfifosz = priv->plat->tx_fifo_size;
1999
2000         if (rxfifosz == 0)
2001                 rxfifosz = priv->dma_cap.rx_fifo_size;
2002         if (txfifosz == 0)
2003                 txfifosz = priv->dma_cap.tx_fifo_size;
2004
2005         /* Adjust for real per queue fifo size */
2006         rxfifosz /= rx_channels_count;
2007         txfifosz /= tx_channels_count;
2008
2009         stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2010         stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2011 }
2012
2013 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2014 {
2015         int ret;
2016
2017         ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2018                         priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2019         if (ret && (ret != -EINVAL)) {
2020                 stmmac_global_err(priv);
2021                 return true;
2022         }
2023
2024         return false;
2025 }
2026
2027 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
2028 {
2029         int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2030                                                  &priv->xstats, chan);
2031         struct stmmac_channel *ch = &priv->channel[chan];
2032         bool needs_work = false;
2033
2034         if ((status & handle_rx) && ch->has_rx) {
2035                 needs_work = true;
2036         } else {
2037                 status &= ~handle_rx;
2038         }
2039
2040         if ((status & handle_tx) && ch->has_tx) {
2041                 needs_work = true;
2042         } else {
2043                 status &= ~handle_tx;
2044         }
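        /* handle_rx/handle_tx are cleared when this channel does not service
         * that direction, so only work the scheduled NAPI poll will actually
         * handle is reported back to the caller.
         */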
2045
2046         if (needs_work && napi_schedule_prep(&ch->napi)) {
2047                 stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2048                 __napi_schedule(&ch->napi);
2049         }
2050
2051         return status;
2052 }
2053
2054 /**
2055  * stmmac_dma_interrupt - DMA ISR
2056  * @priv: driver private structure
2057  * Description: this is the DMA ISR. It is called by the main ISR.
2058  * It calls the dwmac dma routine and schedules the poll method in case
2059  * some work can be done.
2060  */
2061 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2062 {
2063         u32 tx_channel_count = priv->plat->tx_queues_to_use;
2064         u32 rx_channel_count = priv->plat->rx_queues_to_use;
2065         u32 channels_to_check = tx_channel_count > rx_channel_count ?
2066                                 tx_channel_count : rx_channel_count;
2067         u32 chan;
2068         int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2069
2070         /* Make sure we never check beyond our status buffer. */
2071         if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2072                 channels_to_check = ARRAY_SIZE(status);
2073
2074         for (chan = 0; chan < channels_to_check; chan++)
2075                 status[chan] = stmmac_napi_check(priv, chan);
2076
2077         for (chan = 0; chan < tx_channel_count; chan++) {
2078                 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2079                         /* Try to bump up the dma threshold on this failure */
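                        /* The threshold is raised in steps of 64 while it is
                         * still <= 256; nothing is bumped once the driver
                         * operates in Store-And-Forward mode.
                         */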
2080                         if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2081                             (tc <= 256)) {
2082                                 tc += 64;
2083                                 if (priv->plat->force_thresh_dma_mode)
2084                                         stmmac_set_dma_operation_mode(priv,
2085                                                                       tc,
2086                                                                       tc,
2087                                                                       chan);
2088                                 else
2089                                         stmmac_set_dma_operation_mode(priv,
2090                                                                     tc,
2091                                                                     SF_DMA_MODE,
2092                                                                     chan);
2093                                 priv->xstats.threshold = tc;
2094                         }
2095                 } else if (unlikely(status[chan] == tx_hard_error)) {
2096                         stmmac_tx_err(priv, chan);
2097                 }
2098         }
2099 }
2100
2101 /**
2102  * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
2103  * @priv: driver private structure
2104  * Description: this masks the MMC irq; the counters themselves are managed in SW.
2105  */
2106 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2107 {
2108         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2109                             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2110
2111         dwmac_mmc_intr_all_mask(priv->mmcaddr);
2112
2113         if (priv->dma_cap.rmon) {
2114                 dwmac_mmc_ctrl(priv->mmcaddr, mode);
2115                 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2116         } else
2117                 netdev_info(priv->dev, "No MAC Management Counters available\n");
2118 }
2119
2120 /**
2121  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2122  * @priv: driver private structure
2123  * Description:
2124  *  new GMAC chip generations have a register to indicate the
2125  *  presence of the optional features/functions.
2126  *  This can also be used to override the values passed through the
2127  *  platform, which is necessary for the old MAC10/100 and GMAC chips.
2128  */
2129 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2130 {
2131         return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2132 }
2133
2134 /**
2135  * stmmac_check_ether_addr - check if the MAC addr is valid
2136  * @priv: driver private structure
2137  * Description:
2138  * it verifies that the MAC address is valid; in case of failure it
2139  * generates a random MAC address
2140  */
2141 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2142 {
2143         if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2144                 stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2145                 if (!is_valid_ether_addr(priv->dev->dev_addr))
2146                         eth_hw_addr_random(priv->dev);
2147                 netdev_info(priv->dev, "device MAC address %pM\n",
2148                             priv->dev->dev_addr);
2149         }
2150 }
2151
2152 /**
2153  * stmmac_init_dma_engine - DMA init.
2154  * @priv: driver private structure
2155  * Description:
2156  * It inits the DMA invoking the specific MAC/GMAC callback.
2157  * Some DMA parameters can be passed from the platform;
2158  * in case they are not passed, a default is kept for the MAC or GMAC.
2159  */
2160 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2161 {
2162         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2163         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2164         u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2165         struct stmmac_rx_queue *rx_q;
2166         struct stmmac_tx_queue *tx_q;
2167         u32 chan = 0;
2168         int atds = 0;
2169         int ret = 0;
2170
2171         if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2172                 dev_err(priv->device, "Invalid DMA configuration\n");
2173                 return -EINVAL;
2174         }
2175
2176         if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2177                 atds = 1;
2178
2179         ret = stmmac_reset(priv, priv->ioaddr);
2180         if (ret) {
2181                 dev_err(priv->device, "Failed to reset the dma\n");
2182                 return ret;
2183         }
2184
2185         /* DMA Configuration */
2186         stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2187
2188         if (priv->plat->axi)
2189                 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2190
2191         /* DMA RX Channel Configuration */
2192         for (chan = 0; chan < rx_channels_count; chan++) {
2193                 rx_q = &priv->rx_queue[chan];
2194
2195                 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2196                                     rx_q->dma_rx_phy, chan);
2197
2198                 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2199                             (DMA_RX_SIZE * sizeof(struct dma_desc));
2200                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2201                                        rx_q->rx_tail_addr, chan);
2202         }
2203
2204         /* DMA TX Channel Configuration */
2205         for (chan = 0; chan < tx_channels_count; chan++) {
2206                 tx_q = &priv->tx_queue[chan];
2207
2208                 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2209                                     tx_q->dma_tx_phy, chan);
2210
2211                 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2212                 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2213                                        tx_q->tx_tail_addr, chan);
2214         }
2215
2216         /* DMA CSR Channel configuration */
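        /* The common CSRs are programmed for every channel used by either
         * direction, i.e. dma_csr_ch = max(rx_channels, tx_channels).
         */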
2217         for (chan = 0; chan < dma_csr_ch; chan++)
2218                 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2219
2220         return ret;
2221 }
2222
2223 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2224 {
2225         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2226
2227         mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
2228 }
2229
2230 /**
2231  * stmmac_tx_timer - mitigation sw timer for tx.
2232  * @t: timer_list pointer
2233  * Description:
2234  * This is the timer handler that schedules the NAPI poll running stmmac_tx_clean.
2235  */
2236 static void stmmac_tx_timer(struct timer_list *t)
2237 {
2238         struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
2239         struct stmmac_priv *priv = tx_q->priv_data;
2240         struct stmmac_channel *ch;
2241
2242         ch = &priv->channel[tx_q->queue_index];
2243
2244         if (likely(napi_schedule_prep(&ch->napi)))
2245                 __napi_schedule(&ch->napi);
2246 }
2247
2248 /**
2249  * stmmac_init_tx_coalesce - init tx mitigation options.
2250  * @priv: driver private structure
2251  * Description:
2252  * This inits the transmit coalesce parameters: i.e. timer rate,
2253  * timer handler and default threshold used for enabling the
2254  * interrupt on completion bit.
2255  */
2256 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2257 {
2258         u32 tx_channel_count = priv->plat->tx_queues_to_use;
2259         u32 chan;
2260
2261         priv->tx_coal_frames = STMMAC_TX_FRAMES;
2262         priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2263
2264         for (chan = 0; chan < tx_channel_count; chan++) {
2265                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2266
2267                 timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
2268         }
2269 }
2270
2271 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2272 {
2273         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2274         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2275         u32 chan;
2276
2277         /* set TX ring length */
2278         for (chan = 0; chan < tx_channels_count; chan++)
2279                 stmmac_set_tx_ring_len(priv, priv->ioaddr,
2280                                 (DMA_TX_SIZE - 1), chan);
2281
2282         /* set RX ring length */
2283         for (chan = 0; chan < rx_channels_count; chan++)
2284                 stmmac_set_rx_ring_len(priv, priv->ioaddr,
2285                                 (DMA_RX_SIZE - 1), chan);
2286 }
2287
2288 /**
2289  *  stmmac_set_tx_queue_weight - Set TX queue weight
2290  *  @priv: driver private structure
2291  *  Description: It is used for setting the TX queue weights
2292  */
2293 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2294 {
2295         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2296         u32 weight;
2297         u32 queue;
2298
2299         for (queue = 0; queue < tx_queues_count; queue++) {
2300                 weight = priv->plat->tx_queues_cfg[queue].weight;
2301                 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2302         }
2303 }
2304
2305 /**
2306  *  stmmac_configure_cbs - Configure CBS in TX queue
2307  *  @priv: driver private structure
2308  *  Description: It is used for configuring CBS in AVB TX queues
2309  */
2310 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2311 {
2312         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2313         u32 mode_to_use;
2314         u32 queue;
2315
2316         /* queue 0 is reserved for legacy traffic */
2317         for (queue = 1; queue < tx_queues_count; queue++) {
2318                 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2319                 if (mode_to_use == MTL_QUEUE_DCB)
2320                         continue;
2321
2322                 stmmac_config_cbs(priv, priv->hw,
2323                                 priv->plat->tx_queues_cfg[queue].send_slope,
2324                                 priv->plat->tx_queues_cfg[queue].idle_slope,
2325                                 priv->plat->tx_queues_cfg[queue].high_credit,
2326                                 priv->plat->tx_queues_cfg[queue].low_credit,
2327                                 queue);
2328         }
2329 }
2330
2331 /**
2332  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2333  *  @priv: driver private structure
2334  *  Description: It is used for mapping RX queues to RX dma channels
2335  */
2336 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2337 {
2338         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2339         u32 queue;
2340         u32 chan;
2341
2342         for (queue = 0; queue < rx_queues_count; queue++) {
2343                 chan = priv->plat->rx_queues_cfg[queue].chan;
2344                 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2345         }
2346 }
2347
2348 /**
2349  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2350  *  @priv: driver private structure
2351  *  Description: It is used for configuring the RX Queue Priority
2352  */
2353 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2354 {
2355         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2356         u32 queue;
2357         u32 prio;
2358
2359         for (queue = 0; queue < rx_queues_count; queue++) {
2360                 if (!priv->plat->rx_queues_cfg[queue].use_prio)
2361                         continue;
2362
2363                 prio = priv->plat->rx_queues_cfg[queue].prio;
2364                 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2365         }
2366 }
2367
2368 /**
2369  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2370  *  @priv: driver private structure
2371  *  Description: It is used for configuring the TX Queue Priority
2372  */
2373 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2374 {
2375         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2376         u32 queue;
2377         u32 prio;
2378
2379         for (queue = 0; queue < tx_queues_count; queue++) {
2380                 if (!priv->plat->tx_queues_cfg[queue].use_prio)
2381                         continue;
2382
2383                 prio = priv->plat->tx_queues_cfg[queue].prio;
2384                 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2385         }
2386 }
2387
2388 /**
2389  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2390  *  @priv: driver private structure
2391  *  Description: It is used for configuring the RX queue routing
2392  */
2393 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2394 {
2395         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2396         u32 queue;
2397         u8 packet;
2398
2399         for (queue = 0; queue < rx_queues_count; queue++) {
2400                 /* no specific packet type routing specified for the queue */
2401                 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2402                         continue;
2403
2404                 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2405                 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2406         }
2407 }
2408
2409 /**
2410  *  stmmac_mtl_configuration - Configure MTL
2411  *  @priv: driver private structure
2412  *  Description: It is used for configuring the MTL
2413  */
2414 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2415 {
2416         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2417         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2418
2419         if (tx_queues_count > 1)
2420                 stmmac_set_tx_queue_weight(priv);
2421
2422         /* Configure MTL RX algorithms */
2423         if (rx_queues_count > 1)
2424                 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2425                                 priv->plat->rx_sched_algorithm);
2426
2427         /* Configure MTL TX algorithms */
2428         if (tx_queues_count > 1)
2429                 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2430                                 priv->plat->tx_sched_algorithm);
2431
2432         /* Configure CBS in AVB TX queues */
2433         if (tx_queues_count > 1)
2434                 stmmac_configure_cbs(priv);
2435
2436         /* Map RX MTL to DMA channels */
2437         stmmac_rx_queue_dma_chan_map(priv);
2438
2439         /* Enable MAC RX Queues */
2440         stmmac_mac_enable_rx_queues(priv);
2441
2442         /* Set RX priorities */
2443         if (rx_queues_count > 1)
2444                 stmmac_mac_config_rx_queues_prio(priv);
2445
2446         /* Set TX priorities */
2447         if (tx_queues_count > 1)
2448                 stmmac_mac_config_tx_queues_prio(priv);
2449
2450         /* Set RX routing */
2451         if (rx_queues_count > 1)
2452                 stmmac_mac_config_rx_queues_routing(priv);
2453 }
2454
2455 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
2456 {
2457         if (priv->dma_cap.asp) {
2458                 netdev_info(priv->dev, "Enabling Safety Features\n");
2459                 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
2460         } else {
2461                 netdev_info(priv->dev, "No Safety Features support found\n");
2462         }
2463 }
2464
2465 /**
2466  * stmmac_hw_setup - setup mac in a usable state.
2467  *  @dev : pointer to the device structure.
2468  *  @init_ptp: initialize PTP if set
2469  *  Description:
2470  *  this is the main function to setup the HW in a usable state: the DMA
2471  *  engine is reset, the core registers are configured (e.g. AXI, Checksum
2472  *  features, timers) and the DMA is ready to start receiving and transmitting.
2473  *  Return value:
2474  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2475  *  file on failure.
2476  */
2477 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2478 {
2479         struct stmmac_priv *priv = netdev_priv(dev);
2480         u32 rx_cnt = priv->plat->rx_queues_to_use;
2481         u32 tx_cnt = priv->plat->tx_queues_to_use;
2482         u32 chan;
2483         int ret;
2484
2485         /* DMA initialization and SW reset */
2486         ret = stmmac_init_dma_engine(priv);
2487         if (ret < 0) {
2488                 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2489                            __func__);
2490                 return ret;
2491         }
2492
2493         /* Copy the MAC addr into the HW  */
2494         stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2495
2496         /* PS and related bits will be programmed according to the speed */
2497         if (priv->hw->pcs) {
2498                 int speed = priv->plat->mac_port_sel_speed;
2499
2500                 if ((speed == SPEED_10) || (speed == SPEED_100) ||
2501                     (speed == SPEED_1000)) {
2502                         priv->hw->ps = speed;
2503                 } else {
2504                         dev_warn(priv->device, "invalid port speed\n");
2505                         priv->hw->ps = 0;
2506                 }
2507         }
2508
2509         /* Initialize the MAC Core */
2510         stmmac_core_init(priv, priv->hw, dev);
2511
2512         /* Initialize MTL */
2513         stmmac_mtl_configuration(priv);
2514
2515         /* Initialize Safety Features */
2516         stmmac_safety_feat_configuration(priv);
2517
2518         ret = stmmac_rx_ipc(priv, priv->hw);
2519         if (!ret) {
2520                 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2521                 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2522                 priv->hw->rx_csum = 0;
2523         }
2524
2525         /* Enable the MAC Rx/Tx */
2526         stmmac_mac_set(priv, priv->ioaddr, true);
2527
2528         /* Set the HW DMA mode and the COE */
2529         stmmac_dma_operation_mode(priv);
2530
2531         stmmac_mmc_setup(priv);
2532
2533         if (init_ptp) {
2534                 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2535                 if (ret < 0)
2536                         netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2537
2538                 ret = stmmac_init_ptp(priv);
2539                 if (ret == -EOPNOTSUPP)
2540                         netdev_warn(priv->dev, "PTP not supported by HW\n");
2541                 else if (ret)
2542                         netdev_warn(priv->dev, "PTP init failed\n");
2543         }
2544
2545         priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2546
2547         if (priv->use_riwt) {
2548                 ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2549                 if (!ret)
2550                         priv->rx_riwt = MAX_DMA_RIWT;
2551         }
2552
2553         if (priv->hw->pcs)
2554                 stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
2555
2556         /* set TX and RX rings length */
2557         stmmac_set_rings_length(priv);
2558
2559         /* Enable TSO */
2560         if (priv->tso) {
2561                 for (chan = 0; chan < tx_cnt; chan++)
2562                         stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2563         }
2564
2565         /* Start the ball rolling... */
2566         stmmac_start_all_dma(priv);
2567
2568         return 0;
2569 }
2570
2571 static void stmmac_hw_teardown(struct net_device *dev)
2572 {
2573         struct stmmac_priv *priv = netdev_priv(dev);
2574
2575         clk_disable_unprepare(priv->plat->clk_ptp_ref);
2576 }
2577
2578 /**
2579  *  stmmac_open - open entry point of the driver
2580  *  @dev : pointer to the device structure.
2581  *  Description:
2582  *  This function is the open entry point of the driver.
2583  *  Return value:
2584  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2585  *  file on failure.
2586  */
2587 static int stmmac_open(struct net_device *dev)
2588 {
2589         struct stmmac_priv *priv = netdev_priv(dev);
2590         u32 chan;
2591         int ret;
2592
2593         stmmac_check_ether_addr(priv);
2594
2595         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2596             priv->hw->pcs != STMMAC_PCS_TBI &&
2597             priv->hw->pcs != STMMAC_PCS_RTBI) {
2598                 ret = stmmac_init_phy(dev);
2599                 if (ret) {
2600                         netdev_err(priv->dev,
2601                                    "%s: Cannot attach to PHY (error: %d)\n",
2602                                    __func__, ret);
2603                         return ret;
2604                 }
2605         }
2606
2607         /* Extra statistics */
2608         memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2609         priv->xstats.threshold = tc;
2610
2611         priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2612         priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2613
2614         ret = alloc_dma_desc_resources(priv);
2615         if (ret < 0) {
2616                 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2617                            __func__);
2618                 goto dma_desc_error;
2619         }
2620
2621         ret = init_dma_desc_rings(dev, GFP_KERNEL);
2622         if (ret < 0) {
2623                 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2624                            __func__);
2625                 goto init_error;
2626         }
2627
2628         ret = stmmac_hw_setup(dev, true);
2629         if (ret < 0) {
2630                 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2631                 goto init_error;
2632         }
2633
2634         stmmac_init_tx_coalesce(priv);
2635
2636         if (dev->phydev)
2637                 phy_start(dev->phydev);
2638
2639         /* Request the IRQ lines */
2640         ret = request_irq(dev->irq, stmmac_interrupt,
2641                           IRQF_SHARED, dev->name, dev);
2642         if (unlikely(ret < 0)) {
2643                 netdev_err(priv->dev,
2644                            "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2645                            __func__, dev->irq, ret);
2646                 goto irq_error;
2647         }
2648
2649         /* Request the Wake IRQ in case another line is used for WoL */
2650         if (priv->wol_irq != dev->irq) {
2651                 ret = request_irq(priv->wol_irq, stmmac_interrupt,
2652                                   IRQF_SHARED, dev->name, dev);
2653                 if (unlikely(ret < 0)) {
2654                         netdev_err(priv->dev,
2655                                    "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2656                                    __func__, priv->wol_irq, ret);
2657                         goto wolirq_error;
2658                 }
2659         }
2660
2661         /* Request the LPI IRQ in case a separate line is used */
2662         if (priv->lpi_irq > 0) {
2663                 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2664                                   dev->name, dev);
2665                 if (unlikely(ret < 0)) {
2666                         netdev_err(priv->dev,
2667                                    "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2668                                    __func__, priv->lpi_irq, ret);
2669                         goto lpiirq_error;
2670                 }
2671         }
2672
2673         stmmac_enable_all_queues(priv);
2674         stmmac_start_all_queues(priv);
2675
2676         return 0;
2677
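/* Error unwind: release everything acquired so far, in reverse order. */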
2678 lpiirq_error:
2679         if (priv->wol_irq != dev->irq)
2680                 free_irq(priv->wol_irq, dev);
2681 wolirq_error:
2682         free_irq(dev->irq, dev);
2683 irq_error:
2684         if (dev->phydev)
2685                 phy_stop(dev->phydev);
2686
2687         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2688                 del_timer_sync(&priv->tx_queue[chan].txtimer);
2689
2690         stmmac_hw_teardown(dev);
2691 init_error:
2692         free_dma_desc_resources(priv);
2693 dma_desc_error:
2694         if (dev->phydev)
2695                 phy_disconnect(dev->phydev);
2696
2697         return ret;
2698 }
2699
2700 /**
2701  *  stmmac_release - close entry point of the driver
2702  *  @dev : device pointer.
2703  *  Description:
2704  *  This is the stop entry point of the driver.
2705  */
2706 static int stmmac_release(struct net_device *dev)
2707 {
2708         struct stmmac_priv *priv = netdev_priv(dev);
2709         u32 chan;
2710
2711         if (priv->eee_enabled)
2712                 del_timer_sync(&priv->eee_ctrl_timer);
2713
2714         /* Stop and disconnect the PHY */
2715         if (dev->phydev) {
2716                 phy_stop(dev->phydev);
2717                 phy_disconnect(dev->phydev);
2718         }
2719
2720         stmmac_stop_all_queues(priv);
2721
2722         stmmac_disable_all_queues(priv);
2723
2724         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2725                 del_timer_sync(&priv->tx_queue[chan].txtimer);
2726
2727         /* Free the IRQ lines */
2728         free_irq(dev->irq, dev);
2729         if (priv->wol_irq != dev->irq)
2730                 free_irq(priv->wol_irq, dev);
2731         if (priv->lpi_irq > 0)
2732                 free_irq(priv->lpi_irq, dev);
2733
2734         /* Stop TX/RX DMA and clear the descriptors */
2735         stmmac_stop_all_dma(priv);
2736
2737         /* Release and free the Rx/Tx resources */
2738         free_dma_desc_resources(priv);
2739
2740         /* Disable the MAC Rx/Tx */
2741         stmmac_mac_set(priv, priv->ioaddr, false);
2742
2743         netif_carrier_off(dev);
2744
2745         stmmac_release_ptp(priv);
2746
2747         return 0;
2748 }
2749
2750 /**
2751  *  stmmac_tso_allocator - fill TX descriptors for a TSO payload
2752  *  @priv: driver private structure
2753  *  @des: buffer start address
2754  *  @total_len: total length to fill in descriptors
2755  *  @last_segment: condition for the last descriptor
2756  *  @queue: TX queue index
2757  *  Description:
2758  *  This function fills the descriptors and requests new descriptors
2759  *  according to the buffer length to fill
2760  */
2761 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2762                                  int total_len, bool last_segment, u32 queue)
2763 {
2764         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2765         struct dma_desc *desc;
2766         u32 buff_size;
2767         int tmp_len;
2768
2769         tmp_len = total_len;
2770
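        /* Split the payload across as many descriptors as needed: each one
         * carries up to TSO_MAX_BUFF_SIZE bytes, and the last one carries the
         * remainder plus, when requested, the last_segment flag.
         */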
2771         while (tmp_len > 0) {
2772                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2773                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2774                 desc = tx_q->dma_tx + tx_q->cur_tx;
2775
2776                 desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2777                 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2778                             TSO_MAX_BUFF_SIZE : tmp_len;
2779
2780                 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
2781                                 0, 1,
2782                                 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2783                                 0, 0);
2784
2785                 tmp_len -= TSO_MAX_BUFF_SIZE;
2786         }
2787 }
2788
2789 /**
2790  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2791  *  @skb : the socket buffer
2792  *  @dev : device pointer
2793  *  Description: this is the transmit function that is called on TSO frames
2794  *  (support available on GMAC4 and newer chips).
2795  *  The diagram below shows the ring programming in case of TSO frames:
2796  *
2797  *  First Descriptor
2798  *   --------
2799  *   | DES0 |---> buffer1 = L2/L3/L4 header
2800  *   | DES1 |---> TCP Payload (can continue on next descr...)
2801  *   | DES2 |---> buffer 1 and 2 len
2802  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2803  *   --------
2804  *      |
2805  *     ...
2806  *      |
2807  *   --------
2808  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2809  *   | DES1 | --|
2810  *   | DES2 | --> buffer 1 and 2 len
2811  *   | DES3 |
2812  *   --------
2813  *
2814  * mss is fixed when TSO is enabled, so the TDES3 ctx field is only programmed when it changes.
2815  */
2816 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2817 {
2818         struct dma_desc *desc, *first, *mss_desc = NULL;
2819         struct stmmac_priv *priv = netdev_priv(dev);
2820         int nfrags = skb_shinfo(skb)->nr_frags;
2821         u32 queue = skb_get_queue_mapping(skb);
2822         unsigned int first_entry, des;
2823         struct stmmac_tx_queue *tx_q;
2824         int tmp_pay_len = 0;
2825         u32 pay_len, mss;
2826         u8 proto_hdr_len;
2827         int i;
2828
2829         tx_q = &priv->tx_queue[queue];
2830
2831         /* Compute header lengths */
2832         proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2833
2834         /* Desc availability based on threshold should be safe enough */
2835         if (unlikely(stmmac_tx_avail(priv, queue) <
2836                 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2837                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2838                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2839                                                                 queue));
2840                         /* This is a hard error, log it. */
2841                         netdev_err(priv->dev,
2842                                    "%s: Tx Ring full when queue awake\n",
2843                                    __func__);
2844                 }
2845                 return NETDEV_TX_BUSY;
2846         }
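
        /* Illustrative example: a 64 KiB TSO skb with a 54 byte header must
         * find at least (65536 - 54) / 16383 + 1 = 4 free descriptors to get
         * past the availability check above.
         */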
2847
2848         pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2849
2850         mss = skb_shinfo(skb)->gso_size;
2851
2852         /* set new MSS value if needed */
2853         if (mss != tx_q->mss) {
2854                 mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2855                 stmmac_set_mss(priv, mss_desc, mss);
2856                 tx_q->mss = mss;
2857                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2858                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2859         }
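
        /* If a context descriptor was queued above it consumes one ring
         * entry; its OWN bit is deliberately set last (see the mss_desc
         * handling further down) so the DMA never parses a half-written
         * MSS update.
         */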
2860
2861         if (netif_msg_tx_queued(priv)) {
2862                 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2863                         __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2864                 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2865                         skb->data_len);
2866         }
2867
2868         first_entry = tx_q->cur_tx;
2869         WARN_ON(tx_q->tx_skbuff[first_entry]);
2870
2871         desc = tx_q->dma_tx + first_entry;
2872         first = desc;
2873
2874         /* first descriptor: fill Headers on Buf1 */
2875         des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2876                              DMA_TO_DEVICE);
2877         if (dma_mapping_error(priv->device, des))
2878                 goto dma_map_err;
2879
2880         tx_q->tx_skbuff_dma[first_entry].buf = des;
2881         tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2882
2883         first->des0 = cpu_to_le32(des);
2884
2885         /* Fill start of payload in buff2 of first descriptor */
2886         if (pay_len)
2887                 first->des1 = cpu_to_le32(des + proto_hdr_len);
2888
2889         /* If needed take extra descriptors to fill the remaining payload */
2890         tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2891
2892         stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2893
2894         /* Prepare fragments */
2895         for (i = 0; i < nfrags; i++) {
2896                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2897
2898                 des = skb_frag_dma_map(priv->device, frag, 0,
2899                                        skb_frag_size(frag),
2900                                        DMA_TO_DEVICE);
2901                 if (dma_mapping_error(priv->device, des))
2902                         goto dma_map_err;
2903
2904                 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2905                                      (i == nfrags - 1), queue);
2906
2907                 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2908                 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2909                 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2910         }
2911
2912         tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2913
2914         /* Only the last descriptor gets to point to the skb. */
2915         tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2916
2917         /* We've used all descriptors we need for this skb, however,
2918          * advance cur_tx so that it references a fresh descriptor.
2919          * ndo_start_xmit will fill this descriptor the next time it's
2920          * called and stmmac_tx_clean may clean up to this descriptor.
2921          */
2922         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2923
2924         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2925                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2926                           __func__);
2927                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2928         }
2929
2930         dev->stats.tx_bytes += skb->len;
2931         priv->xstats.tx_tso_frames++;
2932         priv->xstats.tx_tso_nfrags += nfrags;
2933
2934         /* Manage tx mitigation */
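        /* The IC (Interrupt on Completion) bit is requested only once every
         * tx_coal_frames frames; in between, the coalescing timer armed
         * below makes sure completed descriptors still get reclaimed.
         */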
2935         tx_q->tx_count_frames += nfrags + 1;
2936         if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
2937                 stmmac_set_tx_ic(priv, desc);
2938                 priv->xstats.tx_set_ic_bit++;
2939                 tx_q->tx_count_frames = 0;
2940         } else {
2941                 stmmac_tx_timer_arm(priv, queue);
2942         }
2943
2944         skb_tx_timestamp(skb);
2945
2946         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2947                      priv->hwts_tx_en)) {
2948                 /* declare that device is doing timestamping */
2949                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2950                 stmmac_enable_tx_timestamp(priv, first);
2951         }
2952
2953         /* Complete the first descriptor before granting the DMA */
2954         stmmac_prepare_tso_tx_desc(priv, first, 1,
2955                         proto_hdr_len,
2956                         pay_len,
2957                         1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2958                         tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2959
2960         /* If context desc is used to change MSS */
2961         if (mss_desc) {
2962                 /* Make sure that the first descriptor has been completely
2963                  * written, including its OWN bit. The MSS context
2964                  * descriptor sits before the first descriptor, so its OWN
2965                  * bit must be the last thing written.
2966                  */
2967                 dma_wmb();
2968                 stmmac_set_tx_owner(priv, mss_desc);
2969         }
2970
2971         /* The OWN bit must be the last thing written when preparing the
2972          * descriptor, and a barrier is needed to make sure that
2973          * everything is coherent before granting the DMA engine.
2974          */
2975         wmb();
2976
2977         if (netif_msg_pktdata(priv)) {
2978                 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2979                         __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2980                         tx_q->cur_tx, first, nfrags);
2981
2982                 stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
2983
2984                 pr_info(">>> frame to be transmitted: ");
2985                 print_pkt(skb->data, skb_headlen(skb));
2986         }
2987
2988         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2989
2990         tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
2991         stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
2992
2993         return NETDEV_TX_OK;
2994
2995 dma_map_err:
2996         dev_err(priv->device, "Tx dma map failed\n");
2997         dev_kfree_skb(skb);
2998         priv->dev->stats.tx_dropped++;
2999         return NETDEV_TX_OK;
3000 }
3001
3002 /**
3003  *  stmmac_xmit - Tx entry point of the driver
3004  *  @skb : the socket buffer
3005  *  @dev : device pointer
3006  *  Description : this is the tx entry point of the driver.
3007  *  It programs the chain or the ring and supports oversized frames
3008  *  and SG feature.
3009  */
3010 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3011 {
3012         struct stmmac_priv *priv = netdev_priv(dev);
3013         unsigned int nopaged_len = skb_headlen(skb);
3014         int i, csum_insertion = 0, is_jumbo = 0;
3015         u32 queue = skb_get_queue_mapping(skb);
3016         int nfrags = skb_shinfo(skb)->nr_frags;
3017         int entry;
3018         unsigned int first_entry;
3019         struct dma_desc *desc, *first;
3020         struct stmmac_tx_queue *tx_q;
3021         unsigned int enh_desc;
3022         unsigned int des;
3023
3024         tx_q = &priv->tx_queue[queue];
3025
3026         if (priv->tx_path_in_lpi_mode)
3027                 stmmac_disable_eee_mode(priv);
3028
3029         /* Manage oversized TCP frames for GMAC4 device */
3030         if (skb_is_gso(skb) && priv->tso) {
3031                 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
3032                         /*
3033                          * There is no way to determine the number of TSO
3034                          * capable queues. Always use queue 0 because, if
3035                          * TSO is supported, then at least this one will
3036                          * be capable.
3037                          */
3038                         skb_set_queue_mapping(skb, 0);
3039
3040                         return stmmac_tso_xmit(skb, dev);
3041                 }
3042         }
3043
3044         if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3045                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3046                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3047                                                                 queue));
3048                         /* This is a hard error, log it. */
3049                         netdev_err(priv->dev,
3050                                    "%s: Tx Ring full when queue awake\n",
3051                                    __func__);
3052                 }
3053                 return NETDEV_TX_BUSY;
3054         }
3055
3056         entry = tx_q->cur_tx;
3057         first_entry = entry;
3058         WARN_ON(tx_q->tx_skbuff[first_entry]);
3059
3060         csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3061
3062         if (likely(priv->extend_desc))
3063                 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3064         else
3065                 desc = tx_q->dma_tx + entry;
3066
3067         first = desc;
3068
3069         enh_desc = priv->plat->enh_desc;
3070         /* To program the descriptors according to the size of the frame */
3071         if (enh_desc)
3072                 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
3073
3074         if (unlikely(is_jumbo)) {
3075                 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
3076                 if (unlikely(entry < 0) && (entry != -EINVAL))
3077                         goto dma_map_err;
3078         }
3079
3080         for (i = 0; i < nfrags; i++) {
3081                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3082                 int len = skb_frag_size(frag);
3083                 bool last_segment = (i == (nfrags - 1));
3084
3085                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3086                 WARN_ON(tx_q->tx_skbuff[entry]);
3087
3088                 if (likely(priv->extend_desc))
3089                         desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3090                 else
3091                         desc = tx_q->dma_tx + entry;
3092
3093                 des = skb_frag_dma_map(priv->device, frag, 0, len,
3094                                        DMA_TO_DEVICE);
3095                 if (dma_mapping_error(priv->device, des))
3096                         goto dma_map_err; /* should reuse desc w/o issues */
3097
3098                 tx_q->tx_skbuff_dma[entry].buf = des;
3099
3100                 stmmac_set_desc_addr(priv, desc, des);
3101
3102                 tx_q->tx_skbuff_dma[entry].map_as_page = true;
3103                 tx_q->tx_skbuff_dma[entry].len = len;
3104                 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3105
3106                 /* Prepare the descriptor and set the own bit too */
3107                 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
3108                                 priv->mode, 1, last_segment, skb->len);
3109         }
3110
3111         /* Only the last descriptor gets to point to the skb. */
3112         tx_q->tx_skbuff[entry] = skb;
3113
3114         /* We've used all descriptors we need for this skb, however,
3115          * advance cur_tx so that it references a fresh descriptor.
3116          * ndo_start_xmit will fill this descriptor the next time it's
3117          * called and stmmac_tx_clean may clean up to this descriptor.
3118          */
3119         entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3120         tx_q->cur_tx = entry;
3121
3122         if (netif_msg_pktdata(priv)) {
3123                 void *tx_head;
3124
3125                 netdev_dbg(priv->dev,
3126                            "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3127                            __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3128                            entry, first, nfrags);
3129
3130                 if (priv->extend_desc)
3131                         tx_head = (void *)tx_q->dma_etx;
3132                 else
3133                         tx_head = (void *)tx_q->dma_tx;
3134
3135                 stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
3136
3137                 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3138                 print_pkt(skb->data, skb->len);
3139         }
3140
3141         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3142                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3143                           __func__);
3144                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3145         }
3146
3147         dev->stats.tx_bytes += skb->len;
3148
3149         /* According to the coalesce parameter, the IC bit for the latest
3150          * segment is reset and the timer is restarted to clean the tx status.
3151          * This approach takes care of the fragments: desc is the first
3152          * element in the non-SG case.
3153          */
3154         tx_q->tx_count_frames += nfrags + 1;
3155         if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
3156                 stmmac_set_tx_ic(priv, desc);
3157                 priv->xstats.tx_set_ic_bit++;
3158                 tx_q->tx_count_frames = 0;
3159         } else {
3160                 stmmac_tx_timer_arm(priv, queue);
3161         }
3162
3163         skb_tx_timestamp(skb);
3164
3165         /* Ready to fill the first descriptor and set the OWN bit w/o any
3166          * problems because all the descriptors are actually ready to be
3167          * passed to the DMA engine.
3168          */
3169         if (likely(!is_jumbo)) {
3170                 bool last_segment = (nfrags == 0);
3171
3172                 des = dma_map_single(priv->device, skb->data,
3173                                      nopaged_len, DMA_TO_DEVICE);
3174                 if (dma_mapping_error(priv->device, des))
3175                         goto dma_map_err;
3176
3177                 tx_q->tx_skbuff_dma[first_entry].buf = des;
3178
3179                 stmmac_set_desc_addr(priv, first, des);
3180
3181                 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3182                 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3183
3184                 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3185                              priv->hwts_tx_en)) {
3186                         /* declare that device is doing timestamping */
3187                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3188                         stmmac_enable_tx_timestamp(priv, first);
3189                 }
3190
3191                 /* Prepare the first descriptor setting the OWN bit too */
3192                 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3193                                 csum_insertion, priv->mode, 1, last_segment,
3194                                 skb->len);
3195
3196                 /* The OWN bit must be the last thing written when preparing
3197                  * the descriptor, and a barrier is needed to make sure that
3198                  * everything is coherent before granting the DMA engine.
3199                  */
3200                 wmb();
3201         }
3202
3203         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3204
3205         stmmac_enable_dma_transmission(priv, priv->ioaddr);
3206
3207         tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3208         stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3209
3210         return NETDEV_TX_OK;
3211
3212 dma_map_err:
3213         netdev_err(priv->dev, "Tx DMA map failed\n");
3214         dev_kfree_skb(skb);
3215         priv->dev->stats.tx_dropped++;
3216         return NETDEV_TX_OK;
3217 }
3218
3219 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3220 {
3221         struct vlan_ethhdr *veth;
3222         __be16 vlan_proto;
3223         u16 vlanid;
3224
3225         veth = (struct vlan_ethhdr *)skb->data;
3226         vlan_proto = veth->h_vlan_proto;
3227
3228         if ((vlan_proto == htons(ETH_P_8021Q) &&
3229              dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3230             (vlan_proto == htons(ETH_P_8021AD) &&
3231              dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
3232                 /* pop the vlan tag */
3233                 vlanid = ntohs(veth->h_vlan_TCI);
3234                 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
3235                 skb_pull(skb, VLAN_HLEN);
3236                 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
3237         }
3238 }
3239
3240
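/* Returns 1 while zero-copy receive is temporarily disabled, i.e. after an
 * skb allocation failure pushed rx_zeroc_thresh up to STMMAC_RX_THRESH in
 * stmmac_rx_refill().
 */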
3241 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3242 {
3243         if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3244                 return 0;
3245
3246         return 1;
3247 }
3248
3249 /**
3250  * stmmac_rx_refill - refill the used preallocated skb buffers
3251  * @priv: driver private structure
3252  * @queue: RX queue index
3253  * Description : this reallocates the skbs for the zero-copy based
3254  * reception process.
3255  */
3256 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3257 {
3258         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3259         int dirty = stmmac_rx_dirty(priv, queue);
3260         unsigned int entry = rx_q->dirty_rx;
3261
3262         int bfsize = priv->dma_buf_sz;
3263
3264         while (dirty-- > 0) {
3265                 struct dma_desc *p;
3266
3267                 if (priv->extend_desc)
3268                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3269                 else
3270                         p = rx_q->dma_rx + entry;
3271
3272                 if (likely(!rx_q->rx_skbuff[entry])) {
3273                         struct sk_buff *skb;
3274
3275                         skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3276                         if (unlikely(!skb)) {
3277                                 /* so for a while no zero-copy! */
3278                                 rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3279                                 if (unlikely(net_ratelimit()))
3280                                         dev_err(priv->device,
3281                                                 "fail to alloc skb entry %d\n",
3282                                                 entry);
3283                                 break;
3284                         }
3285
3286                         rx_q->rx_skbuff[entry] = skb;
3287                         rx_q->rx_skbuff_dma[entry] =
3288                             dma_map_single(priv->device, skb->data, bfsize,
3289                                            DMA_FROM_DEVICE);
3290                         if (dma_mapping_error(priv->device,
3291                                               rx_q->rx_skbuff_dma[entry])) {
3292                                 netdev_err(priv->dev, "Rx DMA map failed\n");
3293                                 dev_kfree_skb(skb);
3294                                 break;
3295                         }
3296
3297                         stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
3298                         stmmac_refill_desc3(priv, rx_q, p);
3299
3300                         if (rx_q->rx_zeroc_thresh > 0)
3301                                 rx_q->rx_zeroc_thresh--;
3302
3303                         netif_dbg(priv, rx_status, priv->dev,
3304                                   "refill entry #%d\n", entry);
3305                 }
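
                /* Make sure all prior descriptor updates (e.g. the buffer
                 * address set above) are visible to the device before the
                 * OWN bit is handed back below.
                 */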
3306                 dma_wmb();
3307
3308                 stmmac_set_rx_owner(priv, p, priv->use_riwt);
3309
3310                 dma_wmb();
3311
3312                 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3313         }
3314         rx_q->dirty_rx = entry;
3315 }
3316
3317 /**
3318  * stmmac_rx - manage the receive process
3319  * @priv: driver private structure
3320  * @limit: napi budget
3321  * @queue: RX queue index.
3322  * Description : this is the function called by the napi poll method.
3323  * It gets all the frames inside the ring.
3324  */
3325 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3326 {
3327         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3328         struct stmmac_channel *ch = &priv->channel[queue];
3329         unsigned int entry = rx_q->cur_rx;
3330         int coe = priv->hw->rx_csum;
3331         unsigned int next_entry;
3332         unsigned int count = 0;
3333         bool xmac;
3334
3335         xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
3336
3337         if (netif_msg_rx_status(priv)) {
3338                 void *rx_head;
3339
3340                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3341                 if (priv->extend_desc)
3342                         rx_head = (void *)rx_q->dma_erx;
3343                 else
3344                         rx_head = (void *)rx_q->dma_rx;
3345
3346                 stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
3347         }
3348         while (count < limit) {
3349                 int status;
3350                 struct dma_desc *p;
3351                 struct dma_desc *np;
3352
3353                 if (priv->extend_desc)
3354                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3355                 else
3356                         p = rx_q->dma_rx + entry;
3357
3358                 /* read the status of the incoming frame */
3359                 status = stmmac_rx_status(priv, &priv->dev->stats,
3360                                 &priv->xstats, p);
3361                 /* check if managed by the DMA otherwise go ahead */
3362                 if (unlikely(status & dma_own))
3363                         break;
3364
3365                 count++;
3366
3367                 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3368                 next_entry = rx_q->cur_rx;
3369
3370                 if (priv->extend_desc)
3371                         np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3372                 else
3373                         np = rx_q->dma_rx + next_entry;
3374
3375                 prefetch(np);
3376
3377                 if (priv->extend_desc)
3378                         stmmac_rx_extended_status(priv, &priv->dev->stats,
3379                                         &priv->xstats, rx_q->dma_erx + entry);
3380                 if (unlikely(status == discard_frame)) {
3381                         priv->dev->stats.rx_errors++;
3382                         if (priv->hwts_rx_en && !priv->extend_desc) {
3383                                 /* DESC2 & DESC3 will be overwritten by device
3384                                  * with timestamp value, hence reinitialize
3385                                  * them in stmmac_rx_refill() function so that
3386                                  * device can reuse it.
3387                                  */
3388                                 dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3389                                 rx_q->rx_skbuff[entry] = NULL;
3390                                 dma_unmap_single(priv->device,
3391                                                  rx_q->rx_skbuff_dma[entry],
3392                                                  priv->dma_buf_sz,
3393                                                  DMA_FROM_DEVICE);
3394                         }
3395                 } else {
3396                         struct sk_buff *skb;
3397                         int frame_len;
3398                         unsigned int des;
3399
3400                         stmmac_get_desc_addr(priv, p, &des);
3401                         frame_len = stmmac_get_rx_frame_len(priv, p, coe);
3402
3403                         /*  If frame length is greater than skb buffer size
3404                          *  (preallocated during init) then the packet is
3405                          *  ignored
3406                          */
3407                         if (frame_len > priv->dma_buf_sz) {
3408                                 netdev_err(priv->dev,
3409                                            "len %d larger than size (%d)\n",
3410                                            frame_len, priv->dma_buf_sz);
3411                                 priv->dev->stats.rx_length_errors++;
3412                                 break;
3413                         }
3414
3415                         /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3416                          * Type frames (LLC/LLC-SNAP)
3417                          *
3418                          * llc_snap is never checked in GMAC >= 4, so this ACS
3419                          * feature is always disabled and packets need to be
3420                          * stripped manually.
3421                          */
3422                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3423                             unlikely(status != llc_snap))
3424                                 frame_len -= ETH_FCS_LEN;
3425
3426                         if (netif_msg_rx_status(priv)) {
3427                                 netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3428                                            p, entry, des);
3429                                 netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3430                                            frame_len, status);
3431                         }
3432
3433                         /* Zero-copy is always used, whatever the size, in
3434                          * case of GMAC4 because the used descriptors always
3435                          * need to be refilled.
3436                          */
3437                         if (unlikely(!xmac &&
3438                                      ((frame_len < priv->rx_copybreak) ||
3439                                      stmmac_rx_threshold_count(rx_q)))) {
3440                                 skb = netdev_alloc_skb_ip_align(priv->dev,
3441                                                                 frame_len);
3442                                 if (unlikely(!skb)) {
3443                                         if (net_ratelimit())
3444                                                 dev_warn(priv->device,
3445                                                          "packet dropped\n");
3446                                         priv->dev->stats.rx_dropped++;
3447                                         break;
3448                                 }
3449
3450                                 dma_sync_single_for_cpu(priv->device,
3451                                                         rx_q->rx_skbuff_dma
3452                                                         [entry], frame_len,
3453                                                         DMA_FROM_DEVICE);
3454                                 skb_copy_to_linear_data(skb,
3455                                                         rx_q->
3456                                                         rx_skbuff[entry]->data,
3457                                                         frame_len);
3458
3459                                 skb_put(skb, frame_len);
3460                                 dma_sync_single_for_device(priv->device,
3461                                                            rx_q->rx_skbuff_dma
3462                                                            [entry], frame_len,
3463                                                            DMA_FROM_DEVICE);
3464                         } else {
3465                                 skb = rx_q->rx_skbuff[entry];
3466                                 if (unlikely(!skb)) {
3467                                         netdev_err(priv->dev,
3468                                                    "%s: Inconsistent Rx chain\n",
3469                                                    priv->dev->name);
3470                                         priv->dev->stats.rx_dropped++;
3471                                         break;
3472                                 }
3473                                 prefetch(skb->data - NET_IP_ALIGN);
3474                                 rx_q->rx_skbuff[entry] = NULL;
3475                                 rx_q->rx_zeroc_thresh++;
3476
3477                                 skb_put(skb, frame_len);
3478                                 dma_unmap_single(priv->device,
3479                                                  rx_q->rx_skbuff_dma[entry],
3480                                                  priv->dma_buf_sz,
3481                                                  DMA_FROM_DEVICE);
3482                         }
3483
3484                         if (netif_msg_pktdata(priv)) {
3485                                 netdev_dbg(priv->dev, "frame received (%dbytes)",
3486                                            frame_len);
3487                                 print_pkt(skb->data, frame_len);
3488                         }
3489
3490                         stmmac_get_rx_hwtstamp(priv, p, np, skb);
3491
3492                         stmmac_rx_vlan(priv->dev, skb);
3493
3494                         skb->protocol = eth_type_trans(skb, priv->dev);
3495
3496                         if (unlikely(!coe))
3497                                 skb_checksum_none_assert(skb);
3498                         else
3499                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
3500
3501                         napi_gro_receive(&ch->napi, skb);
3502
3503                         priv->dev->stats.rx_packets++;
3504                         priv->dev->stats.rx_bytes += frame_len;
3505                 }
3506                 entry = next_entry;
3507         }
3508
3509         stmmac_rx_refill(priv, queue);
3510
3511         priv->xstats.rx_pkt_n += count;
3512
3513         return count;
3514 }
3515
3516 /**
3517  *  stmmac_napi_poll - stmmac poll method (NAPI)
3518  *  @napi : pointer to the napi structure.
3519  *  @budget : maximum number of packets that the current CPU can receive from
3520  *            all interfaces.
3521  *  Description :
3522  *  To look at the incoming frames and clear the tx resources.
3523  */
3524 static int stmmac_napi_poll(struct napi_struct *napi, int budget)
3525 {
3526         struct stmmac_channel *ch =
3527                 container_of(napi, struct stmmac_channel, napi);
3528         struct stmmac_priv *priv = ch->priv_data;
3529         int work_done, rx_done = 0, tx_done = 0;
3530         u32 chan = ch->index;
3531
3532         priv->xstats.napi_poll++;
3533
3534         if (ch->has_tx)
3535                 tx_done = stmmac_tx_clean(priv, budget, chan);
3536         if (ch->has_rx)
3537                 rx_done = stmmac_rx(priv, budget, chan);
3538
3539         work_done = max(rx_done, tx_done);
3540         work_done = min(work_done, budget);
3541
3542         if (work_done < budget && napi_complete_done(napi, work_done)) {
3543                 int stat;
3544
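                /* Re-enable the DMA interrupts, then check whether new events
                 * raced with napi_complete_done(); if so, reschedule NAPI and
                 * mask the interrupts again.
                 */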
3545                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3546                 stat = stmmac_dma_interrupt_status(priv, priv->ioaddr,
3547                                                    &priv->xstats, chan);
3548                 if (stat && napi_reschedule(napi))
3549                         stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
3550         }
3551
3552         return work_done;
3553 }
3554
3555 /**
3556  *  stmmac_tx_timeout
3557  *  @dev : Pointer to net device structure
3558  *  Description: this function is called when a packet transmission fails to
3559  *   complete within a reasonable time. The driver will mark the error in the
3560  *   netdev structure and arrange for the device to be reset to a sane state
3561  *   in order to transmit a new packet.
3562  */
3563 static void stmmac_tx_timeout(struct net_device *dev)
3564 {
3565         struct stmmac_priv *priv = netdev_priv(dev);
3566
3567         stmmac_global_err(priv);
3568 }
3569
3570 /**
3571  *  stmmac_set_rx_mode - entry point for multicast addressing
3572  *  @dev : pointer to the device structure
3573  *  Description:
3574  *  This function is a driver entry point which gets called by the kernel
3575  *  whenever multicast addresses must be enabled/disabled.
3576  *  Return value:
3577  *  void.
3578  */
3579 static void stmmac_set_rx_mode(struct net_device *dev)
3580 {
3581         struct stmmac_priv *priv = netdev_priv(dev);
3582
3583         stmmac_set_filter(priv, priv->hw, dev);
3584 }
3585
3586 /**
3587  *  stmmac_change_mtu - entry point to change MTU size for the device.
3588  *  @dev : device pointer.
3589  *  @new_mtu : the new MTU size for the device.
3590  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
3591  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
3592  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
3593  *  Return value:
3594  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3595  *  file on failure.
3596  */
3597 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3598 {
3599         struct stmmac_priv *priv = netdev_priv(dev);
3600
3601         if (netif_running(dev)) {
3602                 netdev_err(priv->dev, "must be stopped to change its MTU\n");
3603                 return -EBUSY;
3604         }
3605
3606         dev->mtu = new_mtu;
3607
3608         netdev_update_features(dev);
3609
3610         return 0;
3611 }
3612
3613 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3614                                              netdev_features_t features)
3615 {
3616         struct stmmac_priv *priv = netdev_priv(dev);
3617
3618         if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3619                 features &= ~NETIF_F_RXCSUM;
3620
3621         if (!priv->plat->tx_coe)
3622                 features &= ~NETIF_F_CSUM_MASK;
3623
3624         /* Some GMAC devices have buggy Jumbo frame support that
3625          * requires Tx COE to be disabled for oversized frames
3626          * (due to limited buffer sizes). In this case we disable
3627          * the TX csum insertion in the TDES and do not use SF.
3628          */
3629         if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3630                 features &= ~NETIF_F_CSUM_MASK;
3631
3632         /* Enable or disable TSO according to the NETIF_F_TSO feature flag */
3633         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3634                 if (features & NETIF_F_TSO)
3635                         priv->tso = true;
3636                 else
3637                         priv->tso = false;
3638         }
3639
3640         return features;
3641 }
3642
3643 static int stmmac_set_features(struct net_device *netdev,
3644                                netdev_features_t features)
3645 {
3646         struct stmmac_priv *priv = netdev_priv(netdev);
3647
3648         /* Keep the COE Type if checksum offload is supported */
3649         if (features & NETIF_F_RXCSUM)
3650                 priv->hw->rx_csum = priv->plat->rx_coe;
3651         else
3652                 priv->hw->rx_csum = 0;
3653         /* No check needed because rx_coe has been set before and it will be
3654          * fixed in case of issue.
3655          */
3656         stmmac_rx_ipc(priv, priv->hw);
3657
3658         return 0;
3659 }
3660
3661 /**
3662  *  stmmac_interrupt - main ISR
3663  *  @irq: interrupt number.
3664  *  @dev_id: to pass the net device pointer.
3665  *  Description: this is the main driver interrupt service routine.
3666  *  It can call:
3667  *  o DMA service routine (to manage incoming frame reception and transmission
3668  *    status)
3669  *  o Core interrupts to manage: remote wake-up, management counter, LPI
3670  *    interrupts.
3671  */
3672 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3673 {
3674         struct net_device *dev = (struct net_device *)dev_id;
3675         struct stmmac_priv *priv = netdev_priv(dev);
3676         u32 rx_cnt = priv->plat->rx_queues_to_use;
3677         u32 tx_cnt = priv->plat->tx_queues_to_use;
3678         u32 queues_count;
3679         u32 queue;
3680         bool xmac;
3681
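        /* "xmac" is set for both GMAC4 and XGMAC cores, which share the MAC
         * interrupt handling below.
         */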
3682         xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
3683         queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3684
3685         if (priv->irq_wake)
3686                 pm_wakeup_event(priv->device, 0);
3687
3688         if (unlikely(!dev)) {
3689                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
3690                 return IRQ_NONE;
3691         }
3692
3693         /* Check if adapter is up */
3694         if (test_bit(STMMAC_DOWN, &priv->state))
3695                 return IRQ_HANDLED;
3696         /* Check if a fatal error happened */
3697         if (stmmac_safety_feat_interrupt(priv))
3698                 return IRQ_HANDLED;
3699
3700         /* To handle GMAC own interrupts */
3701         if ((priv->plat->has_gmac) || xmac) {
3702                 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
3703                 int mtl_status;
3704
3705                 if (unlikely(status)) {
3706                         /* For LPI we need to save the tx status */
3707                         if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3708                                 priv->tx_path_in_lpi_mode = true;
3709                         if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3710                                 priv->tx_path_in_lpi_mode = false;
3711                 }
3712
3713                 for (queue = 0; queue < queues_count; queue++) {
3714                         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3715
3716                         mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
3717                                                                 queue);
3718                         if (mtl_status != -EINVAL)
3719                                 status |= mtl_status;
3720
3721                         if (status & CORE_IRQ_MTL_RX_OVERFLOW)
3722                                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3723                                                        rx_q->rx_tail_addr,
3724                                                        queue);
3725                 }
3726
3727                 /* PCS link status */
3728                 if (priv->hw->pcs) {
3729                         if (priv->xstats.pcs_link)
3730                                 netif_carrier_on(dev);
3731                         else
3732                                 netif_carrier_off(dev);
3733                 }
3734         }
3735
3736         /* To handle DMA interrupts */
3737         stmmac_dma_interrupt(priv);
3738
3739         return IRQ_HANDLED;
3740 }
3741
3742 #ifdef CONFIG_NET_POLL_CONTROLLER
3743 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3744  * to allow network I/O with interrupts disabled.
3745  */
3746 static void stmmac_poll_controller(struct net_device *dev)
3747 {
3748         disable_irq(dev->irq);
3749         stmmac_interrupt(dev->irq, dev);
3750         enable_irq(dev->irq);
3751 }
3752 #endif
3753
3754 /**
3755  *  stmmac_ioctl - Entry point for the Ioctl
3756  *  @dev: Device pointer.
3757  *  @rq: An IOCTL specific structure, that can contain a pointer to
3758  *  a proprietary structure used to pass information to the driver.
3759  *  @cmd: IOCTL command
3760  *  Description:
3761  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3762  */
3763 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3764 {
3765         int ret = -EOPNOTSUPP;
3766
3767         if (!netif_running(dev))
3768                 return -EINVAL;
3769
3770         switch (cmd) {
3771         case SIOCGMIIPHY:
3772         case SIOCGMIIREG:
3773         case SIOCSMIIREG:
3774                 if (!dev->phydev)
3775                         return -EINVAL;
3776                 ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3777                 break;
3778         case SIOCSHWTSTAMP:
3779                 ret = stmmac_hwtstamp_ioctl(dev, rq);
3780                 break;
3781         default:
3782                 break;
3783         }
3784
3785         return ret;
3786 }
3787
3788 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3789                                     void *cb_priv)
3790 {
3791         struct stmmac_priv *priv = cb_priv;
3792         int ret = -EOPNOTSUPP;
3793
3794         stmmac_disable_all_queues(priv);
3795
3796         switch (type) {
3797         case TC_SETUP_CLSU32:
3798                 if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
3799                         ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
3800                 break;
3801         default:
3802                 break;
3803         }
3804
3805         stmmac_enable_all_queues(priv);
3806         return ret;
3807 }
3808
3809 static int stmmac_setup_tc_block(struct stmmac_priv *priv,
3810                                  struct tc_block_offload *f)
3811 {
3812         if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
3813                 return -EOPNOTSUPP;
3814
3815         switch (f->command) {
3816         case TC_BLOCK_BIND:
3817                 return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
3818                                 priv, priv, f->extack);
3819         case TC_BLOCK_UNBIND:
3820                 tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
3821                 return 0;
3822         default:
3823                 return -EOPNOTSUPP;
3824         }
3825 }
3826
3827 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
3828                            void *type_data)
3829 {
3830         struct stmmac_priv *priv = netdev_priv(ndev);
3831
3832         switch (type) {
3833         case TC_SETUP_BLOCK:
3834                 return stmmac_setup_tc_block(priv, type_data);
3835         case TC_SETUP_QDISC_CBS:
3836                 return stmmac_tc_setup_cbs(priv, priv, type_data);
3837         default:
3838                 return -EOPNOTSUPP;
3839         }
3840 }
3841
3842 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3843 {
3844         struct stmmac_priv *priv = netdev_priv(ndev);
3845         int ret = 0;
3846
3847         ret = eth_mac_addr(ndev, addr);
3848         if (ret)
3849                 return ret;
3850
3851         stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
3852
3853         return ret;
3854 }
3855
3856 #ifdef CONFIG_DEBUG_FS
3857 static struct dentry *stmmac_fs_dir;
3858
3859 static void sysfs_display_ring(void *head, int size, int extend_desc,
3860                                struct seq_file *seq)
3861 {
3862         int i;
3863         struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3864         struct dma_desc *p = (struct dma_desc *)head;
3865
3866         for (i = 0; i < size; i++) {
3867                 if (extend_desc) {
3868                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3869                                    i, (unsigned int)virt_to_phys(ep),
3870                                    le32_to_cpu(ep->basic.des0),
3871                                    le32_to_cpu(ep->basic.des1),
3872                                    le32_to_cpu(ep->basic.des2),
3873                                    le32_to_cpu(ep->basic.des3));
3874                         ep++;
3875                 } else {
3876                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3877                                    i, (unsigned int)virt_to_phys(p),
3878                                    le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3879                                    le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3880                         p++;
3881                 }
3882                 seq_printf(seq, "\n");
3883         }
3884 }
3885
3886 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
3887 {
3888         struct net_device *dev = seq->private;
3889         struct stmmac_priv *priv = netdev_priv(dev);
3890         u32 rx_count = priv->plat->rx_queues_to_use;
3891         u32 tx_count = priv->plat->tx_queues_to_use;
3892         u32 queue;
3893
3894         if ((dev->flags & IFF_UP) == 0)
3895                 return 0;
3896
3897         for (queue = 0; queue < rx_count; queue++) {
3898                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3899
3900                 seq_printf(seq, "RX Queue %d:\n", queue);
3901
3902                 if (priv->extend_desc) {
3903                         seq_printf(seq, "Extended descriptor ring:\n");
3904                         sysfs_display_ring((void *)rx_q->dma_erx,
3905                                            DMA_RX_SIZE, 1, seq);
3906                 } else {
3907                         seq_printf(seq, "Descriptor ring:\n");
3908                         sysfs_display_ring((void *)rx_q->dma_rx,
3909                                            DMA_RX_SIZE, 0, seq);
3910                 }
3911         }
3912
3913         for (queue = 0; queue < tx_count; queue++) {
3914                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3915
3916                 seq_printf(seq, "TX Queue %d:\n", queue);
3917
3918                 if (priv->extend_desc) {
3919                         seq_printf(seq, "Extended descriptor ring:\n");
3920                         sysfs_display_ring((void *)tx_q->dma_etx,
3921                                            DMA_TX_SIZE, 1, seq);
3922                 } else {
3923                         seq_printf(seq, "Descriptor ring:\n");
3924                         sysfs_display_ring((void *)tx_q->dma_tx,
3925                                            DMA_TX_SIZE, 0, seq);
3926                 }
3927         }
3928
3929         return 0;
3930 }
3931 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
3932
3933 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
3934 {
3935         struct net_device *dev = seq->private;
3936         struct stmmac_priv *priv = netdev_priv(dev);
3937
3938         if (!priv->hw_cap_support) {
3939                 seq_printf(seq, "DMA HW features not supported\n");
3940                 return 0;
3941         }
3942
3943         seq_printf(seq, "==============================\n");
3944         seq_printf(seq, "\tDMA HW features\n");
3945         seq_printf(seq, "==============================\n");
3946
3947         seq_printf(seq, "\t10/100 Mbps: %s\n",
3948                    (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3949         seq_printf(seq, "\t1000 Mbps: %s\n",
3950                    (priv->dma_cap.mbps_1000) ? "Y" : "N");
3951         seq_printf(seq, "\tHalf duplex: %s\n",
3952                    (priv->dma_cap.half_duplex) ? "Y" : "N");
3953         seq_printf(seq, "\tHash Filter: %s\n",
3954                    (priv->dma_cap.hash_filter) ? "Y" : "N");
3955         seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3956                    (priv->dma_cap.multi_addr) ? "Y" : "N");
3957         seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3958                    (priv->dma_cap.pcs) ? "Y" : "N");
3959         seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3960                    (priv->dma_cap.sma_mdio) ? "Y" : "N");
3961         seq_printf(seq, "\tPMT Remote wake up: %s\n",
3962                    (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3963         seq_printf(seq, "\tPMT Magic Frame: %s\n",
3964                    (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3965         seq_printf(seq, "\tRMON module: %s\n",
3966                    (priv->dma_cap.rmon) ? "Y" : "N");
3967         seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3968                    (priv->dma_cap.time_stamp) ? "Y" : "N");
3969         seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3970                    (priv->dma_cap.atime_stamp) ? "Y" : "N");
3971         seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3972                    (priv->dma_cap.eee) ? "Y" : "N");
3973         seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3974         seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3975                    (priv->dma_cap.tx_coe) ? "Y" : "N");
3976         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3977                 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3978                            (priv->dma_cap.rx_coe) ? "Y" : "N");
3979         } else {
3980                 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3981                            (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3982                 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3983                            (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3984         }
3985         seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3986                    (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3987         seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
3988                    priv->dma_cap.number_rx_channel);
3989         seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
3990                    priv->dma_cap.number_tx_channel);
3991         seq_printf(seq, "\tEnhanced descriptors: %s\n",
3992                    (priv->dma_cap.enh_desc) ? "Y" : "N");
3993
3994         return 0;
3995 }
3996 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
3997
3998 static int stmmac_init_fs(struct net_device *dev)
3999 {
4000         struct stmmac_priv *priv = netdev_priv(dev);
4001
4002         /* Create per netdev entries */
4003         priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
4004
4005         if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
4006                 netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
4007
4008                 return -ENOMEM;
4009         }
4010
4011         /* Entry to report DMA RX/TX rings */
4012         priv->dbgfs_rings_status =
4013                 debugfs_create_file("descriptors_status", 0444,
4014                                     priv->dbgfs_dir, dev,
4015                                     &stmmac_rings_status_fops);
4016
4017         if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
4018                 netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
4019                 debugfs_remove_recursive(priv->dbgfs_dir);
4020
4021                 return -ENOMEM;
4022         }
4023
4024         /* Entry to report the DMA HW features */
4025         priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
4026                                                   priv->dbgfs_dir,
4027                                                   dev, &stmmac_dma_cap_fops);
4028
4029         if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
4030                 netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
4031                 debugfs_remove_recursive(priv->dbgfs_dir);
4032
4033                 return -ENOMEM;
4034         }
4035
4036         return 0;
4037 }
4038
4039 static void stmmac_exit_fs(struct net_device *dev)
4040 {
4041         struct stmmac_priv *priv = netdev_priv(dev);
4042
4043         debugfs_remove_recursive(priv->dbgfs_dir);
4044 }
4045 #endif /* CONFIG_DEBUG_FS */
4046
4047 static const struct net_device_ops stmmac_netdev_ops = {
4048         .ndo_open = stmmac_open,
4049         .ndo_start_xmit = stmmac_xmit,
4050         .ndo_stop = stmmac_release,
4051         .ndo_change_mtu = stmmac_change_mtu,
4052         .ndo_fix_features = stmmac_fix_features,
4053         .ndo_set_features = stmmac_set_features,
4054         .ndo_set_rx_mode = stmmac_set_rx_mode,
4055         .ndo_tx_timeout = stmmac_tx_timeout,
4056         .ndo_do_ioctl = stmmac_ioctl,
4057         .ndo_setup_tc = stmmac_setup_tc,
4058 #ifdef CONFIG_NET_POLL_CONTROLLER
4059         .ndo_poll_controller = stmmac_poll_controller,
4060 #endif
4061         .ndo_set_mac_address = stmmac_set_mac_address,
4062 };
4063
4064 static void stmmac_reset_subtask(struct stmmac_priv *priv)
4065 {
4066         if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
4067                 return;
4068         if (test_bit(STMMAC_DOWN, &priv->state))
4069                 return;
4070
4071         netdev_err(priv->dev, "Reset adapter.\n");
4072
4073         rtnl_lock();
4074         netif_trans_update(priv->dev);
4075         while (test_and_set_bit(STMMAC_RESETING, &priv->state))
4076                 usleep_range(1000, 2000);
4077
4078         set_bit(STMMAC_DOWN, &priv->state);
4079         dev_close(priv->dev);
4080         dev_open(priv->dev, NULL);
4081         clear_bit(STMMAC_DOWN, &priv->state);
4082         clear_bit(STMMAC_RESETING, &priv->state);
4083         rtnl_unlock();
4084 }
4085
4086 static void stmmac_service_task(struct work_struct *work)
4087 {
4088         struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
4089                         service_task);
4090
4091         stmmac_reset_subtask(priv);
4092         clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
4093 }
4094
4095 /**
4096  *  stmmac_hw_init - Init the MAC device
4097  *  @priv: driver private structure
4098  *  Description: this function is to configure the MAC device according to
4099  *  some platform parameters or the HW capability register. It prepares the
4100  *  driver to use either ring or chain modes and to setup either enhanced or
4101  *  normal descriptors.
4102  */
4103 static int stmmac_hw_init(struct stmmac_priv *priv)
4104 {
4105         int ret;
4106
4107         /* dwmac-sun8i only works in chain mode */
4108         if (priv->plat->has_sun8i)
4109                 chain_mode = 1;
4110         priv->chain_mode = chain_mode;
4111
4112         /* Initialize HW Interface */
4113         ret = stmmac_hwif_init(priv);
4114         if (ret)
4115                 return ret;
4116
4117         /* Get the HW capability (available on GMAC cores newer than 3.50a) */
4118         priv->hw_cap_support = stmmac_get_hw_features(priv);
4119         if (priv->hw_cap_support) {
4120                 dev_info(priv->device, "DMA HW capability register supported\n");
4121
4122                 /* We can override some gmac/dma configuration fields
4123                  * (e.g. enh_desc, tx_coe) that are passed through the
4124                  * platform data with the values from the HW capability
4125                  * register (if supported).
4126                  */
4127                 priv->plat->enh_desc = priv->dma_cap.enh_desc;
4128                 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4129                 priv->hw->pmt = priv->plat->pmt;
4130
4131                 /* TXCOE doesn't work in thresh DMA mode */
4132                 if (priv->plat->force_thresh_dma_mode)
4133                         priv->plat->tx_coe = 0;
4134                 else
4135                         priv->plat->tx_coe = priv->dma_cap.tx_coe;
4136
4137                 /* In case of GMAC4 rx_coe is from HW cap register. */
4138                 priv->plat->rx_coe = priv->dma_cap.rx_coe;
4139
4140                 if (priv->dma_cap.rx_coe_type2)
4141                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4142                 else if (priv->dma_cap.rx_coe_type1)
4143                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4144
4145         } else {
4146                 dev_info(priv->device, "No HW DMA feature register supported\n");
4147         }
4148
4149         if (priv->plat->rx_coe) {
4150                 priv->hw->rx_csum = priv->plat->rx_coe;
4151                 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4152                 if (priv->synopsys_id < DWMAC_CORE_4_00)
4153                         dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4154         }
4155         if (priv->plat->tx_coe)
4156                 dev_info(priv->device, "TX Checksum insertion supported\n");
4157
4158         if (priv->plat->pmt) {
4159                 dev_info(priv->device, "Wake-Up On LAN supported\n");
4160                 device_set_wakeup_capable(priv->device, 1);
4161         }
4162
4163         if (priv->dma_cap.tsoen)
4164                 dev_info(priv->device, "TSO supported\n");
4165
4166         /* Run HW quirks, if any */
4167         if (priv->hwif_quirks) {
4168                 ret = priv->hwif_quirks(priv);
4169                 if (ret)
4170                         return ret;
4171         }
4172
4173         /* Rx Watchdog is available in cores newer than 3.40.
4174          * In some cases, for example on buggy HW, this feature
4175          * has to be disabled; this can be done by passing the
4176          * riwt_off field from the platform.
4177          */
4178         if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
4179             (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
4180                 priv->use_riwt = 1;
4181                 dev_info(priv->device,
4182                          "Enable RX Mitigation via HW Watchdog Timer\n");
4183         }
4184
4185         return 0;
4186 }
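/* The capability handling above honours a few platform-data overrides
 * (force_thresh_dma_mode, riwt_off, tx_coe). A minimal sketch, assuming
 * a hypothetical glue driver that wants threshold DMA mode and no RX
 * watchdog mitigation, would set them before stmmac_hw_init() runs:
 */
#if 0	/* illustrative sketch, not compiled */
static void example_fill_plat(struct plat_stmmacenet_data *plat)
{
	/* Force threshold DMA mode; stmmac_hw_init() then clears tx_coe */
	plat->force_thresh_dma_mode = 1;

	/* Keep use_riwt disabled even on cores >= 3.50 / XGMAC */
	plat->riwt_off = 1;
}
#endif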
4187
4188 /**
4189  * stmmac_dvr_probe
4190  * @device: device pointer
4191  * @plat_dat: platform data pointer
4192  * @res: stmmac resource pointer
4193  * Description: this is the main probe function used to
4194  * call alloc_etherdev and allocate the private structure.
4195  * Return:
4196  * returns 0 on success, otherwise a negative errno.
4197  */
4198 int stmmac_dvr_probe(struct device *device,
4199                      struct plat_stmmacenet_data *plat_dat,
4200                      struct stmmac_resources *res)
4201 {
4202         struct net_device *ndev = NULL;
4203         struct stmmac_priv *priv;
4204         u32 queue, maxq;
4205         int ret = 0;
4206
4207         ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4208                                   MTL_MAX_TX_QUEUES,
4209                                   MTL_MAX_RX_QUEUES);
4210         if (!ndev)
4211                 return -ENOMEM;
4212
4213         SET_NETDEV_DEV(ndev, device);
4214
4215         priv = netdev_priv(ndev);
4216         priv->device = device;
4217         priv->dev = ndev;
4218
4219         stmmac_set_ethtool_ops(ndev);
4220         priv->pause = pause;
4221         priv->plat = plat_dat;
4222         priv->ioaddr = res->addr;
4223         priv->dev->base_addr = (unsigned long)res->addr;
4224
4225         priv->dev->irq = res->irq;
4226         priv->wol_irq = res->wol_irq;
4227         priv->lpi_irq = res->lpi_irq;
4228
4229         if (res->mac)
4230                 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4231
4232         dev_set_drvdata(device, priv->dev);
4233
4234         /* Verify driver arguments */
4235         stmmac_verify_args();
4236
4237         /* Allocate workqueue */
4238         priv->wq = create_singlethread_workqueue("stmmac_wq");
4239         if (!priv->wq) {
4240                 dev_err(priv->device, "failed to create workqueue\n");
4241                 ret = -ENOMEM;
4242                 goto error_wq;
4243         }
4244
4245         INIT_WORK(&priv->service_task, stmmac_service_task);
4246
4247         /* Override with kernel parameters if supplied. XXX CRS XXX:
4248          * this needs to support multiple instances.
4249          */
4250         if ((phyaddr >= 0) && (phyaddr <= 31))
4251                 priv->plat->phy_addr = phyaddr;
4252
4253         if (priv->plat->stmmac_rst) {
4254                 ret = reset_control_assert(priv->plat->stmmac_rst);
4255                 reset_control_deassert(priv->plat->stmmac_rst);
4256                 /* Some reset controllers have only a reset callback instead
4257                  * of an assert + deassert callback pair.
4258                  */
4259                 if (ret == -ENOTSUPP)
4260                         reset_control_reset(priv->plat->stmmac_rst);
4261         }
4262
4263         /* Init MAC and get the capabilities */
4264         ret = stmmac_hw_init(priv);
4265         if (ret)
4266                 goto error_hw_init;
4267
4268         /* Configure real RX and TX queues */
4269         netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4270         netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4271
4272         ndev->netdev_ops = &stmmac_netdev_ops;
4273
4274         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4275                             NETIF_F_RXCSUM;
4276
4277         ret = stmmac_tc_init(priv, priv);
4278         if (!ret) {
4279                 ndev->hw_features |= NETIF_F_HW_TC;
4280         }
4281
4282         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4283                 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4284                 priv->tso = true;
4285                 dev_info(priv->device, "TSO feature enabled\n");
4286         }
4287         ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4288         ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4289 #ifdef STMMAC_VLAN_TAG_USED
4290         /* Both mac100 and gmac support receive VLAN tag detection */
4291         ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
4292 #endif
4293         priv->msg_enable = netif_msg_init(debug, default_msg_level);
4294
4295         /* MTU range: 46 - hw-specific max */
4296         ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4297         if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4298                 ndev->max_mtu = JUMBO_LEN;
4299         else if (priv->plat->has_xgmac)
4300                 ndev->max_mtu = XGMAC_JUMBO_LEN;
4301         else
4302                 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4303         /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
4304          * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
4305          */
4306         if ((priv->plat->maxmtu < ndev->max_mtu) &&
4307             (priv->plat->maxmtu >= ndev->min_mtu))
4308                 ndev->max_mtu = priv->plat->maxmtu;
4309         else if (priv->plat->maxmtu < ndev->min_mtu)
4310                 dev_warn(priv->device,
4311                          "%s: warning: maxmtu has an invalid value (%d)\n",
4312                          __func__, priv->plat->maxmtu);
4313
4314         if (flow_ctrl)
4315                 priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
4316
4317         /* Setup channels NAPI */
4318         maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
4319
4320         for (queue = 0; queue < maxq; queue++) {
4321                 struct stmmac_channel *ch = &priv->channel[queue];
4322
4323                 ch->priv_data = priv;
4324                 ch->index = queue;
4325
4326                 if (queue < priv->plat->rx_queues_to_use)
4327                         ch->has_rx = true;
4328                 if (queue < priv->plat->tx_queues_to_use)
4329                         ch->has_tx = true;
4330
4331                 netif_napi_add(ndev, &ch->napi, stmmac_napi_poll,
4332                                NAPI_POLL_WEIGHT);
4333         }
4334
4335         mutex_init(&priv->lock);
4336
4337         /* If a specific clk_csr value is passed from the platform,
4338          * this means that the CSR Clock Range selection cannot be
4339          * changed at run-time and is fixed. Otherwise, the driver will
4340          * try to set the MDC clock dynamically according to the actual
4341          * CSR clock input.
4342          */
4343         if (!priv->plat->clk_csr)
4344                 stmmac_clk_csr_set(priv);
4345         else
4346                 priv->clk_csr = priv->plat->clk_csr;
4347
4348         stmmac_check_pcs_mode(priv);
4349
4350         if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
4351             priv->hw->pcs != STMMAC_PCS_TBI &&
4352             priv->hw->pcs != STMMAC_PCS_RTBI) {
4353                 /* MDIO bus Registration */
4354                 ret = stmmac_mdio_register(ndev);
4355                 if (ret < 0) {
4356                         dev_err(priv->device,
4357                                 "%s: MDIO bus (id: %d) registration failed",
4358                                 __func__, priv->plat->bus_id);
4359                         goto error_mdio_register;
4360                 }
4361         }
4362
4363         ret = register_netdev(ndev);
4364         if (ret) {
4365                 dev_err(priv->device, "%s: ERROR %i registering the device\n",
4366                         __func__, ret);
4367                 goto error_netdev_register;
4368         }
4369
4370 #ifdef CONFIG_DEBUG_FS
4371         ret = stmmac_init_fs(ndev);
4372         if (ret < 0)
4373                 netdev_warn(priv->dev, "%s: failed debugFS registration\n",
4374                             __func__);
4375 #endif
4376
4377         return ret;
4378
4379 error_netdev_register:
4380         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4381             priv->hw->pcs != STMMAC_PCS_TBI &&
4382             priv->hw->pcs != STMMAC_PCS_RTBI)
4383                 stmmac_mdio_unregister(ndev);
4384 error_mdio_register:
4385         for (queue = 0; queue < maxq; queue++) {
4386                 struct stmmac_channel *ch = &priv->channel[queue];
4387
4388                 netif_napi_del(&ch->napi);
4389         }
4390 error_hw_init:
4391         destroy_workqueue(priv->wq);
4392 error_wq:
4393         free_netdev(ndev);
4394
4395         return ret;
4396 }
4397 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
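/* stmmac_dvr_probe() is meant to be called from a bus glue layer. The
 * sketch below (function and parameter names hypothetical) fills only
 * the stmmac_resources fields that the probe above actually consumes:
 * addr, irq, wol_irq, lpi_irq and, optionally, mac.
 */
#if 0	/* illustrative sketch, not compiled */
static int example_glue_probe(struct device *dev,
			      struct plat_stmmacenet_data *plat,
			      void __iomem *base, int irq)
{
	struct stmmac_resources res;

	memset(&res, 0, sizeof(res));
	res.addr = base;	/* becomes priv->ioaddr / ndev->base_addr */
	res.irq = irq;		/* main MAC interrupt */
	res.wol_irq = irq;	/* reuse it if no dedicated WoL line exists */
	res.lpi_irq = irq;	/* likewise for the LPI interrupt */
	/* res.mac left NULL: the probe only copies it when provided */

	return stmmac_dvr_probe(dev, plat, &res);
}
#endif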
4398
4399 /**
4400  * stmmac_dvr_remove
4401  * @dev: device pointer
4402  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
4403  * changes the link status, and releases the DMA descriptor rings.
4404  */
4405 int stmmac_dvr_remove(struct device *dev)
4406 {
4407         struct net_device *ndev = dev_get_drvdata(dev);
4408         struct stmmac_priv *priv = netdev_priv(ndev);
4409
4410         netdev_info(priv->dev, "%s: removing driver", __func__);
4411
4412 #ifdef CONFIG_DEBUG_FS
4413         stmmac_exit_fs(ndev);
4414 #endif
4415         stmmac_stop_all_dma(priv);
4416
4417         stmmac_mac_set(priv, priv->ioaddr, false);
4418         netif_carrier_off(ndev);
4419         unregister_netdev(ndev);
4420         if (priv->plat->stmmac_rst)
4421                 reset_control_assert(priv->plat->stmmac_rst);
4422         clk_disable_unprepare(priv->plat->pclk);
4423         clk_disable_unprepare(priv->plat->stmmac_clk);
4424         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4425             priv->hw->pcs != STMMAC_PCS_TBI &&
4426             priv->hw->pcs != STMMAC_PCS_RTBI)
4427                 stmmac_mdio_unregister(ndev);
4428         destroy_workqueue(priv->wq);
4429         mutex_destroy(&priv->lock);
4430         free_netdev(ndev);
4431
4432         return 0;
4433 }
4434 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
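/* The teardown is fully self-contained: a glue driver's remove callback
 * (sketched below with a hypothetical name) only has to hand back the
 * struct device it passed to stmmac_dvr_probe().
 */
#if 0	/* illustrative sketch, not compiled */
static int example_glue_remove(struct platform_device *pdev)
{
	return stmmac_dvr_remove(&pdev->dev);
}
#endif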
4435
4436 /**
4437  * stmmac_suspend - suspend callback
4438  * @dev: device pointer
4439  * Description: this function suspends the device; it is called by the
4440  * platform driver to stop the network queues, program the PMT register
4441  * (for WoL), and clean up and release driver resources.
4442  */
4443 int stmmac_suspend(struct device *dev)
4444 {
4445         struct net_device *ndev = dev_get_drvdata(dev);
4446         struct stmmac_priv *priv = netdev_priv(ndev);
4447
4448         if (!ndev || !netif_running(ndev))
4449                 return 0;
4450
4451         if (ndev->phydev)
4452                 phy_stop(ndev->phydev);
4453
4454         mutex_lock(&priv->lock);
4455
4456         netif_device_detach(ndev);
4457         stmmac_stop_all_queues(priv);
4458
4459         stmmac_disable_all_queues(priv);
4460
4461         /* Stop TX/RX DMA */
4462         stmmac_stop_all_dma(priv);
4463
4464         /* Enable Power down mode by programming the PMT regs */
4465         if (device_may_wakeup(priv->device)) {
4466                 stmmac_pmt(priv, priv->hw, priv->wolopts);
4467                 priv->irq_wake = 1;
4468         } else {
4469                 stmmac_mac_set(priv, priv->ioaddr, false);
4470                 pinctrl_pm_select_sleep_state(priv->device);
4471                 /* Disable the clocks since wake-up (WoL) is not used */
4472                 clk_disable(priv->plat->pclk);
4473                 clk_disable(priv->plat->stmmac_clk);
4474         }
4475         mutex_unlock(&priv->lock);
4476
4477         priv->oldlink = false;
4478         priv->speed = SPEED_UNKNOWN;
4479         priv->oldduplex = DUPLEX_UNKNOWN;
4480         return 0;
4481 }
4482 EXPORT_SYMBOL_GPL(stmmac_suspend);
4483
4484 /**
4485  * stmmac_reset_queues_param - reset queue parameters
4486  * @priv: driver private structure
4487  */
4488 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4489 {
4490         u32 rx_cnt = priv->plat->rx_queues_to_use;
4491         u32 tx_cnt = priv->plat->tx_queues_to_use;
4492         u32 queue;
4493
4494         for (queue = 0; queue < rx_cnt; queue++) {
4495                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4496
4497                 rx_q->cur_rx = 0;
4498                 rx_q->dirty_rx = 0;
4499         }
4500
4501         for (queue = 0; queue < tx_cnt; queue++) {
4502                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4503
4504                 tx_q->cur_tx = 0;
4505                 tx_q->dirty_tx = 0;
4506                 tx_q->mss = 0;
4507         }
4508 }
4509
4510 /**
4511  * stmmac_resume - resume callback
4512  * @dev: device pointer
4513  * Description: on resume this function is invoked to set up the DMA and CORE
4514  * in a usable state.
4515  */
4516 int stmmac_resume(struct device *dev)
4517 {
4518         struct net_device *ndev = dev_get_drvdata(dev);
4519         struct stmmac_priv *priv = netdev_priv(ndev);
4520
4521         if (!netif_running(ndev))
4522                 return 0;
4523
4524         /* The Power Down bit in the PMT register is cleared
4525          * automatically as soon as a magic packet or a Wake-up frame
4526          * is received. Nevertheless, it's better to clear this bit
4527          * manually because it can cause problems when resuming
4528          * from other devices (e.g. a serial console).
4529          */
4530         if (device_may_wakeup(priv->device)) {
4531                 mutex_lock(&priv->lock);
4532                 stmmac_pmt(priv, priv->hw, 0);
4533                 mutex_unlock(&priv->lock);
4534                 priv->irq_wake = 0;
4535         } else {
4536                 pinctrl_pm_select_default_state(priv->device);
4537                 /* enable the clocks previously disabled */
4538                 clk_enable(priv->plat->stmmac_clk);
4539                 clk_enable(priv->plat->pclk);
4540                 /* reset the PHY so that it's ready */
4541                 if (priv->mii)
4542                         stmmac_mdio_reset(priv->mii);
4543         }
4544
4545         netif_device_attach(ndev);
4546
4547         mutex_lock(&priv->lock);
4548
4549         stmmac_reset_queues_param(priv);
4550
4551         stmmac_clear_descriptors(priv);
4552
4553         stmmac_hw_setup(ndev, false);
4554         stmmac_init_tx_coalesce(priv);
4555         stmmac_set_rx_mode(ndev);
4556
4557         stmmac_enable_all_queues(priv);
4558
4559         stmmac_start_all_queues(priv);
4560
4561         mutex_unlock(&priv->lock);
4562
4563         if (ndev->phydev)
4564                 phy_start(ndev->phydev);
4565
4566         return 0;
4567 }
4568 EXPORT_SYMBOL_GPL(stmmac_resume);
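/* stmmac_suspend()/stmmac_resume() are exported so that glue drivers can
 * route system sleep to them. A minimal sketch, assuming a hypothetical
 * platform glue driver with no extra suspend work of its own:
 */
#if 0	/* illustrative sketch, not compiled */
static SIMPLE_DEV_PM_OPS(example_stmmac_pm_ops, stmmac_suspend, stmmac_resume);

static struct platform_driver example_glue_driver = {
	.driver = {
		.name	= "example-dwmac",
		.pm	= &example_stmmac_pm_ops,
	},
};
#endif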
4569
4570 #ifndef MODULE
4571 static int __init stmmac_cmdline_opt(char *str)
4572 {
4573         char *opt;
4574
4575         if (!str || !*str)
4576                 return -EINVAL;
4577         while ((opt = strsep(&str, ",")) != NULL) {
4578                 if (!strncmp(opt, "debug:", 6)) {
4579                         if (kstrtoint(opt + 6, 0, &debug))
4580                                 goto err;
4581                 } else if (!strncmp(opt, "phyaddr:", 8)) {
4582                         if (kstrtoint(opt + 8, 0, &phyaddr))
4583                                 goto err;
4584                 } else if (!strncmp(opt, "buf_sz:", 7)) {
4585                         if (kstrtoint(opt + 7, 0, &buf_sz))
4586                                 goto err;
4587                 } else if (!strncmp(opt, "tc:", 3)) {
4588                         if (kstrtoint(opt + 3, 0, &tc))
4589                                 goto err;
4590                 } else if (!strncmp(opt, "watchdog:", 9)) {
4591                         if (kstrtoint(opt + 9, 0, &watchdog))
4592                                 goto err;
4593                 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
4594                         if (kstrtoint(opt + 10, 0, &flow_ctrl))
4595                                 goto err;
4596                 } else if (!strncmp(opt, "pause:", 6)) {
4597                         if (kstrtoint(opt + 6, 0, &pause))
4598                                 goto err;
4599                 } else if (!strncmp(opt, "eee_timer:", 10)) {
4600                         if (kstrtoint(opt + 10, 0, &eee_timer))
4601                                 goto err;
4602                 } else if (!strncmp(opt, "chain_mode:", 11)) {
4603                         if (kstrtoint(opt + 11, 0, &chain_mode))
4604                                 goto err;
4605                 }
4606         }
4607         return 0;
4608
4609 err:
4610         pr_err("%s: ERROR broken module parameter conversion\n", __func__);
4611         return -EINVAL;
4612 }
4613
4614 __setup("stmmaceth=", stmmac_cmdline_opt);
4615 #endif /* MODULE */
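/* Usage example for the built-in case only (the parser above is compiled
 * out when the driver is a module): a comma-separated list on the kernel
 * command line, where each key matches one of the prefixes handled above
 * and each value is parsed with kstrtoint() (decimal or 0x-prefixed hex):
 *
 *	stmmaceth=debug:16,phyaddr:1,watchdog:10000,buf_sz:2048,chain_mode:1
 */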
4616
4617 static int __init stmmac_init(void)
4618 {
4619 #ifdef CONFIG_DEBUG_FS
4620         /* Create debugfs main directory if it doesn't exist yet */
4621         if (!stmmac_fs_dir) {
4622                 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4623
4624                 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4625                         pr_err("ERROR %s, debugfs create directory failed\n",
4626                                STMMAC_RESOURCE_NAME);
4627
4628                         return -ENOMEM;
4629                 }
4630         }
4631 #endif
4632
4633         return 0;
4634 }
4635
4636 static void __exit stmmac_exit(void)
4637 {
4638 #ifdef CONFIG_DEBUG_FS
4639         debugfs_remove_recursive(stmmac_fs_dir);
4640 #endif
4641 }
4642
4643 module_init(stmmac_init)
4644 module_exit(stmmac_exit)
4645
4646 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4647 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4648 MODULE_LICENSE("GPL");