drivers/net/ethernet/renesas/sh_eth.c
1 /*  SuperH Ethernet device driver
2  *
3  *  Copyright (C) 2014  Renesas Electronics Corporation
4  *  Copyright (C) 2006-2012 Nobuhiro Iwamatsu
5  *  Copyright (C) 2008-2014 Renesas Solutions Corp.
6  *  Copyright (C) 2013-2014 Cogent Embedded, Inc.
7  *  Copyright (C) 2014 Codethink Limited
8  *
9  *  This program is free software; you can redistribute it and/or modify it
10  *  under the terms and conditions of the GNU General Public License,
11  *  version 2, as published by the Free Software Foundation.
12  *
13  *  This program is distributed in the hope it will be useful, but WITHOUT
14  *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  *  more details.
17  *
18  *  The full GNU General Public License is included in this distribution in
19  *  the file called "COPYING".
20  */
21
22 #include <linux/module.h>
23 #include <linux/kernel.h>
24 #include <linux/spinlock.h>
25 #include <linux/interrupt.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/etherdevice.h>
28 #include <linux/delay.h>
29 #include <linux/platform_device.h>
30 #include <linux/mdio-bitbang.h>
31 #include <linux/netdevice.h>
32 #include <linux/of.h>
33 #include <linux/of_device.h>
34 #include <linux/of_irq.h>
35 #include <linux/of_net.h>
36 #include <linux/phy.h>
37 #include <linux/cache.h>
38 #include <linux/io.h>
39 #include <linux/pm_runtime.h>
40 #include <linux/slab.h>
41 #include <linux/ethtool.h>
42 #include <linux/if_vlan.h>
43 #include <linux/clk.h>
44 #include <linux/sh_eth.h>
45 #include <linux/of_mdio.h>
46
47 #include "sh_eth.h"
48
49 #define SH_ETH_DEF_MSG_ENABLE \
50                 (NETIF_MSG_LINK | \
51                 NETIF_MSG_TIMER | \
52                 NETIF_MSG_RX_ERR | \
53                 NETIF_MSG_TX_ERR)
54
55 #define SH_ETH_OFFSET_INVALID   ((u16)~0)
56
57 #define SH_ETH_OFFSET_DEFAULTS                  \
58         [0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID
59
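/* Per-SoC register offset tables: every entry defaults to SH_ETH_OFFSET_INVALID
 * (via SH_ETH_OFFSET_DEFAULTS) and only the registers implemented by that
 * controller are filled in, so sh_eth_read()/sh_eth_write() can WARN on
 * accesses to registers the current chip does not have.
 */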
60 static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
61         SH_ETH_OFFSET_DEFAULTS,
62
63         [EDSR]          = 0x0000,
64         [EDMR]          = 0x0400,
65         [EDTRR]         = 0x0408,
66         [EDRRR]         = 0x0410,
67         [EESR]          = 0x0428,
68         [EESIPR]        = 0x0430,
69         [TDLAR]         = 0x0010,
70         [TDFAR]         = 0x0014,
71         [TDFXR]         = 0x0018,
72         [TDFFR]         = 0x001c,
73         [RDLAR]         = 0x0030,
74         [RDFAR]         = 0x0034,
75         [RDFXR]         = 0x0038,
76         [RDFFR]         = 0x003c,
77         [TRSCER]        = 0x0438,
78         [RMFCR]         = 0x0440,
79         [TFTR]          = 0x0448,
80         [FDR]           = 0x0450,
81         [RMCR]          = 0x0458,
82         [RPADIR]        = 0x0460,
83         [FCFTR]         = 0x0468,
84         [CSMR]          = 0x04E4,
85
86         [ECMR]          = 0x0500,
87         [ECSR]          = 0x0510,
88         [ECSIPR]        = 0x0518,
89         [PIR]           = 0x0520,
90         [PSR]           = 0x0528,
91         [PIPR]          = 0x052c,
92         [RFLR]          = 0x0508,
93         [APR]           = 0x0554,
94         [MPR]           = 0x0558,
95         [PFTCR]         = 0x055c,
96         [PFRCR]         = 0x0560,
97         [TPAUSER]       = 0x0564,
98         [GECMR]         = 0x05b0,
99         [BCULR]         = 0x05b4,
100         [MAHR]          = 0x05c0,
101         [MALR]          = 0x05c8,
102         [TROCR]         = 0x0700,
103         [CDCR]          = 0x0708,
104         [LCCR]          = 0x0710,
105         [CEFCR]         = 0x0740,
106         [FRECR]         = 0x0748,
107         [TSFRCR]        = 0x0750,
108         [TLFRCR]        = 0x0758,
109         [RFCR]          = 0x0760,
110         [CERCR]         = 0x0768,
111         [CEECR]         = 0x0770,
112         [MAFCR]         = 0x0778,
113         [RMII_MII]      = 0x0790,
114
115         [ARSTR]         = 0x0000,
116         [TSU_CTRST]     = 0x0004,
117         [TSU_FWEN0]     = 0x0010,
118         [TSU_FWEN1]     = 0x0014,
119         [TSU_FCM]       = 0x0018,
120         [TSU_BSYSL0]    = 0x0020,
121         [TSU_BSYSL1]    = 0x0024,
122         [TSU_PRISL0]    = 0x0028,
123         [TSU_PRISL1]    = 0x002c,
124         [TSU_FWSL0]     = 0x0030,
125         [TSU_FWSL1]     = 0x0034,
126         [TSU_FWSLC]     = 0x0038,
127         [TSU_QTAG0]     = 0x0040,
128         [TSU_QTAG1]     = 0x0044,
129         [TSU_FWSR]      = 0x0050,
130         [TSU_FWINMK]    = 0x0054,
131         [TSU_ADQT0]     = 0x0048,
132         [TSU_ADQT1]     = 0x004c,
133         [TSU_VTAG0]     = 0x0058,
134         [TSU_VTAG1]     = 0x005c,
135         [TSU_ADSBSY]    = 0x0060,
136         [TSU_TEN]       = 0x0064,
137         [TSU_POST1]     = 0x0070,
138         [TSU_POST2]     = 0x0074,
139         [TSU_POST3]     = 0x0078,
140         [TSU_POST4]     = 0x007c,
141         [TSU_ADRH0]     = 0x0100,
142
143         [TXNLCR0]       = 0x0080,
144         [TXALCR0]       = 0x0084,
145         [RXNLCR0]       = 0x0088,
146         [RXALCR0]       = 0x008c,
147         [FWNLCR0]       = 0x0090,
148         [FWALCR0]       = 0x0094,
149         [TXNLCR1]       = 0x00a0,
150         [TXALCR1]       = 0x00a4,
151         [RXNLCR1]       = 0x00a8,
152         [RXALCR1]       = 0x00ac,
153         [FWNLCR1]       = 0x00b0,
154         [FWALCR1]       = 0x00b4,
155 };
156
157 static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
158         SH_ETH_OFFSET_DEFAULTS,
159
160         [EDSR]          = 0x0000,
161         [EDMR]          = 0x0400,
162         [EDTRR]         = 0x0408,
163         [EDRRR]         = 0x0410,
164         [EESR]          = 0x0428,
165         [EESIPR]        = 0x0430,
166         [TDLAR]         = 0x0010,
167         [TDFAR]         = 0x0014,
168         [TDFXR]         = 0x0018,
169         [TDFFR]         = 0x001c,
170         [RDLAR]         = 0x0030,
171         [RDFAR]         = 0x0034,
172         [RDFXR]         = 0x0038,
173         [RDFFR]         = 0x003c,
174         [TRSCER]        = 0x0438,
175         [RMFCR]         = 0x0440,
176         [TFTR]          = 0x0448,
177         [FDR]           = 0x0450,
178         [RMCR]          = 0x0458,
179         [RPADIR]        = 0x0460,
180         [FCFTR]         = 0x0468,
181         [CSMR]          = 0x04E4,
182
183         [ECMR]          = 0x0500,
184         [RFLR]          = 0x0508,
185         [ECSR]          = 0x0510,
186         [ECSIPR]        = 0x0518,
187         [PIR]           = 0x0520,
188         [APR]           = 0x0554,
189         [MPR]           = 0x0558,
190         [PFTCR]         = 0x055c,
191         [PFRCR]         = 0x0560,
192         [TPAUSER]       = 0x0564,
193         [MAHR]          = 0x05c0,
194         [MALR]          = 0x05c8,
195         [CEFCR]         = 0x0740,
196         [FRECR]         = 0x0748,
197         [TSFRCR]        = 0x0750,
198         [TLFRCR]        = 0x0758,
199         [RFCR]          = 0x0760,
200         [MAFCR]         = 0x0778,
201
202         [ARSTR]         = 0x0000,
203         [TSU_CTRST]     = 0x0004,
204         [TSU_VTAG0]     = 0x0058,
205         [TSU_ADSBSY]    = 0x0060,
206         [TSU_TEN]       = 0x0064,
207         [TSU_ADRH0]     = 0x0100,
208
209         [TXNLCR0]       = 0x0080,
210         [TXALCR0]       = 0x0084,
211         [RXNLCR0]       = 0x0088,
212         [RXALCR0]       = 0x008C,
213 };
214
215 static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
216         SH_ETH_OFFSET_DEFAULTS,
217
218         [ECMR]          = 0x0300,
219         [RFLR]          = 0x0308,
220         [ECSR]          = 0x0310,
221         [ECSIPR]        = 0x0318,
222         [PIR]           = 0x0320,
223         [PSR]           = 0x0328,
224         [RDMLR]         = 0x0340,
225         [IPGR]          = 0x0350,
226         [APR]           = 0x0354,
227         [MPR]           = 0x0358,
228         [RFCF]          = 0x0360,
229         [TPAUSER]       = 0x0364,
230         [TPAUSECR]      = 0x0368,
231         [MAHR]          = 0x03c0,
232         [MALR]          = 0x03c8,
233         [TROCR]         = 0x03d0,
234         [CDCR]          = 0x03d4,
235         [LCCR]          = 0x03d8,
236         [CNDCR]         = 0x03dc,
237         [CEFCR]         = 0x03e4,
238         [FRECR]         = 0x03e8,
239         [TSFRCR]        = 0x03ec,
240         [TLFRCR]        = 0x03f0,
241         [RFCR]          = 0x03f4,
242         [MAFCR]         = 0x03f8,
243
244         [EDMR]          = 0x0200,
245         [EDTRR]         = 0x0208,
246         [EDRRR]         = 0x0210,
247         [TDLAR]         = 0x0218,
248         [RDLAR]         = 0x0220,
249         [EESR]          = 0x0228,
250         [EESIPR]        = 0x0230,
251         [TRSCER]        = 0x0238,
252         [RMFCR]         = 0x0240,
253         [TFTR]          = 0x0248,
254         [FDR]           = 0x0250,
255         [RMCR]          = 0x0258,
256         [TFUCR]         = 0x0264,
257         [RFOCR]         = 0x0268,
258         [RMIIMODE]      = 0x026c,
259         [FCFTR]         = 0x0270,
260         [TRIMD]         = 0x027c,
261 };
262
263 static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
264         SH_ETH_OFFSET_DEFAULTS,
265
266         [ECMR]          = 0x0100,
267         [RFLR]          = 0x0108,
268         [ECSR]          = 0x0110,
269         [ECSIPR]        = 0x0118,
270         [PIR]           = 0x0120,
271         [PSR]           = 0x0128,
272         [RDMLR]         = 0x0140,
273         [IPGR]          = 0x0150,
274         [APR]           = 0x0154,
275         [MPR]           = 0x0158,
276         [TPAUSER]       = 0x0164,
277         [RFCF]          = 0x0160,
278         [TPAUSECR]      = 0x0168,
279         [BCFRR]         = 0x016c,
280         [MAHR]          = 0x01c0,
281         [MALR]          = 0x01c8,
282         [TROCR]         = 0x01d0,
283         [CDCR]          = 0x01d4,
284         [LCCR]          = 0x01d8,
285         [CNDCR]         = 0x01dc,
286         [CEFCR]         = 0x01e4,
287         [FRECR]         = 0x01e8,
288         [TSFRCR]        = 0x01ec,
289         [TLFRCR]        = 0x01f0,
290         [RFCR]          = 0x01f4,
291         [MAFCR]         = 0x01f8,
292         [RTRATE]        = 0x01fc,
293
294         [EDMR]          = 0x0000,
295         [EDTRR]         = 0x0008,
296         [EDRRR]         = 0x0010,
297         [TDLAR]         = 0x0018,
298         [RDLAR]         = 0x0020,
299         [EESR]          = 0x0028,
300         [EESIPR]        = 0x0030,
301         [TRSCER]        = 0x0038,
302         [RMFCR]         = 0x0040,
303         [TFTR]          = 0x0048,
304         [FDR]           = 0x0050,
305         [RMCR]          = 0x0058,
306         [TFUCR]         = 0x0064,
307         [RFOCR]         = 0x0068,
308         [FCFTR]         = 0x0070,
309         [RPADIR]        = 0x0078,
310         [TRIMD]         = 0x007c,
311         [RBWAR]         = 0x00c8,
312         [RDFAR]         = 0x00cc,
313         [TBRAR]         = 0x00d4,
314         [TDFAR]         = 0x00d8,
315 };
316
317 static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
318         SH_ETH_OFFSET_DEFAULTS,
319
320         [EDMR]          = 0x0000,
321         [EDTRR]         = 0x0004,
322         [EDRRR]         = 0x0008,
323         [TDLAR]         = 0x000c,
324         [RDLAR]         = 0x0010,
325         [EESR]          = 0x0014,
326         [EESIPR]        = 0x0018,
327         [TRSCER]        = 0x001c,
328         [RMFCR]         = 0x0020,
329         [TFTR]          = 0x0024,
330         [FDR]           = 0x0028,
331         [RMCR]          = 0x002c,
332         [EDOCR]         = 0x0030,
333         [FCFTR]         = 0x0034,
334         [RPADIR]        = 0x0038,
335         [TRIMD]         = 0x003c,
336         [RBWAR]         = 0x0040,
337         [RDFAR]         = 0x0044,
338         [TBRAR]         = 0x004c,
339         [TDFAR]         = 0x0050,
340
341         [ECMR]          = 0x0160,
342         [ECSR]          = 0x0164,
343         [ECSIPR]        = 0x0168,
344         [PIR]           = 0x016c,
345         [MAHR]          = 0x0170,
346         [MALR]          = 0x0174,
347         [RFLR]          = 0x0178,
348         [PSR]           = 0x017c,
349         [TROCR]         = 0x0180,
350         [CDCR]          = 0x0184,
351         [LCCR]          = 0x0188,
352         [CNDCR]         = 0x018c,
353         [CEFCR]         = 0x0194,
354         [FRECR]         = 0x0198,
355         [TSFRCR]        = 0x019c,
356         [TLFRCR]        = 0x01a0,
357         [RFCR]          = 0x01a4,
358         [MAFCR]         = 0x01a8,
359         [IPGR]          = 0x01b4,
360         [APR]           = 0x01b8,
361         [MPR]           = 0x01bc,
362         [TPAUSER]       = 0x01c4,
363         [BCFR]          = 0x01cc,
364
365         [ARSTR]         = 0x0000,
366         [TSU_CTRST]     = 0x0004,
367         [TSU_FWEN0]     = 0x0010,
368         [TSU_FWEN1]     = 0x0014,
369         [TSU_FCM]       = 0x0018,
370         [TSU_BSYSL0]    = 0x0020,
371         [TSU_BSYSL1]    = 0x0024,
372         [TSU_PRISL0]    = 0x0028,
373         [TSU_PRISL1]    = 0x002c,
374         [TSU_FWSL0]     = 0x0030,
375         [TSU_FWSL1]     = 0x0034,
376         [TSU_FWSLC]     = 0x0038,
377         [TSU_QTAGM0]    = 0x0040,
378         [TSU_QTAGM1]    = 0x0044,
379         [TSU_ADQT0]     = 0x0048,
380         [TSU_ADQT1]     = 0x004c,
381         [TSU_FWSR]      = 0x0050,
382         [TSU_FWINMK]    = 0x0054,
383         [TSU_ADSBSY]    = 0x0060,
384         [TSU_TEN]       = 0x0064,
385         [TSU_POST1]     = 0x0070,
386         [TSU_POST2]     = 0x0074,
387         [TSU_POST3]     = 0x0078,
388         [TSU_POST4]     = 0x007c,
389
390         [TXNLCR0]       = 0x0080,
391         [TXALCR0]       = 0x0084,
392         [RXNLCR0]       = 0x0088,
393         [RXALCR0]       = 0x008c,
394         [FWNLCR0]       = 0x0090,
395         [FWALCR0]       = 0x0094,
396         [TXNLCR1]       = 0x00a0,
397         [TXALCR1]       = 0x00a4,
398         [RXNLCR1]       = 0x00a8,
399         [RXALCR1]       = 0x00ac,
400         [FWNLCR1]       = 0x00b0,
401         [FWALCR1]       = 0x00b4,
402
403         [TSU_ADRH0]     = 0x0100,
404 };
405
406 static void sh_eth_rcv_snd_disable(struct net_device *ndev);
407 static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);
408
409 static void sh_eth_write(struct net_device *ndev, u32 data, int enum_index)
410 {
411         struct sh_eth_private *mdp = netdev_priv(ndev);
412         u16 offset = mdp->reg_offset[enum_index];
413
414         if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
415                 return;
416
417         iowrite32(data, mdp->addr + offset);
418 }
419
420 static u32 sh_eth_read(struct net_device *ndev, int enum_index)
421 {
422         struct sh_eth_private *mdp = netdev_priv(ndev);
423         u16 offset = mdp->reg_offset[enum_index];
424
425         if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
426                 return ~0U;
427
428         return ioread32(mdp->addr + offset);
429 }
430
431 static bool sh_eth_is_gether(struct sh_eth_private *mdp)
432 {
433         return mdp->reg_offset == sh_eth_offset_gigabit;
434 }
435
436 static bool sh_eth_is_rz_fast_ether(struct sh_eth_private *mdp)
437 {
438         return mdp->reg_offset == sh_eth_offset_fast_rz;
439 }
440
441 static void sh_eth_select_mii(struct net_device *ndev)
442 {
443         u32 value = 0x0;
444         struct sh_eth_private *mdp = netdev_priv(ndev);
445
446         switch (mdp->phy_interface) {
447         case PHY_INTERFACE_MODE_GMII:
448                 value = 0x2;
449                 break;
450         case PHY_INTERFACE_MODE_MII:
451                 value = 0x1;
452                 break;
453         case PHY_INTERFACE_MODE_RMII:
454                 value = 0x0;
455                 break;
456         default:
457                 netdev_warn(ndev,
458                             "PHY interface mode was not set up. Set to MII.\n");
459                 value = 0x1;
460                 break;
461         }
462
463         sh_eth_write(ndev, value, RMII_MII);
464 }
465
466 static void sh_eth_set_duplex(struct net_device *ndev)
467 {
468         struct sh_eth_private *mdp = netdev_priv(ndev);
469
470         if (mdp->duplex) /* Full */
471                 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
472         else            /* Half */
473                 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
474 }
475
476 /* There is CPU dependent code */
477 static void sh_eth_set_rate_r8a777x(struct net_device *ndev)
478 {
479         struct sh_eth_private *mdp = netdev_priv(ndev);
480
481         switch (mdp->speed) {
482         case 10: /* 10BASE */
483                 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_ELB, ECMR);
484                 break;
485         case 100:/* 100BASE */
486                 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_ELB, ECMR);
487                 break;
488         default:
489                 break;
490         }
491 }
492
493 /* R8A7778/9 */
494 static struct sh_eth_cpu_data r8a777x_data = {
495         .set_duplex     = sh_eth_set_duplex,
496         .set_rate       = sh_eth_set_rate_r8a777x,
497
498         .register_type  = SH_ETH_REG_FAST_RCAR,
499
500         .ecsr_value     = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
501         .ecsipr_value   = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
502         .eesipr_value   = 0x01ff009f,
503
504         .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
505         .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
506                           EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
507                           EESR_ECI,
508         .fdr_value      = 0x00000f0f,
509
510         .apr            = 1,
511         .mpr            = 1,
512         .tpauser        = 1,
513         .hw_swap        = 1,
514 };
515
516 /* R8A7790/1 */
517 static struct sh_eth_cpu_data r8a779x_data = {
518         .set_duplex     = sh_eth_set_duplex,
519         .set_rate       = sh_eth_set_rate_r8a777x,
520
521         .register_type  = SH_ETH_REG_FAST_RCAR,
522
523         .ecsr_value     = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
524         .ecsipr_value   = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
525         .eesipr_value   = 0x01ff009f,
526
527         .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
528         .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
529                           EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
530                           EESR_ECI,
531         .fdr_value      = 0x00000f0f,
532
533         .trscer_err_mask = DESC_I_RINT8,
534
535         .apr            = 1,
536         .mpr            = 1,
537         .tpauser        = 1,
538         .hw_swap        = 1,
539         .rmiimode       = 1,
540 };
541
542 static void sh_eth_set_rate_sh7724(struct net_device *ndev)
543 {
544         struct sh_eth_private *mdp = netdev_priv(ndev);
545
546         switch (mdp->speed) {
547         case 10: /* 10BASE */
548                 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
549                 break;
550         case 100:/* 100BASE */
551                 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
552                 break;
553         default:
554                 break;
555         }
556 }
557
558 /* SH7724 */
559 static struct sh_eth_cpu_data sh7724_data = {
560         .set_duplex     = sh_eth_set_duplex,
561         .set_rate       = sh_eth_set_rate_sh7724,
562
563         .register_type  = SH_ETH_REG_FAST_SH4,
564
565         .ecsr_value     = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
566         .ecsipr_value   = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
567         .eesipr_value   = 0x01ff009f,
568
569         .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
570         .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
571                           EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
572                           EESR_ECI,
573
574         .apr            = 1,
575         .mpr            = 1,
576         .tpauser        = 1,
577         .hw_swap        = 1,
578         .rpadir         = 1,
579         .rpadir_value   = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
580 };
581
582 static void sh_eth_set_rate_sh7757(struct net_device *ndev)
583 {
584         struct sh_eth_private *mdp = netdev_priv(ndev);
585
586         switch (mdp->speed) {
587         case 10: /* 10BASE */
588                 sh_eth_write(ndev, 0, RTRATE);
589                 break;
590         case 100:/* 100BASE */
591                 sh_eth_write(ndev, 1, RTRATE);
592                 break;
593         default:
594                 break;
595         }
596 }
597
598 /* SH7757 */
599 static struct sh_eth_cpu_data sh7757_data = {
600         .set_duplex     = sh_eth_set_duplex,
601         .set_rate       = sh_eth_set_rate_sh7757,
602
603         .register_type  = SH_ETH_REG_FAST_SH4,
604
605         .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
606
607         .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
608         .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
609                           EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
610                           EESR_ECI,
611
612         .irq_flags      = IRQF_SHARED,
613         .apr            = 1,
614         .mpr            = 1,
615         .tpauser        = 1,
616         .hw_swap        = 1,
617         .no_ade         = 1,
618         .rpadir         = 1,
619         .rpadir_value   = 2 << 16,
620         .rtrate         = 1,
621 };
622
623 #define SH_GIGA_ETH_BASE        0xfee00000UL
624 #define GIGA_MALR(port)         (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
625 #define GIGA_MAHR(port)         (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
626 static void sh_eth_chip_reset_giga(struct net_device *ndev)
627 {
628         int i;
629         u32 mahr[2], malr[2];
630
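        /* Preserve the MAC address registers of both channels across the
         * ARSTR software reset issued below.
         */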
631         /* save MAHR and MALR */
632         for (i = 0; i < 2; i++) {
633                 malr[i] = ioread32((void *)GIGA_MALR(i));
634                 mahr[i] = ioread32((void *)GIGA_MAHR(i));
635         }
636
637         /* reset device */
638         iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));
639         mdelay(1);
640
641         /* restore MAHR and MALR */
642         for (i = 0; i < 2; i++) {
643                 iowrite32(malr[i], (void *)GIGA_MALR(i));
644                 iowrite32(mahr[i], (void *)GIGA_MAHR(i));
645         }
646 }
647
648 static void sh_eth_set_rate_giga(struct net_device *ndev)
649 {
650         struct sh_eth_private *mdp = netdev_priv(ndev);
651
652         switch (mdp->speed) {
653         case 10: /* 10BASE */
654                 sh_eth_write(ndev, 0x00000000, GECMR);
655                 break;
656         case 100:/* 100BASE */
657                 sh_eth_write(ndev, 0x00000010, GECMR);
658                 break;
659         case 1000: /* 1000BASE */
660                 sh_eth_write(ndev, 0x00000020, GECMR);
661                 break;
662         default:
663                 break;
664         }
665 }
666
667 /* SH7757(GETHERC) */
668 static struct sh_eth_cpu_data sh7757_data_giga = {
669         .chip_reset     = sh_eth_chip_reset_giga,
670         .set_duplex     = sh_eth_set_duplex,
671         .set_rate       = sh_eth_set_rate_giga,
672
673         .register_type  = SH_ETH_REG_GIGABIT,
674
675         .ecsr_value     = ECSR_ICD | ECSR_MPD,
676         .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
677         .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
678
679         .tx_check       = EESR_TC1 | EESR_FTC,
680         .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
681                           EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
682                           EESR_TDE | EESR_ECI,
683         .fdr_value      = 0x0000072f,
684
685         .irq_flags      = IRQF_SHARED,
686         .apr            = 1,
687         .mpr            = 1,
688         .tpauser        = 1,
689         .bculr          = 1,
690         .hw_swap        = 1,
691         .rpadir         = 1,
692         .rpadir_value   = 2 << 16,
693         .no_trimd       = 1,
694         .no_ade         = 1,
695         .tsu            = 1,
696 };
697
698 static void sh_eth_chip_reset(struct net_device *ndev)
699 {
700         struct sh_eth_private *mdp = netdev_priv(ndev);
701
702         /* reset device */
703         sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
704         mdelay(1);
705 }
706
707 static void sh_eth_set_rate_gether(struct net_device *ndev)
708 {
709         struct sh_eth_private *mdp = netdev_priv(ndev);
710
711         switch (mdp->speed) {
712         case 10: /* 10BASE */
713                 sh_eth_write(ndev, GECMR_10, GECMR);
714                 break;
715         case 100:/* 100BASE */
716                 sh_eth_write(ndev, GECMR_100, GECMR);
717                 break;
718         case 1000: /* 1000BASE */
719                 sh_eth_write(ndev, GECMR_1000, GECMR);
720                 break;
721         default:
722                 break;
723         }
724 }
725
726 /* SH7734 */
727 static struct sh_eth_cpu_data sh7734_data = {
728         .chip_reset     = sh_eth_chip_reset,
729         .set_duplex     = sh_eth_set_duplex,
730         .set_rate       = sh_eth_set_rate_gether,
731
732         .register_type  = SH_ETH_REG_GIGABIT,
733
734         .ecsr_value     = ECSR_ICD | ECSR_MPD,
735         .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
736         .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
737
738         .tx_check       = EESR_TC1 | EESR_FTC,
739         .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
740                           EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
741                           EESR_TDE | EESR_ECI,
742
743         .apr            = 1,
744         .mpr            = 1,
745         .tpauser        = 1,
746         .bculr          = 1,
747         .hw_swap        = 1,
748         .no_trimd       = 1,
749         .no_ade         = 1,
750         .tsu            = 1,
751         .hw_crc         = 1,
752         .select_mii     = 1,
753         .shift_rd0      = 1,
754 };
755
756 /* SH7763 */
757 static struct sh_eth_cpu_data sh7763_data = {
758         .chip_reset     = sh_eth_chip_reset,
759         .set_duplex     = sh_eth_set_duplex,
760         .set_rate       = sh_eth_set_rate_gether,
761
762         .register_type  = SH_ETH_REG_GIGABIT,
763
764         .ecsr_value     = ECSR_ICD | ECSR_MPD,
765         .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
766         .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
767
768         .tx_check       = EESR_TC1 | EESR_FTC,
769         .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
770                           EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
771                           EESR_ECI,
772
773         .apr            = 1,
774         .mpr            = 1,
775         .tpauser        = 1,
776         .bculr          = 1,
777         .hw_swap        = 1,
778         .no_trimd       = 1,
779         .no_ade         = 1,
780         .tsu            = 1,
781         .irq_flags      = IRQF_SHARED,
782 };
783
784 static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
785 {
786         struct sh_eth_private *mdp = netdev_priv(ndev);
787
788         /* reset device */
789         sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
790         mdelay(1);
791
792         sh_eth_select_mii(ndev);
793 }
794
795 /* R8A7740 */
796 static struct sh_eth_cpu_data r8a7740_data = {
797         .chip_reset     = sh_eth_chip_reset_r8a7740,
798         .set_duplex     = sh_eth_set_duplex,
799         .set_rate       = sh_eth_set_rate_gether,
800
801         .register_type  = SH_ETH_REG_GIGABIT,
802
803         .ecsr_value     = ECSR_ICD | ECSR_MPD,
804         .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
805         .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
806
807         .tx_check       = EESR_TC1 | EESR_FTC,
808         .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
809                           EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
810                           EESR_TDE | EESR_ECI,
811         .fdr_value      = 0x0000070f,
812
813         .apr            = 1,
814         .mpr            = 1,
815         .tpauser        = 1,
816         .bculr          = 1,
817         .hw_swap        = 1,
818         .rpadir         = 1,
819         .rpadir_value   = 2 << 16,
820         .no_trimd       = 1,
821         .no_ade         = 1,
822         .hw_crc         = 1,
823         .tsu            = 1,
824         .select_mii     = 1,
825         .shift_rd0      = 1,
826 };
827
828 /* R7S72100 */
829 static struct sh_eth_cpu_data r7s72100_data = {
830         .chip_reset     = sh_eth_chip_reset,
831         .set_duplex     = sh_eth_set_duplex,
832
833         .register_type  = SH_ETH_REG_FAST_RZ,
834
835         .ecsr_value     = ECSR_ICD,
836         .ecsipr_value   = ECSIPR_ICDIP,
837         .eesipr_value   = 0xe77f009f,
838
839         .tx_check       = EESR_TC1 | EESR_FTC,
840         .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
841                           EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
842                           EESR_TDE | EESR_ECI,
843         .fdr_value      = 0x0000070f,
844
845         .no_psr         = 1,
846         .apr            = 1,
847         .mpr            = 1,
848         .tpauser        = 1,
849         .hw_swap        = 1,
850         .rpadir         = 1,
851         .rpadir_value   = 2 << 16,
852         .no_trimd       = 1,
853         .no_ade         = 1,
854         .hw_crc         = 1,
855         .tsu            = 1,
856         .shift_rd0      = 1,
857 };
858
859 static struct sh_eth_cpu_data sh7619_data = {
860         .register_type  = SH_ETH_REG_FAST_SH3_SH2,
861
862         .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
863
864         .apr            = 1,
865         .mpr            = 1,
866         .tpauser        = 1,
867         .hw_swap        = 1,
868 };
869
870 static struct sh_eth_cpu_data sh771x_data = {
871         .register_type  = SH_ETH_REG_FAST_SH3_SH2,
872
873         .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
874         .tsu            = 1,
875 };
876
877 static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
878 {
879         if (!cd->ecsr_value)
880                 cd->ecsr_value = DEFAULT_ECSR_INIT;
881
882         if (!cd->ecsipr_value)
883                 cd->ecsipr_value = DEFAULT_ECSIPR_INIT;
884
885         if (!cd->fcftr_value)
886                 cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
887                                   DEFAULT_FIFO_F_D_RFD;
888
889         if (!cd->fdr_value)
890                 cd->fdr_value = DEFAULT_FDR_INIT;
891
892         if (!cd->tx_check)
893                 cd->tx_check = DEFAULT_TX_CHECK;
894
895         if (!cd->eesr_err_check)
896                 cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
897
898         if (!cd->trscer_err_mask)
899                 cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK;
900 }
901
902 static int sh_eth_check_reset(struct net_device *ndev)
903 {
904         int ret = 0;
905         int cnt = 100;
906
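        /* Poll for up to 100 ms for the software reset bits (EDMR bits 1:0)
         * to self-clear.
         */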
907         while (cnt > 0) {
908                 if (!(sh_eth_read(ndev, EDMR) & 0x3))
909                         break;
910                 mdelay(1);
911                 cnt--;
912         }
913         if (cnt <= 0) {
914                 netdev_err(ndev, "Device reset failed\n");
915                 ret = -ETIMEDOUT;
916         }
917         return ret;
918 }
919
920 static int sh_eth_reset(struct net_device *ndev)
921 {
922         struct sh_eth_private *mdp = netdev_priv(ndev);
923         int ret = 0;
924
925         if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) {
926                 sh_eth_write(ndev, EDSR_ENALL, EDSR);
927                 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
928                              EDMR);
929
930                 ret = sh_eth_check_reset(ndev);
931                 if (ret)
932                         return ret;
933
934                 /* Table Init */
935                 sh_eth_write(ndev, 0x0, TDLAR);
936                 sh_eth_write(ndev, 0x0, TDFAR);
937                 sh_eth_write(ndev, 0x0, TDFXR);
938                 sh_eth_write(ndev, 0x0, TDFFR);
939                 sh_eth_write(ndev, 0x0, RDLAR);
940                 sh_eth_write(ndev, 0x0, RDFAR);
941                 sh_eth_write(ndev, 0x0, RDFXR);
942                 sh_eth_write(ndev, 0x0, RDFFR);
943
944                 /* Reset HW CRC register */
945                 if (mdp->cd->hw_crc)
946                         sh_eth_write(ndev, 0x0, CSMR);
947
948                 /* Select MII mode */
949                 if (mdp->cd->select_mii)
950                         sh_eth_select_mii(ndev);
951         } else {
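                /* Non-GETHER cores: assert the EDMR software reset bit,
                 * hold it for 3 ms, then clear it again by hand.
                 */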
952                 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
953                              EDMR);
954                 mdelay(3);
955                 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
956                              EDMR);
957         }
958
959         return ret;
960 }
961
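/* Reserve just enough headroom to bring skb->data up to the next
 * SH_ETH_RX_ALIGN boundary.
 */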
962 static void sh_eth_set_receive_align(struct sk_buff *skb)
963 {
964         uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1);
965
966         if (reserve)
967                 skb_reserve(skb, SH_ETH_RX_ALIGN - reserve);
968 }
969
970
971 /* CPU <-> EDMAC endian convert */
972 static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
973 {
974         switch (mdp->edmac_endian) {
975         case EDMAC_LITTLE_ENDIAN:
976                 return cpu_to_le32(x);
977         case EDMAC_BIG_ENDIAN:
978                 return cpu_to_be32(x);
979         }
980         return x;
981 }
982
983 static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
984 {
985         switch (mdp->edmac_endian) {
986         case EDMAC_LITTLE_ENDIAN:
987                 return le32_to_cpu(x);
988         case EDMAC_BIG_ENDIAN:
989                 return be32_to_cpu(x);
990         }
991         return x;
992 }
993
994 /* Program the hardware MAC address from dev->dev_addr. */
995 static void update_mac_address(struct net_device *ndev)
996 {
997         sh_eth_write(ndev,
998                      (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
999                      (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
1000         sh_eth_write(ndev,
1001                      (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
1002 }
1003
1004 /* Get MAC address from SuperH MAC address register
1005  *
1006  * SuperH's Ethernet device doesn't have a ROM for the MAC address.
1007  * This driver gets the MAC address that was set by the bootloader (U-Boot or sh-ipl+g).
1008  * If you want to use this device, you must set the MAC address in the bootloader.
1009  *
1010  */
1011 static void read_mac_address(struct net_device *ndev, unsigned char *mac)
1012 {
1013         if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
1014                 memcpy(ndev->dev_addr, mac, ETH_ALEN);
1015         } else {
1016                 ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
1017                 ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
1018                 ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
1019                 ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
1020                 ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
1021                 ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
1022         }
1023 }
1024
1025 static u32 sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
1026 {
1027         if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp))
1028                 return EDTRR_TRNS_GETHER;
1029         else
1030                 return EDTRR_TRNS_ETHER;
1031 }
1032
1033 struct bb_info {
1034         void (*set_gate)(void *addr);
1035         struct mdiobb_ctrl ctrl;
1036         void *addr;
1037         u32 mmd_msk;/* MMD */
1038         u32 mdo_msk;
1039         u32 mdi_msk;
1040         u32 mdc_msk;
1041 };
1042
1043 /* PHY bit set */
1044 static void bb_set(void *addr, u32 msk)
1045 {
1046         iowrite32(ioread32(addr) | msk, addr);
1047 }
1048
1049 /* PHY bit clear */
1050 static void bb_clr(void *addr, u32 msk)
1051 {
1052         iowrite32((ioread32(addr) & ~msk), addr);
1053 }
1054
1055 /* PHY bit read */
1056 static int bb_read(void *addr, u32 msk)
1057 {
1058         return (ioread32(addr) & msk) != 0;
1059 }
1060
1061 /* Data I/O pin control */
1062 static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
1063 {
1064         struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
1065
1066         if (bitbang->set_gate)
1067                 bitbang->set_gate(bitbang->addr);
1068
1069         if (bit)
1070                 bb_set(bitbang->addr, bitbang->mmd_msk);
1071         else
1072                 bb_clr(bitbang->addr, bitbang->mmd_msk);
1073 }
1074
1075 /* Set bit data */
1076 static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
1077 {
1078         struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
1079
1080         if (bitbang->set_gate)
1081                 bitbang->set_gate(bitbang->addr);
1082
1083         if (bit)
1084                 bb_set(bitbang->addr, bitbang->mdo_msk);
1085         else
1086                 bb_clr(bitbang->addr, bitbang->mdo_msk);
1087 }
1088
1089 /* Get bit data */
1090 static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
1091 {
1092         struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
1093
1094         if (bitbang->set_gate)
1095                 bitbang->set_gate(bitbang->addr);
1096
1097         return bb_read(bitbang->addr, bitbang->mdi_msk);
1098 }
1099
1100 /* MDC pin control */
1101 static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
1102 {
1103         struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
1104
1105         if (bitbang->set_gate)
1106                 bitbang->set_gate(bitbang->addr);
1107
1108         if (bit)
1109                 bb_set(bitbang->addr, bitbang->mdc_msk);
1110         else
1111                 bb_clr(bitbang->addr, bitbang->mdc_msk);
1112 }
1113
1114 /* mdio bus control struct */
1115 static struct mdiobb_ops bb_ops = {
1116         .owner = THIS_MODULE,
1117         .set_mdc = sh_mdc_ctrl,
1118         .set_mdio_dir = sh_mmd_ctrl,
1119         .set_mdio_data = sh_set_mdio,
1120         .get_mdio_data = sh_get_mdio,
1121 };
1122
1123 /* free skb and descriptor buffer */
1124 static void sh_eth_ring_free(struct net_device *ndev)
1125 {
1126         struct sh_eth_private *mdp = netdev_priv(ndev);
1127         int ringsize, i;
1128
1129         /* Free Rx skb ringbuffer */
1130         if (mdp->rx_skbuff) {
1131                 for (i = 0; i < mdp->num_rx_ring; i++)
1132                         dev_kfree_skb(mdp->rx_skbuff[i]);
1133         }
1134         kfree(mdp->rx_skbuff);
1135         mdp->rx_skbuff = NULL;
1136
1137         /* Free Tx skb ringbuffer */
1138         if (mdp->tx_skbuff) {
1139                 for (i = 0; i < mdp->num_tx_ring; i++)
1140                         dev_kfree_skb(mdp->tx_skbuff[i]);
1141         }
1142         kfree(mdp->tx_skbuff);
1143         mdp->tx_skbuff = NULL;
1144
1145         if (mdp->rx_ring) {
1146                 ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
1147                 dma_free_coherent(NULL, ringsize, mdp->rx_ring,
1148                                   mdp->rx_desc_dma);
1149                 mdp->rx_ring = NULL;
1150         }
1151
1152         if (mdp->tx_ring) {
1153                 ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
1154                 dma_free_coherent(NULL, ringsize, mdp->tx_ring,
1155                                   mdp->tx_desc_dma);
1156                 mdp->tx_ring = NULL;
1157         }
1158 }
1159
1160 /* format skb and descriptor buffer */
1161 static void sh_eth_ring_format(struct net_device *ndev)
1162 {
1163         struct sh_eth_private *mdp = netdev_priv(ndev);
1164         int i;
1165         struct sk_buff *skb;
1166         struct sh_eth_rxdesc *rxdesc = NULL;
1167         struct sh_eth_txdesc *txdesc = NULL;
1168         int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
1169         int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
1170         int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
1171         dma_addr_t dma_addr;
1172         u32 buf_len;
1173
1174         mdp->cur_rx = 0;
1175         mdp->cur_tx = 0;
1176         mdp->dirty_rx = 0;
1177         mdp->dirty_tx = 0;
1178
1179         memset(mdp->rx_ring, 0, rx_ringsize);
1180
1181         /* build Rx ring buffer */
1182         for (i = 0; i < mdp->num_rx_ring; i++) {
1183                 /* skb */
1184                 mdp->rx_skbuff[i] = NULL;
1185                 skb = netdev_alloc_skb(ndev, skbuff_size);
1186                 if (skb == NULL)
1187                         break;
1188                 sh_eth_set_receive_align(skb);
1189
1190                 /* The size of the buffer is a multiple of 32 bytes. */
1191                 buf_len = ALIGN(mdp->rx_buf_sz, 32);
1192                 dma_addr = dma_map_single(&ndev->dev, skb->data, buf_len,
1193                                           DMA_FROM_DEVICE);
1194                 if (dma_mapping_error(&ndev->dev, dma_addr)) {
1195                         kfree_skb(skb);
1196                         break;
1197                 }
1198                 mdp->rx_skbuff[i] = skb;
1199
1200                 /* RX descriptor */
1201                 rxdesc = &mdp->rx_ring[i];
1202                 rxdesc->len = cpu_to_edmac(mdp, buf_len << 16);
1203                 rxdesc->addr = cpu_to_edmac(mdp, dma_addr);
1204                 rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
1205
1206                 /* Rx descriptor address set */
1207                 if (i == 0) {
1208                         sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
1209                         if (sh_eth_is_gether(mdp) ||
1210                             sh_eth_is_rz_fast_ether(mdp))
1211                                 sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
1212                 }
1213         }
1214
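        /* cur_rx - dirty_rx counts the entries still waiting for a buffer, so
         * if any allocation above failed, the refill loop in sh_eth_rx() will
         * retry those descriptors later.
         */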
1215         mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
1216
1217         /* Mark the last entry as wrapping the ring. */
1218         if (rxdesc)
1219                 rxdesc->status |= cpu_to_edmac(mdp, RD_RDLE);
1220
1221         memset(mdp->tx_ring, 0, tx_ringsize);
1222
1223         /* build Tx ring buffer */
1224         for (i = 0; i < mdp->num_tx_ring; i++) {
1225                 mdp->tx_skbuff[i] = NULL;
1226                 txdesc = &mdp->tx_ring[i];
1227                 txdesc->status = cpu_to_edmac(mdp, TD_TFP);
1228                 txdesc->len = cpu_to_edmac(mdp, 0);
1229                 if (i == 0) {
1230                         /* Tx descriptor address set */
1231                         sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
1232                         if (sh_eth_is_gether(mdp) ||
1233                             sh_eth_is_rz_fast_ether(mdp))
1234                                 sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
1235                 }
1236         }
1237
1238         txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
1239 }
1240
1241 /* Get skb and descriptor buffer */
1242 static int sh_eth_ring_init(struct net_device *ndev)
1243 {
1244         struct sh_eth_private *mdp = netdev_priv(ndev);
1245         int rx_ringsize, tx_ringsize;
1246
1247         /* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
1248          * card needs room to do 8 byte alignment, +2 so we can reserve
1249          * the first 2 bytes, and +16 gets room for the status word from the
1250          * card.
1251          */
1252         mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
1253                           (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
1254         if (mdp->cd->rpadir)
1255                 mdp->rx_buf_sz += NET_IP_ALIGN;
1256
1257         /* Allocate RX and TX skb rings */
1258         mdp->rx_skbuff = kcalloc(mdp->num_rx_ring, sizeof(*mdp->rx_skbuff),
1259                                  GFP_KERNEL);
1260         if (!mdp->rx_skbuff)
1261                 return -ENOMEM;
1262
1263         mdp->tx_skbuff = kcalloc(mdp->num_tx_ring, sizeof(*mdp->tx_skbuff),
1264                                  GFP_KERNEL);
1265         if (!mdp->tx_skbuff)
1266                 goto ring_free;
1267
1268         /* Allocate all Rx descriptors. */
1269         rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
1270         mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
1271                                           GFP_KERNEL);
1272         if (!mdp->rx_ring)
1273                 goto ring_free;
1274
1275         mdp->dirty_rx = 0;
1276
1277         /* Allocate all Tx descriptors. */
1278         tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
1279         mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
1280                                           GFP_KERNEL);
1281         if (!mdp->tx_ring)
1282                 goto ring_free;
1283         return 0;
1284
1285 ring_free:
1286         /* Free Rx and Tx skb ring buffer and DMA buffer */
1287         sh_eth_ring_free(ndev);
1288
1289         return -ENOMEM;
1290 }
1291
1292 static int sh_eth_dev_init(struct net_device *ndev, bool start)
1293 {
1294         int ret = 0;
1295         struct sh_eth_private *mdp = netdev_priv(ndev);
1296         u32 val;
1297
1298         /* Soft Reset */
1299         ret = sh_eth_reset(ndev);
1300         if (ret)
1301                 return ret;
1302
1303         if (mdp->cd->rmiimode)
1304                 sh_eth_write(ndev, 0x1, RMIIMODE);
1305
1306         /* Descriptor format */
1307         sh_eth_ring_format(ndev);
1308         if (mdp->cd->rpadir)
1309                 sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);
1310
1311         /* all sh_eth int mask */
1312         sh_eth_write(ndev, 0, EESIPR);
1313
1314 #if defined(__LITTLE_ENDIAN)
1315         if (mdp->cd->hw_swap)
1316                 sh_eth_write(ndev, EDMR_EL, EDMR);
1317         else
1318 #endif
1319                 sh_eth_write(ndev, 0, EDMR);
1320
1321         /* FIFO size set */
1322         sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
1323         sh_eth_write(ndev, 0, TFTR);
1324
1325         /* Frame recv control (enable multiple-packets per rx irq) */
1326         sh_eth_write(ndev, RMCR_RNC, RMCR);
1327
1328         sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER);
1329
1330         if (mdp->cd->bculr)
1331                 sh_eth_write(ndev, 0x800, BCULR);       /* Burst cycle set */
1332
1333         sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);
1334
1335         if (!mdp->cd->no_trimd)
1336                 sh_eth_write(ndev, 0, TRIMD);
1337
1338         /* Recv frame limit set register */
1339         sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
1340                      RFLR);
1341
1342         sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
1343         if (start) {
1344                 mdp->irq_enabled = true;
1345                 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1346         }
1347
1348         /* PAUSE Prohibition */
1349         val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
1350                 ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
1351
1352         sh_eth_write(ndev, val, ECMR);
1353
1354         if (mdp->cd->set_rate)
1355                 mdp->cd->set_rate(ndev);
1356
1357         /* E-MAC Status Register clear */
1358         sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
1359
1360         /* E-MAC Interrupt Enable register */
1361         if (start)
1362                 sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
1363
1364         /* Set MAC address */
1365         update_mac_address(ndev);
1366
1367         /* mask reset */
1368         if (mdp->cd->apr)
1369                 sh_eth_write(ndev, APR_AP, APR);
1370         if (mdp->cd->mpr)
1371                 sh_eth_write(ndev, MPR_MP, MPR);
1372         if (mdp->cd->tpauser)
1373                 sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
1374
1375         if (start) {
1376                 /* Setting the Rx mode will start the Rx process. */
1377                 sh_eth_write(ndev, EDRRR_R, EDRRR);
1378
1379                 netif_start_queue(ndev);
1380         }
1381
1382         return ret;
1383 }
1384
1385 static void sh_eth_dev_exit(struct net_device *ndev)
1386 {
1387         struct sh_eth_private *mdp = netdev_priv(ndev);
1388         int i;
1389
1390         /* Deactivate all TX descriptors, so DMA should stop at next
1391          * packet boundary if it's currently running
1392          */
1393         for (i = 0; i < mdp->num_tx_ring; i++)
1394                 mdp->tx_ring[i].status &= ~cpu_to_edmac(mdp, TD_TACT);
1395
1396         /* Disable TX FIFO egress to MAC */
1397         sh_eth_rcv_snd_disable(ndev);
1398
1399         /* Stop RX DMA at next packet boundary */
1400         sh_eth_write(ndev, 0, EDRRR);
1401
1402         /* Aside from TX DMA, we can't tell when the hardware is
1403          * really stopped, so we need to reset to make sure.
1404          * Before doing that, wait for long enough to *probably*
1405          * finish transmitting the last packet and poll stats.
1406          */
1407         msleep(2); /* max frame time at 10 Mbps < 1250 us */
1408         sh_eth_get_stats(ndev);
1409         sh_eth_reset(ndev);
1410
1411         /* Set the RMII mode again if required */
1412         if (mdp->cd->rmiimode)
1413                 sh_eth_write(ndev, 0x1, RMIIMODE);
1414
1415         /* Set MAC address again */
1416         update_mac_address(ndev);
1417 }
1418
1419 /* free Tx skb function */
1420 static int sh_eth_txfree(struct net_device *ndev)
1421 {
1422         struct sh_eth_private *mdp = netdev_priv(ndev);
1423         struct sh_eth_txdesc *txdesc;
1424         int free_num = 0;
1425         int entry = 0;
1426
1427         for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
1428                 entry = mdp->dirty_tx % mdp->num_tx_ring;
1429                 txdesc = &mdp->tx_ring[entry];
1430                 if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
1431                         break;
1432                 /* TACT bit must be checked before all the following reads */
1433                 dma_rmb();
1434                 netif_info(mdp, tx_done, ndev,
1435                            "tx entry %d status 0x%08x\n",
1436                            entry, edmac_to_cpu(mdp, txdesc->status));
1437                 /* Free the original skb. */
1438                 if (mdp->tx_skbuff[entry]) {
1439                         dma_unmap_single(&ndev->dev,
1440                                          edmac_to_cpu(mdp, txdesc->addr),
1441                                          edmac_to_cpu(mdp, txdesc->len) >> 16,
1442                                          DMA_TO_DEVICE);
1443                         dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
1444                         mdp->tx_skbuff[entry] = NULL;
1445                         free_num++;
1446                 }
1447                 txdesc->status = cpu_to_edmac(mdp, TD_TFP);
1448                 if (entry >= mdp->num_tx_ring - 1)
1449                         txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
1450
1451                 ndev->stats.tx_packets++;
1452                 ndev->stats.tx_bytes += edmac_to_cpu(mdp, txdesc->len) >> 16;
1453         }
1454         return free_num;
1455 }
1456
1457 /* Packet receive function */
1458 static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1459 {
1460         struct sh_eth_private *mdp = netdev_priv(ndev);
1461         struct sh_eth_rxdesc *rxdesc;
1462
1463         int entry = mdp->cur_rx % mdp->num_rx_ring;
1464         int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
1465         int limit;
1466         struct sk_buff *skb;
1467         u16 pkt_len = 0;
1468         u32 desc_status;
1469         int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
1470         dma_addr_t dma_addr;
1471         u32 buf_len;
1472
1473         boguscnt = min(boguscnt, *quota);
1474         limit = boguscnt;
1475         rxdesc = &mdp->rx_ring[entry];
1476         while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
1477                 /* RACT bit must be checked before all the following reads */
1478                 dma_rmb();
1479                 desc_status = edmac_to_cpu(mdp, rxdesc->status);
1480                 pkt_len = edmac_to_cpu(mdp, rxdesc->len) & RD_RFL;
1481
1482                 if (--boguscnt < 0)
1483                         break;
1484
1485                 netif_info(mdp, rx_status, ndev,
1486                            "rx entry %d status 0x%08x len %d\n",
1487                            entry, desc_status, pkt_len);
1488
1489                 if (!(desc_status & RDFEND))
1490                         ndev->stats.rx_length_errors++;
1491
1492                 /* In case of almost all GETHER/ETHERs, the Receive Frame State
1493                  * (RFS) bits in the Receive Descriptor 0 are from bit 9 to
1494                  * bit 0. However, in case of the R8A7740 and R7S72100
1495                  * the RFS bits are from bit 25 to bit 16. So, the
1496                  * driver needs right shifting by 16.
1497                  */
1498                 if (mdp->cd->shift_rd0)
1499                         desc_status >>= 16;
1500
1501                 skb = mdp->rx_skbuff[entry];
1502                 if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
1503                                    RD_RFS5 | RD_RFS6 | RD_RFS10)) {
1504                         ndev->stats.rx_errors++;
1505                         if (desc_status & RD_RFS1)
1506                                 ndev->stats.rx_crc_errors++;
1507                         if (desc_status & RD_RFS2)
1508                                 ndev->stats.rx_frame_errors++;
1509                         if (desc_status & RD_RFS3)
1510                                 ndev->stats.rx_length_errors++;
1511                         if (desc_status & RD_RFS4)
1512                                 ndev->stats.rx_length_errors++;
1513                         if (desc_status & RD_RFS6)
1514                                 ndev->stats.rx_missed_errors++;
1515                         if (desc_status & RD_RFS10)
1516                                 ndev->stats.rx_over_errors++;
1517                 } else  if (skb) {
1518                         dma_addr = edmac_to_cpu(mdp, rxdesc->addr);
1519                         if (!mdp->cd->hw_swap)
1520                                 sh_eth_soft_swap(
1521                                         phys_to_virt(ALIGN(dma_addr, 4)),
1522                                         pkt_len + 2);
1523                         mdp->rx_skbuff[entry] = NULL;
1524                         if (mdp->cd->rpadir)
1525                                 skb_reserve(skb, NET_IP_ALIGN);
1526                         dma_unmap_single(&ndev->dev, dma_addr,
1527                                          ALIGN(mdp->rx_buf_sz, 32),
1528                                          DMA_FROM_DEVICE);
1529                         skb_put(skb, pkt_len);
1530                         skb->protocol = eth_type_trans(skb, ndev);
1531                         netif_receive_skb(skb);
1532                         ndev->stats.rx_packets++;
1533                         ndev->stats.rx_bytes += pkt_len;
1534                         if (desc_status & RD_RFS8)
1535                                 ndev->stats.multicast++;
1536                 }
1537                 entry = (++mdp->cur_rx) % mdp->num_rx_ring;
1538                 rxdesc = &mdp->rx_ring[entry];
1539         }
1540
1541         /* Refill the Rx ring buffers. */
1542         for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
1543                 entry = mdp->dirty_rx % mdp->num_rx_ring;
1544                 rxdesc = &mdp->rx_ring[entry];
1545                 /* The buffer size is aligned to a 32-byte boundary. */
1546                 buf_len = ALIGN(mdp->rx_buf_sz, 32);
1547                 rxdesc->len = cpu_to_edmac(mdp, buf_len << 16);
1548
1549                 if (mdp->rx_skbuff[entry] == NULL) {
1550                         skb = netdev_alloc_skb(ndev, skbuff_size);
1551                         if (skb == NULL)
1552                                 break;  /* Better luck next round. */
1553                         sh_eth_set_receive_align(skb);
1554                         dma_addr = dma_map_single(&ndev->dev, skb->data,
1555                                                   buf_len, DMA_FROM_DEVICE);
1556                         if (dma_mapping_error(&ndev->dev, dma_addr)) {
1557                                 kfree_skb(skb);
1558                                 break;
1559                         }
1560                         mdp->rx_skbuff[entry] = skb;
1561
1562                         skb_checksum_none_assert(skb);
1563                         rxdesc->addr = cpu_to_edmac(mdp, dma_addr);
1564                 }
1565                 dma_wmb(); /* RACT bit must be set after all the above writes */
1566                 if (entry >= mdp->num_rx_ring - 1)
1567                         rxdesc->status |=
1568                                 cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDLE);
1569                 else
1570                         rxdesc->status |=
1571                                 cpu_to_edmac(mdp, RD_RACT | RD_RFP);
1572         }
1573
1574         /* Restart Rx engine if stopped. */
1575         /* If we don't need to check status, don't. -KDU */
1576         if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
1577                 /* fix the values for the next reception if RDE is set */
1578                 if (intr_status & EESR_RDE &&
1579                     mdp->reg_offset[RDFAR] != SH_ETH_OFFSET_INVALID) {
1580                         u32 count = (sh_eth_read(ndev, RDFAR) -
1581                                      sh_eth_read(ndev, RDLAR)) >> 4;
1582
1583                         mdp->cur_rx = count;
1584                         mdp->dirty_rx = count;
1585                 }
1586                 sh_eth_write(ndev, EDRRR_R, EDRRR);
1587         }
1588
1589         *quota -= limit - boguscnt - 1;
1590
1591         return *quota <= 0;
1592 }
1593
1594 static void sh_eth_rcv_snd_disable(struct net_device *ndev)
1595 {
1596         /* disable tx and rx */
1597         sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
1598                 ~(ECMR_RE | ECMR_TE), ECMR);
1599 }
1600
1601 static void sh_eth_rcv_snd_enable(struct net_device *ndev)
1602 {
1603         /* enable tx and rx */
1604         sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
1605                 (ECMR_RE | ECMR_TE), ECMR);
1606 }
1607
1608 /* error control function */
1609 static void sh_eth_error(struct net_device *ndev, u32 intr_status)
1610 {
1611         struct sh_eth_private *mdp = netdev_priv(ndev);
1612         u32 felic_stat;
1613         u32 link_stat;
1614         u32 mask;
1615
1616         if (intr_status & EESR_ECI) {
1617                 felic_stat = sh_eth_read(ndev, ECSR);
1618                 sh_eth_write(ndev, felic_stat, ECSR);   /* clear int */
1619                 if (felic_stat & ECSR_ICD)
1620                         ndev->stats.tx_carrier_errors++;
1621                 if (felic_stat & ECSR_LCHNG) {
1622                         /* Link Changed */
1623                         if (mdp->cd->no_psr || mdp->no_ether_link) {
1624                                 goto ignore_link;
1625                         } else {
1626                                 link_stat = (sh_eth_read(ndev, PSR));
1627                                 if (mdp->ether_link_active_low)
1628                                         link_stat = ~link_stat;
1629                         }
1630                         if (!(link_stat & PHY_ST_LINK)) {
1631                                 sh_eth_rcv_snd_disable(ndev);
1632                         } else {
1633                                 /* Link Up */
1634                                 sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
1635                                                    ~DMAC_M_ECI, EESIPR);
1636                                 /* clear int */
1637                                 sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
1638                                              ECSR);
1639                                 sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
1640                                                    DMAC_M_ECI, EESIPR);
1641                                 /* enable tx and rx */
1642                                 sh_eth_rcv_snd_enable(ndev);
1643                         }
1644                 }
1645         }
1646
1647 ignore_link:
1648         if (intr_status & EESR_TWB) {
1649                 /* Unused write back interrupt */
1650                 if (intr_status & EESR_TABT) {  /* Transmit Abort int */
1651                         ndev->stats.tx_aborted_errors++;
1652                         netif_err(mdp, tx_err, ndev, "Transmit Abort\n");
1653                 }
1654         }
1655
1656         if (intr_status & EESR_RABT) {
1657                 /* Receive Abort int */
1658                 if (intr_status & EESR_RFRMER) {
1659                         /* Receive Frame Overflow int */
1660                         ndev->stats.rx_frame_errors++;
1661                 }
1662         }
1663
1664         if (intr_status & EESR_TDE) {
1665                 /* Transmit Descriptor Empty int */
1666                 ndev->stats.tx_fifo_errors++;
1667                 netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n");
1668         }
1669
1670         if (intr_status & EESR_TFE) {
1671                 /* FIFO under flow */
1672                 ndev->stats.tx_fifo_errors++;
1673                 netif_err(mdp, tx_err, ndev, "Transmit FIFO Under flow\n");
1674         }
1675
1676         if (intr_status & EESR_RDE) {
1677                 /* Receive Descriptor Empty int */
1678                 ndev->stats.rx_over_errors++;
1679         }
1680
1681         if (intr_status & EESR_RFE) {
1682                 /* Receive FIFO Overflow int */
1683                 ndev->stats.rx_fifo_errors++;
1684         }
1685
1686         if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
1687                 /* Address Error */
1688                 ndev->stats.tx_fifo_errors++;
1689                 netif_err(mdp, tx_err, ndev, "Address Error\n");
1690         }
1691
1692         mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
1693         if (mdp->cd->no_ade)
1694                 mask &= ~EESR_ADE;
1695         if (intr_status & mask) {
1696                 /* Tx error */
1697                 u32 edtrr = sh_eth_read(ndev, EDTRR);
1698
1699                 /* dmesg */
1700                 netdev_err(ndev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
1701                            intr_status, mdp->cur_tx, mdp->dirty_tx,
1702                            (u32)ndev->state, edtrr);
1703                 /* dirty buffer free */
1704                 sh_eth_txfree(ndev);
1705
1706                 /* SH7712 BUG */
1707                 if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
1708                         /* tx dma start */
1709                         sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
1710                 }
1711                 /* wakeup */
1712                 netif_wake_queue(ndev);
1713         }
1714 }
1715
1716 static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
1717 {
1718         struct net_device *ndev = netdev;
1719         struct sh_eth_private *mdp = netdev_priv(ndev);
1720         struct sh_eth_cpu_data *cd = mdp->cd;
1721         irqreturn_t ret = IRQ_NONE;
1722         u32 intr_status, intr_enable;
1723
1724         spin_lock(&mdp->lock);
1725
1726         /* Get interrupt status */
1727         intr_status = sh_eth_read(ndev, EESR);
1728         /* Mask it with the interrupt mask, forcing ECI interrupt to be always
1729          * enabled since it's the one that comes through regardless of the mask,
1730          * and we need to fully handle it in sh_eth_error() in order to quench
1731          * it as it doesn't get cleared by just writing 1 to the ECI bit...
1732          */
1733         intr_enable = sh_eth_read(ndev, EESIPR);
1734         intr_status &= intr_enable | DMAC_M_ECI;
1735         if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))
1736                 ret = IRQ_HANDLED;
1737         else
1738                 goto out;
1739
1740         if (unlikely(!mdp->irq_enabled)) {
1741                 sh_eth_write(ndev, 0, EESIPR);
1742                 goto out;
1743         }
1744
1745         if (intr_status & EESR_RX_CHECK) {
1746                 if (napi_schedule_prep(&mdp->napi)) {
1747                         /* Mask Rx interrupts */
1748                         sh_eth_write(ndev, intr_enable & ~EESR_RX_CHECK,
1749                                      EESIPR);
1750                         __napi_schedule(&mdp->napi);
1751                 } else {
1752                         netdev_warn(ndev,
1753                                     "ignoring interrupt, status 0x%08x, mask 0x%08x.\n",
1754                                     intr_status, intr_enable);
1755                 }
1756         }
1757
1758         /* Tx Check */
1759         if (intr_status & cd->tx_check) {
1760                 /* Clear Tx interrupts */
1761                 sh_eth_write(ndev, intr_status & cd->tx_check, EESR);
1762
1763                 sh_eth_txfree(ndev);
1764                 netif_wake_queue(ndev);
1765         }
1766
1767         if (intr_status & cd->eesr_err_check) {
1768                 /* Clear error interrupts */
1769                 sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR);
1770
1771                 sh_eth_error(ndev, intr_status);
1772         }
1773
1774 out:
1775         spin_unlock(&mdp->lock);
1776
1777         return ret;
1778 }
1779
1780 static int sh_eth_poll(struct napi_struct *napi, int budget)
1781 {
1782         struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private,
1783                                                   napi);
1784         struct net_device *ndev = napi->dev;
1785         int quota = budget;
1786         u32 intr_status;
1787
1788         for (;;) {
1789                 intr_status = sh_eth_read(ndev, EESR);
1790                 if (!(intr_status & EESR_RX_CHECK))
1791                         break;
1792                 /* Clear Rx interrupts */
1793                 sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR);
1794
1795                 if (sh_eth_rx(ndev, intr_status, &quota))
1796                         goto out;
1797         }
1798
1799         napi_complete(napi);
1800
1801         /* Reenable Rx interrupts */
1802         if (mdp->irq_enabled)
1803                 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1804 out:
1805         return budget - quota;
1806 }
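/* NAPI contract as implemented above: sh_eth_poll() keeps draining Rx work
 * while EESR still reports EESR_RX_CHECK, bails out as soon as sh_eth_rx()
 * exhausts the quota, and re-enables Rx interrupts via EESIPR only after a
 * poll that finished under budget (and only while mdp->irq_enabled is set).
 */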
1807
1808 /* PHY state control function */
1809 static void sh_eth_adjust_link(struct net_device *ndev)
1810 {
1811         struct sh_eth_private *mdp = netdev_priv(ndev);
1812         struct phy_device *phydev = mdp->phydev;
1813         int new_state = 0;
1814
1815         if (phydev->link) {
1816                 if (phydev->duplex != mdp->duplex) {
1817                         new_state = 1;
1818                         mdp->duplex = phydev->duplex;
1819                         if (mdp->cd->set_duplex)
1820                                 mdp->cd->set_duplex(ndev);
1821                 }
1822
1823                 if (phydev->speed != mdp->speed) {
1824                         new_state = 1;
1825                         mdp->speed = phydev->speed;
1826                         if (mdp->cd->set_rate)
1827                                 mdp->cd->set_rate(ndev);
1828                 }
1829                 if (!mdp->link) {
1830                         sh_eth_write(ndev,
1831                                      sh_eth_read(ndev, ECMR) & ~ECMR_TXF,
1832                                      ECMR);
1833                         new_state = 1;
1834                         mdp->link = phydev->link;
1835                         if (mdp->cd->no_psr || mdp->no_ether_link)
1836                                 sh_eth_rcv_snd_enable(ndev);
1837                 }
1838         } else if (mdp->link) {
1839                 new_state = 1;
1840                 mdp->link = 0;
1841                 mdp->speed = 0;
1842                 mdp->duplex = -1;
1843                 if (mdp->cd->no_psr || mdp->no_ether_link)
1844                         sh_eth_rcv_snd_disable(ndev);
1845         }
1846
1847         if (new_state && netif_msg_link(mdp))
1848                 phy_print_status(phydev);
1849 }
1850
1851 /* PHY init function */
1852 static int sh_eth_phy_init(struct net_device *ndev)
1853 {
1854         struct device_node *np = ndev->dev.parent->of_node;
1855         struct sh_eth_private *mdp = netdev_priv(ndev);
1856         struct phy_device *phydev = NULL;
1857
1858         mdp->link = 0;
1859         mdp->speed = 0;
1860         mdp->duplex = -1;
1861
1862         /* Try connect to PHY */
1863         if (np) {
1864                 struct device_node *pn;
1865
1866                 pn = of_parse_phandle(np, "phy-handle", 0);
1867                 phydev = of_phy_connect(ndev, pn,
1868                                         sh_eth_adjust_link, 0,
1869                                         mdp->phy_interface);
1870
1871                 if (!phydev)
1872                         phydev = ERR_PTR(-ENOENT);
1873         } else {
1874                 char phy_id[MII_BUS_ID_SIZE + 3];
1875
1876                 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
1877                          mdp->mii_bus->id, mdp->phy_id);
1878
1879                 phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
1880                                      mdp->phy_interface);
1881         }
1882
1883         if (IS_ERR(phydev)) {
1884                 netdev_err(ndev, "failed to connect PHY\n");
1885                 return PTR_ERR(phydev);
1886         }
1887
1888         netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n",
1889                     phydev->addr, phydev->irq, phydev->drv->name);
1890
1891         mdp->phydev = phydev;
1892
1893         return 0;
1894 }
1895
1896 /* PHY control start function */
1897 static int sh_eth_phy_start(struct net_device *ndev)
1898 {
1899         struct sh_eth_private *mdp = netdev_priv(ndev);
1900         int ret;
1901
1902         ret = sh_eth_phy_init(ndev);
1903         if (ret)
1904                 return ret;
1905
1906         phy_start(mdp->phydev);
1907
1908         return 0;
1909 }
1910
1911 static int sh_eth_get_settings(struct net_device *ndev,
1912                                struct ethtool_cmd *ecmd)
1913 {
1914         struct sh_eth_private *mdp = netdev_priv(ndev);
1915         unsigned long flags;
1916         int ret;
1917
1918         if (!mdp->phydev)
1919                 return -ENODEV;
1920
1921         spin_lock_irqsave(&mdp->lock, flags);
1922         ret = phy_ethtool_gset(mdp->phydev, ecmd);
1923         spin_unlock_irqrestore(&mdp->lock, flags);
1924
1925         return ret;
1926 }
1927
1928 static int sh_eth_set_settings(struct net_device *ndev,
1929                                struct ethtool_cmd *ecmd)
1930 {
1931         struct sh_eth_private *mdp = netdev_priv(ndev);
1932         unsigned long flags;
1933         int ret;
1934
1935         if (!mdp->phydev)
1936                 return -ENODEV;
1937
1938         spin_lock_irqsave(&mdp->lock, flags);
1939
1940         /* disable tx and rx */
1941         sh_eth_rcv_snd_disable(ndev);
1942
1943         ret = phy_ethtool_sset(mdp->phydev, ecmd);
1944         if (ret)
1945                 goto error_exit;
1946
1947         if (ecmd->duplex == DUPLEX_FULL)
1948                 mdp->duplex = 1;
1949         else
1950                 mdp->duplex = 0;
1951
1952         if (mdp->cd->set_duplex)
1953                 mdp->cd->set_duplex(ndev);
1954
1955 error_exit:
1956         mdelay(1);
1957
1958         /* enable tx and rx */
1959         sh_eth_rcv_snd_enable(ndev);
1960
1961         spin_unlock_irqrestore(&mdp->lock, flags);
1962
1963         return ret;
1964 }
1965
1966 /* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the
1967  * version must be bumped as well.  Just adding registers up to that
1968  * limit is fine, as long as the existing register indices don't
1969  * change.
1970  */
1971 #define SH_ETH_REG_DUMP_VERSION         1
1972 #define SH_ETH_REG_DUMP_MAX_REGS        256
1973
1974 static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
1975 {
1976         struct sh_eth_private *mdp = netdev_priv(ndev);
1977         struct sh_eth_cpu_data *cd = mdp->cd;
1978         u32 *valid_map;
1979         size_t len;
1980
1981         BUILD_BUG_ON(SH_ETH_MAX_REGISTER_OFFSET > SH_ETH_REG_DUMP_MAX_REGS);
1982
1983         /* Dump starts with a bitmap that tells ethtool which
1984          * registers are defined for this chip.
1985          */
1986         len = DIV_ROUND_UP(SH_ETH_REG_DUMP_MAX_REGS, 32);
1987         if (buf) {
1988                 valid_map = buf;
1989                 buf += len;
1990         } else {
1991                 valid_map = NULL;
1992         }
1993
1994         /* Add a register to the dump, if it has a defined offset.
1995          * This automatically skips most undefined registers, but for
1996          * some it is also necessary to check a capability flag in
1997          * struct sh_eth_cpu_data.
1998          */
1999 #define mark_reg_valid(reg) valid_map[reg / 32] |= 1U << (reg % 32)
2000 #define add_reg_from(reg, read_expr) do {                               \
2001                 if (mdp->reg_offset[reg] != SH_ETH_OFFSET_INVALID) {    \
2002                         if (buf) {                                      \
2003                                 mark_reg_valid(reg);                    \
2004                                 *buf++ = read_expr;                     \
2005                         }                                               \
2006                         ++len;                                          \
2007                 }                                                       \
2008         } while (0)
2009 #define add_reg(reg) add_reg_from(reg, sh_eth_read(ndev, reg))
2010 #define add_tsu_reg(reg) add_reg_from(reg, sh_eth_tsu_read(mdp, reg))
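        /* Note: when __sh_eth_get_regs() is called with buf == NULL (as
         * sh_eth_get_regs_len() does below), the add_reg*() helpers only
         * count registers without reading them, so the same code is used
         * both to size the dump and to fill it.
         */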
2011
2012         add_reg(EDSR);
2013         add_reg(EDMR);
2014         add_reg(EDTRR);
2015         add_reg(EDRRR);
2016         add_reg(EESR);
2017         add_reg(EESIPR);
2018         add_reg(TDLAR);
2019         add_reg(TDFAR);
2020         add_reg(TDFXR);
2021         add_reg(TDFFR);
2022         add_reg(RDLAR);
2023         add_reg(RDFAR);
2024         add_reg(RDFXR);
2025         add_reg(RDFFR);
2026         add_reg(TRSCER);
2027         add_reg(RMFCR);
2028         add_reg(TFTR);
2029         add_reg(FDR);
2030         add_reg(RMCR);
2031         add_reg(TFUCR);
2032         add_reg(RFOCR);
2033         if (cd->rmiimode)
2034                 add_reg(RMIIMODE);
2035         add_reg(FCFTR);
2036         if (cd->rpadir)
2037                 add_reg(RPADIR);
2038         if (!cd->no_trimd)
2039                 add_reg(TRIMD);
2040         add_reg(ECMR);
2041         add_reg(ECSR);
2042         add_reg(ECSIPR);
2043         add_reg(PIR);
2044         if (!cd->no_psr)
2045                 add_reg(PSR);
2046         add_reg(RDMLR);
2047         add_reg(RFLR);
2048         add_reg(IPGR);
2049         if (cd->apr)
2050                 add_reg(APR);
2051         if (cd->mpr)
2052                 add_reg(MPR);
2053         add_reg(RFCR);
2054         add_reg(RFCF);
2055         if (cd->tpauser)
2056                 add_reg(TPAUSER);
2057         add_reg(TPAUSECR);
2058         add_reg(GECMR);
2059         if (cd->bculr)
2060                 add_reg(BCULR);
2061         add_reg(MAHR);
2062         add_reg(MALR);
2063         add_reg(TROCR);
2064         add_reg(CDCR);
2065         add_reg(LCCR);
2066         add_reg(CNDCR);
2067         add_reg(CEFCR);
2068         add_reg(FRECR);
2069         add_reg(TSFRCR);
2070         add_reg(TLFRCR);
2071         add_reg(CERCR);
2072         add_reg(CEECR);
2073         add_reg(MAFCR);
2074         if (cd->rtrate)
2075                 add_reg(RTRATE);
2076         if (cd->hw_crc)
2077                 add_reg(CSMR);
2078         if (cd->select_mii)
2079                 add_reg(RMII_MII);
2080         add_reg(ARSTR);
2081         if (cd->tsu) {
2082                 add_tsu_reg(TSU_CTRST);
2083                 add_tsu_reg(TSU_FWEN0);
2084                 add_tsu_reg(TSU_FWEN1);
2085                 add_tsu_reg(TSU_FCM);
2086                 add_tsu_reg(TSU_BSYSL0);
2087                 add_tsu_reg(TSU_BSYSL1);
2088                 add_tsu_reg(TSU_PRISL0);
2089                 add_tsu_reg(TSU_PRISL1);
2090                 add_tsu_reg(TSU_FWSL0);
2091                 add_tsu_reg(TSU_FWSL1);
2092                 add_tsu_reg(TSU_FWSLC);
2093                 add_tsu_reg(TSU_QTAG0);
2094                 add_tsu_reg(TSU_QTAG1);
2095                 add_tsu_reg(TSU_QTAGM0);
2096                 add_tsu_reg(TSU_QTAGM1);
2097                 add_tsu_reg(TSU_FWSR);
2098                 add_tsu_reg(TSU_FWINMK);
2099                 add_tsu_reg(TSU_ADQT0);
2100                 add_tsu_reg(TSU_ADQT1);
2101                 add_tsu_reg(TSU_VTAG0);
2102                 add_tsu_reg(TSU_VTAG1);
2103                 add_tsu_reg(TSU_ADSBSY);
2104                 add_tsu_reg(TSU_TEN);
2105                 add_tsu_reg(TSU_POST1);
2106                 add_tsu_reg(TSU_POST2);
2107                 add_tsu_reg(TSU_POST3);
2108                 add_tsu_reg(TSU_POST4);
2109                 if (mdp->reg_offset[TSU_ADRH0] != SH_ETH_OFFSET_INVALID) {
2110                         /* This is the start of a table, not just a single
2111                          * register.
2112                          */
2113                         if (buf) {
2114                                 unsigned int i;
2115
2116                                 mark_reg_valid(TSU_ADRH0);
2117                                 for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES * 2; i++)
2118                                         *buf++ = ioread32(
2119                                                 mdp->tsu_addr +
2120                                                 mdp->reg_offset[TSU_ADRH0] +
2121                                                 i * 4);
2122                         }
2123                         len += SH_ETH_TSU_CAM_ENTRIES * 2;
2124                 }
2125         }
2126
2127 #undef mark_reg_valid
2128 #undef add_reg_from
2129 #undef add_reg
2130 #undef add_tsu_reg
2131
2132         return len * 4;
2133 }
2134
2135 static int sh_eth_get_regs_len(struct net_device *ndev)
2136 {
2137         return __sh_eth_get_regs(ndev, NULL);
2138 }
2139
2140 static void sh_eth_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
2141                             void *buf)
2142 {
2143         struct sh_eth_private *mdp = netdev_priv(ndev);
2144
2145         regs->version = SH_ETH_REG_DUMP_VERSION;
2146
2147         pm_runtime_get_sync(&mdp->pdev->dev);
2148         __sh_eth_get_regs(ndev, buf);
2149         pm_runtime_put_sync(&mdp->pdev->dev);
2150 }
2151
2152 static int sh_eth_nway_reset(struct net_device *ndev)
2153 {
2154         struct sh_eth_private *mdp = netdev_priv(ndev);
2155         unsigned long flags;
2156         int ret;
2157
2158         if (!mdp->phydev)
2159                 return -ENODEV;
2160
2161         spin_lock_irqsave(&mdp->lock, flags);
2162         ret = phy_start_aneg(mdp->phydev);
2163         spin_unlock_irqrestore(&mdp->lock, flags);
2164
2165         return ret;
2166 }
2167
2168 static u32 sh_eth_get_msglevel(struct net_device *ndev)
2169 {
2170         struct sh_eth_private *mdp = netdev_priv(ndev);
2171         return mdp->msg_enable;
2172 }
2173
2174 static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
2175 {
2176         struct sh_eth_private *mdp = netdev_priv(ndev);
2177         mdp->msg_enable = value;
2178 }
2179
2180 static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
2181         "rx_current", "tx_current",
2182         "rx_dirty", "tx_dirty",
2183 };
2184 #define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)
2185
2186 static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
2187 {
2188         switch (sset) {
2189         case ETH_SS_STATS:
2190                 return SH_ETH_STATS_LEN;
2191         default:
2192                 return -EOPNOTSUPP;
2193         }
2194 }
2195
2196 static void sh_eth_get_ethtool_stats(struct net_device *ndev,
2197                                      struct ethtool_stats *stats, u64 *data)
2198 {
2199         struct sh_eth_private *mdp = netdev_priv(ndev);
2200         int i = 0;
2201
2202         /* device-specific stats */
2203         data[i++] = mdp->cur_rx;
2204         data[i++] = mdp->cur_tx;
2205         data[i++] = mdp->dirty_rx;
2206         data[i++] = mdp->dirty_tx;
2207 }
2208
2209 static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
2210 {
2211         switch (stringset) {
2212         case ETH_SS_STATS:
2213                 memcpy(data, *sh_eth_gstrings_stats,
2214                        sizeof(sh_eth_gstrings_stats));
2215                 break;
2216         }
2217 }
2218
2219 static void sh_eth_get_ringparam(struct net_device *ndev,
2220                                  struct ethtool_ringparam *ring)
2221 {
2222         struct sh_eth_private *mdp = netdev_priv(ndev);
2223
2224         ring->rx_max_pending = RX_RING_MAX;
2225         ring->tx_max_pending = TX_RING_MAX;
2226         ring->rx_pending = mdp->num_rx_ring;
2227         ring->tx_pending = mdp->num_tx_ring;
2228 }
2229
2230 static int sh_eth_set_ringparam(struct net_device *ndev,
2231                                 struct ethtool_ringparam *ring)
2232 {
2233         struct sh_eth_private *mdp = netdev_priv(ndev);
2234         int ret;
2235
2236         if (ring->tx_pending > TX_RING_MAX ||
2237             ring->rx_pending > RX_RING_MAX ||
2238             ring->tx_pending < TX_RING_MIN ||
2239             ring->rx_pending < RX_RING_MIN)
2240                 return -EINVAL;
2241         if (ring->rx_mini_pending || ring->rx_jumbo_pending)
2242                 return -EINVAL;
2243
2244         if (netif_running(ndev)) {
2245                 netif_device_detach(ndev);
2246                 netif_tx_disable(ndev);
2247
2248                 /* Serialise with the interrupt handler and NAPI, then
2249                  * disable interrupts.  We have to clear the
2250                  * irq_enabled flag first to ensure that interrupts
2251                  * won't be re-enabled.
2252                  */
2253                 mdp->irq_enabled = false;
2254                 synchronize_irq(ndev->irq);
2255                 napi_synchronize(&mdp->napi);
2256                 sh_eth_write(ndev, 0x0000, EESIPR);
2257
2258                 sh_eth_dev_exit(ndev);
2259
2260                 /* Free all the skbuffs in the Rx queue and the DMA buffers. */
2261                 sh_eth_ring_free(ndev);
2262         }
2263
2264         /* Set new parameters */
2265         mdp->num_rx_ring = ring->rx_pending;
2266         mdp->num_tx_ring = ring->tx_pending;
2267
2268         if (netif_running(ndev)) {
2269                 ret = sh_eth_ring_init(ndev);
2270                 if (ret < 0) {
2271                         netdev_err(ndev, "%s: sh_eth_ring_init failed.\n",
2272                                    __func__);
2273                         return ret;
2274                 }
2275                 ret = sh_eth_dev_init(ndev, false);
2276                 if (ret < 0) {
2277                         netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
2278                                    __func__);
2279                         return ret;
2280                 }
2281
2282                 mdp->irq_enabled = true;
2283                 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
2284                 /* Setting the Rx mode will start the Rx process. */
2285                 sh_eth_write(ndev, EDRRR_R, EDRRR);
2286                 netif_device_attach(ndev);
2287         }
2288
2289         return 0;
2290 }
2291
2292 static const struct ethtool_ops sh_eth_ethtool_ops = {
2293         .get_settings   = sh_eth_get_settings,
2294         .set_settings   = sh_eth_set_settings,
2295         .get_regs_len   = sh_eth_get_regs_len,
2296         .get_regs       = sh_eth_get_regs,
2297         .nway_reset     = sh_eth_nway_reset,
2298         .get_msglevel   = sh_eth_get_msglevel,
2299         .set_msglevel   = sh_eth_set_msglevel,
2300         .get_link       = ethtool_op_get_link,
2301         .get_strings    = sh_eth_get_strings,
2302         .get_ethtool_stats  = sh_eth_get_ethtool_stats,
2303         .get_sset_count     = sh_eth_get_sset_count,
2304         .get_ringparam  = sh_eth_get_ringparam,
2305         .set_ringparam  = sh_eth_set_ringparam,
2306 };
2307
2308 /* network device open function */
2309 static int sh_eth_open(struct net_device *ndev)
2310 {
2311         int ret = 0;
2312         struct sh_eth_private *mdp = netdev_priv(ndev);
2313
2314         pm_runtime_get_sync(&mdp->pdev->dev);
2315
2316         napi_enable(&mdp->napi);
2317
2318         ret = request_irq(ndev->irq, sh_eth_interrupt,
2319                           mdp->cd->irq_flags, ndev->name, ndev);
2320         if (ret) {
2321                 netdev_err(ndev, "Can not assign IRQ number\n");
2322                 goto out_napi_off;
2323         }
2324
2325         /* Descriptor set */
2326         ret = sh_eth_ring_init(ndev);
2327         if (ret)
2328                 goto out_free_irq;
2329
2330         /* device init */
2331         ret = sh_eth_dev_init(ndev, true);
2332         if (ret)
2333                 goto out_free_irq;
2334
2335         /* PHY control start*/
2336         ret = sh_eth_phy_start(ndev);
2337         if (ret)
2338                 goto out_free_irq;
2339
2340         mdp->is_opened = 1;
2341
2342         return ret;
2343
2344 out_free_irq:
2345         free_irq(ndev->irq, ndev);
2346 out_napi_off:
2347         napi_disable(&mdp->napi);
2348         pm_runtime_put_sync(&mdp->pdev->dev);
2349         return ret;
2350 }
2351
2352 /* Timeout function */
2353 static void sh_eth_tx_timeout(struct net_device *ndev)
2354 {
2355         struct sh_eth_private *mdp = netdev_priv(ndev);
2356         struct sh_eth_rxdesc *rxdesc;
2357         int i;
2358
2359         netif_stop_queue(ndev);
2360
2361         netif_err(mdp, timer, ndev,
2362                   "transmit timed out, status %8.8x, resetting...\n",
2363                   sh_eth_read(ndev, EESR));
2364
2365         /* tx_errors count up */
2366         ndev->stats.tx_errors++;
2367
2368         /* Free all the skbuffs in the Rx queue. */
2369         for (i = 0; i < mdp->num_rx_ring; i++) {
2370                 rxdesc = &mdp->rx_ring[i];
2371                 rxdesc->status = cpu_to_edmac(mdp, 0);
2372                 rxdesc->addr = cpu_to_edmac(mdp, 0xBADF00D0);
2373                 dev_kfree_skb(mdp->rx_skbuff[i]);
2374                 mdp->rx_skbuff[i] = NULL;
2375         }
2376         for (i = 0; i < mdp->num_tx_ring; i++) {
2377                 dev_kfree_skb(mdp->tx_skbuff[i]);
2378                 mdp->tx_skbuff[i] = NULL;
2379         }
2380
2381         /* device init */
2382         sh_eth_dev_init(ndev, true);
2383 }
2384
2385 /* Packet transmit function */
2386 static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2387 {
2388         struct sh_eth_private *mdp = netdev_priv(ndev);
2389         struct sh_eth_txdesc *txdesc;
2390         dma_addr_t dma_addr;
2391         u32 entry;
2392         unsigned long flags;
2393
2394         spin_lock_irqsave(&mdp->lock, flags);
2395         if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
2396                 if (!sh_eth_txfree(ndev)) {
2397                         netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
2398                         netif_stop_queue(ndev);
2399                         spin_unlock_irqrestore(&mdp->lock, flags);
2400                         return NETDEV_TX_BUSY;
2401                 }
2402         }
2403         spin_unlock_irqrestore(&mdp->lock, flags);
2404
2405         if (skb_put_padto(skb, ETH_ZLEN))
2406                 return NETDEV_TX_OK;
2407
2408         entry = mdp->cur_tx % mdp->num_tx_ring;
2409         mdp->tx_skbuff[entry] = skb;
2410         txdesc = &mdp->tx_ring[entry];
2411         /* soft swap. */
2412         if (!mdp->cd->hw_swap)
2413                 sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2);
2414         dma_addr = dma_map_single(&ndev->dev, skb->data, skb->len,
2415                                   DMA_TO_DEVICE);
2416         if (dma_mapping_error(&ndev->dev, dma_addr)) {
2417                 kfree_skb(skb);
2418                 return NETDEV_TX_OK;
2419         }
2420         txdesc->addr = cpu_to_edmac(mdp, dma_addr);
2421         txdesc->len  = cpu_to_edmac(mdp, skb->len << 16);
2422
2423         dma_wmb(); /* TACT bit must be set after all the above writes */
2424         if (entry >= mdp->num_tx_ring - 1)
2425                 txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
2426         else
2427                 txdesc->status |= cpu_to_edmac(mdp, TD_TACT);
2428
2429         mdp->cur_tx++;
2430
2431         if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
2432                 sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
2433
2434         return NETDEV_TX_OK;
2435 }
2436
2437 /* The statistics registers have write-clear behaviour, which means we
2438  * will lose any increment between the read and write.  We mitigate
2439  * this by only clearing when we read a non-zero value, so we will
2440  * never falsely report a total of zero.
2441  */
2442 static void
2443 sh_eth_update_stat(struct net_device *ndev, unsigned long *stat, int reg)
2444 {
2445         u32 delta = sh_eth_read(ndev, reg);
2446
2447         if (delta) {
2448                 *stat += delta;
2449                 sh_eth_write(ndev, 0, reg);
2450         }
2451 }
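/* Example of the scheme above: if TROCR reads back 5, sh_eth_get_stats()
 * below adds 5 to tx_dropped and clears the register; events that land
 * between the read and the write are lost, which is the trade-off the
 * comment above describes.
 */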
2452
2453 static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
2454 {
2455         struct sh_eth_private *mdp = netdev_priv(ndev);
2456
2457         if (sh_eth_is_rz_fast_ether(mdp))
2458                 return &ndev->stats;
2459
2460         if (!mdp->is_opened)
2461                 return &ndev->stats;
2462
2463         sh_eth_update_stat(ndev, &ndev->stats.tx_dropped, TROCR);
2464         sh_eth_update_stat(ndev, &ndev->stats.collisions, CDCR);
2465         sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, LCCR);
2466
2467         if (sh_eth_is_gether(mdp)) {
2468                 sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
2469                                    CERCR);
2470                 sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
2471                                    CEECR);
2472         } else {
2473                 sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
2474                                    CNDCR);
2475         }
2476
2477         return &ndev->stats;
2478 }
2479
2480 /* device close function */
2481 static int sh_eth_close(struct net_device *ndev)
2482 {
2483         struct sh_eth_private *mdp = netdev_priv(ndev);
2484
2485         netif_stop_queue(ndev);
2486
2487         /* Serialise with the interrupt handler and NAPI, then disable
2488          * interrupts.  We have to clear the irq_enabled flag first to
2489          * ensure that interrupts won't be re-enabled.
2490          */
2491         mdp->irq_enabled = false;
2492         synchronize_irq(ndev->irq);
2493         napi_disable(&mdp->napi);
2494         sh_eth_write(ndev, 0x0000, EESIPR);
2495
2496         sh_eth_dev_exit(ndev);
2497
2498         /* PHY Disconnect */
2499         if (mdp->phydev) {
2500                 phy_stop(mdp->phydev);
2501                 phy_disconnect(mdp->phydev);
2502                 mdp->phydev = NULL;
2503         }
2504
2505         free_irq(ndev->irq, ndev);
2506
2507         /* Free all the skbuffs in the Rx queue and the DMA buffer. */
2508         sh_eth_ring_free(ndev);
2509
2510         pm_runtime_put_sync(&mdp->pdev->dev);
2511
2512         mdp->is_opened = 0;
2513
2514         return 0;
2515 }
2516
2517 /* ioctl to device function */
2518 static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2519 {
2520         struct sh_eth_private *mdp = netdev_priv(ndev);
2521         struct phy_device *phydev = mdp->phydev;
2522
2523         if (!netif_running(ndev))
2524                 return -EINVAL;
2525
2526         if (!phydev)
2527                 return -ENODEV;
2528
2529         return phy_mii_ioctl(phydev, rq, cmd);
2530 }
2531
2532 /* For TSU_POSTn. Please refer to the manual about these (strange) bitfields. */
2533 static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
2534                                             int entry)
2535 {
2536         return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);
2537 }
2538
2539 static u32 sh_eth_tsu_get_post_mask(int entry)
2540 {
2541         return 0x0f << (28 - ((entry % 8) * 4));
2542 }
2543
2544 static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
2545 {
2546         return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
2547 }
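/* Worked example, derived from the three helpers above rather than from the
 * datasheet: CAM entry 10 lives in the second POST register (TSU_POST1 + 4)
 * because 10 / 8 == 1; its nibble starts at bit 28 - (10 % 8) * 4 == 20, so
 * the per-entry mask is 0x00f00000 and the per-port bit is 0x00800000 for
 * port 0 or 0x00200000 for port 1.
 */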
2548
2549 static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
2550                                              int entry)
2551 {
2552         struct sh_eth_private *mdp = netdev_priv(ndev);
2553         u32 tmp;
2554         void *reg_offset;
2555
2556         reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
2557         tmp = ioread32(reg_offset);
2558         iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);
2559 }
2560
2561 static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
2562                                               int entry)
2563 {
2564         struct sh_eth_private *mdp = netdev_priv(ndev);
2565         u32 post_mask, ref_mask, tmp;
2566         void *reg_offset;
2567
2568         reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
2569         post_mask = sh_eth_tsu_get_post_mask(entry);
2570         ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;
2571
2572         tmp = ioread32(reg_offset);
2573         iowrite32(tmp & ~post_mask, reg_offset);
2574
2575         /* If the other port still enables this entry, the function returns true */
2576         return tmp & ref_mask;
2577 }
2578
2579 static int sh_eth_tsu_busy(struct net_device *ndev)
2580 {
2581         int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
2582         struct sh_eth_private *mdp = netdev_priv(ndev);
2583
2584         while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
2585                 udelay(10);
2586                 timeout--;
2587                 if (timeout <= 0) {
2588                         netdev_err(ndev, "%s: timeout\n", __func__);
2589                         return -ETIMEDOUT;
2590                 }
2591         }
2592
2593         return 0;
2594 }
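/* The loop above polls in 10 us steps, SH_ETH_TSU_TIMEOUT_MS * 100 times,
 * i.e. it waits roughly SH_ETH_TSU_TIMEOUT_MS milliseconds (ignoring the
 * cost of the register reads) before reporting -ETIMEDOUT.
 */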
2595
2596 static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
2597                                   const u8 *addr)
2598 {
2599         u32 val;
2600
2601         val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
2602         iowrite32(val, reg);
2603         if (sh_eth_tsu_busy(ndev) < 0)
2604                 return -EBUSY;
2605
2606         val = addr[4] << 8 | addr[5];
2607         iowrite32(val, reg + 4);
2608         if (sh_eth_tsu_busy(ndev) < 0)
2609                 return -EBUSY;
2610
2611         return 0;
2612 }
2613
2614 static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
2615 {
2616         u32 val;
2617
2618         val = ioread32(reg);
2619         addr[0] = (val >> 24) & 0xff;
2620         addr[1] = (val >> 16) & 0xff;
2621         addr[2] = (val >> 8) & 0xff;
2622         addr[3] = val & 0xff;
2623         val = ioread32(reg + 4);
2624         addr[4] = (val >> 8) & 0xff;
2625         addr[5] = val & 0xff;
2626 }
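/* Illustrative CAM layout implied by the two helpers above: the address
 * 00:11:22:33:44:55 is stored as 0x00112233 in the first 32-bit word of an
 * entry and 0x00004455 in the second, so each TSU_ADRH0 table entry
 * occupies 8 bytes.
 */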
2627
2628
2629 static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
2630 {
2631         struct sh_eth_private *mdp = netdev_priv(ndev);
2632         void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2633         int i;
2634         u8 c_addr[ETH_ALEN];
2635
2636         for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2637                 sh_eth_tsu_read_entry(reg_offset, c_addr);
2638                 if (ether_addr_equal(addr, c_addr))
2639                         return i;
2640         }
2641
2642         return -ENOENT;
2643 }
2644
2645 static int sh_eth_tsu_find_empty(struct net_device *ndev)
2646 {
2647         u8 blank[ETH_ALEN];
2648         int entry;
2649
2650         memset(blank, 0, sizeof(blank));
2651         entry = sh_eth_tsu_find_entry(ndev, blank);
2652         return (entry < 0) ? -ENOMEM : entry;
2653 }
2654
2655 static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
2656                                               int entry)
2657 {
2658         struct sh_eth_private *mdp = netdev_priv(ndev);
2659         void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2660         int ret;
2661         u8 blank[ETH_ALEN];
2662
2663         sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
2664                          ~(1 << (31 - entry)), TSU_TEN);
2665
2666         memset(blank, 0, sizeof(blank));
2667         ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
2668         if (ret < 0)
2669                 return ret;
2670         return 0;
2671 }
2672
2673 static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
2674 {
2675         struct sh_eth_private *mdp = netdev_priv(ndev);
2676         void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2677         int i, ret;
2678
2679         if (!mdp->cd->tsu)
2680                 return 0;
2681
2682         i = sh_eth_tsu_find_entry(ndev, addr);
2683         if (i < 0) {
2684                 /* No entry found, create one */
2685                 i = sh_eth_tsu_find_empty(ndev);
2686                 if (i < 0)
2687                         return -ENOMEM;
2688                 ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
2689                 if (ret < 0)
2690                         return ret;
2691
2692                 /* Enable the entry */
2693                 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
2694                                  (1 << (31 - i)), TSU_TEN);
2695         }
2696
2697         /* Entry found or created, enable POST */
2698         sh_eth_tsu_enable_cam_entry_post(ndev, i);
2699
2700         return 0;
2701 }
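/* As used above, TSU_TEN enables CAM entry i via bit (31 - i), so entry 0
 * corresponds to the most significant bit; the POST registers then select
 * which port(s) an enabled entry applies to.
 */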
2702
2703 static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
2704 {
2705         struct sh_eth_private *mdp = netdev_priv(ndev);
2706         int i, ret;
2707
2708         if (!mdp->cd->tsu)
2709                 return 0;
2710
2711         i = sh_eth_tsu_find_entry(ndev, addr);
2712         if (i >= 0) {
2713                 /* Entry found */
2714                 if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2715                         goto done;
2716
2717                 /* Disable the entry if both ports were disabled */
2718                 ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2719                 if (ret < 0)
2720                         return ret;
2721         }
2722 done:
2723         return 0;
2724 }
2725
2726 static int sh_eth_tsu_purge_all(struct net_device *ndev)
2727 {
2728         struct sh_eth_private *mdp = netdev_priv(ndev);
2729         int i, ret;
2730
2731         if (!mdp->cd->tsu)
2732                 return 0;
2733
2734         for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
2735                 if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2736                         continue;
2737
2738                 /* Disable the entry if both ports were disabled */
2739                 ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2740                 if (ret < 0)
2741                         return ret;
2742         }
2743
2744         return 0;
2745 }
2746
2747 static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
2748 {
2749         struct sh_eth_private *mdp = netdev_priv(ndev);
2750         u8 addr[ETH_ALEN];
2751         void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2752         int i;
2753
2754         if (!mdp->cd->tsu)
2755                 return;
2756
2757         for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2758                 sh_eth_tsu_read_entry(reg_offset, addr);
2759                 if (is_multicast_ether_addr(addr))
2760                         sh_eth_tsu_del_entry(ndev, addr);
2761         }
2762 }
2763
2764 /* Update promiscuous flag and multicast filter */
2765 static void sh_eth_set_rx_mode(struct net_device *ndev)
2766 {
2767         struct sh_eth_private *mdp = netdev_priv(ndev);
2768         u32 ecmr_bits;
2769         int mcast_all = 0;
2770         unsigned long flags;
2771
2772         spin_lock_irqsave(&mdp->lock, flags);
2773         /* Initial condition is MCT = 1, PRM = 0.
2774          * Depending on ndev->flags, set PRM or clear MCT
2775          */
2776         ecmr_bits = sh_eth_read(ndev, ECMR) & ~ECMR_PRM;
2777         if (mdp->cd->tsu)
2778                 ecmr_bits |= ECMR_MCT;
2779
2780         if (!(ndev->flags & IFF_MULTICAST)) {
2781                 sh_eth_tsu_purge_mcast(ndev);
2782                 mcast_all = 1;
2783         }
2784         if (ndev->flags & IFF_ALLMULTI) {
2785                 sh_eth_tsu_purge_mcast(ndev);
2786                 ecmr_bits &= ~ECMR_MCT;
2787                 mcast_all = 1;
2788         }
2789
2790         if (ndev->flags & IFF_PROMISC) {
2791                 sh_eth_tsu_purge_all(ndev);
2792                 ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
2793         } else if (mdp->cd->tsu) {
2794                 struct netdev_hw_addr *ha;
2795                 netdev_for_each_mc_addr(ha, ndev) {
2796                         if (mcast_all && is_multicast_ether_addr(ha->addr))
2797                                 continue;
2798
2799                         if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
2800                                 if (!mcast_all) {
2801                                         sh_eth_tsu_purge_mcast(ndev);
2802                                         ecmr_bits &= ~ECMR_MCT;
2803                                         mcast_all = 1;
2804                                 }
2805                         }
2806                 }
2807         }
2808
2809         /* update the ethernet mode */
2810         sh_eth_write(ndev, ecmr_bits, ECMR);
2811
2812         spin_unlock_irqrestore(&mdp->lock, flags);
2813 }
2814
2815 static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
2816 {
2817         if (!mdp->port)
2818                 return TSU_VTAG0;
2819         else
2820                 return TSU_VTAG1;
2821 }
2822
2823 static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
2824                                   __be16 proto, u16 vid)
2825 {
2826         struct sh_eth_private *mdp = netdev_priv(ndev);
2827         int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2828
2829         if (unlikely(!mdp->cd->tsu))
2830                 return -EPERM;
2831
2832         /* No filtering if vid = 0 */
2833         if (!vid)
2834                 return 0;
2835
2836         mdp->vlan_num_ids++;
2837
2838         /* The controller has one VLAN tag HW filter. So, if the filter is
2839          * already enabled, the driver disables it and accepts all VLAN IDs.
2840          */
2841         if (mdp->vlan_num_ids > 1) {
2842                 /* disable VLAN filter */
2843                 sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2844                 return 0;
2845         }
2846
2847         sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
2848                          vtag_reg_index);
2849
2850         return 0;
2851 }
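/* Example of the single-filter limitation handled above: adding the first
 * VLAN ID (say 5) programs TSU_VTAG_ENABLE | 5 into the VTAG register,
 * while adding a second ID clears the register again so that every VLAN ID
 * is accepted.
 */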
2852
2853 static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
2854                                    __be16 proto, u16 vid)
2855 {
2856         struct sh_eth_private *mdp = netdev_priv(ndev);
2857         int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2858
2859         if (unlikely(!mdp->cd->tsu))
2860                 return -EPERM;
2861
2862         /* No filtering if vid = 0 */
2863         if (!vid)
2864                 return 0;
2865
2866         mdp->vlan_num_ids--;
2867         sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2868
2869         return 0;
2870 }
2871
2872 /* SuperH's TSU register init function */
2873 static void sh_eth_tsu_init(struct sh_eth_private *mdp)
2874 {
2875         if (sh_eth_is_rz_fast_ether(mdp)) {
2876                 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
2877                 return;
2878         }
2879
2880         sh_eth_tsu_write(mdp, 0, TSU_FWEN0);    /* Disable forward(0->1) */
2881         sh_eth_tsu_write(mdp, 0, TSU_FWEN1);    /* Disable forward(1->0) */
2882         sh_eth_tsu_write(mdp, 0, TSU_FCM);      /* forward fifo 3k-3k */
2883         sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
2884         sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
2885         sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
2886         sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
2887         sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
2888         sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
2889         sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
2890         if (sh_eth_is_gether(mdp)) {
2891                 sh_eth_tsu_write(mdp, 0, TSU_QTAG0);    /* Disable QTAG(0->1) */
2892                 sh_eth_tsu_write(mdp, 0, TSU_QTAG1);    /* Disable QTAG(1->0) */
2893         } else {
2894                 sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);   /* Disable QTAG(0->1) */
2895                 sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);   /* Disable QTAG(1->0) */
2896         }
2897         sh_eth_tsu_write(mdp, 0, TSU_FWSR);     /* all interrupt status clear */
2898         sh_eth_tsu_write(mdp, 0, TSU_FWINMK);   /* Disable all interrupt */
2899         sh_eth_tsu_write(mdp, 0, TSU_TEN);      /* Disable all CAM entry */
2900         sh_eth_tsu_write(mdp, 0, TSU_POST1);    /* Disable CAM entry [ 0- 7] */
2901         sh_eth_tsu_write(mdp, 0, TSU_POST2);    /* Disable CAM entry [ 8-15] */
2902         sh_eth_tsu_write(mdp, 0, TSU_POST3);    /* Disable CAM entry [16-23] */
2903         sh_eth_tsu_write(mdp, 0, TSU_POST4);    /* Disable CAM entry [24-31] */
2904 }
2905
2906 /* MDIO bus release function */
2907 static int sh_mdio_release(struct sh_eth_private *mdp)
2908 {
2909         /* unregister mdio bus */
2910         mdiobus_unregister(mdp->mii_bus);
2911
2912         /* free bitbang info */
2913         free_mdio_bitbang(mdp->mii_bus);
2914
2915         return 0;
2916 }
2917
2918 /* MDIO bus init function */
2919 static int sh_mdio_init(struct sh_eth_private *mdp,
2920                         struct sh_eth_plat_data *pd)
2921 {
2922         int ret, i;
2923         struct bb_info *bitbang;
2924         struct platform_device *pdev = mdp->pdev;
2925         struct device *dev = &mdp->pdev->dev;
2926
2927         /* create bit control struct for PHY */
2928         bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL);
2929         if (!bitbang)
2930                 return -ENOMEM;
2931
2932         /* bitbang init */
2933         bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
2934         bitbang->set_gate = pd->set_mdio_gate;
2935         bitbang->mdi_msk = PIR_MDI;
2936         bitbang->mdo_msk = PIR_MDO;
2937         bitbang->mmd_msk = PIR_MMD;
2938         bitbang->mdc_msk = PIR_MDC;
2939         bitbang->ctrl.ops = &bb_ops;
2940
2941         /* MII controller setting */
2942         mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
2943         if (!mdp->mii_bus)
2944                 return -ENOMEM;
2945
2946         /* Hook up MII support for ethtool */
2947         mdp->mii_bus->name = "sh_mii";
2948         mdp->mii_bus->parent = dev;
2949         snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
2950                  pdev->name, pdev->id);
2951
2952         /* PHY IRQ */
2953         mdp->mii_bus->irq = devm_kmalloc_array(dev, PHY_MAX_ADDR, sizeof(int),
2954                                                GFP_KERNEL);
2955         if (!mdp->mii_bus->irq) {
2956                 ret = -ENOMEM;
2957                 goto out_free_bus;
2958         }
2959
2960         /* register MDIO bus */
2961         if (dev->of_node) {
2962                 ret = of_mdiobus_register(mdp->mii_bus, dev->of_node);
2963         } else {
2964                 for (i = 0; i < PHY_MAX_ADDR; i++)
2965                         mdp->mii_bus->irq[i] = PHY_POLL;
2966                 if (pd->phy_irq > 0)
2967                         mdp->mii_bus->irq[pd->phy] = pd->phy_irq;
2968
2969                 ret = mdiobus_register(mdp->mii_bus);
2970         }
2971
2972         if (ret)
2973                 goto out_free_bus;
2974
2975         return 0;
2976
2977 out_free_bus:
2978         free_mdio_bitbang(mdp->mii_bus);
2979         return ret;
2980 }
2981
2982 static const u16 *sh_eth_get_register_offset(int register_type)
2983 {
2984         const u16 *reg_offset = NULL;
2985
2986         switch (register_type) {
2987         case SH_ETH_REG_GIGABIT:
2988                 reg_offset = sh_eth_offset_gigabit;
2989                 break;
2990         case SH_ETH_REG_FAST_RZ:
2991                 reg_offset = sh_eth_offset_fast_rz;
2992                 break;
2993         case SH_ETH_REG_FAST_RCAR:
2994                 reg_offset = sh_eth_offset_fast_rcar;
2995                 break;
2996         case SH_ETH_REG_FAST_SH4:
2997                 reg_offset = sh_eth_offset_fast_sh4;
2998                 break;
2999         case SH_ETH_REG_FAST_SH3_SH2:
3000                 reg_offset = sh_eth_offset_fast_sh3_sh2;
3001                 break;
3002         default:
3003                 break;
3004         }
3005
3006         return reg_offset;
3007 }
3008
3009 static const struct net_device_ops sh_eth_netdev_ops = {
3010         .ndo_open               = sh_eth_open,
3011         .ndo_stop               = sh_eth_close,
3012         .ndo_start_xmit         = sh_eth_start_xmit,
3013         .ndo_get_stats          = sh_eth_get_stats,
3014         .ndo_set_rx_mode        = sh_eth_set_rx_mode,
3015         .ndo_tx_timeout         = sh_eth_tx_timeout,
3016         .ndo_do_ioctl           = sh_eth_do_ioctl,
3017         .ndo_validate_addr      = eth_validate_addr,
3018         .ndo_set_mac_address    = eth_mac_addr,
3019         .ndo_change_mtu         = eth_change_mtu,
3020 };
3021
3022 static const struct net_device_ops sh_eth_netdev_ops_tsu = {
3023         .ndo_open               = sh_eth_open,
3024         .ndo_stop               = sh_eth_close,
3025         .ndo_start_xmit         = sh_eth_start_xmit,
3026         .ndo_get_stats          = sh_eth_get_stats,
3027         .ndo_set_rx_mode        = sh_eth_set_rx_mode,
3028         .ndo_vlan_rx_add_vid    = sh_eth_vlan_rx_add_vid,
3029         .ndo_vlan_rx_kill_vid   = sh_eth_vlan_rx_kill_vid,
3030         .ndo_tx_timeout         = sh_eth_tx_timeout,
3031         .ndo_do_ioctl           = sh_eth_do_ioctl,
3032         .ndo_validate_addr      = eth_validate_addr,
3033         .ndo_set_mac_address    = eth_mac_addr,
3034         .ndo_change_mtu         = eth_change_mtu,
3035 };
3036
3037 #ifdef CONFIG_OF
3038 static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
3039 {
3040         struct device_node *np = dev->of_node;
3041         struct sh_eth_plat_data *pdata;
3042         const char *mac_addr;
3043
3044         pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3045         if (!pdata)
3046                 return NULL;
3047
3048         pdata->phy_interface = of_get_phy_mode(np);
3049
3050         mac_addr = of_get_mac_address(np);
3051         if (mac_addr)
3052                 memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);
3053
3054         pdata->no_ether_link =
3055                 of_property_read_bool(np, "renesas,no-ether-link");
3056         pdata->ether_link_active_low =
3057                 of_property_read_bool(np, "renesas,ether-link-active-low");
3058
3059         return pdata;
3060 }
3061
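     /* DT match table: each compatible string selects the sh_eth_cpu_data
      * describing that SoC's Ether/GEther instance (register layout, quirks
      * and feature bits such as TSU support).
      */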
3062 static const struct of_device_id sh_eth_match_table[] = {
3063         { .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data },
3064         { .compatible = "renesas,ether-r8a7778", .data = &r8a777x_data },
3065         { .compatible = "renesas,ether-r8a7779", .data = &r8a777x_data },
3066         { .compatible = "renesas,ether-r8a7790", .data = &r8a779x_data },
3067         { .compatible = "renesas,ether-r8a7791", .data = &r8a779x_data },
3068         { .compatible = "renesas,ether-r8a7793", .data = &r8a779x_data },
3069         { .compatible = "renesas,ether-r8a7794", .data = &r8a779x_data },
3070         { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
3071         { }
3072 };
3073 MODULE_DEVICE_TABLE(of, sh_eth_match_table);
3074 #else
3075 static inline struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
3076 {
3077         return NULL;
3078 }
3079 #endif
3080
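     /* Probe: map the MAC (and optional TSU) registers, pick the per-SoC
      * cpu_data from either the platform_device_id table or the DT match,
      * set up the MDIO bus and NAPI context, then register the net device.
      * Runtime PM is enabled first so the hardware is powered up while its
      * registers are being accessed.
      */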
3081 static int sh_eth_drv_probe(struct platform_device *pdev)
3082 {
3083         int ret, devno = 0;
3084         struct resource *res;
3085         struct net_device *ndev = NULL;
3086         struct sh_eth_private *mdp = NULL;
3087         struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev);
3088         const struct platform_device_id *id = platform_get_device_id(pdev);
3089
3090         /* get base addr */
3091         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3092
3093         ndev = alloc_etherdev(sizeof(struct sh_eth_private));
3094         if (!ndev)
3095                 return -ENOMEM;
3096
3097         pm_runtime_enable(&pdev->dev);
3098         pm_runtime_get_sync(&pdev->dev);
3099
3100         devno = pdev->id;
3101         if (devno < 0)
3102                 devno = 0;
3103
3104         ndev->dma = -1;
3105         ret = platform_get_irq(pdev, 0);
3106         if (ret < 0)
3107                 goto out_release;
3108         ndev->irq = ret;
3109
3110         SET_NETDEV_DEV(ndev, &pdev->dev);
3111
3112         mdp = netdev_priv(ndev);
3113         mdp->num_tx_ring = TX_RING_SIZE;
3114         mdp->num_rx_ring = RX_RING_SIZE;
3115         mdp->addr = devm_ioremap_resource(&pdev->dev, res);
3116         if (IS_ERR(mdp->addr)) {
3117                 ret = PTR_ERR(mdp->addr);
3118                 goto out_release;
3119         }
3120
3121         ndev->base_addr = res->start;
3122
3123         spin_lock_init(&mdp->lock);
3124         mdp->pdev = pdev;
3125
3126         if (pdev->dev.of_node)
3127                 pd = sh_eth_parse_dt(&pdev->dev);
3128         if (!pd) {
3129                 dev_err(&pdev->dev, "no platform data\n");
3130                 ret = -EINVAL;
3131                 goto out_release;
3132         }
3133
3134         /* get PHY ID */
3135         mdp->phy_id = pd->phy;
3136         mdp->phy_interface = pd->phy_interface;
3137         /* EDMAC endian */
3138         mdp->edmac_endian = pd->edmac_endian;
3139         mdp->no_ether_link = pd->no_ether_link;
3140         mdp->ether_link_active_low = pd->ether_link_active_low;
3141
3142         /* set cpu data */
3143         if (id) {
3144                 mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
3145         } else {
3146                 const struct of_device_id *match;
3147
3148                 match = of_match_device(of_match_ptr(sh_eth_match_table),
3149                                         &pdev->dev);
3150                 mdp->cd = (struct sh_eth_cpu_data *)match->data;
3151         }
3152         mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
3153         if (!mdp->reg_offset) {
3154                 dev_err(&pdev->dev, "Unknown register type (%d)\n",
3155                         mdp->cd->register_type);
3156                 ret = -EINVAL;
3157                 goto out_release;
3158         }
3159         sh_eth_set_default_cpu_data(mdp->cd);
3160
3161         /* install the netdev and ethtool operations */
3162         if (mdp->cd->tsu)
3163                 ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
3164         else
3165                 ndev->netdev_ops = &sh_eth_netdev_ops;
3166         ndev->ethtool_ops = &sh_eth_ethtool_ops;
3167         ndev->watchdog_timeo = TX_TIMEOUT;
3168
3169         /* debug message level */
3170         mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
3171
3172         /* read and set MAC address */
3173         read_mac_address(ndev, pd->mac_addr);
3174         if (!is_valid_ether_addr(ndev->dev_addr)) {
3175                 dev_warn(&pdev->dev,
3176                          "no valid MAC address supplied, using a random one.\n");
3177                 eth_hw_addr_random(ndev);
3178         }
3179
3180         /* ioremap the TSU registers */
3181         if (mdp->cd->tsu) {
3182                 struct resource *rtsu;
3183
3184                 rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
3185                 if (!rtsu) {
3186                         dev_err(&pdev->dev, "no TSU resource\n");
3187                         ret = -ENODEV;
3188                         goto out_release;
3189                 }
3190                 /* Only the first of the two ports sharing this TSU may
3191                  * request the TSU memory region; the second port only
3192                  * ioremaps it below. */
3193                 if (devno % 2 == 0 &&
3194                     !devm_request_mem_region(&pdev->dev, rtsu->start,
3195                                              resource_size(rtsu),
3196                                              dev_name(&pdev->dev))) {
3197                         dev_err(&pdev->dev, "can't request TSU resource.\n");
3198                         ret = -EBUSY;
3199                         goto out_release;
3200                 }
3201                 mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start,
3202                                              resource_size(rtsu));
3203                 if (!mdp->tsu_addr) {
3204                         dev_err(&pdev->dev, "TSU region ioremap() failed.\n");
3205                         ret = -ENOMEM;
3206                         goto out_release;
3207                 }
3208                 mdp->port = devno % 2;
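                     /* VLAN filtering is implemented through the TSU, so the
                      * feature is only advertised when a TSU is present.
                      */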
3209                 ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
3210         }
3211
3212         /* Need to init only the first port of the two sharing a TSU */
3213         if (devno % 2 == 0) {
3214                 if (mdp->cd->chip_reset)
3215                         mdp->cd->chip_reset(ndev);
3216
3217                 if (mdp->cd->tsu) {
3218                         /* TSU init (done once, by the first port only) */
3219                         sh_eth_tsu_init(mdp);
3220                 }
3221         }
3222
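             /* Some controllers (e.g. R-Car Gen2 Ether) need the RMII mode
              * bit set in RMIIMODE before the MAC is used; cpu_data flags
              * which ones.
              */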
3223         if (mdp->cd->rmiimode)
3224                 sh_eth_write(ndev, 0x1, RMIIMODE);
3225
3226         /* MDIO bus init */
3227         ret = sh_mdio_init(mdp, pd);
3228         if (ret) {
3229                 dev_err(&pdev->dev, "failed to initialise MDIO\n");
3230                 goto out_release;
3231         }
3232
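             /* Add the NAPI context (default weight of 64) before the net
              * device is registered so the poll handler is ready as soon as
              * the interface can be opened.
              */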
3233         netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);
3234
3235         /* register the network device */
3236         ret = register_netdev(ndev);
3237         if (ret)
3238                 goto out_napi_del;
3239
3240         /* print device information */
3241         netdev_info(ndev, "Base address at 0x%x, %pM, IRQ %d.\n",
3242                     (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
3243
3244         pm_runtime_put(&pdev->dev);
3245         platform_set_drvdata(pdev, ndev);
3246
3247         return ret;
3248
3249 out_napi_del:
3250         netif_napi_del(&mdp->napi);
3251         sh_mdio_release(mdp);
3252
3253 out_release:
3254         /* free the net device */
3255         if (ndev)
3256                 free_netdev(ndev);
3257
3258         pm_runtime_put(&pdev->dev);
3259         pm_runtime_disable(&pdev->dev);
3260         return ret;
3261 }
3262
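     /* Remove: tear down in the reverse order of probe. Unregister the net
      * device first so no new traffic can arrive, then drop NAPI, the MDIO
      * bus and runtime PM before freeing the net_device itself.
      */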
3263 static int sh_eth_drv_remove(struct platform_device *pdev)
3264 {
3265         struct net_device *ndev = platform_get_drvdata(pdev);
3266         struct sh_eth_private *mdp = netdev_priv(ndev);
3267
3268         unregister_netdev(ndev);
3269         netif_napi_del(&mdp->napi);
3270         sh_mdio_release(mdp);
3271         pm_runtime_disable(&pdev->dev);
3272         free_netdev(ndev);
3273
3274         return 0;
3275 }
3276
3277 #ifdef CONFIG_PM
3278 #ifdef CONFIG_PM_SLEEP
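     /* System sleep: closing the interface on suspend quiesces DMA and
      * releases the IRQ; resume simply reopens it, which reinitialises the
      * controller, so no explicit register save/restore is needed here.
      */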
3279 static int sh_eth_suspend(struct device *dev)
3280 {
3281         struct net_device *ndev = dev_get_drvdata(dev);
3282         int ret = 0;
3283
3284         if (netif_running(ndev)) {
3285                 netif_device_detach(ndev);
3286                 ret = sh_eth_close(ndev);
3287         }
3288
3289         return ret;
3290 }
3291
3292 static int sh_eth_resume(struct device *dev)
3293 {
3294         struct net_device *ndev = dev_get_drvdata(dev);
3295         int ret = 0;
3296
3297         if (netif_running(ndev)) {
3298                 ret = sh_eth_open(ndev);
3299                 if (ret < 0)
3300                         return ret;
3301                 netif_device_attach(ndev);
3302         }
3303
3304         return ret;
3305 }
3306 #endif
3307
3308 static int sh_eth_runtime_nop(struct device *dev)
3309 {
3310         /* Runtime PM callback shared between ->runtime_suspend()
3311          * and ->runtime_resume(). Simply returns success.
3312          *
3313          * This driver re-initializes all registers after
3314          * pm_runtime_get_sync() anyway so there is no need
3315          * to save and restore registers here.
3316          */
3317         return 0;
3318 }
3319
3320 static const struct dev_pm_ops sh_eth_dev_pm_ops = {
3321         SET_SYSTEM_SLEEP_PM_OPS(sh_eth_suspend, sh_eth_resume)
3322         SET_RUNTIME_PM_OPS(sh_eth_runtime_nop, sh_eth_runtime_nop, NULL)
3323 };
3324 #define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
3325 #else
3326 #define SH_ETH_PM_OPS NULL
3327 #endif
3328
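     /* Legacy platform-device ID table for non-DT platforms: the device name
      * selects the matching sh_eth_cpu_data, mirroring the DT match table
      * above.
      */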
3329 static struct platform_device_id sh_eth_id_table[] = {
3330         { "sh7619-ether", (kernel_ulong_t)&sh7619_data },
3331         { "sh771x-ether", (kernel_ulong_t)&sh771x_data },
3332         { "sh7724-ether", (kernel_ulong_t)&sh7724_data },
3333         { "sh7734-gether", (kernel_ulong_t)&sh7734_data },
3334         { "sh7757-ether", (kernel_ulong_t)&sh7757_data },
3335         { "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
3336         { "sh7763-gether", (kernel_ulong_t)&sh7763_data },
3337         { "r7s72100-ether", (kernel_ulong_t)&r7s72100_data },
3338         { "r8a7740-gether", (kernel_ulong_t)&r8a7740_data },
3339         { "r8a777x-ether", (kernel_ulong_t)&r8a777x_data },
3340         { "r8a7790-ether", (kernel_ulong_t)&r8a779x_data },
3341         { "r8a7791-ether", (kernel_ulong_t)&r8a779x_data },
3342         { "r8a7793-ether", (kernel_ulong_t)&r8a779x_data },
3343         { "r8a7794-ether", (kernel_ulong_t)&r8a779x_data },
3344         { }
3345 };
3346 MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
3347
3348 static struct platform_driver sh_eth_driver = {
3349         .probe = sh_eth_drv_probe,
3350         .remove = sh_eth_drv_remove,
3351         .id_table = sh_eth_id_table,
3352         .driver = {
3353                    .name = CARDNAME,
3354                    .pm = SH_ETH_PM_OPS,
3355                    .of_match_table = of_match_ptr(sh_eth_match_table),
3356         },
3357 };
3358
3359 module_platform_driver(sh_eth_driver);
3360
3361 MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
3362 MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
3363 MODULE_LICENSE("GPL v2");