// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for Xilinx TEMAC Ethernet device
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd.,  Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems,  David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 *
 * This is a driver for the Xilinx ll_temac ipcore which is often used
 * in the Virtex and Spartan series of chips.
 *
 * Notes:
 * - The ll_temac hardware uses indirect access for many of the TEMAC
 *   registers, including the MDIO bus.  However, indirect accesses to
 *   MDIO registers take considerably more clock cycles than accesses
 *   to TEMAC registers.
 *   MDIO accesses are long, so threads doing them should probably sleep
 *   rather than busywait.  However, since only one indirect access can be
 *   in progress at any given time, that means that *all* indirect accesses
 *   could end up sleeping (to wait for an MDIO access to complete).
 *   Fortunately none of the indirect accesses are on the 'hot' path for tx
 *   or rx, so this should be okay.
 *
 * TODO:
 * - Factor out locallink DMA code into separate driver
 * - Fix support for hardware checksumming.
 * - Testing.  Lots and lots of testing.
 *
 */

#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>      /* needed for sizeof(tcphdr) */
#include <linux/udp.h>      /* needed for sizeof(udphdr) */
#include <linux/phy.h>
#include <linux/in.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/processor.h>
#include <linux/platform_data/xilinx-ll-temac.h>

#include "ll_temac.h"

#define TX_BD_NUM   64
#define RX_BD_NUM   128

/* ---------------------------------------------------------------------
 * Low level register access functions
 */

static u32 _temac_ior_be(struct temac_local *lp, int offset)
{
        return ioread32be(lp->regs + offset);
}

static void _temac_iow_be(struct temac_local *lp, int offset, u32 value)
{
        iowrite32be(value, lp->regs + offset);
}

static u32 _temac_ior_le(struct temac_local *lp, int offset)
{
        return ioread32(lp->regs + offset);
}

static void _temac_iow_le(struct temac_local *lp, int offset, u32 value)
{
        iowrite32(value, lp->regs + offset);
}

static bool hard_acs_rdy(struct temac_local *lp)
{
        return temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK;
}

static bool hard_acs_rdy_or_timeout(struct temac_local *lp, ktime_t timeout)
{
        ktime_t cur = ktime_get();

        return hard_acs_rdy(lp) || ktime_after(cur, timeout);
}

/* Poll for maximum 20 ms.  This is similar to the 2 jiffies @ 100 Hz
 * that was used before, and should cover MDIO bus speed down to 3200
 * Hz.
 */
#define HARD_ACS_RDY_POLL_NS (20 * NSEC_PER_MSEC)

/**
 * temac_indirect_busywait - Wait for current indirect register access
 * to complete.
 */
int temac_indirect_busywait(struct temac_local *lp)
{
        ktime_t timeout = ktime_add_ns(ktime_get(), HARD_ACS_RDY_POLL_NS);

        spin_until_cond(hard_acs_rdy_or_timeout(lp, timeout));
        if (WARN_ON(!hard_acs_rdy(lp)))
                return -ETIMEDOUT;
        else
                return 0;
}

/**
 * temac_indirect_in32 - Indirect register read access.  This function
 * must be called without lp->indirect_lock being held.
 */
u32 temac_indirect_in32(struct temac_local *lp, int reg)
{
        unsigned long flags;
        int val;

        spin_lock_irqsave(lp->indirect_lock, flags);
        val = temac_indirect_in32_locked(lp, reg);
        spin_unlock_irqrestore(lp->indirect_lock, flags);
        return val;
}

/**
 * temac_indirect_in32_locked - Indirect register read access.  This
 * function must be called with lp->indirect_lock being held.  Use
 * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid
 * repeated lock/unlock and to ensure uninterrupted access to indirect
 * registers.
 */
u32 temac_indirect_in32_locked(struct temac_local *lp, int reg)
{
        /* This initial wait should normally not spin, as we always
         * try to wait for indirect access to complete before
         * releasing the indirect_lock.
         */
        if (WARN_ON(temac_indirect_busywait(lp)))
                return -ETIMEDOUT;
        /* Initiate read from indirect register */
        temac_iow(lp, XTE_CTL0_OFFSET, reg);
        /* Wait for indirect register access to complete.  We really
         * should not see timeouts, and could even end up causing
         * problem for following indirect access, so let's make a bit
         * of WARN noise.
         */
        if (WARN_ON(temac_indirect_busywait(lp)))
                return -ETIMEDOUT;
        /* Value is ready now */
        return temac_ior(lp, XTE_LSW0_OFFSET);
}

/**
 * temac_indirect_out32 - Indirect register write access.  This function
 * must be called without lp->indirect_lock being held.
 */
void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
{
        unsigned long flags;

        spin_lock_irqsave(lp->indirect_lock, flags);
        temac_indirect_out32_locked(lp, reg, value);
        spin_unlock_irqrestore(lp->indirect_lock, flags);
}

/**
 * temac_indirect_out32_locked - Indirect register write access.  This
 * function must be called with lp->indirect_lock being held.  Use
 * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid
 * repeated lock/unlock and to ensure uninterrupted access to indirect
 * registers.
 */
void temac_indirect_out32_locked(struct temac_local *lp, int reg, u32 value)
{
        /* As in temac_indirect_in32_locked(), we should normally not
         * spin here.  And if it happens, we actually end up silently
         * ignoring the write request.  Ouch.
         */
        if (WARN_ON(temac_indirect_busywait(lp)))
                return;
        /* Initiate write to indirect register */
        temac_iow(lp, XTE_LSW0_OFFSET, value);
        temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
        /* As in temac_indirect_in32_locked(), we should not see timeouts
         * here.  And if it happens, we continue before the write has
         * completed.  Not good.
         */
        WARN_ON(temac_indirect_busywait(lp));
}
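
/* A typical calling pattern for the *_locked accessors, as seen in
 * e.g. temac_device_reset() below: take lp->indirect_lock once around
 * a batch of register accesses instead of locking per access:
 *
 *      spin_lock_irqsave(lp->indirect_lock, flags);
 *      val = temac_indirect_in32_locked(lp, XTE_RXC1_OFFSET);
 *      temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET,
 *                                  val & ~XTE_RXC1_RXEN_MASK);
 *      spin_unlock_irqrestore(lp->indirect_lock, flags);
 */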

/**
 * temac_dma_in32_* - Memory mapped DMA read, these functions expect a
 * register input that is based on DCR word addresses which are then
 * converted to memory mapped byte addresses.  To be assigned to
 * lp->dma_in.
 */
static u32 temac_dma_in32_be(struct temac_local *lp, int reg)
{
        return ioread32be(lp->sdma_regs + (reg << 2));
}

static u32 temac_dma_in32_le(struct temac_local *lp, int reg)
{
        return ioread32(lp->sdma_regs + (reg << 2));
}

/**
 * temac_dma_out32_* - Memory mapped DMA write, these functions expect
 * a register input that is based on DCR word addresses which are then
 * converted to memory mapped byte addresses.  To be assigned to
 * lp->dma_out.
 */
static void temac_dma_out32_be(struct temac_local *lp, int reg, u32 value)
{
        iowrite32be(value, lp->sdma_regs + (reg << 2));
}

static void temac_dma_out32_le(struct temac_local *lp, int reg, u32 value)
{
        iowrite32(value, lp->sdma_regs + (reg << 2));
}
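
/* Note that the "reg << 2" in the accessors above converts a DCR-style
 * word address (one register per 32-bit word) into the byte offset
 * used by the memory mapped interface.
 */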

/* DMA register access functions can be DCR based or memory mapped.
 * The PowerPC 440 is DCR based, the PowerPC 405 and MicroBlaze are both
 * memory mapped.
 */
#ifdef CONFIG_PPC_DCR

/**
 * temac_dma_dcr_in - DCR based DMA read
 */
static u32 temac_dma_dcr_in(struct temac_local *lp, int reg)
{
        return dcr_read(lp->sdma_dcrs, reg);
}

/**
 * temac_dma_dcr_out - DCR based DMA write
 */
static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
{
        dcr_write(lp->sdma_dcrs, reg, value);
}

/**
 * temac_dcr_setup - If the DMA is DCR based, then setup the address and
 * I/O functions
 */
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
                                struct device_node *np)
{
        unsigned int dcrs;

        /* setup the dcr address mapping if it's in the device tree */

        dcrs = dcr_resource_start(np, 0);
        if (dcrs != 0) {
                lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
                lp->dma_in = temac_dma_dcr_in;
                lp->dma_out = temac_dma_dcr_out;
                dev_dbg(&op->dev, "DCR base: %x\n", dcrs);
                return 0;
        }
        /* no DCR in the device tree, indicate a failure */
        return -1;
}

#else

/*
 * temac_dcr_setup - This is a stub for when DCR is not supported,
 * such as with MicroBlaze and x86
 */
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
                                struct device_node *np)
{
        return -1;
}

#endif
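
/* Whichever variant gets selected at probe time, the rest of the
 * driver only uses the lp->dma_in/lp->dma_out function pointers, so
 * the DMA register access method (DCR or memory mapped, big- or
 * little-endian) stays transparent to the hot path.
 */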

/**
 * temac_dma_bd_release - Release buffer descriptor rings
 */
static void temac_dma_bd_release(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);
        int i;

        /* Reset Local Link (DMA) */
        lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);

        for (i = 0; i < RX_BD_NUM; i++) {
                if (!lp->rx_skb[i])
                        break;
                dma_unmap_single(ndev->dev.parent,
                                be32_to_cpu(lp->rx_bd_v[i].phys),
                                XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
                dev_kfree_skb(lp->rx_skb[i]);
        }
        if (lp->rx_bd_v)
                dma_free_coherent(ndev->dev.parent,
                                sizeof(*lp->rx_bd_v) * RX_BD_NUM,
                                lp->rx_bd_v, lp->rx_bd_p);
        if (lp->tx_bd_v)
                dma_free_coherent(ndev->dev.parent,
                                sizeof(*lp->tx_bd_v) * TX_BD_NUM,
                                lp->tx_bd_v, lp->tx_bd_p);
}

/**
 * temac_dma_bd_init - Setup buffer descriptor rings
 */
static int temac_dma_bd_init(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);
        struct sk_buff *skb;
        dma_addr_t skb_dma_addr;
        int i;

        lp->rx_skb = devm_kcalloc(&ndev->dev, RX_BD_NUM, sizeof(*lp->rx_skb),
                                  GFP_KERNEL);
        /* Don't go through the error path here: temac_dma_bd_release()
         * would dereference the NULL lp->rx_skb.
         */
        if (!lp->rx_skb)
                return -ENOMEM;

        /* allocate the tx and rx ring buffer descriptors. */
        /* returns a virtual address and a physical address. */
        lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
                                         sizeof(*lp->tx_bd_v) * TX_BD_NUM,
                                         &lp->tx_bd_p, GFP_KERNEL);
        if (!lp->tx_bd_v)
                goto out;

        lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
                                         sizeof(*lp->rx_bd_v) * RX_BD_NUM,
                                         &lp->rx_bd_p, GFP_KERNEL);
        if (!lp->rx_bd_v)
                goto out;

        for (i = 0; i < TX_BD_NUM; i++) {
                lp->tx_bd_v[i].next = cpu_to_be32(lp->tx_bd_p
                                + sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM));
        }

        for (i = 0; i < RX_BD_NUM; i++) {
                lp->rx_bd_v[i].next = cpu_to_be32(lp->rx_bd_p
                                + sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM));

                skb = netdev_alloc_skb_ip_align(ndev,
                                                XTE_MAX_JUMBO_FRAME_SIZE);
                if (!skb)
                        goto out;

                lp->rx_skb[i] = skb;
                /* returns physical address of skb->data */
                skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
                                              XTE_MAX_JUMBO_FRAME_SIZE,
                                              DMA_FROM_DEVICE);
                if (dma_mapping_error(ndev->dev.parent, skb_dma_addr))
                        goto out;
                lp->rx_bd_v[i].phys = cpu_to_be32(skb_dma_addr);
                lp->rx_bd_v[i].len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
                lp->rx_bd_v[i].app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
        }

        /* Configure DMA channel (irq setup) */
        lp->dma_out(lp, TX_CHNL_CTRL, lp->tx_chnl_ctrl |
                    0x00000400 | // Use 1 Bit Wide Counters. Currently Not Used!
                    CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
                    CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);
        lp->dma_out(lp, RX_CHNL_CTRL, lp->rx_chnl_ctrl |
                    CHNL_CTRL_IRQ_IOE |
                    CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
                    CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);

        /* Init descriptor indexes */
        lp->tx_bd_ci = 0;
        lp->tx_bd_next = 0;
        lp->tx_bd_tail = 0;
        lp->rx_bd_ci = 0;

        /* Enable RX DMA transfers */
        wmb();
        lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p);
        lp->dma_out(lp, RX_TAILDESC_PTR,
                       lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));

        /* Prepare for TX DMA transfer */
        lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);

        return 0;

out:
        temac_dma_bd_release(ndev);
        return -ENOMEM;
}
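
/* The rings built above are circular: each descriptor's "next" field
 * holds the physical address of the following one, and the
 * (i + 1) % *_BD_NUM arithmetic makes the last entry wrap back to the
 * first.  The DMA engine then follows these links from CURDESC_PTR,
 * stopping at the descriptor pointed to by TAILDESC_PTR.
 */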

/* ---------------------------------------------------------------------
 * net_device_ops
 */

static void temac_do_set_mac_address(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);
        unsigned long flags;

        /* Set up the unicast MAC address filter with the given MAC address */
        spin_lock_irqsave(lp->indirect_lock, flags);
        temac_indirect_out32_locked(lp, XTE_UAW0_OFFSET,
                                    (ndev->dev_addr[0]) |
                                    (ndev->dev_addr[1] << 8) |
                                    (ndev->dev_addr[2] << 16) |
                                    (ndev->dev_addr[3] << 24));
        /* There are reserved bits in EUAW1, so don't affect them.
         * Set MAC bits [47:32] in EUAW1.
         */
        temac_indirect_out32_locked(lp, XTE_UAW1_OFFSET,
                                    (ndev->dev_addr[4] & 0x000000ff) |
                                    (ndev->dev_addr[5] << 8));
        spin_unlock_irqrestore(lp->indirect_lock, flags);
}

static int temac_init_mac_address(struct net_device *ndev, const void *address)
{
        ether_addr_copy(ndev->dev_addr, address);
        if (!is_valid_ether_addr(ndev->dev_addr))
                eth_hw_addr_random(ndev);
        temac_do_set_mac_address(ndev);
        return 0;
}

static int temac_set_mac_address(struct net_device *ndev, void *p)
{
        struct sockaddr *addr = p;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;
        memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN);
        temac_do_set_mac_address(ndev);
        return 0;
}

static void temac_set_multicast_list(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);
        u32 multi_addr_msw, multi_addr_lsw;
        int i = 0;
        unsigned long flags;
        bool promisc_mode_disabled = false;

        if (ndev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
            (netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM)) {
                temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
                dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
                return;
        }

        spin_lock_irqsave(lp->indirect_lock, flags);

        if (!netdev_mc_empty(ndev)) {
                struct netdev_hw_addr *ha;

                netdev_for_each_mc_addr(ha, ndev) {
                        if (WARN_ON(i >= MULTICAST_CAM_TABLE_NUM))
                                break;
                        multi_addr_msw = ((ha->addr[3] << 24) |
                                          (ha->addr[2] << 16) |
                                          (ha->addr[1] << 8) |
                                          (ha->addr[0]));
                        temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET,
                                                    multi_addr_msw);
                        multi_addr_lsw = ((ha->addr[5] << 8) |
                                          (ha->addr[4]) | (i << 16));
                        temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET,
                                                    multi_addr_lsw);
                        i++;
                }
        }

        /* Clear all or remaining/unused address table entries */
        while (i < MULTICAST_CAM_TABLE_NUM) {
                temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET, 0);
                temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET, i << 16);
                i++;
        }

        /* Enable address filter block if currently disabled */
        if (temac_indirect_in32_locked(lp, XTE_AFM_OFFSET)
            & XTE_AFM_EPPRM_MASK) {
                temac_indirect_out32_locked(lp, XTE_AFM_OFFSET, 0);
                promisc_mode_disabled = true;
        }

        spin_unlock_irqrestore(lp->indirect_lock, flags);

        if (promisc_mode_disabled)
                dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
}
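
/* In the CAM accesses above, the table entry to update is selected by
 * the index shifted into the upper half of the MAW1 word (i << 16);
 * clearing an unused entry is therefore just a zero MAW0 write paired
 * with the same index in MAW1.
 */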

static struct temac_option {
        int flg;
        u32 opt;
        u32 reg;
        u32 m_or;
        u32 m_and;
} temac_options[] = {
        /* Turn on jumbo packet support for both Rx and Tx */
        {
                .opt = XTE_OPTION_JUMBO,
                .reg = XTE_TXC_OFFSET,
                .m_or = XTE_TXC_TXJMBO_MASK,
        },
        {
                .opt = XTE_OPTION_JUMBO,
                .reg = XTE_RXC1_OFFSET,
                .m_or = XTE_RXC1_RXJMBO_MASK,
        },
        /* Turn on VLAN packet support for both Rx and Tx */
        {
                .opt = XTE_OPTION_VLAN,
                .reg = XTE_TXC_OFFSET,
                .m_or = XTE_TXC_TXVLAN_MASK,
        },
        {
                .opt = XTE_OPTION_VLAN,
                .reg = XTE_RXC1_OFFSET,
                .m_or = XTE_RXC1_RXVLAN_MASK,
        },
        /* Turn on FCS stripping on receive packets */
        {
                .opt = XTE_OPTION_FCS_STRIP,
                .reg = XTE_RXC1_OFFSET,
                .m_or = XTE_RXC1_RXFCS_MASK,
        },
        /* Turn on FCS insertion on transmit packets */
        {
                .opt = XTE_OPTION_FCS_INSERT,
                .reg = XTE_TXC_OFFSET,
                .m_or = XTE_TXC_TXFCS_MASK,
        },
        /* Turn on length/type field checking on receive packets */
        {
                .opt = XTE_OPTION_LENTYPE_ERR,
                .reg = XTE_RXC1_OFFSET,
                .m_or = XTE_RXC1_RXLT_MASK,
        },
        /* Turn on flow control */
        {
                .opt = XTE_OPTION_FLOW_CONTROL,
                .reg = XTE_FCC_OFFSET,
                .m_or = XTE_FCC_RXFLO_MASK,
        },
        /* Turn on flow control */
        {
                .opt = XTE_OPTION_FLOW_CONTROL,
                .reg = XTE_FCC_OFFSET,
                .m_or = XTE_FCC_TXFLO_MASK,
        },
        /* Turn on promiscuous frame filtering (all frames are received) */
        {
                .opt = XTE_OPTION_PROMISC,
                .reg = XTE_AFM_OFFSET,
                .m_or = XTE_AFM_EPPRM_MASK,
        },
        /* Enable transmitter if not already enabled */
        {
                .opt = XTE_OPTION_TXEN,
                .reg = XTE_TXC_OFFSET,
                .m_or = XTE_TXC_TXEN_MASK,
        },
        /* Enable receiver if not already enabled */
        {
                .opt = XTE_OPTION_RXEN,
                .reg = XTE_RXC1_OFFSET,
                .m_or = XTE_RXC1_RXEN_MASK,
        },
        {}
};

/**
 * temac_setoptions
 */
static u32 temac_setoptions(struct net_device *ndev, u32 options)
{
        struct temac_local *lp = netdev_priv(ndev);
        struct temac_option *tp = &temac_options[0];
        int reg;
        unsigned long flags;

        spin_lock_irqsave(lp->indirect_lock, flags);
        while (tp->opt) {
                reg = temac_indirect_in32_locked(lp, tp->reg) & ~tp->m_or;
                if (options & tp->opt)
                        reg |= tp->m_or;
                /* Write back unconditionally, so that a cleared option
                 * bit actually gets cleared in the register.
                 */
                temac_indirect_out32_locked(lp, tp->reg, reg);
                tp++;
        }
        spin_unlock_irqrestore(lp->indirect_lock, flags);
        lp->options |= options;

        return 0;
}

/* Initialize temac */
static void temac_device_reset(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);
        u32 timeout;
        u32 val;
        unsigned long flags;

        /* Perform a software reset */

        /* 0x300 host enable bit ? */
        /* reset PHY through control register ?:1 */

        dev_dbg(&ndev->dev, "%s()\n", __func__);

        /* Reset the receiver and wait for it to finish reset */
        temac_indirect_out32(lp, XTE_RXC1_OFFSET, XTE_RXC1_RXRST_MASK);
        timeout = 1000;
        while (temac_indirect_in32(lp, XTE_RXC1_OFFSET) & XTE_RXC1_RXRST_MASK) {
                udelay(1);
                if (--timeout == 0) {
                        dev_err(&ndev->dev,
                                "temac_device_reset RX reset timeout!!\n");
                        break;
                }
        }

        /* Reset the transmitter and wait for it to finish reset */
        temac_indirect_out32(lp, XTE_TXC_OFFSET, XTE_TXC_TXRST_MASK);
        timeout = 1000;
        while (temac_indirect_in32(lp, XTE_TXC_OFFSET) & XTE_TXC_TXRST_MASK) {
                udelay(1);
                if (--timeout == 0) {
                        dev_err(&ndev->dev,
                                "temac_device_reset TX reset timeout!!\n");
                        break;
                }
        }

        /* Disable the receiver */
        spin_lock_irqsave(lp->indirect_lock, flags);
        val = temac_indirect_in32_locked(lp, XTE_RXC1_OFFSET);
        temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET,
                                    val & ~XTE_RXC1_RXEN_MASK);
        spin_unlock_irqrestore(lp->indirect_lock, flags);

        /* Reset Local Link (DMA) */
        lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
        timeout = 1000;
        while (lp->dma_in(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
                udelay(1);
                if (--timeout == 0) {
                        dev_err(&ndev->dev,
                                "temac_device_reset DMA reset timeout!!\n");
                        break;
                }
        }
        lp->dma_out(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);

        if (temac_dma_bd_init(ndev)) {
                dev_err(&ndev->dev,
                                "temac_device_reset descriptor allocation failed\n");
        }

        spin_lock_irqsave(lp->indirect_lock, flags);
        temac_indirect_out32_locked(lp, XTE_RXC0_OFFSET, 0);
        temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET, 0);
        temac_indirect_out32_locked(lp, XTE_TXC_OFFSET, 0);
        temac_indirect_out32_locked(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK);
        spin_unlock_irqrestore(lp->indirect_lock, flags);

        /* Sync default options with HW
         * but leave receiver and transmitter disabled.
         */
        temac_setoptions(ndev,
                         lp->options & ~(XTE_OPTION_TXEN | XTE_OPTION_RXEN));

        temac_do_set_mac_address(ndev);

        /* Set address filter table */
        temac_set_multicast_list(ndev);
        if (temac_setoptions(ndev, lp->options))
                dev_err(&ndev->dev, "Error setting TEMAC options\n");

        /* Init Driver variable */
        netif_trans_update(ndev); /* prevent tx timeout */
}

static void temac_adjust_link(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);
        struct phy_device *phy = ndev->phydev;
        u32 mii_speed;
        int link_state;
        unsigned long flags;

        /* hash together the state values to decide if something has changed */
        link_state = phy->speed | (phy->duplex << 1) | phy->link;

        if (lp->last_link != link_state) {
                spin_lock_irqsave(lp->indirect_lock, flags);
                mii_speed = temac_indirect_in32_locked(lp, XTE_EMCFG_OFFSET);
                mii_speed &= ~XTE_EMCFG_LINKSPD_MASK;

                switch (phy->speed) {
                case SPEED_1000: mii_speed |= XTE_EMCFG_LINKSPD_1000; break;
                case SPEED_100: mii_speed |= XTE_EMCFG_LINKSPD_100; break;
                case SPEED_10: mii_speed |= XTE_EMCFG_LINKSPD_10; break;
                }

                /* Write new speed setting out to TEMAC */
                temac_indirect_out32_locked(lp, XTE_EMCFG_OFFSET, mii_speed);
                spin_unlock_irqrestore(lp->indirect_lock, flags);

                lp->last_link = link_state;
                phy_print_status(phy);
        }
}

#ifdef CONFIG_64BIT

static void ptr_to_txbd(void *p, struct cdmac_bd *bd)
{
        bd->app3 = (u32)(((u64)p) >> 32);
        bd->app4 = (u32)((u64)p & 0xFFFFFFFF);
}

static void *ptr_from_txbd(struct cdmac_bd *bd)
{
        return (void *)(((u64)(bd->app3) << 32) | bd->app4);
}

#else

static void ptr_to_txbd(void *p, struct cdmac_bd *bd)
{
        bd->app4 = (u32)p;
}

static void *ptr_from_txbd(struct cdmac_bd *bd)
{
        return (void *)(bd->app4);
}

#endif
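
/* The helpers above stash the skb pointer in the otherwise unused
 * app3/app4 words of a transmit descriptor, so that
 * temac_start_xmit_done() can recover which skb a completed
 * descriptor belongs to without a separate ring of skb pointers.
 */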

static void temac_start_xmit_done(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);
        struct cdmac_bd *cur_p;
        unsigned int stat = 0;
        struct sk_buff *skb;

        cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
        stat = be32_to_cpu(cur_p->app0);

        while (stat & STS_CTRL_APP0_CMPLT) {
                dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
                                 be32_to_cpu(cur_p->len), DMA_TO_DEVICE);
                skb = (struct sk_buff *)ptr_from_txbd(cur_p);
                if (skb)
                        dev_consume_skb_irq(skb);
                cur_p->app0 = 0;
                cur_p->app1 = 0;
                cur_p->app2 = 0;
                cur_p->app3 = 0;
                cur_p->app4 = 0;

                ndev->stats.tx_packets++;
                ndev->stats.tx_bytes += be32_to_cpu(cur_p->len);

                lp->tx_bd_ci++;
                if (lp->tx_bd_ci >= TX_BD_NUM)
                        lp->tx_bd_ci = 0;

                cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
                stat = be32_to_cpu(cur_p->app0);
        }

        /* Matches barrier in temac_start_xmit */
        smp_mb();

        netif_wake_queue(ndev);
}

static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
{
        struct cdmac_bd *cur_p;
        int tail;

        tail = lp->tx_bd_tail;
        cur_p = &lp->tx_bd_v[tail];

        do {
                if (cur_p->app0)
                        return NETDEV_TX_BUSY;

                tail++;
                if (tail >= TX_BD_NUM)
                        tail = 0;

                cur_p = &lp->tx_bd_v[tail];
                num_frag--;
        } while (num_frag >= 0);

        return 0;
}
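
/* A nonzero app0 means the descriptor has not yet been reaped by
 * temac_start_xmit_done(), which clears app0 on completed descriptors.
 * The loop above therefore checks that enough consecutive free
 * descriptors are available before a frame is queued.
 */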

static netdev_tx_t
temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);
        struct cdmac_bd *cur_p;
        dma_addr_t start_p, tail_p, skb_dma_addr;
        int ii;
        unsigned long num_frag;
        skb_frag_t *frag;

        num_frag = skb_shinfo(skb)->nr_frags;
        frag = &skb_shinfo(skb)->frags[0];
        start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
        cur_p = &lp->tx_bd_v[lp->tx_bd_tail];

        if (temac_check_tx_bd_space(lp, num_frag + 1)) {
                if (netif_queue_stopped(ndev))
                        return NETDEV_TX_BUSY;

                netif_stop_queue(ndev);

                /* Matches barrier in temac_start_xmit_done */
                smp_mb();

                /* Space might have just been freed - check again */
                if (temac_check_tx_bd_space(lp, num_frag + 1))
                        return NETDEV_TX_BUSY;

                netif_wake_queue(ndev);
        }

        cur_p->app0 = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                unsigned int csum_start_off = skb_checksum_start_offset(skb);
                unsigned int csum_index_off = csum_start_off + skb->csum_offset;

                cur_p->app0 |= cpu_to_be32(0x000001); /* TX Checksum Enabled */
                cur_p->app1 = cpu_to_be32((csum_start_off << 16)
                                          | csum_index_off);
                cur_p->app2 = 0;  /* initial checksum seed */
        }

        cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_SOP);
        skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
                                      skb_headlen(skb), DMA_TO_DEVICE);
        cur_p->len = cpu_to_be32(skb_headlen(skb));
        if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent, skb_dma_addr)))
                return NETDEV_TX_BUSY;
        cur_p->phys = cpu_to_be32(skb_dma_addr);
        ptr_to_txbd((void *)skb, cur_p);

        for (ii = 0; ii < num_frag; ii++) {
                if (++lp->tx_bd_tail >= TX_BD_NUM)
                        lp->tx_bd_tail = 0;

                cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
                skb_dma_addr = dma_map_single(ndev->dev.parent,
                                              skb_frag_address(frag),
                                              skb_frag_size(frag),
                                              DMA_TO_DEVICE);
                if (dma_mapping_error(ndev->dev.parent, skb_dma_addr)) {
                        if (--lp->tx_bd_tail < 0)
                                lp->tx_bd_tail = TX_BD_NUM - 1;
                        cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
                        while (--ii >= 0) {
                                --frag;
                                dma_unmap_single(ndev->dev.parent,
                                                 be32_to_cpu(cur_p->phys),
                                                 skb_frag_size(frag),
                                                 DMA_TO_DEVICE);
                                if (--lp->tx_bd_tail < 0)
                                        lp->tx_bd_tail = TX_BD_NUM - 1;
                                cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
                        }
                        dma_unmap_single(ndev->dev.parent,
                                         be32_to_cpu(cur_p->phys),
                                         skb_headlen(skb), DMA_TO_DEVICE);
                        return NETDEV_TX_BUSY;
                }
                cur_p->phys = cpu_to_be32(skb_dma_addr);
                cur_p->len = cpu_to_be32(skb_frag_size(frag));
                cur_p->app0 = 0;
                frag++;
        }
        cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_EOP);

        tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
        lp->tx_bd_tail++;
        if (lp->tx_bd_tail >= TX_BD_NUM)
                lp->tx_bd_tail = 0;

        skb_tx_timestamp(skb);

        /* Kick off the transfer */
        wmb();
        lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */

        return NETDEV_TX_OK;
}
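
/* In temac_start_xmit() above, frame boundaries are marked with the
 * SOP flag on the first descriptor and the EOP flag on the last one;
 * the TX_TAILDESC_PTR write at the end is what actually hands the
 * frame over to the DMA engine.
 */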

static void ll_temac_recv(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);
        struct sk_buff *skb, *new_skb;
        unsigned int bdstat;
        struct cdmac_bd *cur_p;
        dma_addr_t tail_p, skb_dma_addr;
        int length;
        unsigned long flags;

        spin_lock_irqsave(&lp->rx_lock, flags);

        tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
        cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

        bdstat = be32_to_cpu(cur_p->app0);
        while ((bdstat & STS_CTRL_APP0_CMPLT)) {

                skb = lp->rx_skb[lp->rx_bd_ci];
                length = be32_to_cpu(cur_p->app4) & 0x3FFF;

                dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
                                 XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);

                skb_put(skb, length);
                skb->protocol = eth_type_trans(skb, ndev);
                skb_checksum_none_assert(skb);

                /* if we're doing rx csum offload, set it up */
                if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
                    (skb->protocol == htons(ETH_P_IP)) &&
                    (skb->len > 64)) {

                        /* Convert from device endianness (be32) to cpu
                         * endianness, and if necessary swap the bytes
                         * (back) for proper IP checksum byte order
                         * (be16).
                         */
                        skb->csum = htons(be32_to_cpu(cur_p->app3) & 0xFFFF);
                        skb->ip_summed = CHECKSUM_COMPLETE;
                }

                if (!skb_defer_rx_timestamp(skb))
                        netif_rx(skb);

                ndev->stats.rx_packets++;
                ndev->stats.rx_bytes += length;

                new_skb = netdev_alloc_skb_ip_align(ndev,
                                                XTE_MAX_JUMBO_FRAME_SIZE);
                if (!new_skb) {
                        spin_unlock_irqrestore(&lp->rx_lock, flags);
                        return;
                }

                cur_p->app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
                skb_dma_addr = dma_map_single(ndev->dev.parent, new_skb->data,
                                              XTE_MAX_JUMBO_FRAME_SIZE,
                                              DMA_FROM_DEVICE);
                cur_p->phys = cpu_to_be32(skb_dma_addr);
                cur_p->len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
                lp->rx_skb[lp->rx_bd_ci] = new_skb;

                lp->rx_bd_ci++;
                if (lp->rx_bd_ci >= RX_BD_NUM)
                        lp->rx_bd_ci = 0;

                cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
                bdstat = be32_to_cpu(cur_p->app0);
        }
        lp->dma_out(lp, RX_TAILDESC_PTR, tail_p);

        spin_unlock_irqrestore(&lp->rx_lock, flags);
}
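
/* The RX_TAILDESC_PTR write above returns the recycled descriptors to
 * the DMA engine, while rx_bd_ci tracks where the driver expects the
 * next completed receive descriptor to show up.
 */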

static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev)
{
        struct net_device *ndev = _ndev;
        struct temac_local *lp = netdev_priv(ndev);
        unsigned int status;

        status = lp->dma_in(lp, TX_IRQ_REG);
        lp->dma_out(lp, TX_IRQ_REG, status);

        if (status & (IRQ_COAL | IRQ_DLY))
                temac_start_xmit_done(lp->ndev);
        if (status & (IRQ_ERR | IRQ_DMAERR))
                dev_err_ratelimited(&ndev->dev,
                                    "TX error 0x%x TX_CHNL_STS=0x%08x\n",
                                    status, lp->dma_in(lp, TX_CHNL_STS));

        return IRQ_HANDLED;
}

static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev)
{
        struct net_device *ndev = _ndev;
        struct temac_local *lp = netdev_priv(ndev);
        unsigned int status;

        /* Read and clear the status registers */
        status = lp->dma_in(lp, RX_IRQ_REG);
        lp->dma_out(lp, RX_IRQ_REG, status);

        if (status & (IRQ_COAL | IRQ_DLY))
                ll_temac_recv(lp->ndev);
        if (status & (IRQ_ERR | IRQ_DMAERR))
                dev_err_ratelimited(&ndev->dev,
                                    "RX error 0x%x RX_CHNL_STS=0x%08x\n",
                                    status, lp->dma_in(lp, RX_CHNL_STS));

        return IRQ_HANDLED;
}

static int temac_open(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);
        struct phy_device *phydev = NULL;
        int rc;

        dev_dbg(&ndev->dev, "temac_open()\n");

        if (lp->phy_node) {
                phydev = of_phy_connect(lp->ndev, lp->phy_node,
                                        temac_adjust_link, 0, 0);
                if (!phydev) {
                        dev_err(lp->dev, "of_phy_connect() failed\n");
                        return -ENODEV;
                }
                phy_start(phydev);
        } else if (strlen(lp->phy_name) > 0) {
                phydev = phy_connect(lp->ndev, lp->phy_name, temac_adjust_link,
                                     lp->phy_interface);
                if (IS_ERR(phydev)) {
                        dev_err(lp->dev, "phy_connect() failed\n");
                        return PTR_ERR(phydev);
                }
                phy_start(phydev);
        }

        temac_device_reset(ndev);

        rc = request_irq(lp->tx_irq, ll_temac_tx_irq, 0, ndev->name, ndev);
        if (rc)
                goto err_tx_irq;
        rc = request_irq(lp->rx_irq, ll_temac_rx_irq, 0, ndev->name, ndev);
        if (rc)
                goto err_rx_irq;

        return 0;

 err_rx_irq:
        free_irq(lp->tx_irq, ndev);
 err_tx_irq:
        if (phydev)
                phy_disconnect(phydev);
        dev_err(lp->dev, "request_irq() failed\n");
        return rc;
}

static int temac_stop(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);
        struct phy_device *phydev = ndev->phydev;

        dev_dbg(&ndev->dev, "temac_close()\n");

        free_irq(lp->tx_irq, ndev);
        free_irq(lp->rx_irq, ndev);

        if (phydev)
                phy_disconnect(phydev);

        temac_dma_bd_release(ndev);

        return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void
temac_poll_controller(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);

        disable_irq(lp->tx_irq);
        disable_irq(lp->rx_irq);

        ll_temac_rx_irq(lp->tx_irq, ndev);
        ll_temac_tx_irq(lp->rx_irq, ndev);

        enable_irq(lp->tx_irq);
        enable_irq(lp->rx_irq);
}
#endif
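
/* Neither IRQ handler uses its irq number argument, so the swapped
 * irq arguments in temac_poll_controller() above are harmless; the
 * calls simply run both handlers once with the interrupts masked.
 */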

static const struct net_device_ops temac_netdev_ops = {
        .ndo_open = temac_open,
        .ndo_stop = temac_stop,
        .ndo_start_xmit = temac_start_xmit,
        .ndo_set_rx_mode = temac_set_multicast_list,
        .ndo_set_mac_address = temac_set_mac_address,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_do_ioctl = phy_do_ioctl_running,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = temac_poll_controller,
#endif
};

/* ---------------------------------------------------------------------
 * SYSFS device attributes
 */
static ssize_t temac_show_llink_regs(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        struct net_device *ndev = dev_get_drvdata(dev);
        struct temac_local *lp = netdev_priv(ndev);
        int i, len = 0;

        for (i = 0; i < 0x11; i++)
                len += sprintf(buf + len, "%.8x%s", lp->dma_in(lp, i),
                               (i % 8) == 7 ? "\n" : " ");
        len += sprintf(buf + len, "\n");

        return len;
}

static DEVICE_ATTR(llink_regs, 0440, temac_show_llink_regs, NULL);

static struct attribute *temac_device_attrs[] = {
        &dev_attr_llink_regs.attr,
        NULL,
};

static const struct attribute_group temac_attr_group = {
        .attrs = temac_device_attrs,
};

/* ethtool support */
static const struct ethtool_ops temac_ethtool_ops = {
        .nway_reset = phy_ethtool_nway_reset,
        .get_link = ethtool_op_get_link,
        .get_ts_info = ethtool_op_get_ts_info,
        .get_link_ksettings = phy_ethtool_get_link_ksettings,
        .set_link_ksettings = phy_ethtool_set_link_ksettings,
};

static int temac_probe(struct platform_device *pdev)
{
        struct ll_temac_platform_data *pdata = dev_get_platdata(&pdev->dev);
        struct device_node *temac_np = dev_of_node(&pdev->dev), *dma_np;
        struct temac_local *lp;
        struct net_device *ndev;
        struct resource *res;
        const void *addr;
        __be32 *p;
        bool little_endian;
        int rc = 0;

        /* Init network device structure */
        ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*lp));
        if (!ndev)
                return -ENOMEM;

        platform_set_drvdata(pdev, ndev);
        SET_NETDEV_DEV(ndev, &pdev->dev);
        ndev->features = NETIF_F_SG;
        ndev->netdev_ops = &temac_netdev_ops;
        ndev->ethtool_ops = &temac_ethtool_ops;
#if 0
        ndev->features |= NETIF_F_IP_CSUM; /* Can checksum TCP/UDP over IPv4. */
        ndev->features |= NETIF_F_HW_CSUM; /* Can checksum all the packets. */
        ndev->features |= NETIF_F_IPV6_CSUM; /* Can checksum IPV6 TCP/UDP */
        ndev->features |= NETIF_F_HIGHDMA; /* Can DMA to high memory. */
        ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; /* Transmit VLAN hw accel */
        ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; /* Receive VLAN hw acceleration */
        ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; /* Receive VLAN filtering */
        ndev->features |= NETIF_F_VLAN_CHALLENGED; /* cannot handle VLAN pkts */
        ndev->features |= NETIF_F_GSO; /* Enable software GSO. */
        ndev->features |= NETIF_F_MULTI_QUEUE; /* Has multiple TX/RX queues */
        ndev->features |= NETIF_F_LRO; /* large receive offload */
#endif

        /* setup temac private info structure */
        lp = netdev_priv(ndev);
        lp->ndev = ndev;
        lp->dev = &pdev->dev;
        lp->options = XTE_OPTION_DEFAULTS;
        spin_lock_init(&lp->rx_lock);

        /* Setup mutex for synchronization of indirect register access */
        if (pdata) {
                if (!pdata->indirect_lock) {
                        dev_err(&pdev->dev,
                                "indirect_lock missing in platform_data\n");
                        return -EINVAL;
                }
                lp->indirect_lock = pdata->indirect_lock;
        } else {
                lp->indirect_lock = devm_kmalloc(&pdev->dev,
                                                 sizeof(*lp->indirect_lock),
                                                 GFP_KERNEL);
                if (!lp->indirect_lock)
                        return -ENOMEM;
                spin_lock_init(lp->indirect_lock);
        }

        /* map device registers */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        lp->regs = devm_ioremap(&pdev->dev, res->start,
                                        resource_size(res));
        /* devm_ioremap() returns NULL on failure, not an ERR_PTR */
        if (!lp->regs) {
                dev_err(&pdev->dev, "could not map TEMAC registers\n");
                return -ENOMEM;
        }

        /* Select register access functions with the specified
         * endianness mode.  Default for OF devices is big-endian.
         */
        little_endian = false;
        if (temac_np) {
                if (of_get_property(temac_np, "little-endian", NULL))
                        little_endian = true;
        } else if (pdata) {
                little_endian = pdata->reg_little_endian;
        }
        if (little_endian) {
                lp->temac_ior = _temac_ior_le;
                lp->temac_iow = _temac_iow_le;
        } else {
                lp->temac_ior = _temac_ior_be;
                lp->temac_iow = _temac_iow_be;
        }

        /* Setup checksum offload, but default to off if not specified */
        lp->temac_features = 0;
        if (temac_np) {
                p = (__be32 *)of_get_property(temac_np, "xlnx,txcsum", NULL);
                if (p && be32_to_cpu(*p))
                        lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
                p = (__be32 *)of_get_property(temac_np, "xlnx,rxcsum", NULL);
                if (p && be32_to_cpu(*p))
                        lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
        } else if (pdata) {
                if (pdata->txcsum)
                        lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
                if (pdata->rxcsum)
                        lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
        }
        if (lp->temac_features & TEMAC_FEATURE_TX_CSUM)
                /* Can checksum TCP/UDP over IPv4. */
                ndev->features |= NETIF_F_IP_CSUM;

        /* Setup LocalLink DMA */
        if (temac_np) {
                /* Find the DMA node, map the DMA registers, and
                 * decode the DMA IRQs.
                 */
                dma_np = of_parse_phandle(temac_np, "llink-connected", 0);
                if (!dma_np) {
                        dev_err(&pdev->dev, "could not find DMA node\n");
                        return -ENODEV;
                }

                /* Setup the DMA register accesses, could be DCR or
                 * memory mapped.
                 */
                if (temac_dcr_setup(lp, pdev, dma_np)) {
                        /* no DCR in the device tree, try non-DCR */
                        lp->sdma_regs = devm_of_iomap(&pdev->dev, dma_np, 0,
                                                      NULL);
                        if (IS_ERR(lp->sdma_regs)) {
                                dev_err(&pdev->dev,
                                        "unable to map DMA registers\n");
                                of_node_put(dma_np);
                                return PTR_ERR(lp->sdma_regs);
                        }
                        if (of_get_property(dma_np, "little-endian", NULL)) {
                                lp->dma_in = temac_dma_in32_le;
                                lp->dma_out = temac_dma_out32_le;
                        } else {
                                lp->dma_in = temac_dma_in32_be;
                                lp->dma_out = temac_dma_out32_be;
                        }
                        dev_dbg(&pdev->dev, "MEM base: %p\n", lp->sdma_regs);
                }

                /* Get DMA RX and TX interrupts */
                lp->rx_irq = irq_of_parse_and_map(dma_np, 0);
                lp->tx_irq = irq_of_parse_and_map(dma_np, 1);

                /* Use defaults for IRQ delay/coalescing setup.  These
                 * are configuration values, so they do not belong in
                 * the device tree.
                 */
                lp->tx_chnl_ctrl = 0x10220000;
                lp->rx_chnl_ctrl = 0xff070000;

                /* Finished with the DMA node; drop the reference */
                of_node_put(dma_np);
        } else if (pdata) {
                /* 2nd memory resource specifies DMA registers */
                res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
                lp->sdma_regs = devm_ioremap(&pdev->dev, res->start,
                                             resource_size(res));
                /* devm_ioremap() returns NULL on failure, not an ERR_PTR */
                if (!lp->sdma_regs) {
                        dev_err(&pdev->dev,
                                "could not map DMA registers\n");
                        return -ENOMEM;
                }
                if (pdata->dma_little_endian) {
                        lp->dma_in = temac_dma_in32_le;
                        lp->dma_out = temac_dma_out32_le;
                } else {
                        lp->dma_in = temac_dma_in32_be;
                        lp->dma_out = temac_dma_out32_be;
                }

                /* Get DMA RX and TX interrupts */
                lp->rx_irq = platform_get_irq(pdev, 0);
                lp->tx_irq = platform_get_irq(pdev, 1);

                /* IRQ delay/coalescing setup */
                if (pdata->tx_irq_timeout || pdata->tx_irq_count)
                        lp->tx_chnl_ctrl = (pdata->tx_irq_timeout << 24) |
                                (pdata->tx_irq_count << 16);
                else
                        lp->tx_chnl_ctrl = 0x10220000;
                if (pdata->rx_irq_timeout || pdata->rx_irq_count)
                        lp->rx_chnl_ctrl = (pdata->rx_irq_timeout << 24) |
                                (pdata->rx_irq_count << 16);
                else
                        lp->rx_chnl_ctrl = 0xff070000;
        }
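
        /* In both branches above, the channel control words appear to
         * encode the interrupt coalescing setup as (timeout << 24) |
         * (count << 16), which is also how the 0x10220000 and
         * 0xff070000 defaults decode.
         */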

        /* Bail out if the DMA RX and TX interrupts could not be obtained */
        if (lp->rx_irq < 0) {
                if (lp->rx_irq != -EPROBE_DEFER)
                        dev_err(&pdev->dev, "could not get DMA RX irq\n");
                return lp->rx_irq;
        }
        if (lp->tx_irq < 0) {
                if (lp->tx_irq != -EPROBE_DEFER)
                        dev_err(&pdev->dev, "could not get DMA TX irq\n");
                return lp->tx_irq;
        }

        if (temac_np) {
                /* Retrieve the MAC address */
                addr = of_get_mac_address(temac_np);
                if (IS_ERR(addr)) {
                        dev_err(&pdev->dev, "could not find MAC address\n");
                        return -ENODEV;
                }
                temac_init_mac_address(ndev, addr);
        } else if (pdata) {
                temac_init_mac_address(ndev, pdata->mac_addr);
        }

        rc = temac_mdio_setup(lp, pdev);
        if (rc)
                dev_warn(&pdev->dev, "error registering MDIO bus\n");

        if (temac_np) {
                lp->phy_node = of_parse_phandle(temac_np, "phy-handle", 0);
                if (lp->phy_node)
                        dev_dbg(lp->dev, "using PHY node %pOF\n", lp->phy_node);
        } else if (pdata) {
                snprintf(lp->phy_name, sizeof(lp->phy_name),
                         PHY_ID_FMT, lp->mii_bus->id, pdata->phy_addr);
                lp->phy_interface = pdata->phy_interface;
        }

        /* Add the device attributes */
        rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group);
        if (rc) {
                dev_err(lp->dev, "Error creating sysfs files\n");
                goto err_sysfs_create;
        }

        rc = register_netdev(lp->ndev);
        if (rc) {
                dev_err(lp->dev, "register_netdev() error (%i)\n", rc);
                goto err_register_ndev;
        }

        return 0;

err_register_ndev:
        sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
err_sysfs_create:
        if (lp->phy_node)
                of_node_put(lp->phy_node);
        temac_mdio_teardown(lp);
        return rc;
}

static int temac_remove(struct platform_device *pdev)
{
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct temac_local *lp = netdev_priv(ndev);

        unregister_netdev(ndev);
        sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
        if (lp->phy_node)
                of_node_put(lp->phy_node);
        temac_mdio_teardown(lp);
        return 0;
}

static const struct of_device_id temac_of_match[] = {
        { .compatible = "xlnx,xps-ll-temac-1.01.b", },
        { .compatible = "xlnx,xps-ll-temac-2.00.a", },
        { .compatible = "xlnx,xps-ll-temac-2.02.a", },
        { .compatible = "xlnx,xps-ll-temac-2.03.a", },
        {},
};
MODULE_DEVICE_TABLE(of, temac_of_match);

static struct platform_driver temac_driver = {
        .probe = temac_probe,
        .remove = temac_remove,
        .driver = {
                .name = "xilinx_temac",
                .of_match_table = temac_of_match,
        },
};

module_platform_driver(temac_driver);

MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver");
MODULE_AUTHOR("Yoshio Kashiwagi");
MODULE_LICENSE("GPL");