1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /* Copyright 2014-2016 Freescale Semiconductor Inc.
3 * Copyright 2016-2017 NXP
4 */
5 #include <linux/init.h>
6 #include <linux/module.h>
7 #include <linux/platform_device.h>
8 #include <linux/etherdevice.h>
9 #include <linux/of_net.h>
10 #include <linux/interrupt.h>
11 #include <linux/msi.h>
12 #include <linux/kthread.h>
13 #include <linux/iommu.h>
14 #include <linux/net_tstamp.h>
15 #include <linux/fsl/mc.h>
16 #include <linux/bpf.h>
17 #include <linux/bpf_trace.h>
20 #include "dpaa2-eth.h"
22 /* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa2-eth files
23 * using trace events only need to #include <trace/events/sched.h>
24 */
25 #define CREATE_TRACE_POINTS
26 #include "dpaa2-eth-trace.h"
28 MODULE_LICENSE("Dual BSD/GPL");
29 MODULE_AUTHOR("Freescale Semiconductor, Inc");
30 MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
32 static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
35 phys_addr_t phys_addr;
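/* With an IOMMU/SMMU active, addresses received from hardware are IOVAs and
 * must be translated to physical addresses; without an IOMMU domain the
 * address is already physical.
 */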
37 phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
39 return phys_to_virt(phys_addr);
42 static void validate_rx_csum(struct dpaa2_eth_priv *priv,
46 skb_checksum_none_assert(skb);
48 /* HW checksum validation is disabled, nothing to do here */
49 if (!(priv->net_dev->features & NETIF_F_RXCSUM))
52 /* Read checksum validation bits */
53 if (!((fd_status & DPAA2_FAS_L3CV) &&
54 (fd_status & DPAA2_FAS_L4CV)))
57 /* Inform the stack there's no need to compute L3/L4 csum anymore */
58 skb->ip_summed = CHECKSUM_UNNECESSARY;
61 /* Free a received FD.
62 * Not to be used for Tx conf FDs or on any other paths.
63 */
64 static void free_rx_fd(struct dpaa2_eth_priv *priv,
65 const struct dpaa2_fd *fd,
68 struct device *dev = priv->net_dev->dev.parent;
69 dma_addr_t addr = dpaa2_fd_get_addr(fd);
70 u8 fd_format = dpaa2_fd_get_format(fd);
71 struct dpaa2_sg_entry *sgt;
75 /* If single buffer frame, just free the data buffer */
76 if (fd_format == dpaa2_fd_single)
78 else if (fd_format != dpaa2_fd_sg)
79 /* We don't support any other format */
82 /* For S/G frames, we first need to free all SG entries
83 * except the first one, which was taken care of already
84 */
85 sgt = vaddr + dpaa2_fd_get_offset(fd);
86 for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
87 addr = dpaa2_sg_get_addr(&sgt[i]);
88 sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
89 dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
92 free_pages((unsigned long)sg_vaddr, 0);
93 if (dpaa2_sg_is_final(&sgt[i]))
98 free_pages((unsigned long)vaddr, 0);
101 /* Build a linear skb based on a single-buffer frame descriptor */
102 static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch,
103 const struct dpaa2_fd *fd,
106 struct sk_buff *skb = NULL;
107 u16 fd_offset = dpaa2_fd_get_offset(fd);
108 u32 fd_length = dpaa2_fd_get_len(fd);
112 skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
116 skb_reserve(skb, fd_offset);
117 skb_put(skb, fd_length);
122 /* Build a non linear (fragmented) skb based on a S/G table */
123 static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
124 struct dpaa2_eth_channel *ch,
125 struct dpaa2_sg_entry *sgt)
127 struct sk_buff *skb = NULL;
128 struct device *dev = priv->net_dev->dev.parent;
133 struct page *page, *head_page;
137 for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
138 struct dpaa2_sg_entry *sge = &sgt[i];
140 /* NOTE: We only support SG entries in dpaa2_sg_single format,
141 * but this is the only format we may receive from HW anyway
142 */
144 /* Get the address and length from the S/G entry */
145 sg_addr = dpaa2_sg_get_addr(sge);
146 sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
147 dma_unmap_page(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
150 sg_length = dpaa2_sg_get_len(sge);
153 /* We build the skb around the first data buffer */
154 skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
155 if (unlikely(!skb)) {
156 /* Free the first SG entry now, since we already
157 * unmapped it and obtained the virtual address
158 */
159 free_pages((unsigned long)sg_vaddr, 0);
161 /* We still need to subtract the buffers used
162 * by this FD from our software counter
163 */
164 while (!dpaa2_sg_is_final(&sgt[i]) &&
165 i < DPAA2_ETH_MAX_SG_ENTRIES)
170 sg_offset = dpaa2_sg_get_offset(sge);
171 skb_reserve(skb, sg_offset);
172 skb_put(skb, sg_length);
174 /* Rest of the data buffers are stored as skb frags */
175 page = virt_to_page(sg_vaddr);
176 head_page = virt_to_head_page(sg_vaddr);
178 /* Offset in page (which may be compound).
179 * Data in subsequent SG entries is stored from the
180 * beginning of the buffer, so we don't need to add the
181 * sg_offset.
182 */
183 page_offset = ((unsigned long)sg_vaddr &
185 (page_address(page) - page_address(head_page));
187 skb_add_rx_frag(skb, i - 1, head_page, page_offset,
188 sg_length, DPAA2_ETH_RX_BUF_SIZE);
191 if (dpaa2_sg_is_final(sge))
195 WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");
197 /* Count all data buffers + SG table buffer */
198 ch->buf_count -= i + 2;
203 /* Free buffers acquired from the buffer pool or which were meant to
204 * be released in the pool
205 */
206 static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
208 struct device *dev = priv->net_dev->dev.parent;
212 for (i = 0; i < count; i++) {
213 vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
214 dma_unmap_page(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
216 free_pages((unsigned long)vaddr, 0);
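/* Recycle a buffer dropped by the XDP program: drops are batched per channel
 * and released back to the buffer pool DPAA2_ETH_BUFS_PER_CMD at a time;
 * buffers that cannot be released are freed to the page allocator instead.
 */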
220 static void xdp_release_buf(struct dpaa2_eth_priv *priv,
221 struct dpaa2_eth_channel *ch,
226 ch->xdp.drop_bufs[ch->xdp.drop_cnt++] = addr;
227 if (ch->xdp.drop_cnt < DPAA2_ETH_BUFS_PER_CMD)
230 while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
232 ch->xdp.drop_cnt)) == -EBUSY)
236 free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt);
237 ch->buf_count -= ch->xdp.drop_cnt;
240 ch->xdp.drop_cnt = 0;
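/* Enqueue a frame for XDP_TX. The frame annotation instructs hardware to
 * release the buffer straight back into the pool after transmission, so no
 * Tx confirmation is generated for XDP_TX frames.
 */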
243 static int xdp_enqueue(struct dpaa2_eth_priv *priv, struct dpaa2_fd *fd,
244 void *buf_start, u16 queue_id)
246 struct dpaa2_eth_fq *fq;
247 struct dpaa2_faead *faead;
251 /* Mark the egress frame hardware annotation area as valid */
252 frc = dpaa2_fd_get_frc(fd);
253 dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
254 dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);
256 /* Instruct hardware to release the FD buffer directly into
257 * the buffer pool once transmission is completed, instead of
258 * sending a Tx confirmation frame to us
259 */
260 ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
261 faead = dpaa2_get_faead(buf_start, false);
262 faead->ctrl = cpu_to_le32(ctrl);
263 faead->conf_fqid = 0;
265 fq = &priv->fq[queue_id];
266 for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
267 err = priv->enqueue(priv, fq, fd, 0);
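/* Run the channel's XDP program, if any, on a received frame and act on the
 * verdict (PASS, DROP, TX, REDIRECT). Returns the XDP action so the caller
 * knows whether to continue building an skb.
 */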
275 static u32 run_xdp(struct dpaa2_eth_priv *priv,
276 struct dpaa2_eth_channel *ch,
277 struct dpaa2_eth_fq *rx_fq,
278 struct dpaa2_fd *fd, void *vaddr)
280 dma_addr_t addr = dpaa2_fd_get_addr(fd);
281 struct rtnl_link_stats64 *percpu_stats;
282 struct bpf_prog *xdp_prog;
284 u32 xdp_act = XDP_PASS;
287 percpu_stats = this_cpu_ptr(priv->percpu_stats);
291 xdp_prog = READ_ONCE(ch->xdp.prog);
295 xdp.data = vaddr + dpaa2_fd_get_offset(fd);
296 xdp.data_end = xdp.data + dpaa2_fd_get_len(fd);
297 xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
298 xdp_set_data_meta_invalid(&xdp);
299 xdp.rxq = &ch->xdp_rxq;
301 xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
303 /* xdp.data pointer may have changed */
304 dpaa2_fd_set_offset(fd, xdp.data - vaddr);
305 dpaa2_fd_set_len(fd, xdp.data_end - xdp.data);
311 err = xdp_enqueue(priv, fd, vaddr, rx_fq->flowid);
313 xdp_release_buf(priv, ch, addr);
314 percpu_stats->tx_errors++;
315 ch->stats.xdp_tx_err++;
317 percpu_stats->tx_packets++;
318 percpu_stats->tx_bytes += dpaa2_fd_get_len(fd);
323 bpf_warn_invalid_xdp_action(xdp_act);
326 trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
329 xdp_release_buf(priv, ch, addr);
330 ch->stats.xdp_drop++;
333 dma_unmap_page(priv->net_dev->dev.parent, addr,
334 DPAA2_ETH_RX_BUF_SIZE, DMA_BIDIRECTIONAL);
336 xdp.data_hard_start = vaddr;
337 err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
339 ch->stats.xdp_drop++;
341 ch->stats.xdp_redirect++;
345 ch->xdp.res |= xdp_act;
351 /* Main Rx frame processing routine */
352 static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
353 struct dpaa2_eth_channel *ch,
354 const struct dpaa2_fd *fd,
355 struct dpaa2_eth_fq *fq)
357 dma_addr_t addr = dpaa2_fd_get_addr(fd);
358 u8 fd_format = dpaa2_fd_get_format(fd);
361 struct rtnl_link_stats64 *percpu_stats;
362 struct dpaa2_eth_drv_stats *percpu_extras;
363 struct device *dev = priv->net_dev->dev.parent;
364 struct dpaa2_fas *fas;
370 trace_dpaa2_rx_fd(priv->net_dev, fd);
372 vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
373 dma_sync_single_for_cpu(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
376 fas = dpaa2_get_fas(vaddr, false);
378 buf_data = vaddr + dpaa2_fd_get_offset(fd);
381 percpu_stats = this_cpu_ptr(priv->percpu_stats);
382 percpu_extras = this_cpu_ptr(priv->percpu_extras);
384 if (fd_format == dpaa2_fd_single) {
385 xdp_act = run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
386 if (xdp_act != XDP_PASS) {
387 percpu_stats->rx_packets++;
388 percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
392 dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
394 skb = build_linear_skb(ch, fd, vaddr);
395 } else if (fd_format == dpaa2_fd_sg) {
396 WARN_ON(priv->xdp_prog);
398 dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
400 skb = build_frag_skb(priv, ch, buf_data);
401 free_pages((unsigned long)vaddr, 0);
402 percpu_extras->rx_sg_frames++;
403 percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
405 /* We don't support any other format */
406 goto err_frame_format;
414 /* Get the timestamp value */
415 if (priv->rx_tstamp) {
416 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
417 __le64 *ts = dpaa2_get_ts(vaddr, false);
420 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
422 ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
423 shhwtstamps->hwtstamp = ns_to_ktime(ns);
426 /* Check if we need to validate the L4 csum */
427 if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
428 status = le32_to_cpu(fas->status);
429 validate_rx_csum(priv, status, skb);
432 skb->protocol = eth_type_trans(skb, priv->net_dev);
433 skb_record_rx_queue(skb, fq->flowid);
435 percpu_stats->rx_packets++;
436 percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
438 napi_gro_receive(&ch->napi, skb);
443 free_rx_fd(priv, fd, vaddr);
445 percpu_stats->rx_dropped++;
448 /* Consume all frames pull-dequeued into the store. This is the simplest way to
449 * make sure we don't accidentally issue another volatile dequeue which would
450 * overwrite (leak) frames already in the store.
451 *
452 * Observance of NAPI budget is not our concern, leaving that to the caller.
453 */
454 static int consume_frames(struct dpaa2_eth_channel *ch,
455 struct dpaa2_eth_fq **src)
457 struct dpaa2_eth_priv *priv = ch->priv;
458 struct dpaa2_eth_fq *fq = NULL;
460 const struct dpaa2_fd *fd;
465 dq = dpaa2_io_store_next(ch->store, &is_last);
467 /* If we're here, we *must* have placed a
468 * volatile dequeue command, so keep reading through
469 * the store until we get some sort of valid response
470 * token (either a valid frame or an "empty dequeue")
471 */
475 fd = dpaa2_dq_fd(dq);
476 fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
478 fq->consume(priv, ch, fd, fq);
485 fq->stats.frames += cleaned;
487 /* A dequeue operation only pulls frames from a single queue
488 * into the store. Return the frame queue as an out param.
489 */
496 /* Configure the egress frame annotation for timestamp update */
497 static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
499 struct dpaa2_faead *faead;
502 /* Mark the egress frame annotation area as valid */
503 frc = dpaa2_fd_get_frc(fd);
504 dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
506 /* Set hardware annotation size */
507 ctrl = dpaa2_fd_get_ctrl(fd);
508 dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);
510 /* enable UPD (update prepended data) bit in FAEAD field of
511 * hardware frame annotation area
512 */
513 ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
514 faead = dpaa2_get_faead(buf_start, true);
515 faead->ctrl = cpu_to_le32(ctrl);
518 /* Create a frame descriptor based on a fragmented skb */
519 static int build_sg_fd(struct dpaa2_eth_priv *priv,
523 struct device *dev = priv->net_dev->dev.parent;
524 void *sgt_buf = NULL;
526 int nr_frags = skb_shinfo(skb)->nr_frags;
527 struct dpaa2_sg_entry *sgt;
530 struct scatterlist *scl, *crt_scl;
533 struct dpaa2_eth_swa *swa;
535 /* Create and map scatterlist.
536 * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
537 * to go beyond nr_frags+1.
538 * Note: We don't support chained scatterlists
539 */
540 if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
543 scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
547 sg_init_table(scl, nr_frags + 1);
548 num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
549 num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
550 if (unlikely(!num_dma_bufs)) {
552 goto dma_map_sg_failed;
555 /* Prepare the HW SGT structure */
556 sgt_buf_size = priv->tx_data_offset +
557 sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
558 sgt_buf = netdev_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
559 if (unlikely(!sgt_buf)) {
561 goto sgt_buf_alloc_failed;
563 sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
564 memset(sgt_buf, 0, sgt_buf_size);
566 sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
568 /* Fill in the HW SGT structure.
570 * sgt_buf is zeroed out, so the following fields are implicit
571 * in all sgt entries:
572 * - offset == 0
573 * - format is 'dpaa2_sg_single'
574 */
575 for_each_sg(scl, crt_scl, num_dma_bufs, i) {
576 dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
577 dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
579 dpaa2_sg_set_final(&sgt[i - 1], true);
581 /* Store the skb backpointer in the SGT buffer.
582 * Fit the scatterlist and the number of buffers alongside the
583 * skb backpointer in the software annotation area. We'll need
584 * all of them on Tx Conf.
585 */
586 swa = (struct dpaa2_eth_swa *)sgt_buf;
587 swa->type = DPAA2_ETH_SWA_SG;
590 swa->sg.num_sg = num_sg;
591 swa->sg.sgt_size = sgt_buf_size;
593 /* Separately map the SGT buffer */
594 addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
595 if (unlikely(dma_mapping_error(dev, addr))) {
597 goto dma_map_single_failed;
599 dpaa2_fd_set_offset(fd, priv->tx_data_offset);
600 dpaa2_fd_set_format(fd, dpaa2_fd_sg);
601 dpaa2_fd_set_addr(fd, addr);
602 dpaa2_fd_set_len(fd, skb->len);
603 dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
605 if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
606 enable_tx_tstamp(fd, sgt_buf);
610 dma_map_single_failed:
611 skb_free_frag(sgt_buf);
612 sgt_buf_alloc_failed:
613 dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
619 /* Create a frame descriptor based on a linear skb */
620 static int build_single_fd(struct dpaa2_eth_priv *priv,
624 struct device *dev = priv->net_dev->dev.parent;
625 u8 *buffer_start, *aligned_start;
626 struct dpaa2_eth_swa *swa;
629 buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb);
631 /* If there's enough room to align the FD address, do it.
632 * It will help hardware optimize accesses.
633 */
634 aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
635 DPAA2_ETH_TX_BUF_ALIGN);
636 if (aligned_start >= skb->head)
637 buffer_start = aligned_start;
639 /* Store a backpointer to the skb at the beginning of the buffer
640 * (in the private data area) such that we can release it
641 * on Tx confirm
642 */
643 swa = (struct dpaa2_eth_swa *)buffer_start;
644 swa->type = DPAA2_ETH_SWA_SINGLE;
645 swa->single.skb = skb;
647 addr = dma_map_single(dev, buffer_start,
648 skb_tail_pointer(skb) - buffer_start,
650 if (unlikely(dma_mapping_error(dev, addr)))
653 dpaa2_fd_set_addr(fd, addr);
654 dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
655 dpaa2_fd_set_len(fd, skb->len);
656 dpaa2_fd_set_format(fd, dpaa2_fd_single);
657 dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
659 if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
660 enable_tx_tstamp(fd, buffer_start);
665 /* FD freeing routine on the Tx path
667 * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
668 * back-pointed to is also freed.
669 * This can be called either from dpaa2_eth_tx_conf() or on the error path of
670 * dpaa2_eth_tx().
671 */
672 static void free_tx_fd(const struct dpaa2_eth_priv *priv,
673 struct dpaa2_eth_fq *fq,
674 const struct dpaa2_fd *fd, bool in_napi)
676 struct device *dev = priv->net_dev->dev.parent;
678 struct sk_buff *skb = NULL;
679 unsigned char *buffer_start;
680 struct dpaa2_eth_swa *swa;
681 u8 fd_format = dpaa2_fd_get_format(fd);
682 u32 fd_len = dpaa2_fd_get_len(fd);
684 fd_addr = dpaa2_fd_get_addr(fd);
685 buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
686 swa = (struct dpaa2_eth_swa *)buffer_start;
688 if (fd_format == dpaa2_fd_single) {
689 if (swa->type == DPAA2_ETH_SWA_SINGLE) {
690 skb = swa->single.skb;
691 /* Accessing the skb buffer is safe before dma unmap,
692 * because we didn't map the actual skb shell.
693 */
694 dma_unmap_single(dev, fd_addr,
695 skb_tail_pointer(skb) - buffer_start,
698 WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP, "Wrong SWA type");
699 dma_unmap_single(dev, fd_addr, swa->xdp.dma_size,
702 } else if (fd_format == dpaa2_fd_sg) {
705 /* Unmap the scatterlist */
706 dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg,
710 /* Unmap the SGT buffer */
711 dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
714 netdev_dbg(priv->net_dev, "Invalid FD format\n");
718 if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) {
720 fq->dq_bytes += fd_len;
723 if (swa->type == DPAA2_ETH_SWA_XDP) {
724 xdp_return_frame(swa->xdp.xdpf);
728 /* Get the timestamp value */
729 if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
730 struct skb_shared_hwtstamps shhwtstamps;
731 __le64 *ts = dpaa2_get_ts(buffer_start, true);
734 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
736 ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
737 shhwtstamps.hwtstamp = ns_to_ktime(ns);
738 skb_tstamp_tx(skb, &shhwtstamps);
741 /* Free SGT buffer allocated on tx */
742 if (fd_format != dpaa2_fd_single)
743 skb_free_frag(buffer_start);
745 /* Move on with skb release */
746 napi_consume_skb(skb, in_napi);
749 static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
751 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
753 struct rtnl_link_stats64 *percpu_stats;
754 struct dpaa2_eth_drv_stats *percpu_extras;
755 struct dpaa2_eth_fq *fq;
756 struct netdev_queue *nq;
758 unsigned int needed_headroom;
762 percpu_stats = this_cpu_ptr(priv->percpu_stats);
763 percpu_extras = this_cpu_ptr(priv->percpu_extras);
765 needed_headroom = dpaa2_eth_needed_headroom(priv, skb);
766 if (skb_headroom(skb) < needed_headroom) {
769 ns = skb_realloc_headroom(skb, needed_headroom);
771 percpu_stats->tx_dropped++;
772 goto err_alloc_headroom;
774 percpu_extras->tx_reallocs++;
777 skb_set_owner_w(ns, skb->sk);
783 /* We'll be holding a back-reference to the skb until Tx Confirmation;
784 * we don't want that overwritten by a concurrent Tx with a cloned skb.
785 */
786 skb = skb_unshare(skb, GFP_ATOMIC);
787 if (unlikely(!skb)) {
788 /* skb_unshare() has already freed the skb */
789 percpu_stats->tx_dropped++;
793 /* Setup the FD fields */
794 memset(&fd, 0, sizeof(fd));
796 if (skb_is_nonlinear(skb)) {
797 err = build_sg_fd(priv, skb, &fd);
798 percpu_extras->tx_sg_frames++;
799 percpu_extras->tx_sg_bytes += skb->len;
801 err = build_single_fd(priv, skb, &fd);
805 percpu_stats->tx_dropped++;
810 trace_dpaa2_tx_fd(net_dev, &fd);
812 /* TxConf FQ selection relies on queue id from the stack.
813 * In case of a forwarded frame from another DPNI interface, we choose
814 * a queue affined to the same core that processed the Rx frame
815 */
816 queue_mapping = skb_get_queue_mapping(skb);
817 fq = &priv->fq[queue_mapping];
819 fd_len = dpaa2_fd_get_len(&fd);
820 nq = netdev_get_tx_queue(net_dev, queue_mapping);
821 netdev_tx_sent_queue(nq, fd_len);
823 /* Everything that happens after this enqueue might race with
824 * the Tx confirmation callback for this frame
825 */
826 for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
827 err = priv->enqueue(priv, fq, &fd, 0);
831 percpu_extras->tx_portal_busy += i;
832 if (unlikely(err < 0)) {
833 percpu_stats->tx_errors++;
834 /* Clean up everything, including freeing the skb */
835 free_tx_fd(priv, fq, &fd, false);
836 netdev_tx_completed_queue(nq, 1, fd_len);
838 percpu_stats->tx_packets++;
839 percpu_stats->tx_bytes += fd_len;
851 /* Tx confirmation frame processing routine */
852 static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
853 struct dpaa2_eth_channel *ch __always_unused,
854 const struct dpaa2_fd *fd,
855 struct dpaa2_eth_fq *fq)
857 struct rtnl_link_stats64 *percpu_stats;
858 struct dpaa2_eth_drv_stats *percpu_extras;
859 u32 fd_len = dpaa2_fd_get_len(fd);
863 trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
865 percpu_extras = this_cpu_ptr(priv->percpu_extras);
866 percpu_extras->tx_conf_frames++;
867 percpu_extras->tx_conf_bytes += fd_len;
869 /* Check frame errors in the FD field */
870 fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
871 free_tx_fd(priv, fq, fd, true);
873 if (likely(!fd_errors))
877 netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
880 percpu_stats = this_cpu_ptr(priv->percpu_stats);
881 /* Tx-conf logically pertains to the egress path. */
882 percpu_stats->tx_errors++;
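/* Enable/disable Rx L3 and L4 checksum validation offload on the DPNI */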
885 static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
889 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
890 DPNI_OFF_RX_L3_CSUM, enable);
892 netdev_err(priv->net_dev,
893 "dpni_set_offload(RX_L3_CSUM) failed\n");
897 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
898 DPNI_OFF_RX_L4_CSUM, enable);
900 netdev_err(priv->net_dev,
901 "dpni_set_offload(RX_L4_CSUM) failed\n");
908 static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
912 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
913 DPNI_OFF_TX_L3_CSUM, enable);
915 netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
919 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
920 DPNI_OFF_TX_L4_CSUM, enable);
922 netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
929 /* Perform a single release command to add buffers
930 * to the specified buffer pool
931 */
932 static int add_bufs(struct dpaa2_eth_priv *priv,
933 struct dpaa2_eth_channel *ch, u16 bpid)
935 struct device *dev = priv->net_dev->dev.parent;
936 u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
941 for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
942 /* Allocate buffer visible to WRIOP + skb shared info +
943 * alignment padding
944 */
945 /* allocate one page for each Rx buffer. WRIOP sees
946 * the entire page except for a tailroom reserved for
947 * skb shared info
948 */
949 page = dev_alloc_pages(0);
953 addr = dma_map_page(dev, page, 0, DPAA2_ETH_RX_BUF_SIZE,
955 if (unlikely(dma_mapping_error(dev, addr)))
961 trace_dpaa2_eth_buf_seed(priv->net_dev,
962 page, DPAA2_ETH_RX_BUF_RAW_SIZE,
963 addr, DPAA2_ETH_RX_BUF_SIZE,
968 /* In case the portal is busy, retry until successful */
969 while ((err = dpaa2_io_service_release(ch->dpio, bpid,
970 buf_array, i)) == -EBUSY)
973 /* If release command failed, clean up and bail out;
974 * not much else we can do about it
975 */
977 free_bufs(priv, buf_array, i);
984 __free_pages(page, 0);
986 /* If we managed to allocate at least some buffers,
987 * release them to hardware
988 */
995 static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
1000 /* This is the lazy seeding of Rx buffer pools.
1001 * dpaa2_add_bufs() is also used on the Rx hotpath and calls
1002 * napi_alloc_frag(). The trouble with that is that it in turn ends up
1003 * calling this_cpu_ptr(), which mandates execution in atomic context.
1004 * Rather than splitting up the code, do a one-off preempt disable.
1005 */
1007 for (j = 0; j < priv->num_channels; j++) {
1008 for (i = 0; i < DPAA2_ETH_NUM_BUFS;
1009 i += DPAA2_ETH_BUFS_PER_CMD) {
1010 new_count = add_bufs(priv, priv->channel[j], bpid);
1011 priv->channel[j]->buf_count += new_count;
1013 if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
1024 /*
1025 * Drain the specified number of buffers from the DPNI's private buffer pool.
1026 * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
1027 */
1028 static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
1030 u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
1034 ret = dpaa2_io_service_acquire(NULL, priv->bpid,
1037 netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
1040 free_bufs(priv, buf_array, ret);
1044 static void drain_pool(struct dpaa2_eth_priv *priv)
1048 drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
1049 drain_bufs(priv, 1);
1051 for (i = 0; i < priv->num_channels; i++)
1052 priv->channel[i]->buf_count = 0;
1055 /* Function is called from softirq context only, so we don't need to guard
1056 * the access to percpu count
1057 */
1058 static int refill_pool(struct dpaa2_eth_priv *priv,
1059 struct dpaa2_eth_channel *ch,
1064 if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
1068 new_count = add_bufs(priv, ch, bpid);
1069 if (unlikely(!new_count)) {
1070 /* Out of memory; abort for now, we'll try later on */
1073 ch->buf_count += new_count;
1074 } while (ch->buf_count < DPAA2_ETH_NUM_BUFS);
1076 if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
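/* Pull frames from the channel into its store with a volatile dequeue
 * command, retrying for as long as the QBMan portal is busy.
 */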
1082 static int pull_channel(struct dpaa2_eth_channel *ch)
1087 /* Retry while portal is busy */
1089 err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
1093 } while (err == -EBUSY);
1095 ch->stats.dequeue_portal_busy += dequeues;
1097 ch->stats.pull_err++;
1102 /* NAPI poll routine
1104 * Frames are dequeued from the QMan channel associated with this NAPI context.
1105 * Rx, Tx confirmation and (if configured) Rx error frames all count
1106 * towards the NAPI budget.
1107 */
1108 static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
1110 struct dpaa2_eth_channel *ch;
1111 struct dpaa2_eth_priv *priv;
1112 int rx_cleaned = 0, txconf_cleaned = 0;
1113 struct dpaa2_eth_fq *fq, *txc_fq = NULL;
1114 struct netdev_queue *nq;
1115 int store_cleaned, work_done;
1118 ch = container_of(napi, struct dpaa2_eth_channel, napi);
1123 err = pull_channel(ch);
1127 /* Refill pool if appropriate */
1128 refill_pool(priv, ch, priv->bpid);
1130 store_cleaned = consume_frames(ch, &fq);
1133 if (fq->type == DPAA2_RX_FQ) {
1134 rx_cleaned += store_cleaned;
1136 txconf_cleaned += store_cleaned;
1137 /* We have a single Tx conf FQ on this channel */
1141 /* If we either consumed the whole NAPI budget with Rx frames
1142 * or we reached the Tx confirmations threshold, we're done.
1143 */
1144 if (rx_cleaned >= budget ||
1145 txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
1149 } while (store_cleaned);
1151 /* We didn't consume the entire budget, so finish napi and
1152 * re-enable data availability notifications
1153 */
1154 napi_complete_done(napi, rx_cleaned);
1156 err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
1158 } while (err == -EBUSY);
1159 WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
1160 ch->nctx.desired_cpu);
1162 work_done = max(rx_cleaned, 1);
1165 if (txc_fq && txc_fq->dq_frames) {
1166 nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
1167 netdev_tx_completed_queue(nq, txc_fq->dq_frames,
1169 txc_fq->dq_frames = 0;
1170 txc_fq->dq_bytes = 0;
1173 if (ch->xdp.res & XDP_REDIRECT)
1179 static void enable_ch_napi(struct dpaa2_eth_priv *priv)
1181 struct dpaa2_eth_channel *ch;
1184 for (i = 0; i < priv->num_channels; i++) {
1185 ch = priv->channel[i];
1186 napi_enable(&ch->napi);
1190 static void disable_ch_napi(struct dpaa2_eth_priv *priv)
1192 struct dpaa2_eth_channel *ch;
1195 for (i = 0; i < priv->num_channels; i++) {
1196 ch = priv->channel[i];
1197 napi_disable(&ch->napi);
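/* Read the link state from the MC firmware and update carrier and Tx queue
 * state accordingly.
 */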
1201 static int link_state_update(struct dpaa2_eth_priv *priv)
1203 struct dpni_link_state state = {0};
1206 err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
1207 if (unlikely(err)) {
1208 netdev_err(priv->net_dev,
1209 "dpni_get_link_state() failed\n");
1213 /* Check link state; speed / duplex changes are not treated yet */
1214 if (priv->link_state.up == state.up)
1217 priv->link_state = state;
1219 netif_carrier_on(priv->net_dev);
1220 netif_tx_start_all_queues(priv->net_dev);
1222 netif_tx_stop_all_queues(priv->net_dev);
1223 netif_carrier_off(priv->net_dev);
1226 netdev_info(priv->net_dev, "Link Event: state %s\n",
1227 state.up ? "up" : "down");
1232 static int dpaa2_eth_open(struct net_device *net_dev)
1234 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1237 err = seed_pool(priv, priv->bpid);
1239 /* Not much to do; the buffer pool, though not filled up,
1240 * may still contain some buffers which would enable us
1241 * to limp on.
1242 */
1243 netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
1244 priv->dpbp_dev->obj_desc.id, priv->bpid);
1247 /* We'll only start the txqs when the link is actually ready; make sure
1248 * we don't race against the link up notification, which may come
1249 * immediately after dpni_enable();
1250 */
1251 netif_tx_stop_all_queues(net_dev);
1252 enable_ch_napi(priv);
1253 /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will
1254 * return true and cause 'ip link show' to report the LOWER_UP flag,
1255 * even though the link notification wasn't even received.
1256 */
1257 netif_carrier_off(net_dev);
1259 err = dpni_enable(priv->mc_io, 0, priv->mc_token);
1261 netdev_err(net_dev, "dpni_enable() failed\n");
1265 /* If the DPMAC object has already processed the link up interrupt,
1266 * we have to learn the link state ourselves.
1267 */
1268 err = link_state_update(priv);
1270 netdev_err(net_dev, "Can't update link state\n");
1271 goto link_state_err;
1278 disable_ch_napi(priv);
1283 /* Total number of in-flight frames on ingress queues */
1284 static u32 ingress_fq_count(struct dpaa2_eth_priv *priv)
1286 struct dpaa2_eth_fq *fq;
1287 u32 fcnt = 0, bcnt = 0, total = 0;
1290 for (i = 0; i < priv->num_fqs; i++) {
1292 err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
1294 netdev_warn(priv->net_dev, "query_fq_count failed");
1303 static void wait_for_fq_empty(struct dpaa2_eth_priv *priv)
1309 pending = ingress_fq_count(priv);
1312 } while (pending && --retries);
1315 static int dpaa2_eth_stop(struct net_device *net_dev)
1317 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1318 int dpni_enabled = 0;
1321 netif_tx_stop_all_queues(net_dev);
1322 netif_carrier_off(net_dev);
1324 /* On dpni_disable(), the MC firmware will:
1325 * - stop MAC Rx and wait for all Rx frames to be enqueued to software
1326 * - cut off WRIOP dequeues from egress FQs and wait until transmission
1327 * of all in flight Tx frames is finished (and corresponding Tx conf
1328 * frames are enqueued back to software)
1330 * Before calling dpni_disable(), we wait for all Tx frames to arrive
1331 * on WRIOP. After it finishes, wait until all remaining frames on Rx
1332 * and Tx conf queues are consumed on NAPI poll.
1333 */
1337 dpni_disable(priv->mc_io, 0, priv->mc_token);
1338 dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
1340 /* Allow the hardware some slack */
1342 } while (dpni_enabled && --retries);
1344 netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
1345 /* Must go on and disable NAPI nonetheless, so we don't crash at
1346 * the next "ifconfig up"
1347 */
1350 wait_for_fq_empty(priv);
1351 disable_ch_napi(priv);
1353 /* Empty the buffer pool */
1359 static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
1361 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1362 struct device *dev = net_dev->dev.parent;
1365 err = eth_mac_addr(net_dev, addr);
1367 dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
1371 err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
1374 dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
1381 /** Fill in counters maintained by the GPP driver. These may be different from
1382 * the hardware counters obtained by ethtool.
1383 */
1384 static void dpaa2_eth_get_stats(struct net_device *net_dev,
1385 struct rtnl_link_stats64 *stats)
1387 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1388 struct rtnl_link_stats64 *percpu_stats;
1390 u64 *netstats = (u64 *)stats;
1392 int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
1394 for_each_possible_cpu(i) {
1395 percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
1396 cpustats = (u64 *)percpu_stats;
1397 for (j = 0; j < num; j++)
1398 netstats[j] += cpustats[j];
1402 /* Copy mac unicast addresses from @net_dev to @priv.
1403 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
1404 */
1405 static void add_uc_hw_addr(const struct net_device *net_dev,
1406 struct dpaa2_eth_priv *priv)
1408 struct netdev_hw_addr *ha;
1411 netdev_for_each_uc_addr(ha, net_dev) {
1412 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
1415 netdev_warn(priv->net_dev,
1416 "Could not add ucast MAC %pM to the filtering table (err %d)\n",
1421 /* Copy mac multicast addresses from @net_dev to @priv
1422 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
1423 */
1424 static void add_mc_hw_addr(const struct net_device *net_dev,
1425 struct dpaa2_eth_priv *priv)
1427 struct netdev_hw_addr *ha;
1430 netdev_for_each_mc_addr(ha, net_dev) {
1431 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
1434 netdev_warn(priv->net_dev,
1435 "Could not add mcast MAC %pM to the filtering table (err %d)\n",
1440 static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
1442 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1443 int uc_count = netdev_uc_count(net_dev);
1444 int mc_count = netdev_mc_count(net_dev);
1445 u8 max_mac = priv->dpni_attrs.mac_filter_entries;
1446 u32 options = priv->dpni_attrs.options;
1447 u16 mc_token = priv->mc_token;
1448 struct fsl_mc_io *mc_io = priv->mc_io;
1451 /* Basic sanity checks; these probably indicate a misconfiguration */
1452 if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
1453 netdev_info(net_dev,
1454 "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
1457 /* Force promiscuous if the uc or mc counts exceed our capabilities. */
1458 if (uc_count > max_mac) {
1459 netdev_info(net_dev,
1460 "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
1464 if (mc_count + uc_count > max_mac) {
1465 netdev_info(net_dev,
1466 "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
1467 uc_count + mc_count, max_mac);
1468 goto force_mc_promisc;
1471 /* Adjust promisc settings due to flag combinations */
1472 if (net_dev->flags & IFF_PROMISC)
1474 if (net_dev->flags & IFF_ALLMULTI) {
1475 /* First, rebuild unicast filtering table. This should be done
1476 * in promisc mode, in order to avoid frame loss while we
1477 * progressively add entries to the table.
1478 * We don't know whether we had been in promisc already, and
1479 * making an MC call to find out is expensive; so set uc promisc
1480 * nonetheless.
1481 */
1482 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1484 netdev_warn(net_dev, "Can't set uc promisc\n");
1486 /* Actual uc table reconstruction. */
1487 err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
1489 netdev_warn(net_dev, "Can't clear uc filters\n");
1490 add_uc_hw_addr(net_dev, priv);
1492 /* Finally, clear uc promisc and set mc promisc as requested. */
1493 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
1495 netdev_warn(net_dev, "Can't clear uc promisc\n");
1496 goto force_mc_promisc;
1499 /* Neither unicast, nor multicast promisc will be on... eventually.
1500 * For now, rebuild mac filtering tables while forcing both of them on.
1501 */
1502 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1504 netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
1505 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
1507 netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);
1509 /* Actual mac filtering tables reconstruction */
1510 err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
1512 netdev_warn(net_dev, "Can't clear mac filters\n");
1513 add_mc_hw_addr(net_dev, priv);
1514 add_uc_hw_addr(net_dev, priv);
1516 /* Now we can clear both ucast and mcast promisc, without risking
1517 * to drop legitimate frames anymore.
1518 */
1519 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
1521 netdev_warn(net_dev, "Can't clear ucast promisc\n");
1522 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
1524 netdev_warn(net_dev, "Can't clear mcast promisc\n");
1529 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1531 netdev_warn(net_dev, "Can't set ucast promisc\n");
1533 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
1535 netdev_warn(net_dev, "Can't set mcast promisc\n");
1538 static int dpaa2_eth_set_features(struct net_device *net_dev,
1539 netdev_features_t features)
1541 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1542 netdev_features_t changed = features ^ net_dev->features;
1546 if (changed & NETIF_F_RXCSUM) {
1547 enable = !!(features & NETIF_F_RXCSUM);
1548 err = set_rx_csum(priv, enable);
1553 if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
1554 enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
1555 err = set_tx_csum(priv, enable);
1563 static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1565 struct dpaa2_eth_priv *priv = netdev_priv(dev);
1566 struct hwtstamp_config config;
1568 if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
1571 switch (config.tx_type) {
1572 case HWTSTAMP_TX_OFF:
1573 priv->tx_tstamp = false;
1575 case HWTSTAMP_TX_ON:
1576 priv->tx_tstamp = true;
1582 if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
1583 priv->rx_tstamp = false;
1585 priv->rx_tstamp = true;
1586 /* TS is set for all frame types, not only those requested */
1587 config.rx_filter = HWTSTAMP_FILTER_ALL;
1590 return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
1594 static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1596 if (cmd == SIOCSHWTSTAMP)
1597 return dpaa2_eth_ts_ioctl(dev, rq, cmd);
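/* Check whether frames for the given MTU still fit in a single Rx buffer;
 * Rx S/G frames are not supported with XDP, so larger MTUs are rejected
 * while a program is attached.
 */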
1602 static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
1604 int mfl, linear_mfl;
1606 mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
1607 linear_mfl = DPAA2_ETH_RX_BUF_SIZE - DPAA2_ETH_RX_HWA_SIZE -
1608 dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM;
1610 if (mfl > linear_mfl) {
1611 netdev_warn(priv->net_dev, "Maximum MTU for XDP is %d\n",
1612 linear_mfl - VLAN_ETH_HLEN);
1619 static int set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp)
1623 /* We enforce a maximum Rx frame length based on MTU only if we have
1624 * an XDP program attached (in order to avoid Rx S/G frames).
1625 * Otherwise, we accept all incoming frames as long as they are not
1626 * larger than maximum size supported in hardware
1627 */
1629 mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
1631 mfl = DPAA2_ETH_MFL;
1633 err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, mfl);
1635 netdev_err(priv->net_dev, "dpni_set_max_frame_length failed\n");
1642 static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu)
1644 struct dpaa2_eth_priv *priv = netdev_priv(dev);
1647 if (!priv->xdp_prog)
1650 if (!xdp_mtu_valid(priv, new_mtu))
1653 err = set_rx_mfl(priv, new_mtu, true);
1662 static int update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp)
1664 struct dpni_buffer_layout buf_layout = {0};
1667 err = dpni_get_buffer_layout(priv->mc_io, 0, priv->mc_token,
1668 DPNI_QUEUE_RX, &buf_layout);
1670 netdev_err(priv->net_dev, "dpni_get_buffer_layout failed\n");
1674 /* Reserve extra headroom for XDP header size changes */
1675 buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv) +
1676 (has_xdp ? XDP_PACKET_HEADROOM : 0);
1677 buf_layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
1678 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
1679 DPNI_QUEUE_RX, &buf_layout);
1681 netdev_err(priv->net_dev, "dpni_set_buffer_layout failed\n");
1688 static int setup_xdp(struct net_device *dev, struct bpf_prog *prog)
1690 struct dpaa2_eth_priv *priv = netdev_priv(dev);
1691 struct dpaa2_eth_channel *ch;
1692 struct bpf_prog *old;
1693 bool up, need_update;
1696 if (prog && !xdp_mtu_valid(priv, dev->mtu))
1700 prog = bpf_prog_add(prog, priv->num_channels);
1702 return PTR_ERR(prog);
1705 up = netif_running(dev);
1706 need_update = (!!priv->xdp_prog != !!prog);
1709 dpaa2_eth_stop(dev);
1711 /* While in xdp mode, enforce a maximum Rx frame size based on MTU.
1712 * Also, when switching between xdp/non-xdp modes we need to reconfigure
1713 * our Rx buffer layout. Buffer pool was drained on dpaa2_eth_stop,
1714 * so we are sure no old format buffers will be used from now on.
1715 */
1717 err = set_rx_mfl(priv, dev->mtu, !!prog);
1720 err = update_rx_buffer_headroom(priv, !!prog);
1725 old = xchg(&priv->xdp_prog, prog);
1729 for (i = 0; i < priv->num_channels; i++) {
1730 ch = priv->channel[i];
1731 old = xchg(&ch->xdp.prog, prog);
1737 err = dpaa2_eth_open(dev);
1746 bpf_prog_sub(prog, priv->num_channels);
1748 dpaa2_eth_open(dev);
1753 static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1755 struct dpaa2_eth_priv *priv = netdev_priv(dev);
1757 switch (xdp->command) {
1758 case XDP_SETUP_PROG:
1759 return setup_xdp(dev, xdp->prog);
1760 case XDP_QUERY_PROG:
1761 xdp->prog_id = priv->xdp_prog ? priv->xdp_prog->aux->id : 0;
1770 static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev,
1771 struct xdp_frame *xdpf)
1773 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1774 struct device *dev = net_dev->dev.parent;
1775 struct rtnl_link_stats64 *percpu_stats;
1776 struct dpaa2_eth_drv_stats *percpu_extras;
1777 unsigned int needed_headroom;
1778 struct dpaa2_eth_swa *swa;
1779 struct dpaa2_eth_fq *fq;
1781 void *buffer_start, *aligned_start;
1785 /* We require a minimum headroom to be able to transmit the frame.
1786 * Otherwise return an error and let the original net_device handle it
1787 */
1788 needed_headroom = dpaa2_eth_needed_headroom(priv, NULL);
1789 if (xdpf->headroom < needed_headroom)
1792 percpu_stats = this_cpu_ptr(priv->percpu_stats);
1793 percpu_extras = this_cpu_ptr(priv->percpu_extras);
1795 /* Setup the FD fields */
1796 memset(&fd, 0, sizeof(fd));
1798 /* Align FD address, if possible */
1799 buffer_start = xdpf->data - needed_headroom;
1800 aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
1801 DPAA2_ETH_TX_BUF_ALIGN);
1802 if (aligned_start >= xdpf->data - xdpf->headroom)
1803 buffer_start = aligned_start;
1805 swa = (struct dpaa2_eth_swa *)buffer_start;
1806 /* Fill in the software annotation so the xdp_frame can be identified and returned on Tx completion */
1807 swa->type = DPAA2_ETH_SWA_XDP;
1808 swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start;
1809 swa->xdp.xdpf = xdpf;
1811 addr = dma_map_single(dev, buffer_start,
1814 if (unlikely(dma_mapping_error(dev, addr))) {
1815 percpu_stats->tx_dropped++;
1819 dpaa2_fd_set_addr(&fd, addr);
1820 dpaa2_fd_set_offset(&fd, xdpf->data - buffer_start);
1821 dpaa2_fd_set_len(&fd, xdpf->len);
1822 dpaa2_fd_set_format(&fd, dpaa2_fd_single);
1823 dpaa2_fd_set_ctrl(&fd, FD_CTRL_PTA);
1825 fq = &priv->fq[smp_processor_id() % dpaa2_eth_queue_count(priv)];
1826 for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
1827 err = priv->enqueue(priv, fq, &fd, 0);
1831 percpu_extras->tx_portal_busy += i;
1832 if (unlikely(err < 0)) {
1833 percpu_stats->tx_errors++;
1834 /* let the Rx device handle the cleanup */
1838 percpu_stats->tx_packets++;
1839 percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
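/* ndo_xdp_xmit: transmit a batch of redirected XDP frames; frames that
 * cannot be enqueued are returned to the XDP memory allocator.
 */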
1844 static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
1845 struct xdp_frame **frames, u32 flags)
1850 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1853 if (!netif_running(net_dev))
1856 for (i = 0; i < n; i++) {
1857 struct xdp_frame *xdpf = frames[i];
1859 err = dpaa2_eth_xdp_xmit_frame(net_dev, xdpf);
1861 xdp_return_frame_rx_napi(xdpf);
1869 static const struct net_device_ops dpaa2_eth_ops = {
1870 .ndo_open = dpaa2_eth_open,
1871 .ndo_start_xmit = dpaa2_eth_tx,
1872 .ndo_stop = dpaa2_eth_stop,
1873 .ndo_set_mac_address = dpaa2_eth_set_addr,
1874 .ndo_get_stats64 = dpaa2_eth_get_stats,
1875 .ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
1876 .ndo_set_features = dpaa2_eth_set_features,
1877 .ndo_do_ioctl = dpaa2_eth_ioctl,
1878 .ndo_change_mtu = dpaa2_eth_change_mtu,
1879 .ndo_bpf = dpaa2_eth_xdp,
1880 .ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
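/* Data availability notification (CDAN) callback: invoked from the DPIO
 * interrupt path when frames arrive on the channel; it only updates NAPI
 * statistics and schedules NAPI.
 */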
1883 static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
1885 struct dpaa2_eth_channel *ch;
1887 ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
1889 /* Update NAPI statistics */
1892 napi_schedule_irqoff(&ch->napi);
1895 /* Allocate and configure a DPCON object */
1896 static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
1898 struct fsl_mc_device *dpcon;
1899 struct device *dev = priv->net_dev->dev.parent;
1900 struct dpcon_attr attrs;
1903 err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
1904 FSL_MC_POOL_DPCON, &dpcon);
1907 err = -EPROBE_DEFER;
1909 dev_info(dev, "Not enough DPCONs, will go on as-is\n");
1910 return ERR_PTR(err);
1913 err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
1915 dev_err(dev, "dpcon_open() failed\n");
1919 err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
1921 dev_err(dev, "dpcon_reset() failed\n");
1925 err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs);
1927 dev_err(dev, "dpcon_get_attributes() failed\n");
1931 err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
1933 dev_err(dev, "dpcon_enable() failed\n");
1940 dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
1942 fsl_mc_object_free(dpcon);
1947 static void free_dpcon(struct dpaa2_eth_priv *priv,
1948 struct fsl_mc_device *dpcon)
1950 dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
1951 dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
1952 fsl_mc_object_free(dpcon);
1955 static struct dpaa2_eth_channel *
1956 alloc_channel(struct dpaa2_eth_priv *priv)
1958 struct dpaa2_eth_channel *channel;
1959 struct dpcon_attr attr;
1960 struct device *dev = priv->net_dev->dev.parent;
1963 channel = kzalloc(sizeof(*channel), GFP_KERNEL);
1967 channel->dpcon = setup_dpcon(priv);
1968 if (IS_ERR_OR_NULL(channel->dpcon)) {
1969 err = PTR_ERR(channel->dpcon);
1973 err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
1976 dev_err(dev, "dpcon_get_attributes() failed\n");
1980 channel->dpcon_id = attr.id;
1981 channel->ch_id = attr.qbman_ch_id;
1982 channel->priv = priv;
1987 free_dpcon(priv, channel->dpcon);
1990 return ERR_PTR(err);
1993 static void free_channel(struct dpaa2_eth_priv *priv,
1994 struct dpaa2_eth_channel *channel)
1996 free_dpcon(priv, channel->dpcon);
2000 /* DPIO setup: allocate and configure QBMan channels, setup core affinity
2001 * and register data availability notifications
2002 */
2003 static int setup_dpio(struct dpaa2_eth_priv *priv)
2005 struct dpaa2_io_notification_ctx *nctx;
2006 struct dpaa2_eth_channel *channel;
2007 struct dpcon_notification_cfg dpcon_notif_cfg;
2008 struct device *dev = priv->net_dev->dev.parent;
2011 /* We want the ability to spread ingress traffic (RX, TX conf) to as
2012 * many cores as possible, so we need one channel for each core
2013 * (unless there's fewer queues than cores, in which case the extra
2014 * channels would be wasted).
2015 * Allocate one channel per core and register it to the core's
2016 * affine DPIO. If not enough channels are available for all cores
2017 * or if some cores don't have an affine DPIO, there will be no
2018 * ingress frame processing on those cores.
2019 */
2020 cpumask_clear(&priv->dpio_cpumask);
2021 for_each_online_cpu(i) {
2022 /* Try to allocate a channel */
2023 channel = alloc_channel(priv);
2024 if (IS_ERR_OR_NULL(channel)) {
2025 err = PTR_ERR(channel);
2026 if (err != -EPROBE_DEFER)
2028 "No affine channel for cpu %d and above\n", i);
2032 priv->channel[priv->num_channels] = channel;
2034 nctx = &channel->nctx;
2037 nctx->id = channel->ch_id;
2038 nctx->desired_cpu = i;
2040 /* Register the new context */
2041 channel->dpio = dpaa2_io_service_select(i);
2042 err = dpaa2_io_service_register(channel->dpio, nctx, dev);
2044 dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
2045 /* If no affine DPIO for this core, there's probably
2046 * none available for next cores either. Signal we want
2047 * to retry later, in case the DPIO devices weren't
2048 * probed yet.
2049 */
2050 err = -EPROBE_DEFER;
2051 goto err_service_reg;
2054 /* Register DPCON notification with MC */
2055 dpcon_notif_cfg.dpio_id = nctx->dpio_id;
2056 dpcon_notif_cfg.priority = 0;
2057 dpcon_notif_cfg.user_ctx = nctx->qman64;
2058 err = dpcon_set_notification(priv->mc_io, 0,
2059 channel->dpcon->mc_handle,
2062 dev_err(dev, "dpcon_set_notification() failed\n");
2066 /* If we managed to allocate a channel and also found an affine
2067 * DPIO for this core, add it to the final mask
2068 */
2069 cpumask_set_cpu(i, &priv->dpio_cpumask);
2070 priv->num_channels++;
2072 /* Stop if we already have enough channels to accommodate all
2073 * RX and TX conf queues
2074 */
2075 if (priv->num_channels == priv->dpni_attrs.num_queues)
2082 dpaa2_io_service_deregister(channel->dpio, nctx, dev);
2084 free_channel(priv, channel);
2086 if (err == -EPROBE_DEFER)
2089 if (cpumask_empty(&priv->dpio_cpumask)) {
2090 dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
2094 dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
2095 cpumask_pr_args(&priv->dpio_cpumask));
2100 static void free_dpio(struct dpaa2_eth_priv *priv)
2102 struct device *dev = priv->net_dev->dev.parent;
2103 struct dpaa2_eth_channel *ch;
2106 /* deregister CDAN notifications and free channels */
2107 for (i = 0; i < priv->num_channels; i++) {
2108 ch = priv->channel[i];
2109 dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev);
2110 free_channel(priv, ch);
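/* Return the channel affined to the given CPU during setup_dpio(); if none
 * is found, warn and fall back to the first channel.
 */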
2114 static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
2117 struct device *dev = priv->net_dev->dev.parent;
2120 for (i = 0; i < priv->num_channels; i++)
2121 if (priv->channel[i]->nctx.desired_cpu == cpu)
2122 return priv->channel[i];
2124 /* We should never get here. Issue a warning and return
2125 * the first channel, because it's still better than nothing
2126 */
2127 dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
2129 return priv->channel[0];
2132 static void set_fq_affinity(struct dpaa2_eth_priv *priv)
2134 struct device *dev = priv->net_dev->dev.parent;
2135 struct cpumask xps_mask;
2136 struct dpaa2_eth_fq *fq;
2137 int rx_cpu, txc_cpu;
2140 /* For each FQ, pick one channel/CPU to deliver frames to.
2141 * This may well change at runtime, either through irqbalance or
2142 * through direct user intervention.
2143 */
2144 rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);
2146 for (i = 0; i < priv->num_fqs; i++) {
2150 fq->target_cpu = rx_cpu;
2151 rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
2152 if (rx_cpu >= nr_cpu_ids)
2153 rx_cpu = cpumask_first(&priv->dpio_cpumask);
2155 case DPAA2_TX_CONF_FQ:
2156 fq->target_cpu = txc_cpu;
2158 /* Tell the stack to affine to txc_cpu the Tx queue
2159 * associated with the confirmation one
2160 */
2161 cpumask_clear(&xps_mask);
2162 cpumask_set_cpu(txc_cpu, &xps_mask);
2163 err = netif_set_xps_queue(priv->net_dev, &xps_mask,
2166 dev_err(dev, "Error setting XPS queue\n");
2168 txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
2169 if (txc_cpu >= nr_cpu_ids)
2170 txc_cpu = cpumask_first(&priv->dpio_cpumask);
2173 dev_err(dev, "Unknown FQ type: %d\n", fq->type);
2175 fq->channel = get_affine_channel(priv, fq->target_cpu);
2179 static void setup_fqs(struct dpaa2_eth_priv *priv)
2183 /* We have one TxConf FQ per Tx flow.
2184 * The number of Tx and Rx queues is the same.
2185 * Tx queues come first in the fq array.
2186 */
2187 for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
2188 priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
2189 priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
2190 priv->fq[priv->num_fqs++].flowid = (u16)i;
2193 for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
2194 priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
2195 priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
2196 priv->fq[priv->num_fqs++].flowid = (u16)i;
2199 /* For each FQ, decide on which core to process incoming frames */
2200 set_fq_affinity(priv);
2203 /* Allocate and configure one buffer pool for each interface */
2204 static int setup_dpbp(struct dpaa2_eth_priv *priv)
2207 struct fsl_mc_device *dpbp_dev;
2208 struct device *dev = priv->net_dev->dev.parent;
2209 struct dpbp_attr dpbp_attrs;
2211 err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
2215 err = -EPROBE_DEFER;
2217 dev_err(dev, "DPBP device allocation failed\n");
2221 priv->dpbp_dev = dpbp_dev;
2223 err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
2224 &dpbp_dev->mc_handle);
2226 dev_err(dev, "dpbp_open() failed\n");
2230 err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
2232 dev_err(dev, "dpbp_reset() failed\n");
2236 err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
2238 dev_err(dev, "dpbp_enable() failed\n");
2242 err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
2245 dev_err(dev, "dpbp_get_attributes() failed\n");
2248 priv->bpid = dpbp_attrs.bpid;
2253 dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
2256 dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
2258 fsl_mc_object_free(dpbp_dev);
2263 static void free_dpbp(struct dpaa2_eth_priv *priv)
2266 dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
2267 dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
2268 fsl_mc_object_free(priv->dpbp_dev);
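/* Configure the Tx, Tx confirmation and Rx buffer layouts (software
 * annotation size, timestamp passing, data alignment and headroom) on the
 * DPNI.
 */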
2271 static int set_buffer_layout(struct dpaa2_eth_priv *priv)
2273 struct device *dev = priv->net_dev->dev.parent;
2274 struct dpni_buffer_layout buf_layout = {0};
2278 /* We need to check for WRIOP version 1.0.0, but depending on the MC
2279 * version, this number is not always provided correctly on rev1.
2280 * We need to check for both alternatives in this situation.
2281 */
2282 if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
2283 priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
2284 rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
2286 rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
2289 buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
2290 buf_layout.pass_timestamp = true;
2291 buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
2292 DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
2293 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
2294 DPNI_QUEUE_TX, &buf_layout);
2296 dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
2300 /* tx-confirm buffer */
2301 buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
2302 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
2303 DPNI_QUEUE_TX_CONFIRM, &buf_layout);
2305 dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
2309 /* Now that we've set our tx buffer layout, retrieve the minimum
2310 * required tx data offset.
2311 */
2312 err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
2313 &priv->tx_data_offset);
2315 dev_err(dev, "dpni_get_tx_data_offset() failed\n");
2319 if ((priv->tx_data_offset % 64) != 0)
2320 dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
2321 priv->tx_data_offset);
2324 buf_layout.pass_frame_status = true;
2325 buf_layout.pass_parser_result = true;
2326 buf_layout.data_align = rx_buf_align;
2327 buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv);
2328 buf_layout.private_data_size = 0;
2329 buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
2330 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
2331 DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
2332 DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
2333 DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
2334 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
2335 DPNI_QUEUE_RX, &buf_layout);
2337 dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
2344 #define DPNI_ENQUEUE_FQID_VER_MAJOR 7
2345 #define DPNI_ENQUEUE_FQID_VER_MINOR 9
2347 static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv,
2348 struct dpaa2_eth_fq *fq,
2349 struct dpaa2_fd *fd, u8 prio)
2351 return dpaa2_io_service_enqueue_qd(fq->channel->dpio,
2352 priv->tx_qdid, prio,
2356 static inline int dpaa2_eth_enqueue_fq(struct dpaa2_eth_priv *priv,
2357 struct dpaa2_eth_fq *fq,
2358 struct dpaa2_fd *fd,
2359 u8 prio __always_unused)
2361 return dpaa2_io_service_enqueue_fq(fq->channel->dpio,
2365 static void set_enqueue_mode(struct dpaa2_eth_priv *priv)
2367 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
2368 DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
2369 priv->enqueue = dpaa2_eth_enqueue_qd;
2371 priv->enqueue = dpaa2_eth_enqueue_fq;
2374 /* Configure the DPNI object this interface is associated with */
2375 static int setup_dpni(struct fsl_mc_device *ls_dev)
2377 struct device *dev = &ls_dev->dev;
2378 struct dpaa2_eth_priv *priv;
2379 struct net_device *net_dev;
2382 net_dev = dev_get_drvdata(dev);
2383 priv = netdev_priv(net_dev);
2385 /* get a handle for the DPNI object */
err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
if (err) {
	dev_err(dev, "dpni_open() failed\n");
	return err;
}

/* Check if we can work with this DPNI object */
err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major,
			   &priv->dpni_ver_minor);
if (err) {
	dev_err(dev, "dpni_get_api_version() failed\n");
	goto close;
}
if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
	dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
		priv->dpni_ver_major, priv->dpni_ver_minor,
		DPNI_VER_MAJOR, DPNI_VER_MINOR);
	err = -ENOTSUPP;
	goto close;
}

ls_dev->mc_io = priv->mc_io;
ls_dev->mc_handle = priv->mc_token;

err = dpni_reset(priv->mc_io, 0, priv->mc_token);
if (err) {
	dev_err(dev, "dpni_reset() failed\n");
	goto close;
}

err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
			  &priv->dpni_attrs);
if (err) {
	dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
	goto close;
}

err = set_buffer_layout(priv);
if (err)
	goto close;

set_enqueue_mode(priv);

priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) *
			       dpaa2_eth_fs_count(priv), GFP_KERNEL);
if (!priv->cls_rules) {
	err = -ENOMEM;
	goto close;
}

return 0;

close:
	dpni_close(priv->mc_io, 0, priv->mc_token);

	return err;
}
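/* Undo the configuration done by setup_dpni(): reset and close the DPNI */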
static void free_dpni(struct dpaa2_eth_priv *priv)
{
	int err;

	err = dpni_reset(priv->mc_io, 0, priv->mc_token);
	if (err)
		netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
			    err);

	dpni_close(priv->mc_io, 0, priv->mc_token);
}
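/* Set up a Rx frame queue: deliver frames to the channel's DPCON,
 * enable taildrop and register the XDP Rx queue info
 */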
2454 static int setup_rx_flow(struct dpaa2_eth_priv *priv,
2455 struct dpaa2_eth_fq *fq)
2457 struct device *dev = priv->net_dev->dev.parent;
2458 struct dpni_queue queue;
2459 struct dpni_queue_id qid;
2460 struct dpni_taildrop td;
err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
		     DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid);
if (err) {
	dev_err(dev, "dpni_get_queue(RX) failed\n");
	return err;
}

fq->fqid = qid.fqid;

queue.destination.id = fq->channel->dpcon_id;
queue.destination.type = DPNI_DEST_DPCON;
queue.destination.priority = 1;
queue.user_context = (u64)(uintptr_t)fq;
queue.flc.stash_control = 1;
queue.flc.value &= 0xFFFFFFFFFFFFFFC0;
/* 01 01 00 - data, annotation, flow context */
queue.flc.value |= 0x14;
err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
		     DPNI_QUEUE_RX, 0, fq->flowid,
		     DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST |
		     DPNI_QUEUE_OPT_FLC,
		     &queue);
if (err) {
	dev_err(dev, "dpni_set_queue(RX) failed\n");
	return err;
}

td.enable = 1;
td.threshold = DPAA2_ETH_TAILDROP_THRESH;
err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, DPNI_CP_QUEUE,
			DPNI_QUEUE_RX, 0, fq->flowid, &td);
if (err) {
	dev_err(dev, "dpni_set_taildrop() failed\n");
	return err;
}

/* xdp_rxq setup */
err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev,
		       fq->flowid);
if (err) {
	dev_err(dev, "xdp_rxq_info_reg failed\n");
	return err;
}

err = xdp_rxq_info_reg_mem_model(&fq->channel->xdp_rxq,
				 MEM_TYPE_PAGE_ORDER0, NULL);
if (err) {
	dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n");
	return err;
}

return 0;
}
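/* Set up the Tx and Tx-confirmation queues of a flow: the Tx queue is
 * only queried for its qdbin/fqid, while Tx-confirmation frames are
 * delivered to the channel's DPCON
 */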
2517 static int setup_tx_flow(struct dpaa2_eth_priv *priv,
2518 struct dpaa2_eth_fq *fq)
2520 struct device *dev = priv->net_dev->dev.parent;
2521 struct dpni_queue queue;
2522 struct dpni_queue_id qid;
err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
		     DPNI_QUEUE_TX, 0, fq->flowid, &queue, &qid);
if (err) {
	dev_err(dev, "dpni_get_queue(TX) failed\n");
	return err;
}

fq->tx_qdbin = qid.qdbin;
fq->tx_fqid = qid.fqid;

err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
		     DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
		     &queue, &qid);
if (err) {
	dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
	return err;
}

fq->fqid = qid.fqid;

queue.destination.id = fq->channel->dpcon_id;
queue.destination.type = DPNI_DEST_DPCON;
queue.destination.priority = 0;
queue.user_context = (u64)(uintptr_t)fq;
err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
		     DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
		     DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
		     &queue);
if (err) {
	dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
	return err;
}

return 0;
}
2561 /* Supported header fields for Rx hash distribution key */
static const struct dpaa2_eth_dist_fields dist_fields[] = {
	{
		/* L2 header */
		.rxnfc_field = RXH_L2DA,
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_DA,
		.size = 6,
	}, {
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_SA,
		.size = 6,
	}, {
		/* This is the last ethertype field parsed:
		 * depending on frame format, it can be the MAC ethertype
		 * or the VLAN etype.
		 */
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_TYPE,
		.size = 2,
	}, {
		/* VLAN header */
		.rxnfc_field = RXH_VLAN,
		.cls_prot = NET_PROT_VLAN,
		.cls_field = NH_FLD_VLAN_TCI,
		.size = 2,
	}, {
		/* IP header */
		.rxnfc_field = RXH_IP_SRC,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_SRC,
		.size = 4,
	}, {
		.rxnfc_field = RXH_IP_DST,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_DST,
		.size = 4,
	}, {
		.rxnfc_field = RXH_L3_PROTO,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_PROTO,
		.size = 1,
	}, {
		/* Using UDP ports, this is functionally equivalent to raw
		 * byte pairs from L4 header.
		 */
		.rxnfc_field = RXH_L4_B_0_1,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_SRC,
		.size = 2,
	}, {
		.rxnfc_field = RXH_L4_B_2_3,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_DST,
		.size = 2,
	},
};
2619 /* Configure the Rx hash key using the legacy API */
2620 static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
2622 struct device *dev = priv->net_dev->dev.parent;
2623 struct dpni_rx_tc_dist_cfg dist_cfg;
2626 memset(&dist_cfg, 0, sizeof(dist_cfg));
2628 dist_cfg.key_cfg_iova = key;
2629 dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
2630 dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
if (err)
	dev_err(dev, "dpni_set_rx_tc_dist failed\n");

return err;
}
2639 /* Configure the Rx hash key using the new API */
2640 static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
2642 struct device *dev = priv->net_dev->dev.parent;
2643 struct dpni_rx_dist_cfg dist_cfg;
2646 memset(&dist_cfg, 0, sizeof(dist_cfg));
2648 dist_cfg.key_cfg_iova = key;
2649 dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
2650 dist_cfg.enable = 1;
err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
if (err)
	dev_err(dev, "dpni_set_rx_hash_dist failed\n");

return err;
}
2659 /* Configure the Rx flow classification key */
2660 static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
2662 struct device *dev = priv->net_dev->dev.parent;
2663 struct dpni_rx_dist_cfg dist_cfg;
2666 memset(&dist_cfg, 0, sizeof(dist_cfg));
2668 dist_cfg.key_cfg_iova = key;
2669 dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
2670 dist_cfg.enable = 1;
err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
if (err)
	dev_err(dev, "dpni_set_rx_fs_dist failed\n");

return err;
}
2679 /* Size of the Rx flow classification key */
int dpaa2_eth_cls_key_size(void)
{
	int i, size = 0;

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
		size += dist_fields[i].size;

	return size;
}
2690 /* Offset of header field in Rx classification key */
int dpaa2_eth_cls_fld_off(int prot, int field)
{
	int i, off = 0;

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		if (dist_fields[i].cls_prot == prot &&
		    dist_fields[i].cls_field == field)
			return off;
		off += dist_fields[i].size;
	}

	WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n");
	return 0;
}
/* Set Rx distribution (hash or flow classification) key
 * flags is a combination of RXH_ bits
 */
2709 static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
2710 enum dpaa2_eth_rx_dist type, u64 flags)
2712 struct device *dev = net_dev->dev.parent;
2713 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2714 struct dpkg_profile_cfg cls_cfg;
2715 u32 rx_hash_fields = 0;
2716 dma_addr_t key_iova;
2721 memset(&cls_cfg, 0, sizeof(cls_cfg));
2723 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
2724 struct dpkg_extract *key =
2725 &cls_cfg.extracts[cls_cfg.num_extracts];
/* For Rx hashing key we set only the selected fields.
 * For Rx flow classification key we set all supported fields
 */
if (type == DPAA2_ETH_RX_DIST_HASH) {
	if (!(flags & dist_fields[i].rxnfc_field))
		continue;
	rx_hash_fields |= dist_fields[i].rxnfc_field;
}

if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
	dev_err(dev, "error adding key extraction rule, too many rules?\n");
	return -E2BIG;
}
2741 key->type = DPKG_EXTRACT_FROM_HDR;
2742 key->extract.from_hdr.prot = dist_fields[i].cls_prot;
2743 key->extract.from_hdr.type = DPKG_FULL_FIELD;
2744 key->extract.from_hdr.field = dist_fields[i].cls_field;
2745 cls_cfg.num_extracts++;
}

dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
if (!dma_mem)
	return -ENOMEM;

err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
if (err) {
	dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
	goto free_key;
}

/* Prepare for setting the rx dist */
key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
			  DMA_TO_DEVICE);
if (dma_mapping_error(dev, key_iova)) {
	dev_err(dev, "DMA mapping failed\n");
	err = -ENOMEM;
	goto free_key;
}

if (type == DPAA2_ETH_RX_DIST_HASH) {
	if (dpaa2_eth_has_legacy_dist(priv))
		err = config_legacy_hash_key(priv, key_iova);
	else
		err = config_hash_key(priv, key_iova);
} else {
	err = config_cls_key(priv, key_iova);
}

dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
		 DMA_TO_DEVICE);
if (!err && type == DPAA2_ETH_RX_DIST_HASH)
	priv->rx_hash_fields = rx_hash_fields;

free_key:
	kfree(dma_mem);
	return err;
}
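/* Configure hash distribution for the requested RXH_ header fields */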
2786 int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
2788 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
if (!dpaa2_eth_hash_enabled(priv))
	return -EOPNOTSUPP;

return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, flags);
}
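/* Configure the Rx flow classification key, if supported by the DPNI
 * options and the MC version in use
 */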
2796 static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv)
2798 struct device *dev = priv->net_dev->dev.parent;
2800 /* Check if we actually support Rx flow classification */
if (dpaa2_eth_has_legacy_dist(priv)) {
	dev_dbg(dev, "Rx cls not supported by current MC version\n");
	return -EOPNOTSUPP;
}

if (priv->dpni_attrs.options & DPNI_OPT_NO_FS ||
    !(priv->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)) {
	dev_dbg(dev, "Rx cls disabled in DPNI options\n");
	return -EOPNOTSUPP;
}

if (!dpaa2_eth_hash_enabled(priv)) {
	dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
	return -EOPNOTSUPP;
}

priv->rx_cls_enabled = 1;

return dpaa2_eth_set_dist_key(priv->net_dev, DPAA2_ETH_RX_DIST_CLS, 0);
}
/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
 * frame queues and channels
 */
2825 static int bind_dpni(struct dpaa2_eth_priv *priv)
2827 struct net_device *net_dev = priv->net_dev;
2828 struct device *dev = net_dev->dev.parent;
2829 struct dpni_pools_cfg pools_params;
2830 struct dpni_error_cfg err_cfg;
pools_params.num_dpbp = 1;
pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
pools_params.pools[0].backup_pool = 0;
pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
if (err) {
	dev_err(dev, "dpni_set_pools() failed\n");
	return err;
}
/* have the interface implicitly distribute traffic based on
 * the default hash key
 */
2847 err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT);
2848 if (err && err != -EOPNOTSUPP)
2849 dev_err(dev, "Failed to configure hashing\n");
/* Configure the flow classification key; it includes all
 * supported header fields and cannot be modified at runtime
 */
2854 err = dpaa2_eth_set_cls(priv);
2855 if (err && err != -EOPNOTSUPP)
2856 dev_err(dev, "Failed to configure Rx classification key\n");
2858 /* Configure handling of error frames */
2859 err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
2860 err_cfg.set_frame_annotation = 1;
2861 err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
			       &err_cfg);
if (err) {
	dev_err(dev, "dpni_set_errors_behavior failed\n");
	return err;
}

/* Configure Rx and Tx conf queues to generate CDANs */
for (i = 0; i < priv->num_fqs; i++) {
	switch (priv->fq[i].type) {
	case DPAA2_RX_FQ:
		err = setup_rx_flow(priv, &priv->fq[i]);
		break;
	case DPAA2_TX_CONF_FQ:
		err = setup_tx_flow(priv, &priv->fq[i]);
		break;
	default:
		dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
		return -EINVAL;
	}
	if (err)
		return err;
}

err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
		    DPNI_QUEUE_TX, &priv->tx_qdid);
if (err) {
	dev_err(dev, "dpni_get_qdid() failed\n");
	return err;
}

return 0;
}
2896 /* Allocate rings for storing incoming frame descriptors */
2897 static int alloc_rings(struct dpaa2_eth_priv *priv)
2899 struct net_device *net_dev = priv->net_dev;
2900 struct device *dev = net_dev->dev.parent;
for (i = 0; i < priv->num_channels; i++) {
	priv->channel[i]->store =
		dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
	if (!priv->channel[i]->store) {
		netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
		goto err_ring;
	}
}

return 0;

err_ring:
	for (i = 0; i < priv->num_channels; i++) {
		if (!priv->channel[i]->store)
			break;
		dpaa2_io_store_destroy(priv->channel[i]->store);
	}

	return -ENOMEM;
}
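/* Free the frame descriptor stores allocated in alloc_rings() */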
2924 static void free_rings(struct dpaa2_eth_priv *priv)
for (i = 0; i < priv->num_channels; i++)
	dpaa2_io_store_destroy(priv->channel[i]->store);
}
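/* Choose the interface MAC address: prefer the one provisioned by
 * firmware/bootloader, fall back to the DPNI-configured address or,
 * if both are zero, to a randomly generated one
 */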
2932 static int set_mac_addr(struct dpaa2_eth_priv *priv)
2934 struct net_device *net_dev = priv->net_dev;
2935 struct device *dev = net_dev->dev.parent;
2936 u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
/* Get firmware address, if any */
err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
if (err) {
	dev_err(dev, "dpni_get_port_mac_addr() failed\n");
	return err;
}

/* Get DPNI attributes address, if any */
err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
				dpni_mac_addr);
if (err) {
	dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
	return err;
}

/* First check if firmware has any address configured by bootloader */
if (!is_zero_ether_addr(mac_addr)) {
	/* If the DPMAC addr != DPNI addr, update it */
	if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
		err = dpni_set_primary_mac_addr(priv->mc_io, 0,
						priv->mc_token,
						mac_addr);
		if (err) {
			dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
			return err;
		}
	}
	memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
} else if (is_zero_ether_addr(dpni_mac_addr)) {
	/* No MAC address configured, fill in net_dev->dev_addr
	 * with a random one
	 */
	eth_hw_addr_random(net_dev);
	dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");

	err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
					net_dev->dev_addr);
	if (err) {
		dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
		return err;
	}

	/* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
	 * practical purposes, this will be our "permanent" mac address,
	 * at least until the next reboot. This move will also permit
	 * register_netdevice() to properly fill up net_dev->perm_addr.
	 */
	net_dev->addr_assign_type = NET_ADDR_PERM;
} else {
	/* NET_ADDR_PERM is default, all we have to do is
	 * fill in the device addr.
	 */
	memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
}

return 0;
}
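/* One-time net device setup: MAC address, broadcast filter entry,
 * maximum frame length, real Rx/Tx queue counts and feature flags
 */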
2997 static int netdev_init(struct net_device *net_dev)
2999 struct device *dev = net_dev->dev.parent;
3000 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3001 u32 options = priv->dpni_attrs.options;
3002 u64 supported = 0, not_supported = 0;
3003 u8 bcast_addr[ETH_ALEN];
3007 net_dev->netdev_ops = &dpaa2_eth_ops;
3008 net_dev->ethtool_ops = &dpaa2_ethtool_ops;
err = set_mac_addr(priv);
if (err)
	return err;
3014 /* Explicitly add the broadcast address to the MAC filtering table */
3015 eth_broadcast_addr(bcast_addr);
err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
if (err) {
	dev_err(dev, "dpni_add_mac_addr() failed\n");
	return err;
}
3022 /* Set MTU upper limit; lower limit is 68B (default value) */
3023 net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
				DPAA2_ETH_MFL);
if (err) {
	dev_err(dev, "dpni_set_max_frame_length() failed\n");
	return err;
}
3031 /* Set actual number of queues in the net device */
3032 num_queues = dpaa2_eth_queue_count(priv);
err = netif_set_real_num_tx_queues(net_dev, num_queues);
if (err) {
	dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
	return err;
}
err = netif_set_real_num_rx_queues(net_dev, num_queues);
if (err) {
	dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
	return err;
}
3044 /* Capabilities listing */
3045 supported |= IFF_LIVE_ADDR_CHANGE;
if (options & DPNI_OPT_NO_MAC_FILTER)
	not_supported |= IFF_UNICAST_FLT;
else
	supported |= IFF_UNICAST_FLT;
3052 net_dev->priv_flags |= supported;
3053 net_dev->priv_flags &= ~not_supported;
net_dev->features = NETIF_F_RXCSUM |
		    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		    NETIF_F_SG | NETIF_F_HIGHDMA |
		    NETIF_F_LLTX;
net_dev->hw_features = net_dev->features;

return 0;
}
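/* Periodically poll the link state when no link IRQ is available */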
3065 static int poll_link_state(void *arg)
3067 struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
while (!kthread_should_stop()) {
	err = link_state_update(priv);
	if (unlikely(err))
		return err;

	msleep(DPAA2_ETH_LINK_STATE_REFRESH);
}

return 0;
}
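/* Threaded DPNI IRQ handler: read the interrupt status and refresh
 * the link state when a link change event is signaled
 */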
3081 static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
3084 struct device *dev = (struct device *)arg;
3085 struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
3086 struct net_device *net_dev = dev_get_drvdata(dev);
3089 err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
3090 DPNI_IRQ_INDEX, &status);
if (unlikely(err)) {
	netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
	return IRQ_HANDLED;
}

if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
	link_state_update(netdev_priv(net_dev));

return IRQ_HANDLED;
}
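/* Allocate the MC interrupt and configure it to fire on link state changes */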
3102 static int setup_irqs(struct fsl_mc_device *ls_dev)
3105 struct fsl_mc_device_irq *irq;
err = fsl_mc_allocate_irqs(ls_dev);
if (err) {
	dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
	return err;
}

irq = ls_dev->irqs[0];
err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
				NULL, dpni_irq0_handler_thread,
				IRQF_NO_SUSPEND | IRQF_ONESHOT,
				dev_name(&ls_dev->dev), &ls_dev->dev);
if (err < 0) {
	dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
	goto free_mc_irq;
}

err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
			DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED);
if (err < 0) {
	dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
	goto free_irq;
}

err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
			  DPNI_IRQ_INDEX, 1);
if (err < 0) {
	dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
	goto free_irq;
}

return 0;

free_irq:
	devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
free_mc_irq:
	fsl_mc_free_irqs(ls_dev);

	return err;
}
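/* Register one NAPI instance per channel */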
3147 static void add_ch_napi(struct dpaa2_eth_priv *priv)
3150 struct dpaa2_eth_channel *ch;
3152 for (i = 0; i < priv->num_channels; i++) {
3153 ch = priv->channel[i];
3154 /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
	       NAPI_POLL_WEIGHT);
	}
}
3160 static void del_ch_napi(struct dpaa2_eth_priv *priv)
3163 struct dpaa2_eth_channel *ch;
3165 for (i = 0; i < priv->num_channels; i++) {
3166 ch = priv->channel[i];
netif_napi_del(&ch->napi);
	}
}
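/* Probe a DPNI object: allocate the net device, configure all the
 * associated MC objects (DPNI, DPIO, DPBP), then register the netdev
 */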
3171 static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
3174 struct net_device *net_dev = NULL;
3175 struct dpaa2_eth_priv *priv = NULL;
3178 dev = &dpni_dev->dev;
3181 net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES);
3183 dev_err(dev, "alloc_etherdev_mq() failed\n");
3187 SET_NETDEV_DEV(net_dev, dev);
3188 dev_set_drvdata(dev, net_dev);
3190 priv = netdev_priv(net_dev);
3191 priv->net_dev = net_dev;
3193 priv->iommu_domain = iommu_get_domain_for_dev(dev);
3195 /* Obtain a MC portal */
err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
			     &priv->mc_io);
if (err) {
	if (err == -ENXIO)
		err = -EPROBE_DEFER;
	else
		dev_err(dev, "MC portal allocation failed\n");
	goto err_portal_alloc;
}
3206 /* MC objects initialization and configuration */
3207 err = setup_dpni(dpni_dev);
3209 goto err_dpni_setup;
err = setup_dpio(priv);
if (err)
	goto err_dpio_setup;

err = setup_dpbp(priv);
if (err)
	goto err_dpbp_setup;

err = bind_dpni(priv);
if (err)
	goto err_bind;

/* Add a NAPI context for each channel */
add_ch_napi(priv);
3228 /* Percpu statistics */
priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
if (!priv->percpu_stats) {
	dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
	err = -ENOMEM;
	goto err_alloc_percpu_stats;
}
priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
if (!priv->percpu_extras) {
	dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
	err = -ENOMEM;
	goto err_alloc_percpu_extras;
}

err = netdev_init(net_dev);
if (err)
	goto err_netdev_init;
3246 /* Configure checksum offload based on current interface flags */
err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
if (err)
	goto err_csum;

err = set_tx_csum(priv, !!(net_dev->features &
			   (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
if (err)
	goto err_csum;

err = alloc_rings(priv);
if (err)
	goto err_alloc_rings;

err = setup_irqs(dpni_dev);
if (err) {
	netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
	priv->poll_thread = kthread_run(poll_link_state, priv,
					"%s_poll_link", net_dev->name);
	if (IS_ERR(priv->poll_thread)) {
		dev_err(dev, "Error starting polling thread\n");
		goto err_poll_thread;
	}
	priv->do_link_poll = true;
}

err = register_netdev(net_dev);
if (err < 0) {
	dev_err(dev, "register_netdev() failed\n");
	goto err_netdev_reg;
}
#ifdef CONFIG_DEBUG_FS
	dpaa2_dbg_add(priv);
#endif

dev_info(dev, "Probed interface %s\n", net_dev->name);
return 0;

err_netdev_reg:
	if (priv->do_link_poll)
		kthread_stop(priv->poll_thread);
	else
		fsl_mc_free_irqs(dpni_dev);
err_poll_thread:
	free_rings(priv);
err_alloc_rings:
err_csum:
err_netdev_init:
	free_percpu(priv->percpu_extras);
err_alloc_percpu_extras:
	free_percpu(priv->percpu_stats);
err_alloc_percpu_stats:
	del_ch_napi(priv);
err_bind:
	free_dpbp(priv);
err_dpbp_setup:
	free_dpio(priv);
err_dpio_setup:
	free_dpni(priv);
err_dpni_setup:
	fsl_mc_portal_free(priv->mc_io);
err_portal_alloc:
	dev_set_drvdata(dev, NULL);
	free_netdev(net_dev);

	return err;
}
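/* Tear down everything set up by probe, in reverse order */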
3315 static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
3318 struct net_device *net_dev;
3319 struct dpaa2_eth_priv *priv;
3322 net_dev = dev_get_drvdata(dev);
3323 priv = netdev_priv(net_dev);
#ifdef CONFIG_DEBUG_FS
	dpaa2_dbg_remove(priv);
#endif
unregister_netdev(net_dev);

if (priv->do_link_poll)
	kthread_stop(priv->poll_thread);
else
	fsl_mc_free_irqs(ls_dev);

free_rings(priv);
free_percpu(priv->percpu_stats);
free_percpu(priv->percpu_extras);

del_ch_napi(priv);
free_dpbp(priv);
free_dpio(priv);
free_dpni(priv);

fsl_mc_portal_free(priv->mc_io);

dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);

free_netdev(net_dev);

return 0;
}
static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpni",
	},
	{ .vendor = 0x0 }
};
3360 MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
static struct fsl_mc_driver dpaa2_eth_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.owner = THIS_MODULE,
	},
	.probe = dpaa2_eth_probe,
	.remove = dpaa2_eth_remove,
	.match_id_table = dpaa2_eth_match_id_table
};
static int __init dpaa2_eth_driver_init(void)
{
	int err;

	dpaa2_eth_dbg_init();
	err = fsl_mc_driver_register(&dpaa2_eth_driver);
	if (err) {
		dpaa2_eth_dbg_exit();
		return err;
	}

	return 0;
}
static void __exit dpaa2_eth_driver_exit(void)
{
	dpaa2_eth_dbg_exit();
	fsl_mc_driver_unregister(&dpaa2_eth_driver);
}
3392 module_init(dpaa2_eth_driver_init);
3393 module_exit(dpaa2_eth_driver_exit);