1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2019 Synopsys, Inc. and/or its affiliates.
4 * stmmac Selftests Support
6 * Author: Jose Abreu <joabreu@synopsys.com>
9 #include <linux/bitrev.h>
10 #include <linux/completion.h>
11 #include <linux/crc32.h>
12 #include <linux/ethtool.h>
14 #include <linux/phy.h>
15 #include <linux/udp.h>
16 #include <net/pkt_cls.h>
17 #include <net/pkt_sched.h>
20 #include <net/tc_act/tc_gact.h>
29 #define STMMAC_TEST_PKT_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) + \
30 sizeof(struct stmmachdr))
31 #define STMMAC_TEST_PKT_MAGIC 0xdeadcafecafedeadULL
32 #define STMMAC_LB_TIMEOUT msecs_to_jiffies(200)
34 struct stmmac_packet_attrs {
57 static u8 stmmac_test_next_id;
59 static struct sk_buff *stmmac_test_get_udp_skb(struct stmmac_priv *priv,
60 struct stmmac_packet_attrs *attr)
62 struct sk_buff *skb = NULL;
63 struct udphdr *uhdr = NULL;
64 struct tcphdr *thdr = NULL;
65 struct stmmachdr *shdr;
70 size = attr->size + STMMAC_TEST_PKT_SIZE;
78 size += sizeof(struct tcphdr);
80 size += sizeof(struct udphdr);
82 if (attr->max_size && (attr->max_size > size))
83 size = attr->max_size;
85 skb = netdev_alloc_skb(priv->dev, size);
92 ehdr = skb_push(skb, ETH_HLEN + 8);
94 ehdr = skb_push(skb, ETH_HLEN + 4);
95 else if (attr->remove_sa)
96 ehdr = skb_push(skb, ETH_HLEN - 6);
98 ehdr = skb_push(skb, ETH_HLEN);
99 skb_reset_mac_header(skb);
101 skb_set_network_header(skb, skb->len);
102 ihdr = skb_put(skb, sizeof(*ihdr));
104 skb_set_transport_header(skb, skb->len);
106 thdr = skb_put(skb, sizeof(*thdr));
108 uhdr = skb_put(skb, sizeof(*uhdr));
110 if (!attr->remove_sa)
111 eth_zero_addr(ehdr->h_source);
112 eth_zero_addr(ehdr->h_dest);
113 if (attr->src && !attr->remove_sa)
114 ether_addr_copy(ehdr->h_source, attr->src);
116 ether_addr_copy(ehdr->h_dest, attr->dst);
118 if (!attr->remove_sa) {
119 ehdr->h_proto = htons(ETH_P_IP);
121 __be16 *ptr = (__be16 *)ehdr;
124 ptr[3] = htons(ETH_P_IP);
130 if (!attr->remove_sa) {
131 tag = (void *)ehdr + ETH_HLEN;
132 proto = (void *)ehdr + (2 * ETH_ALEN);
134 tag = (void *)ehdr + ETH_HLEN - 6;
135 proto = (void *)ehdr + ETH_ALEN;
138 proto[0] = htons(ETH_P_8021Q);
139 tag[0] = htons(attr->vlan_id_out);
140 tag[1] = htons(ETH_P_IP);
141 if (attr->vlan > 1) {
142 proto[0] = htons(ETH_P_8021AD);
143 tag[1] = htons(ETH_P_8021Q);
144 tag[2] = htons(attr->vlan_id_in);
145 tag[3] = htons(ETH_P_IP);
150 thdr->source = htons(attr->sport);
151 thdr->dest = htons(attr->dport);
152 thdr->doff = sizeof(struct tcphdr) / 4;
155 uhdr->source = htons(attr->sport);
156 uhdr->dest = htons(attr->dport);
157 uhdr->len = htons(sizeof(*shdr) + sizeof(*uhdr) + attr->size);
159 uhdr->len = htons(attr->max_size -
160 (sizeof(*ihdr) + sizeof(*ehdr)));
168 ihdr->protocol = IPPROTO_TCP;
170 ihdr->protocol = IPPROTO_UDP;
171 iplen = sizeof(*ihdr) + sizeof(*shdr) + attr->size;
173 iplen += sizeof(*thdr);
175 iplen += sizeof(*uhdr);
178 iplen = attr->max_size - sizeof(*ehdr);
180 ihdr->tot_len = htons(iplen);
182 ihdr->saddr = htonl(attr->ip_src);
183 ihdr->daddr = htonl(attr->ip_dst);
188 shdr = skb_put(skb, sizeof(*shdr));
190 shdr->magic = cpu_to_be64(STMMAC_TEST_PKT_MAGIC);
191 attr->id = stmmac_test_next_id;
192 shdr->id = stmmac_test_next_id++;
195 skb_put(skb, attr->size);
196 if (attr->max_size && (attr->max_size > skb->len))
197 skb_put(skb, attr->max_size - skb->len);
200 skb->ip_summed = CHECKSUM_PARTIAL;
202 thdr->check = ~tcp_v4_check(skb->len, ihdr->saddr, ihdr->daddr, 0);
203 skb->csum_start = skb_transport_header(skb) - skb->head;
204 skb->csum_offset = offsetof(struct tcphdr, check);
206 udp4_hwcsum(skb, ihdr->saddr, ihdr->daddr);
209 skb->protocol = htons(ETH_P_IP);
210 skb->pkt_type = PACKET_HOST;
211 skb->dev = priv->dev;
214 skb->tstamp = ns_to_ktime(attr->timestamp);
/*
 * Build an ARP request skb (who-has attr->ip_dst tell attr->ip_src) via
 * arp_create(), addressed from attr->src to attr->dst. Used by the ARP
 * offload selftest, which expects the MAC to answer with an ARP reply.
 */
219 static struct sk_buff *stmmac_test_get_arp_skb(struct stmmac_priv *priv,
220 struct stmmac_packet_attrs *attr)
222 __be32 ip_src = htonl(attr->ip_src);
223 __be32 ip_dst = htonl(attr->ip_dst);
224 struct sk_buff *skb = NULL;
226 skb = arp_create(ARPOP_REQUEST, ETH_P_ARP, ip_dst, priv->dev, ip_src,
227 NULL, attr->src, attr->dst);
231 skb->pkt_type = PACKET_HOST;
232 skb->dev = priv->dev;
/*
 * Per-test context shared between the transmit path and the packet_type
 * RX handler: the packet attributes to match, the registered handler,
 * and a completion the validator fires on success.
 * NOTE(review): additional fields (ok, double_vlan, vlan_id) are used by
 * the validators below but are not visible in this excerpt.
 */
237 struct stmmac_test_priv {
238 struct stmmac_packet_attrs *packet;
239 struct packet_type pt;
240 struct completion comp;
/*
 * packet_type receive hook for the generic loopback tests. Checks that a
 * looped-back frame matches the attributes of the frame we transmitted
 * (MAC addresses, L4 protocol and dport, magic trailer and packet id)
 * and, on a full match, marks success and completes the waiter.
 * NOTE(review): early-return paths and the cleanup/kfree_skb tail are not
 * visible in this excerpt.
 */
246 static int stmmac_test_loopback_validate(struct sk_buff *skb,
247 struct net_device *ndev,
248 struct packet_type *pt,
249 struct net_device *orig_ndev)
251 struct stmmac_test_priv *tpriv = pt->af_packet_priv;
252 unsigned char *src = tpriv->packet->src;
253 unsigned char *dst = tpriv->packet->dst;
254 struct stmmachdr *shdr;
/* Get a private, linear copy before parsing headers by pointer. */
260 skb = skb_unshare(skb, GFP_ATOMIC);
264 if (skb_linearize(skb))
266 if (skb_headlen(skb) < (STMMAC_TEST_PKT_SIZE - ETH_HLEN))
269 ehdr = (struct ethhdr *)skb_mac_header(skb);
271 if (!ether_addr_equal_unaligned(ehdr->h_dest, dst))
/* SARC tests expect source == dest (HW replaced the SA). */
274 if (tpriv->packet->sarc) {
275 if (!ether_addr_equal_unaligned(ehdr->h_source, ehdr->h_dest))
278 if (!ether_addr_equal_unaligned(ehdr->h_source, src))
/* Double-VLAN frames carry the IP header 4 bytes further in. */
283 if (tpriv->double_vlan)
284 ihdr = (struct iphdr *)(skb_network_header(skb) + 4);
286 if (tpriv->packet->tcp) {
287 if (ihdr->protocol != IPPROTO_TCP)
290 thdr = (struct tcphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
291 if (thdr->dest != htons(tpriv->packet->dport))
294 shdr = (struct stmmachdr *)((u8 *)thdr + sizeof(*thdr));
296 if (ihdr->protocol != IPPROTO_UDP)
299 uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
300 if (uhdr->dest != htons(tpriv->packet->dport))
303 shdr = (struct stmmachdr *)((u8 *)uhdr + sizeof(*uhdr));
/* Final identity checks: magic trailer, optional RSS hash, packet id. */
306 if (shdr->magic != cpu_to_be64(STMMAC_TEST_PKT_MAGIC))
308 if (tpriv->packet->exp_hash && !skb->hash)
310 if (tpriv->packet->id != shdr->id)
314 complete(&tpriv->comp);
/*
 * Core loopback driver: register an ETH_P_IP packet_type handler
 * (stmmac_test_loopback_validate), transmit one UDP test frame with
 * dev_direct_xmit() on attr->queue_mapping, then wait for the validator
 * to signal completion. Returns 0 on validated reception, -ETIMEDOUT
 * otherwise. With attr->dont_wait the handler is never registered and no
 * wait is performed (used to flood frames for the flow-control test).
 */
320 static int __stmmac_test_loopback(struct stmmac_priv *priv,
321 struct stmmac_packet_attrs *attr)
323 struct stmmac_test_priv *tpriv;
324 struct sk_buff *skb = NULL;
327 tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
332 init_completion(&tpriv->comp);
334 tpriv->pt.type = htons(ETH_P_IP);
335 tpriv->pt.func = stmmac_test_loopback_validate;
336 tpriv->pt.dev = priv->dev;
337 tpriv->pt.af_packet_priv = tpriv;
338 tpriv->packet = attr;
340 if (!attr->dont_wait)
341 dev_add_pack(&tpriv->pt);
343 skb = stmmac_test_get_udp_skb(priv, attr);
349 ret = dev_direct_xmit(skb, attr->queue_mapping);
/* Default timeout unless the caller provided one (e.g. TBS test). */
357 attr->timeout = STMMAC_LB_TIMEOUT;
359 wait_for_completion_timeout(&tpriv->comp, attr->timeout);
360 ret = tpriv->ok ? 0 : -ETIMEDOUT;
363 if (!attr->dont_wait)
364 dev_remove_pack(&tpriv->pt);
/* Simplest loopback: send to our own MAC address with default attrs. */
369 static int stmmac_test_mac_loopback(struct stmmac_priv *priv)
371 struct stmmac_packet_attrs attr = { };
373 attr.dst = priv->dev->dev_addr;
374 return __stmmac_test_loopback(priv, &attr);
/*
 * Loopback through the PHY: enable PHY loopback, run the standard
 * loopback test against our own MAC, then disable PHY loopback.
 * Requires an attached phydev (error path for its absence not visible).
 */
377 static int stmmac_test_phy_loopback(struct stmmac_priv *priv)
379 struct stmmac_packet_attrs attr = { };
382 if (!priv->dev->phydev)
385 ret = phy_loopback(priv->dev->phydev, true);
389 attr.dst = priv->dev->dev_addr;
390 ret = __stmmac_test_loopback(priv, &attr);
392 phy_loopback(priv->dev->phydev, false);
/*
 * Validate the MMC (MAC Management Counters) feature: snapshot counters,
 * run a MAC loopback, and check that the good-TX frame counter advanced.
 * Skipped when the HW has no RMON/MMC support (dma_cap.rmon).
 */
396 static int stmmac_test_mmc(struct stmmac_priv *priv)
398 struct stmmac_counters initial, final;
401 memset(&initial, 0, sizeof(initial));
402 memset(&final, 0, sizeof(final));
404 if (!priv->dma_cap.rmon)
407 /* Save previous results into internal struct */
408 stmmac_mmc_read(priv, priv->mmcaddr, &priv->mmc);
410 ret = stmmac_test_mac_loopback(priv);
414 /* These will be loopback results so no need to save them */
415 stmmac_mmc_read(priv, priv->mmcaddr, &final);
418 * The number of MMC counters available depends on HW configuration
419 * so we just use this one to validate the feature. I hope there is
420 * not a version without this counter.
/* Counter must strictly increase after one transmitted frame. */
422 if (final.mmc_tx_framecount_g <= initial.mmc_tx_framecount_g)
/*
 * Validate Energy Efficient Ethernet: snapshot extra stats, run a MAC
 * loopback, then (after the line idles) verify the TX path entered and
 * exited LPI mode by comparing the LPI interrupt counters. Requires
 * dma_cap.eee and an active EEE negotiation.
 * NOTE(review): the retry/wait loop and kfree cleanup paths are not
 * visible in this excerpt.
 */
428 static int stmmac_test_eee(struct stmmac_priv *priv)
430 struct stmmac_extra_stats *initial, *final;
434 if (!priv->dma_cap.eee || !priv->eee_active)
437 initial = kzalloc(sizeof(*initial), GFP_KERNEL)
441 final = kzalloc(sizeof(*final), GFP_KERNEL);
444 goto out_free_initial;
447 memcpy(initial, &priv->xstats, sizeof(*initial));
449 ret = stmmac_test_mac_loopback(priv);
453 /* We have no traffic in the line so, sooner or later it will go LPI */
455 memcpy(final, &priv->xstats, sizeof(*final));
/* Early-exit check while polling: LPI entry count advanced. */
457 if (final->irq_tx_path_in_lpi_mode_n >
458 initial->irq_tx_path_in_lpi_mode_n)
/* After the poll: both LPI entry and exit counters must have advanced. */
468 if (final->irq_tx_path_in_lpi_mode_n <=
469 initial->irq_tx_path_in_lpi_mode_n) {
474 if (final->irq_tx_path_exit_lpi_mode_n <=
475 initial->irq_tx_path_exit_lpi_mode_n) {
/*
 * Filter tests cannot run in promiscuous mode (everything would be
 * received regardless of filters): return 0 when not promiscuous,
 * otherwise warn and fail.
 */
487 static int stmmac_filter_check(struct stmmac_priv *priv)
489 if (!(priv->dev->flags & IFF_PROMISC))
492 netdev_warn(priv->dev, "Test can't be run in promiscuous mode!\n");
/*
 * Check whether @addr's hash-filter bin collides with any address already
 * in the device multicast list. The bin is the bit-reversed CRC32 of the
 * MAC address, shifted by (32 - mcast_bits_log2). Returns false on a
 * collision (a colliding "bad" address would be falsely accepted), true
 * when the address is safe to use as a must-be-filtered address.
 * NOTE(review): the hash_nr intermediate used on line 510 is assigned on
 * a line not visible in this excerpt.
 */
496 static bool stmmac_hash_check(struct stmmac_priv *priv, unsigned char *addr)
498 int mc_offset = 32 - priv->hw->mcast_bits_log2;
499 struct netdev_hw_addr *ha;
502 /* First compute the hash for desired addr */
503 hash = bitrev32(~crc32_le(~0, addr, 6)) >> mc_offset;
505 hash = 1 << (hash & 0x1f);
507 /* Now, check if it collides with any existing one */
508 netdev_for_each_mc_addr(ha, priv->dev) {
509 u32 nr = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN)) >> mc_offset;
510 if (((nr >> 5) == hash_nr) && ((1 << (nr & 0x1f)) == hash))
514 /* No collisions, address is good to go */
/*
 * Perfect-filter collision check: returns true only if @addr matches no
 * entry in the device unicast address list, i.e. the address is safe to
 * use as a must-be-filtered test address.
 */
518 static bool stmmac_perfect_check(struct stmmac_priv *priv, unsigned char *addr)
520 struct netdev_hw_addr *ha;
522 /* Check if it collides with any existing one */
523 netdev_for_each_uc_addr(ha, priv->dev) {
524 if (!memcmp(ha->addr, addr, ETH_ALEN))
528 /* No collisions, address is good to go */
/*
 * Hash filter test: add gd_addr to the MC list (shall be received), then
 * pick a bd_addr whose hash bin does not collide with any programmed
 * address (shall NOT be received). Requires non-promiscuous mode and a
 * free multicast filter bin. The tries loop searches up to 256 candidate
 * last-bytes for a collision-free bd_addr.
 */
532 static int stmmac_test_hfilt(struct stmmac_priv *priv)
534 unsigned char gd_addr[ETH_ALEN] = {0xf1, 0xee, 0xdd, 0xcc, 0xbb, 0xaa};
535 unsigned char bd_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff};
536 struct stmmac_packet_attrs attr = { };
537 int ret, tries = 256;
539 ret = stmmac_filter_check(priv);
543 if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
547 /* We only need to check the bd_addr for collisions */
548 bd_addr[ETH_ALEN - 1] = tries;
549 if (stmmac_hash_check(priv, bd_addr))
556 ret = dev_mc_add(priv->dev, gd_addr);
562 /* Shall receive packet */
563 ret = __stmmac_test_loopback(priv, &attr);
569 /* Shall NOT receive packet */
570 ret = __stmmac_test_loopback(priv, &attr);
/* Timeout here means the filter correctly dropped the frame. */
571 ret = ret ? 0 : -EINVAL;
574 dev_mc_del(priv->dev, gd_addr);
/*
 * Perfect (unicast) filter test: add gd_addr to the UC list (shall be
 * received), pick a bd_addr that matches no programmed UC entry (shall
 * NOT be received). Requires non-promiscuous mode and a free unicast
 * filter entry.
 */
578 static int stmmac_test_pfilt(struct stmmac_priv *priv)
580 unsigned char gd_addr[ETH_ALEN] = {0xf0, 0x01, 0x44, 0x55, 0x66, 0x77};
581 unsigned char bd_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff};
582 struct stmmac_packet_attrs attr = { };
583 int ret, tries = 256;
585 if (stmmac_filter_check(priv))
587 if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
591 /* We only need to check the bd_addr for collisions */
592 bd_addr[ETH_ALEN - 1] = tries;
593 if (stmmac_perfect_check(priv, bd_addr))
600 ret = dev_uc_add(priv->dev, gd_addr);
606 /* Shall receive packet */
607 ret = __stmmac_test_loopback(priv, &attr);
613 /* Shall NOT receive packet */
614 ret = __stmmac_test_loopback(priv, &attr);
/* Timeout here means the filter correctly dropped the frame. */
615 ret = ret ? 0 : -EINVAL;
618 dev_uc_del(priv->dev, gd_addr);
/*
 * Multicast-filter isolation test: program only a UNICAST address
 * (uc_addr, shall be received) and verify a non-colliding MULTICAST
 * address (mc_addr) is dropped — i.e. the MC filter does not leak
 * frames when only UC filtering is in use.
 */
622 static int stmmac_test_mcfilt(struct stmmac_priv *priv)
624 unsigned char uc_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff};
625 unsigned char mc_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff};
626 struct stmmac_packet_attrs attr = { };
627 int ret, tries = 256;
629 if (stmmac_filter_check(priv))
631 if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
633 if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
637 /* We only need to check the mc_addr for collisions */
638 mc_addr[ETH_ALEN - 1] = tries;
639 if (stmmac_hash_check(priv, mc_addr))
646 ret = dev_uc_add(priv->dev, uc_addr);
652 /* Shall receive packet */
653 ret = __stmmac_test_loopback(priv, &attr);
659 /* Shall NOT receive packet */
660 ret = __stmmac_test_loopback(priv, &attr);
/* Timeout here means the filter correctly dropped the frame. */
661 ret = ret ? 0 : -EINVAL;
664 dev_uc_del(priv->dev, uc_addr);
/*
 * Unicast-filter isolation test (mirror of stmmac_test_mcfilt): program
 * only a MULTICAST address (mc_addr, shall be received) and verify a
 * non-colliding UNICAST address (uc_addr) is dropped.
 */
668 static int stmmac_test_ucfilt(struct stmmac_priv *priv)
670 unsigned char uc_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff};
671 unsigned char mc_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff};
672 struct stmmac_packet_attrs attr = { };
673 int ret, tries = 256;
675 if (stmmac_filter_check(priv))
677 if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
679 if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
683 /* We only need to check the uc_addr for collisions */
684 uc_addr[ETH_ALEN - 1] = tries;
685 if (stmmac_perfect_check(priv, uc_addr))
692 ret = dev_mc_add(priv->dev, mc_addr);
698 /* Shall receive packet */
699 ret = __stmmac_test_loopback(priv, &attr);
705 /* Shall NOT receive packet */
706 ret = __stmmac_test_loopback(priv, &attr);
/* Timeout here means the filter correctly dropped the frame. */
707 ret = ret ? 0 : -EINVAL;
710 dev_mc_del(priv->dev, mc_addr);
/*
 * packet_type receive hook for the flow-control test: accept a frame only
 * if it is an ETH_P_PAUSE frame sourced from our own MAC address (i.e. a
 * PAUSE frame generated by the MAC itself and looped back), then signal
 * completion.
 */
714 static int stmmac_test_flowctrl_validate(struct sk_buff *skb,
715 struct net_device *ndev,
716 struct packet_type *pt,
717 struct net_device *orig_ndev)
719 struct stmmac_test_priv *tpriv = pt->af_packet_priv;
722 ehdr = (struct ethhdr *)skb_mac_header(skb);
723 if (!ether_addr_equal_unaligned(ehdr->h_source, orig_ndev->dev_addr))
725 if (ehdr->h_proto != htons(ETH_P_PAUSE))
729 complete(&tpriv->comp);
/*
 * Flow-control test: stop all RX DMA queues, flood enough frames to fill
 * the RX FIFO so the MAC emits a PAUSE frame, then restart RX and wait
 * for the PAUSE frame to be observed by the validator. Needs a PHY with
 * pause capability. Promiscuous mode plus the 01:80:C2:00:00:01 MC
 * address are needed so the PAUSE frame reaches the stack.
 * NOTE(review): pkt_count scaling and some error paths are not visible in
 * this excerpt.
 */
735 static int stmmac_test_flowctrl(struct stmmac_priv *priv)
737 unsigned char paddr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00, 0x01};
738 struct phy_device *phydev = priv->dev->phydev;
739 u32 rx_cnt = priv->plat->rx_queues_to_use;
740 struct stmmac_test_priv *tpriv;
741 unsigned int pkt_count;
744 if (!phydev || (!phydev->pause && !phydev->asym_pause))
747 tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
752 init_completion(&tpriv->comp);
753 tpriv->pt.type = htons(ETH_P_PAUSE);
754 tpriv->pt.func = stmmac_test_flowctrl_validate;
755 tpriv->pt.dev = priv->dev;
756 tpriv->pt.af_packet_priv = tpriv;
757 dev_add_pack(&tpriv->pt);
759 /* Compute minimum number of packets to make FIFO full */
760 pkt_count = priv->plat->rx_fifo_size;
762 pkt_count = priv->dma_cap.rx_fifo_size;
/* Stop RX so incoming frames back up into the FIFO. */
766 for (i = 0; i < rx_cnt; i++)
767 stmmac_stop_rx(priv, priv->ioaddr, i);
769 ret = dev_set_promiscuity(priv->dev, 1);
773 ret = dev_mc_add(priv->dev, paddr);
/* Flood: dont_wait avoids registering a validator per frame. */
777 for (i = 0; i < pkt_count; i++) {
778 struct stmmac_packet_attrs attr = { };
780 attr.dst = priv->dev->dev_addr;
781 attr.dont_wait = true;
784 ret = __stmmac_test_loopback(priv, &attr);
791 /* Wait for some time in case RX Watchdog is enabled */
/* Restart RX DMA and kick NAPI so queued frames are processed. */
794 for (i = 0; i < rx_cnt; i++) {
795 struct stmmac_channel *ch = &priv->channel[i];
798 tail = priv->rx_queue[i].dma_rx_phy +
799 (DMA_RX_SIZE * sizeof(struct dma_desc));
801 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, tail, i);
802 stmmac_start_rx(priv, priv->ioaddr, i);
805 napi_reschedule(&ch->rx_napi);
809 wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
810 ret = tpriv->ok ? 0 : -ETIMEDOUT;
813 dev_mc_del(priv->dev, paddr);
814 dev_set_promiscuity(priv->dev, -1);
815 dev_remove_pack(&tpriv->pt);
/*
 * RSS test: run a loopback with exp_hash set so the validator requires a
 * non-zero skb->hash on reception. Skipped unless the HW supports RSS
 * and it is currently enabled.
 */
820 static int stmmac_test_rss(struct stmmac_priv *priv)
822 struct stmmac_packet_attrs attr = { };
824 if (!priv->dma_cap.rssen || !priv->rss.enable)
827 attr.dst = priv->dev->dev_addr;
828 attr.exp_hash = true;
832 return __stmmac_test_loopback(priv, &attr);
/*
 * packet_type receive hook shared by the VLAN filter and VLAN offload
 * tests. When tpriv->vlan_id is set, the frame must carry a HW-stripped
 * tag of the expected protocol (802.1ad for double-VLAN runs) and the
 * expected VLAN id — a mismatching id means the VLAN filter failed.
 * Then the usual UDP/magic/dest checks apply before completing.
 */
835 static int stmmac_test_vlan_validate(struct sk_buff *skb,
836 struct net_device *ndev,
837 struct packet_type *pt,
838 struct net_device *orig_ndev)
840 struct stmmac_test_priv *tpriv = pt->af_packet_priv;
841 struct stmmachdr *shdr;
847 proto = tpriv->double_vlan ? ETH_P_8021AD : ETH_P_8021Q;
849 skb = skb_unshare(skb, GFP_ATOMIC);
853 if (skb_linearize(skb))
855 if (skb_headlen(skb) < (STMMAC_TEST_PKT_SIZE - ETH_HLEN))
857 if (tpriv->vlan_id) {
858 if (skb->vlan_proto != htons(proto))
860 if (skb->vlan_tci != tpriv->vlan_id) {
861 /* Means filter did not work. */
/* Complete WITHOUT setting ok: wakes the waiter into a failure. */
863 complete(&tpriv->comp);
868 ehdr = (struct ethhdr *)skb_mac_header(skb);
869 if (!ether_addr_equal_unaligned(ehdr->h_dest, tpriv->packet->dst))
/* Double-VLAN frames carry the IP header 4 bytes further in. */
873 if (tpriv->double_vlan)
874 ihdr = (struct iphdr *)(skb_network_header(skb) + 4);
875 if (ihdr->protocol != IPPROTO_UDP)
878 uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
879 if (uhdr->dest != htons(tpriv->packet->dport))
882 shdr = (struct stmmachdr *)((u8 *)uhdr + sizeof(*uhdr));
883 if (shdr->magic != cpu_to_be64(STMMAC_TEST_PKT_MAGIC))
887 complete(&tpriv->comp);
/*
 * Single-VLAN filter test: register VID 0x123 with the 802.1q filter,
 * then transmit frames tagged 0x123..0x123+3. Only the registered VID
 * (i == 0) must be received; the other three must time out. 0x123 is
 * chosen so adjacent IDs fall in different hash bins (see comment at
 * original lines 915-916).
 */
894 static int __stmmac_test_vlanfilt(struct stmmac_priv *priv)
896 struct stmmac_packet_attrs attr = { };
897 struct stmmac_test_priv *tpriv;
898 struct sk_buff *skb = NULL;
901 tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
906 init_completion(&tpriv->comp);
908 tpriv->pt.type = htons(ETH_P_IP);
909 tpriv->pt.func = stmmac_test_vlan_validate;
910 tpriv->pt.dev = priv->dev;
911 tpriv->pt.af_packet_priv = tpriv;
912 tpriv->packet = &attr;
915 * As we use HASH filtering, false positives may appear. This is a
916 * specially chosen ID so that adjacent IDs (+4) have different
919 tpriv->vlan_id = 0x123;
920 dev_add_pack(&tpriv->pt);
922 ret = vlan_vid_add(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id);
926 for (i = 0; i < 4; i++) {
928 attr.vlan_id_out = tpriv->vlan_id + i;
929 attr.dst = priv->dev->dev_addr;
933 skb = stmmac_test_get_udp_skb(priv, &attr);
939 ret = dev_direct_xmit(skb, 0);
943 wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
944 ret = tpriv->ok ? 0 : -ETIMEDOUT;
/* i > 0: reception means the filter leaked a non-registered VID. */
947 } else if (!ret && i) {
958 vlan_vid_del(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id);
960 dev_remove_pack(&tpriv->pt);
/* VLAN hash-filter test entry: requires dma_cap.vlhash. */
965 static int stmmac_test_vlanfilt(struct stmmac_priv *priv)
967 if (!priv->dma_cap.vlhash)
970 return __stmmac_test_vlanfilt(priv);
/*
 * Run the same VLAN filter test but force the perfect-match path by
 * temporarily clearing dma_cap.vlhash (restored afterwards). Requires
 * CTAG filtering to be enabled on the netdev.
 */
973 static int stmmac_test_vlanfilt_perfect(struct stmmac_priv *priv)
975 int ret, prev_cap = priv->dma_cap.vlhash;
977 if (!(priv->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
980 priv->dma_cap.vlhash = 0;
981 ret = __stmmac_test_vlanfilt(priv);
982 priv->dma_cap.vlhash = prev_cap;
/*
 * Double-VLAN (802.1ad, S-VLAN) filter test — mirror of
 * __stmmac_test_vlanfilt but registering/stripping an outer 802.1ad tag
 * and listening on ETH_P_8021Q (the inner tag remains in the frame).
 * Only the registered outer VID must be received.
 */
987 static int __stmmac_test_dvlanfilt(struct stmmac_priv *priv)
989 struct stmmac_packet_attrs attr = { };
990 struct stmmac_test_priv *tpriv;
991 struct sk_buff *skb = NULL;
994 tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
999 tpriv->double_vlan = true;
1000 init_completion(&tpriv->comp);
1002 tpriv->pt.type = htons(ETH_P_8021Q);
1003 tpriv->pt.func = stmmac_test_vlan_validate;
1004 tpriv->pt.dev = priv->dev;
1005 tpriv->pt.af_packet_priv = tpriv;
1006 tpriv->packet = &attr;
1009 * As we use HASH filtering, false positives may appear. This is a
1010 * specially chosen ID so that adjacent IDs (+4) have different
1013 tpriv->vlan_id = 0x123;
1014 dev_add_pack(&tpriv->pt);
1016 ret = vlan_vid_add(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id);
1020 for (i = 0; i < 4; i++) {
1022 attr.vlan_id_out = tpriv->vlan_id + i;
1023 attr.dst = priv->dev->dev_addr;
1027 skb = stmmac_test_get_udp_skb(priv, &attr);
1033 ret = dev_direct_xmit(skb, 0);
1037 wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
1038 ret = tpriv->ok ? 0 : -ETIMEDOUT;
/* i > 0: reception means the filter leaked a non-registered VID. */
1041 } else if (!ret && i) {
1052 vlan_vid_del(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id);
1054 dev_remove_pack(&tpriv->pt);
/* Double-VLAN hash-filter test entry: requires dma_cap.vlhash. */
1059 static int stmmac_test_dvlanfilt(struct stmmac_priv *priv)
1061 if (!priv->dma_cap.vlhash)
1064 return __stmmac_test_dvlanfilt(priv);
/*
 * Double-VLAN filter test via the perfect-match path: temporarily clear
 * dma_cap.vlhash (restored afterwards). Requires STAG filtering to be
 * enabled on the netdev.
 */
1067 static int stmmac_test_dvlanfilt_perfect(struct stmmac_priv *priv)
1069 int ret, prev_cap = priv->dma_cap.vlhash;
1071 if (!(priv->dev->features & NETIF_F_HW_VLAN_STAG_FILTER))
1074 priv->dma_cap.vlhash = 0;
1075 ret = __stmmac_test_dvlanfilt(priv);
1076 priv->dma_cap.vlhash = prev_cap;
1081 #ifdef CONFIG_NET_CLS_ACT
/*
 * Flexible RX Parser (FRP) test: build a synthetic cls_u32 rule that
 * drops frames whose bytes at offset 6 match 0xdeadbeef (the start of
 * our test source MAC), install it via stmmac_tc_setup_cls_u32(), send a
 * frame with that source address and expect it to be dropped, then
 * delete the rule. Requires TC offload and dma_cap.frpsel.
 * NOTE(review): nk's value, the addr usage and several cleanup labels
 * are not visible in this excerpt.
 */
1082 static int stmmac_test_rxp(struct stmmac_priv *priv)
1084 unsigned char addr[ETH_ALEN] = {0xde, 0xad, 0xbe, 0xef, 0x00, 0x00};
1085 struct tc_cls_u32_offload cls_u32 = { };
1086 struct stmmac_packet_attrs attr = { };
1087 struct tc_action **actions, *act;
1088 struct tc_u32_sel *sel;
1089 struct tcf_exts *exts;
1092 if (!tc_can_offload(priv->dev))
1094 if (!priv->dma_cap.frpsel)
1097 sel = kzalloc(struct_size(sel, keys, nk), GFP_KERNEL);
1101 exts = kzalloc(sizeof(*exts), GFP_KERNEL);
1107 actions = kzalloc(nk * sizeof(*actions), GFP_KERNEL);
1113 act = kzalloc(nk * sizeof(*act), GFP_KERNEL);
1116 goto cleanup_actions;
1119 cls_u32.command = TC_CLSU32_NEW_KNODE;
1120 cls_u32.common.chain_index = 0;
1121 cls_u32.common.protocol = htons(ETH_P_ALL);
1122 cls_u32.knode.exts = exts;
1123 cls_u32.knode.sel = sel;
1124 cls_u32.knode.handle = 0x123;
1126 exts->nr_actions = nk;
1127 exts->actions = actions;
/* Every action is a gact DROP. */
1128 for (i = 0; i < nk; i++) {
1129 struct tcf_gact *gact = to_gact(&act[i]);
1131 actions[i] = &act[i];
1132 gact->tcf_action = TC_ACT_SHOT;
/* Match 4 bytes at offset 6 (source MAC prefix) == 0xdeadbeef. */
1137 sel->keys[0].off = 6;
1138 sel->keys[0].val = htonl(0xdeadbeef);
1139 sel->keys[0].mask = ~0x0;
1141 ret = stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);
1145 attr.dst = priv->dev->dev_addr;
1148 ret = __stmmac_test_loopback(priv, &attr);
1149 ret = ret ? 0 : -EINVAL; /* Shall NOT receive packet */
1151 cls_u32.command = TC_CLSU32_DELETE_KNODE;
1152 stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);
/* Stub when CONFIG_NET_CLS_ACT is disabled (body not visible here). */
1165 static int stmmac_test_rxp(struct stmmac_priv *priv)
/*
 * Descriptor-based Source Address Insertion test: transmit with the SA
 * stripped (remove_sa) and sarc_type 0x1 so the HW inserts the SA from
 * the TX descriptor; the validator then checks the received SA. Requires
 * dma_cap.vlins. sarc_type is restored to 0 afterwards.
 */
1171 static int stmmac_test_desc_sai(struct stmmac_priv *priv)
1173 unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
1174 struct stmmac_packet_attrs attr = { };
1177 if (!priv->dma_cap.vlins)
1180 attr.remove_sa = true;
1183 attr.dst = priv->dev->dev_addr;
1185 priv->sarc_type = 0x1;
1187 ret = __stmmac_test_loopback(priv, &attr);
1189 priv->sarc_type = 0x0;
/*
 * Descriptor-based Source Address Replacement test: sarc_type 0x2 makes
 * the HW replace the (zero) SA in the frame; the validator checks the
 * looped-back SA. Requires dma_cap.vlins. sarc_type restored afterwards.
 */
1193 static int stmmac_test_desc_sar(struct stmmac_priv *priv)
1195 unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
1196 struct stmmac_packet_attrs attr = { };
1199 if (!priv->dma_cap.vlins)
1204 attr.dst = priv->dev->dev_addr;
1206 priv->sarc_type = 0x2;
1208 ret = __stmmac_test_loopback(priv, &attr);
1210 priv->sarc_type = 0x0;
/*
 * Register-based Source Address Insertion test: program SARC mode 0x2
 * directly via stmmac_sarc_configure() and transmit with the SA stripped;
 * the HW inserts the MAC-register SA. Mode is reset to 0 afterwards.
 * Requires dma_cap.vlins.
 */
1214 static int stmmac_test_reg_sai(struct stmmac_priv *priv)
1216 unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
1217 struct stmmac_packet_attrs attr = { };
1220 if (!priv->dma_cap.vlins)
1223 attr.remove_sa = true;
1226 attr.dst = priv->dev->dev_addr;
1228 if (stmmac_sarc_configure(priv, priv->ioaddr, 0x2))
1231 ret = __stmmac_test_loopback(priv, &attr);
1233 stmmac_sarc_configure(priv, priv->ioaddr, 0x0);
/*
 * Register-based Source Address Replacement test: SARC mode 0x3 makes the
 * HW overwrite the (zero) SA with the MAC-register address. Mode is reset
 * to 0 afterwards. Requires dma_cap.vlins.
 */
1237 static int stmmac_test_reg_sar(struct stmmac_priv *priv)
1239 unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
1240 struct stmmac_packet_attrs attr = { };
1243 if (!priv->dma_cap.vlins)
1248 attr.dst = priv->dev->dev_addr;
1250 if (stmmac_sarc_configure(priv, priv->ioaddr, 0x3))
1253 ret = __stmmac_test_loopback(priv, &attr);
1255 stmmac_sarc_configure(priv, priv->ioaddr, 0x0);
/*
 * VLAN TX-offload insertion test, shared by the CTAG (svlan=false) and
 * STAG (svlan=true) variants: hand the skb to the stack with an
 * hwaccel VLAN tag (tag NOT in the payload) so the MAC inserts it on TX,
 * then verify via loopback that the tag came back. Requires
 * dma_cap.vlins.
 */
1259 static int stmmac_test_vlanoff_common(struct stmmac_priv *priv, bool svlan)
1261 struct stmmac_packet_attrs attr = { };
1262 struct stmmac_test_priv *tpriv;
1263 struct sk_buff *skb = NULL;
1267 if (!priv->dma_cap.vlins)
1270 tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
1274 proto = svlan ? ETH_P_8021AD : ETH_P_8021Q;
1277 tpriv->double_vlan = svlan;
1278 init_completion(&tpriv->comp);
/* For svlan, RX delivers an 802.1q-typed frame (inner tag remains). */
1280 tpriv->pt.type = svlan ? htons(ETH_P_8021Q) : htons(ETH_P_IP);
1281 tpriv->pt.func = stmmac_test_vlan_validate;
1282 tpriv->pt.dev = priv->dev;
1283 tpriv->pt.af_packet_priv = tpriv;
1284 tpriv->packet = &attr;
1285 tpriv->vlan_id = 0x123;
1286 dev_add_pack(&tpriv->pt);
1288 ret = vlan_vid_add(priv->dev, htons(proto), tpriv->vlan_id);
1292 attr.dst = priv->dev->dev_addr;
1294 skb = stmmac_test_get_udp_skb(priv, &attr);
/* Tag goes in skb metadata, not the payload: HW must insert it. */
1300 __vlan_hwaccel_put_tag(skb, htons(proto), tpriv->vlan_id);
1301 skb->protocol = htons(proto);
1303 ret = dev_direct_xmit(skb, 0);
1307 wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
1308 ret = tpriv->ok ? 0 : -ETIMEDOUT;
1311 vlan_vid_del(priv->dev, htons(proto), tpriv->vlan_id);
1313 dev_remove_pack(&tpriv->pt);
/* CTAG (802.1q) VLAN insertion offload test. */
1318 static int stmmac_test_vlanoff(struct stmmac_priv *priv)
1320 return stmmac_test_vlanoff_common(priv, false);
/* STAG (802.1ad) VLAN insertion offload test: requires dma_cap.dvlan. */
1323 static int stmmac_test_svlanoff(struct stmmac_priv *priv)
1325 if (!priv->dma_cap.dvlan)
1327 return stmmac_test_vlanoff_common(priv, true);
1330 #ifdef CONFIG_NET_CLS_ACT
/*
 * Layer-3 filter test: build a synthetic flow_cls_offload DROP rule
 * matching IPv4 src/dst per the masks, verify the frame is received
 * before the rule is installed and dropped after, then destroy the rule.
 * RSS is temporarily disabled (it would steer around the filter) and
 * restored at the end. Requires TC offload and dma_cap.l3l4fnum.
 * NOTE(review): attr.ip_src/ip_dst assignments and cleanup labels are
 * not visible in this excerpt.
 */
1331 static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
1332 u32 dst_mask, u32 src_mask)
1334 struct flow_dissector_key_ipv4_addrs key, mask;
1335 unsigned long dummy_cookie = 0xdeadbeef;
1336 struct stmmac_packet_attrs attr = { };
1337 struct flow_dissector *dissector;
1338 struct flow_cls_offload *cls;
1339 int ret, old_enable = 0;
1340 struct flow_rule *rule;
1342 if (!tc_can_offload(priv->dev))
1344 if (!priv->dma_cap.l3l4fnum)
1346 if (priv->rss.enable) {
1347 old_enable = priv->rss.enable;
1348 priv->rss.enable = false;
1349 stmmac_rss_configure(priv, priv->hw, NULL,
1350 priv->plat->rx_queues_to_use);
1353 dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
1359 dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_IPV4_ADDRS);
1360 dissector->offset[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = 0;
1362 cls = kzalloc(sizeof(*cls), GFP_KERNEL);
1365 goto cleanup_dissector;
1368 cls->common.chain_index = 0;
1369 cls->command = FLOW_CLS_REPLACE;
1370 cls->cookie = dummy_cookie;
1372 rule = kzalloc(struct_size(rule, action.entries, 1), GFP_KERNEL);
1378 rule->match.dissector = dissector;
1379 rule->match.key = (void *)&key;
1380 rule->match.mask = (void *)&mask;
1382 key.src = htonl(src);
1383 key.dst = htonl(dst);
1384 mask.src = src_mask;
1385 mask.dst = dst_mask;
/* Single DROP action. */
1389 rule->action.entries[0].id = FLOW_ACTION_DROP;
1390 rule->action.entries[0].hw_stats = FLOW_ACTION_HW_STATS_ANY;
1391 rule->action.num_entries = 1;
1393 attr.dst = priv->dev->dev_addr;
1397 /* Shall receive packet */
1398 ret = __stmmac_test_loopback(priv, &attr);
1402 ret = stmmac_tc_setup_cls(priv, priv, cls);
1406 /* Shall NOT receive packet */
1407 ret = __stmmac_test_loopback(priv, &attr);
1408 ret = ret ? 0 : -EINVAL;
1410 cls->command = FLOW_CLS_DESTROY;
1411 stmmac_tc_setup_cls(priv, priv, cls);
/* Restore the RSS state saved on entry. */
1420 priv->rss.enable = old_enable;
1421 stmmac_rss_configure(priv, priv->hw, &priv->rss,
1422 priv->plat->rx_queues_to_use);
/* Stub when CONFIG_NET_CLS_ACT is disabled (body not visible here). */
1428 static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
1429 u32 dst_mask, u32 src_mask)
/* L3 filter on destination IP address only (full mask on dst). */
1435 static int stmmac_test_l3filt_da(struct stmmac_priv *priv)
1437 u32 addr = 0x10203040;
1439 return __stmmac_test_l3filt(priv, addr, 0, ~0, 0);
/* L3 filter on source IP address only (full mask on src). */
1442 static int stmmac_test_l3filt_sa(struct stmmac_priv *priv)
1444 u32 addr = 0x10203040;
1446 return __stmmac_test_l3filt(priv, 0, addr, 0, ~0);
1449 #ifdef CONFIG_NET_CLS_ACT
/*
 * Layer-4 filter test: like __stmmac_test_l3filt but matching on TCP/UDP
 * ports via FLOW_DISSECTOR_KEY_BASIC (ip_proto) + KEY_PORTS. Installs a
 * DROP rule, verifies receive-before/drop-after, destroys the rule, and
 * restores the temporarily-disabled RSS. Requires TC offload and
 * dma_cap.l3l4fnum.
 * NOTE(review): attr.sport/dport/tcp assignments and cleanup labels are
 * not visible in this excerpt.
 */
1450 static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src,
1451 u32 dst_mask, u32 src_mask, bool udp)
/* Key and mask layouts must agree: basic (ip_proto) then ports. */
1454 struct flow_dissector_key_basic bkey;
1455 struct flow_dissector_key_ports key;
1456 } __aligned(BITS_PER_LONG / 8) keys;
1458 struct flow_dissector_key_basic bmask;
1459 struct flow_dissector_key_ports mask;
1460 } __aligned(BITS_PER_LONG / 8) masks;
1461 unsigned long dummy_cookie = 0xdeadbeef;
1462 struct stmmac_packet_attrs attr = { };
1463 struct flow_dissector *dissector;
1464 struct flow_cls_offload *cls;
1465 int ret, old_enable = 0;
1466 struct flow_rule *rule;
1468 if (!tc_can_offload(priv->dev))
1470 if (!priv->dma_cap.l3l4fnum)
1472 if (priv->rss.enable) {
1473 old_enable = priv->rss.enable;
1474 priv->rss.enable = false;
1475 stmmac_rss_configure(priv, priv->hw, NULL,
1476 priv->plat->rx_queues_to_use);
1479 dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
1485 dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_BASIC);
1486 dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_PORTS);
1487 dissector->offset[FLOW_DISSECTOR_KEY_BASIC] = 0;
1488 dissector->offset[FLOW_DISSECTOR_KEY_PORTS] = offsetof(typeof(keys), key);
1490 cls = kzalloc(sizeof(*cls), GFP_KERNEL);
1493 goto cleanup_dissector;
1496 cls->common.chain_index = 0;
1497 cls->command = FLOW_CLS_REPLACE;
1498 cls->cookie = dummy_cookie;
1500 rule = kzalloc(struct_size(rule, action.entries, 1), GFP_KERNEL);
1506 rule->match.dissector = dissector;
1507 rule->match.key = (void *)&keys;
1508 rule->match.mask = (void *)&masks;
1510 keys.bkey.ip_proto = udp ? IPPROTO_UDP : IPPROTO_TCP;
1511 keys.key.src = htons(src);
1512 keys.key.dst = htons(dst);
1513 masks.mask.src = src_mask;
1514 masks.mask.dst = dst_mask;
/* Single DROP action. */
1518 rule->action.entries[0].id = FLOW_ACTION_DROP;
1519 rule->action.entries[0].hw_stats = FLOW_ACTION_HW_STATS_ANY;
1520 rule->action.num_entries = 1;
1522 attr.dst = priv->dev->dev_addr;
1528 /* Shall receive packet */
1529 ret = __stmmac_test_loopback(priv, &attr);
1533 ret = stmmac_tc_setup_cls(priv, priv, cls);
1537 /* Shall NOT receive packet */
1538 ret = __stmmac_test_loopback(priv, &attr);
1539 ret = ret ? 0 : -EINVAL;
1541 cls->command = FLOW_CLS_DESTROY;
1542 stmmac_tc_setup_cls(priv, priv, cls);
/* Restore the RSS state saved on entry. */
1551 priv->rss.enable = old_enable;
1552 stmmac_rss_configure(priv, priv->hw, &priv->rss,
1553 priv->plat->rx_queues_to_use);
/* Stub when CONFIG_NET_CLS_ACT is disabled (body not visible here). */
1559 static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src,
1560 u32 dst_mask, u32 src_mask, bool udp)
/* L4 filter on TCP destination port only. */
1566 static int stmmac_test_l4filt_da_tcp(struct stmmac_priv *priv)
1568 u16 dummy_port = 0x123;
1570 return __stmmac_test_l4filt(priv, dummy_port, 0, ~0, 0, false);
/* L4 filter on TCP source port only. */
1573 static int stmmac_test_l4filt_sa_tcp(struct stmmac_priv *priv)
1575 u16 dummy_port = 0x123;
1577 return __stmmac_test_l4filt(priv, 0, dummy_port, 0, ~0, false);
/* L4 filter on UDP destination port only. */
1580 static int stmmac_test_l4filt_da_udp(struct stmmac_priv *priv)
1582 u16 dummy_port = 0x123;
1584 return __stmmac_test_l4filt(priv, dummy_port, 0, ~0, 0, true);
/* L4 filter on UDP source port only. */
1587 static int stmmac_test_l4filt_sa_udp(struct stmmac_priv *priv)
1589 u16 dummy_port = 0x123;
1591 return __stmmac_test_l4filt(priv, 0, dummy_port, 0, ~0, true);
/*
 * packet_type receive hook for the ARP offload test: accept only an ARP
 * REPLY addressed to the MAC we used as the ARP request's source, then
 * signal completion.
 */
1594 static int stmmac_test_arp_validate(struct sk_buff *skb,
1595 struct net_device *ndev,
1596 struct packet_type *pt,
1597 struct net_device *orig_ndev)
1599 struct stmmac_test_priv *tpriv = pt->af_packet_priv;
1600 struct ethhdr *ehdr;
1601 struct arphdr *ahdr;
1603 ehdr = (struct ethhdr *)skb_mac_header(skb);
1604 if (!ether_addr_equal_unaligned(ehdr->h_dest, tpriv->packet->src))
1607 ahdr = arp_hdr(skb);
1608 if (ahdr->ar_op != htons(ARPOP_REPLY))
1612 complete(&tpriv->comp);
/*
 * ARP offload test: program ip_addr into the MAC's ARP offload engine,
 * loop back an ARP request for that address, and expect the HW to
 * generate the ARP reply, caught by stmmac_test_arp_validate. Promiscuous
 * mode is needed so the reply (addressed to the fake src MAC) reaches
 * the stack. Offload and promiscuity are undone on exit. Requires
 * dma_cap.arpoffsel.
 */
1618 static int stmmac_test_arpoffload(struct stmmac_priv *priv)
1620 unsigned char src[ETH_ALEN] = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06};
1621 unsigned char dst[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
1622 struct stmmac_packet_attrs attr = { };
1623 struct stmmac_test_priv *tpriv;
1624 struct sk_buff *skb = NULL;
1625 u32 ip_addr = 0xdeadcafe;
1626 u32 ip_src = 0xdeadbeef;
1629 if (!priv->dma_cap.arpoffsel)
1632 tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
1637 init_completion(&tpriv->comp);
1639 tpriv->pt.type = htons(ETH_P_ARP);
1640 tpriv->pt.func = stmmac_test_arp_validate;
1641 tpriv->pt.dev = priv->dev;
1642 tpriv->pt.af_packet_priv = tpriv;
1643 tpriv->packet = &attr;
1644 dev_add_pack(&tpriv->pt);
1647 attr.ip_src = ip_src;
1649 attr.ip_dst = ip_addr;
1651 skb = stmmac_test_get_arp_skb(priv, &attr);
1657 ret = stmmac_set_arp_offload(priv, priv->hw, true, ip_addr);
1661 ret = dev_set_promiscuity(priv->dev, 1);
1665 ret = dev_direct_xmit(skb, 0);
1667 goto cleanup_promisc;
1669 wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
1670 ret = tpriv->ok ? 0 : -ETIMEDOUT;
1673 dev_set_promiscuity(priv->dev, -1);
1675 stmmac_set_arp_offload(priv, priv->hw, false, 0x0);
1676 dev_remove_pack(&tpriv->pt);
/*
 * Jumbo-frame loopback on a given TX queue: pad the frame up to the DMA
 * buffer size minus the FCS the MAC appends.
 */
1681 static int __stmmac_test_jumbo(struct stmmac_priv *priv, u16 queue)
1683 struct stmmac_packet_attrs attr = { };
1684 int size = priv->dma_buf_sz;
1686 attr.dst = priv->dev->dev_addr;
1687 attr.max_size = size - ETH_FCS_LEN;
1688 attr.queue_mapping = queue;
1690 return __stmmac_test_loopback(priv, &attr);
/* Jumbo-frame test on queue 0. */
1693 static int stmmac_test_jumbo(struct stmmac_priv *priv)
1695 return __stmmac_test_jumbo(priv, 0);
/* Jumbo-frame test across every TX queue in use. */
1698 static int stmmac_test_mjumbo(struct stmmac_priv *priv)
1700 u32 chan, tx_cnt = priv->plat->tx_queues_to_use;
1706 for (chan = 0; chan < tx_cnt; chan++) {
1707 ret = __stmmac_test_jumbo(priv, chan);
1715 static int stmmac_test_sph(struct stmmac_priv *priv)
1717 unsigned long cnt_end, cnt_start = priv->xstats.rx_split_hdr_pkt_n;
1718 struct stmmac_packet_attrs attr = { };
1724 /* Check for UDP first */
1725 attr.dst = priv->dev->dev_addr;
1728 ret = __stmmac_test_loopback(priv, &attr);
1732 cnt_end = priv->xstats.rx_split_hdr_pkt_n;
1733 if (cnt_end <= cnt_start)
1736 /* Check for TCP now */
1737 cnt_start = cnt_end;
1739 attr.dst = priv->dev->dev_addr;
1742 ret = __stmmac_test_loopback(priv, &attr);
1746 cnt_end = priv->xstats.rx_split_hdr_pkt_n;
1747 if (cnt_end <= cnt_start)
1753 static int stmmac_test_tbs(struct stmmac_priv *priv)
1755 #define STMMAC_TBS_LT_OFFSET (500 * 1000 * 1000) /* 500 ms*/
1756 struct stmmac_packet_attrs attr = { };
1757 struct tc_etf_qopt_offload qopt;
1758 u64 start_time, curr_time = 0;
1759 unsigned long flags;
1762 if (!priv->hwts_tx_en)
1765 /* Find first TBS enabled Queue, if any */
1766 for (i = 0; i < priv->plat->tx_queues_to_use; i++)
1767 if (priv->tx_queue[i].tbs & STMMAC_TBS_AVAIL)
1770 if (i >= priv->plat->tx_queues_to_use)
1776 ret = stmmac_tc_setup_etf(priv, priv, &qopt);
1780 spin_lock_irqsave(&priv->ptp_lock, flags);
1781 stmmac_get_systime(priv, priv->ptpaddr, &curr_time);
1782 spin_unlock_irqrestore(&priv->ptp_lock, flags);
1789 start_time = curr_time;
1790 curr_time += STMMAC_TBS_LT_OFFSET;
1792 attr.dst = priv->dev->dev_addr;
1793 attr.timestamp = curr_time;
1794 attr.timeout = nsecs_to_jiffies(2 * STMMAC_TBS_LT_OFFSET);
1795 attr.queue_mapping = i;
1797 ret = __stmmac_test_loopback(priv, &attr);
1801 /* Check if expected time has elapsed */
1802 spin_lock_irqsave(&priv->ptp_lock, flags);
1803 stmmac_get_systime(priv, priv->ptpaddr, &curr_time);
1804 spin_unlock_irqrestore(&priv->ptp_lock, flags);
1806 if ((curr_time - start_time) < STMMAC_TBS_LT_OFFSET)
1810 qopt.enable = false;
1811 stmmac_tc_setup_etf(priv, priv, &qopt);
1815 #define STMMAC_LOOPBACK_NONE 0
1816 #define STMMAC_LOOPBACK_MAC 1
1817 #define STMMAC_LOOPBACK_PHY 2
1819 static const struct stmmac_test {
1820 char name[ETH_GSTRING_LEN];
1822 int (*fn)(struct stmmac_priv *priv);
1823 } stmmac_selftests[] = {
1825 .name = "MAC Loopback ",
1826 .lb = STMMAC_LOOPBACK_MAC,
1827 .fn = stmmac_test_mac_loopback,
1829 .name = "PHY Loopback ",
1830 .lb = STMMAC_LOOPBACK_NONE, /* Test will handle it */
1831 .fn = stmmac_test_phy_loopback,
1833 .name = "MMC Counters ",
1834 .lb = STMMAC_LOOPBACK_PHY,
1835 .fn = stmmac_test_mmc,
1838 .lb = STMMAC_LOOPBACK_PHY,
1839 .fn = stmmac_test_eee,
1841 .name = "Hash Filter MC ",
1842 .lb = STMMAC_LOOPBACK_PHY,
1843 .fn = stmmac_test_hfilt,
1845 .name = "Perfect Filter UC ",
1846 .lb = STMMAC_LOOPBACK_PHY,
1847 .fn = stmmac_test_pfilt,
1849 .name = "MC Filter ",
1850 .lb = STMMAC_LOOPBACK_PHY,
1851 .fn = stmmac_test_mcfilt,
1853 .name = "UC Filter ",
1854 .lb = STMMAC_LOOPBACK_PHY,
1855 .fn = stmmac_test_ucfilt,
1857 .name = "Flow Control ",
1858 .lb = STMMAC_LOOPBACK_PHY,
1859 .fn = stmmac_test_flowctrl,
1862 .lb = STMMAC_LOOPBACK_PHY,
1863 .fn = stmmac_test_rss,
1865 .name = "VLAN Filtering ",
1866 .lb = STMMAC_LOOPBACK_PHY,
1867 .fn = stmmac_test_vlanfilt,
1869 .name = "VLAN Filtering (perf) ",
1870 .lb = STMMAC_LOOPBACK_PHY,
1871 .fn = stmmac_test_vlanfilt_perfect,
1873 .name = "Double VLAN Filter ",
1874 .lb = STMMAC_LOOPBACK_PHY,
1875 .fn = stmmac_test_dvlanfilt,
1877 .name = "Double VLAN Filter (perf) ",
1878 .lb = STMMAC_LOOPBACK_PHY,
1879 .fn = stmmac_test_dvlanfilt_perfect,
1881 .name = "Flexible RX Parser ",
1882 .lb = STMMAC_LOOPBACK_PHY,
1883 .fn = stmmac_test_rxp,
1885 .name = "SA Insertion (desc) ",
1886 .lb = STMMAC_LOOPBACK_PHY,
1887 .fn = stmmac_test_desc_sai,
1889 .name = "SA Replacement (desc) ",
1890 .lb = STMMAC_LOOPBACK_PHY,
1891 .fn = stmmac_test_desc_sar,
1893 .name = "SA Insertion (reg) ",
1894 .lb = STMMAC_LOOPBACK_PHY,
1895 .fn = stmmac_test_reg_sai,
1897 .name = "SA Replacement (reg) ",
1898 .lb = STMMAC_LOOPBACK_PHY,
1899 .fn = stmmac_test_reg_sar,
1901 .name = "VLAN TX Insertion ",
1902 .lb = STMMAC_LOOPBACK_PHY,
1903 .fn = stmmac_test_vlanoff,
1905 .name = "SVLAN TX Insertion ",
1906 .lb = STMMAC_LOOPBACK_PHY,
1907 .fn = stmmac_test_svlanoff,
1909 .name = "L3 DA Filtering ",
1910 .lb = STMMAC_LOOPBACK_PHY,
1911 .fn = stmmac_test_l3filt_da,
1913 .name = "L3 SA Filtering ",
1914 .lb = STMMAC_LOOPBACK_PHY,
1915 .fn = stmmac_test_l3filt_sa,
1917 .name = "L4 DA TCP Filtering ",
1918 .lb = STMMAC_LOOPBACK_PHY,
1919 .fn = stmmac_test_l4filt_da_tcp,
1921 .name = "L4 SA TCP Filtering ",
1922 .lb = STMMAC_LOOPBACK_PHY,
1923 .fn = stmmac_test_l4filt_sa_tcp,
1925 .name = "L4 DA UDP Filtering ",
1926 .lb = STMMAC_LOOPBACK_PHY,
1927 .fn = stmmac_test_l4filt_da_udp,
1929 .name = "L4 SA UDP Filtering ",
1930 .lb = STMMAC_LOOPBACK_PHY,
1931 .fn = stmmac_test_l4filt_sa_udp,
1933 .name = "ARP Offload ",
1934 .lb = STMMAC_LOOPBACK_PHY,
1935 .fn = stmmac_test_arpoffload,
1937 .name = "Jumbo Frame ",
1938 .lb = STMMAC_LOOPBACK_PHY,
1939 .fn = stmmac_test_jumbo,
1941 .name = "Multichannel Jumbo ",
1942 .lb = STMMAC_LOOPBACK_PHY,
1943 .fn = stmmac_test_mjumbo,
1945 .name = "Split Header ",
1946 .lb = STMMAC_LOOPBACK_PHY,
1947 .fn = stmmac_test_sph,
1949 .name = "TBS (ETF Scheduler) ",
1950 .lb = STMMAC_LOOPBACK_PHY,
1951 .fn = stmmac_test_tbs,
1955 void stmmac_selftest_run(struct net_device *dev,
1956 struct ethtool_test *etest, u64 *buf)
1958 struct stmmac_priv *priv = netdev_priv(dev);
1959 int count = stmmac_selftest_get_count(priv);
1962 memset(buf, 0, sizeof(*buf) * count);
1963 stmmac_test_next_id = 0;
1965 if (etest->flags != ETH_TEST_FL_OFFLINE) {
1966 netdev_err(priv->dev, "Only offline tests are supported\n");
1967 etest->flags |= ETH_TEST_FL_FAILED;
1969 } else if (!netif_carrier_ok(dev)) {
1970 netdev_err(priv->dev, "You need valid Link to execute tests\n");
1971 etest->flags |= ETH_TEST_FL_FAILED;
1975 /* Wait for queues drain */
1978 for (i = 0; i < count; i++) {
1981 switch (stmmac_selftests[i].lb) {
1982 case STMMAC_LOOPBACK_PHY:
1985 ret = phy_loopback(dev->phydev, true);
1989 case STMMAC_LOOPBACK_MAC:
1990 ret = stmmac_set_mac_loopback(priv, priv->ioaddr, true);
1992 case STMMAC_LOOPBACK_NONE:
2000 * First tests will always be MAC / PHY loobpack. If any of
2001 * them is not supported we abort earlier.
2004 netdev_err(priv->dev, "Loopback is not supported\n");
2005 etest->flags |= ETH_TEST_FL_FAILED;
2009 ret = stmmac_selftests[i].fn(priv);
2010 if (ret && (ret != -EOPNOTSUPP))
2011 etest->flags |= ETH_TEST_FL_FAILED;
2014 switch (stmmac_selftests[i].lb) {
2015 case STMMAC_LOOPBACK_PHY:
2018 ret = phy_loopback(dev->phydev, false);
2022 case STMMAC_LOOPBACK_MAC:
2023 stmmac_set_mac_loopback(priv, priv->ioaddr, false);
2031 void stmmac_selftest_get_strings(struct stmmac_priv *priv, u8 *data)
2036 for (i = 0; i < stmmac_selftest_get_count(priv); i++) {
2037 snprintf(p, ETH_GSTRING_LEN, "%2d. %s", i + 1,
2038 stmmac_selftests[i].name);
2039 p += ETH_GSTRING_LEN;
2043 int stmmac_selftest_get_count(struct stmmac_priv *priv)
2045 return ARRAY_SIZE(stmmac_selftests);