// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Synopsys, Inc. and/or its affiliates.
 * stmmac Selftests Support
 *
 * Author: Jose Abreu <joabreu@synopsys.com>
 */
9 #include <linux/bitrev.h>
10 #include <linux/completion.h>
11 #include <linux/crc32.h>
12 #include <linux/ethtool.h>
14 #include <linux/phy.h>
15 #include <linux/udp.h>
16 #include <net/pkt_cls.h>
19 #include <net/tc_act/tc_gact.h>
/* Size of a minimal selftest frame: Ethernet + IPv4 + stmmac test header. */
28 #define STMMAC_TEST_PKT_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) + \
29 sizeof(struct stmmachdr))
/* Magic value stamped into test packets so the RX hook can recognize them. */
30 #define STMMAC_TEST_PKT_MAGIC 0xdeadcafecafedeadULL
/* How long each test waits for a looped-back packet (200 ms in jiffies). */
31 #define STMMAC_LB_TIMEOUT msecs_to_jiffies(200)
/* Attributes describing one generated test packet (dst/src MACs, VLAN ids,
 * ports, sizes, flags). NOTE(review): the struct body is not visible in this
 * chunk — fields truncated by extraction; confirm against the full source. */
33 struct stmmac_packet_attrs {
/* Monotonically increasing id written into each test packet and echoed
 * back for validation on receive. */
55 static u8 stmmac_test_next_id;
/* Build a selftest skb: Ethernet (optionally with one or two VLAN tags, or
 * with the source address removed) + IPv4 + UDP or TCP + a trailing stmmac
 * test header carrying STMMAC_TEST_PKT_MAGIC and a sequence id.
 * NOTE(review): interior lines are missing from this chunk (declarations of
 * ehdr/ihdr/tag/proto/size/iplen and several if/else lines) — fragment only.
 */
57 static struct sk_buff *stmmac_test_get_udp_skb(struct stmmac_priv *priv,
58 struct stmmac_packet_attrs *attr)
60 struct sk_buff *skb = NULL;
61 struct udphdr *uhdr = NULL;
62 struct tcphdr *thdr = NULL;
63 struct stmmachdr *shdr;
/* Base size: requested payload plus eth/ip/test headers; the L4 header
 * size and any max_size override are added below. */
68 size = attr->size + STMMAC_TEST_PKT_SIZE;
76 size += sizeof(struct tcphdr);
78 size += sizeof(struct udphdr);
80 if (attr->max_size && (attr->max_size > size))
81 size = attr->max_size;
83 skb = netdev_alloc_skb(priv->dev, size);
/* MAC header room: +8 bytes for two VLAN tags, +4 for one, -6 when the
 * source address is omitted (remove_sa). */
90 ehdr = skb_push(skb, ETH_HLEN + 8);
92 ehdr = skb_push(skb, ETH_HLEN + 4);
93 else if (attr->remove_sa)
94 ehdr = skb_push(skb, ETH_HLEN - 6);
96 ehdr = skb_push(skb, ETH_HLEN);
97 skb_reset_mac_header(skb);
99 skb_set_network_header(skb, skb->len);
100 ihdr = skb_put(skb, sizeof(*ihdr));
102 skb_set_transport_header(skb, skb->len);
104 thdr = skb_put(skb, sizeof(*thdr));
106 uhdr = skb_put(skb, sizeof(*uhdr));
/* MAC addresses: zeroed by default, then caller-provided src/dst copied. */
108 if (!attr->remove_sa)
109 eth_zero_addr(ehdr->h_source);
110 eth_zero_addr(ehdr->h_dest);
111 if (attr->src && !attr->remove_sa)
112 ether_addr_copy(ehdr->h_source, attr->src);
114 ether_addr_copy(ehdr->h_dest, attr->dst);
/* Ethertype: normal offset with SA present; with remove_sa the header is
 * 6 bytes shorter, so the ethertype sits at __be16 index 3. */
116 if (!attr->remove_sa) {
117 ehdr->h_proto = htons(ETH_P_IP);
119 __be16 *ptr = (__be16 *)ehdr;
122 ptr[3] = htons(ETH_P_IP);
/* VLAN tag insertion: outer 802.1Q tag; for double VLAN (attr->vlan > 1)
 * the outer protocol becomes 802.1AD with an inner 802.1Q tag. */
128 if (!attr->remove_sa) {
129 tag = (void *)ehdr + ETH_HLEN;
130 proto = (void *)ehdr + (2 * ETH_ALEN);
132 tag = (void *)ehdr + ETH_HLEN - 6;
133 proto = (void *)ehdr + ETH_ALEN;
136 proto[0] = htons(ETH_P_8021Q);
137 tag[0] = htons(attr->vlan_id_out);
138 tag[1] = htons(ETH_P_IP);
139 if (attr->vlan > 1) {
140 proto[0] = htons(ETH_P_8021AD);
141 tag[1] = htons(ETH_P_8021Q);
142 tag[2] = htons(attr->vlan_id_in);
143 tag[3] = htons(ETH_P_IP);
/* L4 header fields: TCP branch first, then UDP branch. */
148 thdr->source = htons(attr->sport);
149 thdr->dest = htons(attr->dport);
150 thdr->doff = sizeof(struct tcphdr) / 4;
153 uhdr->source = htons(attr->sport);
154 uhdr->dest = htons(attr->dport);
155 uhdr->len = htons(sizeof(*shdr) + sizeof(*uhdr) + attr->size);
157 uhdr->len = htons(attr->max_size -
158 (sizeof(*ihdr) + sizeof(*ehdr)));
/* IPv4 header: protocol selection, total length, then addresses. */
166 ihdr->protocol = IPPROTO_TCP;
168 ihdr->protocol = IPPROTO_UDP;
169 iplen = sizeof(*ihdr) + sizeof(*shdr) + attr->size;
171 iplen += sizeof(*thdr);
173 iplen += sizeof(*uhdr);
176 iplen = attr->max_size - sizeof(*ehdr);
178 ihdr->tot_len = htons(iplen);
180 ihdr->saddr = htonl(attr->ip_src);
181 ihdr->daddr = htonl(attr->ip_dst);
/* Test header: magic plus an id the RX validator matches against attr->id. */
186 shdr = skb_put(skb, sizeof(*shdr));
188 shdr->magic = cpu_to_be64(STMMAC_TEST_PKT_MAGIC);
189 attr->id = stmmac_test_next_id;
190 shdr->id = stmmac_test_next_id++;
/* Pad the payload out to attr->size and, if set, up to attr->max_size. */
193 skb_put(skb, attr->size);
194 if (attr->max_size && (attr->max_size > skb->len))
195 skb_put(skb, attr->max_size - skb->len);
/* Checksum offload: CHECKSUM_PARTIAL with a TCP pseudo-header seed, or a
 * full UDP checksum via udp4_hwcsum. */
198 skb->ip_summed = CHECKSUM_PARTIAL;
200 thdr->check = ~tcp_v4_check(skb->len, ihdr->saddr, ihdr->daddr, 0);
201 skb->csum_start = skb_transport_header(skb) - skb->head;
202 skb->csum_offset = offsetof(struct tcphdr, check);
204 udp4_hwcsum(skb, ihdr->saddr, ihdr->daddr);
207 skb->protocol = htons(ETH_P_IP);
208 skb->pkt_type = PACKET_HOST;
209 skb->dev = priv->dev;
/* Build an ARP request skb (via arp_create) for the ARP-offload selftest.
 * attr->ip_src/ip_dst are host-order addresses converted here. */
214 static struct sk_buff *stmmac_test_get_arp_skb(struct stmmac_priv *priv,
215 struct stmmac_packet_attrs *attr)
217 __be32 ip_src = htonl(attr->ip_src);
218 __be32 ip_dst = htonl(attr->ip_dst);
219 struct sk_buff *skb = NULL;
221 skb = arp_create(ARPOP_REQUEST, ETH_P_ARP, ip_dst, priv->dev, ip_src,
222 NULL, attr->src, attr->dst);
226 skb->pkt_type = PACKET_HOST;
227 skb->dev = priv->dev;
/* Per-test context handed to the packet_type receive hook via
 * af_packet_priv. NOTE(review): trailing fields (ok, vlan_id, double_vlan,
 * dma — referenced later in this file) are not visible in this chunk. */
232 struct stmmac_test_priv {
233 struct stmmac_packet_attrs *packet;
234 struct packet_type pt;
235 struct completion comp;
/* packet_type handler: validate a looped-back test packet (MAC addresses,
 * L3/L4 protocol, destination port, magic and id) and complete the waiter
 * when everything matches. Runs in RX softirq context. */
241 static int stmmac_test_loopback_validate(struct sk_buff *skb,
242 struct net_device *ndev,
243 struct packet_type *pt,
244 struct net_device *orig_ndev)
246 struct stmmac_test_priv *tpriv = pt->af_packet_priv;
247 unsigned char *src = tpriv->packet->src;
248 unsigned char *dst = tpriv->packet->dst;
249 struct stmmachdr *shdr;
/* Take a private, linear copy before touching headers. */
255 skb = skb_unshare(skb, GFP_ATOMIC);
259 if (skb_linearize(skb))
261 if (skb_headlen(skb) < (STMMAC_TEST_PKT_SIZE - ETH_HLEN))
264 ehdr = (struct ethhdr *)skb_mac_header(skb);
266 if (!ether_addr_equal_unaligned(ehdr->h_dest, dst))
/* SARC tests expect the hardware to have replaced the source address
 * with the destination address. */
269 if (tpriv->packet->sarc) {
270 if (!ether_addr_equal_unaligned(ehdr->h_source, ehdr->h_dest))
273 if (!ether_addr_equal_unaligned(ehdr->h_source, src))
/* An outer VLAN tag shifts the IP header by 4 bytes. */
278 if (tpriv->double_vlan)
279 ihdr = (struct iphdr *)(skb_network_header(skb) + 4);
281 if (tpriv->packet->tcp) {
282 if (ihdr->protocol != IPPROTO_TCP)
285 thdr = (struct tcphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
286 if (thdr->dest != htons(tpriv->packet->dport))
289 shdr = (struct stmmachdr *)((u8 *)thdr + sizeof(*thdr));
291 if (ihdr->protocol != IPPROTO_UDP)
294 uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
295 if (uhdr->dest != htons(tpriv->packet->dport))
298 shdr = (struct stmmachdr *)((u8 *)uhdr + sizeof(*uhdr));
/* Final identity checks: magic, RSS hash expectation, sequence id. */
301 if (shdr->magic != cpu_to_be64(STMMAC_TEST_PKT_MAGIC))
303 if (tpriv->packet->exp_hash && !skb->hash)
305 if (tpriv->packet->id != shdr->id)
309 complete(&tpriv->comp);
/* Core loopback helper: register an ETH_P_IP packet_type hook, transmit one
 * UDP test packet built from @attr, and (unless attr->dont_wait) wait up to
 * STMMAC_LB_TIMEOUT for the validator to confirm reception.
 * Returns 0 on success, -ETIMEDOUT when no matching packet came back. */
315 static int __stmmac_test_loopback(struct stmmac_priv *priv,
316 struct stmmac_packet_attrs *attr)
318 struct stmmac_test_priv *tpriv;
319 struct sk_buff *skb = NULL;
322 tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
327 init_completion(&tpriv->comp);
329 tpriv->pt.type = htons(ETH_P_IP);
330 tpriv->pt.func = stmmac_test_loopback_validate;
331 tpriv->pt.dev = priv->dev;
332 tpriv->pt.af_packet_priv = tpriv;
333 tpriv->packet = attr;
335 if (!attr->dont_wait)
336 dev_add_pack(&tpriv->pt);
338 skb = stmmac_test_get_udp_skb(priv, attr);
344 skb_set_queue_mapping(skb, attr->queue_mapping);
345 ret = dev_queue_xmit(skb);
/* Use the default timeout unless the caller overrode attr->timeout
 * (assignment guarded by a missing line in this chunk — confirm). */
353 attr->timeout = STMMAC_LB_TIMEOUT;
355 wait_for_completion_timeout(&tpriv->comp, attr->timeout);
356 ret = tpriv->ok ? 0 : -ETIMEDOUT;
359 if (!attr->dont_wait)
360 dev_remove_pack(&tpriv->pt);
/* Simplest test: loop one packet addressed to our own MAC back to us. */
365 static int stmmac_test_mac_loopback(struct stmmac_priv *priv)
367 struct stmmac_packet_attrs attr = { };
369 attr.dst = priv->dev->dev_addr;
370 return __stmmac_test_loopback(priv, &attr);
/* Run the loopback test with the PHY placed in loopback mode; requires an
 * attached phydev. PHY loopback is always disabled again on exit. */
373 static int stmmac_test_phy_loopback(struct stmmac_priv *priv)
375 struct stmmac_packet_attrs attr = { };
378 if (!priv->dev->phydev)
381 ret = phy_loopback(priv->dev->phydev, true);
385 attr.dst = priv->dev->dev_addr;
386 ret = __stmmac_test_loopback(priv, &attr);
388 phy_loopback(priv->dev->phydev, false);
/* Validate the MMC (hardware counters) feature: snapshot counters, run a
 * MAC loopback, and require the good-TX frame counter to have advanced.
 * Skipped when the hardware lacks RMON counters. */
392 static int stmmac_test_mmc(struct stmmac_priv *priv)
394 struct stmmac_counters initial, final;
397 memset(&initial, 0, sizeof(initial));
398 memset(&final, 0, sizeof(final));
400 if (!priv->dma_cap.rmon)
403 /* Save previous results into internal struct */
404 stmmac_mmc_read(priv, priv->mmcaddr, &priv->mmc);
406 ret = stmmac_test_mac_loopback(priv);
410 /* These will be loopback results so no need to save them */
411 stmmac_mmc_read(priv, priv->mmcaddr, &final);
414 * The number of MMC counters available depends on HW configuration
415 * so we just use this one to validate the feature. I hope there is
416 * not a version without this counter.
418 if (final.mmc_tx_framecount_g <= initial.mmc_tx_framecount_g)
/* Validate Energy Efficient Ethernet: after a loopback burst the TX path
 * must have both entered and exited LPI mode (per xstats IRQ counters).
 * Skipped when EEE is unsupported or not active. */
424 static int stmmac_test_eee(struct stmmac_priv *priv)
426 struct stmmac_extra_stats *initial, *final;
430 if (!priv->dma_cap.eee || !priv->eee_active)
433 initial = kzalloc(sizeof(*initial), GFP_KERNEL)
437 final = kzalloc(sizeof(*final), GFP_KERNEL);
440 goto out_free_initial;
443 memcpy(initial, &priv->xstats, sizeof(*initial));
445 ret = stmmac_test_mac_loopback(priv);
449 /* We have no traffic in the line so, sooner or later it will go LPI */
451 memcpy(final, &priv->xstats, sizeof(*final));
/* Poll-style check: break out once the LPI-entry counter moved (loop
 * construct around this is missing from this chunk — confirm). */
453 if (final->irq_tx_path_in_lpi_mode_n >
454 initial->irq_tx_path_in_lpi_mode_n)
464 if (final->irq_tx_path_in_lpi_mode_n <=
465 initial->irq_tx_path_in_lpi_mode_n) {
470 if (final->irq_tx_path_exit_lpi_mode_n <=
471 initial->irq_tx_path_exit_lpi_mode_n) {
/* Filtering tests are meaningless in promiscuous mode (everything is
 * accepted); warn and bail out in that case. */
483 static int stmmac_filter_check(struct stmmac_priv *priv)
485 if (!(priv->dev->flags & IFF_PROMISC))
488 netdev_warn(priv->dev, "Test can't be run in promiscuous mode!\n");
/* Return whether @addr's hash-filter bin collides with any address already
 * in the device multicast list (CRC32-based bin selection, mirroring the
 * hardware hash). NOTE(review): hash_nr is used below but its declaration
 * line is missing from this chunk. */
492 static bool stmmac_hash_check(struct stmmac_priv *priv, unsigned char *addr)
494 int mc_offset = 32 - priv->hw->mcast_bits_log2;
495 struct netdev_hw_addr *ha;
498 /* First compute the hash for desired addr */
499 hash = bitrev32(~crc32_le(~0, addr, 6)) >> mc_offset;
501 hash = 1 << (hash & 0x1f);
503 /* Now, check if it collides with any existing one */
504 netdev_for_each_mc_addr(ha, priv->dev) {
505 u32 nr = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN)) >> mc_offset;
506 if (((nr >> 5) == hash_nr) && ((1 << (nr & 0x1f)) == hash))
510 /* No collisions, address is good to go */
/* Return whether @addr exactly matches an entry already present in the
 * device unicast (perfect-filter) list. */
514 static bool stmmac_perfect_check(struct stmmac_priv *priv, unsigned char *addr)
516 struct netdev_hw_addr *ha;
518 /* Check if it collides with any existing one */
519 netdev_for_each_uc_addr(ha, priv->dev) {
520 if (!memcmp(ha->addr, addr, ETH_ALEN))
524 /* No collisions, address is good to go */
/* Hash-filter multicast test: add gd_addr to the MC list, then verify a
 * packet to gd_addr IS received while a packet to a non-colliding bd_addr
 * is NOT. bd_addr's last byte is varied (up to 'tries') to find a value
 * whose hash bin does not collide with existing addresses. */
528 static int stmmac_test_hfilt(struct stmmac_priv *priv)
530 unsigned char gd_addr[ETH_ALEN] = {0xf1, 0xee, 0xdd, 0xcc, 0xbb, 0xaa};
531 unsigned char bd_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff};
532 struct stmmac_packet_attrs attr = { };
533 int ret, tries = 256;
535 ret = stmmac_filter_check(priv);
539 if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
543 /* We only need to check the bd_addr for collisions */
544 bd_addr[ETH_ALEN - 1] = tries;
545 if (stmmac_hash_check(priv, bd_addr))
552 ret = dev_mc_add(priv->dev, gd_addr);
558 /* Shall receive packet */
559 ret = __stmmac_test_loopback(priv, &attr);
565 /* Shall NOT receive packet */
566 ret = __stmmac_test_loopback(priv, &attr);
567 ret = ret ? 0 : -EINVAL;
570 dev_mc_del(priv->dev, gd_addr);
/* Perfect-filter unicast test: add gd_addr to the UC list, expect a packet
 * to gd_addr to be received and a packet to a non-matching bd_addr to be
 * dropped. Skipped when the perfect-filter table is already full. */
574 static int stmmac_test_pfilt(struct stmmac_priv *priv)
576 unsigned char gd_addr[ETH_ALEN] = {0xf0, 0x01, 0x44, 0x55, 0x66, 0x77};
577 unsigned char bd_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff};
578 struct stmmac_packet_attrs attr = { };
579 int ret, tries = 256;
581 if (stmmac_filter_check(priv))
583 if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
587 /* We only need to check the bd_addr for collisions */
588 bd_addr[ETH_ALEN - 1] = tries;
589 if (stmmac_perfect_check(priv, bd_addr))
596 ret = dev_uc_add(priv->dev, gd_addr);
602 /* Shall receive packet */
603 ret = __stmmac_test_loopback(priv, &attr);
609 /* Shall NOT receive packet */
610 ret = __stmmac_test_loopback(priv, &attr);
611 ret = ret ? 0 : -EINVAL;
614 dev_uc_del(priv->dev, gd_addr);
/* Multicast-filter isolation test: with only a UNICAST address installed,
 * a unicast packet must pass while a multicast packet (non-colliding
 * mc_addr) must be filtered out. */
618 static int stmmac_test_mcfilt(struct stmmac_priv *priv)
620 unsigned char uc_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff};
621 unsigned char mc_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff};
622 struct stmmac_packet_attrs attr = { };
623 int ret, tries = 256;
625 if (stmmac_filter_check(priv))
627 if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
629 if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
633 /* We only need to check the mc_addr for collisions */
634 mc_addr[ETH_ALEN - 1] = tries;
635 if (stmmac_hash_check(priv, mc_addr))
642 ret = dev_uc_add(priv->dev, uc_addr);
648 /* Shall receive packet */
649 ret = __stmmac_test_loopback(priv, &attr);
655 /* Shall NOT receive packet */
656 ret = __stmmac_test_loopback(priv, &attr);
657 ret = ret ? 0 : -EINVAL;
660 dev_uc_del(priv->dev, uc_addr);
/* Unicast-filter isolation test: mirror of mcfilt — with only a MULTICAST
 * address installed, a multicast packet must pass while a unicast packet
 * (non-colliding uc_addr) must be filtered out. */
664 static int stmmac_test_ucfilt(struct stmmac_priv *priv)
666 unsigned char uc_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff};
667 unsigned char mc_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff};
668 struct stmmac_packet_attrs attr = { };
669 int ret, tries = 256;
671 if (stmmac_filter_check(priv))
673 if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
675 if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
679 /* We only need to check the uc_addr for collisions */
680 uc_addr[ETH_ALEN - 1] = tries;
681 if (stmmac_perfect_check(priv, uc_addr))
688 ret = dev_mc_add(priv->dev, mc_addr);
694 /* Shall receive packet */
695 ret = __stmmac_test_loopback(priv, &attr);
701 /* Shall NOT receive packet */
702 ret = __stmmac_test_loopback(priv, &attr);
703 ret = ret ? 0 : -EINVAL;
706 dev_mc_del(priv->dev, mc_addr);
/* packet_type handler for the flow-control test: accept only PAUSE frames
 * (ETH_P_PAUSE) sourced from our own MAC, then complete the waiter. */
710 static int stmmac_test_flowctrl_validate(struct sk_buff *skb,
711 struct net_device *ndev,
712 struct packet_type *pt,
713 struct net_device *orig_ndev)
715 struct stmmac_test_priv *tpriv = pt->af_packet_priv;
718 ehdr = (struct ethhdr *)skb_mac_header(skb);
719 if (!ether_addr_equal_unaligned(ehdr->h_source, orig_ndev->dev_addr))
721 if (ehdr->h_proto != htons(ETH_P_PAUSE))
725 complete(&tpriv->comp);
/* Flow-control test: stop all RX DMA channels, flood enough loopback
 * packets to fill the RX FIFO, and expect the MAC to emit a PAUSE frame
 * (captured by stmmac_test_flowctrl_validate). RX is restarted and state
 * restored before returning. Requires pause-capable PHY. */
731 static int stmmac_test_flowctrl(struct stmmac_priv *priv)
733 unsigned char paddr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00, 0x01};
734 struct phy_device *phydev = priv->dev->phydev;
735 u32 rx_cnt = priv->plat->rx_queues_to_use;
736 struct stmmac_test_priv *tpriv;
737 unsigned int pkt_count;
740 if (!phydev || (!phydev->pause && !phydev->asym_pause))
743 tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
748 init_completion(&tpriv->comp);
749 tpriv->pt.type = htons(ETH_P_PAUSE);
750 tpriv->pt.func = stmmac_test_flowctrl_validate;
751 tpriv->pt.dev = priv->dev;
752 tpriv->pt.af_packet_priv = tpriv;
753 dev_add_pack(&tpriv->pt);
755 /* Compute minimum number of packets to make FIFO full */
756 pkt_count = priv->plat->rx_fifo_size;
758 pkt_count = priv->dma_cap.rx_fifo_size;
/* Freeze RX so the FIFO actually fills up. */
762 for (i = 0; i < rx_cnt; i++)
763 stmmac_stop_rx(priv, priv->ioaddr, i);
765 ret = dev_set_promiscuity(priv->dev, 1);
/* paddr is the 802.3x PAUSE multicast address (01:80:C2:00:00:01). */
769 ret = dev_mc_add(priv->dev, paddr);
773 for (i = 0; i < pkt_count; i++) {
774 struct stmmac_packet_attrs attr = { };
776 attr.dst = priv->dev->dev_addr;
777 attr.dont_wait = true;
780 ret = __stmmac_test_loopback(priv, &attr);
787 /* Wait for some time in case RX Watchdog is enabled */
/* Restart RX: reprogram the tail pointer, re-enable DMA, kick NAPI. */
790 for (i = 0; i < rx_cnt; i++) {
791 struct stmmac_channel *ch = &priv->channel[i];
794 tail = priv->rx_queue[i].dma_rx_phy +
795 (DMA_RX_SIZE * sizeof(struct dma_desc));
797 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, tail, i);
798 stmmac_start_rx(priv, priv->ioaddr, i);
801 napi_reschedule(&ch->rx_napi);
805 wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
806 ret = tpriv->ok ? 0 : -ETIMEDOUT;
809 dev_mc_del(priv->dev, paddr);
810 dev_set_promiscuity(priv->dev, -1);
811 dev_remove_pack(&tpriv->pt);
/* RSS test: run a loopback expecting the received skb to carry a non-zero
 * hardware hash (exp_hash). Skipped when RSS is unsupported or disabled. */
816 static int stmmac_test_rss(struct stmmac_priv *priv)
818 struct stmmac_packet_attrs attr = { };
820 if (!priv->dma_cap.rssen || !priv->rss.enable)
823 attr.dst = priv->dev->dev_addr;
824 attr.exp_hash = true;
828 return __stmmac_test_loopback(priv, &attr);
/* packet_type handler for VLAN tests: check the (hw-accelerated) VLAN
 * proto/tci, then MAC dst, UDP dport and test-header magic. A packet that
 * carries the wrong VLAN id still completes the waiter (with ok unset) to
 * signal that filtering failed. */
831 static int stmmac_test_vlan_validate(struct sk_buff *skb,
832 struct net_device *ndev,
833 struct packet_type *pt,
834 struct net_device *orig_ndev)
836 struct stmmac_test_priv *tpriv = pt->af_packet_priv;
837 struct stmmachdr *shdr;
/* Double-VLAN tests expect an outer 802.1AD tag instead of 802.1Q. */
843 proto = tpriv->double_vlan ? ETH_P_8021AD : ETH_P_8021Q;
845 skb = skb_unshare(skb, GFP_ATOMIC);
849 if (skb_linearize(skb))
851 if (skb_headlen(skb) < (STMMAC_TEST_PKT_SIZE - ETH_HLEN))
853 if (tpriv->vlan_id) {
854 if (skb->vlan_proto != htons(proto))
856 if (skb->vlan_tci != tpriv->vlan_id) {
857 /* Means filter did not work. */
859 complete(&tpriv->comp);
864 ehdr = (struct ethhdr *)skb_mac_header(skb);
865 if (!ether_addr_equal_unaligned(ehdr->h_dest, tpriv->packet->dst))
/* A remaining outer tag shifts the IP header by 4 bytes. */
869 if (tpriv->double_vlan)
870 ihdr = (struct iphdr *)(skb_network_header(skb) + 4);
871 if (ihdr->protocol != IPPROTO_UDP)
874 uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
875 if (uhdr->dest != htons(tpriv->packet->dport))
878 shdr = (struct stmmachdr *)((u8 *)uhdr + sizeof(*uhdr));
879 if (shdr->magic != cpu_to_be64(STMMAC_TEST_PKT_MAGIC))
883 complete(&tpriv->comp);
/* VLAN hash-filter test: register VID 0x123, then send packets tagged with
 * VID, VID+1, VID+2, VID+3. Only the first (i == 0) must be received; the
 * others must time out. Requires the VLAN hash capability. */
890 static int stmmac_test_vlanfilt(struct stmmac_priv *priv)
892 struct stmmac_packet_attrs attr = { };
893 struct stmmac_test_priv *tpriv;
894 struct sk_buff *skb = NULL;
897 if (!priv->dma_cap.vlhash)
900 tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
905 init_completion(&tpriv->comp);
907 tpriv->pt.type = htons(ETH_P_IP);
908 tpriv->pt.func = stmmac_test_vlan_validate;
909 tpriv->pt.dev = priv->dev;
910 tpriv->pt.af_packet_priv = tpriv;
911 tpriv->packet = &attr;
914 * As we use HASH filtering, false positives may appear. This is a
915 * specially chosen ID so that adjacent IDs (+4) have different
918 tpriv->vlan_id = 0x123;
919 dev_add_pack(&tpriv->pt);
921 ret = vlan_vid_add(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id);
925 for (i = 0; i < 4; i++) {
927 attr.vlan_id_out = tpriv->vlan_id + i;
928 attr.dst = priv->dev->dev_addr;
932 skb = stmmac_test_get_udp_skb(priv, &attr);
938 skb_set_queue_mapping(skb, 0);
939 ret = dev_queue_xmit(skb);
943 wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
944 ret = tpriv->ok ? 0 : -ETIMEDOUT;
/* i > 0 iterations must NOT be received: success there is a failure. */
947 } else if (!ret && i) {
958 vlan_vid_del(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id);
960 dev_remove_pack(&tpriv->pt);
/* Double-VLAN (802.1AD outer tag) variant of the VLAN hash-filter test:
 * same four-iteration scheme as stmmac_test_vlanfilt but registering and
 * matching an S-VLAN. */
965 static int stmmac_test_dvlanfilt(struct stmmac_priv *priv)
967 struct stmmac_packet_attrs attr = { };
968 struct stmmac_test_priv *tpriv;
969 struct sk_buff *skb = NULL;
972 if (!priv->dma_cap.vlhash)
975 tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
980 tpriv->double_vlan = true;
981 init_completion(&tpriv->comp);
983 tpriv->pt.type = htons(ETH_P_8021Q);
984 tpriv->pt.func = stmmac_test_vlan_validate;
985 tpriv->pt.dev = priv->dev;
986 tpriv->pt.af_packet_priv = tpriv;
987 tpriv->packet = &attr;
990 * As we use HASH filtering, false positives may appear. This is a
991 * specially chosen ID so that adjacent IDs (+4) have different
994 tpriv->vlan_id = 0x123;
995 dev_add_pack(&tpriv->pt);
997 ret = vlan_vid_add(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id);
1001 for (i = 0; i < 4; i++) {
1003 attr.vlan_id_out = tpriv->vlan_id + i;
1004 attr.dst = priv->dev->dev_addr;
1008 skb = stmmac_test_get_udp_skb(priv, &attr);
1014 skb_set_queue_mapping(skb, 0);
1015 ret = dev_queue_xmit(skb);
1019 wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
1020 ret = tpriv->ok ? 0 : -ETIMEDOUT;
/* Only the i == 0 (matching VID) iteration may be received. */
1023 } else if (!ret && i) {
1034 vlan_vid_del(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id);
1036 dev_remove_pack(&tpriv->pt);
1041 #ifdef CONFIG_NET_CLS_ACT
/* Flexible RX Parser test: install a cls_u32 DROP rule matching 0xdeadbeef
 * at offset 6 (the source MAC used below), send a packet from that address
 * and require it NOT to be received. The rule is removed afterwards.
 * NOTE(review): allocation error-path lines and 'nk' initialization are
 * missing from this chunk. */
1042 static int stmmac_test_rxp(struct stmmac_priv *priv)
1044 unsigned char addr[ETH_ALEN] = {0xde, 0xad, 0xbe, 0xef, 0x00, 0x00};
1045 struct tc_cls_u32_offload cls_u32 = { };
1046 struct stmmac_packet_attrs attr = { };
1047 struct tc_action **actions, *act;
1048 struct tc_u32_sel *sel;
1049 struct tcf_exts *exts;
1052 if (!tc_can_offload(priv->dev))
1054 if (!priv->dma_cap.frpsel)
1057 sel = kzalloc(sizeof(*sel) + nk * sizeof(struct tc_u32_key), GFP_KERNEL);
1061 exts = kzalloc(sizeof(*exts), GFP_KERNEL);
1067 actions = kzalloc(nk * sizeof(*actions), GFP_KERNEL);
1073 act = kzalloc(nk * sizeof(*act), GFP_KERNEL);
1076 goto cleanup_actions;
1079 cls_u32.command = TC_CLSU32_NEW_KNODE;
1080 cls_u32.common.chain_index = 0;
1081 cls_u32.common.protocol = htons(ETH_P_ALL);
1082 cls_u32.knode.exts = exts;
1083 cls_u32.knode.sel = sel;
1084 cls_u32.knode.handle = 0x123;
/* Every action is a gact DROP. */
1086 exts->nr_actions = nk;
1087 exts->actions = actions;
1088 for (i = 0; i < nk; i++) {
1089 struct tcf_gact *gact = to_gact(&act[i]);
1091 actions[i] = &act[i];
1092 gact->tcf_action = TC_ACT_SHOT;
/* Match the first 4 bytes of the source MAC (frame offset 6). */
1097 sel->keys[0].off = 6;
1098 sel->keys[0].val = htonl(0xdeadbeef);
1099 sel->keys[0].mask = ~0x0;
1101 ret = stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);
1105 attr.dst = priv->dev->dev_addr;
1108 ret = __stmmac_test_loopback(priv, &attr);
1109 ret = ret ? 0 : -EINVAL; /* Shall NOT receive packet */
1111 cls_u32.command = TC_CLSU32_DELETE_KNODE;
1112 stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);
/* !CONFIG_NET_CLS_ACT stub (body lines missing from this chunk —
 * presumably returns -EOPNOTSUPP; confirm against full source). */
1125 static int stmmac_test_rxp(struct stmmac_priv *priv)
/* Source Address Insertion via TX descriptor: send with the SA removed and
 * sarc_type 0x1; the validator expects the MAC to have inserted it. */
1131 static int stmmac_test_desc_sai(struct stmmac_priv *priv)
1133 unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
1134 struct stmmac_packet_attrs attr = { };
1137 if (!priv->dma_cap.vlins)
1140 attr.remove_sa = true;
1143 attr.dst = priv->dev->dev_addr;
1145 priv->sarc_type = 0x1;
1147 ret = __stmmac_test_loopback(priv, &attr);
1149 priv->sarc_type = 0x0;
/* Source Address Replacement via TX descriptor: send with a zero SA and
 * sarc_type 0x2; the MAC should replace it with its own address. */
1153 static int stmmac_test_desc_sar(struct stmmac_priv *priv)
1155 unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
1156 struct stmmac_packet_attrs attr = { };
1159 if (!priv->dma_cap.vlins)
1164 attr.dst = priv->dev->dev_addr;
1166 priv->sarc_type = 0x2;
1168 ret = __stmmac_test_loopback(priv, &attr);
1170 priv->sarc_type = 0x0;
/* Source Address Insertion configured via MAC register (sarc mode 0x2)
 * instead of per-descriptor; always restored to 0x0 on exit. */
1174 static int stmmac_test_reg_sai(struct stmmac_priv *priv)
1176 unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
1177 struct stmmac_packet_attrs attr = { };
1180 if (!priv->dma_cap.vlins)
1183 attr.remove_sa = true;
1186 attr.dst = priv->dev->dev_addr;
1188 if (stmmac_sarc_configure(priv, priv->ioaddr, 0x2))
1191 ret = __stmmac_test_loopback(priv, &attr);
1193 stmmac_sarc_configure(priv, priv->ioaddr, 0x0);
/* Source Address Replacement configured via MAC register (sarc mode 0x3);
 * always restored to 0x0 on exit. */
1197 static int stmmac_test_reg_sar(struct stmmac_priv *priv)
1199 unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
1200 struct stmmac_packet_attrs attr = { };
1203 if (!priv->dma_cap.vlins)
1208 attr.dst = priv->dev->dev_addr;
1210 if (stmmac_sarc_configure(priv, priv->ioaddr, 0x3))
1213 ret = __stmmac_test_loopback(priv, &attr);
1215 stmmac_sarc_configure(priv, priv->ioaddr, 0x0);
/* VLAN TX-insertion offload test, shared for C-VLAN and S-VLAN (@svlan):
 * hand the stack a VLAN-accelerated skb (tag in metadata, not payload) and
 * verify via the validator that the hardware inserted the tag on the wire. */
1219 static int stmmac_test_vlanoff_common(struct stmmac_priv *priv, bool svlan)
1221 struct stmmac_packet_attrs attr = { };
1222 struct stmmac_test_priv *tpriv;
1223 struct sk_buff *skb = NULL;
1227 if (!priv->dma_cap.vlins)
1230 tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
1234 proto = svlan ? ETH_P_8021AD : ETH_P_8021Q;
1237 tpriv->double_vlan = svlan;
1238 init_completion(&tpriv->comp);
/* For S-VLAN the outer tag survives RX, so the hook matches 802.1Q. */
1240 tpriv->pt.type = svlan ? htons(ETH_P_8021Q) : htons(ETH_P_IP);
1241 tpriv->pt.func = stmmac_test_vlan_validate;
1242 tpriv->pt.dev = priv->dev;
1243 tpriv->pt.af_packet_priv = tpriv;
1244 tpriv->packet = &attr;
1245 tpriv->vlan_id = 0x123;
1246 dev_add_pack(&tpriv->pt);
1248 ret = vlan_vid_add(priv->dev, htons(proto), tpriv->vlan_id);
1252 attr.dst = priv->dev->dev_addr;
1254 skb = stmmac_test_get_udp_skb(priv, &attr);
/* Tag carried out-of-band: hardware must insert it into the frame. */
1260 __vlan_hwaccel_put_tag(skb, htons(proto), tpriv->vlan_id);
1261 skb->protocol = htons(proto);
1263 skb_set_queue_mapping(skb, 0);
1264 ret = dev_queue_xmit(skb);
1268 wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
1269 ret = tpriv->ok ? 0 : -ETIMEDOUT;
1272 vlan_vid_del(priv->dev, htons(proto), tpriv->vlan_id);
1274 dev_remove_pack(&tpriv->pt);
/* C-VLAN (802.1Q) TX-insertion test. */
1279 static int stmmac_test_vlanoff(struct stmmac_priv *priv)
1281 return stmmac_test_vlanoff_common(priv, false);
/* S-VLAN (802.1AD) TX-insertion test; needs double-VLAN capability. */
1284 static int stmmac_test_svlanoff(struct stmmac_priv *priv)
1286 if (!priv->dma_cap.dvlan)
1288 return stmmac_test_vlanoff_common(priv, true);
1291 #ifdef CONFIG_NET_CLS_ACT
/* L3 (IPv4 address) filter test: build a flow_cls_offload DROP rule for
 * the given src/dst addresses and masks, verify a matching packet is
 * received before the rule and dropped after it. RSS is temporarily
 * disabled because it conflicts with L3/L4 filtering. */
1292 static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
1293 u32 dst_mask, u32 src_mask)
1295 struct flow_dissector_key_ipv4_addrs key, mask;
1296 unsigned long dummy_cookie = 0xdeadbeef;
1297 struct stmmac_packet_attrs attr = { };
1298 struct flow_dissector *dissector;
1299 struct flow_cls_offload *cls;
1300 int ret, old_enable = 0;
1301 struct flow_rule *rule;
1303 if (!tc_can_offload(priv->dev))
1305 if (!priv->dma_cap.l3l4fnum)
1307 if (priv->rss.enable) {
1308 old_enable = priv->rss.enable;
1309 priv->rss.enable = false;
1310 stmmac_rss_configure(priv, priv->hw, NULL,
1311 priv->plat->rx_queues_to_use);
1314 dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
1320 dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_IPV4_ADDRS);
1321 dissector->offset[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = 0;
1323 cls = kzalloc(sizeof(*cls), GFP_KERNEL);
1326 goto cleanup_dissector;
1329 cls->common.chain_index = 0;
1330 cls->command = FLOW_CLS_REPLACE;
1331 cls->cookie = dummy_cookie;
1333 rule = kzalloc(struct_size(rule, action.entries, 1), GFP_KERNEL);
1339 rule->match.dissector = dissector;
1340 rule->match.key = (void *)&key;
1341 rule->match.mask = (void *)&mask;
1343 key.src = htonl(src);
1344 key.dst = htonl(dst);
1345 mask.src = src_mask;
1346 mask.dst = dst_mask;
1350 rule->action.entries[0].id = FLOW_ACTION_DROP;
1351 rule->action.num_entries = 1;
1353 attr.dst = priv->dev->dev_addr;
1357 /* Shall receive packet */
1358 ret = __stmmac_test_loopback(priv, &attr);
1362 ret = stmmac_tc_setup_cls(priv, priv, cls);
1366 /* Shall NOT receive packet */
1367 ret = __stmmac_test_loopback(priv, &attr);
1368 ret = ret ? 0 : -EINVAL;
1370 cls->command = FLOW_CLS_DESTROY;
1371 stmmac_tc_setup_cls(priv, priv, cls);
/* Restore RSS if we disabled it above. */
1380 priv->rss.enable = old_enable;
1381 stmmac_rss_configure(priv, priv->hw, &priv->rss,
1382 priv->plat->rx_queues_to_use);
/* !CONFIG_NET_CLS_ACT stub (body lines missing from this chunk —
 * presumably returns -EOPNOTSUPP; confirm against full source). */
1388 static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
1389 u32 dst_mask, u32 src_mask)
/* L3 filter on destination address only (full mask on dst). */
1395 static int stmmac_test_l3filt_da(struct stmmac_priv *priv)
1397 u32 addr = 0x10203040;
1399 return __stmmac_test_l3filt(priv, addr, 0, ~0, 0);
/* L3 filter on source address only (full mask on src). */
1402 static int stmmac_test_l3filt_sa(struct stmmac_priv *priv)
1404 u32 addr = 0x10203040;
1406 return __stmmac_test_l3filt(priv, 0, addr, 0, ~0);
1409 #ifdef CONFIG_NET_CLS_ACT
/* L4 (TCP/UDP port) filter test: same pre/post scheme as the L3 test but
 * matching on FLOW_DISSECTOR basic (ip_proto) + ports keys. RSS is
 * temporarily disabled, restored on exit. */
1410 static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src,
1411 u32 dst_mask, u32 src_mask, bool udp)
/* Key/mask layouts mirror flow_rule_match expectations: basic key first,
 * ports key at a fixed offset recorded in the dissector below. */
1414 struct flow_dissector_key_basic bkey;
1415 struct flow_dissector_key_ports key;
1416 } __aligned(BITS_PER_LONG / 8) keys;
1418 struct flow_dissector_key_basic bmask;
1419 struct flow_dissector_key_ports mask;
1420 } __aligned(BITS_PER_LONG / 8) masks;
1421 unsigned long dummy_cookie = 0xdeadbeef;
1422 struct stmmac_packet_attrs attr = { };
1423 struct flow_dissector *dissector;
1424 struct flow_cls_offload *cls;
1425 int ret, old_enable = 0;
1426 struct flow_rule *rule;
1428 if (!tc_can_offload(priv->dev))
1430 if (!priv->dma_cap.l3l4fnum)
1432 if (priv->rss.enable) {
1433 old_enable = priv->rss.enable;
1434 priv->rss.enable = false;
1435 stmmac_rss_configure(priv, priv->hw, NULL,
1436 priv->plat->rx_queues_to_use);
1439 dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
1445 dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_BASIC);
1446 dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_PORTS);
1447 dissector->offset[FLOW_DISSECTOR_KEY_BASIC] = 0;
1448 dissector->offset[FLOW_DISSECTOR_KEY_PORTS] = offsetof(typeof(keys), key);
1450 cls = kzalloc(sizeof(*cls), GFP_KERNEL);
1453 goto cleanup_dissector;
1456 cls->common.chain_index = 0;
1457 cls->command = FLOW_CLS_REPLACE;
1458 cls->cookie = dummy_cookie;
1460 rule = kzalloc(struct_size(rule, action.entries, 1), GFP_KERNEL);
1466 rule->match.dissector = dissector;
1467 rule->match.key = (void *)&keys;
1468 rule->match.mask = (void *)&masks;
1470 keys.bkey.ip_proto = udp ? IPPROTO_UDP : IPPROTO_TCP;
1471 keys.key.src = htons(src);
1472 keys.key.dst = htons(dst);
1473 masks.mask.src = src_mask;
1474 masks.mask.dst = dst_mask;
1478 rule->action.entries[0].id = FLOW_ACTION_DROP;
1479 rule->action.num_entries = 1;
1481 attr.dst = priv->dev->dev_addr;
1487 /* Shall receive packet */
1488 ret = __stmmac_test_loopback(priv, &attr);
1492 ret = stmmac_tc_setup_cls(priv, priv, cls);
1496 /* Shall NOT receive packet */
1497 ret = __stmmac_test_loopback(priv, &attr);
1498 ret = ret ? 0 : -EINVAL;
1500 cls->command = FLOW_CLS_DESTROY;
1501 stmmac_tc_setup_cls(priv, priv, cls);
/* Restore RSS if we disabled it above. */
1510 priv->rss.enable = old_enable;
1511 stmmac_rss_configure(priv, priv->hw, &priv->rss,
1512 priv->plat->rx_queues_to_use);
/* !CONFIG_NET_CLS_ACT stub (body lines missing from this chunk —
 * presumably returns -EOPNOTSUPP; confirm against full source). */
1518 static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src,
1519 u32 dst_mask, u32 src_mask, bool udp)
/* L4 filter on TCP destination port. */
1525 static int stmmac_test_l4filt_da_tcp(struct stmmac_priv *priv)
1527 u16 dummy_port = 0x123;
1529 return __stmmac_test_l4filt(priv, dummy_port, 0, ~0, 0, false);
/* L4 filter on TCP source port. */
1532 static int stmmac_test_l4filt_sa_tcp(struct stmmac_priv *priv)
1534 u16 dummy_port = 0x123;
1536 return __stmmac_test_l4filt(priv, 0, dummy_port, 0, ~0, false);
/* L4 filter on UDP destination port. */
1539 static int stmmac_test_l4filt_da_udp(struct stmmac_priv *priv)
1541 u16 dummy_port = 0x123;
1543 return __stmmac_test_l4filt(priv, dummy_port, 0, ~0, 0, true);
/* L4 filter on UDP source port. */
1546 static int stmmac_test_l4filt_sa_udp(struct stmmac_priv *priv)
1548 u16 dummy_port = 0x123;
1550 return __stmmac_test_l4filt(priv, 0, dummy_port, 0, ~0, true);
/* packet_type handler for the ARP-offload test: accept an ARP REPLY
 * addressed to the fake requester's MAC and complete the waiter. */
1553 static int stmmac_test_arp_validate(struct sk_buff *skb,
1554 struct net_device *ndev,
1555 struct packet_type *pt,
1556 struct net_device *orig_ndev)
1558 struct stmmac_test_priv *tpriv = pt->af_packet_priv;
1559 struct ethhdr *ehdr;
1560 struct arphdr *ahdr;
1562 ehdr = (struct ethhdr *)skb_mac_header(skb);
1563 if (!ether_addr_equal_unaligned(ehdr->h_dest, tpriv->packet->src))
1566 ahdr = arp_hdr(skb);
1567 if (ahdr->ar_op != htons(ARPOP_REPLY))
1571 complete(&tpriv->comp);
/* ARP offload test: program the MAC to answer ARP requests for ip_addr,
 * loop a broadcast ARP request from a fake host and expect the hardware-
 * generated ARP reply (captured by stmmac_test_arp_validate). Offload and
 * promiscuity are reverted on exit. */
1577 static int stmmac_test_arpoffload(struct stmmac_priv *priv)
1579 unsigned char src[ETH_ALEN] = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06};
1580 unsigned char dst[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
1581 struct stmmac_packet_attrs attr = { };
1582 struct stmmac_test_priv *tpriv;
1583 struct sk_buff *skb = NULL;
1584 u32 ip_addr = 0xdeadcafe;
1585 u32 ip_src = 0xdeadbeef;
1588 if (!priv->dma_cap.arpoffsel)
1591 tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
1596 init_completion(&tpriv->comp);
1598 tpriv->pt.type = htons(ETH_P_ARP);
1599 tpriv->pt.func = stmmac_test_arp_validate;
1600 tpriv->pt.dev = priv->dev;
1601 tpriv->pt.af_packet_priv = tpriv;
1602 tpriv->packet = &attr;
1603 dev_add_pack(&tpriv->pt);
1606 attr.ip_src = ip_src;
1608 attr.ip_dst = ip_addr;
1610 skb = stmmac_test_get_arp_skb(priv, &attr);
1616 ret = stmmac_set_arp_offload(priv, priv->hw, true, ip_addr);
1620 ret = dev_set_promiscuity(priv->dev, 1);
1624 skb_set_queue_mapping(skb, 0);
1625 ret = dev_queue_xmit(skb);
1627 goto cleanup_promisc;
1629 wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
1630 ret = tpriv->ok ? 0 : -ETIMEDOUT;
1633 dev_set_promiscuity(priv->dev, -1);
1635 stmmac_set_arp_offload(priv, priv->hw, false, 0x0);
1636 dev_remove_pack(&tpriv->pt);
/* Jumbo test helper: loop a max-sized frame (RX buffer size minus FCS)
 * through the given TX queue. */
1641 static int __stmmac_test_jumbo(struct stmmac_priv *priv, u16 queue)
1643 struct stmmac_packet_attrs attr = { };
1644 int size = priv->dma_buf_sz;
1646 attr.dst = priv->dev->dev_addr;
1647 attr.max_size = size - ETH_FCS_LEN;
1648 attr.queue_mapping = queue;
1650 return __stmmac_test_loopback(priv, &attr);
/* Jumbo test on queue 0 only. */
1653 static int stmmac_test_jumbo(struct stmmac_priv *priv)
1655 return __stmmac_test_jumbo(priv, 0);
/* Multi-queue jumbo test: run the jumbo test on every TX queue. */
1658 static int stmmac_test_mjumbo(struct stmmac_priv *priv)
1660 u32 chan, tx_cnt = priv->plat->tx_queues_to_use;
1666 for (chan = 0; chan < tx_cnt; chan++) {
1667 ret = __stmmac_test_jumbo(priv, chan);
/* Split Header test: after a UDP and then a TCP loopback, the xstats
 * rx_split_hdr_pkt_n counter must have advanced each time, proving the
 * hardware split the headers from the payload. */
1675 static int stmmac_test_sph(struct stmmac_priv *priv)
1677 unsigned long cnt_end, cnt_start = priv->xstats.rx_split_hdr_pkt_n;
1678 struct stmmac_packet_attrs attr = { };
1684 /* Check for UDP first */
1685 attr.dst = priv->dev->dev_addr;
1688 ret = __stmmac_test_loopback(priv, &attr);
1692 cnt_end = priv->xstats.rx_split_hdr_pkt_n;
1693 if (cnt_end <= cnt_start)
1696 /* Check for TCP now */
1697 cnt_start = cnt_end;
1699 attr.dst = priv->dev->dev_addr;
1702 ret = __stmmac_test_loopback(priv, &attr);
1706 cnt_end = priv->xstats.rx_split_hdr_pkt_n;
1707 if (cnt_end <= cnt_start)
/* Loopback mode each selftest entry requests before running:
 * none (test handles it itself), MAC-internal, or PHY loopback. */
1713 #define STMMAC_LOOPBACK_NONE 0
1714 #define STMMAC_LOOPBACK_MAC 1
1715 #define STMMAC_LOOPBACK_PHY 2
1717 static const struct stmmac_test {
1718 char name[ETH_GSTRING_LEN];
1720 int (*fn)(struct stmmac_priv *priv);
1721 } stmmac_selftests[] = {
1723 .name = "MAC Loopback ",
1724 .lb = STMMAC_LOOPBACK_MAC,
1725 .fn = stmmac_test_mac_loopback,
1727 .name = "PHY Loopback ",
1728 .lb = STMMAC_LOOPBACK_NONE, /* Test will handle it */
1729 .fn = stmmac_test_phy_loopback,
1731 .name = "MMC Counters ",
1732 .lb = STMMAC_LOOPBACK_PHY,
1733 .fn = stmmac_test_mmc,
1736 .lb = STMMAC_LOOPBACK_PHY,
1737 .fn = stmmac_test_eee,
1739 .name = "Hash Filter MC ",
1740 .lb = STMMAC_LOOPBACK_PHY,
1741 .fn = stmmac_test_hfilt,
1743 .name = "Perfect Filter UC ",
1744 .lb = STMMAC_LOOPBACK_PHY,
1745 .fn = stmmac_test_pfilt,
1747 .name = "MC Filter ",
1748 .lb = STMMAC_LOOPBACK_PHY,
1749 .fn = stmmac_test_mcfilt,
1751 .name = "UC Filter ",
1752 .lb = STMMAC_LOOPBACK_PHY,
1753 .fn = stmmac_test_ucfilt,
1755 .name = "Flow Control ",
1756 .lb = STMMAC_LOOPBACK_PHY,
1757 .fn = stmmac_test_flowctrl,
1760 .lb = STMMAC_LOOPBACK_PHY,
1761 .fn = stmmac_test_rss,
1763 .name = "VLAN Filtering ",
1764 .lb = STMMAC_LOOPBACK_PHY,
1765 .fn = stmmac_test_vlanfilt,
1767 .name = "Double VLAN Filtering",
1768 .lb = STMMAC_LOOPBACK_PHY,
1769 .fn = stmmac_test_dvlanfilt,
1771 .name = "Flexible RX Parser ",
1772 .lb = STMMAC_LOOPBACK_PHY,
1773 .fn = stmmac_test_rxp,
1775 .name = "SA Insertion (desc) ",
1776 .lb = STMMAC_LOOPBACK_PHY,
1777 .fn = stmmac_test_desc_sai,
1779 .name = "SA Replacement (desc)",
1780 .lb = STMMAC_LOOPBACK_PHY,
1781 .fn = stmmac_test_desc_sar,
1783 .name = "SA Insertion (reg) ",
1784 .lb = STMMAC_LOOPBACK_PHY,
1785 .fn = stmmac_test_reg_sai,
1787 .name = "SA Replacement (reg)",
1788 .lb = STMMAC_LOOPBACK_PHY,
1789 .fn = stmmac_test_reg_sar,
1791 .name = "VLAN TX Insertion ",
1792 .lb = STMMAC_LOOPBACK_PHY,
1793 .fn = stmmac_test_vlanoff,
1795 .name = "SVLAN TX Insertion ",
1796 .lb = STMMAC_LOOPBACK_PHY,
1797 .fn = stmmac_test_svlanoff,
1799 .name = "L3 DA Filtering ",
1800 .lb = STMMAC_LOOPBACK_PHY,
1801 .fn = stmmac_test_l3filt_da,
1803 .name = "L3 SA Filtering ",
1804 .lb = STMMAC_LOOPBACK_PHY,
1805 .fn = stmmac_test_l3filt_sa,
1807 .name = "L4 DA TCP Filtering ",
1808 .lb = STMMAC_LOOPBACK_PHY,
1809 .fn = stmmac_test_l4filt_da_tcp,
1811 .name = "L4 SA TCP Filtering ",
1812 .lb = STMMAC_LOOPBACK_PHY,
1813 .fn = stmmac_test_l4filt_sa_tcp,
1815 .name = "L4 DA UDP Filtering ",
1816 .lb = STMMAC_LOOPBACK_PHY,
1817 .fn = stmmac_test_l4filt_da_udp,
1819 .name = "L4 SA UDP Filtering ",
1820 .lb = STMMAC_LOOPBACK_PHY,
1821 .fn = stmmac_test_l4filt_sa_udp,
1823 .name = "ARP Offload ",
1824 .lb = STMMAC_LOOPBACK_PHY,
1825 .fn = stmmac_test_arpoffload,
1827 .name = "Jumbo Frame ",
1828 .lb = STMMAC_LOOPBACK_PHY,
1829 .fn = stmmac_test_jumbo,
1831 .name = "Multichannel Jumbo ",
1832 .lb = STMMAC_LOOPBACK_PHY,
1833 .fn = stmmac_test_mjumbo,
1835 .name = "Split Header ",
1836 .lb = STMMAC_LOOPBACK_PHY,
1837 .fn = stmmac_test_sph,
1841 void stmmac_selftest_run(struct net_device *dev,
1842 struct ethtool_test *etest, u64 *buf)
1844 struct stmmac_priv *priv = netdev_priv(dev);
1845 int count = stmmac_selftest_get_count(priv);
1846 int carrier = netif_carrier_ok(dev);
1849 memset(buf, 0, sizeof(*buf) * count);
1850 stmmac_test_next_id = 0;
1852 if (etest->flags != ETH_TEST_FL_OFFLINE) {
1853 netdev_err(priv->dev, "Only offline tests are supported\n");
1854 etest->flags |= ETH_TEST_FL_FAILED;
1856 } else if (!carrier) {
1857 netdev_err(priv->dev, "You need valid Link to execute tests\n");
1858 etest->flags |= ETH_TEST_FL_FAILED;
1862 /* We don't want extra traffic */
1863 netif_carrier_off(dev);
1865 /* Wait for queues drain */
1868 for (i = 0; i < count; i++) {
1871 switch (stmmac_selftests[i].lb) {
1872 case STMMAC_LOOPBACK_PHY:
1875 ret = phy_loopback(dev->phydev, true);
1879 case STMMAC_LOOPBACK_MAC:
1880 ret = stmmac_set_mac_loopback(priv, priv->ioaddr, true);
1882 case STMMAC_LOOPBACK_NONE:
1890 * First tests will always be MAC / PHY loobpack. If any of
1891 * them is not supported we abort earlier.
1894 netdev_err(priv->dev, "Loopback is not supported\n");
1895 etest->flags |= ETH_TEST_FL_FAILED;
1899 ret = stmmac_selftests[i].fn(priv);
1900 if (ret && (ret != -EOPNOTSUPP))
1901 etest->flags |= ETH_TEST_FL_FAILED;
1904 switch (stmmac_selftests[i].lb) {
1905 case STMMAC_LOOPBACK_PHY:
1908 ret = phy_loopback(dev->phydev, false);
1912 case STMMAC_LOOPBACK_MAC:
1913 stmmac_set_mac_loopback(priv, priv->ioaddr, false);
1920 /* Restart everything */
1922 netif_carrier_on(dev);
1925 void stmmac_selftest_get_strings(struct stmmac_priv *priv, u8 *data)
1930 for (i = 0; i < stmmac_selftest_get_count(priv); i++) {
1931 snprintf(p, ETH_GSTRING_LEN, "%2d. %s", i + 1,
1932 stmmac_selftests[i].name);
1933 p += ETH_GSTRING_LEN;
1937 int stmmac_selftest_get_count(struct stmmac_priv *priv)
1939 return ARRAY_SIZE(stmmac_selftests);