1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2020 Intel Corporation. */
3
4 /*
5  * Some functions in this program are taken from
6  * Linux kernel samples/bpf/xdpsock* and modified
7  * for use.
8  *
9  * See test_xsk.sh for detailed information on test topology
10  * and prerequisite network setup.
11  *
12  * This test program contains two threads, each with a single socket backed by
13  * a unique UMEM. It validates in-order packet delivery and packet content
14  * by sending packets from one socket to the other.
15  *
16  * Tests Information:
17  * ------------------
18  * These selftests test AF_XDP SKB and Native/DRV modes using veth
19  * Virtual Ethernet interfaces.
20  *
21  * For each mode, the following tests are run:
22  *    a. nopoll - soft-irq processing in run-to-completion mode
23  *    b. poll - using poll() syscall
24  *    c. Socket Teardown
25  *       Create a Tx and an Rx socket, Tx from one socket, Rx on another. Destroy
26  *       both sockets, then repeat multiple times. Only nopoll mode is used
27  *    d. Bi-directional sockets
28  *       Configure sockets as bi-directional tx/rx sockets, set up fill and
29  *       completion rings on each socket, tx/rx in both directions. Only nopoll
30  *       mode is used
31  *    e. Statistics
32  *       Trigger some error conditions and ensure that the appropriate statistics
33  *       are incremented. Within this test, the following statistics are tested:
34  *       i.   rx dropped
35  *            Increase the UMEM frame headroom to a value which results in
36  *            insufficient space in the rx buffer for both the packet and the headroom.
37  *       ii.  tx invalid
38  *            Set the 'len' field of tx descriptors to an invalid value (umem frame
39  *            size + 1).
40  *       iii. rx ring full
41  *            Reduce the size of the RX ring to a fraction of the fill ring size.
42  *       iv.  fill queue empty
43  *            Do not populate the fill queue and then try to receive pkts.
44  *    f. bpf_link resource persistence
45  *       Configure sockets at indexes 0 and 1, run traffic on queue id 0,
46  *       then remove the xsk sockets from queue 0 on both veth interfaces and
47  *       finally run traffic on queue id 1
48  *    g. unaligned mode
49  *    h. tests for invalid and corner case Tx descriptors, checking that the
50  *       invalid ones are discarded and the corner case ones are let through.
51  *    i. 2K frame size tests
52  *
53  * Total tests: 12
54  *
55  * Flow:
56  * -----
57  * - Single process spawns two threads: Tx and Rx
58  * - Each of these two threads attaches to a veth interface within its assigned
59  *   namespace
60  * - Each thread creates one AF_XDP socket connected to a unique umem for its
61  *   veth interface
62  * - Tx thread transmits 10k packets from veth<xxxx> to veth<yyyy>
63  * - Rx thread verifies that all 10k packets were received in order and have
64  *   the right content
65  *
66  * Enable/disable packet dump mode:
67  * --------------------------------
68  * To enable a dump of the L2 - L4 headers and payload of each packet on STDOUT,
69  * add the -D parameter to the params array in test_xsk.sh, e.g. params=("-S" "-D")
70  */
71
72 #define _GNU_SOURCE
73 #include <fcntl.h>
74 #include <errno.h>
75 #include <getopt.h>
76 #include <asm/barrier.h>
77 #include <linux/if_link.h>
78 #include <linux/if_ether.h>
79 #include <linux/ip.h>
80 #include <linux/udp.h>
81 #include <arpa/inet.h>
82 #include <net/if.h>
83 #include <locale.h>
84 #include <poll.h>
85 #include <pthread.h>
86 #include <signal.h>
87 #include <stdbool.h>
88 #include <stdio.h>
89 #include <stdlib.h>
90 #include <string.h>
91 #include <stddef.h>
92 #include <sys/mman.h>
93 #include <sys/socket.h>
94 #include <sys/time.h>
95 #include <sys/types.h>
96 #include <sys/queue.h>
97 #include <time.h>
98 #include <unistd.h>
99 #include <stdatomic.h>
100 #include "xsk.h"
101 #include "xskxceiver.h"
102 #include <bpf/bpf.h>
103 #include <linux/filter.h>
104 #include "../kselftest.h"
105
106 /* AF_XDP APIs were moved into libxdp and marked as deprecated in libbpf.
107  * Until xskxceiver is either moved to or rewritten on top of libxdp, suppress
108  * deprecation warnings in this file
109  */
110 #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
111
112 static const char *MAC1 = "\x00\x0A\x56\x9E\xEE\x62";
113 static const char *MAC2 = "\x00\x0A\x56\x9E\xEE\x61";
114 static const char *IP1 = "192.168.100.162";
115 static const char *IP2 = "192.168.100.161";
116 static const u16 UDP_PORT1 = 2020;
117 static const u16 UDP_PORT2 = 2121;
118
119 static void __exit_with_error(int error, const char *file, const char *func, int line)
120 {
121         ksft_test_result_fail("[%s:%s:%i]: ERROR: %d/\"%s\"\n", file, func, line, error,
122                               strerror(error));
123         ksft_exit_xfail();
124 }
125
126 #define exit_with_error(error) __exit_with_error(error, __FILE__, __func__, __LINE__)
127
128 #define mode_string(test) ((test)->ifobj_tx->xdp_flags & XDP_FLAGS_SKB_MODE ? "SKB" : "DRV")
129 #define busy_poll_string(test) ((test)->ifobj_tx->busy_poll ? "BUSY-POLL " : "")
130
131 static void report_failure(struct test_spec *test)
132 {
133         if (test->fail)
134                 return;
135
136         ksft_test_result_fail("FAIL: %s %s%s\n", mode_string(test), busy_poll_string(test),
137                               test->name);
138         test->fail = true;
139 }
140
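/* Fill 'size' bytes (rounded down to a multiple of 4) at 'dest' with 'val'
 * converted to network byte order; used to write the packet payload pattern.
 */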
141 static void memset32_htonl(void *dest, u32 val, u32 size)
142 {
143         u32 *ptr = (u32 *)dest;
144         int i;
145
146         val = htonl(val);
147
148         for (i = 0; i < (size & (~0x3)); i += 4)
149                 ptr[i >> 2] = val;
150 }
151
152 /*
153  * Fold a partial checksum
154  * This function code has been taken from
155  * Linux kernel include/asm-generic/checksum.h
156  */
157 static __u16 csum_fold(__u32 csum)
158 {
159         u32 sum = (__force u32)csum;
160
161         sum = (sum & 0xffff) + (sum >> 16);
162         sum = (sum & 0xffff) + (sum >> 16);
163         return (__force __u16)~sum;
164 }
165
166 /*
167  * This function code has been taken from
168  * Linux kernel lib/checksum.c
169  */
170 static u32 from64to32(u64 x)
171 {
172         /* add up 32-bit and 32-bit for 32+c bit */
173         x = (x & 0xffffffff) + (x >> 32);
174         /* add up carry.. */
175         x = (x & 0xffffffff) + (x >> 32);
176         return (u32)x;
177 }
178
179 /*
180  * This function code has been taken from
181  * Linux kernel lib/checksum.c
182  */
183 static __u32 csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, __u32 sum)
184 {
185         unsigned long long s = (__force u32)sum;
186
187         s += (__force u32)saddr;
188         s += (__force u32)daddr;
189 #ifdef __BIG_ENDIAN__
190         s += proto + len;
191 #else
192         s += (proto + len) << 8;
193 #endif
194         return (__force __u32)from64to32(s);
195 }
196
197 /*
198  * This function has been taken from
199  * Linux kernel include/asm-generic/checksum.h
200  */
201 static __u16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, __u32 sum)
202 {
203         return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
204 }
205
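/* Compute the UDP checksum over 'len' bytes of UDP header plus payload at
 * 'udp_pkt', folding in the IPv4 pseudo-header built from saddr, daddr and
 * proto.
 */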
206 static u16 udp_csum(u32 saddr, u32 daddr, u32 len, u8 proto, u16 *udp_pkt)
207 {
208         u32 csum = 0;
209         u32 cnt = 0;
210
211         /* udp hdr and data */
212         for (; cnt < len; cnt += 2)
213                 csum += udp_pkt[cnt >> 1];
214
215         return csum_tcpudp_magic(saddr, daddr, len, proto, csum);
216 }
217
218 static void gen_eth_hdr(struct ifobject *ifobject, struct ethhdr *eth_hdr)
219 {
220         memcpy(eth_hdr->h_dest, ifobject->dst_mac, ETH_ALEN);
221         memcpy(eth_hdr->h_source, ifobject->src_mac, ETH_ALEN);
222         eth_hdr->h_proto = htons(ETH_P_IP);
223 }
224
225 static void gen_ip_hdr(struct ifobject *ifobject, struct iphdr *ip_hdr)
226 {
227         ip_hdr->version = IP_PKT_VER;
228         ip_hdr->ihl = 0x5;
229         ip_hdr->tos = IP_PKT_TOS;
230         ip_hdr->tot_len = htons(IP_PKT_SIZE);
231         ip_hdr->id = 0;
232         ip_hdr->frag_off = 0;
233         ip_hdr->ttl = IPDEFTTL;
234         ip_hdr->protocol = IPPROTO_UDP;
235         ip_hdr->saddr = ifobject->src_ip;
236         ip_hdr->daddr = ifobject->dst_ip;
237         ip_hdr->check = 0;
238 }
239
240 static void gen_udp_hdr(u32 payload, void *pkt, struct ifobject *ifobject,
241                         struct udphdr *udp_hdr)
242 {
243         udp_hdr->source = htons(ifobject->src_port);
244         udp_hdr->dest = htons(ifobject->dst_port);
245         udp_hdr->len = htons(UDP_PKT_SIZE);
246         memset32_htonl(pkt + PKT_HDR_SIZE, payload, UDP_PKT_DATA_SIZE);
247 }
248
249 static bool is_umem_valid(struct ifobject *ifobj)
250 {
251         return !!ifobj->umem->umem;
252 }
253
254 static void gen_udp_csum(struct udphdr *udp_hdr, struct iphdr *ip_hdr)
255 {
256         udp_hdr->check = 0;
257         udp_hdr->check =
258             udp_csum(ip_hdr->saddr, ip_hdr->daddr, UDP_PKT_SIZE, IPPROTO_UDP, (u16 *)udp_hdr);
259 }
260
261 static int xsk_configure_umem(struct xsk_umem_info *umem, void *buffer, u64 size)
262 {
263         struct xsk_umem_config cfg = {
264                 .fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
265                 .comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
266                 .frame_size = umem->frame_size,
267                 .frame_headroom = umem->frame_headroom,
268                 .flags = XSK_UMEM__DEFAULT_FLAGS
269         };
270         int ret;
271
272         if (umem->unaligned_mode)
273                 cfg.flags |= XDP_UMEM_UNALIGNED_CHUNK_FLAG;
274
275         ret = xsk_umem__create(&umem->umem, buffer, size,
276                                &umem->fq, &umem->cq, &cfg);
277         if (ret)
278                 return ret;
279
280         umem->buffer = buffer;
281         return 0;
282 }
283
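/* Opt the socket into preferred busy polling: SO_PREFER_BUSY_POLL, a
 * SO_BUSY_POLL time of 20 usecs and a SO_BUSY_POLL_BUDGET of BATCH_SIZE
 * packets per poll.
 */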
284 static void enable_busy_poll(struct xsk_socket_info *xsk)
285 {
286         int sock_opt;
287
288         sock_opt = 1;
289         if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_PREFER_BUSY_POLL,
290                        (void *)&sock_opt, sizeof(sock_opt)) < 0)
291                 exit_with_error(errno);
292
293         sock_opt = 20;
294         if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL,
295                        (void *)&sock_opt, sizeof(sock_opt)) < 0)
296                 exit_with_error(errno);
297
298         sock_opt = BATCH_SIZE;
299         if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL_BUDGET,
300                        (void *)&sock_opt, sizeof(sock_opt)) < 0)
301                 exit_with_error(errno);
302 }
303
304 static int __xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_info *umem,
305                                   struct ifobject *ifobject, bool shared)
306 {
307         struct xsk_socket_config cfg = {};
308         struct xsk_ring_cons *rxr;
309         struct xsk_ring_prod *txr;
310
311         xsk->umem = umem;
312         cfg.rx_size = xsk->rxqsize;
313         cfg.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
314         cfg.libbpf_flags = XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD;
315         cfg.xdp_flags = ifobject->xdp_flags;
316         cfg.bind_flags = ifobject->bind_flags;
317         if (shared)
318                 cfg.bind_flags |= XDP_SHARED_UMEM;
319
320         txr = ifobject->tx_on ? &xsk->tx : NULL;
321         rxr = ifobject->rx_on ? &xsk->rx : NULL;
322         return xsk_socket__create(&xsk->xsk, ifobject->ifname, 0, umem->umem, rxr, txr, &cfg);
323 }
324
325 static struct option long_options[] = {
326         {"interface", required_argument, 0, 'i'},
327         {"busy-poll", no_argument, 0, 'b'},
328         {"dump-pkts", no_argument, 0, 'D'},
329         {"verbose", no_argument, 0, 'v'},
330         {0, 0, 0, 0}
331 };
332
333 static void usage(const char *prog)
334 {
335         const char *str =
336                 "  Usage: %s [OPTIONS]\n"
337                 "  Options:\n"
338                 "  -i, --interface      Use interface\n"
339                 "  -D, --dump-pkts      Dump packets L2 - L5\n"
340                 "  -v, --verbose        Verbose output\n"
341                 "  -b, --busy-poll      Enable busy poll\n";
342
343         ksft_print_msg(str, prog);
344 }
345
346 static int switch_namespace(const char *nsname)
347 {
348         char fqns[26] = "/var/run/netns/";
349         int nsfd;
350
351         if (!nsname || strlen(nsname) == 0)
352                 return -1;
353
354         strncat(fqns, nsname, sizeof(fqns) - strlen(fqns) - 1);
355         nsfd = open(fqns, O_RDONLY);
356
357         if (nsfd == -1)
358                 exit_with_error(errno);
359
360         if (setns(nsfd, 0) == -1)
361                 exit_with_error(errno);
362
363         print_verbose("NS switched: %s\n", nsname);
364
365         return nsfd;
366 }
367
368 static bool validate_interface(struct ifobject *ifobj)
369 {
370         if (!strcmp(ifobj->ifname, ""))
371                 return false;
372         return true;
373 }
374
375 static void parse_command_line(struct ifobject *ifobj_tx, struct ifobject *ifobj_rx, int argc,
376                                char **argv)
377 {
378         struct ifobject *ifobj;
379         u32 interface_nb = 0;
380         int option_index, c;
381
382         opterr = 0;
383
384         for (;;) {
385                 char *sptr, *token;
386
387                 c = getopt_long(argc, argv, "i:Dvb", long_options, &option_index);
388                 if (c == -1)
389                         break;
390
391                 switch (c) {
392                 case 'i':
393                         if (interface_nb == 0)
394                                 ifobj = ifobj_tx;
395                         else if (interface_nb == 1)
396                                 ifobj = ifobj_rx;
397                         else
398                                 break;
399
400                         sptr = strndupa(optarg, strlen(optarg));
401                         memcpy(ifobj->ifname, strsep(&sptr, ","), MAX_INTERFACE_NAME_CHARS);
402                         token = strsep(&sptr, ",");
403                         if (token)
404                                 memcpy(ifobj->nsname, token, MAX_INTERFACES_NAMESPACE_CHARS);
405                         interface_nb++;
406                         break;
407                 case 'D':
408                         opt_pkt_dump = true;
409                         break;
410                 case 'v':
411                         opt_verbose = true;
412                         break;
413                 case 'b':
414                         ifobj_tx->busy_poll = true;
415                         ifobj_rx->busy_poll = true;
416                         break;
417                 default:
418                         usage(basename(argv[0]));
419                         ksft_exit_xfail();
420                 }
421         }
422 }
423
424 static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
425                              struct ifobject *ifobj_rx)
426 {
427         u32 i, j;
428
429         for (i = 0; i < MAX_INTERFACES; i++) {
430                 struct ifobject *ifobj = i ? ifobj_rx : ifobj_tx;
431
432                 ifobj->xsk = &ifobj->xsk_arr[0];
433                 ifobj->use_poll = false;
434                 ifobj->use_fill_ring = true;
435                 ifobj->release_rx = true;
436                 ifobj->validation_func = NULL;
437
438                 if (i == 0) {
439                         ifobj->rx_on = false;
440                         ifobj->tx_on = true;
441                         ifobj->pkt_stream = test->tx_pkt_stream_default;
442                 } else {
443                         ifobj->rx_on = true;
444                         ifobj->tx_on = false;
445                         ifobj->pkt_stream = test->rx_pkt_stream_default;
446                 }
447
448                 memset(ifobj->umem, 0, sizeof(*ifobj->umem));
449                 ifobj->umem->num_frames = DEFAULT_UMEM_BUFFERS;
450                 ifobj->umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
451                 if (ifobj->shared_umem && ifobj->rx_on)
452                         ifobj->umem->base_addr = DEFAULT_UMEM_BUFFERS *
453                                 XSK_UMEM__DEFAULT_FRAME_SIZE;
454
455                 for (j = 0; j < MAX_SOCKETS; j++) {
456                         memset(&ifobj->xsk_arr[j], 0, sizeof(ifobj->xsk_arr[j]));
457                         ifobj->xsk_arr[j].rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS;
458                 }
459         }
460
461         test->ifobj_tx = ifobj_tx;
462         test->ifobj_rx = ifobj_rx;
463         test->current_step = 0;
464         test->total_steps = 1;
465         test->nb_sockets = 1;
466         test->fail = false;
467 }
468
469 static void test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
470                            struct ifobject *ifobj_rx, enum test_mode mode)
471 {
472         struct pkt_stream *tx_pkt_stream;
473         struct pkt_stream *rx_pkt_stream;
474         u32 i;
475
476         tx_pkt_stream = test->tx_pkt_stream_default;
477         rx_pkt_stream = test->rx_pkt_stream_default;
478         memset(test, 0, sizeof(*test));
479         test->tx_pkt_stream_default = tx_pkt_stream;
480         test->rx_pkt_stream_default = rx_pkt_stream;
481
482         for (i = 0; i < MAX_INTERFACES; i++) {
483                 struct ifobject *ifobj = i ? ifobj_rx : ifobj_tx;
484
485                 ifobj->xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
486                 if (mode == TEST_MODE_SKB)
487                         ifobj->xdp_flags |= XDP_FLAGS_SKB_MODE;
488                 else
489                         ifobj->xdp_flags |= XDP_FLAGS_DRV_MODE;
490
491                 ifobj->bind_flags = XDP_USE_NEED_WAKEUP | XDP_COPY;
492         }
493
494         __test_spec_init(test, ifobj_tx, ifobj_rx);
495 }
496
497 static void test_spec_reset(struct test_spec *test)
498 {
499         __test_spec_init(test, test->ifobj_tx, test->ifobj_rx);
500 }
501
502 static void test_spec_set_name(struct test_spec *test, const char *name)
503 {
504         strncpy(test->name, name, MAX_TEST_NAME_SIZE);
505 }
506
507 static void pkt_stream_reset(struct pkt_stream *pkt_stream)
508 {
509         if (pkt_stream)
510                 pkt_stream->rx_pkt_nb = 0;
511 }
512
513 static struct pkt *pkt_stream_get_pkt(struct pkt_stream *pkt_stream, u32 pkt_nb)
514 {
515         if (pkt_nb >= pkt_stream->nb_pkts)
516                 return NULL;
517
518         return &pkt_stream->pkts[pkt_nb];
519 }
520
521 static struct pkt *pkt_stream_get_next_rx_pkt(struct pkt_stream *pkt_stream, u32 *pkts_sent)
522 {
523         while (pkt_stream->rx_pkt_nb < pkt_stream->nb_pkts) {
524                 (*pkts_sent)++;
525                 if (pkt_stream->pkts[pkt_stream->rx_pkt_nb].valid)
526                         return &pkt_stream->pkts[pkt_stream->rx_pkt_nb++];
527                 pkt_stream->rx_pkt_nb++;
528         }
529         return NULL;
530 }
531
532 static void pkt_stream_delete(struct pkt_stream *pkt_stream)
533 {
534         free(pkt_stream->pkts);
535         free(pkt_stream);
536 }
537
538 static void pkt_stream_restore_default(struct test_spec *test)
539 {
540         struct pkt_stream *tx_pkt_stream = test->ifobj_tx->pkt_stream;
541         struct pkt_stream *rx_pkt_stream = test->ifobj_rx->pkt_stream;
542
543         if (tx_pkt_stream != test->tx_pkt_stream_default) {
544                 pkt_stream_delete(test->ifobj_tx->pkt_stream);
545                 test->ifobj_tx->pkt_stream = test->tx_pkt_stream_default;
546         }
547
548         if (rx_pkt_stream != test->rx_pkt_stream_default) {
549                 pkt_stream_delete(test->ifobj_rx->pkt_stream);
550                 test->ifobj_rx->pkt_stream = test->rx_pkt_stream_default;
551         }
552 }
553
554 static struct pkt_stream *__pkt_stream_alloc(u32 nb_pkts)
555 {
556         struct pkt_stream *pkt_stream;
557
558         pkt_stream = calloc(1, sizeof(*pkt_stream));
559         if (!pkt_stream)
560                 return NULL;
561
562         pkt_stream->pkts = calloc(nb_pkts, sizeof(*pkt_stream->pkts));
563         if (!pkt_stream->pkts) {
564                 free(pkt_stream);
565                 return NULL;
566         }
567
568         pkt_stream->nb_pkts = nb_pkts;
569         return pkt_stream;
570 }
571
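/* Set the address and length of a packet. The packet is marked invalid if it
 * is too large to fit in a umem frame together with the headroom; invalid
 * packets are expected to be discarded rather than received.
 */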
572 static void pkt_set(struct xsk_umem_info *umem, struct pkt *pkt, u64 addr, u32 len)
573 {
574         pkt->addr = addr + umem->base_addr;
575         pkt->len = len;
576         if (len > umem->frame_size - XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 2 - umem->frame_headroom)
577                 pkt->valid = false;
578         else
579                 pkt->valid = true;
580 }
581
582 static struct pkt_stream *pkt_stream_generate(struct xsk_umem_info *umem, u32 nb_pkts, u32 pkt_len)
583 {
584         struct pkt_stream *pkt_stream;
585         u32 i;
586
587         pkt_stream = __pkt_stream_alloc(nb_pkts);
588         if (!pkt_stream)
589                 exit_with_error(ENOMEM);
590
591         pkt_stream->nb_pkts = nb_pkts;
592         for (i = 0; i < nb_pkts; i++) {
593                 pkt_set(umem, &pkt_stream->pkts[i], (i % umem->num_frames) * umem->frame_size,
594                         pkt_len);
595                 pkt_stream->pkts[i].payload = i;
596         }
597
598         return pkt_stream;
599 }
600
601 static struct pkt_stream *pkt_stream_clone(struct xsk_umem_info *umem,
602                                            struct pkt_stream *pkt_stream)
603 {
604         return pkt_stream_generate(umem, pkt_stream->nb_pkts, pkt_stream->pkts[0].len);
605 }
606
607 static void pkt_stream_replace(struct test_spec *test, u32 nb_pkts, u32 pkt_len)
608 {
609         struct pkt_stream *pkt_stream;
610
611         pkt_stream = pkt_stream_generate(test->ifobj_tx->umem, nb_pkts, pkt_len);
612         test->ifobj_tx->pkt_stream = pkt_stream;
613         pkt_stream = pkt_stream_generate(test->ifobj_rx->umem, nb_pkts, pkt_len);
614         test->ifobj_rx->pkt_stream = pkt_stream;
615 }
616
617 static void __pkt_stream_replace_half(struct ifobject *ifobj, u32 pkt_len,
618                                       int offset)
619 {
620         struct xsk_umem_info *umem = ifobj->umem;
621         struct pkt_stream *pkt_stream;
622         u32 i;
623
624         pkt_stream = pkt_stream_clone(umem, ifobj->pkt_stream);
625         for (i = 1; i < ifobj->pkt_stream->nb_pkts; i += 2)
626                 pkt_set(umem, &pkt_stream->pkts[i],
627                         (i % umem->num_frames) * umem->frame_size + offset, pkt_len);
628
629         ifobj->pkt_stream = pkt_stream;
630 }
631
632 static void pkt_stream_replace_half(struct test_spec *test, u32 pkt_len, int offset)
633 {
634         __pkt_stream_replace_half(test->ifobj_tx, pkt_len, offset);
635         __pkt_stream_replace_half(test->ifobj_rx, pkt_len, offset);
636 }
637
638 static void pkt_stream_receive_half(struct test_spec *test)
639 {
640         struct xsk_umem_info *umem = test->ifobj_rx->umem;
641         struct pkt_stream *pkt_stream = test->ifobj_tx->pkt_stream;
642         u32 i;
643
644         test->ifobj_rx->pkt_stream = pkt_stream_generate(umem, pkt_stream->nb_pkts,
645                                                          pkt_stream->pkts[0].len);
646         pkt_stream = test->ifobj_rx->pkt_stream;
647         for (i = 1; i < pkt_stream->nb_pkts; i += 2)
648                 pkt_stream->pkts[i].valid = false;
649 }
650
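/* Write the Ethernet, IP and UDP headers plus the payload for packet 'pkt_nb'
 * directly into its umem buffer. Invalid or undersized packets are returned
 * without generating any data.
 */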
651 static struct pkt *pkt_generate(struct ifobject *ifobject, u32 pkt_nb)
652 {
653         struct pkt *pkt = pkt_stream_get_pkt(ifobject->pkt_stream, pkt_nb);
654         struct udphdr *udp_hdr;
655         struct ethhdr *eth_hdr;
656         struct iphdr *ip_hdr;
657         void *data;
658
659         if (!pkt)
660                 return NULL;
661         if (!pkt->valid || pkt->len < MIN_PKT_SIZE)
662                 return pkt;
663
664         data = xsk_umem__get_data(ifobject->umem->buffer, pkt->addr);
665         udp_hdr = (struct udphdr *)(data + sizeof(struct ethhdr) + sizeof(struct iphdr));
666         ip_hdr = (struct iphdr *)(data + sizeof(struct ethhdr));
667         eth_hdr = (struct ethhdr *)data;
668
669         gen_udp_hdr(pkt_nb, data, ifobject, udp_hdr);
670         gen_ip_hdr(ifobject, ip_hdr);
671         gen_udp_csum(udp_hdr, ip_hdr);
672         gen_eth_hdr(ifobject, eth_hdr);
673
674         return pkt;
675 }
676
677 static void __pkt_stream_generate_custom(struct ifobject *ifobj,
678                                          struct pkt *pkts, u32 nb_pkts)
679 {
680         struct pkt_stream *pkt_stream;
681         u32 i;
682
683         pkt_stream = __pkt_stream_alloc(nb_pkts);
684         if (!pkt_stream)
685                 exit_with_error(ENOMEM);
686
687         for (i = 0; i < nb_pkts; i++) {
688                 pkt_stream->pkts[i].addr = pkts[i].addr + ifobj->umem->base_addr;
689                 pkt_stream->pkts[i].len = pkts[i].len;
690                 pkt_stream->pkts[i].payload = i;
691                 pkt_stream->pkts[i].valid = pkts[i].valid;
692         }
693
694         ifobj->pkt_stream = pkt_stream;
695 }
696
697 static void pkt_stream_generate_custom(struct test_spec *test, struct pkt *pkts, u32 nb_pkts)
698 {
699         __pkt_stream_generate_custom(test->ifobj_tx, pkts, nb_pkts);
700         __pkt_stream_generate_custom(test->ifobj_rx, pkts, nb_pkts);
701 }
702
703 static void pkt_dump(void *pkt, u32 len)
704 {
705         char s[INET_ADDRSTRLEN];
706         struct ethhdr *ethhdr;
707         struct udphdr *udphdr;
708         struct iphdr *iphdr;
709         int payload, i;
710
711         ethhdr = pkt;
712         iphdr = pkt + sizeof(*ethhdr);
713         udphdr = pkt + sizeof(*ethhdr) + sizeof(*iphdr);
714
715         /* extract L2 frame */
716         fprintf(stdout, "DEBUG>> L2: dst mac: ");
717         for (i = 0; i < ETH_ALEN; i++)
718                 fprintf(stdout, "%02X", ethhdr->h_dest[i]);
719
720         fprintf(stdout, "\nDEBUG>> L2: src mac: ");
721         for (i = 0; i < ETH_ALEN; i++)
722                 fprintf(stdout, "%02X", ethhdr->h_source[i]);
723
724         /* extract L3 frame */
725         fprintf(stdout, "\nDEBUG>> L3: ip_hdr->ihl: %02X\n", iphdr->ihl);
726         fprintf(stdout, "DEBUG>> L3: ip_hdr->saddr: %s\n",
727                 inet_ntop(AF_INET, &iphdr->saddr, s, sizeof(s)));
728         fprintf(stdout, "DEBUG>> L3: ip_hdr->daddr: %s\n",
729                 inet_ntop(AF_INET, &iphdr->daddr, s, sizeof(s)));
730         /* extract L4 frame */
731         fprintf(stdout, "DEBUG>> L4: udp_hdr->src: %d\n", ntohs(udphdr->source));
732         fprintf(stdout, "DEBUG>> L4: udp_hdr->dst: %d\n", ntohs(udphdr->dest));
733         /* extract L5 frame */
734         payload = *((uint32_t *)(pkt + PKT_HDR_SIZE));
735
736         fprintf(stdout, "DEBUG>> L5: payload: %d\n", payload);
737         fprintf(stdout, "---------------------------------------\n");
738 }
739
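/* Check that the received address lands at the expected offset within its umem
 * frame, accounting for XDP_PACKET_HEADROOM, the configured frame headroom
 * and, when fill ring addresses are taken from the packet stream, the original
 * packet offset.
 */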
740 static bool is_offset_correct(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream, u64 addr,
741                               u64 pkt_stream_addr)
742 {
743         u32 headroom = umem->unaligned_mode ? 0 : umem->frame_headroom;
744         u32 offset = addr % umem->frame_size, expected_offset = 0;
745
746         if (!pkt_stream->use_addr_for_fill)
747                 pkt_stream_addr = 0;
748
749         expected_offset += (pkt_stream_addr + headroom + XDP_PACKET_HEADROOM) % umem->frame_size;
750
751         if (offset == expected_offset)
752                 return true;
753
754         ksft_print_msg("[%s] expected [%u], got [%u]\n", __func__, expected_offset, offset);
755         return false;
756 }
757
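/* Validate a received packet against the expected one from the packet stream:
 * the length must match and, for packets of at least the minimum size, the
 * payload sequence number must equal the expected payload value.
 */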
758 static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len)
759 {
760         void *data = xsk_umem__get_data(buffer, addr);
761         struct iphdr *iphdr = (struct iphdr *)(data + sizeof(struct ethhdr));
762
763         if (!pkt) {
764                 ksft_print_msg("[%s] too many packets received\n", __func__);
765                 return false;
766         }
767
768         if (len < MIN_PKT_SIZE || pkt->len < MIN_PKT_SIZE) {
769                 /* Do not try to verify packets that are smaller than minimum size. */
770                 return true;
771         }
772
773         if (pkt->len != len) {
774                 ksft_print_msg("[%s] expected length [%d], got length [%d]\n",
775                                __func__, pkt->len, len);
776                 return false;
777         }
778
779         if (iphdr->version == IP_PKT_VER && iphdr->tos == IP_PKT_TOS) {
780                 u32 seqnum = ntohl(*((u32 *)(data + PKT_HDR_SIZE)));
781
782                 if (opt_pkt_dump)
783                         pkt_dump(data, PKT_SIZE);
784
785                 if (pkt->payload != seqnum) {
786                         ksft_print_msg("[%s] expected seqnum [%d], got seqnum [%d]\n",
787                                        __func__, pkt->payload, seqnum);
788                         return false;
789                 }
790         } else {
791                 ksft_print_msg("Invalid frame received: ");
792                 ksft_print_msg("[IP_PKT_VER: %02X], [IP_PKT_TOS: %02X]\n", iphdr->version,
793                                iphdr->tos);
794                 return false;
795         }
796
797         return true;
798 }
799
800 static void kick_tx(struct xsk_socket_info *xsk)
801 {
802         int ret;
803
804         ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
805         if (ret >= 0)
806                 return;
807         if (errno == ENOBUFS || errno == EAGAIN || errno == EBUSY || errno == ENETDOWN) {
808                 usleep(100);
809                 return;
810         }
811         exit_with_error(errno);
812 }
813
814 static void kick_rx(struct xsk_socket_info *xsk)
815 {
816         int ret;
817
818         ret = recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL);
819         if (ret < 0)
820                 exit_with_error(errno);
821 }
822
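/* Reap up to 'batch_size' Tx completions from the completion ring, waking the
 * kernel first if needed, and decrement the outstanding Tx counter.
 */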
823 static int complete_pkts(struct xsk_socket_info *xsk, int batch_size)
824 {
825         unsigned int rcvd;
826         u32 idx;
827
828         if (xsk_ring_prod__needs_wakeup(&xsk->tx))
829                 kick_tx(xsk);
830
831         rcvd = xsk_ring_cons__peek(&xsk->umem->cq, batch_size, &idx);
832         if (rcvd) {
833                 if (rcvd > xsk->outstanding_tx) {
834                         u64 addr = *xsk_ring_cons__comp_addr(&xsk->umem->cq, idx + rcvd - 1);
835
836                         ksft_print_msg("[%s] Too many packets completed\n", __func__);
837                         ksft_print_msg("Last completion address: %llx\n", addr);
838                         return TEST_FAILURE;
839                 }
840
841                 xsk_ring_cons__release(&xsk->umem->cq, rcvd);
842                 xsk->outstanding_tx -= rcvd;
843         }
844
845         return TEST_PASS;
846 }
847
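/* Receive and validate packets until the whole expected Rx stream has been
 * seen or THREAD_TMOUT expires. Received buffers are handed back via the fill
 * ring unless the test disables it, and the pacing counters are updated so
 * that the Tx thread can keep sending.
 */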
848 static int receive_pkts(struct test_spec *test, struct pollfd *fds)
849 {
850         struct timeval tv_end, tv_now, tv_timeout = {THREAD_TMOUT, 0};
851         struct pkt_stream *pkt_stream = test->ifobj_rx->pkt_stream;
852         u32 idx_rx = 0, idx_fq = 0, rcvd, i, pkts_sent = 0;
853         struct xsk_socket_info *xsk = test->ifobj_rx->xsk;
854         struct ifobject *ifobj = test->ifobj_rx;
855         struct xsk_umem_info *umem = xsk->umem;
856         struct pkt *pkt;
857         int ret;
858
859         ret = gettimeofday(&tv_now, NULL);
860         if (ret)
861                 exit_with_error(errno);
862         timeradd(&tv_now, &tv_timeout, &tv_end);
863
864         pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &pkts_sent);
865         while (pkt) {
866                 ret = gettimeofday(&tv_now, NULL);
867                 if (ret)
868                         exit_with_error(errno);
869                 if (timercmp(&tv_now, &tv_end, >)) {
870                         ksft_print_msg("ERROR: [%s] Receive loop timed out\n", __func__);
871                         return TEST_FAILURE;
872                 }
873
874                 kick_rx(xsk);
875                 if (ifobj->use_poll) {
876                         ret = poll(fds, 1, POLL_TMOUT);
877                         if (ret < 0)
878                                 exit_with_error(-ret);
879
880                         if (!ret) {
881                                 if (!is_umem_valid(test->ifobj_tx))
882                                         return TEST_PASS;
883
884                                 ksft_print_msg("ERROR: [%s] Poll timed out\n", __func__);
885                                 return TEST_FAILURE;
886
887                         }
888
889                         if (!(fds->revents & POLLIN))
890                                 continue;
891                 }
892
893                 rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx);
894                 if (!rcvd)
895                         continue;
896
897                 if (ifobj->use_fill_ring) {
898                         ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
899                         while (ret != rcvd) {
900                                 if (ret < 0)
901                                         exit_with_error(-ret);
902                                 if (xsk_ring_prod__needs_wakeup(&umem->fq)) {
903                                         ret = poll(fds, 1, POLL_TMOUT);
904                                         if (ret < 0)
905                                                 exit_with_error(-ret);
906                                 }
907                                 ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
908                         }
909                 }
910
911                 for (i = 0; i < rcvd; i++) {
912                         const struct xdp_desc *desc = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++);
913                         u64 addr = desc->addr, orig;
914
915                         orig = xsk_umem__extract_addr(addr);
916                         addr = xsk_umem__add_offset_to_addr(addr);
917
918                         if (!is_pkt_valid(pkt, umem->buffer, addr, desc->len) ||
919                             !is_offset_correct(umem, pkt_stream, addr, pkt->addr))
920                                 return TEST_FAILURE;
921
922                         if (ifobj->use_fill_ring)
923                                 *xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) = orig;
924                         pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &pkts_sent);
925                 }
926
927                 if (ifobj->use_fill_ring)
928                         xsk_ring_prod__submit(&umem->fq, rcvd);
929                 if (ifobj->release_rx)
930                         xsk_ring_cons__release(&xsk->rx, rcvd);
931
932                 pthread_mutex_lock(&pacing_mutex);
933                 pkts_in_flight -= pkts_sent;
934                 if (pkts_in_flight < umem->num_frames)
935                         pthread_cond_signal(&pacing_cond);
936                 pthread_mutex_unlock(&pacing_mutex);
937                 pkts_sent = 0;
938         }
939
940         return TEST_PASS;
941 }
942
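/* Reserve a batch of Tx descriptors, fill them from the packet stream and
 * submit them. Transmission is paced against the receiver through
 * pkts_in_flight/pacing_cond so that the umem does not run out of frames.
 */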
943 static int __send_pkts(struct ifobject *ifobject, u32 *pkt_nb, struct pollfd *fds,
944                        bool timeout)
945 {
946         struct xsk_socket_info *xsk = ifobject->xsk;
947         bool use_poll = ifobject->use_poll;
948         u32 i, idx = 0, valid_pkts = 0;
        int ret;
949
950         while (xsk_ring_prod__reserve(&xsk->tx, BATCH_SIZE, &idx) < BATCH_SIZE) {
951                 if (use_poll) {
952                         ret = poll(fds, 1, POLL_TMOUT);
953                         if (timeout) {
954                                 if (ret < 0) {
955                                         ksft_print_msg("ERROR: [%s] Poll error %d\n",
956                                                        __func__, ret);
957                                         return TEST_FAILURE;
958                                 }
959                                 if (ret == 0)
960                                         return TEST_PASS;
961                                 break;
962                         }
963                         if (ret <= 0) {
964                                 ksft_print_msg("ERROR: [%s] Poll error %d\n",
965                                                __func__, ret);
966                                 return TEST_FAILURE;
967                         }
968                 }
969
970                 complete_pkts(xsk, BATCH_SIZE);
971         }
972
973         for (i = 0; i < BATCH_SIZE; i++) {
974                 struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx, idx + i);
975                 struct pkt *pkt = pkt_generate(ifobject, *pkt_nb);
976
977                 if (!pkt)
978                         break;
979
980                 tx_desc->addr = pkt->addr;
981                 tx_desc->len = pkt->len;
982                 (*pkt_nb)++;
983                 if (pkt->valid)
984                         valid_pkts++;
985         }
986
987         pthread_mutex_lock(&pacing_mutex);
988         pkts_in_flight += valid_pkts;
989         /* pkts_in_flight might be negative if many invalid packets are sent */
990         if (pkts_in_flight >= (int)(ifobject->umem->num_frames - BATCH_SIZE)) {
991                 kick_tx(xsk);
992                 pthread_cond_wait(&pacing_cond, &pacing_mutex);
993         }
994         pthread_mutex_unlock(&pacing_mutex);
995
996         xsk_ring_prod__submit(&xsk->tx, i);
997         xsk->outstanding_tx += valid_pkts;
998
999         if (use_poll) {
1000                 ret = poll(fds, 1, POLL_TMOUT);
1001                 if (ret <= 0) {
1002                         if (ret == 0 && timeout)
1003                                 return TEST_PASS;
1004
1005                         ksft_print_msg("ERROR: [%s] Poll error %d\n", __func__, ret);
1006                         return TEST_FAILURE;
1007                 }
1008         }
1009
1010         if (!timeout) {
1011                 if (complete_pkts(xsk, i))
1012                         return TEST_FAILURE;
1013
1014                 usleep(10);
1015                 return TEST_PASS;
1016         }
1017
1018         return TEST_CONTINUE;
1019 }
1020
1021 static void wait_for_tx_completion(struct xsk_socket_info *xsk)
1022 {
1023         while (xsk->outstanding_tx)
1024                 complete_pkts(xsk, BATCH_SIZE);
1025 }
1026
1027 static int send_pkts(struct test_spec *test, struct ifobject *ifobject)
1028 {
1029         bool timeout = !is_umem_valid(test->ifobj_rx);
1030         struct pollfd fds = { };
1031         u32 pkt_cnt = 0, ret;
1032
1033         fds.fd = xsk_socket__fd(ifobject->xsk->xsk);
1034         fds.events = POLLOUT;
1035
1036         while (pkt_cnt < ifobject->pkt_stream->nb_pkts) {
1037                 ret = __send_pkts(ifobject, &pkt_cnt, &fds, timeout);
1038                 if ((ret || test->fail) && !timeout)
1039                         return TEST_FAILURE;
1040                 else if (ret == TEST_PASS && timeout)
1041                         return ret;
1042         }
1043
1044         wait_for_tx_completion(ifobject->xsk);
1045         return TEST_PASS;
1046 }
1047
1048 static int get_xsk_stats(struct xsk_socket *xsk, struct xdp_statistics *stats)
1049 {
1050         int fd = xsk_socket__fd(xsk), err;
1051         socklen_t optlen, expected_len;
1052
1053         optlen = sizeof(*stats);
1054         err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, stats, &optlen);
1055         if (err) {
1056                 ksft_print_msg("[%s] getsockopt(XDP_STATISTICS) error %u %s\n",
1057                                __func__, errno, strerror(errno));
1058                 return TEST_FAILURE;
1059         }
1060
1061         expected_len = sizeof(struct xdp_statistics);
1062         if (optlen != expected_len) {
1063                 ksft_print_msg("[%s] getsockopt optlen error. Expected: %u got: %u\n",
1064                                __func__, expected_len, optlen);
1065                 return TEST_FAILURE;
1066         }
1067
1068         return TEST_PASS;
1069 }
1070
1071 static int validate_rx_dropped(struct ifobject *ifobject)
1072 {
1073         struct xsk_socket *xsk = ifobject->xsk->xsk;
1074         struct xdp_statistics stats;
1075         int err;
1076
1077         kick_rx(ifobject->xsk);
1078
1079         err = get_xsk_stats(xsk, &stats);
1080         if (err)
1081                 return TEST_FAILURE;
1082
1083         if (stats.rx_dropped == ifobject->pkt_stream->nb_pkts / 2)
1084                 return TEST_PASS;
1085
1086         return TEST_FAILURE;
1087 }
1088
1089 static int validate_rx_full(struct ifobject *ifobject)
1090 {
1091         struct xsk_socket *xsk = ifobject->xsk->xsk;
1092         struct xdp_statistics stats;
1093         int err;
1094
1095         usleep(1000);
1096         kick_rx(ifobject->xsk);
1097
1098         err = get_xsk_stats(xsk, &stats);
1099         if (err)
1100                 return TEST_FAILURE;
1101
1102         if (stats.rx_ring_full)
1103                 return TEST_PASS;
1104
1105         return TEST_FAILURE;
1106 }
1107
1108 static int validate_fill_empty(struct ifobject *ifobject)
1109 {
1110         struct xsk_socket *xsk = ifobject->xsk->xsk;
1111         struct xdp_statistics stats;
1112         int err;
1113
1114         usleep(1000);
1115         kick_rx(ifobject->xsk);
1116
1117         err = get_xsk_stats(xsk, &stats);
1118         if (err)
1119                 return TEST_FAILURE;
1120
1121         if (stats.rx_fill_ring_empty_descs)
1122                 return TEST_PASS;
1123
1124         return TEST_FAILURE;
1125 }
1126
1127 static int validate_tx_invalid_descs(struct ifobject *ifobject)
1128 {
1129         struct xsk_socket *xsk = ifobject->xsk->xsk;
1130         int fd = xsk_socket__fd(xsk);
1131         struct xdp_statistics stats;
1132         socklen_t optlen;
1133         int err;
1134
1135         optlen = sizeof(stats);
1136         err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen);
1137         if (err) {
1138                 ksft_print_msg("[%s] getsockopt(XDP_STATISTICS) error %u %s\n",
1139                                __func__, errno, strerror(errno));
1140                 return TEST_FAILURE;
1141         }
1142
1143         if (stats.tx_invalid_descs != ifobject->pkt_stream->nb_pkts / 2) {
1144                 ksft_print_msg("[%s] tx_invalid_descs incorrect. Got [%u] expected [%u]\n",
1145                                __func__, stats.tx_invalid_descs, ifobject->pkt_stream->nb_pkts / 2);
1146                 return TEST_FAILURE;
1147         }
1148
1149         return TEST_PASS;
1150 }
1151
1152 static void xsk_configure_socket(struct test_spec *test, struct ifobject *ifobject,
1153                                  struct xsk_umem_info *umem, bool tx)
1154 {
1155         int i, ret;
1156
1157         for (i = 0; i < test->nb_sockets; i++) {
1158                 bool shared = (ifobject->shared_umem && tx) ? true : !!i;
1159                 u32 ctr = 0;
1160
1161                 while (ctr++ < SOCK_RECONF_CTR) {
1162                         ret = __xsk_configure_socket(&ifobject->xsk_arr[i], umem,
1163                                                      ifobject, shared);
1164                         if (!ret)
1165                                 break;
1166
1167                         /* Retry if it fails as xsk_socket__create() is asynchronous */
1168                         if (ctr >= SOCK_RECONF_CTR)
1169                                 exit_with_error(-ret);
1170                         usleep(USLEEP_MAX);
1171                 }
1172                 if (ifobject->busy_poll)
1173                         enable_busy_poll(&ifobject->xsk_arr[i]);
1174         }
1175 }
1176
1177 static void thread_common_ops_tx(struct test_spec *test, struct ifobject *ifobject)
1178 {
1179         xsk_configure_socket(test, ifobject, test->ifobj_rx->umem, true);
1180         ifobject->xsk = &ifobject->xsk_arr[0];
1181         ifobject->xsk_map_fd = test->ifobj_rx->xsk_map_fd;
1182         memcpy(ifobject->umem, test->ifobj_rx->umem, sizeof(struct xsk_umem_info));
1183 }
1184
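/* Pre-populate the fill ring with up to XSK_RING_PROD__DEFAULT_NUM_DESCS
 * buffers. Addresses are taken from the packet stream when use_addr_for_fill
 * is set (unaligned mode), otherwise frame-aligned addresses are used.
 */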
1185 static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream)
1186 {
1187         u32 idx = 0, i, buffers_to_fill;
1188         int ret;
1189
1190         if (umem->num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS)
1191                 buffers_to_fill = umem->num_frames;
1192         else
1193                 buffers_to_fill = XSK_RING_PROD__DEFAULT_NUM_DESCS;
1194
1195         ret = xsk_ring_prod__reserve(&umem->fq, buffers_to_fill, &idx);
1196         if (ret != buffers_to_fill)
1197                 exit_with_error(ENOSPC);
1198         for (i = 0; i < buffers_to_fill; i++) {
1199                 u64 addr;
1200
1201                 if (pkt_stream->use_addr_for_fill) {
1202                         struct pkt *pkt = pkt_stream_get_pkt(pkt_stream, i);
1203
1204                         if (!pkt)
1205                                 break;
1206                         addr = pkt->addr;
1207                 } else {
1208                         addr = i * umem->frame_size;
1209                 }
1210
1211                 *xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr;
1212         }
1213         xsk_ring_prod__submit(&umem->fq, buffers_to_fill);
1214 }
1215
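/* Common per-thread setup: switch to the assigned network namespace, mmap and
 * register the umem, populate the fill ring and create the sockets. On the Rx
 * side, also load the XDP program, verify its attach mode and insert the
 * socket into the xskmap.
 */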
1216 static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
1217 {
1218         u64 umem_sz = ifobject->umem->num_frames * ifobject->umem->frame_size;
1219         int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
1220         LIBBPF_OPTS(bpf_xdp_query_opts, opts);
1221         int ret, ifindex;
1222         void *bufs;
1223
1224         ifobject->ns_fd = switch_namespace(ifobject->nsname);
1225
1226         if (ifobject->umem->unaligned_mode)
1227                 mmap_flags |= MAP_HUGETLB;
1228
1229         if (ifobject->shared_umem)
1230                 umem_sz *= 2;
1231
1232         bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
1233         if (bufs == MAP_FAILED)
1234                 exit_with_error(errno);
1235
1236         ret = xsk_configure_umem(ifobject->umem, bufs, umem_sz);
1237         if (ret)
1238                 exit_with_error(-ret);
1239
1240         xsk_populate_fill_ring(ifobject->umem, ifobject->pkt_stream);
1241
1242         xsk_configure_socket(test, ifobject, ifobject->umem, false);
1243
1244         ifobject->xsk = &ifobject->xsk_arr[0];
1245
1246         if (!ifobject->rx_on)
1247                 return;
1248
1249         ifindex = if_nametoindex(ifobject->ifname);
1250         if (!ifindex)
1251                 exit_with_error(errno);
1252
1253         ret = xsk_setup_xdp_prog_xsk(ifobject->xsk->xsk, &ifobject->xsk_map_fd);
1254         if (ret)
1255                 exit_with_error(-ret);
1256
1257         ret = bpf_xdp_query(ifindex, ifobject->xdp_flags, &opts);
1258         if (ret)
1259                 exit_with_error(-ret);
1260
1261         if (ifobject->xdp_flags & XDP_FLAGS_SKB_MODE) {
1262                 if (opts.attach_mode != XDP_ATTACHED_SKB) {
1263                         ksft_print_msg("ERROR: [%s] XDP prog not in SKB mode\n");
1264                         exit_with_error(-EINVAL);
1265                 }
1266         } else if (ifobject->xdp_flags & XDP_FLAGS_DRV_MODE) {
1267                 if (opts.attach_mode != XDP_ATTACHED_DRV) {
1268                         ksft_print_msg("ERROR: [%s] XDP prog not in DRV mode\n");
1269                         exit_with_error(-EINVAL);
1270                 }
1271         }
1272
1273         ret = xsk_socket__update_xskmap(ifobject->xsk->xsk, ifobject->xsk_map_fd);
1274         if (ret)
1275                 exit_with_error(-ret);
1276 }
1277
1278 static void *worker_testapp_validate_tx(void *arg)
1279 {
1280         struct test_spec *test = (struct test_spec *)arg;
1281         struct ifobject *ifobject = test->ifobj_tx;
1282         int err;
1283
1284         if (test->current_step == 1) {
1285                 if (!ifobject->shared_umem)
1286                         thread_common_ops(test, ifobject);
1287                 else
1288                         thread_common_ops_tx(test, ifobject);
1289         }
1290
1291         print_verbose("Sending %d packets on interface %s\n", ifobject->pkt_stream->nb_pkts,
1292                       ifobject->ifname);
1293         err = send_pkts(test, ifobject);
1294
1295         if (!err && ifobject->validation_func)
1296                 err = ifobject->validation_func(ifobject);
1297         if (err)
1298                 report_failure(test);
1299
1300         pthread_exit(NULL);
1301 }
1302
1303 static void *worker_testapp_validate_rx(void *arg)
1304 {
1305         struct test_spec *test = (struct test_spec *)arg;
1306         struct ifobject *ifobject = test->ifobj_rx;
1307         struct pollfd fds = { };
1308         int id = 0;
1309         int err;
1310
1311         if (test->current_step == 1) {
1312                 thread_common_ops(test, ifobject);
1313         } else {
1314                 bpf_map_delete_elem(ifobject->xsk_map_fd, &id);
1315                 xsk_socket__update_xskmap(ifobject->xsk->xsk, ifobject->xsk_map_fd);
1316         }
1317
1318         fds.fd = xsk_socket__fd(ifobject->xsk->xsk);
1319         fds.events = POLLIN;
1320
1321         pthread_barrier_wait(&barr);
1322
1323         err = receive_pkts(test, &fds);
1324
1325         if (!err && ifobject->validation_func)
1326                 err = ifobject->validation_func(ifobject);
1327         if (err) {
1328                 report_failure(test);
1329                 pthread_mutex_lock(&pacing_mutex);
1330                 pthread_cond_signal(&pacing_cond);
1331                 pthread_mutex_unlock(&pacing_mutex);
1332         }
1333
1334         pthread_exit(NULL);
1335 }
1336
1337 static void testapp_clean_xsk_umem(struct ifobject *ifobj)
1338 {
1339         u64 umem_sz = ifobj->umem->num_frames * ifobj->umem->frame_size;
1340
1341         if (ifobj->shared_umem)
1342                 umem_sz *= 2;
1343
1344         xsk_umem__delete(ifobj->umem->umem);
1345         munmap(ifobj->umem->buffer, umem_sz);
1346 }
1347
1348 static void handler(int signum)
1349 {
1350         pthread_exit(NULL);
1351 }
1352
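/* Run a traffic step with only one worker thread (Rx or Tx), as used by the
 * poll timeout tests. The worker is terminated with SIGUSR1 once the step is
 * done. Shared umem is temporarily disabled since only one socket is active.
 */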
1353 static int testapp_validate_traffic_single_thread(struct test_spec *test, struct ifobject *ifobj,
1354                                                   enum test_type type)
1355 {
1356         bool old_shared_umem = ifobj->shared_umem;
1357         pthread_t t0;
1358
1359         if (pthread_barrier_init(&barr, NULL, 2))
1360                 exit_with_error(errno);
1361
1362         test->current_step++;
1363         if (type == TEST_TYPE_POLL_RXQ_TMOUT)
1364                 pkt_stream_reset(ifobj->pkt_stream);
1365         pkts_in_flight = 0;
1366
1367         test->ifobj_rx->shared_umem = false;
1368         test->ifobj_tx->shared_umem = false;
1369
1370         signal(SIGUSR1, handler);
1371         /* Spawn thread */
1372         pthread_create(&t0, NULL, ifobj->func_ptr, test);
1373
1374         if (type != TEST_TYPE_POLL_TXQ_TMOUT)
1375                 pthread_barrier_wait(&barr);
1376
1377         if (pthread_barrier_destroy(&barr))
1378                 exit_with_error(errno);
1379
1380         pthread_kill(t0, SIGUSR1);
1381         pthread_join(t0, NULL);
1382
1383         if (test->total_steps == test->current_step || test->fail) {
1384                 xsk_socket__delete(ifobj->xsk->xsk);
1385                 testapp_clean_xsk_umem(ifobj);
1386         }
1387
1388         test->ifobj_rx->shared_umem = old_shared_umem;
1389         test->ifobj_tx->shared_umem = old_shared_umem;
1390
1391         return !!test->fail;
1392 }
1393
1394 static int testapp_validate_traffic(struct test_spec *test)
1395 {
1396         struct ifobject *ifobj_tx = test->ifobj_tx;
1397         struct ifobject *ifobj_rx = test->ifobj_rx;
1398         pthread_t t0, t1;
1399
1400         if (pthread_barrier_init(&barr, NULL, 2))
1401                 exit_with_error(errno);
1402
1403         test->current_step++;
1404         pkt_stream_reset(ifobj_rx->pkt_stream);
1405         pkts_in_flight = 0;
1406
1407         /* Spawn RX thread */
1408         pthread_create(&t0, NULL, ifobj_rx->func_ptr, test);
1409
1410         pthread_barrier_wait(&barr);
1411         if (pthread_barrier_destroy(&barr))
1412                 exit_with_error(errno);
1413
1414         /* Spawn TX thread */
1415         pthread_create(&t1, NULL, ifobj_tx->func_ptr, test);
1416
1417         pthread_join(t1, NULL);
1418         pthread_join(t0, NULL);
1419
1420         if (test->total_steps == test->current_step || test->fail) {
1421                 xsk_socket__delete(ifobj_tx->xsk->xsk);
1422                 xsk_socket__delete(ifobj_rx->xsk->xsk);
1423                 testapp_clean_xsk_umem(ifobj_rx);
1424                 if (!ifobj_tx->shared_umem)
1425                         testapp_clean_xsk_umem(ifobj_tx);
1426         }
1427
1428         return !!test->fail;
1429 }
1430
1431 static void testapp_teardown(struct test_spec *test)
1432 {
1433         int i;
1434
1435         test_spec_set_name(test, "TEARDOWN");
1436         for (i = 0; i < MAX_TEARDOWN_ITER; i++) {
1437                 if (testapp_validate_traffic(test))
1438                         return;
1439                 test_spec_reset(test);
1440         }
1441 }
1442
1443 static void swap_directions(struct ifobject **ifobj1, struct ifobject **ifobj2)
1444 {
1445         thread_func_t tmp_func_ptr = (*ifobj1)->func_ptr;
1446         struct ifobject *tmp_ifobj = (*ifobj1);
1447
1448         (*ifobj1)->func_ptr = (*ifobj2)->func_ptr;
1449         (*ifobj2)->func_ptr = tmp_func_ptr;
1450
1451         *ifobj1 = *ifobj2;
1452         *ifobj2 = tmp_ifobj;
1453 }
1454
1455 static void testapp_bidi(struct test_spec *test)
1456 {
1457         test_spec_set_name(test, "BIDIRECTIONAL");
1458         test->ifobj_tx->rx_on = true;
1459         test->ifobj_rx->tx_on = true;
1460         test->total_steps = 2;
1461         if (testapp_validate_traffic(test))
1462                 return;
1463
1464         print_verbose("Switching Tx/Rx vectors\n");
1465         swap_directions(&test->ifobj_rx, &test->ifobj_tx);
1466         testapp_validate_traffic(test);
1467
1468         swap_directions(&test->ifobj_rx, &test->ifobj_tx);
1469 }
1470
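/* Delete the first pair of sockets and switch both interfaces over to the
 * sockets at index 1, inserting the new Rx socket into the xskmap. Used by the
 * BPF_RES test to verify that BPF resources persist across socket teardown.
 */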
1471 static void swap_xsk_resources(struct ifobject *ifobj_tx, struct ifobject *ifobj_rx)
1472 {
1473         int ret;
1474
1475         xsk_socket__delete(ifobj_tx->xsk->xsk);
1476         xsk_socket__delete(ifobj_rx->xsk->xsk);
1477         ifobj_tx->xsk = &ifobj_tx->xsk_arr[1];
1478         ifobj_rx->xsk = &ifobj_rx->xsk_arr[1];
1479
1480         ret = xsk_socket__update_xskmap(ifobj_rx->xsk->xsk, ifobj_rx->xsk_map_fd);
1481         if (ret)
1482                 exit_with_error(-ret);
1483 }
1484
1485 static void testapp_bpf_res(struct test_spec *test)
1486 {
1487         test_spec_set_name(test, "BPF_RES");
1488         test->total_steps = 2;
1489         test->nb_sockets = 2;
1490         if (testapp_validate_traffic(test))
1491                 return;
1492
1493         swap_xsk_resources(test->ifobj_tx, test->ifobj_rx);
1494         testapp_validate_traffic(test);
1495 }
1496
1497 static void testapp_headroom(struct test_spec *test)
1498 {
1499         test_spec_set_name(test, "UMEM_HEADROOM");
1500         test->ifobj_rx->umem->frame_headroom = UMEM_HEADROOM_TEST_SIZE;
1501         testapp_validate_traffic(test);
1502 }
1503
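/* Enlarge the Rx frame headroom so that every other packet no longer fits in
 * its Rx buffer and is dropped, then verify that the rx_dropped statistic
 * matches half the number of packets sent.
 */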
1504 static void testapp_stats_rx_dropped(struct test_spec *test)
1505 {
1506         test_spec_set_name(test, "STAT_RX_DROPPED");
1507         pkt_stream_replace_half(test, MIN_PKT_SIZE * 4, 0);
1508         test->ifobj_rx->umem->frame_headroom = test->ifobj_rx->umem->frame_size -
1509                 XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 3;
1510         pkt_stream_receive_half(test);
1511         test->ifobj_rx->validation_func = validate_rx_dropped;
1512         testapp_validate_traffic(test);
1513 }
1514
1515 static void testapp_stats_tx_invalid_descs(struct test_spec *test)
1516 {
1517         test_spec_set_name(test, "STAT_TX_INVALID");
1518         pkt_stream_replace_half(test, XSK_UMEM__INVALID_FRAME_SIZE, 0);
1519         test->ifobj_tx->validation_func = validate_tx_invalid_descs;
1520         testapp_validate_traffic(test);
1521
1522         pkt_stream_restore_default(test);
1523 }
1524
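/* Rx ring full statistic: send 1.5x DEFAULT_UMEM_BUFFERS packets while the
 * Rx ring only holds DEFAULT_UMEM_BUFFERS entries and received descriptors
 * are never released, so the ring eventually fills up.
 */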
1525 static void testapp_stats_rx_full(struct test_spec *test)
1526 {
1527         test_spec_set_name(test, "STAT_RX_FULL");
1528         pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, PKT_SIZE);
1529         test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem,
1530                                                          DEFAULT_UMEM_BUFFERS, PKT_SIZE);
1531         if (!test->ifobj_rx->pkt_stream)
1532                 exit_with_error(ENOMEM);
1533
1534         test->ifobj_rx->xsk->rxqsize = DEFAULT_UMEM_BUFFERS;
1535         test->ifobj_rx->release_rx = false;
1536         test->ifobj_rx->validation_func = validate_rx_full;
1537         testapp_validate_traffic(test);
1538
1539         pkt_stream_restore_default(test);
1540 }
1541
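/* Fill queue empty statistic: send traffic while the Rx side never populates
 * its fill ring, leaving the kernel without buffers to receive into.
 */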
1542 static void testapp_stats_fill_empty(struct test_spec *test)
1543 {
1544         test_spec_set_name(test, "STAT_RX_FILL_EMPTY");
1545         pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, PKT_SIZE);
1546         test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem,
1547                                                          DEFAULT_UMEM_BUFFERS, PKT_SIZE);
1548         if (!test->ifobj_rx->pkt_stream)
1549                 exit_with_error(ENOMEM);
1550
1551         test->ifobj_rx->use_fill_ring = false;
1552         test->ifobj_rx->validation_func = validate_fill_empty;
1553         testapp_validate_traffic(test);
1554
1555         pkt_stream_restore_default(test);
1556 }
1557
1558 /* Probe for 2MB huge pages by attempting an anonymous MAP_HUGETLB mapping */
1559 static bool hugepages_present(struct ifobject *ifobject)
1560 {
1561         const size_t mmap_sz = 2 * ifobject->umem->num_frames * ifobject->umem->frame_size;
1562         void *bufs;
1563
1564         bufs = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
1565                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
1566         if (bufs == MAP_FAILED)
1567                 return false;
1568
1569         munmap(bufs, mmap_sz);
1570         return true;
1571 }
1572
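/* Unaligned mode test: skipped when no 2MB huge pages are available. Half of
 * the packet addresses are offset by -PKT_SIZE / 2 so they straddle buffer
 * boundaries, and the fill ring is populated with the packets' own addresses.
 */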
1573 static bool testapp_unaligned(struct test_spec *test)
1574 {
1575         if (!hugepages_present(test->ifobj_tx)) {
1576                 ksft_test_result_skip("No 2M huge pages present.\n");
1577                 return false;
1578         }
1579
1580         test_spec_set_name(test, "UNALIGNED_MODE");
1581         test->ifobj_tx->umem->unaligned_mode = true;
1582         test->ifobj_rx->umem->unaligned_mode = true;
1583         /* Let half of the packets straddle a buffer boundary */
1584         pkt_stream_replace_half(test, PKT_SIZE, -PKT_SIZE / 2);
1585         test->ifobj_rx->pkt_stream->use_addr_for_fill = true;
1586         testapp_validate_traffic(test);
1587
1588         pkt_stream_restore_default(test);
1589         return true;
1590 }
1591
1592 static void testapp_single_pkt(struct test_spec *test)
1593 {
1594         struct pkt pkts[] = {{0x1000, PKT_SIZE, 0, true}};
1595
1596         pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts));
1597         testapp_validate_traffic(test);
1598         pkt_stream_restore_default(test);
1599 }
1600
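/* Invalid Tx descriptor test: submit a mix of valid and invalid descriptors
 * and verify that only the valid ones are let through. Some expectations are
 * adjusted below for unaligned mode and 2K frame sizes, and addresses are
 * shifted for shared umem.
 */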
1601 static void testapp_invalid_desc(struct test_spec *test)
1602 {
1603         struct pkt pkts[] = {
1604                 /* Zero packet address allowed */
1605                 {0, PKT_SIZE, 0, true},
1606                 /* Allowed packet */
1607                 {0x1000, PKT_SIZE, 0, true},
1608                 /* Straddling the start of umem */
1609                 {-2, PKT_SIZE, 0, false},
1610                 /* Packet too large */
1611                 {0x2000, XSK_UMEM__INVALID_FRAME_SIZE, 0, false},
1612                 /* After umem ends */
1613                 {UMEM_SIZE, PKT_SIZE, 0, false},
1614                 /* Straddle the end of umem */
1615                 {UMEM_SIZE - PKT_SIZE / 2, PKT_SIZE, 0, false},
1616                 /* Straddle a page boundary */
1617                 {0x3000 - PKT_SIZE / 2, PKT_SIZE, 0, false},
1618                 /* Straddle a 2K boundary */
1619                 {0x3800 - PKT_SIZE / 2, PKT_SIZE, 0, true},
1620                 /* Valid packet for sync so that something is received */
1621                 {0x4000, PKT_SIZE, 0, true}};
1622
1623         if (test->ifobj_tx->umem->unaligned_mode) {
1624                 /* Crossing a page boundary allowed */
1625                 pkts[6].valid = true;
1626         }
1627         if (test->ifobj_tx->umem->frame_size == XSK_UMEM__DEFAULT_FRAME_SIZE / 2) {
1628                 /* Crossing a 2K frame size boundary not allowed */
1629                 pkts[7].valid = false;
1630         }
1631
1632         if (test->ifobj_tx->shared_umem) {
1633                 pkts[4].addr += UMEM_SIZE;
1634                 pkts[5].addr += UMEM_SIZE;
1635         }
1636
1637         pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts));
1638         testapp_validate_traffic(test);
1639         pkt_stream_restore_default(test);
1640 }
1641
1642 static void init_iface(struct ifobject *ifobj, const char *dst_mac, const char *src_mac,
1643                        const char *dst_ip, const char *src_ip, const u16 dst_port,
1644                        const u16 src_port, thread_func_t func_ptr)
1645 {
1646         struct in_addr ip;
1647
1648         memcpy(ifobj->dst_mac, dst_mac, ETH_ALEN);
1649         memcpy(ifobj->src_mac, src_mac, ETH_ALEN);
1650
1651         inet_aton(dst_ip, &ip);
1652         ifobj->dst_ip = ip.s_addr;
1653
1654         inet_aton(src_ip, &ip);
1655         ifobj->src_ip = ip.s_addr;
1656
1657         ifobj->dst_port = dst_port;
1658         ifobj->src_port = src_port;
1659
1660         ifobj->func_ptr = func_ptr;
1661 }
1662
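/* Dispatch a single (mode, type) combination to its test function. A passing
 * result is reported through the kselftest framework here; failures are
 * counted by the caller.
 */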
1663 static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_type type)
1664 {
1665         switch (type) {
1666         case TEST_TYPE_STATS_RX_DROPPED:
1667                 testapp_stats_rx_dropped(test);
1668                 break;
1669         case TEST_TYPE_STATS_TX_INVALID_DESCS:
1670                 testapp_stats_tx_invalid_descs(test);
1671                 break;
1672         case TEST_TYPE_STATS_RX_FULL:
1673                 testapp_stats_rx_full(test);
1674                 break;
1675         case TEST_TYPE_STATS_FILL_EMPTY:
1676                 testapp_stats_fill_empty(test);
1677                 break;
1678         case TEST_TYPE_TEARDOWN:
1679                 testapp_teardown(test);
1680                 break;
1681         case TEST_TYPE_BIDI:
1682                 testapp_bidi(test);
1683                 break;
1684         case TEST_TYPE_BPF_RES:
1685                 testapp_bpf_res(test);
1686                 break;
1687         case TEST_TYPE_RUN_TO_COMPLETION:
1688                 test_spec_set_name(test, "RUN_TO_COMPLETION");
1689                 testapp_validate_traffic(test);
1690                 break;
1691         case TEST_TYPE_RUN_TO_COMPLETION_SINGLE_PKT:
1692                 test_spec_set_name(test, "RUN_TO_COMPLETION_SINGLE_PKT");
1693                 testapp_single_pkt(test);
1694                 break;
1695         case TEST_TYPE_RUN_TO_COMPLETION_2K_FRAME:
1696                 test_spec_set_name(test, "RUN_TO_COMPLETION_2K_FRAME_SIZE");
1697                 test->ifobj_tx->umem->frame_size = 2048;
1698                 test->ifobj_rx->umem->frame_size = 2048;
1699                 pkt_stream_replace(test, DEFAULT_PKT_CNT, PKT_SIZE);
1700                 testapp_validate_traffic(test);
1701
1702                 pkt_stream_restore_default(test);
1703                 break;
1704         case TEST_TYPE_RX_POLL:
1705                 test->ifobj_rx->use_poll = true;
1706                 test_spec_set_name(test, "POLL_RX");
1707                 testapp_validate_traffic(test);
1708                 break;
1709         case TEST_TYPE_TX_POLL:
1710                 test->ifobj_tx->use_poll = true;
1711                 test_spec_set_name(test, "POLL_TX");
1712                 testapp_validate_traffic(test);
1713                 break;
1714         case TEST_TYPE_POLL_TXQ_TMOUT:
1715                 test_spec_set_name(test, "POLL_TXQ_FULL");
1716                 test->ifobj_tx->use_poll = true;
1717                 /* Create an invalid frame by setting umem frame_size and pkt length both equal to 2048 */
1718                 test->ifobj_tx->umem->frame_size = 2048;
1719                 pkt_stream_replace(test, 2 * DEFAULT_PKT_CNT, 2048);
1720                 testapp_validate_traffic_single_thread(test, test->ifobj_tx, type);
1721                 pkt_stream_restore_default(test);
1722                 break;
1723         case TEST_TYPE_POLL_RXQ_TMOUT:
1724                 test_spec_set_name(test, "POLL_RXQ_EMPTY");
1725                 test->ifobj_rx->use_poll = true;
1726                 testapp_validate_traffic_single_thread(test, test->ifobj_rx, type);
1727                 break;
1728         case TEST_TYPE_ALIGNED_INV_DESC:
1729                 test_spec_set_name(test, "ALIGNED_INV_DESC");
1730                 testapp_invalid_desc(test);
1731                 break;
1732         case TEST_TYPE_ALIGNED_INV_DESC_2K_FRAME:
1733                 test_spec_set_name(test, "ALIGNED_INV_DESC_2K_FRAME_SIZE");
1734                 test->ifobj_tx->umem->frame_size = 2048;
1735                 test->ifobj_rx->umem->frame_size = 2048;
1736                 testapp_invalid_desc(test);
1737                 break;
1738         case TEST_TYPE_UNALIGNED_INV_DESC:
1739                 if (!hugepages_present(test->ifobj_tx)) {
1740                         ksft_test_result_skip("No 2M huge pages present.\n");
1741                         return;
1742                 }
1743                 test_spec_set_name(test, "UNALIGNED_INV_DESC");
1744                 test->ifobj_tx->umem->unaligned_mode = true;
1745                 test->ifobj_rx->umem->unaligned_mode = true;
1746                 testapp_invalid_desc(test);
1747                 break;
1748         case TEST_TYPE_UNALIGNED:
1749                 if (!testapp_unaligned(test))
1750                         return;
1751                 break;
1752         case TEST_TYPE_HEADROOM:
1753                 testapp_headroom(test);
1754                 break;
1755         default:
1756                 break;
1757         }
1758
1759         if (!test->fail)
1760                 ksft_test_result_pass("PASS: %s %s%s\n", mode_string(test), busy_poll_string(test),
1761                                       test->name);
1762 }
1763
1764 static struct ifobject *ifobject_create(void)
1765 {
1766         struct ifobject *ifobj;
1767
1768         ifobj = calloc(1, sizeof(struct ifobject));
1769         if (!ifobj)
1770                 return NULL;
1771
1772         ifobj->xsk_arr = calloc(MAX_SOCKETS, sizeof(*ifobj->xsk_arr));
1773         if (!ifobj->xsk_arr)
1774                 goto out_xsk_arr;
1775
1776         ifobj->umem = calloc(1, sizeof(*ifobj->umem));
1777         if (!ifobj->umem)
1778                 goto out_umem;
1779
1780         return ifobj;
1781
1782 out_umem:
1783         free(ifobj->xsk_arr);
1784 out_xsk_arr:
1785         free(ifobj);
1786         return NULL;
1787 }
1788
1789 static void ifobject_delete(struct ifobject *ifobj)
1790 {
1791         free(ifobj->umem);
1792         free(ifobj->xsk_arr);
1793         free(ifobj);
1794 }
1795
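/* Probe for native (driver mode) XDP support by loading a minimal program
 * that returns XDP_PASS and attaching it with XDP_FLAGS_DRV_MODE. The probe
 * program is detached and its fd closed before returning.
 */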
1796 static bool is_xdp_supported(struct ifobject *ifobject)
1797 {
1798         int flags = XDP_FLAGS_DRV_MODE;
1799
1800         LIBBPF_OPTS(bpf_link_create_opts, opts, .flags = flags);
1801         struct bpf_insn insns[2] = {
1802                 BPF_MOV64_IMM(BPF_REG_0, XDP_PASS),
1803                 BPF_EXIT_INSN()
1804         };
1805         int ifindex = if_nametoindex(ifobject->ifname);
1806         int prog_fd, insn_cnt = ARRAY_SIZE(insns);
1807         int err;
1808
1809         prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, insn_cnt, NULL);
1810         if (prog_fd < 0)
1811                 return false;
1812
1813         err = bpf_xdp_attach(ifindex, prog_fd, flags, NULL);
1814         if (err) {
1815                 close(prog_fd);
1816                 return false;
1817         }
1818
1819         bpf_xdp_detach(ifindex, flags, NULL);
1820         close(prog_fd);
1821
1822         return true;
1823 }
1824
1825 int main(int argc, char **argv)
1826 {
1827         struct pkt_stream *rx_pkt_stream_default;
1828         struct pkt_stream *tx_pkt_stream_default;
1829         struct ifobject *ifobj_tx, *ifobj_rx;
1830         int modes = TEST_MODE_SKB + 1;
1831         u32 i, j, failed_tests = 0;
1832         struct test_spec test;
1833         bool shared_umem;
1834
1835         /* Use libbpf 1.0 API mode */
1836         libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
1837
1838         ifobj_tx = ifobject_create();
1839         if (!ifobj_tx)
1840                 exit_with_error(ENOMEM);
1841         ifobj_rx = ifobject_create();
1842         if (!ifobj_rx)
1843                 exit_with_error(ENOMEM);
1844
1845         setlocale(LC_ALL, "");
1846
1847         parse_command_line(ifobj_tx, ifobj_rx, argc, argv);
1848         shared_umem = !strcmp(ifobj_tx->ifname, ifobj_rx->ifname);
1849
1850         ifobj_tx->shared_umem = shared_umem;
1851         ifobj_rx->shared_umem = shared_umem;
1852
1853         if (!validate_interface(ifobj_tx) || !validate_interface(ifobj_rx)) {
1854                 usage(basename(argv[0]));
1855                 ksft_exit_xfail();
1856         }
1857
1858         init_iface(ifobj_tx, MAC1, MAC2, IP1, IP2, UDP_PORT1, UDP_PORT2,
1859                    worker_testapp_validate_tx);
1860         init_iface(ifobj_rx, MAC2, MAC1, IP2, IP1, UDP_PORT2, UDP_PORT1,
1861                    worker_testapp_validate_rx);
1862
1863         if (is_xdp_supported(ifobj_tx))
1864                 modes++;
1865
1866         test_spec_init(&test, ifobj_tx, ifobj_rx, 0);
1867         tx_pkt_stream_default = pkt_stream_generate(ifobj_tx->umem, DEFAULT_PKT_CNT, PKT_SIZE);
1868         rx_pkt_stream_default = pkt_stream_generate(ifobj_rx->umem, DEFAULT_PKT_CNT, PKT_SIZE);
1869         if (!tx_pkt_stream_default || !rx_pkt_stream_default)
1870                 exit_with_error(ENOMEM);
1871         test.tx_pkt_stream_default = tx_pkt_stream_default;
1872         test.rx_pkt_stream_default = rx_pkt_stream_default;
1873
1874         ksft_set_plan(modes * TEST_TYPE_MAX);
1875
1876         for (i = 0; i < modes; i++)
1877                 for (j = 0; j < TEST_TYPE_MAX; j++) {
1878                         test_spec_init(&test, ifobj_tx, ifobj_rx, i);
1879                         run_pkt_test(&test, i, j);
1880                         usleep(USLEEP_MAX);
1881
1882                         if (test.fail)
1883                                 failed_tests++;
1884                 }
1885
1886         pkt_stream_delete(tx_pkt_stream_default);
1887         pkt_stream_delete(rx_pkt_stream_default);
1888         free(ifobj_rx->umem);
1889         if (!ifobj_tx->shared_umem)
1890                 free(ifobj_tx->umem);
1891         ifobject_delete(ifobj_tx);
1892         ifobject_delete(ifobj_rx);
1893
1894         if (failed_tests)
1895                 ksft_exit_fail();
1896         else
1897                 ksft_exit_pass();
1898 }