// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 *
 * This contains some basic static unit tests for the allowedips data structure.
 * It also has two additional modes that are disabled and meant to be used by
 * folks directly playing with this file. If you define the macro
 * DEBUG_PRINT_TRIE_GRAPHVIZ to be 1, then every time there's a full tree in
 * memory, it will be printed out as KERN_DEBUG in a format that can be passed
 * to graphviz (the dot command) to visualize it. If you define the macro
 * DEBUG_RANDOM_TRIE to be 1, then there will be an extremely costly set of
 * randomized tests done against a trivial implementation, which may take
 * upwards of a half-hour to complete. There's no set of users who should be
 * enabling these, and the only developers that should go anywhere near these
 * knobs are the ones who are reading this comment.
 */

#include <linux/siphash.h>
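/* A note on actually using those knobs (the shell pipeline below is purely
 * illustrative, not something this file mandates): define
 *
 *	#define DEBUG_PRINT_TRIE_GRAPHVIZ 1
 *
 * near the top of this file, rebuild with the self-tests enabled, run them,
 * and then extract the KERN_DEBUG digraph from the kernel log and hand it to
 * graphviz, roughly along the lines of:
 *
 *	dmesg --notime | sed -n '/digraph trie {/,/^}/p' | dot -Tsvg > trie.svg
 *
 * DEBUG_RANDOM_TRIE is enabled the same way and needs no post-processing,
 * just patience.
 */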
/* Recursively emits a graphviz declaration for @node, followed by edges to,
 * and then the subtrees of, both of its children.
 */
static __init void print_node(struct allowedips_node *node, u8 bits)
{
	char *fmt_connection = KERN_DEBUG "\t\"%p/%d\" -> \"%p/%d\";\n";
	char *fmt_declaration = KERN_DEBUG "\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n";
	u8 ip1[16], ip2[16], cidr1, cidr2;
	char *style = "dotted";
	u32 color = 0;

	if (node == NULL)
		return;
	if (bits == 32) {
		fmt_connection = KERN_DEBUG "\t\"%pI4/%d\" -> \"%pI4/%d\";\n";
		fmt_declaration = KERN_DEBUG "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n";
	} else if (bits == 128) {
		fmt_connection = KERN_DEBUG "\t\"%pI6/%d\" -> \"%pI6/%d\";\n";
		fmt_declaration = KERN_DEBUG "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n";
	}
	if (node->peer) {
		hsiphash_key_t key = { { 0 } };

		memcpy(&key, &node->peer, sizeof(node->peer));
		color = hsiphash_1u32(0xdeadbeef, &key) % 200 << 16 |
			hsiphash_1u32(0xbabecafe, &key) % 200 << 8 |
			hsiphash_1u32(0xabad1dea, &key) % 200;
		style = "bold";
	}
	wg_allowedips_read_node(node, ip1, &cidr1);
	printk(fmt_declaration, ip1, cidr1, style, color);
	if (node->bit[0]) {
		wg_allowedips_read_node(rcu_dereference_raw(node->bit[0]), ip2, &cidr2);
		printk(fmt_connection, ip1, cidr1, ip2, cidr2);
	}
	if (node->bit[1]) {
		wg_allowedips_read_node(rcu_dereference_raw(node->bit[1]), ip2, &cidr2);
		printk(fmt_connection, ip1, cidr1, ip2, cidr2);
	}
	if (node->bit[0])
		print_node(rcu_dereference_raw(node->bit[0]), bits);
	if (node->bit[1])
		print_node(rcu_dereference_raw(node->bit[1]), bits);
}
static __init void print_tree(struct allowedips_node __rcu *top, u8 bits)
{
	printk(KERN_DEBUG "digraph trie {\n");
	print_node(rcu_dereference_raw(top), bits);
	printk(KERN_DEBUG "}\n");
}
enum {
	NUM_PEERS = 2000,
	NUM_RAND_ROUTES = 400,
	NUM_MUTATED_ROUTES = 100,
	NUM_QUERIES = NUM_RAND_ROUTES * NUM_MUTATED_ROUTES * 30
};
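/* Everything prefixed with "horrible" below is a deliberately naive
 * reference implementation of allowed-IPs matching: one flat hlist scanned
 * linearly, kept sorted by decreasing prefix length so that the first match
 * is also the longest match. The randomized test uses it as ground truth to
 * cross-check the real trie.
 */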
struct horrible_allowedips {
	struct hlist_head head;
};

struct horrible_allowedips_node {
	struct hlist_node table;
	union nf_inet_addr ip;
	union nf_inet_addr mask;
	u8 ip_version;
	void *value;
};
static __init void horrible_allowedips_init(struct horrible_allowedips *table)
{
	INIT_HLIST_HEAD(&table->head);
}

static __init void horrible_allowedips_free(struct horrible_allowedips *table)
{
	struct horrible_allowedips_node *node;
	struct hlist_node *h;

	hlist_for_each_entry_safe(node, h, &table->head, table) {
		hlist_del(&node->table);
		kfree(node);
	}
}
static __init inline union nf_inet_addr horrible_cidr_to_mask(u8 cidr)
{
	union nf_inet_addr mask;

	memset(&mask, 0, sizeof(mask));
	memset(&mask.all, 0xff, cidr / 8);
	if (cidr % 32)
		mask.all[cidr / 32] = (__force u32)htonl(
			(0xFFFFFFFFUL << (32 - (cidr % 32))) & 0xFFFFFFFFUL);
	return mask;
}
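/* For example, working the arithmetic above through by hand, cidr = 21 gives
 * two full 0xff bytes from the second memset, and then
 * htonl(0xFFFFFFFF << 11) = htonl(0xFFFFF800) stored into all[0], i.e. the
 * byte sequence ff ff f8 00 00 ... -- the usual /21 netmask.
 */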
/* Masks are contiguous runs of 1-bits, so the popcount is the prefix length. */
static __init inline u8 horrible_mask_to_cidr(union nf_inet_addr subnet)
{
	return hweight32(subnet.all[0]) + hweight32(subnet.all[1]) +
	       hweight32(subnet.all[2]) + hweight32(subnet.all[3]);
}
static __init inline void
horrible_mask_self(struct horrible_allowedips_node *node)
{
	if (node->ip_version == 4) {
		node->ip.ip &= node->mask.ip;
	} else if (node->ip_version == 6) {
		node->ip.ip6[0] &= node->mask.ip6[0];
		node->ip.ip6[1] &= node->mask.ip6[1];
		node->ip.ip6[2] &= node->mask.ip6[2];
		node->ip.ip6[3] &= node->mask.ip6[3];
	}
}
static __init inline bool
horrible_match_v4(const struct horrible_allowedips_node *node, struct in_addr *ip)
{
	return (ip->s_addr & node->mask.ip) == node->ip.ip;
}

static __init inline bool
horrible_match_v6(const struct horrible_allowedips_node *node, struct in6_addr *ip)
{
	return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) == node->ip.ip6[0] &&
	       (ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) == node->ip.ip6[1] &&
	       (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) == node->ip.ip6[2] &&
	       (ip->in6_u.u6_addr32[3] & node->mask.ip6[3]) == node->ip.ip6[3];
}
static __init void
horrible_insert_ordered(struct horrible_allowedips *table, struct horrible_allowedips_node *node)
{
	struct horrible_allowedips_node *other = NULL, *where = NULL;
	u8 my_cidr = horrible_mask_to_cidr(node->mask);

	hlist_for_each_entry(other, &table->head, table) {
		if (other->ip_version == node->ip_version &&
		    !memcmp(&other->mask, &node->mask, sizeof(union nf_inet_addr)) &&
		    !memcmp(&other->ip, &node->ip, sizeof(union nf_inet_addr))) {
			other->value = node->value;
			kfree(node);
			return;
		}
	}
	hlist_for_each_entry(other, &table->head, table) {
		where = other;
		if (horrible_mask_to_cidr(other->mask) <= my_cidr)
			break;
	}
	if (!other && !where)
		hlist_add_head(&node->table, &table->head);
	else if (!other)
		hlist_add_behind(&node->table, &where->table);
	else
		hlist_add_before(&node->table, &where->table);
}
static __init int
horrible_allowedips_insert_v4(struct horrible_allowedips *table,
			      struct in_addr *ip, u8 cidr, void *value)
{
	struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (unlikely(!node))
		return -ENOMEM;
	node->ip.in = *ip;
	node->mask = horrible_cidr_to_mask(cidr);
	node->ip_version = 4;
	node->value = value;
	horrible_mask_self(node);
	horrible_insert_ordered(table, node);
	return 0;
}

static __init int
horrible_allowedips_insert_v6(struct horrible_allowedips *table,
			      struct in6_addr *ip, u8 cidr, void *value)
{
	struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (unlikely(!node))
		return -ENOMEM;
	node->ip.in6 = *ip;
	node->mask = horrible_cidr_to_mask(cidr);
	node->ip_version = 6;
	node->value = value;
	horrible_mask_self(node);
	horrible_insert_ordered(table, node);
	return 0;
}
static __init void *
horrible_allowedips_lookup_v4(struct horrible_allowedips *table, struct in_addr *ip)
{
	struct horrible_allowedips_node *node;

	hlist_for_each_entry(node, &table->head, table) {
		if (node->ip_version == 4 && horrible_match_v4(node, ip))
			return node->value;
	}
	return NULL;
}

static __init void *
horrible_allowedips_lookup_v6(struct horrible_allowedips *table, struct in6_addr *ip)
{
	struct horrible_allowedips_node *node;

	hlist_for_each_entry(node, &table->head, table) {
		if (node->ip_version == 6 && horrible_match_v6(node, ip))
			return node->value;
	}
	return NULL;
}
static __init void
horrible_allowedips_remove_by_value(struct horrible_allowedips *table, void *value)
{
	struct horrible_allowedips_node *node;
	struct hlist_node *h;

	hlist_for_each_entry_safe(node, h, &table->head, table) {
		if (node->value != value)
			continue;
		hlist_del(&node->table);
		kfree(node);
	}
}
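/* The randomized test below feeds an identical stream of random routes (and
 * mutated near-duplicates of them) into both the real trie and the horrible
 * list above, fires off NUM_QUERIES random lookups expecting the two to
 * agree, and then removes peers one at a time, re-checking agreement as it
 * goes.
 */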
static __init bool randomized_test(void)
{
	unsigned int i, j, k, mutate_amount, cidr;
	u8 ip[16], mutate_mask[16], mutated[16];
	struct wg_peer **peers, *peer;
	struct horrible_allowedips h;
	DEFINE_MUTEX(mutex);
	struct allowedips t;
	bool ret = false;

	mutex_init(&mutex);

	wg_allowedips_init(&t);
	horrible_allowedips_init(&h);

	peers = kcalloc(NUM_PEERS, sizeof(*peers), GFP_KERNEL);
	if (unlikely(!peers)) {
		pr_err("allowedips random self-test malloc: FAIL\n");
		goto free;
	}
	for (i = 0; i < NUM_PEERS; ++i) {
		peers[i] = kzalloc(sizeof(*peers[i]), GFP_KERNEL);
		if (unlikely(!peers[i])) {
			pr_err("allowedips random self-test malloc: FAIL\n");
			goto free;
		}
		kref_init(&peers[i]->refcount);
		INIT_LIST_HEAD(&peers[i]->allowedips_list);
	}

	mutex_lock(&mutex);

	for (i = 0; i < NUM_RAND_ROUTES; ++i) {
		get_random_bytes(ip, 4);
		cidr = get_random_u32_inclusive(1, 32);
		peer = peers[get_random_u32_below(NUM_PEERS)];
		if (wg_allowedips_insert_v4(&t, (struct in_addr *)ip, cidr,
					    peer, &mutex) < 0) {
			pr_err("allowedips random self-test malloc: FAIL\n");
			goto free_locked;
		}
		if (horrible_allowedips_insert_v4(&h, (struct in_addr *)ip,
						  cidr, peer) < 0) {
			pr_err("allowedips random self-test malloc: FAIL\n");
			goto free_locked;
		}
		for (j = 0; j < NUM_MUTATED_ROUTES; ++j) {
			memcpy(mutated, ip, 4);
			get_random_bytes(mutate_mask, 4);
			mutate_amount = get_random_u32_below(32);
			for (k = 0; k < mutate_amount / 8; ++k)
				mutate_mask[k] = 0xff;
			mutate_mask[k] = 0xff
					 << ((8 - (mutate_amount % 8)) % 8);
			for (; k < 4; ++k)
				mutate_mask[k] = 0;
			for (k = 0; k < 4; ++k)
				mutated[k] = (mutated[k] & mutate_mask[k]) |
					     (~mutate_mask[k] &
					      get_random_u8());
			cidr = get_random_u32_inclusive(1, 32);
			peer = peers[get_random_u32_below(NUM_PEERS)];
			if (wg_allowedips_insert_v4(&t,
						    (struct in_addr *)mutated,
						    cidr, peer, &mutex) < 0) {
				pr_err("allowedips random self-test malloc: FAIL\n");
				goto free_locked;
			}
			if (horrible_allowedips_insert_v4(&h,
				(struct in_addr *)mutated, cidr, peer)) {
				pr_err("allowedips random self-test malloc: FAIL\n");
				goto free_locked;
			}
		}
	}

	for (i = 0; i < NUM_RAND_ROUTES; ++i) {
		get_random_bytes(ip, 16);
		cidr = get_random_u32_inclusive(1, 128);
		peer = peers[get_random_u32_below(NUM_PEERS)];
		if (wg_allowedips_insert_v6(&t, (struct in6_addr *)ip, cidr,
					    peer, &mutex) < 0) {
			pr_err("allowedips random self-test malloc: FAIL\n");
			goto free_locked;
		}
		if (horrible_allowedips_insert_v6(&h, (struct in6_addr *)ip,
						  cidr, peer) < 0) {
			pr_err("allowedips random self-test malloc: FAIL\n");
			goto free_locked;
		}
		for (j = 0; j < NUM_MUTATED_ROUTES; ++j) {
			memcpy(mutated, ip, 16);
			get_random_bytes(mutate_mask, 16);
			mutate_amount = get_random_u32_below(128);
			for (k = 0; k < mutate_amount / 8; ++k)
				mutate_mask[k] = 0xff;
			mutate_mask[k] = 0xff
					 << ((8 - (mutate_amount % 8)) % 8);
			for (; k < 4; ++k)
				mutate_mask[k] = 0;
			for (k = 0; k < 4; ++k)
				mutated[k] = (mutated[k] & mutate_mask[k]) |
					     (~mutate_mask[k] &
					      get_random_u8());
			cidr = get_random_u32_inclusive(1, 128);
			peer = peers[get_random_u32_below(NUM_PEERS)];
			if (wg_allowedips_insert_v6(&t,
						    (struct in6_addr *)mutated,
						    cidr, peer, &mutex) < 0) {
				pr_err("allowedips random self-test malloc: FAIL\n");
				goto free_locked;
			}
			if (horrible_allowedips_insert_v6(
				    &h, (struct in6_addr *)mutated, cidr,
				    peer) < 0) {
				pr_err("allowedips random self-test malloc: FAIL\n");
				goto free_locked;
			}
		}
	}

	mutex_unlock(&mutex);

	if (IS_ENABLED(DEBUG_PRINT_TRIE_GRAPHVIZ)) {
		print_tree(t.root4, 32);
		print_tree(t.root6, 128);
	}

	for (j = 0;; ++j) {
		for (i = 0; i < NUM_QUERIES; ++i) {
			get_random_bytes(ip, 4);
			if (lookup(t.root4, 32, ip) != horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) {
				horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip);
				pr_err("allowedips random v4 self-test: FAIL\n");
				goto free;
			}
			get_random_bytes(ip, 16);
			if (lookup(t.root6, 128, ip) != horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) {
				pr_err("allowedips random v6 self-test: FAIL\n");
				goto free;
			}
		}
		if (j >= NUM_PEERS)
			break;
		mutex_lock(&mutex);
		wg_allowedips_remove_by_peer(&t, peers[j], &mutex);
		mutex_unlock(&mutex);
		horrible_allowedips_remove_by_value(&h, peers[j]);
	}

	if (t.root4 || t.root6) {
		pr_err("allowedips random self-test removal: FAIL\n");
		goto free;
	}

	ret = true;

free:
	mutex_lock(&mutex);
free_locked:
	wg_allowedips_free(&t, &mutex);
	mutex_unlock(&mutex);
	horrible_allowedips_free(&h);
	if (peers) {
		for (i = 0; i < NUM_PEERS; ++i)
			kfree(peers[i]);
	}
	kfree(peers);
	return ret;
}
static __init inline struct in_addr *ip4(u8 a, u8 b, u8 c, u8 d)
{
	static struct in_addr ip;
	u8 *split = (u8 *)&ip;

	split[0] = a;
	split[1] = b;
	split[2] = c;
	split[3] = d;
	return &ip;
}

static __init inline struct in6_addr *ip6(u32 a, u32 b, u32 c, u32 d)
{
	static struct in6_addr ip;
	__be32 *split = (__be32 *)&ip;

	split[0] = cpu_to_be32(a);
	split[1] = cpu_to_be32(b);
	split[2] = cpu_to_be32(c);
	split[3] = cpu_to_be32(d);
	return &ip;
}

static __init struct wg_peer *init_peer(void)
{
	struct wg_peer *peer = kzalloc(sizeof(*peer), GFP_KERNEL);

	if (!peer)
		return NULL;
	kref_init(&peer->refcount);
	INIT_LIST_HEAD(&peer->allowedips_list);
	return peer;
}
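/* The static test cases below lean on a few convenience macros: insert() adds
 * a route for a peer to the trie, test()/test_negative() look an address up
 * and check whether it does/doesn't resolve to that peer, and maybe_fail()
 * numbers each check so that a failure report points at the offending case.
 */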
#define insert(version, mem, ipa, ipb, ipc, ipd, cidr)                        \
	wg_allowedips_insert_v##version(&t, ip##version(ipa, ipb, ipc, ipd),  \
					cidr, mem, &mutex)

#define maybe_fail() do {                                                     \
		++i;                                                          \
		if (!_s) {                                                    \
			pr_info("allowedips self-test %zu: FAIL\n", i);       \
			success = false;                                      \
		}                                                             \
	} while (0)

#define test(version, mem, ipa, ipb, ipc, ipd) do {                           \
		bool _s = lookup(t.root##version, (version) == 4 ? 32 : 128, \
				 ip##version(ipa, ipb, ipc, ipd)) == (mem);   \
		maybe_fail();                                                 \
	} while (0)

#define test_negative(version, mem, ipa, ipb, ipc, ipd) do {                  \
		bool _s = lookup(t.root##version, (version) == 4 ? 32 : 128, \
				 ip##version(ipa, ipb, ipc, ipd)) != (mem);   \
		maybe_fail();                                                 \
	} while (0)

#define test_boolean(cond) do {                                               \
		bool _s = (cond);                                             \
		maybe_fail();                                                 \
	} while (0)
bool __init wg_allowedips_selftest(void)
{
	bool found_a = false, found_b = false, found_c = false, found_d = false,
	     found_e = false, found_other = false;
	struct wg_peer *a = init_peer(), *b = init_peer(), *c = init_peer(),
		       *d = init_peer(), *e = init_peer(), *f = init_peer(),
		       *g = init_peer(), *h = init_peer();
	struct allowedips_node *iter_node;
	bool success = false;
	struct allowedips t;
	DEFINE_MUTEX(mutex);
	struct in6_addr ip;
	size_t i = 0, count = 0;
	__be64 part;

	mutex_init(&mutex);
	mutex_lock(&mutex);
	wg_allowedips_init(&t);

	if (!a || !b || !c || !d || !e || !f || !g || !h) {
		pr_err("allowedips self-test malloc: FAIL\n");
		goto free;
	}
	insert(4, a, 192, 168, 4, 0, 24);
	insert(4, b, 192, 168, 4, 4, 32);
	insert(4, c, 192, 168, 0, 0, 16);
	insert(4, d, 192, 95, 5, 64, 27);
	/* replaces previous entry, and maskself is required */
	insert(4, c, 192, 95, 5, 65, 27);
	insert(6, d, 0x26075300, 0x60006b00, 0, 0xc05f0543, 128);
	insert(6, c, 0x26075300, 0x60006b00, 0, 0, 64);
	insert(4, e, 0, 0, 0, 0, 0);
	insert(6, e, 0, 0, 0, 0, 0);
	/* replaces previous entry */
	insert(6, f, 0, 0, 0, 0, 0);
	insert(6, g, 0x24046800, 0, 0, 0, 32);
	/* maskself is required */
	insert(6, h, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef, 64);
	insert(6, a, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef, 128);
	insert(6, c, 0x24446800, 0x40e40800, 0xdeaebeef, 0xdefbeef, 128);
	insert(6, b, 0x24446800, 0xf0e40800, 0xeeaebeef, 0, 98);
	insert(4, g, 64, 15, 112, 0, 20);
	/* maskself is required */
	insert(4, h, 64, 15, 123, 211, 25);
	insert(4, a, 10, 0, 0, 0, 25);
	insert(4, b, 10, 0, 0, 128, 25);
	insert(4, a, 10, 1, 0, 0, 30);
	insert(4, b, 10, 1, 0, 4, 30);
	insert(4, c, 10, 1, 0, 8, 29);
	insert(4, d, 10, 1, 0, 16, 29);

	if (IS_ENABLED(DEBUG_PRINT_TRIE_GRAPHVIZ)) {
		print_tree(t.root4, 32);
		print_tree(t.root6, 128);
	}

	success = true;
	test(4, a, 192, 168, 4, 20);
	test(4, a, 192, 168, 4, 0);
	test(4, b, 192, 168, 4, 4);
	test(4, c, 192, 168, 200, 182);
	test(4, c, 192, 95, 5, 68);
	test(4, e, 192, 95, 5, 96);
	test(6, d, 0x26075300, 0x60006b00, 0, 0xc05f0543);
	test(6, c, 0x26075300, 0x60006b00, 0, 0xc02e01ee);
	test(6, f, 0x26075300, 0x60006b01, 0, 0);
	test(6, g, 0x24046800, 0x40040806, 0, 0x1006);
	test(6, g, 0x24046800, 0x40040806, 0x1234, 0x5678);
	test(6, f, 0x240467ff, 0x40040806, 0x1234, 0x5678);
	test(6, f, 0x24046801, 0x40040806, 0x1234, 0x5678);
	test(6, h, 0x24046800, 0x40040800, 0x1234, 0x5678);
	test(6, h, 0x24046800, 0x40040800, 0, 0);
	test(6, h, 0x24046800, 0x40040800, 0x10101010, 0x10101010);
	test(6, a, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef);
	test(4, g, 64, 15, 116, 26);
	test(4, g, 64, 15, 127, 3);
	test(4, g, 64, 15, 123, 1);
	test(4, h, 64, 15, 123, 128);
	test(4, h, 64, 15, 123, 129);
	test(4, a, 10, 0, 0, 52);
	test(4, b, 10, 0, 0, 220);
	test(4, a, 10, 1, 0, 2);
	test(4, b, 10, 1, 0, 6);
	test(4, c, 10, 1, 0, 10);
	test(4, d, 10, 1, 0, 20);

	insert(4, a, 1, 0, 0, 0, 32);
	insert(4, a, 64, 0, 0, 0, 32);
	insert(4, a, 128, 0, 0, 0, 32);
	insert(4, a, 192, 0, 0, 0, 32);
	insert(4, a, 255, 0, 0, 0, 32);
	wg_allowedips_remove_by_peer(&t, a, &mutex);
	test_negative(4, a, 1, 0, 0, 0);
	test_negative(4, a, 64, 0, 0, 0);
	test_negative(4, a, 128, 0, 0, 0);
	test_negative(4, a, 192, 0, 0, 0);
	test_negative(4, a, 255, 0, 0, 0);

	wg_allowedips_free(&t, &mutex);
	wg_allowedips_init(&t);
	insert(4, a, 192, 168, 0, 0, 16);
	insert(4, a, 192, 168, 0, 0, 24);
	wg_allowedips_remove_by_peer(&t, a, &mutex);
	test_negative(4, a, 192, 168, 0, 1);
	/* These will hit the WARN_ON(len >= MAX_ALLOWEDIPS_DEPTH) in free_node
	 * if something goes wrong.
	 */
	for (i = 0; i < 64; ++i) {
		part = cpu_to_be64(~0LLU << i);
		memset(&ip, 0xff, 8);
		memcpy((u8 *)&ip + 8, &part, 8);
		wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex);
		memcpy(&ip, &part, 8);
		memset((u8 *)&ip + 8, 0, 8);
		wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex);
	}
	memset(&ip, 0, 16);
	wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex);
	wg_allowedips_free(&t, &mutex);
	wg_allowedips_init(&t);
	insert(4, a, 192, 95, 5, 93, 27);
	insert(6, a, 0x26075300, 0x60006b00, 0, 0xc05f0543, 128);
	insert(4, a, 10, 1, 0, 20, 29);
	insert(6, a, 0x26075300, 0x6d8a6bf8, 0xdab1f1df, 0xc05f1523, 83);
	insert(6, a, 0x26075300, 0x6d8a6bf8, 0xdab1f1df, 0xc05f1523, 21);
	list_for_each_entry(iter_node, &a->allowedips_list, peer_list) {
		u8 cidr, ip[16] __aligned(__alignof(u64));
		int family = wg_allowedips_read_node(iter_node, ip, &cidr);

		count++;

		if (cidr == 27 && family == AF_INET &&
		    !memcmp(ip, ip4(192, 95, 5, 64), sizeof(struct in_addr)))
			found_a = true;
		else if (cidr == 128 && family == AF_INET6 &&
			 !memcmp(ip, ip6(0x26075300, 0x60006b00, 0, 0xc05f0543),
				 sizeof(struct in6_addr)))
			found_b = true;
		else if (cidr == 29 && family == AF_INET &&
			 !memcmp(ip, ip4(10, 1, 0, 16), sizeof(struct in_addr)))
			found_c = true;
		else if (cidr == 83 && family == AF_INET6 &&
			 !memcmp(ip, ip6(0x26075300, 0x6d8a6bf8, 0xdab1e000, 0),
				 sizeof(struct in6_addr)))
			found_d = true;
		else if (cidr == 21 && family == AF_INET6 &&
			 !memcmp(ip, ip6(0x26075000, 0, 0, 0),
				 sizeof(struct in6_addr)))
			found_e = true;
		else
			found_other = true;
	}
	test_boolean(count == 5);
	test_boolean(found_a);
	test_boolean(found_b);
	test_boolean(found_c);
	test_boolean(found_d);
	test_boolean(found_e);
	test_boolean(!found_other);

	if (IS_ENABLED(DEBUG_RANDOM_TRIE) && success)
		success = randomized_test();

	if (success)
		pr_info("allowedips self-tests: pass\n");

free:
	wg_allowedips_free(&t, &mutex);
	kfree(a);
	kfree(b);
	kfree(c);
	kfree(d);
	kfree(e);
	kfree(f);
	kfree(g);
	kfree(h);
	mutex_unlock(&mutex);

	return success;
}