// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2018 Simon Goldschmidt
 */

#include <common.h>
#include <lmb.h>
#include <dm/test.h>
#include <test/test.h>
#include <test/ut.h>
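
/*
 * Tests for the lmb (logical memory block) allocator.
 *
 * Each test builds a simulated memory map with lmb_init() and lmb_add(),
 * reserves and allocates ranges with lmb_reserve(), lmb_alloc(),
 * lmb_alloc_base() and lmb_alloc_addr(), and then checks the resulting
 * memory and reserved region tables with ASSERT_LMB()/check_lmb() below.
 */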

static int check_lmb(struct unit_test_state *uts, struct lmb *lmb,
		     phys_addr_t ram_base, phys_size_t ram_size,
		     unsigned long num_reserved,
		     phys_addr_t base1, phys_size_t size1,
		     phys_addr_t base2, phys_size_t size2,
		     phys_addr_t base3, phys_size_t size3)
{
	/* passing ram_size == 0 skips the memory region check */
	if (ram_size) {
		ut_asserteq(lmb->memory.cnt, 1);
		ut_asserteq(lmb->memory.region[0].base, ram_base);
		ut_asserteq(lmb->memory.region[0].size, ram_size);
	}

	ut_asserteq(lmb->reserved.cnt, num_reserved);
	if (num_reserved > 0) {
		ut_asserteq(lmb->reserved.region[0].base, base1);
		ut_asserteq(lmb->reserved.region[0].size, size1);
	}
	if (num_reserved > 1) {
		ut_asserteq(lmb->reserved.region[1].base, base2);
		ut_asserteq(lmb->reserved.region[1].size, size2);
	}
	if (num_reserved > 2) {
		ut_asserteq(lmb->reserved.region[2].base, base3);
		ut_asserteq(lmb->reserved.region[2].size, size3);
	}

	return 0;
}
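
/*
 * ASSERT_LMB() wraps check_lmb() in ut_assert() so that a failing check
 * inside the helper also fails the test function that invoked the macro.
 */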
#define ASSERT_LMB(lmb, ram_base, ram_size, num_reserved, base1, size1, \
		   base2, size2, base3, size3) \
		   ut_assert(!check_lmb(uts, lmb, ram_base, ram_size, \
			     num_reserved, base1, size1, base2, size2, base3, \
			     size3))

/*
 * Test helper function that reserves 64 KiB somewhere in the simulated RAM and
 * then does some alloc + free tests.
 */
static int test_multi_alloc(struct unit_test_state *uts, const phys_addr_t ram,
			    const phys_size_t ram_size, const phys_addr_t ram0,
			    const phys_size_t ram0_size,
			    const phys_addr_t alloc_64k_addr)
{
	const phys_addr_t ram_end = ram + ram_size;
	const phys_addr_t alloc_64k_end = alloc_64k_addr + 0x10000;

	struct lmb lmb;
	long ret;
	phys_addr_t a, a2, b, b2, c, d;

	/* check for overflow */
	ut_assert(ram_end == 0 || ram_end > ram);
	ut_assert(alloc_64k_end > alloc_64k_addr);
	/* check input addresses + size */
	ut_assert(alloc_64k_addr >= ram + 8);
	ut_assert(alloc_64k_end <= ram_end - 8);

	lmb_init(&lmb);

	if (ram0_size) {
		ret = lmb_add(&lmb, ram0, ram0_size);
		ut_asserteq(ret, 0);
	}

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	if (ram0_size) {
		ut_asserteq(lmb.memory.cnt, 2);
		ut_asserteq(lmb.memory.region[0].base, ram0);
		ut_asserteq(lmb.memory.region[0].size, ram0_size);
		ut_asserteq(lmb.memory.region[1].base, ram);
		ut_asserteq(lmb.memory.region[1].size, ram_size);
	} else {
		ut_asserteq(lmb.memory.cnt, 1);
		ut_asserteq(lmb.memory.region[0].base, ram);
		ut_asserteq(lmb.memory.region[0].size, ram_size);
	}

	/* reserve 64KiB somewhere */
	ret = lmb_reserve(&lmb, alloc_64k_addr, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

	/* allocate somewhere, should be at the end of RAM */
	a = lmb_alloc(&lmb, 4, 1);
	ut_asserteq(a, ram_end - 4);
	ASSERT_LMB(&lmb, 0, 0, 2, alloc_64k_addr, 0x10000,
		   ram_end - 4, 4, 0, 0);
	/* alloc below end of reserved region -> below reserved region */
	b = lmb_alloc_base(&lmb, 4, 1, alloc_64k_end);
	ut_asserteq(b, alloc_64k_addr - 4);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 4, 0x10000 + 4, ram_end - 4, 4, 0, 0);
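
	/*
	 * The 4-byte allocation below alloc_64k_end lands directly in front
	 * of the 64 KiB reservation, so the two reserved regions coalesce
	 * into one (size 0x10000 + 4), as checked above. Repeat both
	 * allocations to grow each reserved region by another 4 bytes.
	 */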
	c = lmb_alloc(&lmb, 4, 1);
	ut_asserteq(c, ram_end - 8);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 4, 0x10000 + 4, ram_end - 8, 8, 0, 0);
	d = lmb_alloc_base(&lmb, 4, 1, alloc_64k_end);
	ut_asserteq(d, alloc_64k_addr - 8);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 8, 0, 0);

	ret = lmb_free(&lmb, a, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0);
	/* allocate again to ensure we get the same address */
	a2 = lmb_alloc(&lmb, 4, 1);
	ut_asserteq(a, a2);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 8, 0, 0);
	ret = lmb_free(&lmb, a2, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0);

	ret = lmb_free(&lmb, b, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 3,
		   alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000,
		   ram_end - 8, 4);
	/* allocate again to ensure we get the same address */
	b2 = lmb_alloc_base(&lmb, 4, 1, alloc_64k_end);
	ut_asserteq(b, b2);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0);
	ret = lmb_free(&lmb, b2, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 3,
		   alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000,
		   ram_end - 8, 4);

	ret = lmb_free(&lmb, c, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000, 0, 0);
	ret = lmb_free(&lmb, d, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

	if (ram0_size) {
		ut_asserteq(lmb.memory.cnt, 2);
		ut_asserteq(lmb.memory.region[0].base, ram0);
		ut_asserteq(lmb.memory.region[0].size, ram0_size);
		ut_asserteq(lmb.memory.region[1].base, ram);
		ut_asserteq(lmb.memory.region[1].size, ram_size);
	} else {
		ut_asserteq(lmb.memory.cnt, 1);
		ut_asserteq(lmb.memory.region[0].base, ram);
		ut_asserteq(lmb.memory.region[0].size, ram_size);
	}

	return 0;
}

static int test_multi_alloc_512mb(struct unit_test_state *uts,
				  const phys_addr_t ram)
{
	return test_multi_alloc(uts, ram, 0x20000000, 0, 0, ram + 0x10000000);
}

static int test_multi_alloc_512mb_x2(struct unit_test_state *uts,
				     const phys_addr_t ram,
				     const phys_addr_t ram0)
{
	/* as above, place the 64 KiB reservation in the middle of @ram */
	return test_multi_alloc(uts, ram, 0x20000000, ram0, 0x20000000,
				ram + 0x10000000);
}

/* Create a memory region with one reserved region and allocate */
static int lib_test_lmb_simple(struct unit_test_state *uts)
{
	int ret;

	/* simulate 512 MiB RAM beginning at 1GiB */
	ret = test_multi_alloc_512mb(uts, 0x40000000);
	if (ret)
		return ret;

	/* simulate 512 MiB RAM beginning at 3.5GiB */
	return test_multi_alloc_512mb(uts, 0xE0000000);
}

DM_TEST(lib_test_lmb_simple, UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);

/* Create two memory regions with one reserved region and allocate */
static int lib_test_lmb_simple_x2(struct unit_test_state *uts)
{
	int ret;

	/* simulate 512 MiB RAM beginning at 2GiB and 1 GiB */
	ret = test_multi_alloc_512mb_x2(uts, 0x80000000, 0x40000000);
	if (ret)
		return ret;

	/* simulate 512 MiB RAM beginning at 3.5GiB and 1 GiB */
	return test_multi_alloc_512mb_x2(uts, 0xE0000000, 0x40000000);
}

DM_TEST(lib_test_lmb_simple_x2, UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);

/* Simulate 512 MiB RAM, allocate some blocks that fit/don't fit */
static int test_bigblock(struct unit_test_state *uts, const phys_addr_t ram)
{
	const phys_size_t ram_size = 0x20000000;
	const phys_size_t big_block_size = 0x10000000;
	const phys_addr_t ram_end = ram + ram_size;
	const phys_addr_t alloc_64k_addr = ram + 0x10000000;
	struct lmb lmb;
	long ret;
	phys_addr_t a, b;

	/* check for overflow */
	ut_assert(ram_end == 0 || ram_end > ram);

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	/* reserve 64KiB in the middle of RAM */
	ret = lmb_reserve(&lmb, alloc_64k_addr, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

	/* allocate a big block, should be below reserved */
	a = lmb_alloc(&lmb, big_block_size, 1);
	ut_asserteq(a, ram);
	ASSERT_LMB(&lmb, ram, ram_size, 1, a,
		   big_block_size + 0x10000, 0, 0, 0, 0);
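
	/*
	 * Only the free space above the 64 KiB reservation (just under
	 * 256 MiB) is left, so a second 256 MiB block cannot be satisfied;
	 * lmb_alloc() is expected to return 0, its failure value.
	 */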
	/* allocate 2nd big block */
	/* This should fail, printing an error */
	b = lmb_alloc(&lmb, big_block_size, 1);
	ut_asserteq(b, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, a,
		   big_block_size + 0x10000, 0, 0, 0, 0);

	ret = lmb_free(&lmb, a, big_block_size);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

	/* allocate too big block */
	/* This should fail, printing an error */
	a = lmb_alloc(&lmb, ram_size, 1);
	ut_asserteq(a, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

	return 0;
}

static int lib_test_lmb_big(struct unit_test_state *uts)
{
	int ret;

	/* simulate 512 MiB RAM beginning at 1GiB */
	ret = test_bigblock(uts, 0x40000000);
	if (ret)
		return ret;

	/* simulate 512 MiB RAM beginning at 3.5GiB */
	return test_bigblock(uts, 0xE0000000);
}

DM_TEST(lib_test_lmb_big, UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);

/* Simulate 512 MiB RAM, allocate a block without previous reservation */
static int test_noreserved(struct unit_test_state *uts, const phys_addr_t ram,
			   const phys_addr_t alloc_size, const ulong align)
{
	const phys_size_t ram_size = 0x20000000;
	const phys_addr_t ram_end = ram + ram_size;
	struct lmb lmb;
	long ret;
	phys_addr_t a, b;
	const phys_addr_t alloc_size_aligned = (alloc_size + align - 1) &
		~(align - 1);

	/* check for overflow */
	ut_assert(ram_end == 0 || ram_end > ram);

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

	/* allocate a block */
	a = lmb_alloc(&lmb, alloc_size, align);
	ut_assert(a != 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned,
		   alloc_size, 0, 0, 0, 0);
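
	/*
	 * When alloc_size is not a multiple of align, the second block starts
	 * one full alloc_size_aligned below the first, leaving a gap between
	 * the two reserved regions so they cannot coalesce; with an aligned
	 * size the regions are adjacent and merge into one. Both cases are
	 * checked below.
	 */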
	/* allocate another block */
	b = lmb_alloc(&lmb, alloc_size, align);
	ut_assert(b != 0);
	if (alloc_size == alloc_size_aligned) {
		ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size -
			   (alloc_size_aligned * 2), alloc_size * 2, 0, 0, 0,
			   0);
	} else {
		ASSERT_LMB(&lmb, ram, ram_size, 2, ram + ram_size -
			   (alloc_size_aligned * 2), alloc_size, ram + ram_size
			   - alloc_size_aligned, alloc_size, 0, 0);
	}
	/* and free them */
	ret = lmb_free(&lmb, b, alloc_size);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned,
		   alloc_size, 0, 0, 0, 0);
	ret = lmb_free(&lmb, a, alloc_size);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

	/* allocate a block with base */
	b = lmb_alloc_base(&lmb, alloc_size, align, ram_end);
	ut_assert(a == b);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned,
		   alloc_size, 0, 0, 0, 0);
	/* and free it */
	ret = lmb_free(&lmb, b, alloc_size);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

	return 0;
}

static int lib_test_lmb_noreserved(struct unit_test_state *uts)
{
	int ret;

	/* simulate 512 MiB RAM beginning at 1GiB */
	ret = test_noreserved(uts, 0x40000000, 4, 1);
	if (ret)
		return ret;

	/* simulate 512 MiB RAM beginning at 3.5GiB */
	return test_noreserved(uts, 0xE0000000, 4, 1);
}

DM_TEST(lib_test_lmb_noreserved, UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);
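
/*
 * Same scenario as lib_test_lmb_noreserved(), but with an allocation size (5)
 * that is not a multiple of the alignment (8), exercising the unaligned-size
 * path in test_noreserved().
 */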
static int lib_test_lmb_unaligned_size(struct unit_test_state *uts)
{
	int ret;

	/* simulate 512 MiB RAM beginning at 1GiB */
	ret = test_noreserved(uts, 0x40000000, 5, 8);
	if (ret)
		return ret;

	/* simulate 512 MiB RAM beginning at 3.5GiB */
	return test_noreserved(uts, 0xE0000000, 5, 8);
}

DM_TEST(lib_test_lmb_unaligned_size, UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);

/*
 * Simulate a RAM that starts at 0 and allocate down to address 0, which must
 * fail as '0' means failure for the lmb_alloc functions.
 */
static int lib_test_lmb_at_0(struct unit_test_state *uts)
{
	const phys_addr_t ram = 0;
	const phys_size_t ram_size = 0x20000000;
	struct lmb lmb;
	long ret;
	phys_addr_t a, b;

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	/* allocate nearly everything */
	a = lmb_alloc(&lmb, ram_size - 4, 1);
	ut_asserteq(a, ram + 4);
	ASSERT_LMB(&lmb, ram, ram_size, 1, a, ram_size - 4,
		   0, 0, 0, 0);
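	/*
	 * The only remaining free space is the 4 bytes at address 0. An
	 * allocation there would have to return address 0, which doubles as
	 * the error value, so lmb_alloc() must refuse it.
	 */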
	/* allocate the rest */
	/* This should fail as the allocated address would be 0 */
	b = lmb_alloc(&lmb, 4, 1);
	ut_asserteq(b, 0);
	/* check that this was an error by checking lmb */
	ASSERT_LMB(&lmb, ram, ram_size, 1, a, ram_size - 4,
		   0, 0, 0, 0);
	/* check that this was an error by freeing b */
	ret = lmb_free(&lmb, b, 4);
	ut_asserteq(ret, -1);
	ASSERT_LMB(&lmb, ram, ram_size, 1, a, ram_size - 4,
		   0, 0, 0, 0);

	ret = lmb_free(&lmb, a, ram_size - 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

	return 0;
}

DM_TEST(lib_test_lmb_at_0, UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);

/* Check that calling lmb_reserve with overlapping regions fails. */
static int lib_test_lmb_overlapping_reserve(struct unit_test_state *uts)
{
	const phys_addr_t ram = 0x40000000;
	const phys_size_t ram_size = 0x20000000;
	struct lmb lmb;
	long ret;

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	ret = lmb_reserve(&lmb, 0x40010000, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
		   0, 0, 0, 0);
	/* reserving an overlapping region should fail */
	ret = lmb_reserve(&lmb, 0x40011000, 0x10000);
	ut_asserteq(ret, -1);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
		   0, 0, 0, 0);
	/* reserve a 3rd region */
	ret = lmb_reserve(&lmb, 0x40030000, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 2, 0x40010000, 0x10000,
		   0x40030000, 0x10000, 0, 0);
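
	/*
	 * Reserving the gap at 0x40020000 makes all three regions contiguous,
	 * so they are expected to be coalesced into a single reserved region
	 * of size 0x30000. lmb_reserve() may return a positive value when it
	 * merges existing regions, hence the >= 0 check below.
	 */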
	/* reserve the 2nd region */
	ret = lmb_reserve(&lmb, 0x40020000, 0x10000);
	ut_assert(ret >= 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x30000,
		   0, 0, 0, 0);

	return 0;
}

DM_TEST(lib_test_lmb_overlapping_reserve,
	UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);

/*
 * Simulate 512 MiB RAM, reserve 3 blocks, allocate addresses in between.
 * Expect addresses outside the memory range to fail.
 */
static int test_alloc_addr(struct unit_test_state *uts, const phys_addr_t ram)
{
	const phys_size_t ram_size = 0x20000000;
	const phys_addr_t ram_end = ram + ram_size;
	const phys_size_t alloc_addr_a = ram + 0x8000000;
	const phys_size_t alloc_addr_b = ram + 0x8000000 * 2;
	const phys_size_t alloc_addr_c = ram + 0x8000000 * 3;
	struct lmb lmb;
	long ret;
	phys_addr_t a, b, c, d, e;

	/* check for overflow */
	ut_assert(ram_end == 0 || ram_end > ram);

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	/* reserve 3 blocks */
	ret = lmb_reserve(&lmb, alloc_addr_a, 0x10000);
	ut_asserteq(ret, 0);
	ret = lmb_reserve(&lmb, alloc_addr_b, 0x10000);
	ut_asserteq(ret, 0);
	ret = lmb_reserve(&lmb, alloc_addr_c, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 3, alloc_addr_a, 0x10000,
		   alloc_addr_b, 0x10000, alloc_addr_c, 0x10000);
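
	/*
	 * lmb_alloc_addr(lmb, base, size) reserves the exact range
	 * [base, base + size) and returns base on success or 0 on failure.
	 * Filling the gaps between the three blocks should therefore coalesce
	 * everything into a single reserved region covering all of RAM.
	 */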
	/* allocate blocks */
	a = lmb_alloc_addr(&lmb, ram, alloc_addr_a - ram);
	ut_asserteq(a, ram);
	ASSERT_LMB(&lmb, ram, ram_size, 3, ram, 0x8010000,
		   alloc_addr_b, 0x10000, alloc_addr_c, 0x10000);
	b = lmb_alloc_addr(&lmb, alloc_addr_a + 0x10000,
			   alloc_addr_b - alloc_addr_a - 0x10000);
	ut_asserteq(b, alloc_addr_a + 0x10000);
	ASSERT_LMB(&lmb, ram, ram_size, 2, ram, 0x10010000,
		   alloc_addr_c, 0x10000, 0, 0);
	c = lmb_alloc_addr(&lmb, alloc_addr_b + 0x10000,
			   alloc_addr_c - alloc_addr_b - 0x10000);
	ut_asserteq(c, alloc_addr_b + 0x10000);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
		   0, 0, 0, 0);
	d = lmb_alloc_addr(&lmb, alloc_addr_c + 0x10000,
			   ram_end - alloc_addr_c - 0x10000);
	ut_asserteq(d, alloc_addr_c + 0x10000);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, ram_size,
		   0, 0, 0, 0);

	/* allocating anything else should fail */
	e = lmb_alloc(&lmb, 1, 1);
	ut_asserteq(e, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, ram_size,
		   0, 0, 0, 0);

	ret = lmb_free(&lmb, d, ram_end - alloc_addr_c - 0x10000);
	ut_asserteq(ret, 0);

	/* allocate at 3 points in free range */

	d = lmb_alloc_addr(&lmb, ram_end - 4, 4);
	ut_asserteq(d, ram_end - 4);
	ASSERT_LMB(&lmb, ram, ram_size, 2, ram, 0x18010000,
		   d, 4, 0, 0);
	ret = lmb_free(&lmb, d, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
		   0, 0, 0, 0);

	d = lmb_alloc_addr(&lmb, ram_end - 128, 4);
	ut_asserteq(d, ram_end - 128);
	ASSERT_LMB(&lmb, ram, ram_size, 2, ram, 0x18010000,
		   d, 4, 0, 0);
	ret = lmb_free(&lmb, d, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
		   0, 0, 0, 0);

	d = lmb_alloc_addr(&lmb, alloc_addr_c + 0x10000, 4);
	ut_asserteq(d, alloc_addr_c + 0x10000);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010004,
		   0, 0, 0, 0);
	ret = lmb_free(&lmb, d, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
		   0, 0, 0, 0);

	/* allocate at the bottom */
	ret = lmb_free(&lmb, a, alloc_addr_a - ram);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram + 0x8000000, 0x10010000,
		   0, 0, 0, 0);
	d = lmb_alloc_addr(&lmb, ram, 4);
	ut_asserteq(d, ram);
	ASSERT_LMB(&lmb, ram, ram_size, 2, d, 4,
		   ram + 0x8000000, 0x10010000, 0, 0);

	/* check that allocating outside memory fails */
	if (ram_end != 0) {
		ret = lmb_alloc_addr(&lmb, ram_end, 1);
		ut_asserteq(ret, 0);
	}
	if (ram != 0) {
		ret = lmb_alloc_addr(&lmb, ram - 1, 1);
		ut_asserteq(ret, 0);
	}

	return 0;
}

static int lib_test_lmb_alloc_addr(struct unit_test_state *uts)
{
	int ret;

	/* simulate 512 MiB RAM beginning at 1GiB */
	ret = test_alloc_addr(uts, 0x40000000);
	if (ret)
		return ret;

	/* simulate 512 MiB RAM beginning at 3.5GiB */
	return test_alloc_addr(uts, 0xE0000000);
}

DM_TEST(lib_test_lmb_alloc_addr, UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);

/* Simulate 512 MiB RAM, reserve 3 blocks, check addresses in between */
static int test_get_unreserved_size(struct unit_test_state *uts,
				    const phys_addr_t ram)
{
	const phys_size_t ram_size = 0x20000000;
	const phys_addr_t ram_end = ram + ram_size;
	const phys_size_t alloc_addr_a = ram + 0x8000000;
	const phys_size_t alloc_addr_b = ram + 0x8000000 * 2;
	const phys_size_t alloc_addr_c = ram + 0x8000000 * 3;
	struct lmb lmb;
	long ret;
	phys_size_t s;

	/* check for overflow */
	ut_assert(ram_end == 0 || ram_end > ram);

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	/* reserve 3 blocks */
	ret = lmb_reserve(&lmb, alloc_addr_a, 0x10000);
	ut_asserteq(ret, 0);
	ret = lmb_reserve(&lmb, alloc_addr_b, 0x10000);
	ut_asserteq(ret, 0);
	ret = lmb_reserve(&lmb, alloc_addr_c, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 3, alloc_addr_a, 0x10000,
		   alloc_addr_b, 0x10000, alloc_addr_c, 0x10000);
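
	/*
	 * lmb_get_free_size(lmb, addr) is expected to return the number of
	 * contiguous free bytes starting at addr, i.e. the distance from addr
	 * to the next reserved region (or to the end of RAM if nothing above
	 * addr is reserved).
	 */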
	/* check addresses in between blocks */
	s = lmb_get_free_size(&lmb, ram);
	ut_asserteq(s, alloc_addr_a - ram);
	s = lmb_get_free_size(&lmb, ram + 0x10000);
	ut_asserteq(s, alloc_addr_a - ram - 0x10000);
	s = lmb_get_free_size(&lmb, alloc_addr_a - 4);
	ut_asserteq(s, 4);

	s = lmb_get_free_size(&lmb, alloc_addr_a + 0x10000);
	ut_asserteq(s, alloc_addr_b - alloc_addr_a - 0x10000);
	s = lmb_get_free_size(&lmb, alloc_addr_a + 0x20000);
	ut_asserteq(s, alloc_addr_b - alloc_addr_a - 0x20000);
	s = lmb_get_free_size(&lmb, alloc_addr_b - 4);
	ut_asserteq(s, 4);

	s = lmb_get_free_size(&lmb, alloc_addr_c + 0x10000);
	ut_asserteq(s, ram_end - alloc_addr_c - 0x10000);
	s = lmb_get_free_size(&lmb, alloc_addr_c + 0x20000);
	ut_asserteq(s, ram_end - alloc_addr_c - 0x20000);
	s = lmb_get_free_size(&lmb, ram_end - 4);
	ut_asserteq(s, 4);

	return 0;
}

static int lib_test_lmb_get_free_size(struct unit_test_state *uts)
{
	int ret;

	/* simulate 512 MiB RAM beginning at 1GiB */
	ret = test_get_unreserved_size(uts, 0x40000000);
	if (ret)
		return ret;

	/* simulate 512 MiB RAM beginning at 3.5GiB */
	return test_get_unreserved_size(uts, 0xE0000000);
}

DM_TEST(lib_test_lmb_get_free_size,
	UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);