2 * linux/drivers/mmc/card/mmc_test.c
4 * Copyright 2007-2008 Pierre Ossman
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
12 #include <linux/mmc/core.h>
13 #include <linux/mmc/card.h>
14 #include <linux/mmc/host.h>
15 #include <linux/mmc/mmc.h>
16 #include <linux/slab.h>
18 #include <linux/scatterlist.h>
19 #include <linux/swap.h> /* For nr_free_buffer_pages() */
20 #include <linux/list.h>
22 #include <linux/debugfs.h>
23 #include <linux/uaccess.h>
24 #include <linux/seq_file.h>
25 #include <linux/module.h>
29 #define RESULT_UNSUP_HOST 2
30 #define RESULT_UNSUP_CARD 3
32 #define BUFFER_ORDER 2
33 #define BUFFER_SIZE (PAGE_SIZE << BUFFER_ORDER)
36 * Limit the test area size to the maximum MMC HC erase group size. Note that
37 * the maximum SD allocation unit size is just 4MiB.
39 #define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)
42 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
43 * @page: first page in the allocation
44 * @order: order of the number of pages allocated
46 struct mmc_test_pages {
52 * struct mmc_test_mem - allocated memory.
53 * @arr: array of allocations
54 * @cnt: number of allocations
57 struct mmc_test_pages *arr;
62 * struct mmc_test_area - information for performance tests.
63 * @max_sz: test area size (in bytes)
64 * @dev_addr: address on card at which to do performance tests
65 * @max_tfr: maximum transfer size allowed by driver (in bytes)
66 * @max_segs: maximum segments allowed by driver in scatterlist @sg
67 * @max_seg_sz: maximum segment size allowed by driver
68 * @blocks: number of (512 byte) blocks currently mapped by @sg
69 * @sg_len: length of currently mapped scatterlist @sg
70 * @mem: allocated memory
73 struct mmc_test_area {
75 unsigned int dev_addr;
77 unsigned int max_segs;
78 unsigned int max_seg_sz;
81 struct mmc_test_mem *mem;
82 struct scatterlist *sg;
86 * struct mmc_test_transfer_result - transfer results for performance tests.
87 * @link: doubly-linked list
88 * @count: number of sector groups checked
89 * @sectors: number of sectors in each group
90 * @ts: duration of the transfer
91 * @rate: calculated transfer rate
92 * @iops: I/O operations per second (times 100)
94 struct mmc_test_transfer_result {
95 struct list_head link;
104 * struct mmc_test_general_result - results for tests.
105 * @link: doubly-linked list
106 * @card: card under test
107 * @testcase: number of test case
108 * @result: result of test run
109 * @tr_lst: transfer measurements if any as mmc_test_transfer_result
111 struct mmc_test_general_result {
112 struct list_head link;
113 struct mmc_card *card;
116 struct list_head tr_lst;
120 * struct mmc_test_dbgfs_file - debugfs related file.
121 * @link: doubly-linked list
122 * @card: card under test
123 * @file: file created under debugfs
125 struct mmc_test_dbgfs_file {
126 struct list_head link;
127 struct mmc_card *card;
132 * struct mmc_test_card - test information.
133 * @card: card under test
134 * @scratch: transfer buffer
135 * @buffer: transfer buffer
136 * @highmem: buffer for highmem tests
137 * @area: information for performance tests
138 * @gr: pointer to results of current testcase
140 struct mmc_test_card {
141 struct mmc_card *card;
143 u8 scratch[BUFFER_SIZE];
145 #ifdef CONFIG_HIGHMEM
146 struct page *highmem;
148 struct mmc_test_area area;
149 struct mmc_test_general_result *gr;
152 enum mmc_test_prep_media {
153 MMC_TEST_PREP_NONE = 0,
154 MMC_TEST_PREP_WRITE_FULL = 1 << 0,
155 MMC_TEST_PREP_ERASE = 1 << 1,
158 struct mmc_test_multiple_rw {
159 unsigned int *sg_len;
164 bool do_nonblock_req;
165 enum mmc_test_prep_media prepare;
168 struct mmc_test_async_req {
169 struct mmc_async_req areq;
170 struct mmc_test_card *test;
173 /*******************************************************************/
174 /* General helper functions */
175 /*******************************************************************/
178 * Configure the correct block size on the card
180 static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
182 return mmc_set_blocklen(test->card, size);
186 * Fill in the mmc_request structure given a set of transfer parameters.
188 static void mmc_test_prepare_mrq(struct mmc_test_card *test,
189 struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
190 unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
192 BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);
195 mrq->cmd->opcode = write ?
196 MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
198 mrq->cmd->opcode = write ?
199 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
202 mrq->cmd->arg = dev_addr;
203 if (!mmc_card_blockaddr(test->card))
206 mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;
211 mrq->stop->opcode = MMC_STOP_TRANSMISSION;
213 mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
216 mrq->data->blksz = blksz;
217 mrq->data->blocks = blocks;
218 mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
220 mrq->data->sg_len = sg_len;
222 mmc_set_data_timeout(mrq->data, test->card);
225 static int mmc_test_busy(struct mmc_command *cmd)
227 return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
228 (R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
232 * Wait for the card to finish the busy state
234 static int mmc_test_wait_busy(struct mmc_test_card *test)
237 struct mmc_command cmd = {0};
241 memset(&cmd, 0, sizeof(struct mmc_command));
243 cmd.opcode = MMC_SEND_STATUS;
244 cmd.arg = test->card->rca << 16;
245 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
247 ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
251 if (!busy && mmc_test_busy(&cmd)) {
253 if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
254 pr_info("%s: Warning: Host did not "
255 "wait for busy state to end.\n",
256 mmc_hostname(test->card->host));
258 } while (mmc_test_busy(&cmd));
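/*
 * Worked example (illustrative status values, not taken from this file): a
 * CMD13 response of 0x00000900 (CURRENT_STATE = TRAN, READY_FOR_DATA set)
 * ends the polling loop above, while 0x00000E00 (CURRENT_STATE = PRG,
 * READY_FOR_DATA clear) keeps it re-issuing SEND_STATUS.
 */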
264 * Transfer a single sector of kernel addressable data
266 static int mmc_test_buffer_transfer(struct mmc_test_card *test,
267 u8 *buffer, unsigned addr, unsigned blksz, int write)
271 struct mmc_request mrq = {0};
272 struct mmc_command cmd = {0};
273 struct mmc_command stop = {0};
274 struct mmc_data data = {0};
276 struct scatterlist sg;
282 sg_init_one(&sg, buffer, blksz);
284 mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);
286 mmc_wait_for_req(test->card->host, &mrq);
293 ret = mmc_test_wait_busy(test);
300 static void mmc_test_free_mem(struct mmc_test_mem *mem)
305 __free_pages(mem->arr[mem->cnt].page,
306 mem->arr[mem->cnt].order);
312 * Allocate a lot of memory, preferably max_sz but at least min_sz. If memory
313 * is scarce, do not exceed 1/16th of the total lowmem pages. Also do not
314 * exceed the maximum number of segments, and try not to make segments much
315 * bigger than the maximum segment size (a worked example follows the function).
317 static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
318 unsigned long max_sz,
319 unsigned int max_segs,
320 unsigned int max_seg_sz)
322 unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
323 unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
324 unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
325 unsigned long page_cnt = 0;
326 unsigned long limit = nr_free_buffer_pages() >> 4;
327 struct mmc_test_mem *mem;
329 if (max_page_cnt > limit)
330 max_page_cnt = limit;
331 if (min_page_cnt > max_page_cnt)
332 min_page_cnt = max_page_cnt;
334 if (max_seg_page_cnt > max_page_cnt)
335 max_seg_page_cnt = max_page_cnt;
337 if (max_segs > max_page_cnt)
338 max_segs = max_page_cnt;
340 mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
344 mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_segs,
349 while (max_page_cnt) {
352 gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
355 order = get_order(max_seg_page_cnt << PAGE_SHIFT);
357 page = alloc_pages(flags, order);
363 if (page_cnt < min_page_cnt)
367 mem->arr[mem->cnt].page = page;
368 mem->arr[mem->cnt].order = order;
370 if (max_page_cnt <= (1UL << order))
372 max_page_cnt -= 1UL << order;
373 page_cnt += 1UL << order;
374 if (mem->cnt >= max_segs) {
375 if (page_cnt < min_page_cnt)
384 mmc_test_free_mem(mem);
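/*
 * Worked example (illustrative, hypothetical numbers): with 512 MiB of free
 * lowmem, nr_free_buffer_pages() >> 4 caps the allocation at 8192 pages
 * (32 MiB with 4 KiB pages).  A request for max_sz = 4 MiB with
 * max_seg_sz = 64 KiB then fits comfortably and is built from order-4
 * allocations (get_order(64 KiB) = 4, i.e. 16 pages per segment).
 */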
389 * Map memory into a scatterlist. Optionally allow the same memory to be
390 * mapped more than once.
392 static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size,
393 struct scatterlist *sglist, int repeat,
394 unsigned int max_segs, unsigned int max_seg_sz,
395 unsigned int *sg_len, int min_sg_len)
397 struct scatterlist *sg = NULL;
399 unsigned long sz = size;
401 sg_init_table(sglist, max_segs);
402 if (min_sg_len > max_segs)
403 min_sg_len = max_segs;
407 for (i = 0; i < mem->cnt; i++) {
408 unsigned long len = PAGE_SIZE << mem->arr[i].order;
410 if (min_sg_len && (size / min_sg_len < len))
411 len = ALIGN(size / min_sg_len, 512);
414 if (len > max_seg_sz)
422 sg_set_page(sg, mem->arr[i].page, len, 0);
428 } while (sz && repeat);
440 * Map memory into a scatterlist so that no pages are contiguous. Allow the
441 * same memory to be mapped more than once.
443 static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
445 struct scatterlist *sglist,
446 unsigned int max_segs,
447 unsigned int max_seg_sz,
448 unsigned int *sg_len)
450 struct scatterlist *sg = NULL;
451 unsigned int i = mem->cnt, cnt;
453 void *base, *addr, *last_addr = NULL;
455 sg_init_table(sglist, max_segs);
459 base = page_address(mem->arr[--i].page);
460 cnt = 1 << mem->arr[i].order;
462 addr = base + PAGE_SIZE * --cnt;
463 if (last_addr && last_addr + PAGE_SIZE == addr)
467 if (len > max_seg_sz)
477 sg_set_page(sg, virt_to_page(addr), len, 0);
492 * Calculate transfer rate in bytes per second.
494 static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
504 while (ns > UINT_MAX) {
512 do_div(bytes, (uint32_t)ns);
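/*
 * Worked example (illustrative): 1 MiB (1048576 bytes) transferred in 0.5 s
 * gives 2097152 bytes/s, which the callers below report as 2097 kB/s and
 * 2048 KiB/s.  Passing bytes = 100 for one request that took 0.5 s yields
 * 200, printed as 2.00 IOPS.
 */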
518 * Save transfer results for future usage
520 static void mmc_test_save_transfer_result(struct mmc_test_card *test,
521 unsigned int count, unsigned int sectors, struct timespec ts,
522 unsigned int rate, unsigned int iops)
524 struct mmc_test_transfer_result *tr;
529 tr = kmalloc(sizeof(struct mmc_test_transfer_result), GFP_KERNEL);
534 tr->sectors = sectors;
539 list_add_tail(&tr->link, &test->gr->tr_lst);
543 * Print the transfer rate.
545 static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
546 struct timespec *ts1, struct timespec *ts2)
548 unsigned int rate, iops, sectors = bytes >> 9;
551 ts = timespec_sub(*ts2, *ts1);
553 rate = mmc_test_rate(bytes, &ts);
554 iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */
556 pr_info("%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
557 "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
558 mmc_hostname(test->card->host), sectors, sectors >> 1,
559 (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
560 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
561 iops / 100, iops % 100);
563 mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
567 * Print the average transfer rate.
569 static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
570 unsigned int count, struct timespec *ts1,
571 struct timespec *ts2)
573 unsigned int rate, iops, sectors = bytes >> 9;
574 uint64_t tot = bytes * count;
577 ts = timespec_sub(*ts2, *ts1);
579 rate = mmc_test_rate(tot, &ts);
580 iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */
582 pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
583 "%lu.%09lu seconds (%u kB/s, %u KiB/s, "
584 "%u.%02u IOPS, sg_len %d)\n",
585 mmc_hostname(test->card->host), count, sectors, count,
586 sectors >> 1, (sectors & 1 ? ".5" : ""),
587 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
588 rate / 1000, rate / 1024, iops / 100, iops % 100,
591 mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
595 * Return the card size in sectors.
597 static unsigned int mmc_test_capacity(struct mmc_card *card)
599 if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
600 return card->ext_csd.sectors;
602 return card->csd.capacity << (card->csd.read_blkbits - 9);
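/*
 * Example (illustrative values): a block-addressed (high-capacity) eMMC
 * simply reports ext_csd.sectors, while a byte-addressed SD card with
 * csd.capacity = 15360 and csd.read_blkbits = 10 reports
 * 15360 << (10 - 9) = 30720 sectors (15 MiB).
 */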
605 /*******************************************************************/
606 /* Test preparation and cleanup */
607 /*******************************************************************/
610 * Fill the first couple of sectors of the card with known data
611 * so that bad reads/writes can be detected
613 static int __mmc_test_prepare(struct mmc_test_card *test, int write)
617 ret = mmc_test_set_blksize(test, 512);
622 memset(test->buffer, 0xDF, 512);
624 for (i = 0;i < 512;i++)
628 for (i = 0;i < BUFFER_SIZE / 512;i++) {
629 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
637 static int mmc_test_prepare_write(struct mmc_test_card *test)
639 return __mmc_test_prepare(test, 1);
642 static int mmc_test_prepare_read(struct mmc_test_card *test)
644 return __mmc_test_prepare(test, 0);
647 static int mmc_test_cleanup(struct mmc_test_card *test)
651 ret = mmc_test_set_blksize(test, 512);
655 memset(test->buffer, 0, 512);
657 for (i = 0;i < BUFFER_SIZE / 512;i++) {
658 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
666 /*******************************************************************/
667 /* Test execution helpers */
668 /*******************************************************************/
671 * Modifies the mmc_request to perform the "short transfer" tests
673 static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
674 struct mmc_request *mrq, int write)
676 BUG_ON(!mrq || !mrq->cmd || !mrq->data);
678 if (mrq->data->blocks > 1) {
679 mrq->cmd->opcode = write ?
680 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
683 mrq->cmd->opcode = MMC_SEND_STATUS;
684 mrq->cmd->arg = test->card->rca << 16;
689 * Checks that a normal transfer didn't have any errors
691 static int mmc_test_check_result(struct mmc_test_card *test,
692 struct mmc_request *mrq)
696 BUG_ON(!mrq || !mrq->cmd || !mrq->data);
700 if (!ret && mrq->cmd->error)
701 ret = mrq->cmd->error;
702 if (!ret && mrq->data->error)
703 ret = mrq->data->error;
704 if (!ret && mrq->stop && mrq->stop->error)
705 ret = mrq->stop->error;
706 if (!ret && mrq->data->bytes_xfered !=
707 mrq->data->blocks * mrq->data->blksz)
711 ret = RESULT_UNSUP_HOST;
716 static int mmc_test_check_result_async(struct mmc_card *card,
717 struct mmc_async_req *areq)
719 struct mmc_test_async_req *test_async =
720 container_of(areq, struct mmc_test_async_req, areq);
722 mmc_test_wait_busy(test_async->test);
724 return mmc_test_check_result(test_async->test, areq->mrq);
728 * Checks that a "short transfer" behaved as expected
730 static int mmc_test_check_broken_result(struct mmc_test_card *test,
731 struct mmc_request *mrq)
735 BUG_ON(!mrq || !mrq->cmd || !mrq->data);
739 if (!ret && mrq->cmd->error)
740 ret = mrq->cmd->error;
741 if (!ret && mrq->data->error == 0)
743 if (!ret && mrq->data->error != -ETIMEDOUT)
744 ret = mrq->data->error;
745 if (!ret && mrq->stop && mrq->stop->error)
746 ret = mrq->stop->error;
747 if (mrq->data->blocks > 1) {
748 if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
751 if (!ret && mrq->data->bytes_xfered > 0)
756 ret = RESULT_UNSUP_HOST;
762 * Reset the request structures reused by the non-blocking transfer test
764 static void mmc_test_nonblock_reset(struct mmc_request *mrq,
765 struct mmc_command *cmd,
766 struct mmc_command *stop,
767 struct mmc_data *data)
769 memset(mrq, 0, sizeof(struct mmc_request));
770 memset(cmd, 0, sizeof(struct mmc_command));
771 memset(data, 0, sizeof(struct mmc_data));
772 memset(stop, 0, sizeof(struct mmc_command));
778 static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
779 struct scatterlist *sg, unsigned sg_len,
780 unsigned dev_addr, unsigned blocks,
781 unsigned blksz, int write, int count)
783 struct mmc_request mrq1;
784 struct mmc_command cmd1;
785 struct mmc_command stop1;
786 struct mmc_data data1;
788 struct mmc_request mrq2;
789 struct mmc_command cmd2;
790 struct mmc_command stop2;
791 struct mmc_data data2;
793 struct mmc_test_async_req test_areq[2];
794 struct mmc_async_req *done_areq;
795 struct mmc_async_req *cur_areq = &test_areq[0].areq;
796 struct mmc_async_req *other_areq = &test_areq[1].areq;
800 test_areq[0].test = test;
801 test_areq[1].test = test;
803 mmc_test_nonblock_reset(&mrq1, &cmd1, &stop1, &data1);
804 mmc_test_nonblock_reset(&mrq2, &cmd2, &stop2, &data2);
806 cur_areq->mrq = &mrq1;
807 cur_areq->err_check = mmc_test_check_result_async;
808 other_areq->mrq = &mrq2;
809 other_areq->err_check = mmc_test_check_result_async;
811 for (i = 0; i < count; i++) {
812 mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr,
813 blocks, blksz, write);
814 done_areq = mmc_start_req(test->card->host, cur_areq, &ret);
816 if (ret || (!done_areq && i > 0))
820 if (done_areq->mrq == &mrq2)
821 mmc_test_nonblock_reset(&mrq2, &cmd2,
824 mmc_test_nonblock_reset(&mrq1, &cmd1,
827 done_areq = cur_areq;
828 cur_areq = other_areq;
829 other_areq = done_areq;
833 done_areq = mmc_start_req(test->card->host, NULL, &ret);
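/*
 * Illustrative flow for count = 3: iteration 0 submits mrq1 (nothing has
 * completed yet), iteration 1 submits mrq2 while mrq1 is in flight and then
 * re-initialises the returned mrq1, iteration 2 submits mrq1 again, and the
 * final mmc_start_req(host, NULL, ...) call above simply waits for the last
 * outstanding request to complete.
 */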
841 * Tests a basic transfer with certain parameters
843 static int mmc_test_simple_transfer(struct mmc_test_card *test,
844 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
845 unsigned blocks, unsigned blksz, int write)
847 struct mmc_request mrq = {0};
848 struct mmc_command cmd = {0};
849 struct mmc_command stop = {0};
850 struct mmc_data data = {0};
856 mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
857 blocks, blksz, write);
859 mmc_wait_for_req(test->card->host, &mrq);
861 mmc_test_wait_busy(test);
863 return mmc_test_check_result(test, &mrq);
867 * Tests a transfer where the card will fail completely or partly
869 static int mmc_test_broken_transfer(struct mmc_test_card *test,
870 unsigned blocks, unsigned blksz, int write)
872 struct mmc_request mrq = {0};
873 struct mmc_command cmd = {0};
874 struct mmc_command stop = {0};
875 struct mmc_data data = {0};
877 struct scatterlist sg;
883 sg_init_one(&sg, test->buffer, blocks * blksz);
885 mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
886 mmc_test_prepare_broken_mrq(test, &mrq, write);
888 mmc_wait_for_req(test->card->host, &mrq);
890 mmc_test_wait_busy(test);
892 return mmc_test_check_broken_result(test, &mrq);
896 * Does a complete transfer test where data is also validated
898 * Note: mmc_test_prepare() must have been done before this call
900 static int mmc_test_transfer(struct mmc_test_card *test,
901 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
902 unsigned blocks, unsigned blksz, int write)
908 for (i = 0;i < blocks * blksz;i++)
909 test->scratch[i] = i;
911 memset(test->scratch, 0, BUFFER_SIZE);
913 local_irq_save(flags);
914 sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
915 local_irq_restore(flags);
917 ret = mmc_test_set_blksize(test, blksz);
921 ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
922 blocks, blksz, write);
929 ret = mmc_test_set_blksize(test, 512);
933 sectors = (blocks * blksz + 511) / 512;
934 if ((sectors * 512) == (blocks * blksz))
937 if ((sectors * 512) > BUFFER_SIZE)
940 memset(test->buffer, 0, sectors * 512);
942 for (i = 0;i < sectors;i++) {
943 ret = mmc_test_buffer_transfer(test,
944 test->buffer + i * 512,
945 dev_addr + i, 512, 0);
950 for (i = 0;i < blocks * blksz;i++) {
951 if (test->buffer[i] != (u8)i)
955 for (;i < sectors * 512;i++) {
956 if (test->buffer[i] != 0xDF)
960 local_irq_save(flags);
961 sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
962 local_irq_restore(flags);
963 for (i = 0;i < blocks * blksz;i++) {
964 if (test->scratch[i] != (u8)i)
972 /*******************************************************************/
974 /*******************************************************************/
976 struct mmc_test_case {
979 int (*prepare)(struct mmc_test_card *);
980 int (*run)(struct mmc_test_card *);
981 int (*cleanup)(struct mmc_test_card *);
984 static int mmc_test_basic_write(struct mmc_test_card *test)
987 struct scatterlist sg;
989 ret = mmc_test_set_blksize(test, 512);
993 sg_init_one(&sg, test->buffer, 512);
995 ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
1002 static int mmc_test_basic_read(struct mmc_test_card *test)
1005 struct scatterlist sg;
1007 ret = mmc_test_set_blksize(test, 512);
1011 sg_init_one(&sg, test->buffer, 512);
1013 ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
1020 static int mmc_test_verify_write(struct mmc_test_card *test)
1023 struct scatterlist sg;
1025 sg_init_one(&sg, test->buffer, 512);
1027 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1034 static int mmc_test_verify_read(struct mmc_test_card *test)
1037 struct scatterlist sg;
1039 sg_init_one(&sg, test->buffer, 512);
1041 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1048 static int mmc_test_multi_write(struct mmc_test_card *test)
1052 struct scatterlist sg;
1054 if (test->card->host->max_blk_count == 1)
1055 return RESULT_UNSUP_HOST;
1057 size = PAGE_SIZE * 2;
1058 size = min(size, test->card->host->max_req_size);
1059 size = min(size, test->card->host->max_seg_size);
1060 size = min(size, test->card->host->max_blk_count * 512);
1063 return RESULT_UNSUP_HOST;
1065 sg_init_one(&sg, test->buffer, size);
1067 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1074 static int mmc_test_multi_read(struct mmc_test_card *test)
1078 struct scatterlist sg;
1080 if (test->card->host->max_blk_count == 1)
1081 return RESULT_UNSUP_HOST;
1083 size = PAGE_SIZE * 2;
1084 size = min(size, test->card->host->max_req_size);
1085 size = min(size, test->card->host->max_seg_size);
1086 size = min(size, test->card->host->max_blk_count * 512);
1089 return RESULT_UNSUP_HOST;
1091 sg_init_one(&sg, test->buffer, size);
1093 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1100 static int mmc_test_pow2_write(struct mmc_test_card *test)
1103 struct scatterlist sg;
1105 if (!test->card->csd.write_partial)
1106 return RESULT_UNSUP_CARD;
1108 for (i = 1; i < 512;i <<= 1) {
1109 sg_init_one(&sg, test->buffer, i);
1110 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
1118 static int mmc_test_pow2_read(struct mmc_test_card *test)
1121 struct scatterlist sg;
1123 if (!test->card->csd.read_partial)
1124 return RESULT_UNSUP_CARD;
1126 for (i = 1; i < 512;i <<= 1) {
1127 sg_init_one(&sg, test->buffer, i);
1128 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
1136 static int mmc_test_weird_write(struct mmc_test_card *test)
1139 struct scatterlist sg;
1141 if (!test->card->csd.write_partial)
1142 return RESULT_UNSUP_CARD;
1144 for (i = 3; i < 512;i += 7) {
1145 sg_init_one(&sg, test->buffer, i);
1146 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
1154 static int mmc_test_weird_read(struct mmc_test_card *test)
1157 struct scatterlist sg;
1159 if (!test->card->csd.read_partial)
1160 return RESULT_UNSUP_CARD;
1162 for (i = 3; i < 512;i += 7) {
1163 sg_init_one(&sg, test->buffer, i);
1164 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
1172 static int mmc_test_align_write(struct mmc_test_card *test)
1175 struct scatterlist sg;
1177 for (i = 1;i < 4;i++) {
1178 sg_init_one(&sg, test->buffer + i, 512);
1179 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1187 static int mmc_test_align_read(struct mmc_test_card *test)
1190 struct scatterlist sg;
1192 for (i = 1;i < 4;i++) {
1193 sg_init_one(&sg, test->buffer + i, 512);
1194 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1202 static int mmc_test_align_multi_write(struct mmc_test_card *test)
1206 struct scatterlist sg;
1208 if (test->card->host->max_blk_count == 1)
1209 return RESULT_UNSUP_HOST;
1211 size = PAGE_SIZE * 2;
1212 size = min(size, test->card->host->max_req_size);
1213 size = min(size, test->card->host->max_seg_size);
1214 size = min(size, test->card->host->max_blk_count * 512);
1217 return RESULT_UNSUP_HOST;
1219 for (i = 1;i < 4;i++) {
1220 sg_init_one(&sg, test->buffer + i, size);
1221 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1229 static int mmc_test_align_multi_read(struct mmc_test_card *test)
1233 struct scatterlist sg;
1235 if (test->card->host->max_blk_count == 1)
1236 return RESULT_UNSUP_HOST;
1238 size = PAGE_SIZE * 2;
1239 size = min(size, test->card->host->max_req_size);
1240 size = min(size, test->card->host->max_seg_size);
1241 size = min(size, test->card->host->max_blk_count * 512);
1244 return RESULT_UNSUP_HOST;
1246 for (i = 1;i < 4;i++) {
1247 sg_init_one(&sg, test->buffer + i, size);
1248 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1256 static int mmc_test_xfersize_write(struct mmc_test_card *test)
1260 ret = mmc_test_set_blksize(test, 512);
1264 ret = mmc_test_broken_transfer(test, 1, 512, 1);
1271 static int mmc_test_xfersize_read(struct mmc_test_card *test)
1275 ret = mmc_test_set_blksize(test, 512);
1279 ret = mmc_test_broken_transfer(test, 1, 512, 0);
1286 static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
1290 if (test->card->host->max_blk_count == 1)
1291 return RESULT_UNSUP_HOST;
1293 ret = mmc_test_set_blksize(test, 512);
1297 ret = mmc_test_broken_transfer(test, 2, 512, 1);
1304 static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
1308 if (test->card->host->max_blk_count == 1)
1309 return RESULT_UNSUP_HOST;
1311 ret = mmc_test_set_blksize(test, 512);
1315 ret = mmc_test_broken_transfer(test, 2, 512, 0);
1322 #ifdef CONFIG_HIGHMEM
1324 static int mmc_test_write_high(struct mmc_test_card *test)
1327 struct scatterlist sg;
1329 sg_init_table(&sg, 1);
1330 sg_set_page(&sg, test->highmem, 512, 0);
1332 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1339 static int mmc_test_read_high(struct mmc_test_card *test)
1342 struct scatterlist sg;
1344 sg_init_table(&sg, 1);
1345 sg_set_page(&sg, test->highmem, 512, 0);
1347 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1354 static int mmc_test_multi_write_high(struct mmc_test_card *test)
1358 struct scatterlist sg;
1360 if (test->card->host->max_blk_count == 1)
1361 return RESULT_UNSUP_HOST;
1363 size = PAGE_SIZE * 2;
1364 size = min(size, test->card->host->max_req_size);
1365 size = min(size, test->card->host->max_seg_size);
1366 size = min(size, test->card->host->max_blk_count * 512);
1369 return RESULT_UNSUP_HOST;
1371 sg_init_table(&sg, 1);
1372 sg_set_page(&sg, test->highmem, size, 0);
1374 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1381 static int mmc_test_multi_read_high(struct mmc_test_card *test)
1385 struct scatterlist sg;
1387 if (test->card->host->max_blk_count == 1)
1388 return RESULT_UNSUP_HOST;
1390 size = PAGE_SIZE * 2;
1391 size = min(size, test->card->host->max_req_size);
1392 size = min(size, test->card->host->max_seg_size);
1393 size = min(size, test->card->host->max_blk_count * 512);
1396 return RESULT_UNSUP_HOST;
1398 sg_init_table(&sg, 1);
1399 sg_set_page(&sg, test->highmem, size, 0);
1401 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1410 static int mmc_test_no_highmem(struct mmc_test_card *test)
1412 pr_info("%s: Highmem not configured - test skipped\n",
1413 mmc_hostname(test->card->host));
1417 #endif /* CONFIG_HIGHMEM */
1420 * Map sz bytes so that they can be transferred.
1422 static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
1423 int max_scatter, int min_sg_len)
1425 struct mmc_test_area *t = &test->area;
1428 t->blocks = sz >> 9;
1431 err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
1432 t->max_segs, t->max_seg_sz,
1435 err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
1436 t->max_seg_sz, &t->sg_len, min_sg_len);
1439 pr_info("%s: Failed to map sg list\n",
1440 mmc_hostname(test->card->host));
1445 * Transfer bytes mapped by mmc_test_area_map().
1447 static int mmc_test_area_transfer(struct mmc_test_card *test,
1448 unsigned int dev_addr, int write)
1450 struct mmc_test_area *t = &test->area;
1452 return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
1453 t->blocks, 512, write);
1457 * Map and transfer bytes for multiple transfers.
1459 static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
1460 unsigned int dev_addr, int write,
1461 int max_scatter, int timed, int count,
1462 bool nonblock, int min_sg_len)
1464 struct timespec ts1, ts2;
1467 struct mmc_test_area *t = &test->area;
1470 * In the case of a maximally scattered transfer, the maximum transfer
1471 * size is further limited by using PAGE_SIZE segments.
1474 struct mmc_test_area *t = &test->area;
1475 unsigned long max_tfr;
1477 if (t->max_seg_sz >= PAGE_SIZE)
1478 max_tfr = t->max_segs * PAGE_SIZE;
1480 max_tfr = t->max_segs * t->max_seg_sz;
1485 ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len);
1490 getnstimeofday(&ts1);
1492 ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
1493 dev_addr, t->blocks, 512, write, count);
1495 for (i = 0; i < count && ret == 0; i++) {
1496 ret = mmc_test_area_transfer(test, dev_addr, write);
1497 dev_addr += sz >> 9;
1504 getnstimeofday(&ts2);
1507 mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);
1512 static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
1513 unsigned int dev_addr, int write, int max_scatter,
1516 return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
1517 timed, 1, false, 0);
1521 * Write the test area entirely.
1523 static int mmc_test_area_fill(struct mmc_test_card *test)
1525 struct mmc_test_area *t = &test->area;
1527 return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
1531 * Erase the test area entirely.
1533 static int mmc_test_area_erase(struct mmc_test_card *test)
1535 struct mmc_test_area *t = &test->area;
1537 if (!mmc_can_erase(test->card))
1540 return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
1545 * Clean up the struct mmc_test_area.
1547 static int mmc_test_area_cleanup(struct mmc_test_card *test)
1549 struct mmc_test_area *t = &test->area;
1552 mmc_test_free_mem(t->mem);
1558 * Initialize an area for testing large transfers. The test area is set to the
1559 * middle of the card because cards may have different characteristics at the
1560 * front (for FAT file system optimization). Optionally, the area is erased
1561 * (if the card supports it) which may improve write performance. Optionally,
1562 * the area is filled with data for subsequent read tests.
1564 static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
1566 struct mmc_test_area *t = &test->area;
1567 unsigned long min_sz = 64 * 1024, sz;
1570 ret = mmc_test_set_blksize(test, 512);
1574 /* Make the test area size about 4MiB */
1575 sz = (unsigned long)test->card->pref_erase << 9;
1577 while (t->max_sz < 4 * 1024 * 1024)
1579 while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
1582 t->max_segs = test->card->host->max_segs;
1583 t->max_seg_sz = test->card->host->max_seg_size;
1584 t->max_seg_sz -= t->max_seg_sz % 512;
1586 t->max_tfr = t->max_sz;
1587 if (t->max_tfr >> 9 > test->card->host->max_blk_count)
1588 t->max_tfr = test->card->host->max_blk_count << 9;
1589 if (t->max_tfr > test->card->host->max_req_size)
1590 t->max_tfr = test->card->host->max_req_size;
1591 if (t->max_tfr / t->max_seg_sz > t->max_segs)
1592 t->max_tfr = t->max_segs * t->max_seg_sz;
1595 * Try to allocate enough memory for a maximum-sized transfer. Less is OK
1596 * because the same memory can be mapped into the scatterlist more than
1597 * once. Also, take into account the limits imposed on scatterlist
1598 * segments by the host driver.
1600 t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
1605 t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
1611 t->dev_addr = mmc_test_capacity(test->card) / 2;
1612 t->dev_addr -= t->dev_addr % (t->max_sz >> 9);
1615 ret = mmc_test_area_erase(test);
1621 ret = mmc_test_area_fill(test);
1629 mmc_test_area_cleanup(test);
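/*
 * Sizing example (illustrative, hypothetical card): on an 8 GiB card with
 * pref_erase = 8192 sectors (4 MiB), the test area becomes 4 MiB and is
 * placed at half the capacity, i.e. dev_addr = 16777216 / 2 = 8388608,
 * which is already a multiple of the 8192-sector area size.
 */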
1634 * Prepare for large transfers. Do not erase the test area.
1636 static int mmc_test_area_prepare(struct mmc_test_card *test)
1638 return mmc_test_area_init(test, 0, 0);
1642 * Prepare for large transfers. Do erase the test area.
1644 static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
1646 return mmc_test_area_init(test, 1, 0);
1650 * Prepare for large transfers. Erase and fill the test area.
1652 static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
1654 return mmc_test_area_init(test, 1, 1);
1658 * Test best-case performance. Best-case performance is expected from
1659 * a single large transfer.
1661 * An additional option (max_scatter) allows the measurement of the same
1662 * transfer but with no contiguous pages in the scatter list. This tests
1663 * how efficiently DMA handles scattered pages.
1665 static int mmc_test_best_performance(struct mmc_test_card *test, int write,
1668 struct mmc_test_area *t = &test->area;
1670 return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
1675 * Best-case read performance.
1677 static int mmc_test_best_read_performance(struct mmc_test_card *test)
1679 return mmc_test_best_performance(test, 0, 0);
1683 * Best-case write performance.
1685 static int mmc_test_best_write_performance(struct mmc_test_card *test)
1687 return mmc_test_best_performance(test, 1, 0);
1691 * Best-case read performance into scattered pages.
1693 static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
1695 return mmc_test_best_performance(test, 0, 1);
1699 * Best-case write performance from scattered pages.
1701 static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
1703 return mmc_test_best_performance(test, 1, 1);
1707 * Single read performance by transfer size.
1709 static int mmc_test_profile_read_perf(struct mmc_test_card *test)
1711 struct mmc_test_area *t = &test->area;
1713 unsigned int dev_addr;
1716 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1717 dev_addr = t->dev_addr + (sz >> 9);
1718 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1723 dev_addr = t->dev_addr;
1724 return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1728 * Single write performance by transfer size.
1730 static int mmc_test_profile_write_perf(struct mmc_test_card *test)
1732 struct mmc_test_area *t = &test->area;
1734 unsigned int dev_addr;
1737 ret = mmc_test_area_erase(test);
1740 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1741 dev_addr = t->dev_addr + (sz >> 9);
1742 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1746 ret = mmc_test_area_erase(test);
1750 dev_addr = t->dev_addr;
1751 return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1755 * Single trim performance by transfer size.
1757 static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
1759 struct mmc_test_area *t = &test->area;
1761 unsigned int dev_addr;
1762 struct timespec ts1, ts2;
1765 if (!mmc_can_trim(test->card))
1766 return RESULT_UNSUP_CARD;
1768 if (!mmc_can_erase(test->card))
1769 return RESULT_UNSUP_HOST;
1771 for (sz = 512; sz < t->max_sz; sz <<= 1) {
1772 dev_addr = t->dev_addr + (sz >> 9);
1773 getnstimeofday(&ts1);
1774 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1777 getnstimeofday(&ts2);
1778 mmc_test_print_rate(test, sz, &ts1, &ts2);
1780 dev_addr = t->dev_addr;
1781 getnstimeofday(&ts1);
1782 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1785 getnstimeofday(&ts2);
1786 mmc_test_print_rate(test, sz, &ts1, &ts2);
1790 static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
1792 struct mmc_test_area *t = &test->area;
1793 unsigned int dev_addr, i, cnt;
1794 struct timespec ts1, ts2;
1797 cnt = t->max_sz / sz;
1798 dev_addr = t->dev_addr;
1799 getnstimeofday(&ts1);
1800 for (i = 0; i < cnt; i++) {
1801 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
1804 dev_addr += (sz >> 9);
1806 getnstimeofday(&ts2);
1807 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1812 * Consecutive read performance by transfer size.
1814 static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
1816 struct mmc_test_area *t = &test->area;
1820 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1821 ret = mmc_test_seq_read_perf(test, sz);
1826 return mmc_test_seq_read_perf(test, sz);
1829 static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
1831 struct mmc_test_area *t = &test->area;
1832 unsigned int dev_addr, i, cnt;
1833 struct timespec ts1, ts2;
1836 ret = mmc_test_area_erase(test);
1839 cnt = t->max_sz / sz;
1840 dev_addr = t->dev_addr;
1841 getnstimeofday(&ts1);
1842 for (i = 0; i < cnt; i++) {
1843 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
1846 dev_addr += (sz >> 9);
1848 getnstimeofday(&ts2);
1849 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1854 * Consecutive write performance by transfer size.
1856 static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
1858 struct mmc_test_area *t = &test->area;
1862 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1863 ret = mmc_test_seq_write_perf(test, sz);
1868 return mmc_test_seq_write_perf(test, sz);
1872 * Consecutive trim performance by transfer size.
1874 static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
1876 struct mmc_test_area *t = &test->area;
1878 unsigned int dev_addr, i, cnt;
1879 struct timespec ts1, ts2;
1882 if (!mmc_can_trim(test->card))
1883 return RESULT_UNSUP_CARD;
1885 if (!mmc_can_erase(test->card))
1886 return RESULT_UNSUP_HOST;
1888 for (sz = 512; sz <= t->max_sz; sz <<= 1) {
1889 ret = mmc_test_area_erase(test);
1892 ret = mmc_test_area_fill(test);
1895 cnt = t->max_sz / sz;
1896 dev_addr = t->dev_addr;
1897 getnstimeofday(&ts1);
1898 for (i = 0; i < cnt; i++) {
1899 ret = mmc_erase(test->card, dev_addr, sz >> 9,
1903 dev_addr += (sz >> 9);
1905 getnstimeofday(&ts2);
1906 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1911 static unsigned int rnd_next = 1;
1913 static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
1917 rnd_next = rnd_next * 1103515245 + 12345;
1918 r = (rnd_next >> 16) & 0x7fff;
1919 return (r * rnd_cnt) >> 15;
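/*
 * The generator above is the classic ANSI C rand() LCG; bits 30..16 of
 * rnd_next give r in [0, 32767], which is then scaled into [0, rnd_cnt).
 * Worked example (illustrative): r = 16384 with rnd_cnt = 1024 gives
 * (16384 * 1024) >> 15 = 512.
 */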
1922 static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
1925 unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
1927 struct timespec ts1, ts2, ts;
1932 rnd_addr = mmc_test_capacity(test->card) / 4;
1933 range1 = rnd_addr / test->card->pref_erase;
1934 range2 = range1 / ssz;
1936 getnstimeofday(&ts1);
1937 for (cnt = 0; cnt < UINT_MAX; cnt++) {
1938 getnstimeofday(&ts2);
1939 ts = timespec_sub(ts2, ts1);
1940 if (ts.tv_sec >= 10)
1942 ea = mmc_test_rnd_num(range1);
1946 dev_addr = rnd_addr + test->card->pref_erase * ea +
1947 ssz * mmc_test_rnd_num(range2);
1948 ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
1953 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1957 static int mmc_test_random_perf(struct mmc_test_card *test, int write)
1959 struct mmc_test_area *t = &test->area;
1964 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1966 * When writing, try to get more consistent results by running
1967 * the test twice with exactly the same I/O but outputting the
1968 * results only for the 2nd run.
1972 ret = mmc_test_rnd_perf(test, write, 0, sz);
1977 ret = mmc_test_rnd_perf(test, write, 1, sz);
1984 ret = mmc_test_rnd_perf(test, write, 0, sz);
1989 return mmc_test_rnd_perf(test, write, 1, sz);
1993 * Random read performance by transfer size.
1995 static int mmc_test_random_read_perf(struct mmc_test_card *test)
1997 return mmc_test_random_perf(test, 0);
2001 * Random write performance by transfer size.
2003 static int mmc_test_random_write_perf(struct mmc_test_card *test)
2005 return mmc_test_random_perf(test, 1);
2008 static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
2009 unsigned int tot_sz, int max_scatter)
2011 struct mmc_test_area *t = &test->area;
2012 unsigned int dev_addr, i, cnt, sz, ssz;
2013 struct timespec ts1, ts2;
2019 * In the case of a maximally scattered transfer, the maximum transfer
2020 * size is further limited by using PAGE_SIZE segments.
2023 unsigned long max_tfr;
2025 if (t->max_seg_sz >= PAGE_SIZE)
2026 max_tfr = t->max_segs * PAGE_SIZE;
2028 max_tfr = t->max_segs * t->max_seg_sz;
2034 dev_addr = mmc_test_capacity(test->card) / 4;
2035 if (tot_sz > dev_addr << 9)
2036 tot_sz = dev_addr << 9;
2038 dev_addr &= 0xffff0000; /* Round to a 32MiB (0x10000-sector) boundary */
2040 getnstimeofday(&ts1);
2041 for (i = 0; i < cnt; i++) {
2042 ret = mmc_test_area_io(test, sz, dev_addr, write,
2048 getnstimeofday(&ts2);
2050 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
2055 static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
2059 for (i = 0; i < 10; i++) {
2060 ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
2064 for (i = 0; i < 5; i++) {
2065 ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
2069 for (i = 0; i < 3; i++) {
2070 ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
2079 * Large sequential read performance.
2081 static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
2083 return mmc_test_large_seq_perf(test, 0);
2087 * Large sequential write performance.
2089 static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
2091 return mmc_test_large_seq_perf(test, 1);
2094 static int mmc_test_rw_multiple(struct mmc_test_card *test,
2095 struct mmc_test_multiple_rw *tdata,
2096 unsigned int reqsize, unsigned int size,
2099 unsigned int dev_addr;
2100 struct mmc_test_area *t = &test->area;
2103 /* Set up test area */
2104 if (size > mmc_test_capacity(test->card) / 2 * 512)
2105 size = mmc_test_capacity(test->card) / 2 * 512;
2106 if (reqsize > t->max_tfr)
2107 reqsize = t->max_tfr;
2108 dev_addr = mmc_test_capacity(test->card) / 4;
2109 if ((dev_addr & 0xffff0000))
2110 dev_addr &= 0xffff0000; /* Round to a 32MiB (0x10000-sector) boundary */
2112 dev_addr &= 0xfffff800; /* Round to 1MiB boundary */
2119 /* prepare test area */
2120 if (mmc_can_erase(test->card) &&
2121 tdata->prepare & MMC_TEST_PREP_ERASE) {
2122 ret = mmc_erase(test->card, dev_addr,
2123 size / 512, MMC_SECURE_ERASE_ARG);
2125 ret = mmc_erase(test->card, dev_addr,
2126 size / 512, MMC_ERASE_ARG);
2132 ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
2133 tdata->do_write, 0, 1, size / reqsize,
2134 tdata->do_nonblock_req, min_sg_len);
2140 pr_info("[%s] error\n", __func__);
2144 static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
2145 struct mmc_test_multiple_rw *rw)
2149 void *pre_req = test->card->host->ops->pre_req;
2150 void *post_req = test->card->host->ops->post_req;
2152 if (rw->do_nonblock_req &&
2153 ((!pre_req && post_req) || (pre_req && !post_req))) {
2154 pr_info("error: only one of pre/post is defined\n");
2158 for (i = 0 ; i < rw->len && ret == 0; i++) {
2159 ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0);
2166 static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
2167 struct mmc_test_multiple_rw *rw)
2172 for (i = 0 ; i < rw->len && ret == 0; i++) {
2173 ret = mmc_test_rw_multiple(test, rw, 512*1024, rw->size,
2182 * Multiple blocking write 4k to 4 MB chunks
2184 static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
2186 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2187 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2188 struct mmc_test_multiple_rw test_data = {
2190 .size = TEST_AREA_MAX_SIZE,
2191 .len = ARRAY_SIZE(bs),
2193 .do_nonblock_req = false,
2194 .prepare = MMC_TEST_PREP_ERASE,
2197 return mmc_test_rw_multiple_size(test, &test_data);
2201 * Multiple non-blocking write 4k to 4 MB chunks
2203 static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
2205 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2206 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2207 struct mmc_test_multiple_rw test_data = {
2209 .size = TEST_AREA_MAX_SIZE,
2210 .len = ARRAY_SIZE(bs),
2212 .do_nonblock_req = true,
2213 .prepare = MMC_TEST_PREP_ERASE,
2216 return mmc_test_rw_multiple_size(test, &test_data);
2220 * Multiple blocking read 4k to 4 MB chunks
2222 static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
2224 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2225 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2226 struct mmc_test_multiple_rw test_data = {
2228 .size = TEST_AREA_MAX_SIZE,
2229 .len = ARRAY_SIZE(bs),
2231 .do_nonblock_req = false,
2232 .prepare = MMC_TEST_PREP_NONE,
2235 return mmc_test_rw_multiple_size(test, &test_data);
2239 * Multiple non-blocking read 4k to 4 MB chunks
2241 static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
2243 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2244 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2245 struct mmc_test_multiple_rw test_data = {
2247 .size = TEST_AREA_MAX_SIZE,
2248 .len = ARRAY_SIZE(bs),
2250 .do_nonblock_req = true,
2251 .prepare = MMC_TEST_PREP_NONE,
2254 return mmc_test_rw_multiple_size(test, &test_data);
2258 * Multiple blocking write 1 to 512 sg elements
2260 static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test)
2262 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2263 1 << 7, 1 << 8, 1 << 9};
2264 struct mmc_test_multiple_rw test_data = {
2266 .size = TEST_AREA_MAX_SIZE,
2267 .len = ARRAY_SIZE(sg_len),
2269 .do_nonblock_req = false,
2270 .prepare = MMC_TEST_PREP_ERASE,
2273 return mmc_test_rw_multiple_sg_len(test, &test_data);
2277 * Multiple non-blocking write 1 to 512 sg elements
2279 static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test)
2281 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2282 1 << 7, 1 << 8, 1 << 9};
2283 struct mmc_test_multiple_rw test_data = {
2285 .size = TEST_AREA_MAX_SIZE,
2286 .len = ARRAY_SIZE(sg_len),
2288 .do_nonblock_req = true,
2289 .prepare = MMC_TEST_PREP_ERASE,
2292 return mmc_test_rw_multiple_sg_len(test, &test_data);
2296 * Multiple blocking read 1 to 512 sg elements
2298 static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test)
2300 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2301 1 << 7, 1 << 8, 1 << 9};
2302 struct mmc_test_multiple_rw test_data = {
2304 .size = TEST_AREA_MAX_SIZE,
2305 .len = ARRAY_SIZE(sg_len),
2307 .do_nonblock_req = false,
2308 .prepare = MMC_TEST_PREP_NONE,
2311 return mmc_test_rw_multiple_sg_len(test, &test_data);
2315 * Multiple non-blocking read 1 to 512 sg elements
2317 static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
2319 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2320 1 << 7, 1 << 8, 1 << 9};
2321 struct mmc_test_multiple_rw test_data = {
2323 .size = TEST_AREA_MAX_SIZE,
2324 .len = ARRAY_SIZE(sg_len),
2326 .do_nonblock_req = true,
2327 .prepare = MMC_TEST_PREP_NONE,
2330 return mmc_test_rw_multiple_sg_len(test, &test_data);
2334 * eMMC hardware reset.
2336 static int mmc_test_hw_reset(struct mmc_test_card *test)
2338 struct mmc_card *card = test->card;
2339 struct mmc_host *host = card->host;
2342 err = mmc_hw_reset_check(host);
2349 if (err != -EOPNOTSUPP)
2352 if (!mmc_can_reset(card))
2353 return RESULT_UNSUP_CARD;
2355 return RESULT_UNSUP_HOST;
2358 static const struct mmc_test_case mmc_test_cases[] = {
2360 .name = "Basic write (no data verification)",
2361 .run = mmc_test_basic_write,
2365 .name = "Basic read (no data verification)",
2366 .run = mmc_test_basic_read,
2370 .name = "Basic write (with data verification)",
2371 .prepare = mmc_test_prepare_write,
2372 .run = mmc_test_verify_write,
2373 .cleanup = mmc_test_cleanup,
2377 .name = "Basic read (with data verification)",
2378 .prepare = mmc_test_prepare_read,
2379 .run = mmc_test_verify_read,
2380 .cleanup = mmc_test_cleanup,
2384 .name = "Multi-block write",
2385 .prepare = mmc_test_prepare_write,
2386 .run = mmc_test_multi_write,
2387 .cleanup = mmc_test_cleanup,
2391 .name = "Multi-block read",
2392 .prepare = mmc_test_prepare_read,
2393 .run = mmc_test_multi_read,
2394 .cleanup = mmc_test_cleanup,
2398 .name = "Power of two block writes",
2399 .prepare = mmc_test_prepare_write,
2400 .run = mmc_test_pow2_write,
2401 .cleanup = mmc_test_cleanup,
2405 .name = "Power of two block reads",
2406 .prepare = mmc_test_prepare_read,
2407 .run = mmc_test_pow2_read,
2408 .cleanup = mmc_test_cleanup,
2412 .name = "Weird sized block writes",
2413 .prepare = mmc_test_prepare_write,
2414 .run = mmc_test_weird_write,
2415 .cleanup = mmc_test_cleanup,
2419 .name = "Weird sized block reads",
2420 .prepare = mmc_test_prepare_read,
2421 .run = mmc_test_weird_read,
2422 .cleanup = mmc_test_cleanup,
2426 .name = "Badly aligned write",
2427 .prepare = mmc_test_prepare_write,
2428 .run = mmc_test_align_write,
2429 .cleanup = mmc_test_cleanup,
2433 .name = "Badly aligned read",
2434 .prepare = mmc_test_prepare_read,
2435 .run = mmc_test_align_read,
2436 .cleanup = mmc_test_cleanup,
2440 .name = "Badly aligned multi-block write",
2441 .prepare = mmc_test_prepare_write,
2442 .run = mmc_test_align_multi_write,
2443 .cleanup = mmc_test_cleanup,
2447 .name = "Badly aligned multi-block read",
2448 .prepare = mmc_test_prepare_read,
2449 .run = mmc_test_align_multi_read,
2450 .cleanup = mmc_test_cleanup,
2454 .name = "Correct xfer_size at write (start failure)",
2455 .run = mmc_test_xfersize_write,
2459 .name = "Correct xfer_size at read (start failure)",
2460 .run = mmc_test_xfersize_read,
2464 .name = "Correct xfer_size at write (midway failure)",
2465 .run = mmc_test_multi_xfersize_write,
2469 .name = "Correct xfer_size at read (midway failure)",
2470 .run = mmc_test_multi_xfersize_read,
2473 #ifdef CONFIG_HIGHMEM
2476 .name = "Highmem write",
2477 .prepare = mmc_test_prepare_write,
2478 .run = mmc_test_write_high,
2479 .cleanup = mmc_test_cleanup,
2483 .name = "Highmem read",
2484 .prepare = mmc_test_prepare_read,
2485 .run = mmc_test_read_high,
2486 .cleanup = mmc_test_cleanup,
2490 .name = "Multi-block highmem write",
2491 .prepare = mmc_test_prepare_write,
2492 .run = mmc_test_multi_write_high,
2493 .cleanup = mmc_test_cleanup,
2497 .name = "Multi-block highmem read",
2498 .prepare = mmc_test_prepare_read,
2499 .run = mmc_test_multi_read_high,
2500 .cleanup = mmc_test_cleanup,
2506 .name = "Highmem write",
2507 .run = mmc_test_no_highmem,
2511 .name = "Highmem read",
2512 .run = mmc_test_no_highmem,
2516 .name = "Multi-block highmem write",
2517 .run = mmc_test_no_highmem,
2521 .name = "Multi-block highmem read",
2522 .run = mmc_test_no_highmem,
2525 #endif /* CONFIG_HIGHMEM */
2528 .name = "Best-case read performance",
2529 .prepare = mmc_test_area_prepare_fill,
2530 .run = mmc_test_best_read_performance,
2531 .cleanup = mmc_test_area_cleanup,
2535 .name = "Best-case write performance",
2536 .prepare = mmc_test_area_prepare_erase,
2537 .run = mmc_test_best_write_performance,
2538 .cleanup = mmc_test_area_cleanup,
2542 .name = "Best-case read performance into scattered pages",
2543 .prepare = mmc_test_area_prepare_fill,
2544 .run = mmc_test_best_read_perf_max_scatter,
2545 .cleanup = mmc_test_area_cleanup,
2549 .name = "Best-case write performance from scattered pages",
2550 .prepare = mmc_test_area_prepare_erase,
2551 .run = mmc_test_best_write_perf_max_scatter,
2552 .cleanup = mmc_test_area_cleanup,
2556 .name = "Single read performance by transfer size",
2557 .prepare = mmc_test_area_prepare_fill,
2558 .run = mmc_test_profile_read_perf,
2559 .cleanup = mmc_test_area_cleanup,
2563 .name = "Single write performance by transfer size",
2564 .prepare = mmc_test_area_prepare,
2565 .run = mmc_test_profile_write_perf,
2566 .cleanup = mmc_test_area_cleanup,
2570 .name = "Single trim performance by transfer size",
2571 .prepare = mmc_test_area_prepare_fill,
2572 .run = mmc_test_profile_trim_perf,
2573 .cleanup = mmc_test_area_cleanup,
2577 .name = "Consecutive read performance by transfer size",
2578 .prepare = mmc_test_area_prepare_fill,
2579 .run = mmc_test_profile_seq_read_perf,
2580 .cleanup = mmc_test_area_cleanup,
2584 .name = "Consecutive write performance by transfer size",
2585 .prepare = mmc_test_area_prepare,
2586 .run = mmc_test_profile_seq_write_perf,
2587 .cleanup = mmc_test_area_cleanup,
2591 .name = "Consecutive trim performance by transfer size",
2592 .prepare = mmc_test_area_prepare,
2593 .run = mmc_test_profile_seq_trim_perf,
2594 .cleanup = mmc_test_area_cleanup,
2598 .name = "Random read performance by transfer size",
2599 .prepare = mmc_test_area_prepare,
2600 .run = mmc_test_random_read_perf,
2601 .cleanup = mmc_test_area_cleanup,
2605 .name = "Random write performance by transfer size",
2606 .prepare = mmc_test_area_prepare,
2607 .run = mmc_test_random_write_perf,
2608 .cleanup = mmc_test_area_cleanup,
2612 .name = "Large sequential read into scattered pages",
2613 .prepare = mmc_test_area_prepare,
2614 .run = mmc_test_large_seq_read_perf,
2615 .cleanup = mmc_test_area_cleanup,
2619 .name = "Large sequential write from scattered pages",
2620 .prepare = mmc_test_area_prepare,
2621 .run = mmc_test_large_seq_write_perf,
2622 .cleanup = mmc_test_area_cleanup,
2626 .name = "Write performance with blocking req 4k to 4MB",
2627 .prepare = mmc_test_area_prepare,
2628 .run = mmc_test_profile_mult_write_blocking_perf,
2629 .cleanup = mmc_test_area_cleanup,
2633 .name = "Write performance with non-blocking req 4k to 4MB",
2634 .prepare = mmc_test_area_prepare,
2635 .run = mmc_test_profile_mult_write_nonblock_perf,
2636 .cleanup = mmc_test_area_cleanup,
2640 .name = "Read performance with blocking req 4k to 4MB",
2641 .prepare = mmc_test_area_prepare,
2642 .run = mmc_test_profile_mult_read_blocking_perf,
2643 .cleanup = mmc_test_area_cleanup,
2647 .name = "Read performance with non-blocking req 4k to 4MB",
2648 .prepare = mmc_test_area_prepare,
2649 .run = mmc_test_profile_mult_read_nonblock_perf,
2650 .cleanup = mmc_test_area_cleanup,
2654 .name = "Write performance blocking req 1 to 512 sg elems",
2655 .prepare = mmc_test_area_prepare,
2656 .run = mmc_test_profile_sglen_wr_blocking_perf,
2657 .cleanup = mmc_test_area_cleanup,
2661 .name = "Write performance non-blocking req 1 to 512 sg elems",
2662 .prepare = mmc_test_area_prepare,
2663 .run = mmc_test_profile_sglen_wr_nonblock_perf,
2664 .cleanup = mmc_test_area_cleanup,
2668 .name = "Read performance blocking req 1 to 512 sg elems",
2669 .prepare = mmc_test_area_prepare,
2670 .run = mmc_test_profile_sglen_r_blocking_perf,
2671 .cleanup = mmc_test_area_cleanup,
2675 .name = "Read performance non-blocking req 1 to 512 sg elems",
2676 .prepare = mmc_test_area_prepare,
2677 .run = mmc_test_profile_sglen_r_nonblock_perf,
2678 .cleanup = mmc_test_area_cleanup,
2682 .name = "eMMC hardware reset",
2683 .run = mmc_test_hw_reset,
2687 static DEFINE_MUTEX(mmc_test_lock);
2689 static LIST_HEAD(mmc_test_result);
2691 static void mmc_test_run(struct mmc_test_card *test, int testcase)
2695 pr_info("%s: Starting tests of card %s...\n",
2696 mmc_hostname(test->card->host), mmc_card_id(test->card));
2698 mmc_claim_host(test->card->host);
2700 for (i = 0;i < ARRAY_SIZE(mmc_test_cases);i++) {
2701 struct mmc_test_general_result *gr;
2703 if (testcase && ((i + 1) != testcase))
2706 pr_info("%s: Test case %d. %s...\n",
2707 mmc_hostname(test->card->host), i + 1,
2708 mmc_test_cases[i].name);
2710 if (mmc_test_cases[i].prepare) {
2711 ret = mmc_test_cases[i].prepare(test);
2713 pr_info("%s: Result: Prepare "
2714 "stage failed! (%d)\n",
2715 mmc_hostname(test->card->host),
2721 gr = kzalloc(sizeof(struct mmc_test_general_result),
2724 INIT_LIST_HEAD(&gr->tr_lst);
2726 /* Record what we already know */
2727 gr->card = test->card;
2730 /* Append the container to the global result list */
2731 list_add_tail(&gr->link, &mmc_test_result);
2734 * Save the pointer to the created container in our private
2740 ret = mmc_test_cases[i].run(test);
2743 pr_info("%s: Result: OK\n",
2744 mmc_hostname(test->card->host));
2747 pr_info("%s: Result: FAILED\n",
2748 mmc_hostname(test->card->host));
2750 case RESULT_UNSUP_HOST:
2751 pr_info("%s: Result: UNSUPPORTED "
2753 mmc_hostname(test->card->host));
2755 case RESULT_UNSUP_CARD:
2756 pr_info("%s: Result: UNSUPPORTED "
2758 mmc_hostname(test->card->host));
2761 pr_info("%s: Result: ERROR (%d)\n",
2762 mmc_hostname(test->card->host), ret);
2765 /* Save the result */
2769 if (mmc_test_cases[i].cleanup) {
2770 ret = mmc_test_cases[i].cleanup(test);
2772 pr_info("%s: Warning: Cleanup "
2773 "stage failed! (%d)\n",
2774 mmc_hostname(test->card->host),
2780 mmc_release_host(test->card->host);
2782 pr_info("%s: Tests completed.\n",
2783 mmc_hostname(test->card->host));
2786 static void mmc_test_free_result(struct mmc_card *card)
2788 struct mmc_test_general_result *gr, *grs;
2790 mutex_lock(&mmc_test_lock);
2792 list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
2793 struct mmc_test_transfer_result *tr, *trs;
2795 if (card && gr->card != card)
2798 list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
2799 list_del(&tr->link);
2803 list_del(&gr->link);
2807 mutex_unlock(&mmc_test_lock);
2810 static LIST_HEAD(mmc_test_file_test);
2812 static int mtf_test_show(struct seq_file *sf, void *data)
2814 struct mmc_card *card = (struct mmc_card *)sf->private;
2815 struct mmc_test_general_result *gr;
2817 mutex_lock(&mmc_test_lock);
2819 list_for_each_entry(gr, &mmc_test_result, link) {
2820 struct mmc_test_transfer_result *tr;
2822 if (gr->card != card)
2825 seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);
2827 list_for_each_entry(tr, &gr->tr_lst, link) {
2828 seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n",
2829 tr->count, tr->sectors,
2830 (unsigned long)tr->ts.tv_sec,
2831 (unsigned long)tr->ts.tv_nsec,
2832 tr->rate, tr->iops / 100, tr->iops % 100);
2836 mutex_unlock(&mmc_test_lock);
2841 static int mtf_test_open(struct inode *inode, struct file *file)
2843 return single_open(file, mtf_test_show, inode->i_private);
2846 static ssize_t mtf_test_write(struct file *file, const char __user *buf,
2847 size_t count, loff_t *pos)
2849 struct seq_file *sf = (struct seq_file *)file->private_data;
2850 struct mmc_card *card = (struct mmc_card *)sf->private;
2851 struct mmc_test_card *test;
2855 ret = kstrtol_from_user(buf, count, 10, &testcase);
2859 test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
2864 * Remove all test results associated with the given card, so that only
2865 * data from the most recent run is kept.
2867 mmc_test_free_result(card);
2871 test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
2872 #ifdef CONFIG_HIGHMEM
2873 test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
2876 #ifdef CONFIG_HIGHMEM
2877 if (test->buffer && test->highmem) {
2881 mutex_lock(&mmc_test_lock);
2882 mmc_test_run(test, testcase);
2883 mutex_unlock(&mmc_test_lock);
2886 #ifdef CONFIG_HIGHMEM
2887 __free_pages(test->highmem, BUFFER_ORDER);
2889 kfree(test->buffer);
2895 static const struct file_operations mmc_test_fops_test = {
2896 .open = mtf_test_open,
2898 .write = mtf_test_write,
2899 .llseek = seq_lseek,
2900 .release = single_release,
2903 static int mtf_testlist_show(struct seq_file *sf, void *data)
2907 mutex_lock(&mmc_test_lock);
2909 for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
2910 seq_printf(sf, "%d:\t%s\n", i+1, mmc_test_cases[i].name);
2912 mutex_unlock(&mmc_test_lock);
2917 static int mtf_testlist_open(struct inode *inode, struct file *file)
2919 return single_open(file, mtf_testlist_show, inode->i_private);
2922 static const struct file_operations mmc_test_fops_testlist = {
2923 .open = mtf_testlist_open,
2925 .llseek = seq_lseek,
2926 .release = single_release,
2929 static void mmc_test_free_dbgfs_file(struct mmc_card *card)
2931 struct mmc_test_dbgfs_file *df, *dfs;
2933 mutex_lock(&mmc_test_lock);
2935 list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
2936 if (card && df->card != card)
2938 debugfs_remove(df->file);
2939 list_del(&df->link);
2943 mutex_unlock(&mmc_test_lock);
2946 static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
2947 const char *name, umode_t mode, const struct file_operations *fops)
2949 struct dentry *file = NULL;
2950 struct mmc_test_dbgfs_file *df;
2952 if (card->debugfs_root)
2953 file = debugfs_create_file(name, mode, card->debugfs_root,
2956 if (IS_ERR_OR_NULL(file)) {
2958 "Can't create %s. Perhaps debugfs is disabled.\n",
2963 df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
2965 debugfs_remove(file);
2967 "Can't allocate memory for internal usage.\n");
2974 list_add(&df->link, &mmc_test_file_test);
2978 static int mmc_test_register_dbgfs_file(struct mmc_card *card)
2982 mutex_lock(&mmc_test_lock);
2984 ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
2985 &mmc_test_fops_test);
2989 ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
2990 &mmc_test_fops_testlist);
2995 mutex_unlock(&mmc_test_lock);
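/*
 * Usage sketch (illustrative; the exact debugfs path depends on the host and
 * card instance):
 *
 *	# cat /sys/kernel/debug/mmc0/mmc0:0001/testlist
 *	# echo 4 > /sys/kernel/debug/mmc0/mmc0:0001/test
 *	# cat /sys/kernel/debug/mmc0/mmc0:0001/test
 *
 * lists the available test cases, runs test case 4, and reads back the
 * results accumulated in mmc_test_result.
 */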
3000 static int mmc_test_probe(struct mmc_card *card)
3004 if (!mmc_card_mmc(card) && !mmc_card_sd(card))
3007 ret = mmc_test_register_dbgfs_file(card);
3011 dev_info(&card->dev, "Card claimed for testing.\n");
3016 static void mmc_test_remove(struct mmc_card *card)
3018 mmc_test_free_result(card);
3019 mmc_test_free_dbgfs_file(card);
3022 static void mmc_test_shutdown(struct mmc_card *card)
3026 static struct mmc_driver mmc_driver = {
3030 .probe = mmc_test_probe,
3031 .remove = mmc_test_remove,
3032 .shutdown = mmc_test_shutdown,
3035 static int __init mmc_test_init(void)
3037 return mmc_register_driver(&mmc_driver);
3040 static void __exit mmc_test_exit(void)
3042 /* Clear stale data if a card is still plugged in */
3043 mmc_test_free_result(NULL);
3044 mmc_test_free_dbgfs_file(NULL);
3046 mmc_unregister_driver(&mmc_driver);
3049 module_init(mmc_test_init);
3050 module_exit(mmc_test_exit);
3052 MODULE_LICENSE("GPL");
3053 MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
3054 MODULE_AUTHOR("Pierre Ossman");