/*
 *  linux/drivers/mmc/card/mmc_test.c
 *
 *  Copyright 2007-2008 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */
12 #include <linux/mmc/core.h>
13 #include <linux/mmc/card.h>
14 #include <linux/mmc/host.h>
15 #include <linux/mmc/mmc.h>
16 #include <linux/slab.h>
18 #include <linux/scatterlist.h>
19 #include <linux/swap.h> /* For nr_free_buffer_pages() */
20 #include <linux/list.h>
22 #include <linux/debugfs.h>
23 #include <linux/uaccess.h>
24 #include <linux/seq_file.h>
25 #include <linux/module.h>
/* Test result codes: OK/FAIL plus "unsupported by host/card" skips. */
#define RESULT_OK		0
#define RESULT_FAIL		1
#define RESULT_UNSUP_HOST	2
#define RESULT_UNSUP_CARD	3

/* Transfer buffer is 4 pages (16KiB with 4KiB pages). */
#define BUFFER_ORDER		2
#define BUFFER_SIZE		(PAGE_SIZE << BUFFER_ORDER)

/*
 * Limit the test area size to the maximum MMC HC erase group size.  Note that
 * the maximum SD allocation unit size is just 4MiB.
 */
#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)
/**
 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
 * @page: first page in the allocation
 * @order: order of the number of pages allocated
 */
struct mmc_test_pages {
	struct page *page;
	unsigned int order;
};

/**
 * struct mmc_test_mem - allocated memory.
 * @arr: array of allocations
 * @cnt: number of allocations
 */
struct mmc_test_mem {
	struct mmc_test_pages *arr;
	unsigned int cnt;
};

/**
 * struct mmc_test_area - information for performance tests.
 * @max_sz: test area size (in bytes)
 * @dev_addr: address on card at which to do performance tests
 * @max_tfr: maximum transfer size allowed by driver (in bytes)
 * @max_segs: maximum segments allowed by driver in scatterlist @sg
 * @max_seg_sz: maximum segment size allowed by driver
 * @blocks: number of (512 byte) blocks currently mapped by @sg
 * @sg_len: length of currently mapped scatterlist @sg
 * @mem: allocated memory
 * @sg: scatterlist
 */
struct mmc_test_area {
	unsigned long max_sz;
	unsigned int dev_addr;
	unsigned int max_tfr;
	unsigned int max_segs;
	unsigned int max_seg_sz;
	unsigned int blocks;
	unsigned int sg_len;
	struct mmc_test_mem *mem;
	struct scatterlist *sg;
};
86 * struct mmc_test_transfer_result - transfer results for performance tests.
87 * @link: double-linked list
88 * @count: amount of group of sectors to check
89 * @sectors: amount of sectors to check in one group
90 * @ts: time values of transfer
91 * @rate: calculated transfer rate
92 * @iops: I/O operations per second (times 100)
94 struct mmc_test_transfer_result {
95 struct list_head link;
104 * struct mmc_test_general_result - results for tests.
105 * @link: double-linked list
106 * @card: card under test
107 * @testcase: number of test case
108 * @result: result of test run
109 * @tr_lst: transfer measurements if any as mmc_test_transfer_result
111 struct mmc_test_general_result {
112 struct list_head link;
113 struct mmc_card *card;
116 struct list_head tr_lst;
120 * struct mmc_test_dbgfs_file - debugfs related file.
121 * @link: double-linked list
122 * @card: card under test
123 * @file: file created under debugfs
125 struct mmc_test_dbgfs_file {
126 struct list_head link;
127 struct mmc_card *card;
132 * struct mmc_test_card - test information.
133 * @card: card under test
134 * @scratch: transfer buffer
135 * @buffer: transfer buffer
136 * @highmem: buffer for highmem tests
137 * @area: information for performance tests
138 * @gr: pointer to results of current testcase
140 struct mmc_test_card {
141 struct mmc_card *card;
143 u8 scratch[BUFFER_SIZE];
145 #ifdef CONFIG_HIGHMEM
146 struct page *highmem;
148 struct mmc_test_area area;
149 struct mmc_test_general_result *gr;
152 enum mmc_test_prep_media {
153 MMC_TEST_PREP_NONE = 0,
154 MMC_TEST_PREP_WRITE_FULL = 1 << 0,
155 MMC_TEST_PREP_ERASE = 1 << 1,
158 struct mmc_test_multiple_rw {
159 unsigned int *sg_len;
164 bool do_nonblock_req;
165 enum mmc_test_prep_media prepare;
168 struct mmc_test_async_req {
169 struct mmc_async_req areq;
170 struct mmc_test_card *test;
173 struct mmc_test_parameter {
176 long (*exec)(struct mmc_test_card *);
180 static long mmc_test_set_testcase(struct mmc_test_card *test);
181 static long mmc_test_set_clock(struct mmc_test_card *test);
182 static long mmc_test_set_bus_width(struct mmc_test_card *test);
183 static long mmc_test_set_timing(struct mmc_test_card *test);
186 static struct mmc_test_parameter mmc_test_parameter[] = {
188 .name = "Testcase Number",
190 .exec = mmc_test_set_testcase,
194 .name = "Clock Rate",
196 .exec = mmc_test_set_clock,
202 .exec = mmc_test_set_bus_width,
208 .exec = mmc_test_set_timing,
213 static long mmc_test_set_testcase(struct mmc_test_card *test)
215 return mmc_test_parameter[0].value;
218 static long mmc_test_set_clock(struct mmc_test_card *test)
220 long clock = mmc_test_parameter[1].value;
222 return test->card->host->ios.clock;
223 WARN_ON(clock < test->card->host->f_min);
224 if (clock > test->card->host->f_max)
225 clock = test->card->host->f_max;
227 test->card->host->ios.clock = clock;
229 return test->card->host->ios.clock;
232 static long mmc_test_set_bus_width(struct mmc_test_card *test)
234 long bus_width = mmc_test_parameter[2].value;
236 return test->card->host->ios.bus_width;
238 test->card->host->ios.bus_width = bus_width;
240 return test->card->host->ios.bus_width = bus_width;
243 static long mmc_test_set_timing(struct mmc_test_card *test)
245 long timing = mmc_test_parameter[3].value;
247 return test->card->host->ios.timing;
248 test->card->host->ios.timing = timing;
250 return test->card->host->ios.timing;
253 static void mmc_test_set_parameters(struct mmc_test_card *test)
256 for (i = 0; i < ARRAY_SIZE(mmc_test_parameter); i++) {
257 printk(KERN_INFO "Parameter[%s] set to [%ld]\n",
258 mmc_test_parameter[i].name,
259 mmc_test_parameter[i].exec(test));
/*******************************************************************/
/*  General helper functions                                       */
/*******************************************************************/
268 * Configure correct block size in card
270 static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
272 return mmc_set_blocklen(test->card, size);
276 * Fill in the mmc_request structure given a set of transfer parameters.
278 static void mmc_test_prepare_mrq(struct mmc_test_card *test,
279 struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
280 unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
282 BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);
285 mrq->cmd->opcode = write ?
286 MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
288 mrq->cmd->opcode = write ?
289 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
292 mrq->cmd->arg = dev_addr;
293 if (!mmc_card_blockaddr(test->card))
296 mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;
301 mrq->stop->opcode = MMC_STOP_TRANSMISSION;
303 mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
306 mrq->data->blksz = blksz;
307 mrq->data->blocks = blocks;
308 mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
310 mrq->data->sg_len = sg_len;
312 mmc_set_data_timeout(mrq->data, test->card);
315 static int mmc_test_busy(struct mmc_command *cmd)
317 return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
318 (R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
322 * Wait for the card to finish the busy state
324 static int mmc_test_wait_busy(struct mmc_test_card *test)
327 struct mmc_command cmd = {0};
331 memset(&cmd, 0, sizeof(struct mmc_command));
333 cmd.opcode = MMC_SEND_STATUS;
334 cmd.arg = test->card->rca << 16;
335 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
337 ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
341 if (!busy && mmc_test_busy(&cmd)) {
343 if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
344 pr_info("%s: Warning: Host did not "
345 "wait for busy state to end.\n",
346 mmc_hostname(test->card->host));
348 } while (mmc_test_busy(&cmd));
354 * Transfer a single sector of kernel addressable data
356 static int mmc_test_buffer_transfer(struct mmc_test_card *test,
357 u8 *buffer, unsigned addr, unsigned blksz, int write)
361 struct mmc_request mrq = {0};
362 struct mmc_command cmd = {0};
363 struct mmc_command stop = {0};
364 struct mmc_data data = {0};
366 struct scatterlist sg;
372 sg_init_one(&sg, buffer, blksz);
374 mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);
376 mmc_wait_for_req(test->card->host, &mrq);
383 ret = mmc_test_wait_busy(test);
390 static void mmc_test_free_mem(struct mmc_test_mem *mem)
395 __free_pages(mem->arr[mem->cnt].page,
396 mem->arr[mem->cnt].order);
402 * Allocate a lot of memory, preferably max_sz but at least min_sz. In case
403 * there isn't much memory do not exceed 1/16th total lowmem pages. Also do
404 * not exceed a maximum number of segments and try not to make segments much
405 * bigger than maximum segment size.
407 static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
408 unsigned long max_sz,
409 unsigned int max_segs,
410 unsigned int max_seg_sz)
412 unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
413 unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
414 unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
415 unsigned long page_cnt = 0;
416 unsigned long limit = nr_free_buffer_pages() >> 4;
417 struct mmc_test_mem *mem;
419 if (max_page_cnt > limit)
420 max_page_cnt = limit;
421 if (min_page_cnt > max_page_cnt)
422 min_page_cnt = max_page_cnt;
424 if (max_seg_page_cnt > max_page_cnt)
425 max_seg_page_cnt = max_page_cnt;
427 if (max_segs > max_page_cnt)
428 max_segs = max_page_cnt;
430 mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
434 mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_segs,
439 while (max_page_cnt) {
442 gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
445 order = get_order(max_seg_page_cnt << PAGE_SHIFT);
447 page = alloc_pages(flags, order);
453 if (page_cnt < min_page_cnt)
457 mem->arr[mem->cnt].page = page;
458 mem->arr[mem->cnt].order = order;
460 if (max_page_cnt <= (1UL << order))
462 max_page_cnt -= 1UL << order;
463 page_cnt += 1UL << order;
464 if (mem->cnt >= max_segs) {
465 if (page_cnt < min_page_cnt)
474 mmc_test_free_mem(mem);
479 * Map memory into a scatterlist. Optionally allow the same memory to be
480 * mapped more than once.
482 static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size,
483 struct scatterlist *sglist, int repeat,
484 unsigned int max_segs, unsigned int max_seg_sz,
485 unsigned int *sg_len, int min_sg_len)
487 struct scatterlist *sg = NULL;
489 unsigned long sz = size;
491 sg_init_table(sglist, max_segs);
492 if (min_sg_len > max_segs)
493 min_sg_len = max_segs;
497 for (i = 0; i < mem->cnt; i++) {
498 unsigned long len = PAGE_SIZE << mem->arr[i].order;
500 if (min_sg_len && (size / min_sg_len < len))
501 len = ALIGN(size / min_sg_len, 512);
504 if (len > max_seg_sz)
512 sg_set_page(sg, mem->arr[i].page, len, 0);
518 } while (sz && repeat);
530 * Map memory into a scatterlist so that no pages are contiguous. Allow the
531 * same memory to be mapped more than once.
533 static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
535 struct scatterlist *sglist,
536 unsigned int max_segs,
537 unsigned int max_seg_sz,
538 unsigned int *sg_len)
540 struct scatterlist *sg = NULL;
541 unsigned int i = mem->cnt, cnt;
543 void *base, *addr, *last_addr = NULL;
545 sg_init_table(sglist, max_segs);
549 base = page_address(mem->arr[--i].page);
550 cnt = 1 << mem->arr[i].order;
552 addr = base + PAGE_SIZE * --cnt;
553 if (last_addr && last_addr + PAGE_SIZE == addr)
557 if (len > max_seg_sz)
567 sg_set_page(sg, virt_to_page(addr), len, 0);
582 * Calculate transfer rate in bytes per second.
584 static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
594 while (ns > UINT_MAX) {
602 do_div(bytes, (uint32_t)ns);
608 * Save transfer results for future usage
610 static void mmc_test_save_transfer_result(struct mmc_test_card *test,
611 unsigned int count, unsigned int sectors, struct timespec ts,
612 unsigned int rate, unsigned int iops)
614 struct mmc_test_transfer_result *tr;
619 tr = kmalloc(sizeof(struct mmc_test_transfer_result), GFP_KERNEL);
624 tr->sectors = sectors;
629 list_add_tail(&tr->link, &test->gr->tr_lst);
633 * Print the transfer rate.
635 static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
636 struct timespec *ts1, struct timespec *ts2)
638 unsigned int rate, iops, sectors = bytes >> 9;
641 ts = timespec_sub(*ts2, *ts1);
643 rate = mmc_test_rate(bytes, &ts);
644 iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */
646 pr_info("%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
647 "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
648 mmc_hostname(test->card->host), sectors, sectors >> 1,
649 (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
650 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
651 iops / 100, iops % 100);
653 mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
657 * Print the average transfer rate.
659 static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
660 unsigned int count, struct timespec *ts1,
661 struct timespec *ts2)
663 unsigned int rate, iops, sectors = bytes >> 9;
664 uint64_t tot = bytes * count;
667 ts = timespec_sub(*ts2, *ts1);
669 rate = mmc_test_rate(tot, &ts);
670 iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */
672 pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
673 "%lu.%09lu seconds (%u kB/s, %u KiB/s, "
674 "%u.%02u IOPS, sg_len %d)\n",
675 mmc_hostname(test->card->host), count, sectors, count,
676 sectors >> 1, (sectors & 1 ? ".5" : ""),
677 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
678 rate / 1000, rate / 1024, iops / 100, iops % 100,
681 mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
685 * Return the card size in sectors.
687 static unsigned int mmc_test_capacity(struct mmc_card *card)
689 if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
690 return card->ext_csd.sectors;
692 return card->csd.capacity << (card->csd.read_blkbits - 9);
/*******************************************************************/
/*  Test preparation and cleanup                                   */
/*******************************************************************/
700 * Fill the first couple of sectors of the card with known data
701 * so that bad reads/writes can be detected
703 static int __mmc_test_prepare(struct mmc_test_card *test, int write)
707 ret = mmc_test_set_blksize(test, 512);
712 memset(test->buffer, 0xDF, 512);
714 for (i = 0;i < 512;i++)
718 for (i = 0;i < BUFFER_SIZE / 512;i++) {
719 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
727 static int mmc_test_prepare_write(struct mmc_test_card *test)
729 return __mmc_test_prepare(test, 1);
732 static int mmc_test_prepare_read(struct mmc_test_card *test)
734 return __mmc_test_prepare(test, 0);
737 static int mmc_test_cleanup(struct mmc_test_card *test)
741 ret = mmc_test_set_blksize(test, 512);
745 memset(test->buffer, 0, 512);
747 for (i = 0;i < BUFFER_SIZE / 512;i++) {
748 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
/*******************************************************************/
/*  Test execution helpers                                         */
/*******************************************************************/
761 * Modifies the mmc_request to perform the "short transfer" tests
763 static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
764 struct mmc_request *mrq, int write)
766 BUG_ON(!mrq || !mrq->cmd || !mrq->data);
768 if (mrq->data->blocks > 1) {
769 mrq->cmd->opcode = write ?
770 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
773 mrq->cmd->opcode = MMC_SEND_STATUS;
774 mrq->cmd->arg = test->card->rca << 16;
779 * Checks that a normal transfer didn't have any errors
781 static int mmc_test_check_result(struct mmc_test_card *test,
782 struct mmc_request *mrq)
786 BUG_ON(!mrq || !mrq->cmd || !mrq->data);
790 if (!ret && mrq->cmd->error)
791 ret = mrq->cmd->error;
792 if (!ret && mrq->data->error)
793 ret = mrq->data->error;
794 if (!ret && mrq->stop && mrq->stop->error)
795 ret = mrq->stop->error;
796 if (!ret && mrq->data->bytes_xfered !=
797 mrq->data->blocks * mrq->data->blksz)
801 ret = RESULT_UNSUP_HOST;
806 static int mmc_test_check_result_async(struct mmc_card *card,
807 struct mmc_async_req *areq)
809 struct mmc_test_async_req *test_async =
810 container_of(areq, struct mmc_test_async_req, areq);
812 mmc_test_wait_busy(test_async->test);
814 return mmc_test_check_result(test_async->test, areq->mrq);
818 * Checks that a "short transfer" behaved as expected
820 static int mmc_test_check_broken_result(struct mmc_test_card *test,
821 struct mmc_request *mrq)
825 BUG_ON(!mrq || !mrq->cmd || !mrq->data);
829 if (!ret && mrq->cmd->error)
830 ret = mrq->cmd->error;
831 if (!ret && mrq->data->error == 0)
833 if (!ret && mrq->data->error != -ETIMEDOUT)
834 ret = mrq->data->error;
835 if (!ret && mrq->stop && mrq->stop->error)
836 ret = mrq->stop->error;
837 if (mrq->data->blocks > 1) {
838 if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
841 if (!ret && mrq->data->bytes_xfered > 0)
846 ret = RESULT_UNSUP_HOST;
852 * Tests nonblock transfer with certain parameters
854 static void mmc_test_nonblock_reset(struct mmc_request *mrq,
855 struct mmc_command *cmd,
856 struct mmc_command *stop,
857 struct mmc_data *data)
859 memset(mrq, 0, sizeof(struct mmc_request));
860 memset(cmd, 0, sizeof(struct mmc_command));
861 memset(data, 0, sizeof(struct mmc_data));
862 memset(stop, 0, sizeof(struct mmc_command));
868 static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
869 struct scatterlist *sg, unsigned sg_len,
870 unsigned dev_addr, unsigned blocks,
871 unsigned blksz, int write, int count)
873 struct mmc_request mrq1;
874 struct mmc_command cmd1;
875 struct mmc_command stop1;
876 struct mmc_data data1;
878 struct mmc_request mrq2;
879 struct mmc_command cmd2;
880 struct mmc_command stop2;
881 struct mmc_data data2;
883 struct mmc_test_async_req test_areq[2];
884 struct mmc_async_req *done_areq;
885 struct mmc_async_req *cur_areq = &test_areq[0].areq;
886 struct mmc_async_req *other_areq = &test_areq[1].areq;
890 test_areq[0].test = test;
891 test_areq[1].test = test;
893 mmc_test_nonblock_reset(&mrq1, &cmd1, &stop1, &data1);
894 mmc_test_nonblock_reset(&mrq2, &cmd2, &stop2, &data2);
896 cur_areq->mrq = &mrq1;
897 cur_areq->err_check = mmc_test_check_result_async;
898 other_areq->mrq = &mrq2;
899 other_areq->err_check = mmc_test_check_result_async;
901 for (i = 0; i < count; i++) {
902 mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr,
903 blocks, blksz, write);
904 done_areq = mmc_start_req(test->card->host, cur_areq, &ret);
906 if (ret || (!done_areq && i > 0))
910 if (done_areq->mrq == &mrq2)
911 mmc_test_nonblock_reset(&mrq2, &cmd2,
914 mmc_test_nonblock_reset(&mrq1, &cmd1,
917 done_areq = cur_areq;
918 cur_areq = other_areq;
919 other_areq = done_areq;
923 done_areq = mmc_start_req(test->card->host, NULL, &ret);
931 * Tests a basic transfer with certain parameters
933 static int mmc_test_simple_transfer(struct mmc_test_card *test,
934 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
935 unsigned blocks, unsigned blksz, int write)
937 struct mmc_request mrq = {0};
938 struct mmc_command cmd = {0};
939 struct mmc_command stop = {0};
940 struct mmc_data data = {0};
946 mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
947 blocks, blksz, write);
949 mmc_wait_for_req(test->card->host, &mrq);
951 mmc_test_wait_busy(test);
953 return mmc_test_check_result(test, &mrq);
957 * Tests a transfer where the card will fail completely or partly
959 static int mmc_test_broken_transfer(struct mmc_test_card *test,
960 unsigned blocks, unsigned blksz, int write)
962 struct mmc_request mrq = {0};
963 struct mmc_command cmd = {0};
964 struct mmc_command stop = {0};
965 struct mmc_data data = {0};
967 struct scatterlist sg;
973 sg_init_one(&sg, test->buffer, blocks * blksz);
975 mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
976 mmc_test_prepare_broken_mrq(test, &mrq, write);
978 mmc_wait_for_req(test->card->host, &mrq);
980 mmc_test_wait_busy(test);
982 return mmc_test_check_broken_result(test, &mrq);
986 * Does a complete transfer test where data is also validated
988 * Note: mmc_test_prepare() must have been done before this call
990 static int mmc_test_transfer(struct mmc_test_card *test,
991 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
992 unsigned blocks, unsigned blksz, int write)
998 for (i = 0;i < blocks * blksz;i++)
999 test->scratch[i] = i;
1001 memset(test->scratch, 0, BUFFER_SIZE);
1003 local_irq_save(flags);
1004 sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
1005 local_irq_restore(flags);
1007 ret = mmc_test_set_blksize(test, blksz);
1011 ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
1012 blocks, blksz, write);
1019 ret = mmc_test_set_blksize(test, 512);
1023 sectors = (blocks * blksz + 511) / 512;
1024 if ((sectors * 512) == (blocks * blksz))
1027 if ((sectors * 512) > BUFFER_SIZE)
1030 memset(test->buffer, 0, sectors * 512);
1032 for (i = 0;i < sectors;i++) {
1033 ret = mmc_test_buffer_transfer(test,
1034 test->buffer + i * 512,
1035 dev_addr + i, 512, 0);
1040 for (i = 0;i < blocks * blksz;i++) {
1041 if (test->buffer[i] != (u8)i)
1045 for (;i < sectors * 512;i++) {
1046 if (test->buffer[i] != 0xDF)
1050 local_irq_save(flags);
1051 sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
1052 local_irq_restore(flags);
1053 for (i = 0;i < blocks * blksz;i++) {
1054 if (test->scratch[i] != (u8)i)
/*******************************************************************/
/*  Tests                                                          */
/*******************************************************************/
/* One entry in the test table: optional prepare/cleanup around run. */
struct mmc_test_case {
	const char *name;

	int (*prepare)(struct mmc_test_card *);
	int (*run)(struct mmc_test_card *);
	int (*cleanup)(struct mmc_test_card *);
};
1074 static int mmc_test_basic_write(struct mmc_test_card *test)
1077 struct scatterlist sg;
1079 ret = mmc_test_set_blksize(test, 512);
1083 sg_init_one(&sg, test->buffer, 512);
1085 ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
1092 static int mmc_test_basic_read(struct mmc_test_card *test)
1095 struct scatterlist sg;
1097 ret = mmc_test_set_blksize(test, 512);
1101 sg_init_one(&sg, test->buffer, 512);
1103 ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
1110 static int mmc_test_verify_write(struct mmc_test_card *test)
1113 struct scatterlist sg;
1115 sg_init_one(&sg, test->buffer, 512);
1117 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1124 static int mmc_test_verify_read(struct mmc_test_card *test)
1127 struct scatterlist sg;
1129 sg_init_one(&sg, test->buffer, 512);
1131 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1138 static int mmc_test_multi_write(struct mmc_test_card *test)
1142 struct scatterlist sg;
1144 if (test->card->host->max_blk_count == 1)
1145 return RESULT_UNSUP_HOST;
1147 size = PAGE_SIZE * 2;
1148 size = min(size, test->card->host->max_req_size);
1149 size = min(size, test->card->host->max_seg_size);
1150 size = min(size, test->card->host->max_blk_count * 512);
1153 return RESULT_UNSUP_HOST;
1155 sg_init_one(&sg, test->buffer, size);
1157 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1164 static int mmc_test_multi_read(struct mmc_test_card *test)
1168 struct scatterlist sg;
1170 if (test->card->host->max_blk_count == 1)
1171 return RESULT_UNSUP_HOST;
1173 size = PAGE_SIZE * 2;
1174 size = min(size, test->card->host->max_req_size);
1175 size = min(size, test->card->host->max_seg_size);
1176 size = min(size, test->card->host->max_blk_count * 512);
1179 return RESULT_UNSUP_HOST;
1181 sg_init_one(&sg, test->buffer, size);
1183 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1190 static int mmc_test_pow2_write(struct mmc_test_card *test)
1193 struct scatterlist sg;
1195 if (!test->card->csd.write_partial)
1196 return RESULT_UNSUP_CARD;
1198 for (i = 1; i < 512;i <<= 1) {
1199 sg_init_one(&sg, test->buffer, i);
1200 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
1208 static int mmc_test_pow2_read(struct mmc_test_card *test)
1211 struct scatterlist sg;
1213 if (!test->card->csd.read_partial)
1214 return RESULT_UNSUP_CARD;
1216 for (i = 1; i < 512;i <<= 1) {
1217 sg_init_one(&sg, test->buffer, i);
1218 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
1226 static int mmc_test_weird_write(struct mmc_test_card *test)
1229 struct scatterlist sg;
1231 if (!test->card->csd.write_partial)
1232 return RESULT_UNSUP_CARD;
1234 for (i = 3; i < 512;i += 7) {
1235 sg_init_one(&sg, test->buffer, i);
1236 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
1244 static int mmc_test_weird_read(struct mmc_test_card *test)
1247 struct scatterlist sg;
1249 if (!test->card->csd.read_partial)
1250 return RESULT_UNSUP_CARD;
1252 for (i = 3; i < 512;i += 7) {
1253 sg_init_one(&sg, test->buffer, i);
1254 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
1262 static int mmc_test_align_write(struct mmc_test_card *test)
1265 struct scatterlist sg;
1267 for (i = 1;i < 4;i++) {
1268 sg_init_one(&sg, test->buffer + i, 512);
1269 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1277 static int mmc_test_align_read(struct mmc_test_card *test)
1280 struct scatterlist sg;
1282 for (i = 1;i < 4;i++) {
1283 sg_init_one(&sg, test->buffer + i, 512);
1284 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1292 static int mmc_test_align_multi_write(struct mmc_test_card *test)
1296 struct scatterlist sg;
1298 if (test->card->host->max_blk_count == 1)
1299 return RESULT_UNSUP_HOST;
1301 size = PAGE_SIZE * 2;
1302 size = min(size, test->card->host->max_req_size);
1303 size = min(size, test->card->host->max_seg_size);
1304 size = min(size, test->card->host->max_blk_count * 512);
1307 return RESULT_UNSUP_HOST;
1309 for (i = 1;i < 4;i++) {
1310 sg_init_one(&sg, test->buffer + i, size);
1311 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1319 static int mmc_test_align_multi_read(struct mmc_test_card *test)
1323 struct scatterlist sg;
1325 if (test->card->host->max_blk_count == 1)
1326 return RESULT_UNSUP_HOST;
1328 size = PAGE_SIZE * 2;
1329 size = min(size, test->card->host->max_req_size);
1330 size = min(size, test->card->host->max_seg_size);
1331 size = min(size, test->card->host->max_blk_count * 512);
1334 return RESULT_UNSUP_HOST;
1336 for (i = 1;i < 4;i++) {
1337 sg_init_one(&sg, test->buffer + i, size);
1338 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1346 static int mmc_test_xfersize_write(struct mmc_test_card *test)
1350 ret = mmc_test_set_blksize(test, 512);
1354 ret = mmc_test_broken_transfer(test, 1, 512, 1);
1361 static int mmc_test_xfersize_read(struct mmc_test_card *test)
1365 ret = mmc_test_set_blksize(test, 512);
1369 ret = mmc_test_broken_transfer(test, 1, 512, 0);
1376 static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
1380 if (test->card->host->max_blk_count == 1)
1381 return RESULT_UNSUP_HOST;
1383 ret = mmc_test_set_blksize(test, 512);
1387 ret = mmc_test_broken_transfer(test, 2, 512, 1);
1394 static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
1398 if (test->card->host->max_blk_count == 1)
1399 return RESULT_UNSUP_HOST;
1401 ret = mmc_test_set_blksize(test, 512);
1405 ret = mmc_test_broken_transfer(test, 2, 512, 0);
1412 #ifdef CONFIG_HIGHMEM
1414 static int mmc_test_write_high(struct mmc_test_card *test)
1417 struct scatterlist sg;
1419 sg_init_table(&sg, 1);
1420 sg_set_page(&sg, test->highmem, 512, 0);
1422 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1429 static int mmc_test_read_high(struct mmc_test_card *test)
1432 struct scatterlist sg;
1434 sg_init_table(&sg, 1);
1435 sg_set_page(&sg, test->highmem, 512, 0);
1437 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1444 static int mmc_test_multi_write_high(struct mmc_test_card *test)
1448 struct scatterlist sg;
1450 if (test->card->host->max_blk_count == 1)
1451 return RESULT_UNSUP_HOST;
1453 size = PAGE_SIZE * 2;
1454 size = min(size, test->card->host->max_req_size);
1455 size = min(size, test->card->host->max_seg_size);
1456 size = min(size, test->card->host->max_blk_count * 512);
1459 return RESULT_UNSUP_HOST;
1461 sg_init_table(&sg, 1);
1462 sg_set_page(&sg, test->highmem, size, 0);
1464 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1471 static int mmc_test_multi_read_high(struct mmc_test_card *test)
1475 struct scatterlist sg;
1477 if (test->card->host->max_blk_count == 1)
1478 return RESULT_UNSUP_HOST;
1480 size = PAGE_SIZE * 2;
1481 size = min(size, test->card->host->max_req_size);
1482 size = min(size, test->card->host->max_seg_size);
1483 size = min(size, test->card->host->max_blk_count * 512);
1486 return RESULT_UNSUP_HOST;
1488 sg_init_table(&sg, 1);
1489 sg_set_page(&sg, test->highmem, size, 0);
1491 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1500 static int mmc_test_no_highmem(struct mmc_test_card *test)
1502 pr_info("%s: Highmem not configured - test skipped\n",
1503 mmc_hostname(test->card->host));
1507 #endif /* CONFIG_HIGHMEM */
1510 * Map sz bytes so that it can be transferred.
1512 static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
1513 int max_scatter, int min_sg_len)
1515 struct mmc_test_area *t = &test->area;
1518 t->blocks = sz >> 9;
1521 err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
1522 t->max_segs, t->max_seg_sz,
1525 err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
1526 t->max_seg_sz, &t->sg_len, min_sg_len);
1529 pr_info("%s: Failed to map sg list\n",
1530 mmc_hostname(test->card->host));
1535 * Transfer bytes mapped by mmc_test_area_map().
1537 static int mmc_test_area_transfer(struct mmc_test_card *test,
1538 unsigned int dev_addr, int write)
1540 struct mmc_test_area *t = &test->area;
1542 return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
1543 t->blocks, 512, write);
1547 * Map and transfer bytes for multiple transfers.
1549 static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
1550 unsigned int dev_addr, int write,
1551 int max_scatter, int timed, int count,
1552 bool nonblock, int min_sg_len)
1554 struct timespec ts1, ts2;
1557 struct mmc_test_area *t = &test->area;
1560 * In the case of a maximally scattered transfer, the maximum transfer
1561 * size is further limited by using PAGE_SIZE segments.
1564 struct mmc_test_area *t = &test->area;
1565 unsigned long max_tfr;
1567 if (t->max_seg_sz >= PAGE_SIZE)
1568 max_tfr = t->max_segs * PAGE_SIZE;
1570 max_tfr = t->max_segs * t->max_seg_sz;
1575 ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len);
1580 getnstimeofday(&ts1);
1582 ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
1583 dev_addr, t->blocks, 512, write, count);
1585 for (i = 0; i < count && ret == 0; i++) {
1586 ret = mmc_test_area_transfer(test, dev_addr, write);
1587 dev_addr += sz >> 9;
1594 getnstimeofday(&ts2);
1597 mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);
1602 static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
1603 unsigned int dev_addr, int write, int max_scatter,
1606 return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
1607 timed, 1, false, 0);
1611 * Write the test area entirely.
1613 static int mmc_test_area_fill(struct mmc_test_card *test)
1615 struct mmc_test_area *t = &test->area;
1617 return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
1621 * Erase the test area entirely.
1623 static int mmc_test_area_erase(struct mmc_test_card *test)
1625 struct mmc_test_area *t = &test->area;
1627 if (!mmc_can_erase(test->card))
1630 return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
1635 * Cleanup struct mmc_test_area.
1637 static int mmc_test_area_cleanup(struct mmc_test_card *test)
1639 struct mmc_test_area *t = &test->area;
1642 mmc_test_free_mem(t->mem);
1648 * Initialize an area for testing large transfers. The test area is set to the
1649 * middle of the card because cards may have different charateristics at the
1650 * front (for FAT file system optimization). Optionally, the area is erased
1651 * (if the card supports it) which may improve write performance. Optionally,
1652 * the area is filled with data for subsequent read tests.
1654 static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
1656 struct mmc_test_area *t = &test->area;
1657 unsigned long min_sz = 64 * 1024, sz;
1660 ret = mmc_test_set_blksize(test, 512);
1664 /* Make the test area size about 4MiB */
1665 sz = (unsigned long)test->card->pref_erase << 9;
1667 while (t->max_sz < 4 * 1024 * 1024)
1669 while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
1672 t->max_segs = test->card->host->max_segs;
1673 t->max_seg_sz = test->card->host->max_seg_size;
1674 t->max_seg_sz -= t->max_seg_sz % 512;
1676 t->max_tfr = t->max_sz;
1677 if (t->max_tfr >> 9 > test->card->host->max_blk_count)
1678 t->max_tfr = test->card->host->max_blk_count << 9;
1679 if (t->max_tfr > test->card->host->max_req_size)
1680 t->max_tfr = test->card->host->max_req_size;
1681 if (t->max_tfr / t->max_seg_sz > t->max_segs)
1682 t->max_tfr = t->max_segs * t->max_seg_sz;
1685 * Try to allocate enough memory for a max. sized transfer. Less is OK
1686 * because the same memory can be mapped into the scatterlist more than
1687 * once. Also, take into account the limits imposed on scatterlist
1688 * segments by the host driver.
1690 t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
1695 t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
1701 t->dev_addr = mmc_test_capacity(test->card) / 2;
1702 t->dev_addr -= t->dev_addr % (t->max_sz >> 9);
1705 ret = mmc_test_area_erase(test);
1711 ret = mmc_test_area_fill(test);
1719 mmc_test_area_cleanup(test);
/*
 * Prepare for large transfers.  Do not erase the test area.
 */
static int mmc_test_area_prepare(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 0, 0);
}
/*
 * Prepare for large transfers.  Do erase the test area.
 */
static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 0);
}
/*
 * Prepare for large transfers.  Erase and fill the test area.
 */
static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 1);
}
1748 * Test best-case performance. Best-case performance is expected from
1749 * a single large transfer.
1751 * An additional option (max_scatter) allows the measurement of the same
1752 * transfer but with no contiguous pages in the scatter list. This tests
1753 * the efficiency of DMA to handle scattered pages.
1755 static int mmc_test_best_performance(struct mmc_test_card *test, int write,
1758 struct mmc_test_area *t = &test->area;
1760 return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
/*
 * Best-case read performance.
 */
static int mmc_test_best_read_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 0);
}
/*
 * Best-case write performance.
 */
static int mmc_test_best_write_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 0);
}
/*
 * Best-case read performance into scattered pages.
 */
static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 1);
}
/*
 * Best-case write performance from scattered pages.
 */
static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 1);
}
1797 * Single read performance by transfer size.
1799 static int mmc_test_profile_read_perf(struct mmc_test_card *test)
1801 struct mmc_test_area *t = &test->area;
1803 unsigned int dev_addr;
1806 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1807 dev_addr = t->dev_addr + (sz >> 9);
1808 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1813 dev_addr = t->dev_addr;
1814 return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1818 * Single write performance by transfer size.
1820 static int mmc_test_profile_write_perf(struct mmc_test_card *test)
1822 struct mmc_test_area *t = &test->area;
1824 unsigned int dev_addr;
1827 ret = mmc_test_area_erase(test);
1830 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1831 dev_addr = t->dev_addr + (sz >> 9);
1832 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1836 ret = mmc_test_area_erase(test);
1840 dev_addr = t->dev_addr;
1841 return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1845 * Single trim performance by transfer size.
1847 static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
1849 struct mmc_test_area *t = &test->area;
1851 unsigned int dev_addr;
1852 struct timespec ts1, ts2;
1855 if (!mmc_can_trim(test->card))
1856 return RESULT_UNSUP_CARD;
1858 if (!mmc_can_erase(test->card))
1859 return RESULT_UNSUP_HOST;
1861 for (sz = 512; sz < t->max_sz; sz <<= 1) {
1862 dev_addr = t->dev_addr + (sz >> 9);
1863 getnstimeofday(&ts1);
1864 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1867 getnstimeofday(&ts2);
1868 mmc_test_print_rate(test, sz, &ts1, &ts2);
1870 dev_addr = t->dev_addr;
1871 getnstimeofday(&ts1);
1872 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1875 getnstimeofday(&ts2);
1876 mmc_test_print_rate(test, sz, &ts1, &ts2);
1880 static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
1882 struct mmc_test_area *t = &test->area;
1883 unsigned int dev_addr, i, cnt;
1884 struct timespec ts1, ts2;
1887 cnt = t->max_sz / sz;
1888 dev_addr = t->dev_addr;
1889 getnstimeofday(&ts1);
1890 for (i = 0; i < cnt; i++) {
1891 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
1894 dev_addr += (sz >> 9);
1896 getnstimeofday(&ts2);
1897 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1902 * Consecutive read performance by transfer size.
1904 static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
1906 struct mmc_test_area *t = &test->area;
1910 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1911 ret = mmc_test_seq_read_perf(test, sz);
1916 return mmc_test_seq_read_perf(test, sz);
1919 static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
1921 struct mmc_test_area *t = &test->area;
1922 unsigned int dev_addr, i, cnt;
1923 struct timespec ts1, ts2;
1926 ret = mmc_test_area_erase(test);
1929 cnt = t->max_sz / sz;
1930 dev_addr = t->dev_addr;
1931 getnstimeofday(&ts1);
1932 for (i = 0; i < cnt; i++) {
1933 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
1936 dev_addr += (sz >> 9);
1938 getnstimeofday(&ts2);
1939 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1944 * Consecutive write performance by transfer size.
1946 static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
1948 struct mmc_test_area *t = &test->area;
1952 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1953 ret = mmc_test_seq_write_perf(test, sz);
1958 return mmc_test_seq_write_perf(test, sz);
1962 * Consecutive trim performance by transfer size.
1964 static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
1966 struct mmc_test_area *t = &test->area;
1968 unsigned int dev_addr, i, cnt;
1969 struct timespec ts1, ts2;
1972 if (!mmc_can_trim(test->card))
1973 return RESULT_UNSUP_CARD;
1975 if (!mmc_can_erase(test->card))
1976 return RESULT_UNSUP_HOST;
1978 for (sz = 512; sz <= t->max_sz; sz <<= 1) {
1979 ret = mmc_test_area_erase(test);
1982 ret = mmc_test_area_fill(test);
1985 cnt = t->max_sz / sz;
1986 dev_addr = t->dev_addr;
1987 getnstimeofday(&ts1);
1988 for (i = 0; i < cnt; i++) {
1989 ret = mmc_erase(test->card, dev_addr, sz >> 9,
1993 dev_addr += (sz >> 9);
1995 getnstimeofday(&ts2);
1996 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
/* State of the deterministic pseudo-random generator below. */
static unsigned int rnd_next = 1;

/*
 * Return a pseudo-random number in [0, rnd_cnt).  Uses the classic
 * "weak" LCG (multiplier 1103515245, increment 12345) so runs are
 * reproducible; the top 15 bits of the state are scaled to the range.
 */
static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
{
	unsigned int r;

	rnd_next = rnd_next * 1103515245 + 12345;
	r = (rnd_next >> 16) & 0x7fff;
	return (r * rnd_cnt) >> 15;
}
2012 static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
2015 unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
2017 struct timespec ts1, ts2, ts;
2022 rnd_addr = mmc_test_capacity(test->card) / 4;
2023 range1 = rnd_addr / test->card->pref_erase;
2024 range2 = range1 / ssz;
2026 getnstimeofday(&ts1);
2027 for (cnt = 0; cnt < UINT_MAX; cnt++) {
2028 getnstimeofday(&ts2);
2029 ts = timespec_sub(ts2, ts1);
2030 if (ts.tv_sec >= 10)
2032 ea = mmc_test_rnd_num(range1);
2036 dev_addr = rnd_addr + test->card->pref_erase * ea +
2037 ssz * mmc_test_rnd_num(range2);
2038 ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
2043 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
2047 static int mmc_test_random_perf(struct mmc_test_card *test, int write)
2049 struct mmc_test_area *t = &test->area;
2054 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
2056 * When writing, try to get more consistent results by running
2057 * the test twice with exactly the same I/O but outputting the
2058 * results only for the 2nd run.
2062 ret = mmc_test_rnd_perf(test, write, 0, sz);
2067 ret = mmc_test_rnd_perf(test, write, 1, sz);
2074 ret = mmc_test_rnd_perf(test, write, 0, sz);
2079 return mmc_test_rnd_perf(test, write, 1, sz);
/*
 * Random read performance by transfer size.
 */
static int mmc_test_random_read_perf(struct mmc_test_card *test)
{
	return mmc_test_random_perf(test, 0);
}
/*
 * Random write performance by transfer size.
 */
static int mmc_test_random_write_perf(struct mmc_test_card *test)
{
	return mmc_test_random_perf(test, 1);
}
2098 static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
2099 unsigned int tot_sz, int max_scatter)
2101 struct mmc_test_area *t = &test->area;
2102 unsigned int dev_addr, i, cnt, sz, ssz;
2103 struct timespec ts1, ts2;
2109 * In the case of a maximally scattered transfer, the maximum transfer
2110 * size is further limited by using PAGE_SIZE segments.
2113 unsigned long max_tfr;
2115 if (t->max_seg_sz >= PAGE_SIZE)
2116 max_tfr = t->max_segs * PAGE_SIZE;
2118 max_tfr = t->max_segs * t->max_seg_sz;
2124 dev_addr = mmc_test_capacity(test->card) / 4;
2125 if (tot_sz > dev_addr << 9)
2126 tot_sz = dev_addr << 9;
2128 dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
2130 getnstimeofday(&ts1);
2131 for (i = 0; i < cnt; i++) {
2132 ret = mmc_test_area_io(test, sz, dev_addr, write,
2138 getnstimeofday(&ts2);
2140 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
/*
 * Run sequential transfers of increasing total size (10MiB x10,
 * 100MiB x5, 1000MiB x3) using maximally scattered pages.
 */
static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
{
	int ret, i;

	for (i = 0; i < 10; i++) {
		ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}
	for (i = 0; i < 5; i++) {
		ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}
	for (i = 0; i < 3; i++) {
		ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}

	return ret;
}
/*
 * Large sequential read performance.
 */
static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
{
	return mmc_test_large_seq_perf(test, 0);
}
/*
 * Large sequential write performance.
 */
static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
{
	return mmc_test_large_seq_perf(test, 1);
}
2184 static int mmc_test_rw_multiple(struct mmc_test_card *test,
2185 struct mmc_test_multiple_rw *tdata,
2186 unsigned int reqsize, unsigned int size,
2189 unsigned int dev_addr;
2190 struct mmc_test_area *t = &test->area;
2193 /* Set up test area */
2194 if (size > mmc_test_capacity(test->card) / 2 * 512)
2195 size = mmc_test_capacity(test->card) / 2 * 512;
2196 if (reqsize > t->max_tfr)
2197 reqsize = t->max_tfr;
2198 dev_addr = mmc_test_capacity(test->card) / 4;
2199 if ((dev_addr & 0xffff0000))
2200 dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
2202 dev_addr &= 0xfffff800; /* Round to 1MiB boundary */
2209 /* prepare test area */
2210 if (mmc_can_erase(test->card) &&
2211 tdata->prepare & MMC_TEST_PREP_ERASE) {
2212 ret = mmc_erase(test->card, dev_addr,
2213 size / 512, MMC_SECURE_ERASE_ARG);
2215 ret = mmc_erase(test->card, dev_addr,
2216 size / 512, MMC_ERASE_ARG);
2222 ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
2223 tdata->do_write, 0, 1, size / reqsize,
2224 tdata->do_nonblock_req, min_sg_len);
2230 pr_info("[%s] error\n", __func__);
2234 static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
2235 struct mmc_test_multiple_rw *rw)
2239 void *pre_req = test->card->host->ops->pre_req;
2240 void *post_req = test->card->host->ops->post_req;
2242 if (rw->do_nonblock_req &&
2243 ((!pre_req && post_req) || (pre_req && !post_req))) {
2244 pr_info("error: only one of pre/post is defined\n");
2248 for (i = 0 ; i < rw->len && ret == 0; i++) {
2249 ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0);
2256 static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
2257 struct mmc_test_multiple_rw *rw)
2262 for (i = 0 ; i < rw->len && ret == 0; i++) {
2263 ret = mmc_test_rw_multiple(test, rw, 512*1024, rw->size,
2272 * Multiple blocking write 4k to 4 MB chunks
2274 static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
2276 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2277 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2278 struct mmc_test_multiple_rw test_data = {
2280 .size = TEST_AREA_MAX_SIZE,
2281 .len = ARRAY_SIZE(bs),
2283 .do_nonblock_req = false,
2284 .prepare = MMC_TEST_PREP_ERASE,
2287 return mmc_test_rw_multiple_size(test, &test_data);
2291 * Multiple non-blocking write 4k to 4 MB chunks
2293 static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
2295 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2296 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2297 struct mmc_test_multiple_rw test_data = {
2299 .size = TEST_AREA_MAX_SIZE,
2300 .len = ARRAY_SIZE(bs),
2302 .do_nonblock_req = true,
2303 .prepare = MMC_TEST_PREP_ERASE,
2306 return mmc_test_rw_multiple_size(test, &test_data);
2310 * Multiple blocking read 4k to 4 MB chunks
2312 static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
2314 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2315 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2316 struct mmc_test_multiple_rw test_data = {
2318 .size = TEST_AREA_MAX_SIZE,
2319 .len = ARRAY_SIZE(bs),
2321 .do_nonblock_req = false,
2322 .prepare = MMC_TEST_PREP_NONE,
2325 return mmc_test_rw_multiple_size(test, &test_data);
2329 * Multiple non-blocking read 4k to 4 MB chunks
2331 static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
2333 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2334 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2335 struct mmc_test_multiple_rw test_data = {
2337 .size = TEST_AREA_MAX_SIZE,
2338 .len = ARRAY_SIZE(bs),
2340 .do_nonblock_req = true,
2341 .prepare = MMC_TEST_PREP_NONE,
2344 return mmc_test_rw_multiple_size(test, &test_data);
2348 * Multiple blocking write 1 to 512 sg elements
2350 static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test)
2352 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2353 1 << 7, 1 << 8, 1 << 9};
2354 struct mmc_test_multiple_rw test_data = {
2356 .size = TEST_AREA_MAX_SIZE,
2357 .len = ARRAY_SIZE(sg_len),
2359 .do_nonblock_req = false,
2360 .prepare = MMC_TEST_PREP_ERASE,
2363 return mmc_test_rw_multiple_sg_len(test, &test_data);
2367 * Multiple non-blocking write 1 to 512 sg elements
2369 static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test)
2371 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2372 1 << 7, 1 << 8, 1 << 9};
2373 struct mmc_test_multiple_rw test_data = {
2375 .size = TEST_AREA_MAX_SIZE,
2376 .len = ARRAY_SIZE(sg_len),
2378 .do_nonblock_req = true,
2379 .prepare = MMC_TEST_PREP_ERASE,
2382 return mmc_test_rw_multiple_sg_len(test, &test_data);
2386 * Multiple blocking read 1 to 512 sg elements
2388 static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test)
2390 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2391 1 << 7, 1 << 8, 1 << 9};
2392 struct mmc_test_multiple_rw test_data = {
2394 .size = TEST_AREA_MAX_SIZE,
2395 .len = ARRAY_SIZE(sg_len),
2397 .do_nonblock_req = false,
2398 .prepare = MMC_TEST_PREP_NONE,
2401 return mmc_test_rw_multiple_sg_len(test, &test_data);
2405 * Multiple non-blocking read 1 to 512 sg elements
2407 static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
2409 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2410 1 << 7, 1 << 8, 1 << 9};
2411 struct mmc_test_multiple_rw test_data = {
2413 .size = TEST_AREA_MAX_SIZE,
2414 .len = ARRAY_SIZE(sg_len),
2416 .do_nonblock_req = true,
2417 .prepare = MMC_TEST_PREP_NONE,
2420 return mmc_test_rw_multiple_sg_len(test, &test_data);
2424 * eMMC hardware reset.
2426 static int mmc_test_hw_reset(struct mmc_test_card *test)
2428 struct mmc_card *card = test->card;
2429 struct mmc_host *host = card->host;
2432 err = mmc_hw_reset_check(host);
2439 if (err != -EOPNOTSUPP)
2442 if (!mmc_can_reset(card))
2443 return RESULT_UNSUP_CARD;
2445 return RESULT_UNSUP_HOST;
2448 static const struct mmc_test_case mmc_test_cases[] = {
2450 .name = "Basic write (no data verification)",
2451 .run = mmc_test_basic_write,
2455 .name = "Basic read (no data verification)",
2456 .run = mmc_test_basic_read,
2460 .name = "Basic write (with data verification)",
2461 .prepare = mmc_test_prepare_write,
2462 .run = mmc_test_verify_write,
2463 .cleanup = mmc_test_cleanup,
2467 .name = "Basic read (with data verification)",
2468 .prepare = mmc_test_prepare_read,
2469 .run = mmc_test_verify_read,
2470 .cleanup = mmc_test_cleanup,
2474 .name = "Multi-block write",
2475 .prepare = mmc_test_prepare_write,
2476 .run = mmc_test_multi_write,
2477 .cleanup = mmc_test_cleanup,
2481 .name = "Multi-block read",
2482 .prepare = mmc_test_prepare_read,
2483 .run = mmc_test_multi_read,
2484 .cleanup = mmc_test_cleanup,
2488 .name = "Power of two block writes",
2489 .prepare = mmc_test_prepare_write,
2490 .run = mmc_test_pow2_write,
2491 .cleanup = mmc_test_cleanup,
2495 .name = "Power of two block reads",
2496 .prepare = mmc_test_prepare_read,
2497 .run = mmc_test_pow2_read,
2498 .cleanup = mmc_test_cleanup,
2502 .name = "Weird sized block writes",
2503 .prepare = mmc_test_prepare_write,
2504 .run = mmc_test_weird_write,
2505 .cleanup = mmc_test_cleanup,
2509 .name = "Weird sized block reads",
2510 .prepare = mmc_test_prepare_read,
2511 .run = mmc_test_weird_read,
2512 .cleanup = mmc_test_cleanup,
2516 .name = "Badly aligned write",
2517 .prepare = mmc_test_prepare_write,
2518 .run = mmc_test_align_write,
2519 .cleanup = mmc_test_cleanup,
2523 .name = "Badly aligned read",
2524 .prepare = mmc_test_prepare_read,
2525 .run = mmc_test_align_read,
2526 .cleanup = mmc_test_cleanup,
2530 .name = "Badly aligned multi-block write",
2531 .prepare = mmc_test_prepare_write,
2532 .run = mmc_test_align_multi_write,
2533 .cleanup = mmc_test_cleanup,
2537 .name = "Badly aligned multi-block read",
2538 .prepare = mmc_test_prepare_read,
2539 .run = mmc_test_align_multi_read,
2540 .cleanup = mmc_test_cleanup,
2544 .name = "Correct xfer_size at write (start failure)",
2545 .run = mmc_test_xfersize_write,
2549 .name = "Correct xfer_size at read (start failure)",
2550 .run = mmc_test_xfersize_read,
2554 .name = "Correct xfer_size at write (midway failure)",
2555 .run = mmc_test_multi_xfersize_write,
2559 .name = "Correct xfer_size at read (midway failure)",
2560 .run = mmc_test_multi_xfersize_read,
2563 #ifdef CONFIG_HIGHMEM
2566 .name = "Highmem write",
2567 .prepare = mmc_test_prepare_write,
2568 .run = mmc_test_write_high,
2569 .cleanup = mmc_test_cleanup,
2573 .name = "Highmem read",
2574 .prepare = mmc_test_prepare_read,
2575 .run = mmc_test_read_high,
2576 .cleanup = mmc_test_cleanup,
2580 .name = "Multi-block highmem write",
2581 .prepare = mmc_test_prepare_write,
2582 .run = mmc_test_multi_write_high,
2583 .cleanup = mmc_test_cleanup,
2587 .name = "Multi-block highmem read",
2588 .prepare = mmc_test_prepare_read,
2589 .run = mmc_test_multi_read_high,
2590 .cleanup = mmc_test_cleanup,
2596 .name = "Highmem write",
2597 .run = mmc_test_no_highmem,
2601 .name = "Highmem read",
2602 .run = mmc_test_no_highmem,
2606 .name = "Multi-block highmem write",
2607 .run = mmc_test_no_highmem,
2611 .name = "Multi-block highmem read",
2612 .run = mmc_test_no_highmem,
2615 #endif /* CONFIG_HIGHMEM */
2618 .name = "Best-case read performance",
2619 .prepare = mmc_test_area_prepare_fill,
2620 .run = mmc_test_best_read_performance,
2621 .cleanup = mmc_test_area_cleanup,
2625 .name = "Best-case write performance",
2626 .prepare = mmc_test_area_prepare_erase,
2627 .run = mmc_test_best_write_performance,
2628 .cleanup = mmc_test_area_cleanup,
2632 .name = "Best-case read performance into scattered pages",
2633 .prepare = mmc_test_area_prepare_fill,
2634 .run = mmc_test_best_read_perf_max_scatter,
2635 .cleanup = mmc_test_area_cleanup,
2639 .name = "Best-case write performance from scattered pages",
2640 .prepare = mmc_test_area_prepare_erase,
2641 .run = mmc_test_best_write_perf_max_scatter,
2642 .cleanup = mmc_test_area_cleanup,
2646 .name = "Single read performance by transfer size",
2647 .prepare = mmc_test_area_prepare_fill,
2648 .run = mmc_test_profile_read_perf,
2649 .cleanup = mmc_test_area_cleanup,
2653 .name = "Single write performance by transfer size",
2654 .prepare = mmc_test_area_prepare,
2655 .run = mmc_test_profile_write_perf,
2656 .cleanup = mmc_test_area_cleanup,
2660 .name = "Single trim performance by transfer size",
2661 .prepare = mmc_test_area_prepare_fill,
2662 .run = mmc_test_profile_trim_perf,
2663 .cleanup = mmc_test_area_cleanup,
2667 .name = "Consecutive read performance by transfer size",
2668 .prepare = mmc_test_area_prepare_fill,
2669 .run = mmc_test_profile_seq_read_perf,
2670 .cleanup = mmc_test_area_cleanup,
2674 .name = "Consecutive write performance by transfer size",
2675 .prepare = mmc_test_area_prepare,
2676 .run = mmc_test_profile_seq_write_perf,
2677 .cleanup = mmc_test_area_cleanup,
2681 .name = "Consecutive trim performance by transfer size",
2682 .prepare = mmc_test_area_prepare,
2683 .run = mmc_test_profile_seq_trim_perf,
2684 .cleanup = mmc_test_area_cleanup,
2688 .name = "Random read performance by transfer size",
2689 .prepare = mmc_test_area_prepare,
2690 .run = mmc_test_random_read_perf,
2691 .cleanup = mmc_test_area_cleanup,
2695 .name = "Random write performance by transfer size",
2696 .prepare = mmc_test_area_prepare,
2697 .run = mmc_test_random_write_perf,
2698 .cleanup = mmc_test_area_cleanup,
2702 .name = "Large sequential read into scattered pages",
2703 .prepare = mmc_test_area_prepare,
2704 .run = mmc_test_large_seq_read_perf,
2705 .cleanup = mmc_test_area_cleanup,
2709 .name = "Large sequential write from scattered pages",
2710 .prepare = mmc_test_area_prepare,
2711 .run = mmc_test_large_seq_write_perf,
2712 .cleanup = mmc_test_area_cleanup,
2716 .name = "Write performance with blocking req 4k to 4MB",
2717 .prepare = mmc_test_area_prepare,
2718 .run = mmc_test_profile_mult_write_blocking_perf,
2719 .cleanup = mmc_test_area_cleanup,
2723 .name = "Write performance with non-blocking req 4k to 4MB",
2724 .prepare = mmc_test_area_prepare,
2725 .run = mmc_test_profile_mult_write_nonblock_perf,
2726 .cleanup = mmc_test_area_cleanup,
2730 .name = "Read performance with blocking req 4k to 4MB",
2731 .prepare = mmc_test_area_prepare,
2732 .run = mmc_test_profile_mult_read_blocking_perf,
2733 .cleanup = mmc_test_area_cleanup,
2737 .name = "Read performance with non-blocking req 4k to 4MB",
2738 .prepare = mmc_test_area_prepare,
2739 .run = mmc_test_profile_mult_read_nonblock_perf,
2740 .cleanup = mmc_test_area_cleanup,
2744 .name = "Write performance blocking req 1 to 512 sg elems",
2745 .prepare = mmc_test_area_prepare,
2746 .run = mmc_test_profile_sglen_wr_blocking_perf,
2747 .cleanup = mmc_test_area_cleanup,
2751 .name = "Write performance non-blocking req 1 to 512 sg elems",
2752 .prepare = mmc_test_area_prepare,
2753 .run = mmc_test_profile_sglen_wr_nonblock_perf,
2754 .cleanup = mmc_test_area_cleanup,
2758 .name = "Read performance blocking req 1 to 512 sg elems",
2759 .prepare = mmc_test_area_prepare,
2760 .run = mmc_test_profile_sglen_r_blocking_perf,
2761 .cleanup = mmc_test_area_cleanup,
2765 .name = "Read performance non-blocking req 1 to 512 sg elems",
2766 .prepare = mmc_test_area_prepare,
2767 .run = mmc_test_profile_sglen_r_nonblock_perf,
2768 .cleanup = mmc_test_area_cleanup,
2772 .name = "eMMC hardware reset",
2773 .run = mmc_test_hw_reset,
/* Serializes test runs and protects the global result list below. */
static DEFINE_MUTEX(mmc_test_lock);

/* Results of all completed test runs, across all cards. */
static LIST_HEAD(mmc_test_result);
2781 static void mmc_test_run(struct mmc_test_card *test, int testcase)
2785 pr_info("%s: Starting tests of card %s...\n",
2786 mmc_hostname(test->card->host), mmc_card_id(test->card));
2788 mmc_claim_host(test->card->host);
2790 mmc_test_set_parameters(test);
2792 for (i = 0;i < ARRAY_SIZE(mmc_test_cases);i++) {
2793 struct mmc_test_general_result *gr;
2795 if (testcase && ((i + 1) != testcase))
2798 pr_info("%s: Test case %d. %s...\n",
2799 mmc_hostname(test->card->host), i + 1,
2800 mmc_test_cases[i].name);
2802 if (mmc_test_cases[i].prepare) {
2803 ret = mmc_test_cases[i].prepare(test);
2805 pr_info("%s: Result: Prepare "
2806 "stage failed! (%d)\n",
2807 mmc_hostname(test->card->host),
2813 gr = kzalloc(sizeof(struct mmc_test_general_result),
2816 INIT_LIST_HEAD(&gr->tr_lst);
2818 /* Assign data what we know already */
2819 gr->card = test->card;
2822 /* Append container to global one */
2823 list_add_tail(&gr->link, &mmc_test_result);
2826 * Save the pointer to created container in our private
2832 ret = mmc_test_cases[i].run(test);
2835 pr_info("%s: Result: OK\n",
2836 mmc_hostname(test->card->host));
2839 pr_info("%s: Result: FAILED\n",
2840 mmc_hostname(test->card->host));
2842 case RESULT_UNSUP_HOST:
2843 pr_info("%s: Result: UNSUPPORTED "
2845 mmc_hostname(test->card->host));
2847 case RESULT_UNSUP_CARD:
2848 pr_info("%s: Result: UNSUPPORTED "
2850 mmc_hostname(test->card->host));
2853 pr_info("%s: Result: ERROR (%d)\n",
2854 mmc_hostname(test->card->host), ret);
2857 /* Save the result */
2861 if (mmc_test_cases[i].cleanup) {
2862 ret = mmc_test_cases[i].cleanup(test);
2864 pr_info("%s: Warning: Cleanup "
2865 "stage failed! (%d)\n",
2866 mmc_hostname(test->card->host),
2872 mmc_release_host(test->card->host);
2874 pr_info("%s: Tests completed.\n",
2875 mmc_hostname(test->card->host));
2878 static void mmc_test_free_result(struct mmc_card *card)
2880 struct mmc_test_general_result *gr, *grs;
2882 mutex_lock(&mmc_test_lock);
2884 list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
2885 struct mmc_test_transfer_result *tr, *trs;
2887 if (card && gr->card != card)
2890 list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
2891 list_del(&tr->link);
2895 list_del(&gr->link);
2899 mutex_unlock(&mmc_test_lock);
/* Debugfs files created per card; protected by mmc_test_lock. */
static LIST_HEAD(mmc_test_file_test);
2904 static void mmc_test_usage(struct seq_file *sf)
2908 seq_printf(sf, "\nHow to run test:"
2909 "\necho <testcase> [[param1 value1].... ] > test"
2910 "\nExample:: echo 1 -b 4 -c 2500000 -t 2"
2911 "\n\nSupported parameters in sequence\n");
2913 for (i = 0; i < ARRAY_SIZE(mmc_test_parameter); i++) {
2914 seq_printf(sf, "Parameter%d Name:[%s] option:[%s]\n",
2915 i + 1, mmc_test_parameter[i].name,
2916 mmc_test_parameter[i].input);
2918 seq_printf(sf, "\'-1\' passed to take default value\n\n\n");
2921 static int mtf_test_show(struct seq_file *sf, void *data)
2923 struct mmc_card *card = (struct mmc_card *)sf->private;
2924 struct mmc_test_general_result *gr;
2926 mutex_lock(&mmc_test_lock);
2928 list_for_each_entry(gr, &mmc_test_result, link) {
2929 struct mmc_test_transfer_result *tr;
2931 if (gr->card != card)
2934 seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);
2936 list_for_each_entry(tr, &gr->tr_lst, link) {
2937 seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n",
2938 tr->count, tr->sectors,
2939 (unsigned long)tr->ts.tv_sec,
2940 (unsigned long)tr->ts.tv_nsec,
2941 tr->rate, tr->iops / 100, tr->iops % 100);
2945 mutex_unlock(&mmc_test_lock);
2950 static int mtf_test_open(struct inode *inode, struct file *file)
2952 return single_open(file, mtf_test_show, inode->i_private);
2955 static int mmc_test_extract_parameters(char *data_buf)
2957 char *running = NULL;
2959 const char delimiters[] = " ";
2967 * echo <testcasenumber> [[param1 value1] [param1 value1]] > test
2968 * $] echo 1 > test | Execute testcase 1
2969 * $] echo 1 -c 2500000 | execute tesecase 1 and set clock to 2500000
2970 * $] echo 1 -b 4 -c 2500000 -t 2 |
2971 * execute tesecase 1, set clock to 2500000, set bus_width 4,
2972 * and set timing to 2
2975 while ((token = strsep(&running, delimiters))) {
2976 if (strict_strtol(token, 10, &value)) {
2977 /* [Param1 value1] combination
2978 * Compare with available param list
2980 for (i = 0; i < ARRAY_SIZE(mmc_test_parameter); i++) {
2981 if (!strcmp(mmc_test_parameter[i].input,
2983 /* Valid Option, extract following
2986 token = strsep(&running, delimiters);
2987 if (strict_strtol(token, 10,
2988 &(mmc_test_parameter[i].value))) {
2990 printk(KERN_ERR "wrong parameter value\n");
2997 if (i == ARRAY_SIZE(mmc_test_parameter)) {
2998 printk(KERN_ERR "uknown mmc_test option\n");
3002 /* Testcase number */
3004 mmc_test_parameter[0].value = value;
3007 printk(KERN_ERR "invalid options");
3015 static ssize_t mtf_test_write(struct file *file, const char __user *buf,
3016 size_t count, loff_t *pos)
3018 struct seq_file *sf = (struct seq_file *)file->private_data;
3019 struct mmc_card *card = (struct mmc_card *)sf->private;
3020 struct mmc_test_card *test;
3021 char *data_buf = NULL;
3024 data_buf = kzalloc(count+1, GFP_KERNEL);
3025 if (data_buf == NULL)
3028 if (copy_from_user(data_buf, buf, count)) {
3032 if (mmc_test_extract_parameters(data_buf)) {
3040 testcase = mmc_test_parameter[0].value;
3042 test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
3047 * Remove all test cases associated with given card. Thus we have only
3048 * actual data of the last run.
3050 mmc_test_free_result(card);
3054 test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
3055 #ifdef CONFIG_HIGHMEM
3056 test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
3059 #ifdef CONFIG_HIGHMEM
3060 if (test->buffer && test->highmem) {
3064 mutex_lock(&mmc_test_lock);
3065 mmc_test_run(test, testcase);
3066 mutex_unlock(&mmc_test_lock);
3069 #ifdef CONFIG_HIGHMEM
3070 __free_pages(test->highmem, BUFFER_ORDER);
3072 kfree(test->buffer);
3078 static const struct file_operations mmc_test_fops_test = {
3079 .open = mtf_test_open,
3081 .write = mtf_test_write,
3082 .llseek = seq_lseek,
3083 .release = single_release,
3086 static int mtf_testlist_show(struct seq_file *sf, void *data)
3090 mutex_lock(&mmc_test_lock);
3092 for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
3093 seq_printf(sf, "%d:\t%s\n", i+1, mmc_test_cases[i].name);
3095 mutex_unlock(&mmc_test_lock);
3100 static int mtf_testlist_open(struct inode *inode, struct file *file)
3102 return single_open(file, mtf_testlist_show, inode->i_private);
3105 static const struct file_operations mmc_test_fops_testlist = {
3106 .open = mtf_testlist_open,
3108 .llseek = seq_lseek,
3109 .release = single_release,
3112 static void mmc_test_free_dbgfs_file(struct mmc_card *card)
3114 struct mmc_test_dbgfs_file *df, *dfs;
3116 mutex_lock(&mmc_test_lock);
3118 list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
3119 if (card && df->card != card)
3121 debugfs_remove(df->file);
3122 list_del(&df->link);
3126 mutex_unlock(&mmc_test_lock);
3129 static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
3130 const char *name, umode_t mode, const struct file_operations *fops)
3132 struct dentry *file = NULL;
3133 struct mmc_test_dbgfs_file *df;
3135 if (card->debugfs_root)
3136 file = debugfs_create_file(name, mode, card->debugfs_root,
3139 if (IS_ERR_OR_NULL(file)) {
3141 "Can't create %s. Perhaps debugfs is disabled.\n",
3146 df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
3148 debugfs_remove(file);
3150 "Can't allocate memory for internal usage.\n");
3157 list_add(&df->link, &mmc_test_file_test);
3161 static int mmc_test_register_dbgfs_file(struct mmc_card *card)
3165 mutex_lock(&mmc_test_lock);
3167 ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
3168 &mmc_test_fops_test);
3172 ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
3173 &mmc_test_fops_testlist);
3178 mutex_unlock(&mmc_test_lock);
3183 static int mmc_test_probe(struct mmc_card *card)
3187 if (!mmc_card_mmc(card) && !mmc_card_sd(card))
3190 ret = mmc_test_register_dbgfs_file(card);
3194 dev_info(&card->dev, "Card claimed for testing.\n");
/* Driver remove: drop stored results and debugfs files for the card. */
static void mmc_test_remove(struct mmc_card *card)
{
	mmc_test_free_result(card);
	mmc_test_free_dbgfs_file(card);
}
3205 static struct mmc_driver mmc_driver = {
3209 .probe = mmc_test_probe,
3210 .remove = mmc_test_remove,
3213 static int __init mmc_test_init(void)
3215 return mmc_register_driver(&mmc_driver);
3218 static void __exit mmc_test_exit(void)
3220 /* Clear stalled data if card is still plugged */
3221 mmc_test_free_result(NULL);
3222 mmc_test_free_dbgfs_file(NULL);
3224 mmc_unregister_driver(&mmc_driver);
3227 module_init(mmc_test_init);
3228 module_exit(mmc_test_exit);
3230 MODULE_LICENSE("GPL");
3231 MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
3232 MODULE_AUTHOR("Pierre Ossman");