/*
 * linux/drivers/mmc/card/mmc_test.c
 *
 * Copyright 2007-2008 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/slab.h>

#include <linux/scatterlist.h>
#include <linux/swap.h>		/* For nr_free_buffer_pages() */
#include <linux/list.h>

#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>

#define RESULT_OK		0
#define RESULT_FAIL		1
#define RESULT_UNSUP_HOST	2
#define RESULT_UNSUP_CARD	3

#define BUFFER_ORDER		2
#define BUFFER_SIZE		(PAGE_SIZE << BUFFER_ORDER)
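
/*
 * Illustrative sizing (page size is architecture-dependent): with the
 * common 4 KiB page, BUFFER_SIZE = 4096 << 2 = 16 KiB, i.e. 32 sectors
 * of 512 bytes per test buffer.
 */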

/*
 * Limit the test area size to the maximum MMC HC erase group size.  Note that
 * the maximum SD allocation unit size is just 4MiB.
 */
#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)

/**
 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
 * @page: first page in the allocation
 * @order: order of the number of pages allocated
 */
struct mmc_test_pages {
	struct page *page;
	unsigned int order;
};

/**
 * struct mmc_test_mem - allocated memory.
 * @arr: array of allocations
 * @cnt: number of allocations
 */
struct mmc_test_mem {
	struct mmc_test_pages *arr;
	unsigned int cnt;
};

/**
 * struct mmc_test_area - information for performance tests.
 * @max_sz: test area size (in bytes)
 * @dev_addr: address on card at which to do performance tests
 * @max_tfr: maximum transfer size allowed by driver (in bytes)
 * @max_segs: maximum segments allowed by driver in scatterlist @sg
 * @max_seg_sz: maximum segment size allowed by driver
 * @blocks: number of (512 byte) blocks currently mapped by @sg
 * @sg_len: length of currently mapped scatterlist @sg
 * @mem: allocated memory
 * @sg: scatterlist
 */
struct mmc_test_area {
	unsigned long max_sz;
	unsigned int dev_addr;
	unsigned int max_tfr;
	unsigned int max_segs;
	unsigned int max_seg_sz;
	unsigned int blocks;
	unsigned int sg_len;
	struct mmc_test_mem *mem;
	struct scatterlist *sg;
};

/**
 * struct mmc_test_transfer_result - transfer results for performance tests.
 * @link: double-linked list
 * @count: number of groups of sectors to check
 * @sectors: number of sectors to check in one group
 * @ts: time values of transfer
 * @rate: calculated transfer rate
 * @iops: I/O operations per second (times 100)
 */
struct mmc_test_transfer_result {
	struct list_head link;
	unsigned int count;
	unsigned int sectors;
	struct timespec ts;
	unsigned int rate;
	unsigned int iops;
};

/**
 * struct mmc_test_general_result - results for tests.
 * @link: double-linked list
 * @card: card under test
 * @testcase: number of test case
 * @result: result of test run
 * @tr_lst: transfer measurements if any as mmc_test_transfer_result
 */
struct mmc_test_general_result {
	struct list_head link;
	struct mmc_card *card;
	int testcase;
	int result;
	struct list_head tr_lst;
};

/**
 * struct mmc_test_dbgfs_file - debugfs related file.
 * @link: double-linked list
 * @card: card under test
 * @file: file created under debugfs
 */
struct mmc_test_dbgfs_file {
	struct list_head link;
	struct mmc_card *card;
	struct dentry *file;
};

/**
 * struct mmc_test_card - test information.
 * @card: card under test
 * @scratch: transfer buffer
 * @buffer: transfer buffer
 * @highmem: buffer for highmem tests
 * @area: information for performance tests
 * @gr: pointer to results of current testcase
 */
struct mmc_test_card {
	struct mmc_card	*card;

	u8		scratch[BUFFER_SIZE];
	u8		*buffer;
#ifdef CONFIG_HIGHMEM
	struct page	*highmem;
#endif
	struct mmc_test_area		area;
	struct mmc_test_general_result	*gr;
};

/*******************************************************************/
/*  General helper functions                                       */
/*******************************************************************/

/*
 * Configure correct block size in card
 */
static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
{
	return mmc_set_blocklen(test->card, size);
}

/*
 * Fill in the mmc_request structure given a set of transfer parameters.
 */
static void mmc_test_prepare_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
	unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
{
	BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);

	if (blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
	} else {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
	}

	mrq->cmd->arg = dev_addr;
	if (!mmc_card_blockaddr(test->card))
		mrq->cmd->arg <<= 9;

	mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	if (blocks == 1)
		mrq->stop = NULL;
	else {
		mrq->stop->opcode = MMC_STOP_TRANSMISSION;
		mrq->stop->arg = 0;
		mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	}

	mrq->data->blksz = blksz;
	mrq->data->blocks = blocks;
	mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mrq->data->sg = sg;
	mrq->data->sg_len = sg_len;

	mmc_set_data_timeout(mrq->data, test->card);
}
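
/*
 * For example: a byte-addressed (standard-capacity) card turns sector 10
 * into cmd->arg = 10 << 9 = 5120, while a block-addressed (high-capacity)
 * card takes the sector number 10 unchanged.
 */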

static int mmc_test_busy(struct mmc_command *cmd)
{
	return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
		(R1_CURRENT_STATE(cmd->resp[0]) == 7);
}
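
/*
 * State 7 in the R1 status is the card's "programming" state; the card is
 * considered busy until it leaves that state and R1_READY_FOR_DATA is set.
 */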

/*
 * Wait for the card to finish the busy state
 */
static int mmc_test_wait_busy(struct mmc_test_card *test)
{
	int ret, busy;
	struct mmc_command cmd = {0};

	busy = 0;
	do {
		memset(&cmd, 0, sizeof(struct mmc_command));

		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = test->card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

		ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
		if (ret)
			break;

		if (!busy && mmc_test_busy(&cmd)) {
			busy = 1;
			if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
				printk(KERN_INFO "%s: Warning: Host did not "
					"wait for busy state to end.\n",
					mmc_hostname(test->card->host));
		}
	} while (mmc_test_busy(&cmd));

	return ret;
}

/*
 * Transfer a single sector of kernel addressable data
 */
static int mmc_test_buffer_transfer(struct mmc_test_card *test,
	u8 *buffer, unsigned addr, unsigned blksz, int write)
{
	int ret;

	struct mmc_request mrq = {0};
	struct mmc_command cmd = {0};
	struct mmc_command stop = {0};
	struct mmc_data data = {0};

	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, buffer, blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	ret = mmc_test_wait_busy(test);
	if (ret)
		return ret;

	return 0;
}

static void mmc_test_free_mem(struct mmc_test_mem *mem)
{
	if (!mem)
		return;
	while (mem->cnt--)
		__free_pages(mem->arr[mem->cnt].page,
			     mem->arr[mem->cnt].order);
	kfree(mem->arr);
	kfree(mem);
}

/*
 * Allocate a lot of memory, preferably max_sz but at least min_sz. In case
 * there isn't much memory, do not exceed 1/16th of total lowmem pages. Also
 * do not exceed a maximum number of segments, and try not to make segments
 * much bigger than the maximum segment size.
 */
static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
					       unsigned long max_sz,
					       unsigned int max_segs,
					       unsigned int max_seg_sz)
{
	unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
	unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
	unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
	unsigned long page_cnt = 0;
	unsigned long limit = nr_free_buffer_pages() >> 4;
	struct mmc_test_mem *mem;

	if (max_page_cnt > limit)
		max_page_cnt = limit;
	if (min_page_cnt > max_page_cnt)
		min_page_cnt = max_page_cnt;

	if (max_seg_page_cnt > max_page_cnt)
		max_seg_page_cnt = max_page_cnt;

	if (max_segs > max_page_cnt)
		max_segs = max_page_cnt;

	mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
	if (!mem)
		return NULL;

	mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_segs,
			   GFP_KERNEL);
	if (!mem->arr)
		goto out_free;

	while (max_page_cnt) {
		struct page *page;
		unsigned int order;
		gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
			      __GFP_NORETRY;

		order = get_order(max_seg_page_cnt << PAGE_SHIFT);
		while (1) {
			page = alloc_pages(flags, order);
			if (page || !order)
				break;
			order -= 1;
		}
		if (!page) {
			if (page_cnt < min_page_cnt)
				goto out_free;
			break;
		}
		mem->arr[mem->cnt].page = page;
		mem->arr[mem->cnt].order = order;
		mem->cnt += 1;
		if (max_page_cnt <= (1UL << order))
			break;
		max_page_cnt -= 1UL << order;
		page_cnt += 1UL << order;
		if (mem->cnt >= max_segs) {
			if (page_cnt < min_page_cnt)
				goto out_free;
			break;
		}
	}

	return mem;

out_free:
	mmc_test_free_mem(mem);
	return NULL;
}
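
/*
 * Worked example (illustrative values): asking for max_sz = 4 MiB with
 * max_seg_sz = 64 KiB on 4 KiB pages gives max_seg_page_cnt = 16, so each
 * iteration first tries an order-4 (64 KiB) allocation and falls back one
 * order at a time under memory pressure; at most 64 order-4 segments
 * cover the 4 MiB request.
 */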

/*
 * Map memory into a scatterlist.  Optionally allow the same memory to be
 * mapped more than once.
 */
static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long sz,
			   struct scatterlist *sglist, int repeat,
			   unsigned int max_segs, unsigned int max_seg_sz,
			   unsigned int *sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i;

	sg_init_table(sglist, max_segs);

	*sg_len = 0;
	do {
		for (i = 0; i < mem->cnt; i++) {
			unsigned long len = PAGE_SIZE << mem->arr[i].order;

			if (len > sz)
				len = sz;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, mem->arr[i].page, len, 0);
			sz -= len;
			*sg_len += 1;
			if (!sz)
				break;
		}
	} while (sz && repeat);

	if (sz)
		return -EINVAL;

	if (sg)
		sg_mark_end(sg);

	return 0;
}

/*
 * Map memory into a scatterlist so that no pages are contiguous.  Allow the
 * same memory to be mapped more than once.
 */
static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
				       unsigned long sz,
				       struct scatterlist *sglist,
				       unsigned int max_segs,
				       unsigned int max_seg_sz,
				       unsigned int *sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i = mem->cnt, cnt;
	unsigned long len;
	void *base, *addr, *last_addr = NULL;

	sg_init_table(sglist, max_segs);

	*sg_len = 0;
	while (sz) {
		base = page_address(mem->arr[--i].page);
		cnt = 1 << mem->arr[i].order;
		while (sz && cnt) {
			addr = base + PAGE_SIZE * --cnt;
			if (last_addr && last_addr + PAGE_SIZE == addr)
				continue;
			last_addr = addr;
			len = PAGE_SIZE;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (len > sz)
				len = sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, virt_to_page(addr), len, 0);
			sz -= len;
			*sg_len += 1;
		}
		if (i == 0)
			i = mem->cnt;
	}

	if (sg)
		sg_mark_end(sg);

	return 0;
}
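
/*
 * Anti-contiguity sketch: pages within each allocation are mapped from the
 * highest address downwards (P+3, P+2, P+1, P for an order-2 block), so
 * consecutive scatterlist entries never ascend within an allocation; the
 * last_addr + PAGE_SIZE == addr check additionally skips a page that would
 * directly follow the previous one when the walk crosses allocations.
 */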

/*
 * Calculate transfer rate in bytes per second.
 */
static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
{
	uint64_t ns;

	ns = ts->tv_sec;
	ns *= 1000000000;
	ns += ts->tv_nsec;

	bytes *= 1000000000;

	while (ns > UINT_MAX) {
		bytes >>= 1;
		ns >>= 1;
	}

	if (!ns)
		return 0;

	do_div(bytes, (uint32_t)ns);

	return bytes;
}
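
/*
 * Worked example: transferring 4 MiB in 0.25 s gives
 * bytes = 4194304 * 10^9 and ns = 250000000; ns already fits in 32 bits,
 * so do_div() yields 16777216 bytes/s (16 MiB/s).  For durations beyond
 * ~4.3 s the halving loop shrinks both operands so a cheap 32-bit divide
 * still works, at a small cost in precision.
 */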

/*
 * Save transfer results for future usage
 */
static void mmc_test_save_transfer_result(struct mmc_test_card *test,
	unsigned int count, unsigned int sectors, struct timespec ts,
	unsigned int rate, unsigned int iops)
{
	struct mmc_test_transfer_result *tr;

	if (!test->gr)
		return;

	tr = kmalloc(sizeof(struct mmc_test_transfer_result), GFP_KERNEL);
	if (!tr)
		return;

	tr->count = count;
	tr->sectors = sectors;
	tr->ts = ts;
	tr->rate = rate;
	tr->iops = iops;

	list_add_tail(&tr->link, &test->gr->tr_lst);
}

/*
 * Print the transfer rate.
 */
static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
				struct timespec *ts1, struct timespec *ts2)
{
	unsigned int rate, iops, sectors = bytes >> 9;
	struct timespec ts;

	ts = timespec_sub(*ts2, *ts1);

	rate = mmc_test_rate(bytes, &ts);
	iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */

	printk(KERN_INFO "%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
			 "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
			 mmc_hostname(test->card->host), sectors, sectors >> 1,
			 (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
			 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
			 iops / 100, iops % 100);

	mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
}

/*
 * Print the average transfer rate.
 */
static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
				    unsigned int count, struct timespec *ts1,
				    struct timespec *ts2)
{
	unsigned int rate, iops, sectors = bytes >> 9;
	uint64_t tot = bytes * count;
	struct timespec ts;

	ts = timespec_sub(*ts2, *ts1);

	rate = mmc_test_rate(tot, &ts);
	iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */

	printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
			 "%lu.%09lu seconds (%u kB/s, %u KiB/s, "
			 "%u.%02u IOPS)\n",
			 mmc_hostname(test->card->host), count, sectors, count,
			 sectors >> 1, (sectors & 1 ? ".5" : ""),
			 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
			 rate / 1000, rate / 1024, iops / 100, iops % 100);

	mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
}

/*
 * Return the card size in sectors.
 */
static unsigned int mmc_test_capacity(struct mmc_card *card)
{
	if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
		return card->ext_csd.sectors;
	else
		return card->csd.capacity << (card->csd.read_blkbits - 9);
}
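
/*
 * Example (illustrative numbers): a byte-addressed card whose CSD reports
 * capacity = 1048576 with read_blkbits = 10 holds 1048576 blocks of 1 KiB,
 * i.e. 1048576 << (10 - 9) = 2097152 sectors of 512 bytes (1 GiB).
 * Block-addressed eMMC simply reports its sector count in EXT_CSD.
 */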

/*******************************************************************/
/*  Test preparation and cleanup                                   */
/*******************************************************************/

/*
 * Fill the first couple of sectors of the card with known data
 * so that bad reads/writes can be detected
 */
static int __mmc_test_prepare(struct mmc_test_card *test, int write)
{
	int ret, i;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	if (write)
		memset(test->buffer, 0xDF, 512);
	else {
		for (i = 0; i < 512; i++)
			test->buffer[i] = i;
	}

	for (i = 0; i < BUFFER_SIZE / 512; i++) {
		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_prepare_write(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 1);
}

static int mmc_test_prepare_read(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 0);
}

static int mmc_test_cleanup(struct mmc_test_card *test)
{
	int ret, i;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	memset(test->buffer, 0, 512);

	for (i = 0; i < BUFFER_SIZE / 512; i++) {
		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

/*******************************************************************/
/*  Test execution helpers                                         */
/*******************************************************************/

/*
 * Modifies the mmc_request to perform the "short transfer" tests
 */
static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, int write)
{
	BUG_ON(!mrq || !mrq->cmd || !mrq->data);

	if (mrq->data->blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
		mrq->stop = NULL;
	} else {
		mrq->cmd->opcode = MMC_SEND_STATUS;
		mrq->cmd->arg = test->card->rca << 16;
	}
}

/*
 * Checks that a normal transfer didn't have any errors
 */
static int mmc_test_check_result(struct mmc_test_card *test,
				 struct mmc_request *mrq)
{
	int ret;

	BUG_ON(!mrq || !mrq->cmd || !mrq->data);

	ret = 0;

	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	if (!ret && mrq->data->error)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (!ret && mrq->data->bytes_xfered !=
		mrq->data->blocks * mrq->data->blksz)
		ret = RESULT_FAIL;

	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}

/*
 * Checks that a "short transfer" behaved as expected
 */
static int mmc_test_check_broken_result(struct mmc_test_card *test,
					struct mmc_request *mrq)
{
	int ret;

	BUG_ON(!mrq || !mrq->cmd || !mrq->data);

	ret = 0;

	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	if (!ret && mrq->data->error == 0)
		ret = RESULT_FAIL;
	if (!ret && mrq->data->error != -ETIMEDOUT)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (mrq->data->blocks > 1) {
		if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
			ret = RESULT_FAIL;
	} else {
		if (!ret && mrq->data->bytes_xfered > 0)
			ret = RESULT_FAIL;
	}

	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}

/*
 * Tests a basic transfer with certain parameters
 */
static int mmc_test_simple_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq = {0};
	struct mmc_command cmd = {0};
	struct mmc_command stop = {0};
	struct mmc_data data = {0};

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
		blocks, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	mmc_test_wait_busy(test);

	return mmc_test_check_result(test, &mrq);
}

/*
 * Tests a transfer where the card will fail completely or partially
 */
static int mmc_test_broken_transfer(struct mmc_test_card *test,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq = {0};
	struct mmc_command cmd = {0};
	struct mmc_command stop = {0};
	struct mmc_data data = {0};

	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, test->buffer, blocks * blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
	mmc_test_prepare_broken_mrq(test, &mrq, write);

	mmc_wait_for_req(test->card->host, &mrq);

	mmc_test_wait_busy(test);

	return mmc_test_check_broken_result(test, &mrq);
}

/*
 * Does a complete transfer test where data is also validated
 *
 * Note: mmc_test_prepare() must have been done before this call
 */
static int mmc_test_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	int ret, i;
	unsigned long flags;

	if (write) {
		for (i = 0; i < blocks * blksz; i++)
			test->scratch[i] = i;
	} else {
		memset(test->scratch, 0, BUFFER_SIZE);
	}
	local_irq_save(flags);
	sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
	local_irq_restore(flags);

	ret = mmc_test_set_blksize(test, blksz);
	if (ret)
		return ret;

	ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
		blocks, blksz, write);
	if (ret)
		return ret;

	if (write) {
		int sectors;

		ret = mmc_test_set_blksize(test, 512);
		if (ret)
			return ret;

		sectors = (blocks * blksz + 511) / 512;
		if ((sectors * 512) == (blocks * blksz))
			sectors++;

		if ((sectors * 512) > BUFFER_SIZE)
			return -EINVAL;

		memset(test->buffer, 0, sectors * 512);

		for (i = 0; i < sectors; i++) {
			ret = mmc_test_buffer_transfer(test,
				test->buffer + i * 512,
				dev_addr + i, 512, 0);
			if (ret)
				return ret;
		}

		for (i = 0; i < blocks * blksz; i++) {
			if (test->buffer[i] != (u8)i)
				return RESULT_FAIL;
		}

		for (; i < sectors * 512; i++) {
			if (test->buffer[i] != 0xDF)
				return RESULT_FAIL;
		}
	} else {
		local_irq_save(flags);
		sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
		local_irq_restore(flags);
		for (i = 0; i < blocks * blksz; i++) {
			if (test->scratch[i] != (u8)i)
				return RESULT_FAIL;
		}
	}

	return 0;
}
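
/*
 * Verification sketch: writing 3 blocks of 512 bytes gives
 * sectors = (1536 + 511) / 512 = 3, bumped to 4 because the transfer ends
 * exactly on a sector boundary; the extra sector is read back and must
 * still hold the 0xDF fill from the prepare stage, catching writes that
 * overrun their intended range.
 */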

/*******************************************************************/
/*  Tests                                                          */
/*******************************************************************/

struct mmc_test_case {
	const char *name;

	int (*prepare)(struct mmc_test_card *);
	int (*run)(struct mmc_test_card *);
	int (*cleanup)(struct mmc_test_card *);
};

static int mmc_test_basic_write(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	sg_init_one(&sg, test->buffer, 512);

	ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_basic_read(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	sg_init_one(&sg, test->buffer, 512);

	ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_verify_write(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	sg_init_one(&sg, test->buffer, 512);

	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_verify_read(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	sg_init_one(&sg, test->buffer, 512);

	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_write(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_one(&sg, test->buffer, size);

	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_read(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_one(&sg, test->buffer, size);

	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_pow2_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.write_partial)
		return RESULT_UNSUP_CARD;

	for (i = 1; i < 512; i <<= 1) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_pow2_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.read_partial)
		return RESULT_UNSUP_CARD;

	for (i = 1; i < 512; i <<= 1) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_weird_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.write_partial)
		return RESULT_UNSUP_CARD;

	for (i = 3; i < 512; i += 7) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_weird_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.read_partial)
		return RESULT_UNSUP_CARD;

	for (i = 3; i < 512; i += 7) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	for (i = 1; i < 4; i++) {
		sg_init_one(&sg, test->buffer + i, 512);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	for (i = 1; i < 4; i++) {
		sg_init_one(&sg, test->buffer + i, 512);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_multi_write(struct mmc_test_card *test)
{
	int ret, i;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	for (i = 1; i < 4; i++) {
		sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_multi_read(struct mmc_test_card *test)
{
	int ret, i;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	for (i = 1; i < 4; i++) {
		sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	ret = mmc_test_broken_transfer(test, 1, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	ret = mmc_test_broken_transfer(test, 1, 512, 0);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	ret = mmc_test_broken_transfer(test, 2, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	ret = mmc_test_broken_transfer(test, 2, 512, 0);
	if (ret)
		return ret;

	return 0;
}

#ifdef CONFIG_HIGHMEM

static int mmc_test_write_high(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, 512, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_read_high(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, 512, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_write_high(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_read_high(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
	if (ret)
		return ret;

	return 0;
}

#else

static int mmc_test_no_highmem(struct mmc_test_card *test)
{
	printk(KERN_INFO "%s: Highmem not configured - test skipped\n",
	       mmc_hostname(test->card->host));
	return 0;
}

#endif /* CONFIG_HIGHMEM */

/*
 * Map sz bytes so they can be transferred.
 */
static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
			     int max_scatter)
{
	struct mmc_test_area *t = &test->area;
	int err;

	t->blocks = sz >> 9;

	if (max_scatter) {
		err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
						  t->max_segs, t->max_seg_sz,
						  &t->sg_len);
	} else {
		err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
				      t->max_seg_sz, &t->sg_len);
	}
	if (err)
		printk(KERN_INFO "%s: Failed to map sg list\n",
		       mmc_hostname(test->card->host));
	return err;
}

/*
 * Transfer bytes mapped by mmc_test_area_map().
 */
static int mmc_test_area_transfer(struct mmc_test_card *test,
				  unsigned int dev_addr, int write)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
					t->blocks, 512, write);
}

/*
 * Map and transfer bytes.
 */
static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
			    unsigned int dev_addr, int write, int max_scatter,
			    int timed)
{
	struct timespec ts1, ts2;
	int ret;

	/*
	 * In the case of a maximally scattered transfer, the maximum transfer
	 * size is further limited by using PAGE_SIZE segments.
	 */
	if (max_scatter) {
		struct mmc_test_area *t = &test->area;
		unsigned long max_tfr;

		if (t->max_seg_sz >= PAGE_SIZE)
			max_tfr = t->max_segs * PAGE_SIZE;
		else
			max_tfr = t->max_segs * t->max_seg_sz;
		if (sz > max_tfr)
			sz = max_tfr;
	}

	ret = mmc_test_area_map(test, sz, max_scatter);
	if (ret)
		return ret;

	if (timed)
		getnstimeofday(&ts1);

	ret = mmc_test_area_transfer(test, dev_addr, write);
	if (ret)
		return ret;

	if (timed)
		getnstimeofday(&ts2);

	if (timed)
		mmc_test_print_rate(test, sz, &ts1, &ts2);

	return 0;
}

/*
 * Write the test area entirely.
 */
static int mmc_test_area_fill(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
}

/*
 * Erase the test area entirely.
 */
static int mmc_test_area_erase(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	if (!mmc_can_erase(test->card))
		return 0;

	return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
			 MMC_ERASE_ARG);
}

/*
 * Cleanup struct mmc_test_area.
 */
static int mmc_test_area_cleanup(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	kfree(t->sg);
	mmc_test_free_mem(t->mem);

	return 0;
}

/*
 * Initialize an area for testing large transfers. The test area is set to the
 * middle of the card because cards may have different characteristics at the
 * front (for FAT file system optimization). Optionally, the area is erased
 * (if the card supports it) which may improve write performance. Optionally,
 * the area is filled with data for subsequent read tests.
 */
static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
{
	struct mmc_test_area *t = &test->area;
	unsigned long min_sz = 64 * 1024, sz;
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	/* Make the test area size about 4MiB */
	sz = (unsigned long)test->card->pref_erase << 9;
	t->max_sz = sz;
	while (t->max_sz < 4 * 1024 * 1024)
		t->max_sz += sz;
	while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
		t->max_sz -= sz;

	t->max_segs = test->card->host->max_segs;
	t->max_seg_sz = test->card->host->max_seg_size;

	t->max_tfr = t->max_sz;
	if (t->max_tfr >> 9 > test->card->host->max_blk_count)
		t->max_tfr = test->card->host->max_blk_count << 9;
	if (t->max_tfr > test->card->host->max_req_size)
		t->max_tfr = test->card->host->max_req_size;
	if (t->max_tfr / t->max_seg_sz > t->max_segs)
		t->max_tfr = t->max_segs * t->max_seg_sz;

	/*
	 * Try to allocate enough memory for a max. sized transfer. Less is OK
	 * because the same memory can be mapped into the scatterlist more than
	 * once. Also, take into account the limits imposed on scatterlist
	 * segments by the host driver.
	 */
	t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
				    t->max_seg_sz);
	if (!t->mem)
		return -ENOMEM;

	t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
	if (!t->sg) {
		ret = -ENOMEM;
		goto out_free;
	}

	t->dev_addr = mmc_test_capacity(test->card) / 2;
	t->dev_addr -= t->dev_addr % (t->max_sz >> 9);

	if (erase) {
		ret = mmc_test_area_erase(test);
		if (ret)
			goto out_free;
	}

	if (fill) {
		ret = mmc_test_area_fill(test);
		if (ret)
			goto out_free;
	}

	return 0;

out_free:
	mmc_test_area_cleanup(test);
	return ret;
}
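
/*
 * Sizing example (illustrative values): with pref_erase = 1024 sectors the
 * step size sz is 512 KiB, so t->max_sz grows 512 KiB at a time until it
 * reaches 4 MiB; the area is then anchored at half the card's capacity,
 * rounded down to a multiple of max_sz sectors.
 */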

/*
 * Prepare for large transfers.  Do not erase the test area.
 */
static int mmc_test_area_prepare(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 0, 0);
}

/*
 * Prepare for large transfers.  Do erase the test area.
 */
static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 0);
}

/*
 * Prepare for large transfers.  Erase and fill the test area.
 */
static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 1);
}

/*
 * Test best-case performance.  Best-case performance is expected from
 * a single large transfer.
 *
 * An additional option (max_scatter) allows the measurement of the same
 * transfer but with no contiguous pages in the scatter list.  This tests
 * the efficiency of DMA to handle scattered pages.
 */
static int mmc_test_best_performance(struct mmc_test_card *test, int write,
				     int max_scatter)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
				max_scatter, 1);
}

/*
 * Best-case read performance.
 */
static int mmc_test_best_read_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 0);
}

/*
 * Best-case write performance.
 */
static int mmc_test_best_write_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 0);
}

/*
 * Best-case read performance into scattered pages.
 */
static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 1);
}

/*
 * Best-case write performance from scattered pages.
 */
static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 1);
}

/*
 * Single read performance by transfer size.
 */
static int mmc_test_profile_read_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	dev_addr = t->dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
}

/*
 * Single write performance by transfer size.
 */
static int mmc_test_profile_write_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
		if (ret)
			return ret;
	}
	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	sz = t->max_tfr;
	dev_addr = t->dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
}

/*
 * Single trim performance by transfer size.
 */
static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	struct timespec ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz < t->max_sz; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		getnstimeofday(&ts1);
		ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
		if (ret)
			return ret;
		getnstimeofday(&ts2);
		mmc_test_print_rate(test, sz, &ts1, &ts2);
	}
	dev_addr = t->dev_addr;
	getnstimeofday(&ts1);
	ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
	if (ret)
		return ret;
	getnstimeofday(&ts2);
	mmc_test_print_rate(test, sz, &ts1, &ts2);
	return 0;
}

static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	cnt = t->max_sz / sz;
	dev_addr = t->dev_addr;
	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
		if (ret)
			return ret;
		dev_addr += (sz >> 9);
	}
	getnstimeofday(&ts2);
	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

/*
 * Consecutive read performance by transfer size.
 */
static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		ret = mmc_test_seq_read_perf(test, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	return mmc_test_seq_read_perf(test, sz);
}

static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	cnt = t->max_sz / sz;
	dev_addr = t->dev_addr;
	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
		if (ret)
			return ret;
		dev_addr += (sz >> 9);
	}
	getnstimeofday(&ts2);
	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

/*
 * Consecutive write performance by transfer size.
 */
static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		ret = mmc_test_seq_write_perf(test, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	return mmc_test_seq_write_perf(test, sz);
}

/*
 * Consecutive trim performance by transfer size.
 */
static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz <= t->max_sz; sz <<= 1) {
		ret = mmc_test_area_erase(test);
		if (ret)
			return ret;
		ret = mmc_test_area_fill(test);
		if (ret)
			return ret;
		cnt = t->max_sz / sz;
		dev_addr = t->dev_addr;
		getnstimeofday(&ts1);
		for (i = 0; i < cnt; i++) {
			ret = mmc_erase(test->card, dev_addr, sz >> 9,
					MMC_TRIM_ARG);
			if (ret)
				return ret;
			dev_addr += (sz >> 9);
		}
		getnstimeofday(&ts2);
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	}
	return 0;
}

static unsigned int rnd_next = 1;

static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
{
	uint64_t r;

	rnd_next = rnd_next * 1103515245 + 12345;
	r = (rnd_next >> 16) & 0x7fff;
	return (r * rnd_cnt) >> 15;
}
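
/*
 * These are the constants of the classic POSIX-example rand() LCG.  With
 * the initial seed 1, rnd_next becomes 1103527590, the 15-bit draw is
 * 16838, and mmc_test_rnd_num(100) returns (16838 * 100) >> 15 = 51.
 * The fixed seed keeps runs reproducible.
 */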

static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
			     unsigned long sz)
{
	unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
	unsigned int ssz;
	struct timespec ts1, ts2, ts;
	int ret;

	ssz = sz >> 9;

	rnd_addr = mmc_test_capacity(test->card) / 4;
	range1 = rnd_addr / test->card->pref_erase;
	range2 = range1 / ssz;

	getnstimeofday(&ts1);
	for (cnt = 0; cnt < UINT_MAX; cnt++) {
		getnstimeofday(&ts2);
		ts = timespec_sub(ts2, ts1);
		if (ts.tv_sec >= 10)
			break;
		ea = mmc_test_rnd_num(range1);
		if (ea == last_ea)
			ea -= 1;
		last_ea = ea;
		dev_addr = rnd_addr + test->card->pref_erase * ea +
			   ssz * mmc_test_rnd_num(range2);
		ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
		if (ret)
			return ret;
	}
	if (print)
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

static int mmc_test_random_perf(struct mmc_test_card *test, int write)
{
	struct mmc_test_area *t = &test->area;
	unsigned int next;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		/*
		 * When writing, try to get more consistent results by running
		 * the test twice with exactly the same I/O but outputting the
		 * results only for the 2nd run.
		 */
		if (write) {
			next = rnd_next;
			ret = mmc_test_rnd_perf(test, write, 0, sz);
			if (ret)
				return ret;
			rnd_next = next;
		}
		ret = mmc_test_rnd_perf(test, write, 1, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	if (write) {
		next = rnd_next;
		ret = mmc_test_rnd_perf(test, write, 0, sz);
		if (ret)
			return ret;
		rnd_next = next;
	}
	return mmc_test_rnd_perf(test, write, 1, sz);
}

/*
 * Random read performance by transfer size.
 */
static int mmc_test_random_read_perf(struct mmc_test_card *test)
{
	return mmc_test_random_perf(test, 0);
}

/*
 * Random write performance by transfer size.
 */
static int mmc_test_random_write_perf(struct mmc_test_card *test)
{
	return mmc_test_random_perf(test, 1);
}

static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
			     unsigned int tot_sz, int max_scatter)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt, sz, ssz;
	struct timespec ts1, ts2;
	int ret;

	sz = t->max_tfr;

	/*
	 * In the case of a maximally scattered transfer, the maximum transfer
	 * size is further limited by using PAGE_SIZE segments.
	 */
	if (max_scatter) {
		unsigned long max_tfr;

		if (t->max_seg_sz >= PAGE_SIZE)
			max_tfr = t->max_segs * PAGE_SIZE;
		else
			max_tfr = t->max_segs * t->max_seg_sz;
		if (sz > max_tfr)
			sz = max_tfr;
	}

	ssz = sz >> 9;
	dev_addr = mmc_test_capacity(test->card) / 4;
	if (tot_sz > dev_addr << 9)
		tot_sz = dev_addr << 9;
	cnt = tot_sz / sz;
	dev_addr &= 0xffff0000; /* Round to a 0x10000-sector (32 MiB) boundary */

	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, write,
				       max_scatter, 0);
		if (ret)
			return ret;
		dev_addr += ssz;
	}
	getnstimeofday(&ts2);

	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);

	return 0;
}
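
/*
 * Example (illustrative): with tot_sz = 10 MiB and sz = max_tfr = 512 KiB,
 * cnt = 20 sequential transfers are issued, starting at one quarter of the
 * card's capacity rounded down to a 0x10000-sector boundary.
 */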
1917
1918
static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
1919
{
1920
int ret, i;
1921
1922
for (i = 0; i < 10; i++) {
1923
ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
1924
if (ret)
1925
return ret;
1926
}
1927
for (i = 0; i < 5; i++) {
1928
ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
1929
if (ret)
1930
return ret;
1931
}
1932
for (i = 0; i < 3; i++) {
1933
ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
1934
if (ret)
1935
return ret;
1936
}
1937
1938
return ret;
1939
}
1940
1941
/*
1942
* Large sequential read performance.
1943
*/
1944
static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
1945
{
1946
return mmc_test_large_seq_perf(test, 0);
1947
}
1948
1949
/*
1950
* Large sequential write performance.
1951
*/
1952
static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
1953
{
1954
return mmc_test_large_seq_perf(test, 1);
1955
}
1956
1957
static const struct mmc_test_case mmc_test_cases[] = {
1958
{
1959
.name = "Basic write (no data verification)",
1960
.run = mmc_test_basic_write,
1961
},
1962
1963
{
1964
.name = "Basic read (no data verification)",
1965
.run = mmc_test_basic_read,
1966
},
1967
1968
{
1969
.name = "Basic write (with data verification)",
1970
.prepare = mmc_test_prepare_write,
1971
.run = mmc_test_verify_write,
1972
.cleanup = mmc_test_cleanup,
1973
},
1974
1975
{
1976
.name = "Basic read (with data verification)",
1977
.prepare = mmc_test_prepare_read,
1978
.run = mmc_test_verify_read,
1979
.cleanup = mmc_test_cleanup,
1980
},
1981
1982
{
1983
.name = "Multi-block write",
1984
.prepare = mmc_test_prepare_write,
1985
.run = mmc_test_multi_write,
1986
.cleanup = mmc_test_cleanup,
1987
},
1988
1989
{
1990
.name = "Multi-block read",
1991
.prepare = mmc_test_prepare_read,
1992
.run = mmc_test_multi_read,
1993
.cleanup = mmc_test_cleanup,
1994
},
1995
1996
{
1997
.name = "Power of two block writes",
1998
.prepare = mmc_test_prepare_write,
1999
.run = mmc_test_pow2_write,
2000
.cleanup = mmc_test_cleanup,
2001
},
2002
2003
{
2004
.name = "Power of two block reads",
2005
.prepare = mmc_test_prepare_read,
2006
.run = mmc_test_pow2_read,
2007
.cleanup = mmc_test_cleanup,
2008
},
2009
2010
{
2011
.name = "Weird sized block writes",
2012
.prepare = mmc_test_prepare_write,
2013
.run = mmc_test_weird_write,
2014
.cleanup = mmc_test_cleanup,
2015
},
2016
2017
{
2018
.name = "Weird sized block reads",
2019
.prepare = mmc_test_prepare_read,
2020
.run = mmc_test_weird_read,
2021
.cleanup = mmc_test_cleanup,
2022
},
2023
2024
{
2025
.name = "Badly aligned write",
2026
.prepare = mmc_test_prepare_write,
2027
.run = mmc_test_align_write,
2028
.cleanup = mmc_test_cleanup,
2029
},
2030
2031
{
2032
.name = "Badly aligned read",
2033
.prepare = mmc_test_prepare_read,
2034
.run = mmc_test_align_read,
2035
.cleanup = mmc_test_cleanup,
2036
},
2037
2038
{
2039
.name = "Badly aligned multi-block write",
2040
.prepare = mmc_test_prepare_write,
2041
.run = mmc_test_align_multi_write,
2042
.cleanup = mmc_test_cleanup,
2043
},
2044
2045
{
2046
.name = "Badly aligned multi-block read",
2047
.prepare = mmc_test_prepare_read,
2048
.run = mmc_test_align_multi_read,
2049
.cleanup = mmc_test_cleanup,
2050
},
2051
2052
{
2053
.name = "Correct xfer_size at write (start failure)",
2054
.run = mmc_test_xfersize_write,
2055
},
2056
2057
{
2058
.name = "Correct xfer_size at read (start failure)",
2059
.run = mmc_test_xfersize_read,
2060
},
2061
2062
{
2063
.name = "Correct xfer_size at write (midway failure)",
2064
.run = mmc_test_multi_xfersize_write,
2065
},
2066
2067
{
2068
.name = "Correct xfer_size at read (midway failure)",
2069
.run = mmc_test_multi_xfersize_read,
2070
},
2071
2072
#ifdef CONFIG_HIGHMEM
2073
2074
{
2075
.name = "Highmem write",
2076
.prepare = mmc_test_prepare_write,
2077
.run = mmc_test_write_high,
2078
.cleanup = mmc_test_cleanup,
2079
},
2080
2081
{
2082
.name = "Highmem read",
2083
.prepare = mmc_test_prepare_read,
2084
.run = mmc_test_read_high,
2085
.cleanup = mmc_test_cleanup,
2086
},
2087
2088
{
2089
.name = "Multi-block highmem write",
2090
.prepare = mmc_test_prepare_write,
2091
.run = mmc_test_multi_write_high,
2092
.cleanup = mmc_test_cleanup,
2093
},
2094
2095
{
2096
.name = "Multi-block highmem read",
2097
.prepare = mmc_test_prepare_read,
2098
.run = mmc_test_multi_read_high,
2099
.cleanup = mmc_test_cleanup,
2100
},
2101
2102
#else
2103
2104
{
2105
.name = "Highmem write",
2106
.run = mmc_test_no_highmem,
2107
},
2108
2109
{
2110
.name = "Highmem read",
2111
.run = mmc_test_no_highmem,
2112
},
2113
2114
{
2115
.name = "Multi-block highmem write",
2116
.run = mmc_test_no_highmem,
2117
},
2118
2119
{
2120
.name = "Multi-block highmem read",
2121
.run = mmc_test_no_highmem,
2122
},
2123
2124
#endif /* CONFIG_HIGHMEM */
2125
2126
{
2127
.name = "Best-case read performance",
2128
.prepare = mmc_test_area_prepare_fill,
2129
.run = mmc_test_best_read_performance,
2130
.cleanup = mmc_test_area_cleanup,
2131
},
2132
2133
{
2134
.name = "Best-case write performance",
2135
.prepare = mmc_test_area_prepare_erase,
2136
.run = mmc_test_best_write_performance,
2137
.cleanup = mmc_test_area_cleanup,
2138
},
2139
2140
{
2141
.name = "Best-case read performance into scattered pages",
2142
.prepare = mmc_test_area_prepare_fill,
2143
.run = mmc_test_best_read_perf_max_scatter,
2144
.cleanup = mmc_test_area_cleanup,
2145
},
2146
2147
{
2148
.name = "Best-case write performance from scattered pages",
2149
.prepare = mmc_test_area_prepare_erase,
2150
.run = mmc_test_best_write_perf_max_scatter,
2151
.cleanup = mmc_test_area_cleanup,
2152
},
2153
2154
{
2155
.name = "Single read performance by transfer size",
2156
.prepare = mmc_test_area_prepare_fill,
2157
.run = mmc_test_profile_read_perf,
2158
.cleanup = mmc_test_area_cleanup,
2159
},
2160
2161
{
2162
.name = "Single write performance by transfer size",
2163
.prepare = mmc_test_area_prepare,
2164
.run = mmc_test_profile_write_perf,
2165
.cleanup = mmc_test_area_cleanup,
2166
},
2167
2168
{
2169
.name = "Single trim performance by transfer size",
2170
.prepare = mmc_test_area_prepare_fill,
2171
.run = mmc_test_profile_trim_perf,
2172
.cleanup = mmc_test_area_cleanup,
2173
},
2174
2175
{
2176
.name = "Consecutive read performance by transfer size",
2177
.prepare = mmc_test_area_prepare_fill,
2178
.run = mmc_test_profile_seq_read_perf,
2179
.cleanup = mmc_test_area_cleanup,
2180
},
2181
2182
{
2183
.name = "Consecutive write performance by transfer size",
2184
.prepare = mmc_test_area_prepare,
2185
.run = mmc_test_profile_seq_write_perf,
2186
.cleanup = mmc_test_area_cleanup,
2187
},
2188
2189
{
2190
.name = "Consecutive trim performance by transfer size",
2191
.prepare = mmc_test_area_prepare,
2192
.run = mmc_test_profile_seq_trim_perf,
2193
.cleanup = mmc_test_area_cleanup,
2194
},
2195
2196
{
2197
.name = "Random read performance by transfer size",
2198
.prepare = mmc_test_area_prepare,
2199
.run = mmc_test_random_read_perf,
2200
.cleanup = mmc_test_area_cleanup,
2201
},
2202
2203
{
2204
.name = "Random write performance by transfer size",
2205
.prepare = mmc_test_area_prepare,
2206
.run = mmc_test_random_write_perf,
2207
.cleanup = mmc_test_area_cleanup,
2208
},
2209
2210
{
2211
.name = "Large sequential read into scattered pages",
2212
.prepare = mmc_test_area_prepare,
2213
.run = mmc_test_large_seq_read_perf,
2214
.cleanup = mmc_test_area_cleanup,
2215
},
2216
2217
{
2218
.name = "Large sequential write from scattered pages",
2219
.prepare = mmc_test_area_prepare,
2220
.run = mmc_test_large_seq_write_perf,
2221
.cleanup = mmc_test_area_cleanup,
2222
},
2223
2224
};

static DEFINE_MUTEX(mmc_test_lock);

static LIST_HEAD(mmc_test_result);

static void mmc_test_run(struct mmc_test_card *test, int testcase)
{
        int i, ret;

        printk(KERN_INFO "%s: Starting tests of card %s...\n",
                mmc_hostname(test->card->host), mmc_card_id(test->card));

        mmc_claim_host(test->card->host);

        for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) {
                struct mmc_test_general_result *gr;

                if (testcase && ((i + 1) != testcase))
                        continue;

                printk(KERN_INFO "%s: Test case %d. %s...\n",
                        mmc_hostname(test->card->host), i + 1,
                        mmc_test_cases[i].name);

                if (mmc_test_cases[i].prepare) {
                        ret = mmc_test_cases[i].prepare(test);
                        if (ret) {
                                printk(KERN_INFO "%s: Result: Prepare "
                                        "stage failed! (%d)\n",
                                        mmc_hostname(test->card->host),
                                        ret);
                                continue;
                        }
                }

                gr = kzalloc(sizeof(struct mmc_test_general_result),
                        GFP_KERNEL);
                if (gr) {
                        INIT_LIST_HEAD(&gr->tr_lst);

                        /* Assign the data we already know */
                        gr->card = test->card;
                        gr->testcase = i;

                        /* Append the container to the global list */
                        list_add_tail(&gr->link, &mmc_test_result);

                        /*
                         * Save a pointer to the created container in our
                         * private structure.
                         */
                        test->gr = gr;
                }

                ret = mmc_test_cases[i].run(test);
                switch (ret) {
                case RESULT_OK:
                        printk(KERN_INFO "%s: Result: OK\n",
                                mmc_hostname(test->card->host));
                        break;
                case RESULT_FAIL:
                        printk(KERN_INFO "%s: Result: FAILED\n",
                                mmc_hostname(test->card->host));
                        break;
                case RESULT_UNSUP_HOST:
                        printk(KERN_INFO "%s: Result: UNSUPPORTED "
                                "(by host)\n",
                                mmc_hostname(test->card->host));
                        break;
                case RESULT_UNSUP_CARD:
                        printk(KERN_INFO "%s: Result: UNSUPPORTED "
                                "(by card)\n",
                                mmc_hostname(test->card->host));
                        break;
                default:
                        printk(KERN_INFO "%s: Result: ERROR (%d)\n",
                                mmc_hostname(test->card->host), ret);
                }

                /* Save the result */
                if (gr)
                        gr->result = ret;

                if (mmc_test_cases[i].cleanup) {
                        ret = mmc_test_cases[i].cleanup(test);
                        if (ret) {
                                printk(KERN_INFO "%s: Warning: Cleanup "
                                        "stage failed! (%d)\n",
                                        mmc_hostname(test->card->host),
                                        ret);
                        }
                }
        }

        mmc_release_host(test->card->host);

        printk(KERN_INFO "%s: Tests completed.\n",
                mmc_hostname(test->card->host));
}
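
/*
 * Illustrative kernel log produced by mmc_test_run(); "mmc0" and
 * "mmc0:0001" are hypothetical names as reported by mmc_hostname() and
 * mmc_card_id() on a particular system:
 *
 *   mmc0: Starting tests of card mmc0:0001...
 *   mmc0: Test case <n>. <mmc_test_cases[n - 1].name>...
 *   mmc0: Result: OK
 *   mmc0: Tests completed.
 */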

static void mmc_test_free_result(struct mmc_card *card)
{
        struct mmc_test_general_result *gr, *grs;

        mutex_lock(&mmc_test_lock);

        list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
                struct mmc_test_transfer_result *tr, *trs;

                if (card && gr->card != card)
                        continue;

                list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
                        list_del(&tr->link);
                        kfree(tr);
                }

                list_del(&gr->link);
                kfree(gr);
        }

        mutex_unlock(&mmc_test_lock);
}

static LIST_HEAD(mmc_test_file_test);

static int mtf_test_show(struct seq_file *sf, void *data)
{
        struct mmc_card *card = (struct mmc_card *)sf->private;
        struct mmc_test_general_result *gr;

        mutex_lock(&mmc_test_lock);

        list_for_each_entry(gr, &mmc_test_result, link) {
                struct mmc_test_transfer_result *tr;

                if (gr->card != card)
                        continue;

                seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);

                list_for_each_entry(tr, &gr->tr_lst, link) {
                        seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n",
                                tr->count, tr->sectors,
                                (unsigned long)tr->ts.tv_sec,
                                (unsigned long)tr->ts.tv_nsec,
                                tr->rate, tr->iops / 100, tr->iops % 100);
                }
        }

        mutex_unlock(&mmc_test_lock);

        return 0;
}
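
/*
 * Results file format, as emitted by mtf_test_show() above: one
 * "Test <case>: <result>" line per recorded run (<result> is the
 * RESULT_* code), each followed by one line per transfer measurement:
 *
 *   <count> <sectors> <seconds>.<nanoseconds> <rate> <iops>
 *
 * where <iops> is I/O operations per second stored scaled by 100, hence
 * the fixed-point "%u.%02u" printing.
 */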

static int mtf_test_open(struct inode *inode, struct file *file)
{
        return single_open(file, mtf_test_show, inode->i_private);
}

static ssize_t mtf_test_write(struct file *file, const char __user *buf,
        size_t count, loff_t *pos)
{
        struct seq_file *sf = (struct seq_file *)file->private_data;
        struct mmc_card *card = (struct mmc_card *)sf->private;
        struct mmc_test_card *test;
        char lbuf[12];
        long testcase;

        if (count >= sizeof(lbuf))
                return -EINVAL;

        if (copy_from_user(lbuf, buf, count))
                return -EFAULT;
        lbuf[count] = '\0';

        if (strict_strtol(lbuf, 10, &testcase))
                return -EINVAL;

        test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
        if (!test)
                return -ENOMEM;

        /*
         * Remove all results associated with the given card, so that only
         * data from the last run is kept.
         */
        mmc_test_free_result(card);

        test->card = card;

        test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
#ifdef CONFIG_HIGHMEM
        test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
#endif

#ifdef CONFIG_HIGHMEM
        if (test->buffer && test->highmem) {
#else
        if (test->buffer) {
#endif
                mutex_lock(&mmc_test_lock);
                mmc_test_run(test, testcase);
                mutex_unlock(&mmc_test_lock);
        }

#ifdef CONFIG_HIGHMEM
        __free_pages(test->highmem, BUFFER_ORDER);
#endif
        kfree(test->buffer);
        kfree(test);

        return count;
}
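
/*
 * Note that the write above is synchronous: it returns only once every
 * selected test case has finished, because mmc_test_run() executes with
 * mmc_test_lock held and claims the host for the duration of the run.
 */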

static const struct file_operations mmc_test_fops_test = {
        .open = mtf_test_open,
        .read = seq_read,
        .write = mtf_test_write,
        .llseek = seq_lseek,
        .release = single_release,
};

static void mmc_test_free_file_test(struct mmc_card *card)
{
        struct mmc_test_dbgfs_file *df, *dfs;

        mutex_lock(&mmc_test_lock);

        list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
                if (card && df->card != card)
                        continue;
                debugfs_remove(df->file);
                list_del(&df->link);
                kfree(df);
        }

        mutex_unlock(&mmc_test_lock);
}

static int mmc_test_register_file_test(struct mmc_card *card)
{
        struct dentry *file = NULL;
        struct mmc_test_dbgfs_file *df;
        int ret = 0;

        mutex_lock(&mmc_test_lock);

        if (card->debugfs_root)
                file = debugfs_create_file("test", S_IWUSR | S_IRUGO,
                        card->debugfs_root, card, &mmc_test_fops_test);

        if (IS_ERR_OR_NULL(file)) {
                dev_err(&card->dev,
                        "Can't create file. Perhaps debugfs is disabled.\n");
                ret = -ENODEV;
                goto err;
        }

        df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
        if (!df) {
                debugfs_remove(file);
                dev_err(&card->dev,
                        "Can't allocate memory for internal usage.\n");
                ret = -ENOMEM;
                goto err;
        }

        df->card = card;
        df->file = file;

        list_add(&df->link, &mmc_test_file_test);

err:
        mutex_unlock(&mmc_test_lock);

        return ret;
}

static int mmc_test_probe(struct mmc_card *card)
{
        int ret;

        if (!mmc_card_mmc(card) && !mmc_card_sd(card))
                return -ENODEV;

        ret = mmc_test_register_file_test(card);
        if (ret)
                return ret;

        dev_info(&card->dev, "Card claimed for testing.\n");

        return 0;
}

static void mmc_test_remove(struct mmc_card *card)
{
        mmc_test_free_result(card);
        mmc_test_free_file_test(card);
}

static struct mmc_driver mmc_driver = {
        .drv = {
                .name = "mmc_test",
        },
        .probe = mmc_test_probe,
        .remove = mmc_test_remove,
};

static int __init mmc_test_init(void)
{
        return mmc_register_driver(&mmc_driver);
}

static void __exit mmc_test_exit(void)
{
        /* Clear stale data in case a card is still plugged in */
        mmc_test_free_result(NULL);
        mmc_test_free_file_test(NULL);

        mmc_unregister_driver(&mmc_driver);
}

module_init(mmc_test_init);
module_exit(mmc_test_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
MODULE_AUTHOR("Pierre Ossman");