GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/mmc/card/block.c
/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author: Andrew Christian
 *         28 May 2002
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>

#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/system.h>
#include <asm/uaccess.h>

#include "queue.h"
MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."

#define INAND_CMD38_ARG_EXT_CSD 113
#define INAND_CMD38_ARG_ERASE 0x00
#define INAND_CMD38_ARG_TRIM 0x01
#define INAND_CMD38_ARG_SECERASE 0x80
#define INAND_CMD38_ARG_SECTRIM1 0x81
#define INAND_CMD38_ARG_SECTRIM2 0x88

static DEFINE_MUTEX(block_mutex);
/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;

/*
 * We've only got one major, so number of mmcblk devices is
 * limited to 256 / number of minors per device.
 */
static int max_devices;

/* 256 minors, so at most 256 separate devices */
static DECLARE_BITMAP(dev_use, 256);
static DECLARE_BITMAP(name_use, 256);
/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
        spinlock_t lock;
        struct gendisk *disk;
        struct mmc_queue queue;
        struct list_head part;

        unsigned int flags;
#define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */

        unsigned int usage;
        unsigned int read_only;
        unsigned int part_type;
        unsigned int name_idx;

        /*
         * Only set in main mmc_blk_data associated
         * with mmc_card with mmc_set_drvdata, and keeps
         * track of the current selected device partition.
         */
        unsigned int part_curr;
        struct device_attribute force_ro;
};
static DEFINE_MUTEX(open_lock);

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");

static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
        struct mmc_blk_data *md;

        mutex_lock(&open_lock);
        md = disk->private_data;
        if (md && md->usage == 0)
                md = NULL;
        if (md)
                md->usage++;
        mutex_unlock(&open_lock);

        return md;
}
static inline int mmc_get_devidx(struct gendisk *disk)
{
        int devmaj = MAJOR(disk_devt(disk));
        int devidx = MINOR(disk_devt(disk)) / perdev_minors;

        if (!devmaj)
                devidx = disk->first_minor / perdev_minors;
        return devidx;
}
static void mmc_blk_put(struct mmc_blk_data *md)
{
        mutex_lock(&open_lock);
        md->usage--;
        if (md->usage == 0) {
                int devidx = mmc_get_devidx(md->disk);
                blk_cleanup_queue(md->queue.queue);

                __clear_bit(devidx, dev_use);

                put_disk(md->disk);
                kfree(md);
        }
        mutex_unlock(&open_lock);
}
152
153
static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
154
char *buf)
155
{
156
int ret;
157
struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
158
159
ret = snprintf(buf, PAGE_SIZE, "%d",
160
get_disk_ro(dev_to_disk(dev)) ^
161
md->read_only);
162
mmc_blk_put(md);
163
return ret;
164
}
165
166
static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
167
const char *buf, size_t count)
168
{
169
int ret;
170
char *end;
171
struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
172
unsigned long set = simple_strtoul(buf, &end, 0);
173
if (end == buf) {
174
ret = -EINVAL;
175
goto out;
176
}
177
178
set_disk_ro(dev_to_disk(dev), set || md->read_only);
179
ret = count;
180
out:
181
mmc_blk_put(md);
182
return ret;
183
}
184
185
static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
186
{
187
struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
188
int ret = -ENXIO;
189
190
mutex_lock(&block_mutex);
191
if (md) {
192
if (md->usage == 2)
193
check_disk_change(bdev);
194
ret = 0;
195
196
if ((mode & FMODE_WRITE) && md->read_only) {
197
mmc_blk_put(md);
198
ret = -EROFS;
199
}
200
}
201
mutex_unlock(&block_mutex);
202
203
return ret;
204
}
205
206
static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
207
{
208
struct mmc_blk_data *md = disk->private_data;
209
210
mutex_lock(&block_mutex);
211
mmc_blk_put(md);
212
mutex_unlock(&block_mutex);
213
return 0;
214
}
215
216
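/* MMC/SD media has no real CHS geometry; report a fake layout of 4 heads
 * and 16 sectors per track (cylinders derived from capacity) so that
 * partitioning tools get something plausible to work with. */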
static int
217
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
218
{
219
geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
220
geo->heads = 4;
221
geo->sectors = 16;
222
return 0;
223
}
224
225
struct mmc_blk_ioc_data {
226
struct mmc_ioc_cmd ic;
227
unsigned char *buf;
228
u64 buf_bytes;
229
};
230
231
static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
232
struct mmc_ioc_cmd __user *user)
233
{
234
struct mmc_blk_ioc_data *idata;
235
int err;
236
237
idata = kzalloc(sizeof(*idata), GFP_KERNEL);
238
if (!idata) {
239
err = -ENOMEM;
240
goto out;
241
}
242
243
if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
244
err = -EFAULT;
245
goto idata_err;
246
}
247
248
idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
249
if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
250
err = -EOVERFLOW;
251
goto idata_err;
252
}
253
254
idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
255
if (!idata->buf) {
256
err = -ENOMEM;
257
goto idata_err;
258
}
259
260
if (copy_from_user(idata->buf, (void __user *)(unsigned long)
261
idata->ic.data_ptr, idata->buf_bytes)) {
262
err = -EFAULT;
263
goto copy_err;
264
}
265
266
return idata;
267
268
copy_err:
269
kfree(idata->buf);
270
idata_err:
271
kfree(idata);
272
out:
273
return ERR_PTR(err);
274
}
275
276
static int mmc_blk_ioctl_cmd(struct block_device *bdev,
277
struct mmc_ioc_cmd __user *ic_ptr)
278
{
279
struct mmc_blk_ioc_data *idata;
280
struct mmc_blk_data *md;
281
struct mmc_card *card;
282
struct mmc_command cmd = {0};
283
struct mmc_data data = {0};
284
struct mmc_request mrq = {0};
285
struct scatterlist sg;
286
int err;
287
288
/*
289
* The caller must have CAP_SYS_RAWIO, and must be calling this on the
290
* whole block device, not on a partition. This prevents overspray
291
* between sibling partitions.
292
*/
293
if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
294
return -EPERM;
295
296
idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
297
if (IS_ERR(idata))
298
return PTR_ERR(idata);
299
300
cmd.opcode = idata->ic.opcode;
301
cmd.arg = idata->ic.arg;
302
cmd.flags = idata->ic.flags;
303
304
data.sg = &sg;
305
data.sg_len = 1;
306
data.blksz = idata->ic.blksz;
307
data.blocks = idata->ic.blocks;
308
309
sg_init_one(data.sg, idata->buf, idata->buf_bytes);
310
311
if (idata->ic.write_flag)
312
data.flags = MMC_DATA_WRITE;
313
else
314
data.flags = MMC_DATA_READ;
315
316
mrq.cmd = &cmd;
317
mrq.data = &data;
318
319
md = mmc_blk_get(bdev->bd_disk);
320
if (!md) {
321
err = -EINVAL;
322
goto cmd_done;
323
}
324
325
card = md->queue.card;
326
if (IS_ERR(card)) {
327
err = PTR_ERR(card);
328
goto cmd_done;
329
}
330
331
mmc_claim_host(card->host);
332
333
if (idata->ic.is_acmd) {
334
err = mmc_app_cmd(card->host, card);
335
if (err)
336
goto cmd_rel_host;
337
}
338
339
/* data.flags must already be set before doing this. */
340
mmc_set_data_timeout(&data, card);
341
/* Allow overriding the timeout_ns for empirical tuning. */
342
if (idata->ic.data_timeout_ns)
343
data.timeout_ns = idata->ic.data_timeout_ns;
344
345
if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
346
/*
347
* Pretend this is a data transfer and rely on the host driver
348
* to compute timeout. When all host drivers support
349
* cmd.cmd_timeout for R1B, this can be changed to:
350
*
351
* mrq.data = NULL;
352
* cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
353
*/
354
data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
355
}
356
357
mmc_wait_for_req(card->host, &mrq);
358
359
if (cmd.error) {
360
dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
361
__func__, cmd.error);
362
err = cmd.error;
363
goto cmd_rel_host;
364
}
365
if (data.error) {
366
dev_err(mmc_dev(card->host), "%s: data error %d\n",
367
__func__, data.error);
368
err = data.error;
369
goto cmd_rel_host;
370
}
371
372
/*
373
* According to the SD specs, some commands require a delay after
374
* issuing the command.
375
*/
376
if (idata->ic.postsleep_min_us)
377
usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
378
379
if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
380
err = -EFAULT;
381
goto cmd_rel_host;
382
}
383
384
if (!idata->ic.write_flag) {
385
if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
386
idata->buf, idata->buf_bytes)) {
387
err = -EFAULT;
388
goto cmd_rel_host;
389
}
390
}
391
392
cmd_rel_host:
393
mmc_release_host(card->host);
394
395
cmd_done:
396
mmc_blk_put(md);
397
kfree(idata->buf);
398
kfree(idata);
399
return err;
400
}
401
402
static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
403
unsigned int cmd, unsigned long arg)
404
{
405
int ret = -EINVAL;
406
if (cmd == MMC_IOC_CMD)
407
ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
408
return ret;
409
}
410
411
#ifdef CONFIG_COMPAT
412
static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
413
unsigned int cmd, unsigned long arg)
414
{
415
return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
416
}
417
#endif
418
419
static const struct block_device_operations mmc_bdops = {
420
.open = mmc_blk_open,
421
.release = mmc_blk_release,
422
.getgeo = mmc_blk_getgeo,
423
.owner = THIS_MODULE,
424
.ioctl = mmc_blk_ioctl,
425
#ifdef CONFIG_COMPAT
426
.compat_ioctl = mmc_blk_compat_ioctl,
427
#endif
428
};
429
430
struct mmc_blk_request {
431
struct mmc_request mrq;
432
struct mmc_command sbc;
433
struct mmc_command cmd;
434
struct mmc_command stop;
435
struct mmc_data data;
436
};
437
438
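/* Switch the card to the hardware partition (e.g. a boot area) backing this
 * mmc_blk_data, updating EXT_CSD PART_CONFIG when it differs from the
 * currently selected partition. */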
static inline int mmc_blk_part_switch(struct mmc_card *card,
439
struct mmc_blk_data *md)
440
{
441
int ret;
442
struct mmc_blk_data *main_md = mmc_get_drvdata(card);
443
if (main_md->part_curr == md->part_type)
444
return 0;
445
446
if (mmc_card_mmc(card)) {
447
card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
448
card->ext_csd.part_config |= md->part_type;
449
450
ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
451
EXT_CSD_PART_CONFIG, card->ext_csd.part_config,
452
card->ext_csd.part_time);
453
if (ret)
454
return ret;
455
}
456
457
main_md->part_curr = md->part_type;
458
return 0;
459
}
460
461
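/* Ask an SD card, via ACMD22 (SEND_NUM_WR_BLKS), how many blocks were
 * written successfully; returns (u32)-1 on any error. */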
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
462
{
463
int err;
464
u32 result;
465
__be32 *blocks;
466
467
struct mmc_request mrq = {0};
468
struct mmc_command cmd = {0};
469
struct mmc_data data = {0};
470
unsigned int timeout_us;
471
472
struct scatterlist sg;
473
474
cmd.opcode = MMC_APP_CMD;
475
cmd.arg = card->rca << 16;
476
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
477
478
err = mmc_wait_for_cmd(card->host, &cmd, 0);
479
if (err)
480
return (u32)-1;
481
if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
482
return (u32)-1;
483
484
memset(&cmd, 0, sizeof(struct mmc_command));
485
486
cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
487
cmd.arg = 0;
488
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
489
490
data.timeout_ns = card->csd.tacc_ns * 100;
491
data.timeout_clks = card->csd.tacc_clks * 100;
492
493
timeout_us = data.timeout_ns / 1000;
494
timeout_us += data.timeout_clks * 1000 /
495
(card->host->ios.clock / 1000);
496
497
if (timeout_us > 100000) {
498
data.timeout_ns = 100000000;
499
data.timeout_clks = 0;
500
}
501
502
data.blksz = 4;
503
data.blocks = 1;
504
data.flags = MMC_DATA_READ;
505
data.sg = &sg;
506
data.sg_len = 1;
507
508
mrq.cmd = &cmd;
509
mrq.data = &data;
510
511
blocks = kmalloc(4, GFP_KERNEL);
512
if (!blocks)
513
return (u32)-1;
514
515
sg_init_one(&sg, blocks, 4);
516
517
mmc_wait_for_req(card->host, &mrq);
518
519
result = ntohl(*blocks);
520
kfree(blocks);
521
522
if (cmd.error || data.error)
523
result = (u32)-1;
524
525
return result;
526
}
527
528
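/* Read the card's R1 status word with CMD13 (SEND_STATUS). */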
static u32 get_card_status(struct mmc_card *card, struct request *req)
529
{
530
struct mmc_command cmd = {0};
531
int err;
532
533
cmd.opcode = MMC_SEND_STATUS;
534
if (!mmc_host_is_spi(card->host))
535
cmd.arg = card->rca << 16;
536
cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
537
err = mmc_wait_for_cmd(card->host, &cmd, 0);
538
if (err)
539
printk(KERN_ERR "%s: error %d sending status command",
540
req->rq_disk->disk_name, err);
541
return cmd.resp[0];
542
}
543
544
static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
545
{
546
struct mmc_blk_data *md = mq->data;
547
struct mmc_card *card = md->queue.card;
548
unsigned int from, nr, arg;
549
int err = 0;
550
551
if (!mmc_can_erase(card)) {
552
err = -EOPNOTSUPP;
553
goto out;
554
}
555
556
from = blk_rq_pos(req);
557
nr = blk_rq_sectors(req);
558
559
if (mmc_can_trim(card))
560
arg = MMC_TRIM_ARG;
561
else
562
arg = MMC_ERASE_ARG;
563
564
if (card->quirks & MMC_QUIRK_INAND_CMD38) {
565
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
566
INAND_CMD38_ARG_EXT_CSD,
567
arg == MMC_TRIM_ARG ?
568
INAND_CMD38_ARG_TRIM :
569
INAND_CMD38_ARG_ERASE,
570
0);
571
if (err)
572
goto out;
573
}
574
err = mmc_erase(card, from, nr, arg);
575
out:
576
spin_lock_irq(&md->lock);
577
__blk_end_request(req, err, blk_rq_bytes(req));
578
spin_unlock_irq(&md->lock);
579
580
return err ? 0 : 1;
581
}
582
583
static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
584
struct request *req)
585
{
586
struct mmc_blk_data *md = mq->data;
587
struct mmc_card *card = md->queue.card;
588
unsigned int from, nr, arg;
589
int err = 0;
590
591
if (!mmc_can_secure_erase_trim(card)) {
592
err = -EOPNOTSUPP;
593
goto out;
594
}
595
596
from = blk_rq_pos(req);
597
nr = blk_rq_sectors(req);
598
599
if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
600
arg = MMC_SECURE_TRIM1_ARG;
601
else
602
arg = MMC_SECURE_ERASE_ARG;
603
604
if (card->quirks & MMC_QUIRK_INAND_CMD38) {
605
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
606
INAND_CMD38_ARG_EXT_CSD,
607
arg == MMC_SECURE_TRIM1_ARG ?
608
INAND_CMD38_ARG_SECTRIM1 :
609
INAND_CMD38_ARG_SECERASE,
610
0);
611
if (err)
612
goto out;
613
}
614
err = mmc_erase(card, from, nr, arg);
615
if (!err && arg == MMC_SECURE_TRIM1_ARG) {
616
if (card->quirks & MMC_QUIRK_INAND_CMD38) {
617
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
618
INAND_CMD38_ARG_EXT_CSD,
619
INAND_CMD38_ARG_SECTRIM2,
620
0);
621
if (err)
622
goto out;
623
}
624
err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
625
}
626
out:
627
spin_lock_irq(&md->lock);
628
__blk_end_request(req, err, blk_rq_bytes(req));
629
spin_unlock_irq(&md->lock);
630
631
return err ? 0 : 1;
632
}
633
634
static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
635
{
636
struct mmc_blk_data *md = mq->data;
637
638
/*
639
* No-op, only service this because we need REQ_FUA for reliable
640
* writes.
641
*/
642
spin_lock_irq(&md->lock);
643
__blk_end_request_all(req, 0);
644
spin_unlock_irq(&md->lock);
645
646
return 1;
647
}
648
649
/*
650
* Reformat current write as a reliable write, supporting
651
* both legacy and the enhanced reliable write MMC cards.
652
* In each transfer we'll handle only as much as a single
653
* reliable write can handle, thus finish the request in
654
* partial completions.
655
*/
656
static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
657
struct mmc_card *card,
658
struct request *req)
659
{
660
if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
661
/* Legacy mode imposes restrictions on transfers. */
662
if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
663
brq->data.blocks = 1;
664
665
if (brq->data.blocks > card->ext_csd.rel_sectors)
666
brq->data.blocks = card->ext_csd.rel_sectors;
667
else if (brq->data.blocks < card->ext_csd.rel_sectors)
668
brq->data.blocks = 1;
669
}
670
}
671
672
static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
673
{
674
struct mmc_blk_data *md = mq->data;
675
struct mmc_card *card = md->queue.card;
676
struct mmc_blk_request brq;
677
int ret = 1, disable_multi = 0;
678
679
/*
680
* Reliable writes are used to implement Forced Unit Access and
681
* REQ_META accesses, and are supported only on MMCs.
682
*/
683
bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
684
(req->cmd_flags & REQ_META)) &&
685
(rq_data_dir(req) == WRITE) &&
686
(md->flags & MMC_BLK_REL_WR);
687
688
do {
689
struct mmc_command cmd = {0};
690
u32 readcmd, writecmd, status = 0;
691
692
memset(&brq, 0, sizeof(struct mmc_blk_request));
693
brq.mrq.cmd = &brq.cmd;
694
brq.mrq.data = &brq.data;
695
696
brq.cmd.arg = blk_rq_pos(req);
697
if (!mmc_card_blockaddr(card))
698
brq.cmd.arg <<= 9;
699
brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
700
brq.data.blksz = 512;
701
brq.stop.opcode = MMC_STOP_TRANSMISSION;
702
brq.stop.arg = 0;
703
brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
704
brq.data.blocks = blk_rq_sectors(req);
705
706
/*
707
* The block layer doesn't support all sector count
708
* restrictions, so we need to be prepared for too big
709
* requests.
710
*/
711
if (brq.data.blocks > card->host->max_blk_count)
712
brq.data.blocks = card->host->max_blk_count;
713
714
/*
715
* After a read error, we redo the request one sector at a time
716
* in order to accurately determine which sectors can be read
717
* successfully.
718
*/
719
if (disable_multi && brq.data.blocks > 1)
720
brq.data.blocks = 1;
721
722
if (brq.data.blocks > 1 || do_rel_wr) {
723
/* SPI multiblock writes terminate using a special
724
* token, not a STOP_TRANSMISSION request.
725
*/
726
if (!mmc_host_is_spi(card->host) ||
727
rq_data_dir(req) == READ)
728
brq.mrq.stop = &brq.stop;
729
readcmd = MMC_READ_MULTIPLE_BLOCK;
730
writecmd = MMC_WRITE_MULTIPLE_BLOCK;
731
} else {
732
brq.mrq.stop = NULL;
733
readcmd = MMC_READ_SINGLE_BLOCK;
734
writecmd = MMC_WRITE_BLOCK;
735
}
736
if (rq_data_dir(req) == READ) {
737
brq.cmd.opcode = readcmd;
738
brq.data.flags |= MMC_DATA_READ;
739
} else {
740
brq.cmd.opcode = writecmd;
741
brq.data.flags |= MMC_DATA_WRITE;
742
}
743
744
if (do_rel_wr)
745
mmc_apply_rel_rw(&brq, card, req);
746
747
/*
748
* Pre-defined multi-block transfers are preferable to
749
* open ended-ones (and necessary for reliable writes).
750
* However, it is not sufficient to just send CMD23,
751
* and avoid the final CMD12, as on an error condition
752
* CMD12 (stop) needs to be sent anyway. This, coupled
753
* with Auto-CMD23 enhancements provided by some
754
* hosts, means that the complexity of dealing
755
* with this is best left to the host. If CMD23 is
756
* supported by card and host, we'll fill sbc in and let
757
* the host deal with handling it correctly. This means
758
* that for hosts that don't expose MMC_CAP_CMD23, no
759
* change of behavior will be observed.
760
*
761
* N.B: Some MMC cards experience perf degradation.
762
* We'll avoid using CMD23-bounded multiblock writes for
763
* these, while retaining features like reliable writes.
764
*/
765
766
if ((md->flags & MMC_BLK_CMD23) &&
767
mmc_op_multi(brq.cmd.opcode) &&
768
(do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
769
brq.sbc.opcode = MMC_SET_BLOCK_COUNT;
770
brq.sbc.arg = brq.data.blocks |
771
(do_rel_wr ? (1 << 31) : 0);
772
brq.sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
773
brq.mrq.sbc = &brq.sbc;
774
}
775
776
mmc_set_data_timeout(&brq.data, card);
777
778
brq.data.sg = mq->sg;
779
brq.data.sg_len = mmc_queue_map_sg(mq);
780
781
/*
782
* Adjust the sg list so it is the same size as the
783
* request.
784
*/
785
if (brq.data.blocks != blk_rq_sectors(req)) {
786
int i, data_size = brq.data.blocks << 9;
787
struct scatterlist *sg;
788
789
for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
790
data_size -= sg->length;
791
if (data_size <= 0) {
792
sg->length += data_size;
793
i++;
794
break;
795
}
796
}
797
brq.data.sg_len = i;
798
}
799
800
mmc_queue_bounce_pre(mq);
801
802
mmc_wait_for_req(card->host, &brq.mrq);
803
804
mmc_queue_bounce_post(mq);
805
806
/*
807
* Check for errors here, but don't jump to cmd_err
808
* until later as we need to wait for the card to leave
809
* programming mode even when things go wrong.
810
*/
811
if (brq.sbc.error || brq.cmd.error ||
812
brq.data.error || brq.stop.error) {
813
if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
814
/* Redo read one sector at a time */
815
printk(KERN_WARNING "%s: retrying using single "
816
"block read\n", req->rq_disk->disk_name);
817
disable_multi = 1;
818
continue;
819
}
820
status = get_card_status(card, req);
821
}
822
823
if (brq.sbc.error) {
824
printk(KERN_ERR "%s: error %d sending SET_BLOCK_COUNT "
825
"command, response %#x, card status %#x\n",
826
req->rq_disk->disk_name, brq.sbc.error,
827
brq.sbc.resp[0], status);
828
}
829
830
if (brq.cmd.error) {
831
printk(KERN_ERR "%s: error %d sending read/write "
832
"command, response %#x, card status %#x\n",
833
req->rq_disk->disk_name, brq.cmd.error,
834
brq.cmd.resp[0], status);
835
}
836
837
if (brq.data.error) {
838
if (brq.data.error == -ETIMEDOUT && brq.mrq.stop)
839
/* 'Stop' response contains card status */
840
status = brq.mrq.stop->resp[0];
841
printk(KERN_ERR "%s: error %d transferring data,"
842
" sector %u, nr %u, card status %#x\n",
843
req->rq_disk->disk_name, brq.data.error,
844
(unsigned)blk_rq_pos(req),
845
(unsigned)blk_rq_sectors(req), status);
846
}
847
848
if (brq.stop.error) {
849
printk(KERN_ERR "%s: error %d sending stop command, "
850
"response %#x, card status %#x\n",
851
req->rq_disk->disk_name, brq.stop.error,
852
brq.stop.resp[0], status);
853
}
854
855
if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
856
do {
857
int err;
858
859
cmd.opcode = MMC_SEND_STATUS;
860
cmd.arg = card->rca << 16;
861
cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
862
err = mmc_wait_for_cmd(card->host, &cmd, 5);
863
if (err) {
864
printk(KERN_ERR "%s: error %d requesting status\n",
865
req->rq_disk->disk_name, err);
866
goto cmd_err;
867
}
868
/*
869
* Some cards mishandle the status bits,
870
* so make sure to check both the busy
871
* indication and the card state.
872
*/
873
} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
874
(R1_CURRENT_STATE(cmd.resp[0]) == 7));
875
876
#if 0
877
if (cmd.resp[0] & ~0x00000900)
878
printk(KERN_ERR "%s: status = %08x\n",
879
req->rq_disk->disk_name, cmd.resp[0]);
880
if (mmc_decode_status(cmd.resp))
881
goto cmd_err;
882
#endif
883
}
884
885
if (brq.cmd.error || brq.stop.error || brq.data.error) {
886
if (rq_data_dir(req) == READ) {
887
/*
888
* After an error, we redo I/O one sector at a
889
* time, so we only reach here after trying to
890
* read a single sector.
891
*/
892
spin_lock_irq(&md->lock);
893
ret = __blk_end_request(req, -EIO, brq.data.blksz);
894
spin_unlock_irq(&md->lock);
895
continue;
896
}
897
goto cmd_err;
898
}
899
900
/*
901
* A block was successfully transferred.
902
*/
903
spin_lock_irq(&md->lock);
904
ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
905
spin_unlock_irq(&md->lock);
906
} while (ret);
907
908
return 1;
909
910
cmd_err:
911
/*
912
* If this is an SD card and we're writing, we can first
913
* mark the known good sectors as ok.
914
*
915
* If the card is not SD, we can still ok written sectors
916
* as reported by the controller (which might be less than
917
* the real number of written sectors, but never more).
918
*/
919
if (mmc_card_sd(card)) {
920
u32 blocks;
921
922
blocks = mmc_sd_num_wr_blocks(card);
923
if (blocks != (u32)-1) {
924
spin_lock_irq(&md->lock);
925
ret = __blk_end_request(req, 0, blocks << 9);
926
spin_unlock_irq(&md->lock);
927
}
928
} else {
929
spin_lock_irq(&md->lock);
930
ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
931
spin_unlock_irq(&md->lock);
932
}
933
934
spin_lock_irq(&md->lock);
935
while (ret)
936
ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
937
spin_unlock_irq(&md->lock);
938
939
return 0;
940
}
941
942
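/* Top-level issue function: claim the host, switch to the partition this
 * block device maps to, then dispatch to the discard, secure-discard,
 * flush, or regular read/write handler. */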
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
943
{
944
int ret;
945
struct mmc_blk_data *md = mq->data;
946
struct mmc_card *card = md->queue.card;
947
948
mmc_claim_host(card->host);
949
ret = mmc_blk_part_switch(card, md);
950
if (ret) {
951
ret = 0;
952
goto out;
953
}
954
955
if (req->cmd_flags & REQ_DISCARD) {
956
if (req->cmd_flags & REQ_SECURE)
957
ret = mmc_blk_issue_secdiscard_rq(mq, req);
958
else
959
ret = mmc_blk_issue_discard_rq(mq, req);
960
} else if (req->cmd_flags & REQ_FLUSH) {
961
ret = mmc_blk_issue_flush(mq, req);
962
} else {
963
ret = mmc_blk_issue_rw_rq(mq, req);
964
}
965
966
out:
967
mmc_release_host(card->host);
968
return ret;
969
}
970
971
static inline int mmc_blk_readonly(struct mmc_card *card)
972
{
973
return mmc_card_readonly(card) ||
974
!(card->csd.cmdclass & CCC_BLOCK_WRITE);
975
}
976
977
static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
978
struct device *parent,
979
sector_t size,
980
bool default_ro,
981
const char *subname)
982
{
983
struct mmc_blk_data *md;
984
int devidx, ret;
985
986
devidx = find_first_zero_bit(dev_use, max_devices);
987
if (devidx >= max_devices)
988
return ERR_PTR(-ENOSPC);
989
__set_bit(devidx, dev_use);
990
991
md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
992
if (!md) {
993
ret = -ENOMEM;
994
goto out;
995
}
996
997
/*
998
* !subname implies we are creating main mmc_blk_data that will be
999
* associated with mmc_card with mmc_set_drvdata. Due to device
1000
* partitions, devidx will not coincide with a per-physical card
1001
* index anymore so we keep track of a name index.
1002
*/
1003
if (!subname) {
1004
md->name_idx = find_first_zero_bit(name_use, max_devices);
1005
__set_bit(md->name_idx, name_use);
1006
}
1007
else
1008
md->name_idx = ((struct mmc_blk_data *)
1009
dev_to_disk(parent)->private_data)->name_idx;
1010
1011
/*
1012
* Set the read-only status based on the supported commands
1013
* and the write protect switch.
1014
*/
1015
md->read_only = mmc_blk_readonly(card);
1016
1017
md->disk = alloc_disk(perdev_minors);
1018
if (md->disk == NULL) {
1019
ret = -ENOMEM;
1020
goto err_kfree;
1021
}
1022
1023
spin_lock_init(&md->lock);
1024
INIT_LIST_HEAD(&md->part);
1025
md->usage = 1;
1026
1027
ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
1028
if (ret)
1029
goto err_putdisk;
1030
1031
md->queue.issue_fn = mmc_blk_issue_rq;
1032
md->queue.data = md;
1033
1034
md->disk->major = MMC_BLOCK_MAJOR;
1035
md->disk->first_minor = devidx * perdev_minors;
1036
md->disk->fops = &mmc_bdops;
1037
md->disk->private_data = md;
1038
md->disk->queue = md->queue.queue;
1039
md->disk->driverfs_dev = parent;
1040
set_disk_ro(md->disk, md->read_only || default_ro);
1041
1042
/*
1043
* As discussed on lkml, GENHD_FL_REMOVABLE should:
1044
*
1045
* - be set for removable media with permanent block devices
1046
* - be unset for removable block devices with permanent media
1047
*
1048
* Since MMC block devices clearly fall under the second
1049
* case, we do not set GENHD_FL_REMOVABLE. Userspace
1050
* should use the block device creation/destruction hotplug
1051
* messages to tell when the card is present.
1052
*/
1053
1054
snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
1055
"mmcblk%d%s", md->name_idx, subname ? subname : "");
1056
1057
blk_queue_logical_block_size(md->queue.queue, 512);
1058
set_capacity(md->disk, size);
1059
1060
if (mmc_host_cmd23(card->host)) {
1061
if (mmc_card_mmc(card) ||
1062
(mmc_card_sd(card) &&
1063
card->scr.cmds & SD_SCR_CMD23_SUPPORT))
1064
md->flags |= MMC_BLK_CMD23;
1065
}
1066
1067
if (mmc_card_mmc(card) &&
1068
md->flags & MMC_BLK_CMD23 &&
1069
((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
1070
card->ext_csd.rel_sectors)) {
1071
md->flags |= MMC_BLK_REL_WR;
1072
blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
1073
}
1074
1075
return md;
1076
1077
err_putdisk:
1078
put_disk(md->disk);
1079
err_kfree:
1080
kfree(md);
1081
out:
1082
return ERR_PTR(ret);
1083
}
1084
1085
static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
1086
{
1087
sector_t size;
1088
struct mmc_blk_data *md;
1089
1090
if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
1091
/*
* The EXT_CSD sector count is in number of 512 byte
* sectors.
*/
1095
size = card->ext_csd.sectors;
1096
} else {
1097
/*
1098
* The CSD capacity field is in units of read_blkbits.
1099
* set_capacity takes units of 512 bytes.
1100
*/
1101
size = card->csd.capacity << (card->csd.read_blkbits - 9);
1102
}
1103
1104
md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL);
1105
return md;
1106
}
1107
1108
static int mmc_blk_alloc_part(struct mmc_card *card,
1109
struct mmc_blk_data *md,
1110
unsigned int part_type,
1111
sector_t size,
1112
bool default_ro,
1113
const char *subname)
1114
{
1115
char cap_str[10];
1116
struct mmc_blk_data *part_md;
1117
1118
part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
1119
subname);
1120
if (IS_ERR(part_md))
1121
return PTR_ERR(part_md);
1122
part_md->part_type = part_type;
1123
list_add(&part_md->part, &md->part);
1124
1125
string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
1126
cap_str, sizeof(cap_str));
1127
printk(KERN_INFO "%s: %s %s partition %u %s\n",
1128
part_md->disk->disk_name, mmc_card_id(card),
1129
mmc_card_name(card), part_md->part_type, cap_str);
1130
return 0;
1131
}
1132
1133
static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
1134
{
1135
int ret = 0;
1136
1137
if (!mmc_card_mmc(card))
1138
return 0;
1139
1140
if (card->ext_csd.boot_size) {
1141
ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT0,
1142
card->ext_csd.boot_size >> 9,
1143
true,
1144
"boot0");
1145
if (ret)
1146
return ret;
1147
ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT1,
1148
card->ext_csd.boot_size >> 9,
1149
true,
1150
"boot1");
1151
if (ret)
1152
return ret;
1153
}
1154
1155
return ret;
1156
}
1157
1158
static int
1159
mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
1160
{
1161
int err;
1162
1163
mmc_claim_host(card->host);
1164
err = mmc_set_blocklen(card, 512);
1165
mmc_release_host(card->host);
1166
1167
if (err) {
1168
printk(KERN_ERR "%s: unable to set block size to 512: %d\n",
1169
md->disk->disk_name, err);
1170
return -EINVAL;
1171
}
1172
1173
return 0;
1174
}
1175
1176
static void mmc_blk_remove_req(struct mmc_blk_data *md)
1177
{
1178
if (md) {
1179
if (md->disk->flags & GENHD_FL_UP) {
1180
device_remove_file(disk_to_dev(md->disk), &md->force_ro);
1181
1182
/* Stop new requests from getting into the queue */
1183
del_gendisk(md->disk);
1184
}
1185
1186
/* Then flush out any already in there */
1187
mmc_cleanup_queue(&md->queue);
1188
mmc_blk_put(md);
1189
}
1190
}
1191
1192
static void mmc_blk_remove_parts(struct mmc_card *card,
1193
struct mmc_blk_data *md)
1194
{
1195
struct list_head *pos, *q;
1196
struct mmc_blk_data *part_md;
1197
1198
__clear_bit(md->name_idx, name_use);
1199
list_for_each_safe(pos, q, &md->part) {
1200
part_md = list_entry(pos, struct mmc_blk_data, part);
1201
list_del(pos);
1202
mmc_blk_remove_req(part_md);
1203
}
1204
}
1205
1206
static int mmc_add_disk(struct mmc_blk_data *md)
1207
{
1208
int ret;
1209
1210
add_disk(md->disk);
1211
md->force_ro.show = force_ro_show;
1212
md->force_ro.store = force_ro_store;
1213
sysfs_attr_init(&md->force_ro.attr);
1214
md->force_ro.attr.name = "force_ro";
1215
md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
1216
ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
1217
if (ret)
1218
del_gendisk(md->disk);
1219
1220
return ret;
1221
}
1222
1223
static const struct mmc_fixup blk_fixups[] =
1224
{
1225
MMC_FIXUP("SEM02G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
1226
MMC_FIXUP("SEM04G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
1227
MMC_FIXUP("SEM08G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
1228
MMC_FIXUP("SEM16G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
1229
MMC_FIXUP("SEM32G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
1230
1231
/*
1232
* Some MMC cards experience performance degradation with CMD23
1233
* instead of CMD12-bounded multiblock transfers. For now we'll
1234
* black list what's bad...
1235
* - Certain Toshiba cards.
1236
*
1237
* N.B. This doesn't affect SD cards.
1238
*/
1239
MMC_FIXUP("MMC08G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
1240
MMC_QUIRK_BLK_NO_CMD23),
1241
MMC_FIXUP("MMC16G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
1242
MMC_QUIRK_BLK_NO_CMD23),
1243
MMC_FIXUP("MMC32G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
1244
MMC_QUIRK_BLK_NO_CMD23),
1245
END_FIXUP
1246
};
1247
1248
static int mmc_blk_probe(struct mmc_card *card)
1249
{
1250
struct mmc_blk_data *md, *part_md;
1251
int err;
1252
char cap_str[10];
1253
1254
/*
1255
* Check that the card supports the command class(es) we need.
1256
*/
1257
if (!(card->csd.cmdclass & CCC_BLOCK_READ))
1258
return -ENODEV;
1259
1260
md = mmc_blk_alloc(card);
1261
if (IS_ERR(md))
1262
return PTR_ERR(md);
1263
1264
err = mmc_blk_set_blksize(md, card);
1265
if (err)
1266
goto out;
1267
1268
string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
1269
cap_str, sizeof(cap_str));
1270
printk(KERN_INFO "%s: %s %s %s %s\n",
1271
md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
1272
cap_str, md->read_only ? "(ro)" : "");
1273
1274
if (mmc_blk_alloc_parts(card, md))
1275
goto out;
1276
1277
mmc_set_drvdata(card, md);
1278
mmc_fixup_device(card, blk_fixups);
1279
1280
if (mmc_add_disk(md))
1281
goto out;
1282
1283
list_for_each_entry(part_md, &md->part, part) {
1284
if (mmc_add_disk(part_md))
1285
goto out;
1286
}
1287
return 0;
1288
1289
out:
1290
mmc_blk_remove_parts(card, md);
1291
mmc_blk_remove_req(md);
1292
return err;
1293
}
1294
1295
static void mmc_blk_remove(struct mmc_card *card)
1296
{
1297
struct mmc_blk_data *md = mmc_get_drvdata(card);
1298
1299
mmc_blk_remove_parts(card, md);
1300
mmc_claim_host(card->host);
1301
mmc_blk_part_switch(card, md);
1302
mmc_release_host(card->host);
1303
mmc_blk_remove_req(md);
1304
mmc_set_drvdata(card, NULL);
1305
}
1306
1307
#ifdef CONFIG_PM
1308
static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
1309
{
1310
struct mmc_blk_data *part_md;
1311
struct mmc_blk_data *md = mmc_get_drvdata(card);
1312
1313
if (md) {
1314
mmc_queue_suspend(&md->queue);
1315
list_for_each_entry(part_md, &md->part, part) {
1316
mmc_queue_suspend(&part_md->queue);
1317
}
1318
}
1319
return 0;
1320
}
1321
1322
static int mmc_blk_resume(struct mmc_card *card)
1323
{
1324
struct mmc_blk_data *part_md;
1325
struct mmc_blk_data *md = mmc_get_drvdata(card);
1326
1327
if (md) {
1328
mmc_blk_set_blksize(md, card);
1329
1330
/*
1331
* Resume involves the card going into idle state,
1332
* so current partition is always the main one.
1333
*/
1334
md->part_curr = md->part_type;
1335
mmc_queue_resume(&md->queue);
1336
list_for_each_entry(part_md, &md->part, part) {
1337
mmc_queue_resume(&part_md->queue);
1338
}
1339
}
1340
return 0;
1341
}
1342
#else
1343
#define mmc_blk_suspend NULL
1344
#define mmc_blk_resume NULL
1345
#endif
1346
1347
static struct mmc_driver mmc_driver = {
1348
.drv = {
1349
.name = "mmcblk",
1350
},
1351
.probe = mmc_blk_probe,
1352
.remove = mmc_blk_remove,
1353
.suspend = mmc_blk_suspend,
1354
.resume = mmc_blk_resume,
1355
};
1356
1357
static int __init mmc_blk_init(void)
1358
{
1359
int res;
1360
1361
if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
1362
pr_info("mmcblk: using %d minors per device\n", perdev_minors);
1363
1364
max_devices = 256 / perdev_minors;
1365
1366
res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
1367
if (res)
1368
goto out;
1369
1370
res = mmc_register_driver(&mmc_driver);
1371
if (res)
1372
goto out2;
1373
1374
return 0;
1375
out2:
1376
unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
1377
out:
1378
return res;
1379
}
1380
1381
static void __exit mmc_blk_exit(void)
1382
{
1383
mmc_unregister_driver(&mmc_driver);
1384
unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
1385
}
1386
1387
module_init(mmc_blk_init);
1388
module_exit(mmc_blk_exit);
1389
1390
MODULE_LICENSE("GPL");
1391
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");