GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/mmc/core/core.c
/*
 *  linux/drivers/mmc/core/core.c
 *
 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include "core.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"

#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"

static struct workqueue_struct *workqueue;

/*
 * Enabling software CRCs on the data blocks can be a significant (30%)
 * performance cost, and for other reasons may not always be desired.
 * So we allow it to be disabled.
 */
int use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);

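/*
 * Illustrative note (not from the original source): this code builds as
 * mmc_core, so the parameter can be set on the kernel command line as
 * "mmc_core.use_spi_crc=0", or, when built as a module, with
 * "modprobe mmc_core use_spi_crc=0".
 */
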
/*
 * We normally treat cards as removed during suspend if they are not
 * known to be on a non-removable bus, to avoid the risk of writing
 * back data to a different card after resume. Allow this to be
 * overridden if necessary.
 */
#ifdef CONFIG_MMC_UNSAFE_RESUME
int mmc_assume_removable;
#else
int mmc_assume_removable = 1;
#endif
EXPORT_SYMBOL(mmc_assume_removable);
module_param_named(removable, mmc_assume_removable, bool, 0644);
MODULE_PARM_DESC(removable,
        "MMC/SD cards are removable and may be removed during suspend");

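/*
 * Illustrative note: the parameter is exposed as "removable", so e.g.
 * booting with "mmc_core.removable=0" asks the core to assume cards
 * stay put across suspend instead of treating them as removed.
 */
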
/*
 * Internal function. Schedule delayed work in the MMC work queue.
 */
static int mmc_schedule_delayed_work(struct delayed_work *work,
                                     unsigned long delay)
{
        return queue_delayed_work(workqueue, work, delay);
}

/*
 * Internal function. Flush all scheduled work from the MMC work queue.
 */
static void mmc_flush_scheduled_work(void)
{
        flush_workqueue(workqueue);
}

/**
 * mmc_request_done - finish processing an MMC request
 * @host: MMC host which completed request
 * @mrq: MMC request which completed
 *
 * MMC drivers should call this function when they have completed
 * their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
        struct mmc_command *cmd = mrq->cmd;
        int err = cmd->error;

        if (err && cmd->retries && mmc_host_is_spi(host)) {
                if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
                        cmd->retries = 0;
        }

        if (err && cmd->retries) {
                pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
                         mmc_hostname(host), cmd->opcode, err);

                cmd->retries--;
                cmd->error = 0;
                host->ops->request(host, mrq);
        } else {
                led_trigger_event(host->led, LED_OFF);

                pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
                         mmc_hostname(host), cmd->opcode, err,
                         cmd->resp[0], cmd->resp[1],
                         cmd->resp[2], cmd->resp[3]);

                if (mrq->data) {
                        pr_debug("%s: %d bytes transferred: %d\n",
                                 mmc_hostname(host),
                                 mrq->data->bytes_xfered, mrq->data->error);
                }

                if (mrq->stop) {
                        pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
                                 mmc_hostname(host), mrq->stop->opcode,
                                 mrq->stop->error,
                                 mrq->stop->resp[0], mrq->stop->resp[1],
                                 mrq->stop->resp[2], mrq->stop->resp[3]);
                }

                if (mrq->done)
                        mrq->done(mrq);

                mmc_host_clk_gate(host);
        }
}

EXPORT_SYMBOL(mmc_request_done);

static void
mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
        unsigned int i, sz;
        struct scatterlist *sg;
#endif

        pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
                 mmc_hostname(host), mrq->cmd->opcode,
                 mrq->cmd->arg, mrq->cmd->flags);

        if (mrq->data) {
                pr_debug("%s: blksz %d blocks %d flags %08x "
                         "tsac %d ms nsac %d\n",
                         mmc_hostname(host), mrq->data->blksz,
                         mrq->data->blocks, mrq->data->flags,
                         mrq->data->timeout_ns / 1000000,
                         mrq->data->timeout_clks);
        }

        if (mrq->stop) {
                pr_debug("%s: CMD%u arg %08x flags %08x\n",
                         mmc_hostname(host), mrq->stop->opcode,
                         mrq->stop->arg, mrq->stop->flags);
        }

        WARN_ON(!host->claimed);

        mrq->cmd->error = 0;
        mrq->cmd->mrq = mrq;
        if (mrq->data) {
                BUG_ON(mrq->data->blksz > host->max_blk_size);
                BUG_ON(mrq->data->blocks > host->max_blk_count);
                BUG_ON(mrq->data->blocks * mrq->data->blksz >
                        host->max_req_size);

#ifdef CONFIG_MMC_DEBUG
                sz = 0;
                for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
                        sz += sg->length;
                BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
#endif

                mrq->cmd->data = mrq->data;
                mrq->data->error = 0;
                mrq->data->mrq = mrq;
                if (mrq->stop) {
                        mrq->data->stop = mrq->stop;
                        mrq->stop->error = 0;
                        mrq->stop->mrq = mrq;
                }
        }
        mmc_host_clk_ungate(host);
        led_trigger_event(host->led, LED_FULL);
        host->ops->request(host, mrq);
}

static void mmc_wait_done(struct mmc_request *mrq)
{
        complete(mrq->done_data);
}

/**
 * mmc_wait_for_req - start a request and wait for completion
 * @host: MMC host to start command
 * @mrq: MMC request to start
 *
 * Start a new MMC custom command request for a host, and wait
 * for the command to complete. Does not attempt to parse the
 * response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
        DECLARE_COMPLETION_ONSTACK(complete);

        mrq->done_data = &complete;
        mrq->done = mmc_wait_done;

        mmc_start_request(host, mrq);

        wait_for_completion(&complete);
}

EXPORT_SYMBOL(mmc_wait_for_req);

/**
 * mmc_wait_for_cmd - start a command and wait for completion
 * @host: MMC host to start command
 * @cmd: MMC command to start
 * @retries: maximum number of retries
 *
 * Start a new MMC command for a host, and wait for the command
 * to complete. Return any error that occurred while the command
 * was executing. Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
        struct mmc_request mrq = {0};

        WARN_ON(!host->claimed);

        memset(cmd->resp, 0, sizeof(cmd->resp));
        cmd->retries = retries;

        mrq.cmd = cmd;
        cmd->data = NULL;

        mmc_wait_for_req(host, &mrq);

        return cmd->error;
}

EXPORT_SYMBOL(mmc_wait_for_cmd);

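/*
 * Illustrative sketch (not from this file): with the host claimed,
 * issuing CMD0 looks roughly like mmc_go_idle() in mmc_ops.c:
 *
 *     struct mmc_command cmd = {0};
 *
 *     cmd.opcode = MMC_GO_IDLE_STATE;
 *     cmd.arg = 0;
 *     cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;
 *     err = mmc_wait_for_cmd(host, &cmd, 0);
 */
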
/**
 * mmc_set_data_timeout - set the timeout for a data command
 * @data: data phase for command
 * @card: the MMC card associated with the data transfer
 *
 * Computes the data timeout parameters according to the
 * correct algorithm given the card type.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
        unsigned int mult;

        /*
         * SDIO cards only define an upper 1 s limit on access.
         */
        if (mmc_card_sdio(card)) {
                data->timeout_ns = 1000000000;
                data->timeout_clks = 0;
                return;
        }

        /*
         * SD cards use a 100 multiplier rather than 10
         */
        mult = mmc_card_sd(card) ? 100 : 10;

        /*
         * Scale up the multiplier (and therefore the timeout) by
         * the r2w factor for writes.
         */
        if (data->flags & MMC_DATA_WRITE)
                mult <<= card->csd.r2w_factor;

        data->timeout_ns = card->csd.tacc_ns * mult;
        data->timeout_clks = card->csd.tacc_clks * mult;

        /*
         * SD cards also have an upper limit on the timeout.
         */
        if (mmc_card_sd(card)) {
                unsigned int timeout_us, limit_us;

                timeout_us = data->timeout_ns / 1000;
                if (mmc_host_clk_rate(card->host))
                        timeout_us += data->timeout_clks * 1000 /
                                (mmc_host_clk_rate(card->host) / 1000);

                if (data->flags & MMC_DATA_WRITE)
                        /*
                         * The limit is really 250 ms, but that is
                         * insufficient for some crappy cards.
                         */
                        limit_us = 300000;
                else
                        limit_us = 100000;

                /*
                 * SDHC cards always use these fixed values.
                 */
                if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
                        data->timeout_ns = limit_us * 1000;
                        data->timeout_clks = 0;
                }
        }
        /*
         * Some cards need very high timeouts if driven in SPI mode.
         * The worst observed timeout was 900ms after writing a
         * continuous stream of data until the internal logic
         * overflowed.
         */
        if (mmc_host_is_spi(card->host)) {
                if (data->flags & MMC_DATA_WRITE) {
                        if (data->timeout_ns < 1000000000)
                                data->timeout_ns = 1000000000;  /* 1s */
                } else {
                        if (data->timeout_ns < 100000000)
                                data->timeout_ns = 100000000;   /* 100ms */
                }
        }
}
EXPORT_SYMBOL(mmc_set_data_timeout);

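/*
 * Worked example (hypothetical numbers): for an SD card write with
 * tacc_ns = 1500000 (1.5 ms) and r2w_factor = 2, mult = 100 << 2 = 400,
 * so timeout_ns = 600000000 (600 ms). That exceeds the 300 ms write
 * limit above, so the timeout is clamped to 300 ms with timeout_clks = 0.
 */
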
/**
 * mmc_align_data_size - pads a transfer size to a more optimal value
 * @card: the MMC card associated with the data transfer
 * @sz: original transfer size
 *
 * Pads the original data size with a number of extra bytes in
 * order to avoid controller bugs and/or performance hits
 * (e.g. some controllers revert to PIO for certain sizes).
 *
 * Returns the improved size, which might be unmodified.
 *
 * Note that this function is only relevant when issuing a
 * single scatter gather entry.
 */
unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
{
        /*
         * FIXME: We don't have a system for the controller to tell
         * the core about its problems yet, so for now we just 32-bit
         * align the size.
         */
        sz = ((sz + 3) / 4) * 4;

        return sz;
}
EXPORT_SYMBOL(mmc_align_data_size);

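/*
 * For example, a 13-byte transfer is padded up to 16 bytes
 * (((13 + 3) / 4) * 4 == 16), while an already-aligned 16-byte
 * transfer is returned unchanged.
 */
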
/**
 * mmc_host_enable - enable a host.
 * @host: mmc host to enable
 *
 * Hosts that support power saving can use the 'enable' and 'disable'
 * methods to exit and enter power saving states. For more information
 * see comments for struct mmc_host_ops.
 */
int mmc_host_enable(struct mmc_host *host)
{
        if (!(host->caps & MMC_CAP_DISABLE))
                return 0;

        if (host->en_dis_recurs)
                return 0;

        if (host->nesting_cnt++)
                return 0;

        cancel_delayed_work_sync(&host->disable);

        if (host->enabled)
                return 0;

        if (host->ops->enable) {
                int err;

                host->en_dis_recurs = 1;
                err = host->ops->enable(host);
                host->en_dis_recurs = 0;

                if (err) {
                        pr_debug("%s: enable error %d\n",
                                 mmc_hostname(host), err);
                        return err;
                }
        }
        host->enabled = 1;
        return 0;
}
EXPORT_SYMBOL(mmc_host_enable);

static int mmc_host_do_disable(struct mmc_host *host, int lazy)
{
        if (host->ops->disable) {
                int err;

                host->en_dis_recurs = 1;
                err = host->ops->disable(host, lazy);
                host->en_dis_recurs = 0;

                if (err < 0) {
                        pr_debug("%s: disable error %d\n",
                                 mmc_hostname(host), err);
                        return err;
                }
                if (err > 0) {
                        unsigned long delay = msecs_to_jiffies(err);

                        mmc_schedule_delayed_work(&host->disable, delay);
                }
        }
        host->enabled = 0;
        return 0;
}

/**
 * mmc_host_disable - disable a host.
 * @host: mmc host to disable
 *
 * Hosts that support power saving can use the 'enable' and 'disable'
 * methods to exit and enter power saving states. For more information
 * see comments for struct mmc_host_ops.
 */
int mmc_host_disable(struct mmc_host *host)
{
        int err;

        if (!(host->caps & MMC_CAP_DISABLE))
                return 0;

        if (host->en_dis_recurs)
                return 0;

        if (--host->nesting_cnt)
                return 0;

        if (!host->enabled)
                return 0;

        err = mmc_host_do_disable(host, 0);
        return err;
}
EXPORT_SYMBOL(mmc_host_disable);

/**
 * __mmc_claim_host - exclusively claim a host
 * @host: mmc host to claim
 * @abort: whether or not the operation should be aborted
 *
 * Claim a host for a set of operations. If @abort is non-NULL and
 * dereferences to a non-zero value, this will return prematurely with
 * that non-zero value without acquiring the lock. Returns zero
 * with the lock held otherwise.
 */
int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
{
        DECLARE_WAITQUEUE(wait, current);
        unsigned long flags;
        int stop;

        might_sleep();

        add_wait_queue(&host->wq, &wait);
        spin_lock_irqsave(&host->lock, flags);
        while (1) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                stop = abort ? atomic_read(abort) : 0;
                if (stop || !host->claimed || host->claimer == current)
                        break;
                spin_unlock_irqrestore(&host->lock, flags);
                schedule();
                spin_lock_irqsave(&host->lock, flags);
        }
        set_current_state(TASK_RUNNING);
        if (!stop) {
                host->claimed = 1;
                host->claimer = current;
                host->claim_cnt += 1;
        } else
                wake_up(&host->wq);
        spin_unlock_irqrestore(&host->lock, flags);
        remove_wait_queue(&host->wq, &wait);
        if (!stop)
                mmc_host_enable(host);
        return stop;
}

EXPORT_SYMBOL(__mmc_claim_host);

/**
 * mmc_try_claim_host - try exclusively to claim a host
 * @host: mmc host to claim
 *
 * Returns %1 if the host is claimed, %0 otherwise.
 */
int mmc_try_claim_host(struct mmc_host *host)
{
        int claimed_host = 0;
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        if (!host->claimed || host->claimer == current) {
                host->claimed = 1;
                host->claimer = current;
                host->claim_cnt += 1;
                claimed_host = 1;
        }
        spin_unlock_irqrestore(&host->lock, flags);
        return claimed_host;
}
EXPORT_SYMBOL(mmc_try_claim_host);

/**
 * mmc_do_release_host - release a claimed host
 * @host: mmc host to release
 *
 * If you successfully claimed a host, this function will
 * release it again.
 */
void mmc_do_release_host(struct mmc_host *host)
{
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        if (--host->claim_cnt) {
                /* Release for nested claim */
                spin_unlock_irqrestore(&host->lock, flags);
        } else {
                host->claimed = 0;
                host->claimer = NULL;
                spin_unlock_irqrestore(&host->lock, flags);
                wake_up(&host->wq);
        }
}
EXPORT_SYMBOL(mmc_do_release_host);

void mmc_host_deeper_disable(struct work_struct *work)
{
        struct mmc_host *host =
                container_of(work, struct mmc_host, disable.work);

        /* If the host is claimed then we do not want to disable it anymore */
        if (!mmc_try_claim_host(host))
                return;
        mmc_host_do_disable(host, 1);
        mmc_do_release_host(host);
}

/**
 * mmc_host_lazy_disable - lazily disable a host.
 * @host: mmc host to disable
 *
 * Hosts that support power saving can use the 'enable' and 'disable'
 * methods to exit and enter power saving states. For more information
 * see comments for struct mmc_host_ops.
 */
int mmc_host_lazy_disable(struct mmc_host *host)
{
        if (!(host->caps & MMC_CAP_DISABLE))
                return 0;

        if (host->en_dis_recurs)
                return 0;

        if (--host->nesting_cnt)
                return 0;

        if (!host->enabled)
                return 0;

        if (host->disable_delay) {
                mmc_schedule_delayed_work(&host->disable,
                                msecs_to_jiffies(host->disable_delay));
                return 0;
        } else
                return mmc_host_do_disable(host, 1);
}
EXPORT_SYMBOL(mmc_host_lazy_disable);

/**
 * mmc_release_host - release a host
 * @host: mmc host to release
 *
 * Release an MMC host, allowing others to claim the host
 * for their operations.
 */
void mmc_release_host(struct mmc_host *host)
{
        WARN_ON(!host->claimed);

        mmc_host_lazy_disable(host);

        mmc_do_release_host(host);
}

EXPORT_SYMBOL(mmc_release_host);

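/*
 * Typical usage pattern (illustrative sketch): bus handlers bracket card
 * access with a claim/release pair so only one context talks to the card
 * at a time; mmc_claim_host() in core.h wraps __mmc_claim_host(host, NULL):
 *
 *     mmc_claim_host(host);       // may sleep until the host is free
 *     err = mmc_wait_for_cmd(host, &cmd, 3);
 *     mmc_release_host(host);
 */
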
/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
        struct mmc_ios *ios = &host->ios;

        pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
                 "width %u timing %u\n",
                 mmc_hostname(host), ios->clock, ios->bus_mode,
                 ios->power_mode, ios->chip_select, ios->vdd,
                 ios->bus_width, ios->timing);

        if (ios->clock > 0)
                mmc_set_ungated(host);
        host->ops->set_ios(host, ios);
}

/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
        host->ios.chip_select = mode;
        mmc_set_ios(host);
}

/*
 * Sets the host clock to the highest possible frequency that
 * is below "hz".
 */
void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
        WARN_ON(hz < host->f_min);

        if (hz > host->f_max)
                hz = host->f_max;

        host->ios.clock = hz;
        mmc_set_ios(host);
}

#ifdef CONFIG_MMC_CLKGATE
/*
 * This gates the clock by setting it to 0 Hz.
 */
void mmc_gate_clock(struct mmc_host *host)
{
        unsigned long flags;

        spin_lock_irqsave(&host->clk_lock, flags);
        host->clk_old = host->ios.clock;
        host->ios.clock = 0;
        host->clk_gated = true;
        spin_unlock_irqrestore(&host->clk_lock, flags);
        mmc_set_ios(host);
}

/*
 * This restores the clock from gating by using the cached
 * clock value.
 */
void mmc_ungate_clock(struct mmc_host *host)
{
        /*
         * We should previously have gated the clock, so the clock shall
         * be 0 here! The clock may however be 0 during initialization,
         * when some request operations are performed before setting
         * the frequency. When ungate is requested in that situation
         * we just ignore the call.
         */
        if (host->clk_old) {
                BUG_ON(host->ios.clock);
                /* This call will also set host->clk_gated to false */
                mmc_set_clock(host, host->clk_old);
        }
}

void mmc_set_ungated(struct mmc_host *host)
{
        unsigned long flags;

        /*
         * We've been given a new frequency while the clock is gated,
         * so make sure we regard this as ungating it.
         */
        spin_lock_irqsave(&host->clk_lock, flags);
        host->clk_gated = false;
        spin_unlock_irqrestore(&host->clk_lock, flags);
}

#else
void mmc_set_ungated(struct mmc_host *host)
{
}
#endif

/*
 * Change the bus mode (open drain/push-pull) of a host.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
        host->ios.bus_mode = mode;
        mmc_set_ios(host);
}

/*
 * Change data bus width of a host.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
        host->ios.bus_width = width;
        mmc_set_ios(host);
}

/**
 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
 * @vdd: voltage (mV)
 * @low_bits: prefer low bits in boundary cases
 *
 * This function returns the OCR bit number according to the provided @vdd
 * value. If conversion is not possible a negative errno value is returned.
 *
 * Depending on the @low_bits flag the function prefers low or high OCR bits
 * on boundary voltages. For example,
 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
 *
 * Any value in the [1951:1999] range translates to ilog2(MMC_VDD_20_21).
 */
static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
{
        const int max_bit = ilog2(MMC_VDD_35_36);
        int bit;

        if (vdd < 1650 || vdd > 3600)
                return -EINVAL;

        if (vdd >= 1650 && vdd <= 1950)
                return ilog2(MMC_VDD_165_195);

        if (low_bits)
                vdd -= 1;

        /* Base 2000 mV, step 100 mV, bit's base 8. */
        bit = (vdd - 2000) / 100 + 8;
        if (bit > max_bit)
                return max_bit;
        return bit;
}

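/*
 * Worked example: vdd = 3300 with low_bits = false gives
 * (3300 - 2000) / 100 + 8 = 21, i.e. ilog2(MMC_VDD_33_34); with
 * low_bits = true the value is first decremented to 3299, giving
 * bit 20, i.e. ilog2(MMC_VDD_32_33).
 */
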
/**
 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
 * @vdd_min: minimum voltage value (mV)
 * @vdd_max: maximum voltage value (mV)
 *
 * This function returns the OCR mask bits according to the provided @vdd_min
 * and @vdd_max values. If conversion is not possible the function returns 0.
 *
 * Notes wrt boundary cases:
 * This function sets the OCR bits for all boundary voltages, for example
 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35 mask.
 */
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
{
        u32 mask = 0;

        if (vdd_max < vdd_min)
                return 0;

        /* Prefer high bits for the boundary vdd_max values. */
        vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
        if (vdd_max < 0)
                return 0;

        /* Prefer low bits for the boundary vdd_min values. */
        vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
        if (vdd_min < 0)
                return 0;

        /* Fill the mask, from max bit to min bit. */
        while (vdd_max >= vdd_min)
                mask |= 1 << vdd_max--;

        return mask;
}
EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);

#ifdef CONFIG_REGULATOR

/**
 * mmc_regulator_get_ocrmask - return mask of supported voltages
 * @supply: regulator to use
 *
 * This returns either a negative errno, or a mask of voltages that
 * can be provided to MMC/SD/SDIO devices using the specified voltage
 * regulator. This would normally be called before registering the
 * MMC host adapter.
 */
int mmc_regulator_get_ocrmask(struct regulator *supply)
{
        int result = 0;
        int count;
        int i;

        count = regulator_count_voltages(supply);
        if (count < 0)
                return count;

        for (i = 0; i < count; i++) {
                int vdd_uV;
                int vdd_mV;

                vdd_uV = regulator_list_voltage(supply, i);
                if (vdd_uV <= 0)
                        continue;

                vdd_mV = vdd_uV / 1000;
                result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
        }

        return result;
}
EXPORT_SYMBOL(mmc_regulator_get_ocrmask);

/**
 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
 * @mmc: the host to regulate
 * @supply: regulator to use
 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
 *
 * Returns zero on success, else negative errno.
 *
 * MMC host drivers may use this to enable or disable a regulator using
 * a particular supply voltage. This would normally be called from the
 * set_ios() method.
 */
int mmc_regulator_set_ocr(struct mmc_host *mmc,
                          struct regulator *supply,
                          unsigned short vdd_bit)
{
        int result = 0;
        int min_uV, max_uV;

        if (vdd_bit) {
                int tmp;
                int voltage;

                /* REVISIT mmc_vddrange_to_ocrmask() may have set some
                 * bits this regulator doesn't quite support ... don't
                 * be too picky, most cards and regulators are OK with
                 * a 0.1V range goof (it's a small error percentage).
                 */
                tmp = vdd_bit - ilog2(MMC_VDD_165_195);
                if (tmp == 0) {
                        min_uV = 1650 * 1000;
                        max_uV = 1950 * 1000;
                } else {
                        min_uV = 1900 * 1000 + tmp * 100 * 1000;
                        max_uV = min_uV + 100 * 1000;
                }

                /* avoid needless changes to this voltage; the regulator
                 * might not allow this operation
                 */
                voltage = regulator_get_voltage(supply);
                if (voltage < 0)
                        result = voltage;
                else if (voltage < min_uV || voltage > max_uV)
                        result = regulator_set_voltage(supply, min_uV, max_uV);
                else
                        result = 0;

                if (result == 0 && !mmc->regulator_enabled) {
                        result = regulator_enable(supply);
                        if (!result)
                                mmc->regulator_enabled = true;
                }
        } else if (mmc->regulator_enabled) {
                result = regulator_disable(supply);
                if (result == 0)
                        mmc->regulator_enabled = false;
        }

        if (result)
                dev_err(mmc_dev(mmc),
                        "could not set regulator OCR (%d)\n", result);
        return result;
}
EXPORT_SYMBOL(mmc_regulator_set_ocr);

#endif /* CONFIG_REGULATOR */

/*
 * Mask off any voltages we don't support and select
 * the lowest voltage
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
        int bit;

        ocr &= host->ocr_avail;

        bit = ffs(ocr);
        if (bit) {
                bit -= 1;

                ocr &= 3 << bit;

                host->ios.vdd = bit;
                mmc_set_ios(host);
        } else {
                pr_warning("%s: host doesn't support card's voltages\n",
                           mmc_hostname(host));
                ocr = 0;
        }

        return ocr;
}

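/*
 * Switch the card's signalling voltage (e.g. to 1.8 V for faster bus
 * speed modes). When @cmd11 is set and a non-3.3 V level is requested,
 * CMD11 (SD_SWITCH_VOLTAGE) is first sent to the card; the host driver
 * then completes the switch via its start_signal_voltage_switch()
 * callback.
 */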
int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11)
{
        struct mmc_command cmd = {0};
        int err = 0;

        BUG_ON(!host);

        /*
         * Send CMD11 only if the request is to switch the card to
         * 1.8V signalling.
         */
        if ((signal_voltage != MMC_SIGNAL_VOLTAGE_330) && cmd11) {
                cmd.opcode = SD_SWITCH_VOLTAGE;
                cmd.arg = 0;
                cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

                err = mmc_wait_for_cmd(host, &cmd, 0);
                if (err)
                        return err;

                if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
                        return -EIO;
        }

        host->ios.signal_voltage = signal_voltage;

        if (host->ops->start_signal_voltage_switch)
                err = host->ops->start_signal_voltage_switch(host, &host->ios);

        return err;
}

/*
 * Select timing parameters for host.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
        host->ios.timing = timing;
        mmc_set_ios(host);
}

/*
 * Select appropriate driver type for host.
 */
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
{
        host->ios.drv_type = drv_type;
        mmc_set_ios(host);
}

/*
 * Apply power to the MMC stack. This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise. Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
static void mmc_power_up(struct mmc_host *host)
{
        int bit;

        /* If ocr is set, we use it */
        if (host->ocr)
                bit = ffs(host->ocr) - 1;
        else
                bit = fls(host->ocr_avail) - 1;

        host->ios.vdd = bit;
        if (mmc_host_is_spi(host)) {
                host->ios.chip_select = MMC_CS_HIGH;
                host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
        } else {
                host->ios.chip_select = MMC_CS_DONTCARE;
                host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
        }
        host->ios.power_mode = MMC_POWER_UP;
        host->ios.bus_width = MMC_BUS_WIDTH_1;
        host->ios.timing = MMC_TIMING_LEGACY;
        mmc_set_ios(host);

        /*
         * This delay should be sufficient to allow the power supply
         * to reach the minimum voltage.
         */
        mmc_delay(10);

        host->ios.clock = host->f_init;

        host->ios.power_mode = MMC_POWER_ON;
        mmc_set_ios(host);

        /*
         * This delay must be at least 74 clock cycles, or 1 ms, or the
         * time required to reach a stable voltage.
         */
        mmc_delay(10);
}

static void mmc_power_off(struct mmc_host *host)
{
        host->ios.clock = 0;
        host->ios.vdd = 0;

        /*
         * Reset ocr mask to be the highest possible voltage supported for
         * this mmc host. This value will be used at next power up.
         */
        host->ocr = 1 << (fls(host->ocr_avail) - 1);

        if (!mmc_host_is_spi(host)) {
                host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
                host->ios.chip_select = MMC_CS_DONTCARE;
        }
        host->ios.power_mode = MMC_POWER_OFF;
        host->ios.bus_width = MMC_BUS_WIDTH_1;
        host->ios.timing = MMC_TIMING_LEGACY;
        mmc_set_ios(host);
}

/*
 * Cleanup when the last reference to the bus operator is dropped.
 */
static void __mmc_release_bus(struct mmc_host *host)
{
        BUG_ON(!host);
        BUG_ON(host->bus_refs);
        BUG_ON(!host->bus_dead);

        host->bus_ops = NULL;
}

/*
 * Increase reference count of bus operator
 */
static inline void mmc_bus_get(struct mmc_host *host)
{
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        host->bus_refs++;
        spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Decrease reference count of bus operator and free it if
 * it is the last reference.
 */
static inline void mmc_bus_put(struct mmc_host *host)
{
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        host->bus_refs--;
        if ((host->bus_refs == 0) && host->bus_ops)
                __mmc_release_bus(host);
        spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Assign a mmc bus handler to a host. Only one bus handler may control a
 * host at any given time.
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
        unsigned long flags;

        BUG_ON(!host);
        BUG_ON(!ops);

        WARN_ON(!host->claimed);

        spin_lock_irqsave(&host->lock, flags);

        BUG_ON(host->bus_ops);
        BUG_ON(host->bus_refs);

        host->bus_ops = ops;
        host->bus_refs = 1;
        host->bus_dead = 0;

        spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Remove the current bus handler from a host. Assumes that there are
 * no interesting cards left, so the bus is powered down.
 */
void mmc_detach_bus(struct mmc_host *host)
{
        unsigned long flags;

        BUG_ON(!host);

        WARN_ON(!host->claimed);
        WARN_ON(!host->bus_ops);

        spin_lock_irqsave(&host->lock, flags);

        host->bus_dead = 1;

        spin_unlock_irqrestore(&host->lock, flags);

        mmc_power_off(host);

        mmc_bus_put(host);
}

/**
 * mmc_detect_change - process change of state on a MMC socket
 * @host: host which changed state.
 * @delay: optional delay to wait before detection (jiffies)
 *
 * MMC drivers should call this when they detect a card has been
 * inserted or removed. The MMC layer will confirm that any
 * present card is still functional, and initialize any newly
 * inserted card.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
#ifdef CONFIG_MMC_DEBUG
        unsigned long flags;
        spin_lock_irqsave(&host->lock, flags);
        WARN_ON(host->removed);
        spin_unlock_irqrestore(&host->lock, flags);
#endif

        mmc_schedule_delayed_work(&host->detect, delay);
}

EXPORT_SYMBOL(mmc_detect_change);

void mmc_init_erase(struct mmc_card *card)
{
        unsigned int sz;

        if (is_power_of_2(card->erase_size))
                card->erase_shift = ffs(card->erase_size) - 1;
        else
                card->erase_shift = 0;

        /*
         * It is possible to erase an arbitrarily large area of an SD or MMC
         * card. That is not desirable because it can take a long time
         * (minutes) potentially delaying more important I/O, and also the
         * timeout calculations become increasingly over-estimated.
         * Consequently, 'pref_erase' is defined as a guide to limit erases
         * to that size and alignment.
         *
         * For SD cards that define Allocation Unit size, limit erases to one
         * Allocation Unit at a time. For MMC cards that define High Capacity
         * Erase Size, whether it is switched on or not, limit to that size.
         * Otherwise just have a stab at a good value. For modern cards it
         * will end up being 4MiB. Note that if the value is too small, it
         * can end up taking longer to erase.
         */
        if (mmc_card_sd(card) && card->ssr.au) {
                card->pref_erase = card->ssr.au;
                card->erase_shift = ffs(card->ssr.au) - 1;
        } else if (card->ext_csd.hc_erase_size) {
                card->pref_erase = card->ext_csd.hc_erase_size;
        } else {
                sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
                if (sz < 128)
                        card->pref_erase = 512 * 1024 / 512;
                else if (sz < 512)
                        card->pref_erase = 1024 * 1024 / 512;
                else if (sz < 1024)
                        card->pref_erase = 2 * 1024 * 1024 / 512;
                else
                        card->pref_erase = 4 * 1024 * 1024 / 512;
                if (card->pref_erase < card->erase_size)
                        card->pref_erase = card->erase_size;
                else {
                        sz = card->pref_erase % card->erase_size;
                        if (sz)
                                card->pref_erase += card->erase_size - sz;
                }
        }
}

static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
                                          unsigned int arg, unsigned int qty)
{
        unsigned int erase_timeout;

        if (card->ext_csd.erase_group_def & 1) {
                /* High Capacity Erase Group Size uses HC timeouts */
                if (arg == MMC_TRIM_ARG)
                        erase_timeout = card->ext_csd.trim_timeout;
                else
                        erase_timeout = card->ext_csd.hc_erase_timeout;
        } else {
                /* CSD Erase Group Size uses write timeout */
                unsigned int mult = (10 << card->csd.r2w_factor);
                unsigned int timeout_clks = card->csd.tacc_clks * mult;
                unsigned int timeout_us;

                /* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
                if (card->csd.tacc_ns < 1000000)
                        timeout_us = (card->csd.tacc_ns * mult) / 1000;
                else
                        timeout_us = (card->csd.tacc_ns / 1000) * mult;

                /*
                 * ios.clock is only a target. The real clock rate might be
                 * less but not that much less, so fudge it by multiplying by 2.
                 */
                timeout_clks <<= 1;
                timeout_us += (timeout_clks * 1000) /
                              (mmc_host_clk_rate(card->host) / 1000);

                erase_timeout = timeout_us / 1000;

                /*
                 * Theoretically, the calculation could underflow so round up
                 * to 1ms in that case.
                 */
                if (!erase_timeout)
                        erase_timeout = 1;
        }

        /* Multiplier for secure operations */
        if (arg & MMC_SECURE_ARGS) {
                if (arg == MMC_SECURE_ERASE_ARG)
                        erase_timeout *= card->ext_csd.sec_erase_mult;
                else
                        erase_timeout *= card->ext_csd.sec_trim_mult;
        }

        erase_timeout *= qty;

        /*
         * Ensure at least a 1 second timeout for SPI as per
         * 'mmc_set_data_timeout()'
         */
        if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
                erase_timeout = 1000;

        return erase_timeout;
}

static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
                                         unsigned int arg,
                                         unsigned int qty)
{
        unsigned int erase_timeout;

        if (card->ssr.erase_timeout) {
                /* Erase timeout specified in SD Status Register (SSR) */
                erase_timeout = card->ssr.erase_timeout * qty +
                                card->ssr.erase_offset;
        } else {
                /*
                 * Erase timeout not specified in SD Status Register (SSR) so
                 * use 250ms per write block.
                 */
                erase_timeout = 250 * qty;
        }

        /* Must not be less than 1 second */
        if (erase_timeout < 1000)
                erase_timeout = 1000;

        return erase_timeout;
}

static unsigned int mmc_erase_timeout(struct mmc_card *card,
                                      unsigned int arg,
                                      unsigned int qty)
{
        if (mmc_card_sd(card))
                return mmc_sd_erase_timeout(card, arg, qty);
        else
                return mmc_mmc_erase_timeout(card, arg, qty);
}

static int mmc_do_erase(struct mmc_card *card, unsigned int from,
                        unsigned int to, unsigned int arg)
{
        struct mmc_command cmd = {0};
        unsigned int qty = 0;
        int err;

        /*
         * qty is used to calculate the erase timeout which depends on how many
         * erase groups (or allocation units in SD terminology) are affected.
         * We count erasing part of an erase group as one erase group.
         * For SD, the allocation units are always a power of 2. For MMC, the
         * erase group size is almost certainly also a power of 2, but the
         * JEDEC standard does not seem to insist on that, so we fall back to
         * division in that case. SD may not specify an allocation unit size,
         * in which case the timeout is based on the number of write blocks.
         *
         * Note that the timeout for secure trim 2 will only be correct if the
         * number of erase groups specified is the same as the total of all
         * preceding secure trim 1 commands. Since the power may have been
         * lost since the secure trim 1 commands occurred, it is generally
         * impossible to calculate the secure trim 2 timeout correctly.
         */
        if (card->erase_shift)
                qty += ((to >> card->erase_shift) -
                        (from >> card->erase_shift)) + 1;
        else if (mmc_card_sd(card))
                qty += to - from + 1;
        else
                qty += ((to / card->erase_size) -
                        (from / card->erase_size)) + 1;

        if (!mmc_card_blockaddr(card)) {
                from <<= 9;
                to <<= 9;
        }

        if (mmc_card_sd(card))
                cmd.opcode = SD_ERASE_WR_BLK_START;
        else
                cmd.opcode = MMC_ERASE_GROUP_START;
        cmd.arg = from;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
        err = mmc_wait_for_cmd(card->host, &cmd, 0);
        if (err) {
                printk(KERN_ERR "mmc_erase: group start error %d, "
                       "status %#x\n", err, cmd.resp[0]);
                err = -EINVAL;
                goto out;
        }

        memset(&cmd, 0, sizeof(struct mmc_command));
        if (mmc_card_sd(card))
                cmd.opcode = SD_ERASE_WR_BLK_END;
        else
                cmd.opcode = MMC_ERASE_GROUP_END;
        cmd.arg = to;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
        err = mmc_wait_for_cmd(card->host, &cmd, 0);
        if (err) {
                printk(KERN_ERR "mmc_erase: group end error %d, status %#x\n",
                       err, cmd.resp[0]);
                err = -EINVAL;
                goto out;
        }

        memset(&cmd, 0, sizeof(struct mmc_command));
        cmd.opcode = MMC_ERASE;
        cmd.arg = arg;
        cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
        cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty);
        err = mmc_wait_for_cmd(card->host, &cmd, 0);
        if (err) {
                printk(KERN_ERR "mmc_erase: erase error %d, status %#x\n",
                       err, cmd.resp[0]);
                err = -EIO;
                goto out;
        }

        if (mmc_host_is_spi(card->host))
                goto out;

        do {
                memset(&cmd, 0, sizeof(struct mmc_command));
                cmd.opcode = MMC_SEND_STATUS;
                cmd.arg = card->rca << 16;
                cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
                /* Do not retry else we can't see errors */
                err = mmc_wait_for_cmd(card->host, &cmd, 0);
                if (err || (cmd.resp[0] & 0xFDF92000)) {
                        printk(KERN_ERR "error %d requesting status %#x\n",
                               err, cmd.resp[0]);
                        err = -EIO;
                        goto out;
                }
        } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
                 R1_CURRENT_STATE(cmd.resp[0]) == 7);
out:
        return err;
}

/**
 * mmc_erase - erase sectors.
 * @card: card to erase
 * @from: first sector to erase
 * @nr: number of sectors to erase
 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
 *
 * Caller must claim host before calling this function.
 */
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
              unsigned int arg)
{
        unsigned int rem, to = from + nr;

        if (!(card->host->caps & MMC_CAP_ERASE) ||
            !(card->csd.cmdclass & CCC_ERASE))
                return -EOPNOTSUPP;

        if (!card->erase_size)
                return -EOPNOTSUPP;

        if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
                return -EOPNOTSUPP;

        if ((arg & MMC_SECURE_ARGS) &&
            !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
                return -EOPNOTSUPP;

        if ((arg & MMC_TRIM_ARGS) &&
            !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
                return -EOPNOTSUPP;

        if (arg == MMC_SECURE_ERASE_ARG) {
                if (from % card->erase_size || nr % card->erase_size)
                        return -EINVAL;
        }

        if (arg == MMC_ERASE_ARG) {
                rem = from % card->erase_size;
                if (rem) {
                        rem = card->erase_size - rem;
                        from += rem;
                        if (nr > rem)
                                nr -= rem;
                        else
                                return 0;
                }
                rem = nr % card->erase_size;
                if (rem)
                        nr -= rem;
        }

        if (nr == 0)
                return 0;

        to = from + nr;

        if (to <= from)
                return -EINVAL;

        /* 'from' and 'to' are inclusive */
        to -= 1;

        return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);

int mmc_can_erase(struct mmc_card *card)
{
        if ((card->host->caps & MMC_CAP_ERASE) &&
            (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
                return 1;
        return 0;
}
EXPORT_SYMBOL(mmc_can_erase);

int mmc_can_trim(struct mmc_card *card)
{
        if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
                return 1;
        return 0;
}
EXPORT_SYMBOL(mmc_can_trim);

int mmc_can_secure_erase_trim(struct mmc_card *card)
{
        if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)
                return 1;
        return 0;
}
EXPORT_SYMBOL(mmc_can_secure_erase_trim);

int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
                            unsigned int nr)
{
        if (!card->erase_size)
                return 0;
        if (from % card->erase_size || nr % card->erase_size)
                return 0;
        return 1;
}
EXPORT_SYMBOL(mmc_erase_group_aligned);

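/*
 * Illustrative sketch (not from this file): a block driver discarding a
 * sector range would typically combine the helpers above:
 *
 *     if (mmc_can_erase(card) && mmc_erase_group_aligned(card, from, nr)) {
 *             mmc_claim_host(card->host);
 *             err = mmc_erase(card, from, nr, MMC_ERASE_ARG);
 *             mmc_release_host(card->host);
 *     }
 */
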
int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
{
        struct mmc_command cmd = {0};

        if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card))
                return 0;

        cmd.opcode = MMC_SET_BLOCKLEN;
        cmd.arg = blocklen;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
        return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blocklen);

static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
{
        host->f_init = freq;

#ifdef CONFIG_MMC_DEBUG
        pr_info("%s: %s: trying to init card at %u Hz\n",
                mmc_hostname(host), __func__, host->f_init);
#endif
        mmc_power_up(host);

        /*
         * sdio_reset sends CMD52 to reset card. Since we do not know
         * if the card is being re-initialized, just send it. CMD52
         * should be ignored by SD/eMMC cards.
         */
        sdio_reset(host);
        mmc_go_idle(host);

        mmc_send_if_cond(host, host->ocr_avail);

        /* Order's important: probe SDIO, then SD, then MMC */
        if (!mmc_attach_sdio(host))
                return 0;
        if (!mmc_attach_sd(host))
                return 0;
        if (!mmc_attach_mmc(host))
                return 0;

        mmc_power_off(host);
        return -EIO;
}

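/*
 * Card detect work. Re-checks whether a registered removable card is
 * still present and, if no bus handler is attached, tries to initialize
 * a newly inserted card, probing at 400, 300, 200 and 100 kHz in turn
 * (each clamped to at least the host's minimum frequency).
 */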
void mmc_rescan(struct work_struct *work)
{
        static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
        struct mmc_host *host =
                container_of(work, struct mmc_host, detect.work);
        int i;

        if (host->rescan_disable)
                return;

        mmc_bus_get(host);

        /*
         * if there is a _removable_ card registered, check whether it is
         * still present
         */
        if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
            && !(host->caps & MMC_CAP_NONREMOVABLE))
                host->bus_ops->detect(host);

        /*
         * Let mmc_bus_put() free the bus/bus_ops if we've found that
         * the card is no longer present.
         */
        mmc_bus_put(host);
        mmc_bus_get(host);

        /* if there still is a card present, stop here */
        if (host->bus_ops != NULL) {
                mmc_bus_put(host);
                goto out;
        }

        /*
         * Only we can add a new handler, so it's safe to
         * release the lock here.
         */
        mmc_bus_put(host);

        if (host->ops->get_cd && host->ops->get_cd(host) == 0)
                goto out;

        mmc_claim_host(host);
        for (i = 0; i < ARRAY_SIZE(freqs); i++) {
                if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
                        break;
                if (freqs[i] <= host->f_min)
                        break;
        }
        mmc_release_host(host);

out:
        if (host->caps & MMC_CAP_NEEDS_POLL)
                mmc_schedule_delayed_work(&host->detect, HZ);
}

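/*
 * Power the host down at registration time and kick off the first card
 * detect pass; mmc_stop_host() below undoes this at removal.
 */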
void mmc_start_host(struct mmc_host *host)
{
        mmc_power_off(host);
        mmc_detect_change(host, 0);
}

void mmc_stop_host(struct mmc_host *host)
{
#ifdef CONFIG_MMC_DEBUG
        unsigned long flags;
        spin_lock_irqsave(&host->lock, flags);
        host->removed = 1;
        spin_unlock_irqrestore(&host->lock, flags);
#endif

        if (host->caps & MMC_CAP_DISABLE)
                cancel_delayed_work(&host->disable);
        cancel_delayed_work_sync(&host->detect);
        mmc_flush_scheduled_work();

        /* clear pm flags now and let card drivers set them as needed */
        host->pm_flags = 0;

        mmc_bus_get(host);
        if (host->bus_ops && !host->bus_dead) {
                if (host->bus_ops->remove)
                        host->bus_ops->remove(host);

                mmc_claim_host(host);
                mmc_detach_bus(host);
                mmc_release_host(host);
                mmc_bus_put(host);
                return;
        }
        mmc_bus_put(host);

        BUG_ON(host->card);

        mmc_power_off(host);
}

int mmc_power_save_host(struct mmc_host *host)
{
        int ret = 0;

        mmc_bus_get(host);

        if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
                mmc_bus_put(host);
                return -EINVAL;
        }

        if (host->bus_ops->power_save)
                ret = host->bus_ops->power_save(host);

        mmc_bus_put(host);

        mmc_power_off(host);

        return ret;
}
EXPORT_SYMBOL(mmc_power_save_host);

int mmc_power_restore_host(struct mmc_host *host)
{
        int ret;

        mmc_bus_get(host);

        if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
                mmc_bus_put(host);
                return -EINVAL;
        }

        mmc_power_up(host);
        ret = host->bus_ops->power_restore(host);

        mmc_bus_put(host);

        return ret;
}
EXPORT_SYMBOL(mmc_power_restore_host);

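/*
 * Card sleep/awake support. The heavy lifting lives in the attached
 * bus_ops; these helpers return -ENOSYS when no handler (or no
 * sleep/awake callback) is available. mmc_card_can_sleep() below limits
 * this to MMC cards with EXT_CSD revision >= 3.
 */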
int mmc_card_awake(struct mmc_host *host)
{
        int err = -ENOSYS;

        mmc_bus_get(host);

        if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
                err = host->bus_ops->awake(host);

        mmc_bus_put(host);

        return err;
}
EXPORT_SYMBOL(mmc_card_awake);

int mmc_card_sleep(struct mmc_host *host)
{
        int err = -ENOSYS;

        mmc_bus_get(host);

        /* Check the sleep callback (the original tested ->awake here) */
        if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
                err = host->bus_ops->sleep(host);

        mmc_bus_put(host);

        return err;
}
EXPORT_SYMBOL(mmc_card_sleep);

int mmc_card_can_sleep(struct mmc_host *host)
{
        struct mmc_card *card = host->card;

        if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3)
                return 1;
        return 0;
}
EXPORT_SYMBOL(mmc_card_can_sleep);

#ifdef CONFIG_PM

/**
 * mmc_suspend_host - suspend a host
 * @host: mmc host
 */
int mmc_suspend_host(struct mmc_host *host)
{
        int err = 0;

        if (host->caps & MMC_CAP_DISABLE)
                cancel_delayed_work(&host->disable);
        cancel_delayed_work(&host->detect);
        mmc_flush_scheduled_work();

        mmc_bus_get(host);
        if (host->bus_ops && !host->bus_dead) {
                if (host->bus_ops->suspend)
                        err = host->bus_ops->suspend(host);
                if (err == -ENOSYS || !host->bus_ops->resume) {
                        /*
                         * We simply "remove" the card in this case.
                         * It will be redetected on resume.
                         */
                        if (host->bus_ops->remove)
                                host->bus_ops->remove(host);
                        mmc_claim_host(host);
                        mmc_detach_bus(host);
                        mmc_release_host(host);
                        host->pm_flags = 0;
                        err = 0;
                }
        }
        mmc_bus_put(host);

        if (!err && !mmc_card_keep_power(host))
                mmc_power_off(host);

        return err;
}

EXPORT_SYMBOL(mmc_suspend_host);

/**
 * mmc_resume_host - resume a previously suspended host
 * @host: mmc host
 */
int mmc_resume_host(struct mmc_host *host)
{
        int err = 0;

        mmc_bus_get(host);
        if (host->bus_ops && !host->bus_dead) {
                if (!mmc_card_keep_power(host)) {
                        mmc_power_up(host);
                        mmc_select_voltage(host, host->ocr);
                        /*
                         * Tell runtime PM core we just powered up the card,
                         * since it still believes the card is powered off.
                         * Note that currently runtime PM is only enabled
                         * for SDIO cards that are MMC_CAP_POWER_OFF_CARD
                         */
                        if (mmc_card_sdio(host->card) &&
                            (host->caps & MMC_CAP_POWER_OFF_CARD)) {
                                pm_runtime_disable(&host->card->dev);
                                pm_runtime_set_active(&host->card->dev);
                                pm_runtime_enable(&host->card->dev);
                        }
                }
                BUG_ON(!host->bus_ops->resume);
                err = host->bus_ops->resume(host);
                if (err) {
                        printk(KERN_WARNING "%s: error %d during resume "
                                            "(card was removed?)\n",
                               mmc_hostname(host), err);
                        err = 0;
                }
        }
        host->pm_flags &= ~MMC_PM_KEEP_POWER;
        mmc_bus_put(host);

        return err;
}
EXPORT_SYMBOL(mmc_resume_host);

/* Do the card removal on suspend if card is assumed removable.
 * Do that in pm notifier while userspace isn't yet frozen, so we will be able
 * to sync the card.
 */
int mmc_pm_notify(struct notifier_block *notify_block,
                  unsigned long mode, void *unused)
{
        struct mmc_host *host = container_of(
                notify_block, struct mmc_host, pm_notify);
        unsigned long flags;

        switch (mode) {
        case PM_HIBERNATION_PREPARE:
        case PM_SUSPEND_PREPARE:

                spin_lock_irqsave(&host->lock, flags);
                host->rescan_disable = 1;
                spin_unlock_irqrestore(&host->lock, flags);
                cancel_delayed_work_sync(&host->detect);

                if (!host->bus_ops || host->bus_ops->suspend)
                        break;

                mmc_claim_host(host);

                if (host->bus_ops->remove)
                        host->bus_ops->remove(host);

                mmc_detach_bus(host);
                mmc_release_host(host);
                host->pm_flags = 0;
                break;

        case PM_POST_SUSPEND:
        case PM_POST_HIBERNATION:
        case PM_POST_RESTORE:

                spin_lock_irqsave(&host->lock, flags);
                host->rescan_disable = 0;
                spin_unlock_irqrestore(&host->lock, flags);
                mmc_detect_change(host, 0);

        }

        return 0;
}
#endif

static int __init mmc_init(void)
{
        int ret;

        workqueue = alloc_ordered_workqueue("kmmcd", 0);
        if (!workqueue)
                return -ENOMEM;

        ret = mmc_register_bus();
        if (ret)
                goto destroy_workqueue;

        ret = mmc_register_host_class();
        if (ret)
                goto unregister_bus;

        ret = sdio_register_bus();
        if (ret)
                goto unregister_host_class;

        return 0;

unregister_host_class:
        mmc_unregister_host_class();
unregister_bus:
        mmc_unregister_bus();
destroy_workqueue:
        destroy_workqueue(workqueue);

        return ret;
}

static void __exit mmc_exit(void)
{
        sdio_unregister_bus();
        mmc_unregister_host_class();
        mmc_unregister_bus();
        destroy_workqueue(workqueue);
}

subsys_initcall(mmc_init);
module_exit(mmc_exit);

MODULE_LICENSE("GPL");