GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/mmc/host/mmci.c
/*
 * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 * Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>

#include <asm/div64.h>
#include <asm/io.h>
#include <asm/sizes.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"

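/*
 * Default maximum operating frequency in Hz. Platform data (plat->f_max)
 * takes precedence if set; otherwise this value can be changed via the
 * "fmax" module parameter declared at the bottom of this file.
 */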
static unsigned int fmax = 515633;

/**
 * struct variant_data - MMCI variant-specific quirks
 * @clkreg: default value for MMCICLOCK register
 * @clkreg_enable: enable value for MMCICLOCK register
 * @datalength_bits: number of bits in the MMCIDATALENGTH register
 * @fifosize: number of bytes that can be written when MCI_TXFIFOEMPTY
 *            is asserted (likewise for RX)
 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
 *                is asserted (likewise for RX)
 * @sdio: variant supports SDIO
 * @st_clkdiv: true if using an ST-specific clock divider algorithm
 * @blksz_datactrl16: true if the block size is held in bits 16..30 of the
 *                    datactrl register
 */
struct variant_data {
        unsigned int clkreg;
        unsigned int clkreg_enable;
        unsigned int datalength_bits;
        unsigned int fifosize;
        unsigned int fifohalfsize;
        bool sdio;
        bool st_clkdiv;
        bool blksz_datactrl16;
};

static struct variant_data variant_arm = {
        .fifosize = 16 * 4,
        .fifohalfsize = 8 * 4,
        .datalength_bits = 16,
};

static struct variant_data variant_arm_extended_fifo = {
        .fifosize = 128 * 4,
        .fifohalfsize = 64 * 4,
        .datalength_bits = 16,
};

static struct variant_data variant_u300 = {
        .fifosize = 16 * 4,
        .fifohalfsize = 8 * 4,
        .clkreg_enable = MCI_ST_U300_HWFCEN,
        .datalength_bits = 16,
        .sdio = true,
};

static struct variant_data variant_ux500 = {
        .fifosize = 30 * 4,
        .fifohalfsize = 8 * 4,
        .clkreg = MCI_CLK_ENABLE,
        .clkreg_enable = MCI_ST_UX500_HWFCEN,
        .datalength_bits = 24,
        .sdio = true,
        .st_clkdiv = true,
};

static struct variant_data variant_ux500v2 = {
        .fifosize = 30 * 4,
        .fifohalfsize = 8 * 4,
        .clkreg = MCI_CLK_ENABLE,
        .clkreg_enable = MCI_ST_UX500_HWFCEN,
        .datalength_bits = 24,
        .sdio = true,
        .st_clkdiv = true,
        .blksz_datactrl16 = true,
};

/*
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
        struct variant_data *variant = host->variant;
        u32 clk = variant->clkreg;

        if (desired) {
                if (desired >= host->mclk) {
                        clk = MCI_CLK_BYPASS;
                        if (variant->st_clkdiv)
                                clk |= MCI_ST_UX500_NEG_EDGE;
                        host->cclk = host->mclk;
                } else if (variant->st_clkdiv) {
                        /*
                         * DB8500 TRM says f = mclk / (clkdiv + 2)
                         * => clkdiv = (mclk / f) - 2
                         * Round the divider up so we don't exceed the max
                         * frequency
                         */
                        clk = DIV_ROUND_UP(host->mclk, desired) - 2;
                        if (clk >= 256)
                                clk = 255;
                        host->cclk = host->mclk / (clk + 2);
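                        /*
                         * Worked example (illustrative values): with
                         * mclk = 100 MHz and desired = 400 kHz,
                         * clk = DIV_ROUND_UP(100000000, 400000) - 2 = 248,
                         * so cclk = 100000000 / (248 + 2) = 400 kHz.
                         */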
                } else {
                        /*
                         * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
                         * => clkdiv = mclk / (2 * f) - 1
                         */
                        clk = host->mclk / (2 * desired) - 1;
                        if (clk >= 256)
                                clk = 255;
                        host->cclk = host->mclk / (2 * (clk + 1));
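                        /*
                         * Worked example (illustrative values): with
                         * mclk = 100 MHz and desired = 400 kHz,
                         * clk = 100000000 / (2 * 400000) - 1 = 124,
                         * so cclk = 100000000 / (2 * 125) = 400 kHz.
                         */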
                }

                clk |= variant->clkreg_enable;
                clk |= MCI_CLK_ENABLE;
                /* This hasn't proven to be worthwhile */
                /* clk |= MCI_CLK_PWRSAVE; */
        }

        if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
                clk |= MCI_4BIT_BUS;
        if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
                clk |= MCI_ST_8BIT_BUS;

        writel(clk, host->base + MMCICLOCK);
}

static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
        writel(0, host->base + MMCICOMMAND);

        BUG_ON(host->data);

        host->mrq = NULL;
        host->cmd = NULL;

        /*
         * Need to drop the host lock here; mmc_request_done may call
         * back into the driver...
         */
        spin_unlock(&host->lock);
        mmc_request_done(host->mmc, mrq);
        spin_lock(&host->lock);
}

static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
        void __iomem *base = host->base;

        if (host->singleirq) {
                unsigned int mask0 = readl(base + MMCIMASK0);

                mask0 &= ~MCI_IRQ1MASK;
                mask0 |= mask;

                writel(mask0, base + MMCIMASK0);
        }

        writel(mask, base + MMCIMASK1);
}

static void mmci_stop_data(struct mmci_host *host)
{
        writel(0, host->base + MMCIDATACTRL);
        mmci_set_mask1(host, 0);
        host->data = NULL;
}

static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
        unsigned int flags = SG_MITER_ATOMIC;

        if (data->flags & MMC_DATA_READ)
                flags |= SG_MITER_TO_SG;
        else
                flags |= SG_MITER_FROM_SG;

        sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}

/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE
static void __devinit mmci_dma_setup(struct mmci_host *host)
{
        struct mmci_platform_data *plat = host->plat;
        const char *rxname, *txname;
        dma_cap_mask_t mask;

        if (!plat || !plat->dma_filter) {
                dev_info(mmc_dev(host->mmc), "no DMA platform data\n");
                return;
        }

        /* Try to acquire a generic DMA engine slave channel */
        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        /*
         * If only an RX channel is specified, the driver will
         * attempt to use it bidirectionally, however if it is
         * specified but cannot be located, DMA will be disabled.
         */
        if (plat->dma_rx_param) {
                host->dma_rx_channel = dma_request_channel(mask,
                                                           plat->dma_filter,
                                                           plat->dma_rx_param);
                /* E.g. if no DMA hardware is present */
                if (!host->dma_rx_channel)
                        dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
        }

        if (plat->dma_tx_param) {
                host->dma_tx_channel = dma_request_channel(mask,
                                                           plat->dma_filter,
                                                           plat->dma_tx_param);
                if (!host->dma_tx_channel)
                        dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
        } else {
                host->dma_tx_channel = host->dma_rx_channel;
        }

        if (host->dma_rx_channel)
                rxname = dma_chan_name(host->dma_rx_channel);
        else
                rxname = "none";

        if (host->dma_tx_channel)
                txname = dma_chan_name(host->dma_tx_channel);
        else
                txname = "none";

        dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
                 rxname, txname);

        /*
         * Limit the maximum segment size in any SG entry according to
         * the parameters of the DMA engine device.
         */
        if (host->dma_tx_channel) {
                struct device *dev = host->dma_tx_channel->device->dev;
                unsigned int max_seg_size = dma_get_max_seg_size(dev);

                if (max_seg_size < host->mmc->max_seg_size)
                        host->mmc->max_seg_size = max_seg_size;
        }
        if (host->dma_rx_channel) {
                struct device *dev = host->dma_rx_channel->device->dev;
                unsigned int max_seg_size = dma_get_max_seg_size(dev);

                if (max_seg_size < host->mmc->max_seg_size)
                        host->mmc->max_seg_size = max_seg_size;
        }
}

/*
 * This is used in __devinit or __devexit so inline it
 * so it can be discarded.
 */
static inline void mmci_dma_release(struct mmci_host *host)
{
        struct mmci_platform_data *plat = host->plat;

        if (host->dma_rx_channel)
                dma_release_channel(host->dma_rx_channel);
        if (host->dma_tx_channel && plat->dma_tx_param)
                dma_release_channel(host->dma_tx_channel);
        host->dma_rx_channel = host->dma_tx_channel = NULL;
}

static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
        struct dma_chan *chan = host->dma_current;
        enum dma_data_direction dir;
        u32 status;
        int i;

        /* Wait up to 1ms for the DMA to complete */
        for (i = 0; ; i++) {
                status = readl(host->base + MMCISTATUS);
                if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
                        break;
                udelay(10);
        }

        /*
         * Check to see whether we still have some data left in the FIFO -
         * this catches DMA controllers which are unable to monitor the
         * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
         * contiguous buffers. On TX, we'll get a FIFO underrun error.
         */
        if (status & MCI_RXDATAAVLBLMASK) {
                dmaengine_terminate_all(chan);
                if (!data->error)
                        data->error = -EIO;
        }

        if (data->flags & MMC_DATA_WRITE) {
                dir = DMA_TO_DEVICE;
        } else {
                dir = DMA_FROM_DEVICE;
        }

        dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);

        /*
         * Use of DMA with scatter-gather is impossible.
         * Give up on DMA and switch back to PIO mode.
         */
        if (status & MCI_RXDATAAVLBLMASK) {
                dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
                mmci_dma_release(host);
        }
}

static void mmci_dma_data_error(struct mmci_host *host)
{
        dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
        dmaengine_terminate_all(host->dma_current);
}

static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
        struct variant_data *variant = host->variant;
        struct dma_slave_config conf = {
                .src_addr = host->phybase + MMCIFIFO,
                .dst_addr = host->phybase + MMCIFIFO,
                .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .src_maxburst = variant->fifohalfsize >> 2, /* # of words */
                .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
        };
        struct mmc_data *data = host->data;
        struct dma_chan *chan;
        struct dma_device *device;
        struct dma_async_tx_descriptor *desc;
        int nr_sg;

        host->dma_current = NULL;

        if (data->flags & MMC_DATA_READ) {
                conf.direction = DMA_FROM_DEVICE;
                chan = host->dma_rx_channel;
        } else {
                conf.direction = DMA_TO_DEVICE;
                chan = host->dma_tx_channel;
        }

        /* If there's no DMA channel, fall back to PIO */
        if (!chan)
                return -EINVAL;

        /* If less than or equal to the fifo size, don't bother with DMA */
        if (host->size <= variant->fifosize)
                return -EINVAL;

        device = chan->device;
        nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, conf.direction);
        if (nr_sg == 0)
                return -EINVAL;

        dmaengine_slave_config(chan, &conf);
        desc = device->device_prep_slave_sg(chan, data->sg, nr_sg,
                                            conf.direction, DMA_CTRL_ACK);
        if (!desc)
                goto unmap_exit;

        /* Okay, go for it. */
        host->dma_current = chan;

        dev_vdbg(mmc_dev(host->mmc),
                 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
                 data->sg_len, data->blksz, data->blocks, data->flags);
        dmaengine_submit(desc);
        dma_async_issue_pending(chan);

        datactrl |= MCI_DPSM_DMAENABLE;

        /* Trigger the DMA transfer */
        writel(datactrl, host->base + MMCIDATACTRL);

        /*
         * Let the MMCI say when the data is ended and it's time
         * to fire the next DMA request. When that happens, MMCI will
         * call mmci_data_end()
         */
        writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
               host->base + MMCIMASK0);
        return 0;

unmap_exit:
        dmaengine_terminate_all(chan);
        dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
        return -ENOMEM;
}
#else
/* Blank functions if the DMA engine is not available */
static inline void mmci_dma_setup(struct mmci_host *host)
{
}

static inline void mmci_dma_release(struct mmci_host *host)
{
}

static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
}

static inline void mmci_dma_data_error(struct mmci_host *host)
{
}

static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
        return -ENOSYS;
}
#endif

static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
        struct variant_data *variant = host->variant;
        unsigned int datactrl, timeout, irqmask;
        unsigned long long clks;
        void __iomem *base;
        int blksz_bits;

        dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
                data->blksz, data->blocks, data->flags);

        host->data = data;
        host->size = data->blksz * data->blocks;
        data->bytes_xfered = 0;

        clks = (unsigned long long)data->timeout_ns * host->cclk;
        do_div(clks, 1000000000UL);

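        /*
         * Worked example (illustrative values): timeout_ns = 100 ms at
         * cclk = 400 kHz gives clks = 100000000 * 400000 / 10^9 = 40000
         * card clock cycles on top of timeout_clks.
         */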
        timeout = data->timeout_clks + (unsigned int)clks;

        base = host->base;
        writel(timeout, base + MMCIDATATIMER);
        writel(host->size, base + MMCIDATALENGTH);

        blksz_bits = ffs(data->blksz) - 1;
        BUG_ON(1 << blksz_bits != data->blksz);

        if (variant->blksz_datactrl16)
                datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
        else
                datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
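        /*
         * Worked example (illustrative): for blksz = 512, ffs(512) - 1 = 9,
         * so the classic variants program the log2 value 9 << 4, while
         * blksz_datactrl16 variants program 512 << 16 directly.
         */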

        if (data->flags & MMC_DATA_READ)
                datactrl |= MCI_DPSM_DIRECTION;

        /*
         * Attempt to use DMA operation mode, if this
         * should fail, fall back to PIO mode
         */
        if (!mmci_dma_start_data(host, datactrl))
                return;

        /* IRQ mode, map the SG list for CPU reading/writing */
        mmci_init_sg(host, data);

        if (data->flags & MMC_DATA_READ) {
                irqmask = MCI_RXFIFOHALFFULLMASK;

                /*
                 * If we have less than the fifo 'half-full' threshold to
                 * transfer, trigger a PIO interrupt as soon as any data
                 * is available.
                 */
                if (host->size < variant->fifohalfsize)
                        irqmask |= MCI_RXDATAAVLBLMASK;
        } else {
                /*
                 * We don't actually need to include "FIFO empty" here
                 * since it's implicit in "FIFO half empty".
                 */
                irqmask = MCI_TXFIFOHALFEMPTYMASK;
        }

        /* The ST Micro variants have a special bit to enable SDIO */
        if (variant->sdio && host->mmc->card)
                if (mmc_card_sdio(host->mmc->card))
                        datactrl |= MCI_ST_DPSM_SDIOEN;

        writel(datactrl, base + MMCIDATACTRL);
        writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
        mmci_set_mask1(host, irqmask);
}

static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
        void __iomem *base = host->base;

        dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
                cmd->opcode, cmd->arg, cmd->flags);

        if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
                writel(0, base + MMCICOMMAND);
                udelay(1);
        }

        c |= cmd->opcode | MCI_CPSM_ENABLE;
        if (cmd->flags & MMC_RSP_PRESENT) {
                if (cmd->flags & MMC_RSP_136)
                        c |= MCI_CPSM_LONGRSP;
                c |= MCI_CPSM_RESPONSE;
        }
        if (/*interrupt*/0)
                c |= MCI_CPSM_INTERRUPT;

        host->cmd = cmd;

        writel(cmd->arg, base + MMCIARGUMENT);
        writel(c, base + MMCICOMMAND);
}

static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
              unsigned int status)
{
        /* First check for errors */
        if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
                u32 remain, success;

                /* Terminate the DMA transfer */
                if (dma_inprogress(host))
                        mmci_dma_data_error(host);

                /*
                 * Calculate how far we are into the transfer. Note that
                 * the data counter gives the number of bytes transferred
                 * on the MMC bus, not on the host side. On reads, this
                 * can be as much as a FIFO-worth of data ahead. This
                 * matters for FIFO overruns only.
                 */
                remain = readl(host->base + MMCIDATACNT);
                success = data->blksz * data->blocks - remain;

                dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
                        status, success);
                if (status & MCI_DATACRCFAIL) {
                        /* Last block was not successful */
                        success -= 1;
                        data->error = -EILSEQ;
                } else if (status & MCI_DATATIMEOUT) {
                        data->error = -ETIMEDOUT;
                } else if (status & MCI_STARTBITERR) {
                        data->error = -ECOMM;
                } else if (status & MCI_TXUNDERRUN) {
                        data->error = -EIO;
                } else if (status & MCI_RXOVERRUN) {
                        if (success > host->variant->fifosize)
                                success -= host->variant->fifosize;
                        else
                                success = 0;
                        data->error = -EIO;
                }
                data->bytes_xfered = round_down(success, data->blksz);
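                /*
                 * Worked example (illustrative): if 1000 bytes made it
                 * across before the error and blksz = 512, only the
                 * complete block counts: round_down(1000, 512) = 512.
                 */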
        }

        if (status & MCI_DATABLOCKEND)
                dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

        if (status & MCI_DATAEND || data->error) {
                if (dma_inprogress(host))
                        mmci_dma_unmap(host, data);
                mmci_stop_data(host);

                if (!data->error)
                        /* The error clause is handled above, success! */
                        data->bytes_xfered = data->blksz * data->blocks;

                if (!data->stop) {
                        mmci_request_end(host, data->mrq);
                } else {
                        mmci_start_command(host, data->stop, 0);
                }
        }
}

static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
             unsigned int status)
{
        void __iomem *base = host->base;

        host->cmd = NULL;

        if (status & MCI_CMDTIMEOUT) {
                cmd->error = -ETIMEDOUT;
        } else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
                cmd->error = -EILSEQ;
        } else {
                cmd->resp[0] = readl(base + MMCIRESPONSE0);
                cmd->resp[1] = readl(base + MMCIRESPONSE1);
                cmd->resp[2] = readl(base + MMCIRESPONSE2);
                cmd->resp[3] = readl(base + MMCIRESPONSE3);
        }

        if (!cmd->data || cmd->error) {
                if (host->data)
                        mmci_stop_data(host);
                mmci_request_end(host, cmd->mrq);
        } else if (!(cmd->data->flags & MMC_DATA_READ)) {
                mmci_start_data(host, cmd->data);
        }
}

static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
        void __iomem *base = host->base;
        char *ptr = buffer;
        u32 status;
        int host_remain = host->size;

        do {
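                /*
                 * MMCIFIFOCNT holds the number of words still to be
                 * transferred, so the bytes already sitting in the FIFO
                 * are host_remain - FIFOCNT * 4. Illustrative example:
                 * host_remain = 512 with 120 words outstanding gives
                 * 512 - 480 = 32 readable bytes.
                 */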
                int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);

                if (count > remain)
                        count = remain;

                if (count <= 0)
                        break;

                readsl(base + MMCIFIFO, ptr, count >> 2);

                ptr += count;
                remain -= count;
                host_remain -= count;

                if (remain == 0)
                        break;

                status = readl(base + MMCISTATUS);
        } while (status & MCI_RXDATAAVLBL);

        return ptr - buffer;
}

static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
        struct variant_data *variant = host->variant;
        void __iomem *base = host->base;
        char *ptr = buffer;

        do {
                unsigned int count, maxcnt;

                maxcnt = status & MCI_TXFIFOEMPTY ?
                         variant->fifosize : variant->fifohalfsize;
                count = min(remain, maxcnt);

                /*
                 * On the ST Micro variant, SDIO transfers of less than
                 * 8 bytes should have clock H/W flow control disabled.
                 */
                if (variant->sdio &&
                    mmc_card_sdio(host->mmc->card)) {
                        if (count < 8)
                                writel(readl(host->base + MMCICLOCK) &
                                       ~variant->clkreg_enable,
                                       host->base + MMCICLOCK);
                        else
                                writel(readl(host->base + MMCICLOCK) |
                                       variant->clkreg_enable,
                                       host->base + MMCICLOCK);
                }

                /*
                 * SDIO especially may want to send something that is
                 * not divisible by 4 (as opposed to card sectors
                 * etc), and the FIFO only accepts full 32-bit writes.
                 * So compensate by adding +3 to the count: a single
                 * byte becomes one 32-bit write, 7 bytes become two
                 * 32-bit writes, etc.
                 */
                writesl(base + MMCIFIFO, ptr, (count + 3) >> 2);

                ptr += count;
                remain -= count;

                if (remain == 0)
                        break;

                status = readl(base + MMCISTATUS);
        } while (status & MCI_TXFIFOHALFEMPTY);

        return ptr - buffer;
}

/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
        struct mmci_host *host = dev_id;
        struct sg_mapping_iter *sg_miter = &host->sg_miter;
        struct variant_data *variant = host->variant;
        void __iomem *base = host->base;
        unsigned long flags;
        u32 status;

        status = readl(base + MMCISTATUS);

        dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

        local_irq_save(flags);

        do {
                unsigned int remain, len;
                char *buffer;

                /*
                 * For write, we only need to test the half-empty flag
                 * here - if the FIFO is completely empty, then by
                 * definition it is more than half empty.
                 *
                 * For read, check for data available.
                 */
                if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
                        break;

                if (!sg_miter_next(sg_miter))
                        break;

                buffer = sg_miter->addr;
                remain = sg_miter->length;

                len = 0;
                if (status & MCI_RXACTIVE)
                        len = mmci_pio_read(host, buffer, remain);
                if (status & MCI_TXACTIVE)
                        len = mmci_pio_write(host, buffer, remain, status);

                sg_miter->consumed = len;

                host->size -= len;
                remain -= len;

                if (remain)
                        break;

                status = readl(base + MMCISTATUS);
        } while (1);

        sg_miter_stop(sg_miter);

        local_irq_restore(flags);

        /*
         * If we have less than the fifo 'half-full' threshold to transfer,
         * trigger a PIO interrupt as soon as any data is available.
         */
        if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
                mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

        /*
         * If we run out of data, disable the data IRQs; this
         * prevents a race where the FIFO becomes empty before
         * the chip itself has disabled the data path, and
         * stops us racing with our data end IRQ.
         */
        if (host->size == 0) {
                mmci_set_mask1(host, 0);
                writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
        }

        return IRQ_HANDLED;
}

/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
        struct mmci_host *host = dev_id;
        u32 status;
        int ret = 0;

        spin_lock(&host->lock);

        do {
                struct mmc_command *cmd;
                struct mmc_data *data;

                status = readl(host->base + MMCISTATUS);

                if (host->singleirq) {
                        if (status & readl(host->base + MMCIMASK1))
                                mmci_pio_irq(irq, dev_id);

                        status &= ~MCI_IRQ1MASK;
                }

                status &= readl(host->base + MMCIMASK0);
                writel(status, host->base + MMCICLEAR);

                dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

                data = host->data;
                if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
                              MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
                        mmci_data_irq(host, data, status);

                cmd = host->cmd;
                if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
                        mmci_cmd_irq(host, cmd, status);

                ret = 1;
        } while (status);

        spin_unlock(&host->lock);

        return IRQ_RETVAL(ret);
}

static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct mmci_host *host = mmc_priv(mmc);
        unsigned long flags;

        WARN_ON(host->mrq != NULL);

        if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
                dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
                        mrq->data->blksz);
                mrq->cmd->error = -EINVAL;
                mmc_request_done(mmc, mrq);
                return;
        }

        spin_lock_irqsave(&host->lock, flags);

        host->mrq = mrq;

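        /*
         * For reads, the data path is set up before the command is sent
         * so the FIFO (or DMA) is ready as soon as the card starts
         * returning data; for writes, mmci_cmd_irq() starts the data
         * path only after the command has completed.
         */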
        if (mrq->data && mrq->data->flags & MMC_DATA_READ)
                mmci_start_data(host, mrq->data);

        mmci_start_command(host, mrq->cmd, 0);

        spin_unlock_irqrestore(&host->lock, flags);
}

static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct mmci_host *host = mmc_priv(mmc);
        u32 pwr = 0;
        unsigned long flags;
        int ret;

        switch (ios->power_mode) {
        case MMC_POWER_OFF:
                if (host->vcc)
                        ret = mmc_regulator_set_ocr(mmc, host->vcc, 0);
                break;
        case MMC_POWER_UP:
                if (host->vcc) {
                        ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd);
                        if (ret) {
                                dev_err(mmc_dev(mmc), "unable to set OCR\n");
                                /*
                                 * The .set_ios() function in the mmc_host_ops
                                 * struct returns void, and failing to set the
                                 * power should be rare so we print an error
                                 * and return here.
                                 */
                                return;
                        }
                }
                if (host->plat->vdd_handler)
                        pwr |= host->plat->vdd_handler(mmc_dev(mmc), ios->vdd,
                                                       ios->power_mode);
                /* The ST version does not have this, fall through to POWER_ON */
                if (host->hw_designer != AMBA_VENDOR_ST) {
                        pwr |= MCI_PWR_UP;
                        break;
                }
        case MMC_POWER_ON:
                pwr |= MCI_PWR_ON;
                break;
        }

        if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
                if (host->hw_designer != AMBA_VENDOR_ST)
                        pwr |= MCI_ROD;
                else {
                        /*
                         * The ST Micro variant uses the ROD bit for something
                         * else and only has OD (Open Drain).
                         */
                        pwr |= MCI_OD;
                }
        }

        spin_lock_irqsave(&host->lock, flags);

        mmci_set_clkreg(host, ios->clock);

        if (host->pwr != pwr) {
                host->pwr = pwr;
                writel(pwr, host->base + MMCIPOWER);
        }

        spin_unlock_irqrestore(&host->lock, flags);
}

static int mmci_get_ro(struct mmc_host *mmc)
{
        struct mmci_host *host = mmc_priv(mmc);

        if (host->gpio_wp == -ENOSYS)
                return -ENOSYS;

        return gpio_get_value_cansleep(host->gpio_wp);
}

static int mmci_get_cd(struct mmc_host *mmc)
{
        struct mmci_host *host = mmc_priv(mmc);
        struct mmci_platform_data *plat = host->plat;
        unsigned int status;

        if (host->gpio_cd == -ENOSYS) {
                if (!plat->status)
                        return 1; /* Assume always present */

                status = plat->status(mmc_dev(host->mmc));
        } else
                status = !!gpio_get_value_cansleep(host->gpio_cd)
                        ^ plat->cd_invert;

        /*
         * Use positive logic throughout - status is zero for no card,
         * non-zero for card inserted.
         */
        return status;
}

static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
{
        struct mmci_host *host = dev_id;

        mmc_detect_change(host->mmc, msecs_to_jiffies(500));

        return IRQ_HANDLED;
}

static const struct mmc_host_ops mmci_ops = {
        .request = mmci_request,
        .set_ios = mmci_set_ios,
        .get_ro = mmci_get_ro,
        .get_cd = mmci_get_cd,
};

static int __devinit mmci_probe(struct amba_device *dev,
        const struct amba_id *id)
{
        struct mmci_platform_data *plat = dev->dev.platform_data;
        struct variant_data *variant = id->data;
        struct mmci_host *host;
        struct mmc_host *mmc;
        int ret;

        /* must have platform data */
        if (!plat) {
                ret = -EINVAL;
                goto out;
        }

        ret = amba_request_regions(dev, DRIVER_NAME);
        if (ret)
                goto out;

        mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
        if (!mmc) {
                ret = -ENOMEM;
                goto rel_regions;
        }

        host = mmc_priv(mmc);
        host->mmc = mmc;

        host->gpio_wp = -ENOSYS;
        host->gpio_cd = -ENOSYS;
        host->gpio_cd_irq = -1;

        host->hw_designer = amba_manf(dev);
        host->hw_revision = amba_rev(dev);
        dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
        dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

        host->clk = clk_get(&dev->dev, NULL);
        if (IS_ERR(host->clk)) {
                ret = PTR_ERR(host->clk);
                host->clk = NULL;
                goto host_free;
        }

        ret = clk_enable(host->clk);
        if (ret)
                goto clk_free;

        host->plat = plat;
        host->variant = variant;
        host->mclk = clk_get_rate(host->clk);
        /*
         * According to the spec, mclk is max 100 MHz,
         * so we try to adjust the clock down to this
         * (if possible).
         */
        if (host->mclk > 100000000) {
                ret = clk_set_rate(host->clk, 100000000);
                if (ret < 0)
                        goto clk_disable;
                host->mclk = clk_get_rate(host->clk);
                dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
                        host->mclk);
        }
        host->phybase = dev->res.start;
        host->base = ioremap(dev->res.start, resource_size(&dev->res));
        if (!host->base) {
                ret = -ENOMEM;
                goto clk_disable;
        }

        mmc->ops = &mmci_ops;
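        /*
         * With the largest PL180-style divider (255), f = mclk /
         * (2 * (255 + 1)) = mclk / 512, so round mclk / 512 up for f_min
         * (see mmci_set_clkreg() above). Illustrative example:
         * mclk = 100 MHz gives f_min = 195313 Hz.
         */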
        mmc->f_min = (host->mclk + 511) / 512;
        /*
         * If the platform data supplies a maximum operating
         * frequency, this takes precedence. Else, we fall back
         * to using the module parameter, which has a (low)
         * default value in case it is not specified. Either
         * value must not exceed the clock rate into the block,
         * of course.
         */
        if (plat->f_max)
                mmc->f_max = min(host->mclk, plat->f_max);
        else
                mmc->f_max = min(host->mclk, fmax);
        dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);

#ifdef CONFIG_REGULATOR
        /* If we're using the regulator framework, try to fetch a regulator */
        host->vcc = regulator_get(&dev->dev, "vmmc");
        if (IS_ERR(host->vcc))
                host->vcc = NULL;
        else {
                int mask = mmc_regulator_get_ocrmask(host->vcc);

                if (mask < 0)
                        dev_err(&dev->dev, "error getting OCR mask (%d)\n",
                                mask);
                else {
                        host->mmc->ocr_avail = (u32) mask;
                        if (plat->ocr_mask)
                                dev_warn(&dev->dev,
                                         "Provided ocr_mask/setpower will not be used "
                                         "(using regulator instead)\n");
                }
        }
#endif
        /* Fall back to platform data if no regulator is found */
        if (host->vcc == NULL)
                mmc->ocr_avail = plat->ocr_mask;
        mmc->caps = plat->capabilities;

        /*
         * We can do scatter-gather I/O (SGIO)
         */
        mmc->max_segs = NR_SG;

        /*
         * Since only a certain number of bits are valid in the data length
         * register, we must ensure that we don't exceed 2^num-1 bytes in a
         * single request.
         */
        mmc->max_req_size = (1 << variant->datalength_bits) - 1;
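        /*
         * Illustrative example: the ux500 variants have datalength_bits =
         * 24, capping a single request at 2^24 - 1 = 16777215 bytes.
         */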

        /*
         * Set the maximum segment size. Since we aren't doing DMA
         * (yet) we are only limited by the data length register.
         */
        mmc->max_seg_size = mmc->max_req_size;

        /*
         * Block size can be up to 2048 bytes, but must be a power of two.
         */
        mmc->max_blk_size = 2048;

        /*
         * No limit on the number of blocks transferred.
         */
        mmc->max_blk_count = mmc->max_req_size;

        spin_lock_init(&host->lock);

        writel(0, host->base + MMCIMASK0);
        writel(0, host->base + MMCIMASK1);
        writel(0xfff, host->base + MMCICLEAR);

        if (gpio_is_valid(plat->gpio_cd)) {
                ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
                if (ret == 0)
                        ret = gpio_direction_input(plat->gpio_cd);
                if (ret == 0)
                        host->gpio_cd = plat->gpio_cd;
                else if (ret != -ENOSYS)
                        goto err_gpio_cd;

                /*
                 * A gpio pin that will detect cards when inserted and removed
                 * will most likely want to trigger on the edges if it is
                 * 0 when ejected and 1 when inserted (or mutatis mutandis
                 * for the inverted case) so we request triggers on both
                 * edges.
                 */
                ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
                                              mmci_cd_irq,
                                              IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
                                              DRIVER_NAME " (cd)", host);
                if (ret >= 0)
                        host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
        }
        if (gpio_is_valid(plat->gpio_wp)) {
                ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
                if (ret == 0)
                        ret = gpio_direction_input(plat->gpio_wp);
                if (ret == 0)
                        host->gpio_wp = plat->gpio_wp;
                else if (ret != -ENOSYS)
                        goto err_gpio_wp;
        }

        if ((host->plat->status || host->gpio_cd != -ENOSYS)
            && host->gpio_cd_irq < 0)
                mmc->caps |= MMC_CAP_NEEDS_POLL;

        ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
        if (ret)
                goto unmap;

        if (dev->irq[1] == NO_IRQ)
                host->singleirq = true;
        else {
                ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
                                  DRIVER_NAME " (pio)", host);
                if (ret)
                        goto irq0_free;
        }

        writel(MCI_IRQENABLE, host->base + MMCIMASK0);

        amba_set_drvdata(dev, mmc);

        dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
                 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
                 amba_rev(dev), (unsigned long long)dev->res.start,
                 dev->irq[0], dev->irq[1]);

        mmci_dma_setup(host);

        mmc_add_host(mmc);

        return 0;

irq0_free:
        free_irq(dev->irq[0], host);
unmap:
        if (host->gpio_wp != -ENOSYS)
                gpio_free(host->gpio_wp);
err_gpio_wp:
        if (host->gpio_cd_irq >= 0)
                free_irq(host->gpio_cd_irq, host);
        if (host->gpio_cd != -ENOSYS)
                gpio_free(host->gpio_cd);
err_gpio_cd:
        iounmap(host->base);
clk_disable:
        clk_disable(host->clk);
clk_free:
        clk_put(host->clk);
host_free:
        mmc_free_host(mmc);
rel_regions:
        amba_release_regions(dev);
out:
        return ret;
}

static int __devexit mmci_remove(struct amba_device *dev)
{
        struct mmc_host *mmc = amba_get_drvdata(dev);

        amba_set_drvdata(dev, NULL);

        if (mmc) {
                struct mmci_host *host = mmc_priv(mmc);

                mmc_remove_host(mmc);

                writel(0, host->base + MMCIMASK0);
                writel(0, host->base + MMCIMASK1);

                writel(0, host->base + MMCICOMMAND);
                writel(0, host->base + MMCIDATACTRL);

                mmci_dma_release(host);
                free_irq(dev->irq[0], host);
                if (!host->singleirq)
                        free_irq(dev->irq[1], host);

                if (host->gpio_wp != -ENOSYS)
                        gpio_free(host->gpio_wp);
                if (host->gpio_cd_irq >= 0)
                        free_irq(host->gpio_cd_irq, host);
                if (host->gpio_cd != -ENOSYS)
                        gpio_free(host->gpio_cd);

                iounmap(host->base);
                clk_disable(host->clk);
                clk_put(host->clk);

                if (host->vcc)
                        mmc_regulator_set_ocr(mmc, host->vcc, 0);
                regulator_put(host->vcc);

                mmc_free_host(mmc);

                amba_release_regions(dev);
        }

        return 0;
}

#ifdef CONFIG_PM
static int mmci_suspend(struct amba_device *dev, pm_message_t state)
{
        struct mmc_host *mmc = amba_get_drvdata(dev);
        int ret = 0;

        if (mmc) {
                struct mmci_host *host = mmc_priv(mmc);

                ret = mmc_suspend_host(mmc);
                if (ret == 0)
                        writel(0, host->base + MMCIMASK0);
        }

        return ret;
}

static int mmci_resume(struct amba_device *dev)
{
        struct mmc_host *mmc = amba_get_drvdata(dev);
        int ret = 0;

        if (mmc) {
                struct mmci_host *host = mmc_priv(mmc);

                writel(MCI_IRQENABLE, host->base + MMCIMASK0);

                ret = mmc_resume_host(mmc);
        }

        return ret;
}
#else
#define mmci_suspend NULL
#define mmci_resume NULL
#endif

static struct amba_id mmci_ids[] = {
        {
                .id = 0x00041180,
                .mask = 0xff0fffff,
                .data = &variant_arm,
        },
        {
                .id = 0x01041180,
                .mask = 0xff0fffff,
                .data = &variant_arm_extended_fifo,
        },
        {
                .id = 0x00041181,
                .mask = 0x000fffff,
                .data = &variant_arm,
        },
        /* ST Micro variants */
        {
                .id = 0x00180180,
                .mask = 0x00ffffff,
                .data = &variant_u300,
        },
        {
                .id = 0x00280180,
                .mask = 0x00ffffff,
                .data = &variant_u300,
        },
        {
                .id = 0x00480180,
                .mask = 0xf0ffffff,
                .data = &variant_ux500,
        },
        {
                .id = 0x10480180,
                .mask = 0xf0ffffff,
                .data = &variant_ux500v2,
        },
        { 0, 0 },
};
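
/*
 * A device matches an entry when (periphid & mask) == id. Illustrative
 * example: mask 0xff0fffff in the first entry ignores the revision field
 * in bits 20..23, so any revision of the base PL180 ID 0x00041180 binds
 * to variant_arm.
 */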

static struct amba_driver mmci_driver = {
        .drv = {
                .name = DRIVER_NAME,
        },
        .probe = mmci_probe,
        .remove = __devexit_p(mmci_remove),
        .suspend = mmci_suspend,
        .resume = mmci_resume,
        .id_table = mmci_ids,
};

static int __init mmci_init(void)
{
        return amba_driver_register(&mmci_driver);
}

static void __exit mmci_exit(void)
{
        amba_driver_unregister(&mmci_driver);
}

module_init(mmci_init);
module_exit(mmci_exit);
module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");