GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/mmc/host/mxcmmc.c
/*
 * linux/drivers/mmc/host/mxcmmc.c - Freescale i.MX MMCI driver
 *
 * This is a driver for the SDHC controller found in Freescale MX2/MX3
 * SoCs. It is basically the same hardware as found on MX1 (imxmmc.c).
 * Unlike the hardware found on MX1, this hardware just works and does
 * not need all the quirks found in imxmmc.c, hence the separate driver.
 *
 * Copyright (C) 2008 Sascha Hauer, Pengutronix <[email protected]>
 * Copyright (C) 2006 Pavel Pisa, PiKRON <[email protected]>
 *
 * derived from pxamci.c by Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/blkdev.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>

#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/sizes.h>
#include <mach/mmc.h>

#include <mach/dma.h>

#define DRIVER_NAME "mxc-mmc"

#define MMC_REG_STR_STP_CLK 0x00
#define MMC_REG_STATUS 0x04
#define MMC_REG_CLK_RATE 0x08
#define MMC_REG_CMD_DAT_CONT 0x0C
#define MMC_REG_RES_TO 0x10
#define MMC_REG_READ_TO 0x14
#define MMC_REG_BLK_LEN 0x18
#define MMC_REG_NOB 0x1C
#define MMC_REG_REV_NO 0x20
#define MMC_REG_INT_CNTR 0x24
#define MMC_REG_CMD 0x28
#define MMC_REG_ARG 0x2C
#define MMC_REG_RES_FIFO 0x34
#define MMC_REG_BUFFER_ACCESS 0x38

#define STR_STP_CLK_RESET (1 << 3)
#define STR_STP_CLK_START_CLK (1 << 1)
#define STR_STP_CLK_STOP_CLK (1 << 0)

#define STATUS_CARD_INSERTION (1 << 31)
#define STATUS_CARD_REMOVAL (1 << 30)
#define STATUS_YBUF_EMPTY (1 << 29)
#define STATUS_XBUF_EMPTY (1 << 28)
#define STATUS_YBUF_FULL (1 << 27)
#define STATUS_XBUF_FULL (1 << 26)
#define STATUS_BUF_UND_RUN (1 << 25)
#define STATUS_BUF_OVFL (1 << 24)
#define STATUS_SDIO_INT_ACTIVE (1 << 14)
#define STATUS_END_CMD_RESP (1 << 13)
#define STATUS_WRITE_OP_DONE (1 << 12)
#define STATUS_DATA_TRANS_DONE (1 << 11)
#define STATUS_READ_OP_DONE (1 << 11)
#define STATUS_WR_CRC_ERROR_CODE_MASK (3 << 10)
#define STATUS_CARD_BUS_CLK_RUN (1 << 8)
#define STATUS_BUF_READ_RDY (1 << 7)
#define STATUS_BUF_WRITE_RDY (1 << 6)
#define STATUS_RESP_CRC_ERR (1 << 5)
#define STATUS_CRC_READ_ERR (1 << 3)
#define STATUS_CRC_WRITE_ERR (1 << 2)
#define STATUS_TIME_OUT_RESP (1 << 1)
#define STATUS_TIME_OUT_READ (1 << 0)
#define STATUS_ERR_MASK 0x2f

#define CMD_DAT_CONT_CMD_RESP_LONG_OFF (1 << 12)
#define CMD_DAT_CONT_STOP_READWAIT (1 << 11)
#define CMD_DAT_CONT_START_READWAIT (1 << 10)
#define CMD_DAT_CONT_BUS_WIDTH_4 (2 << 8)
#define CMD_DAT_CONT_INIT (1 << 7)
#define CMD_DAT_CONT_WRITE (1 << 4)
#define CMD_DAT_CONT_DATA_ENABLE (1 << 3)
#define CMD_DAT_CONT_RESPONSE_48BIT_CRC (1 << 0)
#define CMD_DAT_CONT_RESPONSE_136BIT (2 << 0)
#define CMD_DAT_CONT_RESPONSE_48BIT (3 << 0)

#define INT_SDIO_INT_WKP_EN (1 << 18)
#define INT_CARD_INSERTION_WKP_EN (1 << 17)
#define INT_CARD_REMOVAL_WKP_EN (1 << 16)
#define INT_CARD_INSERTION_EN (1 << 15)
#define INT_CARD_REMOVAL_EN (1 << 14)
#define INT_SDIO_IRQ_EN (1 << 13)
#define INT_DAT0_EN (1 << 12)
#define INT_BUF_READ_EN (1 << 4)
#define INT_BUF_WRITE_EN (1 << 3)
#define INT_END_CMD_RES_EN (1 << 2)
#define INT_WRITE_OP_DONE_EN (1 << 1)
#define INT_READ_OP_EN (1 << 0)

struct mxcmci_host {
        struct mmc_host *mmc;
        struct resource *res;
        void __iomem *base;
        int irq;
        int detect_irq;
        struct dma_chan *dma;
        struct dma_async_tx_descriptor *desc;
        int do_dma;
        int default_irq_mask;
        int use_sdio;
        unsigned int power_mode;
        struct imxmmc_platform_data *pdata;

        struct mmc_request *req;
        struct mmc_command *cmd;
        struct mmc_data *data;

        unsigned int datasize;
        unsigned int dma_dir;

        u16 rev_no;
        unsigned int cmdat;

        struct clk *clk;

        int clock;

        struct work_struct datawork;
        spinlock_t lock;

        struct regulator *vcc;

        int burstlen;
        int dmareq;
        struct dma_slave_config dma_slave_config;
        struct imx_dma_data dma_data;
};

static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);

static inline void mxcmci_init_ocr(struct mxcmci_host *host)
{
        host->vcc = regulator_get(mmc_dev(host->mmc), "vmmc");

        if (IS_ERR(host->vcc)) {
                host->vcc = NULL;
        } else {
                host->mmc->ocr_avail = mmc_regulator_get_ocrmask(host->vcc);
                if (host->pdata && host->pdata->ocr_avail)
                        dev_warn(mmc_dev(host->mmc),
                                "pdata->ocr_avail will not be used\n");
        }

        if (host->vcc == NULL) {
                /* fall-back to platform data */
                if (host->pdata && host->pdata->ocr_avail)
                        host->mmc->ocr_avail = host->pdata->ocr_avail;
                else
                        host->mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
        }
}

static inline void mxcmci_set_power(struct mxcmci_host *host,
                                    unsigned char power_mode,
                                    unsigned int vdd)
{
        if (host->vcc) {
                if (power_mode == MMC_POWER_UP)
                        mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
                else if (power_mode == MMC_POWER_OFF)
                        mmc_regulator_set_ocr(host->mmc, host->vcc, 0);
        }

        if (host->pdata && host->pdata->setpower)
                host->pdata->setpower(mmc_dev(host->mmc), vdd);
}

static inline int mxcmci_use_dma(struct mxcmci_host *host)
{
        return host->do_dma;
}

static void mxcmci_softreset(struct mxcmci_host *host)
{
        int i;

        dev_dbg(mmc_dev(host->mmc), "mxcmci_softreset\n");

        /* reset sequence */
        writew(STR_STP_CLK_RESET, host->base + MMC_REG_STR_STP_CLK);
        writew(STR_STP_CLK_RESET | STR_STP_CLK_START_CLK,
                        host->base + MMC_REG_STR_STP_CLK);

        for (i = 0; i < 8; i++)
                writew(STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK);

        writew(0xff, host->base + MMC_REG_RES_TO);
}
static int mxcmci_setup_dma(struct mmc_host *mmc);

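/*
 * Program block count and block length for the next transfer and, when a DMA
 * channel is in use, map the scatterlist and queue a slave-sg descriptor.
 * Scatterlist entries whose offset or length is not 32-bit aligned force a
 * fallback to PIO, as does a failed descriptor preparation.
 */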
static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
{
        unsigned int nob = data->blocks;
        unsigned int blksz = data->blksz;
        unsigned int datasize = nob * blksz;
        struct scatterlist *sg;
        int i, nents;

        if (data->flags & MMC_DATA_STREAM)
                nob = 0xffff;

        host->data = data;
        data->bytes_xfered = 0;

        writew(nob, host->base + MMC_REG_NOB);
        writew(blksz, host->base + MMC_REG_BLK_LEN);
        host->datasize = datasize;

        if (!mxcmci_use_dma(host))
                return 0;

        for_each_sg(data->sg, sg, data->sg_len, i) {
                if (sg->offset & 3 || sg->length & 3) {
                        host->do_dma = 0;
                        return 0;
                }
        }

        if (data->flags & MMC_DATA_READ)
                host->dma_dir = DMA_FROM_DEVICE;
        else
                host->dma_dir = DMA_TO_DEVICE;

        nents = dma_map_sg(host->dma->device->dev, data->sg,
                           data->sg_len, host->dma_dir);
        if (nents != data->sg_len)
                return -EINVAL;

        host->desc = host->dma->device->device_prep_slave_sg(host->dma,
                data->sg, data->sg_len, host->dma_dir,
                DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

        if (!host->desc) {
                dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
                                host->dma_dir);
                host->do_dma = 0;
                return 0; /* Fall back to PIO */
        }
        wmb();

        dmaengine_submit(host->desc);

        return 0;
}

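/*
 * Translate the MMC core response type into the controller's response-format
 * bits, enable the end-of-command (and, for DMA, transfer-done) interrupts,
 * then write opcode, argument and control word to start the command.
 */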
static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,
                unsigned int cmdat)
{
        u32 int_cntr = host->default_irq_mask;
        unsigned long flags;

        WARN_ON(host->cmd != NULL);
        host->cmd = cmd;

        switch (mmc_resp_type(cmd)) {
        case MMC_RSP_R1: /* short CRC, OPCODE */
        case MMC_RSP_R1B:/* short CRC, OPCODE, BUSY */
                cmdat |= CMD_DAT_CONT_RESPONSE_48BIT_CRC;
                break;
        case MMC_RSP_R2: /* long 136 bit + CRC */
                cmdat |= CMD_DAT_CONT_RESPONSE_136BIT;
                break;
        case MMC_RSP_R3: /* short */
                cmdat |= CMD_DAT_CONT_RESPONSE_48BIT;
                break;
        case MMC_RSP_NONE:
                break;
        default:
                dev_err(mmc_dev(host->mmc), "unhandled response type 0x%x\n",
                                mmc_resp_type(cmd));
                cmd->error = -EINVAL;
                return -EINVAL;
        }

        int_cntr = INT_END_CMD_RES_EN;

        if (mxcmci_use_dma(host))
                int_cntr |= INT_READ_OP_EN | INT_WRITE_OP_DONE_EN;

        spin_lock_irqsave(&host->lock, flags);
        if (host->use_sdio)
                int_cntr |= INT_SDIO_IRQ_EN;
        writel(int_cntr, host->base + MMC_REG_INT_CNTR);
        spin_unlock_irqrestore(&host->lock, flags);

        writew(cmd->opcode, host->base + MMC_REG_CMD);
        writel(cmd->arg, host->base + MMC_REG_ARG);
        writew(cmdat, host->base + MMC_REG_CMD_DAT_CONT);

        return 0;
}

static void mxcmci_finish_request(struct mxcmci_host *host,
                struct mmc_request *req)
{
        u32 int_cntr = host->default_irq_mask;
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        if (host->use_sdio)
                int_cntr |= INT_SDIO_IRQ_EN;
        writel(int_cntr, host->base + MMC_REG_INT_CNTR);
        spin_unlock_irqrestore(&host->lock, flags);

        host->req = NULL;
        host->cmd = NULL;
        host->data = NULL;

        mmc_request_done(host->mmc, req);
}

static int mxcmci_finish_data(struct mxcmci_host *host, unsigned int stat)
{
        struct mmc_data *data = host->data;
        int data_error;

        if (mxcmci_use_dma(host)) {
                dmaengine_terminate_all(host->dma);
                dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
                                host->dma_dir);
        }

        if (stat & STATUS_ERR_MASK) {
                dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n",
                                stat);
                if (stat & STATUS_CRC_READ_ERR) {
                        dev_err(mmc_dev(host->mmc), "%s: -EILSEQ\n", __func__);
                        data->error = -EILSEQ;
                } else if (stat & STATUS_CRC_WRITE_ERR) {
                        u32 err_code = (stat >> 9) & 0x3;
                        if (err_code == 2) { /* No CRC response */
                                dev_err(mmc_dev(host->mmc),
                                        "%s: No CRC -ETIMEDOUT\n", __func__);
                                data->error = -ETIMEDOUT;
                        } else {
                                dev_err(mmc_dev(host->mmc),
                                        "%s: -EILSEQ\n", __func__);
                                data->error = -EILSEQ;
                        }
                } else if (stat & STATUS_TIME_OUT_READ) {
                        dev_err(mmc_dev(host->mmc),
                                "%s: read -ETIMEDOUT\n", __func__);
                        data->error = -ETIMEDOUT;
                } else {
                        dev_err(mmc_dev(host->mmc), "%s: -EIO\n", __func__);
                        data->error = -EIO;
                }
        } else {
                data->bytes_xfered = host->datasize;
        }

        data_error = data->error;

        host->data = NULL;

        return data_error;
}

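/*
 * Responses are read from the 16-bit RES_FIFO register: a 136-bit (R2)
 * response takes eight reads assembled into four 32-bit words, any other
 * response takes three reads packed into resp[0].
 */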
static void mxcmci_read_response(struct mxcmci_host *host, unsigned int stat)
{
        struct mmc_command *cmd = host->cmd;
        int i;
        u32 a, b, c;

        if (!cmd)
                return;

        if (stat & STATUS_TIME_OUT_RESP) {
                dev_dbg(mmc_dev(host->mmc), "CMD TIMEOUT\n");
                cmd->error = -ETIMEDOUT;
        } else if (stat & STATUS_RESP_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
                dev_dbg(mmc_dev(host->mmc), "cmd crc error\n");
                cmd->error = -EILSEQ;
        }

        if (cmd->flags & MMC_RSP_PRESENT) {
                if (cmd->flags & MMC_RSP_136) {
                        for (i = 0; i < 4; i++) {
                                a = readw(host->base + MMC_REG_RES_FIFO);
                                b = readw(host->base + MMC_REG_RES_FIFO);
                                cmd->resp[i] = a << 16 | b;
                        }
                } else {
                        a = readw(host->base + MMC_REG_RES_FIFO);
                        b = readw(host->base + MMC_REG_RES_FIFO);
                        c = readw(host->base + MMC_REG_RES_FIFO);
                        cmd->resp[0] = a << 24 | b << 8 | c >> 8;
                }
        }
}

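/*
 * Busy-wait (up to one second) for any of the bits in 'mask' to appear in
 * the status register. Returns the raw status when an error bit is set,
 * 0 on success, and resets the controller and restores the clock rate if
 * the wait times out.
 */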
static int mxcmci_poll_status(struct mxcmci_host *host, u32 mask)
{
        u32 stat;
        unsigned long timeout = jiffies + HZ;

        do {
                stat = readl(host->base + MMC_REG_STATUS);
                if (stat & STATUS_ERR_MASK)
                        return stat;
                if (time_after(jiffies, timeout)) {
                        mxcmci_softreset(host);
                        mxcmci_set_clk_rate(host, host->clock);
                        return STATUS_TIME_OUT_READ;
                }
                if (stat & mask)
                        return 0;
                cpu_relax();
        } while (1);
}

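/*
 * PIO helpers: data is moved 32 bits at a time through the BUFFER_ACCESS
 * register, polling for buffer-ready in between; a trailing partial word
 * is staged through a temporary with memcpy().
 */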
static int mxcmci_pull(struct mxcmci_host *host, void *_buf, int bytes)
{
        unsigned int stat;
        u32 *buf = _buf;

        while (bytes > 3) {
                stat = mxcmci_poll_status(host,
                                STATUS_BUF_READ_RDY | STATUS_READ_OP_DONE);
                if (stat)
                        return stat;
                *buf++ = readl(host->base + MMC_REG_BUFFER_ACCESS);
                bytes -= 4;
        }

        if (bytes) {
                u8 *b = (u8 *)buf;
                u32 tmp;

                stat = mxcmci_poll_status(host,
                                STATUS_BUF_READ_RDY | STATUS_READ_OP_DONE);
                if (stat)
                        return stat;
                tmp = readl(host->base + MMC_REG_BUFFER_ACCESS);
                memcpy(b, &tmp, bytes);
        }

        return 0;
}

static int mxcmci_push(struct mxcmci_host *host, void *_buf, int bytes)
{
        unsigned int stat;
        u32 *buf = _buf;

        while (bytes > 3) {
                stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
                if (stat)
                        return stat;
                writel(*buf++, host->base + MMC_REG_BUFFER_ACCESS);
                bytes -= 4;
        }

        if (bytes) {
                u8 *b = (u8 *)buf;
                u32 tmp;

                stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
                if (stat)
                        return stat;

                memcpy(&tmp, b, bytes);
                writel(tmp, host->base + MMC_REG_BUFFER_ACCESS);
        }

        stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
        if (stat)
                return stat;

        return 0;
}

static int mxcmci_transfer_data(struct mxcmci_host *host)
{
        struct mmc_data *data = host->req->data;
        struct scatterlist *sg;
        int stat, i;

        host->data = data;
        host->datasize = 0;

        if (data->flags & MMC_DATA_READ) {
                for_each_sg(data->sg, sg, data->sg_len, i) {
                        stat = mxcmci_pull(host, sg_virt(sg), sg->length);
                        if (stat)
                                return stat;
                        host->datasize += sg->length;
                }
        } else {
                for_each_sg(data->sg, sg, data->sg_len, i) {
                        stat = mxcmci_push(host, sg_virt(sg), sg->length);
                        if (stat)
                                return stat;
                        host->datasize += sg->length;
                }
                stat = mxcmci_poll_status(host, STATUS_WRITE_OP_DONE);
                if (stat)
                        return stat;
        }
        return 0;
}

static void mxcmci_datawork(struct work_struct *work)
{
        struct mxcmci_host *host = container_of(work, struct mxcmci_host,
                                                  datawork);
        int datastat = mxcmci_transfer_data(host);

        writel(STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE,
                host->base + MMC_REG_STATUS);
        mxcmci_finish_data(host, datastat);

        if (host->req->stop) {
                if (mxcmci_start_cmd(host, host->req->stop, 0)) {
                        mxcmci_finish_request(host, host->req);
                        return;
                }
        } else {
                mxcmci_finish_request(host, host->req);
        }
}

static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat)
{
        struct mmc_data *data = host->data;
        int data_error;

        if (!data)
                return;

        data_error = mxcmci_finish_data(host, stat);

        mxcmci_read_response(host, stat);
        host->cmd = NULL;

        if (host->req->stop) {
                if (mxcmci_start_cmd(host, host->req->stop, 0)) {
                        mxcmci_finish_request(host, host->req);
                        return;
                }
        } else {
                mxcmci_finish_request(host, host->req);
        }
}

static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat)
{
        mxcmci_read_response(host, stat);
        host->cmd = NULL;

        if (!host->data && host->req) {
                mxcmci_finish_request(host, host->req);
                return;
        }

        /* For the DMA case the DMA engine handles the data transfer
         * automatically. For non DMA we have to do it ourselves.
         * Don't do it in interrupt context though.
         */
        if (!mxcmci_use_dma(host) && host->data)
                schedule_work(&host->datawork);

}

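/*
 * Interrupt handler: acknowledge the status bits, forward SDIO interrupts to
 * the core, complete the command on END_CMD_RESP, complete DMA transfers on
 * the read/write-done bits, and report card insertion/removal when DAT3 card
 * detection is in use.
 */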
static irqreturn_t mxcmci_irq(int irq, void *devid)
{
        struct mxcmci_host *host = devid;
        unsigned long flags;
        bool sdio_irq;
        u32 stat;

        stat = readl(host->base + MMC_REG_STATUS);
        writel(stat & ~(STATUS_SDIO_INT_ACTIVE | STATUS_DATA_TRANS_DONE |
                        STATUS_WRITE_OP_DONE), host->base + MMC_REG_STATUS);

        dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat);

        spin_lock_irqsave(&host->lock, flags);
        sdio_irq = (stat & STATUS_SDIO_INT_ACTIVE) && host->use_sdio;
        spin_unlock_irqrestore(&host->lock, flags);

        if (mxcmci_use_dma(host) &&
            (stat & (STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE)))
                writel(STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE,
                        host->base + MMC_REG_STATUS);

        if (sdio_irq) {
                writel(STATUS_SDIO_INT_ACTIVE, host->base + MMC_REG_STATUS);
                mmc_signal_sdio_irq(host->mmc);
        }

        if (stat & STATUS_END_CMD_RESP)
                mxcmci_cmd_done(host, stat);

        if (mxcmci_use_dma(host) &&
            (stat & (STATUS_DATA_TRANS_DONE | STATUS_WRITE_OP_DONE)))
                mxcmci_data_done(host, stat);

        if (host->default_irq_mask &&
            (stat & (STATUS_CARD_INSERTION | STATUS_CARD_REMOVAL)))
                mmc_detect_change(host->mmc, msecs_to_jiffies(200));

        return IRQ_HANDLED;
}

static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)
{
        struct mxcmci_host *host = mmc_priv(mmc);
        unsigned int cmdat = host->cmdat;
        int error;

        WARN_ON(host->req != NULL);

        host->req = req;
        host->cmdat &= ~CMD_DAT_CONT_INIT;

        if (host->dma)
                host->do_dma = 1;

        if (req->data) {
                error = mxcmci_setup_data(host, req->data);
                if (error) {
                        req->cmd->error = error;
                        goto out;
                }


                cmdat |= CMD_DAT_CONT_DATA_ENABLE;

                if (req->data->flags & MMC_DATA_WRITE)
                        cmdat |= CMD_DAT_CONT_WRITE;
        }

        error = mxcmci_start_cmd(host, req->cmd, cmdat);

out:
        if (error)
                mxcmci_finish_request(host, req);
}

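/*
 * Search prescaler (powers of two up to 0x800) and divider (1..15) values for
 * the first combination whose output clock does not exceed the requested rate,
 * and program it into the CLK_RATE register.
 */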
static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios)
{
        unsigned int divider;
        int prescaler = 0;
        unsigned int clk_in = clk_get_rate(host->clk);

        while (prescaler <= 0x800) {
                for (divider = 1; divider <= 0xF; divider++) {
                        int x;

                        x = (clk_in / (divider + 1));

                        if (prescaler)
                                x /= (prescaler * 2);

                        if (x <= clk_ios)
                                break;
                }
                if (divider < 0x10)
                        break;

                if (prescaler == 0)
                        prescaler = 1;
                else
                        prescaler <<= 1;
        }

        writew((prescaler << 4) | divider, host->base + MMC_REG_CLK_RATE);

        dev_dbg(mmc_dev(host->mmc), "scaler: %d divider: %d in: %d out: %d\n",
                        prescaler, divider, clk_in, clk_ios);
}

static int mxcmci_setup_dma(struct mmc_host *mmc)
{
        struct mxcmci_host *host = mmc_priv(mmc);
        struct dma_slave_config *config = &host->dma_slave_config;

        config->dst_addr = host->res->start + MMC_REG_BUFFER_ACCESS;
        config->src_addr = host->res->start + MMC_REG_BUFFER_ACCESS;
        config->dst_addr_width = 4;
        config->src_addr_width = 4;
        config->dst_maxburst = host->burstlen;
        config->src_maxburst = host->burstlen;

        return dmaengine_slave_config(host->dma, config);
}

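/*
 * The bus width also determines the DMA burst length (64 in 4-bit mode, 16 in
 * 1-bit mode); a change reconfigures the slave DMA channel and falls back to
 * PIO if that fails. Power state and controller clock are updated afterwards.
 */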
static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct mxcmci_host *host = mmc_priv(mmc);
        int burstlen, ret;

        /*
         * use burstlen of 64 in 4 bit mode (--> reg value 0)
         * use burstlen of 16 in 1 bit mode (--> reg value 16)
         */
        if (ios->bus_width == MMC_BUS_WIDTH_4)
                burstlen = 64;
        else
                burstlen = 16;

        if (mxcmci_use_dma(host) && burstlen != host->burstlen) {
                host->burstlen = burstlen;
                ret = mxcmci_setup_dma(mmc);
                if (ret) {
                        dev_err(mmc_dev(host->mmc),
                                "failed to config DMA channel. Falling back to PIO\n");
                        dma_release_channel(host->dma);
                        host->do_dma = 0;
                }
        }

        if (ios->bus_width == MMC_BUS_WIDTH_4)
                host->cmdat |= CMD_DAT_CONT_BUS_WIDTH_4;
        else
                host->cmdat &= ~CMD_DAT_CONT_BUS_WIDTH_4;

        if (host->power_mode != ios->power_mode) {
                mxcmci_set_power(host, ios->power_mode, ios->vdd);
                host->power_mode = ios->power_mode;

                if (ios->power_mode == MMC_POWER_ON)
                        host->cmdat |= CMD_DAT_CONT_INIT;
        }

        if (ios->clock) {
                mxcmci_set_clk_rate(host, ios->clock);
                writew(STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK);
        } else {
                writew(STR_STP_CLK_STOP_CLK, host->base + MMC_REG_STR_STP_CLK);
        }

        host->clock = ios->clock;
}

static irqreturn_t mxcmci_detect_irq(int irq, void *data)
{
        struct mmc_host *mmc = data;

        dev_dbg(mmc_dev(mmc), "%s\n", __func__);

        mmc_detect_change(mmc, msecs_to_jiffies(250));
        return IRQ_HANDLED;
}

static int mxcmci_get_ro(struct mmc_host *mmc)
{
        struct mxcmci_host *host = mmc_priv(mmc);

        if (host->pdata && host->pdata->get_ro)
                return !!host->pdata->get_ro(mmc_dev(mmc));
        /*
         * Board doesn't support read only detection; let the mmc core
         * decide what to do.
         */
        return -ENOSYS;
}

static void mxcmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
        struct mxcmci_host *host = mmc_priv(mmc);
        unsigned long flags;
        u32 int_cntr;

        spin_lock_irqsave(&host->lock, flags);
        host->use_sdio = enable;
        int_cntr = readl(host->base + MMC_REG_INT_CNTR);

        if (enable)
                int_cntr |= INT_SDIO_IRQ_EN;
        else
                int_cntr &= ~INT_SDIO_IRQ_EN;

        writel(int_cntr, host->base + MMC_REG_INT_CNTR);
        spin_unlock_irqrestore(&host->lock, flags);
}

static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card)
{
        /*
         * MX3 SoCs have a silicon bug which corrupts CRC calculation of
         * multi-block transfers when connected SDIO peripheral doesn't
         * drive the BUSY line as required by the specs.
         * One way to prevent this is to only allow 1-bit transfers.
         */

        if (cpu_is_mx3() && card->type == MMC_TYPE_SDIO)
                host->caps &= ~MMC_CAP_4_BIT_DATA;
        else
                host->caps |= MMC_CAP_4_BIT_DATA;
}

static bool filter(struct dma_chan *chan, void *param)
{
        struct mxcmci_host *host = param;

        if (!imx_dma_is_general_purpose(chan))
                return false;

        chan->private = &host->dma_data;

        return true;
}

static const struct mmc_host_ops mxcmci_ops = {
        .request = mxcmci_request,
        .set_ios = mxcmci_set_ios,
        .get_ro = mxcmci_get_ro,
        .enable_sdio_irq = mxcmci_enable_sdio_irq,
        .init_card = mxcmci_init_card,
};

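/*
 * Probe: map the register window, set the MMC core transfer limits, verify
 * the controller revision (0x400), derive f_min/f_max from the IP clock,
 * optionally grab a slave DMA channel described by the platform DMA resource,
 * and register the host.
 */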
static int mxcmci_probe(struct platform_device *pdev)
{
        struct mmc_host *mmc;
        struct mxcmci_host *host = NULL;
        struct resource *iores, *r;
        int ret = 0, irq;
        dma_cap_mask_t mask;

        printk(KERN_INFO "i.MX SDHC driver\n");

        iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        irq = platform_get_irq(pdev, 0);
        if (!iores || irq < 0)
                return -EINVAL;

        r = request_mem_region(iores->start, resource_size(iores), pdev->name);
        if (!r)
                return -EBUSY;

        mmc = mmc_alloc_host(sizeof(struct mxcmci_host), &pdev->dev);
        if (!mmc) {
                ret = -ENOMEM;
                goto out_release_mem;
        }

        mmc->ops = &mxcmci_ops;
        mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;

        /* MMC core transfer sizes tunable parameters */
        mmc->max_segs = 64;
        mmc->max_blk_size = 2048;
        mmc->max_blk_count = 65535;
        mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
        mmc->max_seg_size = mmc->max_req_size;

        host = mmc_priv(mmc);
        host->base = ioremap(r->start, resource_size(r));
        if (!host->base) {
                ret = -ENOMEM;
                goto out_free;
        }

        host->mmc = mmc;
        host->pdata = pdev->dev.platform_data;
        spin_lock_init(&host->lock);

        mxcmci_init_ocr(host);

        if (host->pdata && host->pdata->dat3_card_detect)
                host->default_irq_mask =
                        INT_CARD_INSERTION_EN | INT_CARD_REMOVAL_EN;
        else
                host->default_irq_mask = 0;

        host->res = r;
        host->irq = irq;

        host->clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(host->clk)) {
                ret = PTR_ERR(host->clk);
                goto out_iounmap;
        }
        clk_enable(host->clk);

        mxcmci_softreset(host);

        host->rev_no = readw(host->base + MMC_REG_REV_NO);
        if (host->rev_no != 0x400) {
                ret = -ENODEV;
                dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. aborting.\n",
                        host->rev_no);
                goto out_clk_put;
        }

        mmc->f_min = clk_get_rate(host->clk) >> 16;
        mmc->f_max = clk_get_rate(host->clk) >> 1;

        /* recommended in data sheet */
        writew(0x2db4, host->base + MMC_REG_READ_TO);

        writel(host->default_irq_mask, host->base + MMC_REG_INT_CNTR);

        r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
        if (r) {
                host->dmareq = r->start;
                host->dma_data.peripheral_type = IMX_DMATYPE_SDHC;
                host->dma_data.priority = DMA_PRIO_LOW;
                host->dma_data.dma_request = host->dmareq;
                dma_cap_zero(mask);
                dma_cap_set(DMA_SLAVE, mask);
                host->dma = dma_request_channel(mask, filter, host);
                if (host->dma)
                        mmc->max_seg_size = dma_get_max_seg_size(
                                        host->dma->device->dev);
        }

        if (!host->dma)
                dev_info(mmc_dev(host->mmc), "dma not available. Using PIO\n");

        INIT_WORK(&host->datawork, mxcmci_datawork);

        ret = request_irq(host->irq, mxcmci_irq, 0, DRIVER_NAME, host);
        if (ret)
                goto out_free_dma;

        platform_set_drvdata(pdev, mmc);

        if (host->pdata && host->pdata->init) {
                ret = host->pdata->init(&pdev->dev, mxcmci_detect_irq,
                                host->mmc);
                if (ret)
                        goto out_free_irq;
        }

        mmc_add_host(mmc);

        return 0;

out_free_irq:
        free_irq(host->irq, host);
out_free_dma:
        if (host->dma)
                dma_release_channel(host->dma);
out_clk_put:
        clk_disable(host->clk);
        clk_put(host->clk);
out_iounmap:
        iounmap(host->base);
out_free:
        mmc_free_host(mmc);
out_release_mem:
        release_mem_region(iores->start, resource_size(iores));
        return ret;
}

static int mxcmci_remove(struct platform_device *pdev)
{
        struct mmc_host *mmc = platform_get_drvdata(pdev);
        struct mxcmci_host *host = mmc_priv(mmc);

        platform_set_drvdata(pdev, NULL);

        mmc_remove_host(mmc);

        if (host->vcc)
                regulator_put(host->vcc);

        if (host->pdata && host->pdata->exit)
                host->pdata->exit(&pdev->dev, mmc);

        free_irq(host->irq, host);
        iounmap(host->base);

        if (host->dma)
                dma_release_channel(host->dma);

        clk_disable(host->clk);
        clk_put(host->clk);

        release_mem_region(host->res->start, resource_size(host->res));

        mmc_free_host(mmc);

        return 0;
}

#ifdef CONFIG_PM
static int mxcmci_suspend(struct device *dev)
{
        struct mmc_host *mmc = dev_get_drvdata(dev);
        struct mxcmci_host *host = mmc_priv(mmc);
        int ret = 0;

        if (mmc)
                ret = mmc_suspend_host(mmc);
        clk_disable(host->clk);

        return ret;
}

static int mxcmci_resume(struct device *dev)
{
        struct mmc_host *mmc = dev_get_drvdata(dev);
        struct mxcmci_host *host = mmc_priv(mmc);
        int ret = 0;

        clk_enable(host->clk);
        if (mmc)
                ret = mmc_resume_host(mmc);

        return ret;
}

static const struct dev_pm_ops mxcmci_pm_ops = {
        .suspend = mxcmci_suspend,
        .resume = mxcmci_resume,
};
#endif

static struct platform_driver mxcmci_driver = {
        .probe = mxcmci_probe,
        .remove = mxcmci_remove,
        .driver = {
                .name = DRIVER_NAME,
                .owner = THIS_MODULE,
#ifdef CONFIG_PM
                .pm = &mxcmci_pm_ops,
#endif
        }
};

static int __init mxcmci_init(void)
{
        return platform_driver_register(&mxcmci_driver);
}

static void __exit mxcmci_exit(void)
{
        platform_driver_unregister(&mxcmci_driver);
}

module_init(mxcmci_init);
module_exit(mxcmci_exit);

MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:imx-mmc");