GitHub Repository: torvalds/linux
Path: blob/master/drivers/ata/pata_arasan_cf.c
/*
 * drivers/ata/pata_arasan_cf.c
 *
 * Arasan Compact Flash host controller source file
 *
 * Copyright (C) 2011 ST Microelectronics
 * Viresh Kumar <[email protected]>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

/*
 * The Arasan CompactFlash Device Controller IP core has three basic modes of
 * operation: PC Card ATA using I/O mode, PC Card ATA using memory mode, and
 * PC Card ATA using True IDE mode. This driver currently supports only True
 * IDE mode.
 *
 * The Arasan CF controller shares a global IRQ register with the Arasan XD
 * controller.
 *
 * Tested on arch/arm/mach-spear13xx
 */

#include <linux/ata.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/libata.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pata_arasan_cf_data.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <trace/events/libata.h>

#define DRIVER_NAME "arasan_cf"
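/* bound on buffer-available/transfer-complete and DMA completion waits */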
#define TIMEOUT msecs_to_jiffies(3000)

/* Registers */
/* CompactFlash Interface Status */
#define CFI_STS 0x000
#define STS_CHG (1)
#define BIN_AUDIO_OUT (1 << 1)
#define CARD_DETECT1 (1 << 2)
#define CARD_DETECT2 (1 << 3)
#define INP_ACK (1 << 4)
#define CARD_READY (1 << 5)
#define IO_READY (1 << 6)
#define B16_IO_PORT_SEL (1 << 7)
/* IRQ */
#define IRQ_STS 0x004
/* Interrupt Enable */
#define IRQ_EN 0x008
#define CARD_DETECT_IRQ (1)
#define STATUS_CHNG_IRQ (1 << 1)
#define MEM_MODE_IRQ (1 << 2)
#define IO_MODE_IRQ (1 << 3)
#define TRUE_IDE_MODE_IRQ (1 << 8)
#define PIO_XFER_ERR_IRQ (1 << 9)
#define BUF_AVAIL_IRQ (1 << 10)
#define XFER_DONE_IRQ (1 << 11)
#define IGNORED_IRQS (STATUS_CHNG_IRQ | MEM_MODE_IRQ | IO_MODE_IRQ |\
			TRUE_IDE_MODE_IRQ)
#define TRUE_IDE_IRQS (CARD_DETECT_IRQ | PIO_XFER_ERR_IRQ |\
			BUF_AVAIL_IRQ | XFER_DONE_IRQ)
/* Operation Mode */
#define OP_MODE 0x00C
#define CARD_MODE_MASK (0x3)
#define MEM_MODE (0x0)
#define IO_MODE (0x1)
#define TRUE_IDE_MODE (0x2)

#define CARD_TYPE_MASK (1 << 2)
#define CF_CARD (0)
#define CF_PLUS_CARD (1 << 2)

#define CARD_RESET (1 << 3)
#define CFHOST_ENB (1 << 4)
#define OUTPUTS_TRISTATE (1 << 5)
#define ULTRA_DMA_ENB (1 << 8)
#define MULTI_WORD_DMA_ENB (1 << 9)
#define DRQ_BLOCK_SIZE_MASK (0x3 << 11)
#define DRQ_BLOCK_SIZE_512 (0)
#define DRQ_BLOCK_SIZE_1024 (1 << 11)
#define DRQ_BLOCK_SIZE_2048 (2 << 11)
#define DRQ_BLOCK_SIZE_4096 (3 << 11)
/* CF Interface Clock Configuration */
#define CLK_CFG 0x010
#define CF_IF_CLK_MASK (0XF)
/* CF Timing Mode Configuration */
#define TM_CFG 0x014
#define MEM_MODE_TIMING_MASK (0x3)
#define MEM_MODE_TIMING_250NS (0x0)
#define MEM_MODE_TIMING_120NS (0x1)
#define MEM_MODE_TIMING_100NS (0x2)
#define MEM_MODE_TIMING_80NS (0x3)

#define IO_MODE_TIMING_MASK (0x3 << 2)
#define IO_MODE_TIMING_250NS (0x0 << 2)
#define IO_MODE_TIMING_120NS (0x1 << 2)
#define IO_MODE_TIMING_100NS (0x2 << 2)
#define IO_MODE_TIMING_80NS (0x3 << 2)

#define TRUEIDE_PIO_TIMING_MASK (0x7 << 4)
#define TRUEIDE_PIO_TIMING_SHIFT 4

#define TRUEIDE_MWORD_DMA_TIMING_MASK (0x7 << 7)
#define TRUEIDE_MWORD_DMA_TIMING_SHIFT 7

#define ULTRA_DMA_TIMING_MASK (0x7 << 10)
#define ULTRA_DMA_TIMING_SHIFT 10
/* CF Transfer Address */
#define XFER_ADDR 0x014
#define XFER_ADDR_MASK (0x7FF)
#define MAX_XFER_COUNT 0x20000u
/* Transfer Control */
#define XFER_CTR 0x01C
#define XFER_COUNT_MASK (0x3FFFF)
#define ADDR_INC_DISABLE (1 << 24)
#define XFER_WIDTH_MASK (1 << 25)
#define XFER_WIDTH_8B (0)
#define XFER_WIDTH_16B (1 << 25)

#define MEM_TYPE_MASK (1 << 26)
#define MEM_TYPE_COMMON (0)
#define MEM_TYPE_ATTRIBUTE (1 << 26)

#define MEM_IO_XFER_MASK (1 << 27)
#define MEM_XFER (0)
#define IO_XFER (1 << 27)

#define DMA_XFER_MODE (1 << 28)

#define AHB_BUS_NORMAL_PIO_OPRTN (~(1 << 29))
#define XFER_DIR_MASK (1 << 30)
#define XFER_READ (0)
#define XFER_WRITE (1 << 30)

#define XFER_START (1 << 31)
/* Write Data Port */
#define WRITE_PORT 0x024
/* Read Data Port */
#define READ_PORT 0x028
/* ATA Data Port */
#define ATA_DATA_PORT 0x030
#define ATA_DATA_PORT_MASK (0xFFFF)
/* ATA Error/Features */
#define ATA_ERR_FTR 0x034
/* ATA Sector Count */
#define ATA_SC 0x038
/* ATA Sector Number */
#define ATA_SN 0x03C
/* ATA Cylinder Low */
#define ATA_CL 0x040
/* ATA Cylinder High */
#define ATA_CH 0x044
/* ATA Select Card/Head */
#define ATA_SH 0x048
/* ATA Status-Command */
#define ATA_STS_CMD 0x04C
/* ATA Alternate Status/Device Control */
#define ATA_ASTS_DCTR 0x050
/* Extended Write Data Port 0x200-0x3FC */
#define EXT_WRITE_PORT 0x200
/* Extended Read Data Port 0x400-0x5FC */
#define EXT_READ_PORT 0x400
#define FIFO_SIZE 0x200u
/* Global Interrupt Status */
#define GIRQ_STS 0x800
/* Global Interrupt Status enable */
#define GIRQ_STS_EN 0x804
/* Global Interrupt Signal enable */
#define GIRQ_SGN_EN 0x808
#define GIRQ_CF (1)
#define GIRQ_XD (1 << 1)

/* Compact Flash Controller Dev Structure */
struct arasan_cf_dev {
	/* pointer to ata_host structure */
	struct ata_host *host;
	/* clk structure */
	struct clk *clk;

	/* physical base address of controller */
	dma_addr_t pbase;
	/* virtual base address of controller */
	void __iomem *vbase;
	/* irq number */
	int irq;

	/* status to be updated to framework regarding DMA transfer */
	u8 dma_status;
	/* Card is present or not */
	u8 card_present;

	/* dma specific */
	/* Completion for transfer complete interrupt from controller */
	struct completion cf_completion;
	/* Completion for DMA transfer complete. */
	struct completion dma_completion;
	/* DMA channel allocated */
	struct dma_chan *dma_chan;
	/* Mask for DMA transfers */
	dma_cap_mask_t mask;
	/* DMA transfer work */
	struct work_struct work;
	/* DMA delayed finish work */
	struct delayed_work dwork;
	/* qc to be transferred using DMA */
	struct ata_queued_cmd *qc;
};

static const struct scsi_host_template arasan_cf_sht = {
	ATA_BASE_SHT(DRIVER_NAME),
	.dma_boundary = 0xFFFFFFFFUL,
};

static void cf_dumpregs(struct arasan_cf_dev *acdev)
{
	struct device *dev = acdev->host->dev;

	dev_dbg(dev, ": =========== REGISTER DUMP ===========");
	dev_dbg(dev, ": CFI_STS: %x", readl(acdev->vbase + CFI_STS));
	dev_dbg(dev, ": IRQ_STS: %x", readl(acdev->vbase + IRQ_STS));
	dev_dbg(dev, ": IRQ_EN: %x", readl(acdev->vbase + IRQ_EN));
	dev_dbg(dev, ": OP_MODE: %x", readl(acdev->vbase + OP_MODE));
	dev_dbg(dev, ": CLK_CFG: %x", readl(acdev->vbase + CLK_CFG));
	dev_dbg(dev, ": TM_CFG: %x", readl(acdev->vbase + TM_CFG));
	dev_dbg(dev, ": XFER_CTR: %x", readl(acdev->vbase + XFER_CTR));
	dev_dbg(dev, ": GIRQ_STS: %x", readl(acdev->vbase + GIRQ_STS));
	dev_dbg(dev, ": GIRQ_STS_EN: %x", readl(acdev->vbase + GIRQ_STS_EN));
	dev_dbg(dev, ": GIRQ_SGN_EN: %x", readl(acdev->vbase + GIRQ_SGN_EN));
	dev_dbg(dev, ": =====================================");
}

/* Enable/Disable global interrupts shared between CF and XD ctrlr. */
static void cf_ginterrupt_enable(struct arasan_cf_dev *acdev, bool enable)
{
	/* enable should be 0 or 1 */
	writel(enable, acdev->vbase + GIRQ_STS_EN);
	writel(enable, acdev->vbase + GIRQ_SGN_EN);
}

/* Enable/Disable CF interrupts */
static inline void
cf_interrupt_enable(struct arasan_cf_dev *acdev, u32 mask, bool enable)
{
	u32 val = readl(acdev->vbase + IRQ_EN);
	/* clear & enable/disable irqs */
	if (enable) {
		writel(mask, acdev->vbase + IRQ_STS);
		writel(val | mask, acdev->vbase + IRQ_EN);
	} else
		writel(val & ~mask, acdev->vbase + IRQ_EN);
}

static inline void cf_card_reset(struct arasan_cf_dev *acdev)
{
	u32 val = readl(acdev->vbase + OP_MODE);

	writel(val | CARD_RESET, acdev->vbase + OP_MODE);
	udelay(200);
	writel(val & ~CARD_RESET, acdev->vbase + OP_MODE);
}

static inline void cf_ctrl_reset(struct arasan_cf_dev *acdev)
{
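	/* reset the controller by disabling and re-enabling the CF host */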
	writel(readl(acdev->vbase + OP_MODE) & ~CFHOST_ENB,
			acdev->vbase + OP_MODE);
	writel(readl(acdev->vbase + OP_MODE) | CFHOST_ENB,
			acdev->vbase + OP_MODE);
}

static void cf_card_detect(struct arasan_cf_dev *acdev, bool hotplugged)
{
	struct ata_port *ap = acdev->host->ports[0];
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 val = readl(acdev->vbase + CFI_STS);

	/* Both CD1 & CD2 should be low if card inserted completely */
	if (!(val & (CARD_DETECT1 | CARD_DETECT2))) {
		if (acdev->card_present)
			return;
		acdev->card_present = 1;
		cf_card_reset(acdev);
	} else {
		if (!acdev->card_present)
			return;
		acdev->card_present = 0;
	}

	if (hotplugged) {
		ata_ehi_hotplugged(ehi);
		ata_port_freeze(ap);
	}
}

static int cf_init(struct arasan_cf_dev *acdev)
{
	struct arasan_cf_pdata *pdata = dev_get_platdata(acdev->host->dev);
	unsigned int if_clk;
	unsigned long flags;
	int ret = 0;

	ret = clk_prepare_enable(acdev->clk);
	if (ret) {
		dev_dbg(acdev->host->dev, "clock enable failed");
		return ret;
	}

	ret = clk_set_rate(acdev->clk, 166000000);
	if (ret) {
		dev_warn(acdev->host->dev, "clock set rate failed");
		clk_disable_unprepare(acdev->clk);
		return ret;
	}

	spin_lock_irqsave(&acdev->host->lock, flags);
	/* configure CF interface clock */
	/* TODO: read from device tree */
	if_clk = CF_IF_CLK_166M;
	if (pdata && pdata->cf_if_clk <= CF_IF_CLK_200M)
		if_clk = pdata->cf_if_clk;

	writel(if_clk, acdev->vbase + CLK_CFG);

	writel(TRUE_IDE_MODE | CFHOST_ENB, acdev->vbase + OP_MODE);
	cf_interrupt_enable(acdev, CARD_DETECT_IRQ, 1);
	cf_ginterrupt_enable(acdev, 1);
	spin_unlock_irqrestore(&acdev->host->lock, flags);

	return ret;
}

static void cf_exit(struct arasan_cf_dev *acdev)
{
	unsigned long flags;

	spin_lock_irqsave(&acdev->host->lock, flags);
	cf_ginterrupt_enable(acdev, 0);
	cf_interrupt_enable(acdev, TRUE_IDE_IRQS, 0);
	cf_card_reset(acdev);
	writel(readl(acdev->vbase + OP_MODE) & ~CFHOST_ENB,
			acdev->vbase + OP_MODE);
	spin_unlock_irqrestore(&acdev->host->lock, flags);
	clk_disable_unprepare(acdev->clk);
}

static void dma_callback(void *dev)
{
	struct arasan_cf_dev *acdev = dev;

	complete(&acdev->dma_completion);
}

static inline void dma_complete(struct arasan_cf_dev *acdev)
{
	struct ata_queued_cmd *qc = acdev->qc;
	unsigned long flags;

	acdev->qc = NULL;
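	/* let the standard SFF interrupt handler finish the queued command */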
	ata_sff_interrupt(acdev->irq, acdev->host);

	spin_lock_irqsave(&acdev->host->lock, flags);
	if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
		ata_ehi_push_desc(&qc->ap->link.eh_info, "DMA Failed: Timeout");
	spin_unlock_irqrestore(&acdev->host->lock, flags);
}

static inline int wait4buf(struct arasan_cf_dev *acdev)
{
	if (!wait_for_completion_timeout(&acdev->cf_completion, TIMEOUT)) {
		u32 rw = acdev->qc->tf.flags & ATA_TFLAG_WRITE;

		dev_err(acdev->host->dev, "%s TimeOut", rw ? "write" : "read");
		return -ETIMEDOUT;
	}

	/* Check if PIO Error interrupt has occurred */
	if (acdev->dma_status & ATA_DMA_ERR)
		return -EAGAIN;

	return 0;
}

static int
dma_xfer(struct arasan_cf_dev *acdev, dma_addr_t src, dma_addr_t dest, u32 len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan = acdev->dma_chan;
	dma_cookie_t cookie;
	unsigned long flags = DMA_PREP_INTERRUPT;
	int ret = 0;

	tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len, flags);
	if (!tx) {
		dev_err(acdev->host->dev, "device_prep_dma_memcpy failed\n");
		return -EAGAIN;
	}

	tx->callback = dma_callback;
	tx->callback_param = acdev;
	cookie = tx->tx_submit(tx);

	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(acdev->host->dev, "dma_submit_error\n");
		return ret;
	}

	chan->device->device_issue_pending(chan);

	/* Wait for DMA to complete */
	if (!wait_for_completion_timeout(&acdev->dma_completion, TIMEOUT)) {
		dmaengine_terminate_all(chan);
		dev_err(acdev->host->dev, "wait_for_completion_timeout\n");
		return -ETIMEDOUT;
	}

	return ret;
}

static int sg_xfer(struct arasan_cf_dev *acdev, struct scatterlist *sg)
{
	dma_addr_t dest = 0, src = 0;
	u32 xfer_cnt, sglen, dma_len, xfer_ctr;
	u32 write = acdev->qc->tf.flags & ATA_TFLAG_WRITE;
	unsigned long flags;
	int ret = 0;

	sglen = sg_dma_len(sg);
	if (write) {
		src = sg_dma_address(sg);
		dest = acdev->pbase + EXT_WRITE_PORT;
	} else {
		dest = sg_dma_address(sg);
		src = acdev->pbase + EXT_READ_PORT;
	}

	/*
	 * For each sg:
	 * Up to MAX_XFER_COUNT bytes are transferred before we get the
	 * transfer complete interrupt. In between, a buffer available
	 * interrupt is generated after every FIFO_SIZE bytes; at that point
	 * we refill the FIFO with at most FIFO_SIZE bytes.
	 */
	while (sglen) {
		xfer_cnt = min(sglen, MAX_XFER_COUNT);
		spin_lock_irqsave(&acdev->host->lock, flags);
		xfer_ctr = readl(acdev->vbase + XFER_CTR) &
			~XFER_COUNT_MASK;
		writel(xfer_ctr | xfer_cnt | XFER_START,
				acdev->vbase + XFER_CTR);
		spin_unlock_irqrestore(&acdev->host->lock, flags);

		/* continue dma xfers until current sg is completed */
		while (xfer_cnt) {
			/* wait for read to complete */
			if (!write) {
				ret = wait4buf(acdev);
				if (ret)
					goto fail;
			}

			/* read/write FIFO in chunk of FIFO_SIZE */
			dma_len = min(xfer_cnt, FIFO_SIZE);
			ret = dma_xfer(acdev, src, dest, dma_len);
			if (ret) {
				dev_err(acdev->host->dev, "dma failed");
				goto fail;
			}

			if (write)
				src += dma_len;
			else
				dest += dma_len;

			sglen -= dma_len;
			xfer_cnt -= dma_len;

			/* wait for write to complete */
			if (write) {
				ret = wait4buf(acdev);
				if (ret)
					goto fail;
			}
		}
	}

fail:
	spin_lock_irqsave(&acdev->host->lock, flags);
	writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
			acdev->vbase + XFER_CTR);
	spin_unlock_irqrestore(&acdev->host->lock, flags);

	return ret;
}

/*
 * This routine uses the external DMA controller to read/write data to/from
 * the FIFO of the CF controller. There are two xfer-related interrupts
 * supported by the CF controller:
 * - buf_avail: This interrupt is generated as soon as we have a buffer of 512
 *	bytes available for reading, or an empty buffer available for writing.
 * - xfer_done: This interrupt is generated on transfer of "xfer_size" amount
 *	of data to/from FIFO. xfer_size is programmed in XFER_CTR register.
 *
 * Max buffer size = FIFO_SIZE = 512 Bytes.
 * Max xfer_size = MAX_XFER_COUNT = 256 KB.
 */
static void data_xfer(struct work_struct *work)
{
	struct arasan_cf_dev *acdev = container_of(work, struct arasan_cf_dev,
			work);
	struct ata_queued_cmd *qc = acdev->qc;
	struct scatterlist *sg;
	unsigned long flags;
	u32 temp;
	int ret = 0;

	/* request dma channels */
	/* dma_request_channel may sleep, so calling from process context */
	acdev->dma_chan = dma_request_chan(acdev->host->dev, "data");
	if (IS_ERR(acdev->dma_chan)) {
		dev_err_probe(acdev->host->dev, PTR_ERR(acdev->dma_chan),
			      "Unable to get dma_chan\n");
		acdev->dma_chan = NULL;
		goto chan_request_fail;
	}

	for_each_sg(qc->sg, sg, qc->n_elem, temp) {
		ret = sg_xfer(acdev, sg);
		if (ret)
			break;
	}

	dma_release_channel(acdev->dma_chan);
	acdev->dma_chan = NULL;

	/* data xferred successfully */
	if (!ret) {
		u32 status;

		spin_lock_irqsave(&acdev->host->lock, flags);
		status = ioread8(qc->ap->ioaddr.altstatus_addr);
		spin_unlock_irqrestore(&acdev->host->lock, flags);
		if (status & (ATA_BUSY | ATA_DRQ)) {
			ata_sff_queue_delayed_work(&acdev->dwork, 1);
			return;
		}

		goto sff_intr;
	}

	cf_dumpregs(acdev);

chan_request_fail:
	spin_lock_irqsave(&acdev->host->lock, flags);
	/* error when transferring data to/from memory */
	qc->err_mask |= AC_ERR_HOST_BUS;
	qc->ap->hsm_task_state = HSM_ST_ERR;

	cf_ctrl_reset(acdev);
	spin_unlock_irqrestore(&acdev->host->lock, flags);
sff_intr:
	dma_complete(acdev);
}

static void delayed_finish(struct work_struct *work)
{
	struct arasan_cf_dev *acdev = container_of(work, struct arasan_cf_dev,
			dwork.work);
	struct ata_queued_cmd *qc = acdev->qc;
	unsigned long flags;
	u8 status;

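	/* re-read ALT STATUS; keep polling while the device is still busy */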
	spin_lock_irqsave(&acdev->host->lock, flags);
	status = ioread8(qc->ap->ioaddr.altstatus_addr);
	spin_unlock_irqrestore(&acdev->host->lock, flags);

	if (status & (ATA_BUSY | ATA_DRQ))
		ata_sff_queue_delayed_work(&acdev->dwork, 1);
	else
		dma_complete(acdev);
}

static irqreturn_t arasan_cf_interrupt(int irq, void *dev)
{
	struct arasan_cf_dev *acdev = ((struct ata_host *)dev)->private_data;
	unsigned long flags;
	u32 irqsts;

	irqsts = readl(acdev->vbase + GIRQ_STS);
	if (!(irqsts & GIRQ_CF))
		return IRQ_NONE;

	spin_lock_irqsave(&acdev->host->lock, flags);
	irqsts = readl(acdev->vbase + IRQ_STS);
	writel(irqsts, acdev->vbase + IRQ_STS);		/* clear irqs */
	writel(GIRQ_CF, acdev->vbase + GIRQ_STS);	/* clear girqs */

	/* handle only relevant interrupts */
	irqsts &= ~IGNORED_IRQS;

	if (irqsts & CARD_DETECT_IRQ) {
		cf_card_detect(acdev, 1);
		spin_unlock_irqrestore(&acdev->host->lock, flags);
		return IRQ_HANDLED;
	}

	if (irqsts & PIO_XFER_ERR_IRQ) {
		acdev->dma_status = ATA_DMA_ERR;
		writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
				acdev->vbase + XFER_CTR);
		spin_unlock_irqrestore(&acdev->host->lock, flags);
		complete(&acdev->cf_completion);
		dev_err(acdev->host->dev, "pio xfer err irq\n");
		return IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&acdev->host->lock, flags);

	if (irqsts & BUF_AVAIL_IRQ) {
		complete(&acdev->cf_completion);
		return IRQ_HANDLED;
	}

	if (irqsts & XFER_DONE_IRQ) {
		struct ata_queued_cmd *qc = acdev->qc;

		/* Send Complete only for write */
		if (qc->tf.flags & ATA_TFLAG_WRITE)
			complete(&acdev->cf_completion);
	}

	return IRQ_HANDLED;
}

static void arasan_cf_freeze(struct ata_port *ap)
{
	struct arasan_cf_dev *acdev = ap->host->private_data;

	/* stop transfer and reset controller */
	writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
			acdev->vbase + XFER_CTR);
	cf_ctrl_reset(acdev);
	acdev->dma_status = ATA_DMA_ERR;

	ata_sff_dma_pause(ap);
	ata_sff_freeze(ap);
}

static void arasan_cf_error_handler(struct ata_port *ap)
{
	struct arasan_cf_dev *acdev = ap->host->private_data;

	/*
	 * DMA transfers using an external DMA controller may be scheduled.
	 * Abort them before handling error. Refer data_xfer() for further
	 * details.
	 */
	cancel_work_sync(&acdev->work);
	cancel_delayed_work_sync(&acdev->dwork);
	return ata_sff_error_handler(ap);
}

static void arasan_cf_dma_start(struct arasan_cf_dev *acdev)
{
	struct ata_queued_cmd *qc = acdev->qc;
	struct ata_port *ap = qc->ap;
	struct ata_taskfile *tf = &qc->tf;
	u32 xfer_ctr = readl(acdev->vbase + XFER_CTR) & ~XFER_DIR_MASK;
	u32 write = tf->flags & ATA_TFLAG_WRITE;

	xfer_ctr |= write ? XFER_WRITE : XFER_READ;
	writel(xfer_ctr, acdev->vbase + XFER_CTR);

	ap->ops->sff_exec_command(ap, tf);
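	/* the actual FIFO <-> memory transfer happens in data_xfer(), in workqueue context */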
	ata_sff_queue_work(&acdev->work);
}

static unsigned int arasan_cf_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct arasan_cf_dev *acdev = ap->host->private_data;

	/* defer PIO handling to sff_qc_issue */
	if (!ata_is_dma(qc->tf.protocol))
		return ata_sff_qc_issue(qc);

	/* select the device */
	ata_wait_idle(ap);
	ata_sff_dev_select(ap, qc->dev->devno);
	ata_wait_idle(ap);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);

		trace_ata_tf_load(ap, &qc->tf);
		ap->ops->sff_tf_load(ap, &qc->tf);
		acdev->dma_status = 0;
		acdev->qc = qc;
		trace_ata_bmdma_start(ap, &qc->tf, qc->tag);
		arasan_cf_dma_start(acdev);
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	default:
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}

static void arasan_cf_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	struct arasan_cf_dev *acdev = ap->host->private_data;
	u8 pio = adev->pio_mode - XFER_PIO_0;
	unsigned long flags;
	u32 val;

	/* Arasan ctrl supports Mode0 -> Mode6 */
	if (pio > 6) {
		dev_err(ap->dev, "Unknown PIO mode\n");
		return;
	}

	spin_lock_irqsave(&acdev->host->lock, flags);
	val = readl(acdev->vbase + OP_MODE) &
		~(ULTRA_DMA_ENB | MULTI_WORD_DMA_ENB | DRQ_BLOCK_SIZE_MASK);
	writel(val, acdev->vbase + OP_MODE);
	val = readl(acdev->vbase + TM_CFG) & ~TRUEIDE_PIO_TIMING_MASK;
	val |= pio << TRUEIDE_PIO_TIMING_SHIFT;
	writel(val, acdev->vbase + TM_CFG);

	cf_interrupt_enable(acdev, BUF_AVAIL_IRQ | XFER_DONE_IRQ, 0);
	cf_interrupt_enable(acdev, PIO_XFER_ERR_IRQ, 1);
	spin_unlock_irqrestore(&acdev->host->lock, flags);
}

static void arasan_cf_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	struct arasan_cf_dev *acdev = ap->host->private_data;
	u32 opmode, tmcfg, dma_mode = adev->dma_mode;
	unsigned long flags;

	spin_lock_irqsave(&acdev->host->lock, flags);
	opmode = readl(acdev->vbase + OP_MODE) &
		~(MULTI_WORD_DMA_ENB | ULTRA_DMA_ENB);
	tmcfg = readl(acdev->vbase + TM_CFG);

	if ((dma_mode >= XFER_UDMA_0) && (dma_mode <= XFER_UDMA_6)) {
		opmode |= ULTRA_DMA_ENB;
		tmcfg &= ~ULTRA_DMA_TIMING_MASK;
		tmcfg |= (dma_mode - XFER_UDMA_0) << ULTRA_DMA_TIMING_SHIFT;
	} else if ((dma_mode >= XFER_MW_DMA_0) && (dma_mode <= XFER_MW_DMA_4)) {
		opmode |= MULTI_WORD_DMA_ENB;
		tmcfg &= ~TRUEIDE_MWORD_DMA_TIMING_MASK;
		tmcfg |= (dma_mode - XFER_MW_DMA_0) <<
			TRUEIDE_MWORD_DMA_TIMING_SHIFT;
	} else {
		dev_err(ap->dev, "Unknown DMA mode\n");
		spin_unlock_irqrestore(&acdev->host->lock, flags);
		return;
	}

	writel(opmode, acdev->vbase + OP_MODE);
	writel(tmcfg, acdev->vbase + TM_CFG);
	writel(DMA_XFER_MODE, acdev->vbase + XFER_CTR);

	cf_interrupt_enable(acdev, PIO_XFER_ERR_IRQ, 0);
	cf_interrupt_enable(acdev, BUF_AVAIL_IRQ | XFER_DONE_IRQ, 1);
	spin_unlock_irqrestore(&acdev->host->lock, flags);
}
785
786
static struct ata_port_operations arasan_cf_ops = {
787
.inherits = &ata_sff_port_ops,
788
.freeze = arasan_cf_freeze,
789
.error_handler = arasan_cf_error_handler,
790
.qc_issue = arasan_cf_qc_issue,
791
.set_piomode = arasan_cf_set_piomode,
792
.set_dmamode = arasan_cf_set_dmamode,
793
};
794
795
static int arasan_cf_probe(struct platform_device *pdev)
796
{
797
struct arasan_cf_dev *acdev;
798
struct arasan_cf_pdata *pdata = dev_get_platdata(&pdev->dev);
799
struct ata_host *host;
800
struct ata_port *ap;
801
struct resource *res;
802
u32 quirk;
803
irq_handler_t irq_handler = NULL;
804
int ret;
805
806
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
807
if (!res)
808
return -EINVAL;
809
810
if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
811
DRIVER_NAME)) {
812
dev_warn(&pdev->dev, "Failed to get memory region resource\n");
813
return -ENOENT;
814
}
815
816
acdev = devm_kzalloc(&pdev->dev, sizeof(*acdev), GFP_KERNEL);
817
if (!acdev)
818
return -ENOMEM;
819
820
if (pdata)
821
quirk = pdata->quirk;
822
else
823
quirk = CF_BROKEN_UDMA; /* as it is on spear1340 */
824
825
/*
826
* If there's an error getting IRQ (or we do get IRQ0),
827
* support only PIO
828
*/
829
ret = platform_get_irq(pdev, 0);
830
if (ret > 0) {
831
acdev->irq = ret;
832
irq_handler = arasan_cf_interrupt;
833
} else if (ret == -EPROBE_DEFER) {
834
return ret;
835
} else {
836
quirk |= CF_BROKEN_MWDMA | CF_BROKEN_UDMA;
837
}

	acdev->pbase = res->start;
	acdev->vbase = devm_ioremap(&pdev->dev, res->start,
			resource_size(res));
	if (!acdev->vbase) {
		dev_warn(&pdev->dev, "ioremap fail\n");
		return -ENOMEM;
	}

	acdev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(acdev->clk)) {
		dev_warn(&pdev->dev, "Clock not found\n");
		return PTR_ERR(acdev->clk);
	}

	/* allocate host */
	host = ata_host_alloc(&pdev->dev, 1);
	if (!host) {
		dev_warn(&pdev->dev, "alloc host fail\n");
		return -ENOMEM;
	}

	ap = host->ports[0];
	host->private_data = acdev;
	acdev->host = host;
	ap->ops = &arasan_cf_ops;
	ap->pio_mask = ATA_PIO6;
	ap->mwdma_mask = ATA_MWDMA4;
	ap->udma_mask = ATA_UDMA6;

	init_completion(&acdev->cf_completion);
	init_completion(&acdev->dma_completion);
	INIT_WORK(&acdev->work, data_xfer);
	INIT_DELAYED_WORK(&acdev->dwork, delayed_finish);
	dma_cap_set(DMA_MEMCPY, acdev->mask);

	/* Handle platform specific quirks */
	if (quirk) {
		if (quirk & CF_BROKEN_PIO) {
			ap->ops->set_piomode = NULL;
			ap->pio_mask = 0;
		}
		if (quirk & CF_BROKEN_MWDMA)
			ap->mwdma_mask = 0;
		if (quirk & CF_BROKEN_UDMA)
			ap->udma_mask = 0;
	}
	ap->flags |= ATA_FLAG_PIO_POLLING | ATA_FLAG_NO_ATAPI;

	ap->ioaddr.cmd_addr = acdev->vbase + ATA_DATA_PORT;
	ap->ioaddr.data_addr = acdev->vbase + ATA_DATA_PORT;
	ap->ioaddr.error_addr = acdev->vbase + ATA_ERR_FTR;
	ap->ioaddr.feature_addr = acdev->vbase + ATA_ERR_FTR;
	ap->ioaddr.nsect_addr = acdev->vbase + ATA_SC;
	ap->ioaddr.lbal_addr = acdev->vbase + ATA_SN;
	ap->ioaddr.lbam_addr = acdev->vbase + ATA_CL;
	ap->ioaddr.lbah_addr = acdev->vbase + ATA_CH;
	ap->ioaddr.device_addr = acdev->vbase + ATA_SH;
	ap->ioaddr.status_addr = acdev->vbase + ATA_STS_CMD;
	ap->ioaddr.command_addr = acdev->vbase + ATA_STS_CMD;
	ap->ioaddr.altstatus_addr = acdev->vbase + ATA_ASTS_DCTR;
	ap->ioaddr.ctl_addr = acdev->vbase + ATA_ASTS_DCTR;

	ata_port_desc(ap, "phy_addr %llx virt_addr %p",
			(unsigned long long) res->start, acdev->vbase);

	ret = cf_init(acdev);
	if (ret)
		return ret;

	cf_card_detect(acdev, 0);

	ret = ata_host_activate(host, acdev->irq, irq_handler, 0,
				&arasan_cf_sht);
	if (!ret)
		return 0;

	cf_exit(acdev);

	return ret;
}

static void arasan_cf_remove(struct platform_device *pdev)
{
	struct ata_host *host = platform_get_drvdata(pdev);
	struct arasan_cf_dev *acdev = host->ports[0]->private_data;

	ata_host_detach(host);
	cf_exit(acdev);
}

#ifdef CONFIG_PM_SLEEP
static int arasan_cf_suspend(struct device *dev)
{
	struct ata_host *host = dev_get_drvdata(dev);
	struct arasan_cf_dev *acdev = host->ports[0]->private_data;

	if (acdev->dma_chan)
		dmaengine_terminate_all(acdev->dma_chan);

	cf_exit(acdev);
	ata_host_suspend(host, PMSG_SUSPEND);
	return 0;
}

static int arasan_cf_resume(struct device *dev)
{
	struct ata_host *host = dev_get_drvdata(dev);
	struct arasan_cf_dev *acdev = host->ports[0]->private_data;

	cf_init(acdev);
	ata_host_resume(host);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(arasan_cf_pm_ops, arasan_cf_suspend, arasan_cf_resume);

#ifdef CONFIG_OF
static const struct of_device_id arasan_cf_id_table[] = {
	{ .compatible = "arasan,cf-spear1340" },
	{}
};
MODULE_DEVICE_TABLE(of, arasan_cf_id_table);
#endif

static struct platform_driver arasan_cf_driver = {
	.probe = arasan_cf_probe,
	.remove = arasan_cf_remove,
	.driver = {
		.name = DRIVER_NAME,
		.pm = &arasan_cf_pm_ops,
		.of_match_table = of_match_ptr(arasan_cf_id_table),
	},
};

module_platform_driver(arasan_cf_driver);

MODULE_AUTHOR("Viresh Kumar <[email protected]>");
MODULE_DESCRIPTION("Arasan ATA Compact Flash driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);