GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/ata/pata_arasan_cf.c
/*
 * drivers/ata/pata_arasan_cf.c
 *
 * Arasan Compact Flash host controller source file
 *
 * Copyright (C) 2011 ST Microelectronics
 * Viresh Kumar <[email protected]>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

/*
 * The Arasan CompactFlash Device Controller IP core has three basic modes of
 * operation: PC card ATA using I/O mode, PC card ATA using memory mode, and
 * PC card ATA using True IDE mode. This driver currently supports only True
 * IDE mode.
 *
 * The Arasan CF controller shares its global IRQ register with the Arasan XD
 * controller.
 *
 * Tested on arch/arm/mach-spear13xx.
 */

#include <linux/ata.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/libata.h>
#include <linux/module.h>
#include <linux/pata_arasan_cf_data.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#define DRIVER_NAME "arasan_cf"
#define TIMEOUT msecs_to_jiffies(3000)

/* Registers */
/* CompactFlash Interface Status */
#define CFI_STS 0x000
#define STS_CHG (1)
#define BIN_AUDIO_OUT (1 << 1)
#define CARD_DETECT1 (1 << 2)
#define CARD_DETECT2 (1 << 3)
#define INP_ACK (1 << 4)
#define CARD_READY (1 << 5)
#define IO_READY (1 << 6)
#define B16_IO_PORT_SEL (1 << 7)
/* IRQ */
#define IRQ_STS 0x004
/* Interrupt Enable */
#define IRQ_EN 0x008
#define CARD_DETECT_IRQ (1)
#define STATUS_CHNG_IRQ (1 << 1)
#define MEM_MODE_IRQ (1 << 2)
#define IO_MODE_IRQ (1 << 3)
#define TRUE_IDE_MODE_IRQ (1 << 8)
#define PIO_XFER_ERR_IRQ (1 << 9)
#define BUF_AVAIL_IRQ (1 << 10)
#define XFER_DONE_IRQ (1 << 11)
#define IGNORED_IRQS (STATUS_CHNG_IRQ | MEM_MODE_IRQ | IO_MODE_IRQ |\
				TRUE_IDE_MODE_IRQ)
#define TRUE_IDE_IRQS (CARD_DETECT_IRQ | PIO_XFER_ERR_IRQ |\
				BUF_AVAIL_IRQ | XFER_DONE_IRQ)
/* Operation Mode */
#define OP_MODE 0x00C
#define CARD_MODE_MASK (0x3)
#define MEM_MODE (0x0)
#define IO_MODE (0x1)
#define TRUE_IDE_MODE (0x2)

#define CARD_TYPE_MASK (1 << 2)
#define CF_CARD (0)
#define CF_PLUS_CARD (1 << 2)

#define CARD_RESET (1 << 3)
#define CFHOST_ENB (1 << 4)
#define OUTPUTS_TRISTATE (1 << 5)
#define ULTRA_DMA_ENB (1 << 8)
#define MULTI_WORD_DMA_ENB (1 << 9)
#define DRQ_BLOCK_SIZE_MASK (0x3 << 11)
#define DRQ_BLOCK_SIZE_512 (0)
#define DRQ_BLOCK_SIZE_1024 (1 << 11)
#define DRQ_BLOCK_SIZE_2048 (2 << 11)
#define DRQ_BLOCK_SIZE_4096 (3 << 11)
/* CF Interface Clock Configuration */
#define CLK_CFG 0x010
#define CF_IF_CLK_MASK (0XF)
/* CF Timing Mode Configuration */
#define TM_CFG 0x014
#define MEM_MODE_TIMING_MASK (0x3)
#define MEM_MODE_TIMING_250NS (0x0)
#define MEM_MODE_TIMING_120NS (0x1)
#define MEM_MODE_TIMING_100NS (0x2)
#define MEM_MODE_TIMING_80NS (0x3)

#define IO_MODE_TIMING_MASK (0x3 << 2)
#define IO_MODE_TIMING_250NS (0x0 << 2)
#define IO_MODE_TIMING_120NS (0x1 << 2)
#define IO_MODE_TIMING_100NS (0x2 << 2)
#define IO_MODE_TIMING_80NS (0x3 << 2)

#define TRUEIDE_PIO_TIMING_MASK (0x7 << 4)
#define TRUEIDE_PIO_TIMING_SHIFT 4

#define TRUEIDE_MWORD_DMA_TIMING_MASK (0x7 << 7)
#define TRUEIDE_MWORD_DMA_TIMING_SHIFT 7

#define ULTRA_DMA_TIMING_MASK (0x7 << 10)
#define ULTRA_DMA_TIMING_SHIFT 10
/* CF Transfer Address */
#define XFER_ADDR 0x014
#define XFER_ADDR_MASK (0x7FF)
#define MAX_XFER_COUNT 0x20000u
/* Transfer Control */
#define XFER_CTR 0x01C
#define XFER_COUNT_MASK (0x3FFFF)
#define ADDR_INC_DISABLE (1 << 24)
#define XFER_WIDTH_MASK (1 << 25)
#define XFER_WIDTH_8B (0)
#define XFER_WIDTH_16B (1 << 25)

#define MEM_TYPE_MASK (1 << 26)
#define MEM_TYPE_COMMON (0)
#define MEM_TYPE_ATTRIBUTE (1 << 26)

#define MEM_IO_XFER_MASK (1 << 27)
#define MEM_XFER (0)
#define IO_XFER (1 << 27)

#define DMA_XFER_MODE (1 << 28)

#define AHB_BUS_NORMAL_PIO_OPRTN (~(1 << 29))
#define XFER_DIR_MASK (1 << 30)
#define XFER_READ (0)
#define XFER_WRITE (1 << 30)

#define XFER_START (1 << 31)
/* Write Data Port */
#define WRITE_PORT 0x024
/* Read Data Port */
#define READ_PORT 0x028
/* ATA Data Port */
#define ATA_DATA_PORT 0x030
#define ATA_DATA_PORT_MASK (0xFFFF)
/* ATA Error/Features */
#define ATA_ERR_FTR 0x034
/* ATA Sector Count */
#define ATA_SC 0x038
/* ATA Sector Number */
#define ATA_SN 0x03C
/* ATA Cylinder Low */
#define ATA_CL 0x040
/* ATA Cylinder High */
#define ATA_CH 0x044
/* ATA Select Card/Head */
#define ATA_SH 0x048
/* ATA Status-Command */
#define ATA_STS_CMD 0x04C
/* ATA Alternate Status/Device Control */
#define ATA_ASTS_DCTR 0x050
/* Extended Write Data Port 0x200-0x3FC */
#define EXT_WRITE_PORT 0x200
/* Extended Read Data Port 0x400-0x5FC */
#define EXT_READ_PORT 0x400
#define FIFO_SIZE 0x200u
/* Global Interrupt Status */
#define GIRQ_STS 0x800
/* Global Interrupt Status enable */
#define GIRQ_STS_EN 0x804
/* Global Interrupt Signal enable */
#define GIRQ_SGN_EN 0x808
#define GIRQ_CF (1)
#define GIRQ_XD (1 << 1)

/* Compact Flash Controller Dev Structure */
struct arasan_cf_dev {
	/* pointer to ata_host structure */
	struct ata_host *host;
	/* clk structure, only if HAVE_CLK is defined */
#ifdef CONFIG_HAVE_CLK
	struct clk *clk;
#endif

	/* physical base address of controller */
	dma_addr_t pbase;
	/* virtual base address of controller */
	void __iomem *vbase;
	/* irq number */
	int irq;

	/* status to be updated to framework regarding DMA transfer */
	u8 dma_status;
	/* Card is present or Not */
	u8 card_present;

	/* dma specific */
	/* Completion for transfer complete interrupt from controller */
	struct completion cf_completion;
	/* Completion for DMA transfer complete. */
	struct completion dma_completion;
	/* Dma channel allocated */
	struct dma_chan *dma_chan;
	/* Mask for DMA transfers */
	dma_cap_mask_t mask;
	/* dma channel private data */
	void *dma_priv;
	/* DMA transfer work */
	struct work_struct work;
	/* DMA delayed finish work */
	struct delayed_work dwork;
	/* qc to be transferred using DMA */
	struct ata_queued_cmd *qc;
};

static struct scsi_host_template arasan_cf_sht = {
	ATA_BASE_SHT(DRIVER_NAME),
	.sg_tablesize = SG_NONE,
	.dma_boundary = 0xFFFFFFFFUL,
};

static void cf_dumpregs(struct arasan_cf_dev *acdev)
{
	struct device *dev = acdev->host->dev;

	dev_dbg(dev, ": =========== REGISTER DUMP ===========");
	dev_dbg(dev, ": CFI_STS: %x", readl(acdev->vbase + CFI_STS));
	dev_dbg(dev, ": IRQ_STS: %x", readl(acdev->vbase + IRQ_STS));
	dev_dbg(dev, ": IRQ_EN: %x", readl(acdev->vbase + IRQ_EN));
	dev_dbg(dev, ": OP_MODE: %x", readl(acdev->vbase + OP_MODE));
	dev_dbg(dev, ": CLK_CFG: %x", readl(acdev->vbase + CLK_CFG));
	dev_dbg(dev, ": TM_CFG: %x", readl(acdev->vbase + TM_CFG));
	dev_dbg(dev, ": XFER_CTR: %x", readl(acdev->vbase + XFER_CTR));
	dev_dbg(dev, ": GIRQ_STS: %x", readl(acdev->vbase + GIRQ_STS));
	dev_dbg(dev, ": GIRQ_STS_EN: %x", readl(acdev->vbase + GIRQ_STS_EN));
	dev_dbg(dev, ": GIRQ_SGN_EN: %x", readl(acdev->vbase + GIRQ_SGN_EN));
	dev_dbg(dev, ": =====================================");
}

/* Enable/Disable global interrupts shared between CF and XD ctrlr. */
static void cf_ginterrupt_enable(struct arasan_cf_dev *acdev, bool enable)
{
	/* enable should be 0 or 1 */
	writel(enable, acdev->vbase + GIRQ_STS_EN);
	writel(enable, acdev->vbase + GIRQ_SGN_EN);
}

/* Enable/Disable CF interrupts */
static inline void
cf_interrupt_enable(struct arasan_cf_dev *acdev, u32 mask, bool enable)
{
	u32 val = readl(acdev->vbase + IRQ_EN);
	/* clear & enable/disable irqs */
	if (enable) {
		writel(mask, acdev->vbase + IRQ_STS);
		writel(val | mask, acdev->vbase + IRQ_EN);
	} else
		writel(val & ~mask, acdev->vbase + IRQ_EN);
}

static inline void cf_card_reset(struct arasan_cf_dev *acdev)
{
	u32 val = readl(acdev->vbase + OP_MODE);

	writel(val | CARD_RESET, acdev->vbase + OP_MODE);
	udelay(200);
	writel(val & ~CARD_RESET, acdev->vbase + OP_MODE);
}

static inline void cf_ctrl_reset(struct arasan_cf_dev *acdev)
{
	writel(readl(acdev->vbase + OP_MODE) & ~CFHOST_ENB,
			acdev->vbase + OP_MODE);
	writel(readl(acdev->vbase + OP_MODE) | CFHOST_ENB,
			acdev->vbase + OP_MODE);
}

static void cf_card_detect(struct arasan_cf_dev *acdev, bool hotplugged)
{
	struct ata_port *ap = acdev->host->ports[0];
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 val = readl(acdev->vbase + CFI_STS);

	/* Both CD1 & CD2 should be low if card inserted completely */
	if (!(val & (CARD_DETECT1 | CARD_DETECT2))) {
		if (acdev->card_present)
			return;
		acdev->card_present = 1;
		cf_card_reset(acdev);
	} else {
		if (!acdev->card_present)
			return;
		acdev->card_present = 0;
	}

	if (hotplugged) {
		ata_ehi_hotplugged(ehi);
		ata_port_freeze(ap);
	}
}

static int cf_init(struct arasan_cf_dev *acdev)
{
	struct arasan_cf_pdata *pdata = dev_get_platdata(acdev->host->dev);
	unsigned long flags;
	int ret = 0;

#ifdef CONFIG_HAVE_CLK
	ret = clk_enable(acdev->clk);
	if (ret) {
		dev_dbg(acdev->host->dev, "clock enable failed");
		return ret;
	}
#endif

	spin_lock_irqsave(&acdev->host->lock, flags);
	/* configure CF interface clock */
	writel((pdata->cf_if_clk <= CF_IF_CLK_200M) ? pdata->cf_if_clk :
			CF_IF_CLK_166M, acdev->vbase + CLK_CFG);

	writel(TRUE_IDE_MODE | CFHOST_ENB, acdev->vbase + OP_MODE);
	cf_interrupt_enable(acdev, CARD_DETECT_IRQ, 1);
	cf_ginterrupt_enable(acdev, 1);
	spin_unlock_irqrestore(&acdev->host->lock, flags);

	return ret;
}

static void cf_exit(struct arasan_cf_dev *acdev)
{
	unsigned long flags;

	spin_lock_irqsave(&acdev->host->lock, flags);
	cf_ginterrupt_enable(acdev, 0);
	cf_interrupt_enable(acdev, TRUE_IDE_IRQS, 0);
	cf_card_reset(acdev);
	writel(readl(acdev->vbase + OP_MODE) & ~CFHOST_ENB,
			acdev->vbase + OP_MODE);
	spin_unlock_irqrestore(&acdev->host->lock, flags);
#ifdef CONFIG_HAVE_CLK
	clk_disable(acdev->clk);
#endif
}

static void dma_callback(void *dev)
{
	struct arasan_cf_dev *acdev = (struct arasan_cf_dev *) dev;

	complete(&acdev->dma_completion);
}

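/*
 * dmaengine channel filter used by dma_request_channel() in data_xfer():
 * accept any DMA_MEMCPY-capable channel and stash the platform supplied
 * dma_priv in chan->private for the DMA controller driver.
 */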
static bool filter(struct dma_chan *chan, void *slave)
{
	chan->private = slave;
	return true;
}

static inline void dma_complete(struct arasan_cf_dev *acdev)
{
	struct ata_queued_cmd *qc = acdev->qc;
	unsigned long flags;

	acdev->qc = NULL;
	ata_sff_interrupt(acdev->irq, acdev->host);

	spin_lock_irqsave(&acdev->host->lock, flags);
	if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
		ata_ehi_push_desc(&qc->ap->link.eh_info, "DMA Failed: Timeout");
	spin_unlock_irqrestore(&acdev->host->lock, flags);
}

static inline int wait4buf(struct arasan_cf_dev *acdev)
{
	if (!wait_for_completion_timeout(&acdev->cf_completion, TIMEOUT)) {
		u32 rw = acdev->qc->tf.flags & ATA_TFLAG_WRITE;

		dev_err(acdev->host->dev, "%s TimeOut", rw ? "write" : "read");
		return -ETIMEDOUT;
	}

	/* Check if PIO Error interrupt has occurred */
	if (acdev->dma_status & ATA_DMA_ERR)
		return -EAGAIN;

	return 0;
}

static int
dma_xfer(struct arasan_cf_dev *acdev, dma_addr_t src, dma_addr_t dest, u32 len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan = acdev->dma_chan;
	dma_cookie_t cookie;
	unsigned long flags = DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP |
		DMA_COMPL_SKIP_DEST_UNMAP;
	int ret = 0;

	tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len, flags);
	if (!tx) {
		dev_err(acdev->host->dev, "device_prep_dma_memcpy failed\n");
		return -EAGAIN;
	}

	tx->callback = dma_callback;
	tx->callback_param = acdev;
	cookie = tx->tx_submit(tx);

	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(acdev->host->dev, "dma_submit_error\n");
		return ret;
	}

	chan->device->device_issue_pending(chan);

	/* Wait for DMA to complete */
	if (!wait_for_completion_timeout(&acdev->dma_completion, TIMEOUT)) {
		chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
		dev_err(acdev->host->dev, "wait_for_completion_timeout\n");
		return -ETIMEDOUT;
	}

	return ret;
}

static int sg_xfer(struct arasan_cf_dev *acdev, struct scatterlist *sg)
{
	dma_addr_t dest = 0, src = 0;
	u32 xfer_cnt, sglen, dma_len, xfer_ctr;
	u32 write = acdev->qc->tf.flags & ATA_TFLAG_WRITE;
	unsigned long flags;
	int ret = 0;

	sglen = sg_dma_len(sg);
	if (write) {
		src = sg_dma_address(sg);
		dest = acdev->pbase + EXT_WRITE_PORT;
	} else {
		dest = sg_dma_address(sg);
		src = acdev->pbase + EXT_READ_PORT;
	}

	/*
	 * For each sg:
	 * MAX_XFER_COUNT bytes of data are transferred before we get a
	 * transfer complete interrupt. In between, after each FIFO_SIZE
	 * chunk of data, a buffer available interrupt is generated. At
	 * that point we fill the FIFO again: at most FIFO_SIZE bytes.
	 */
	while (sglen) {
		xfer_cnt = min(sglen, MAX_XFER_COUNT);
		spin_lock_irqsave(&acdev->host->lock, flags);
		xfer_ctr = readl(acdev->vbase + XFER_CTR) &
			~XFER_COUNT_MASK;
		writel(xfer_ctr | xfer_cnt | XFER_START,
				acdev->vbase + XFER_CTR);
		spin_unlock_irqrestore(&acdev->host->lock, flags);

		/* continue dma xfers until current sg is completed */
		while (xfer_cnt) {
			/* wait for read to complete */
			if (!write) {
				ret = wait4buf(acdev);
				if (ret)
					goto fail;
			}

			/* read/write FIFO in chunk of FIFO_SIZE */
			dma_len = min(xfer_cnt, FIFO_SIZE);
			ret = dma_xfer(acdev, src, dest, dma_len);
			if (ret) {
				dev_err(acdev->host->dev, "dma failed");
				goto fail;
			}

			if (write)
				src += dma_len;
			else
				dest += dma_len;

			sglen -= dma_len;
			xfer_cnt -= dma_len;

			/* wait for write to complete */
			if (write) {
				ret = wait4buf(acdev);
				if (ret)
					goto fail;
			}
		}
	}

fail:
	spin_lock_irqsave(&acdev->host->lock, flags);
	writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
			acdev->vbase + XFER_CTR);
	spin_unlock_irqrestore(&acdev->host->lock, flags);

	return ret;
}

/*
 * This routine uses an external DMA controller to read/write data to/from the
 * FIFO of the CF controller. There are two transfer-related interrupts
 * supported by the CF controller:
 * - buf_avail: generated as soon as a buffer of 512 bytes is available for
 *   reading, or an empty buffer is available for writing.
 * - xfer_done: generated once "xfer_size" bytes have been transferred to/from
 *   the FIFO. xfer_size is programmed in the XFER_CTR register.
 *
 * Max buffer size = FIFO_SIZE = 512 Bytes.
 * Max xfer_size = MAX_XFER_COUNT = 256 KB.
 */
static void data_xfer(struct work_struct *work)
{
	struct arasan_cf_dev *acdev = container_of(work, struct arasan_cf_dev,
			work);
	struct ata_queued_cmd *qc = acdev->qc;
	struct scatterlist *sg;
	unsigned long flags;
	u32 temp;
	int ret = 0;

	/* request dma channels */
	/* dma_request_channel may sleep, so calling from process context */
	acdev->dma_chan = dma_request_channel(acdev->mask, filter,
			acdev->dma_priv);
	if (!acdev->dma_chan) {
		dev_err(acdev->host->dev, "Unable to get dma_chan\n");
		goto chan_request_fail;
	}

	for_each_sg(qc->sg, sg, qc->n_elem, temp) {
		ret = sg_xfer(acdev, sg);
		if (ret)
			break;
	}

	dma_release_channel(acdev->dma_chan);

	/* data xferred successfully */
	if (!ret) {
		u32 status;

		spin_lock_irqsave(&acdev->host->lock, flags);
		status = ioread8(qc->ap->ioaddr.altstatus_addr);
		spin_unlock_irqrestore(&acdev->host->lock, flags);
		if (status & (ATA_BUSY | ATA_DRQ)) {
			ata_sff_queue_delayed_work(&acdev->dwork, 1);
			return;
		}

		goto sff_intr;
	}

	cf_dumpregs(acdev);

chan_request_fail:
	spin_lock_irqsave(&acdev->host->lock, flags);
	/* error when transferring data to/from memory */
	qc->err_mask |= AC_ERR_HOST_BUS;
	qc->ap->hsm_task_state = HSM_ST_ERR;

	cf_ctrl_reset(acdev);
	spin_unlock_irqrestore(qc->ap->lock, flags);
sff_intr:
	dma_complete(acdev);
}

static void delayed_finish(struct work_struct *work)
{
	struct arasan_cf_dev *acdev = container_of(work, struct arasan_cf_dev,
			dwork.work);
	struct ata_queued_cmd *qc = acdev->qc;
	unsigned long flags;
	u8 status;

	spin_lock_irqsave(&acdev->host->lock, flags);
	status = ioread8(qc->ap->ioaddr.altstatus_addr);
	spin_unlock_irqrestore(&acdev->host->lock, flags);

	if (status & (ATA_BUSY | ATA_DRQ))
		ata_sff_queue_delayed_work(&acdev->dwork, 1);
	else
		dma_complete(acdev);
}

static irqreturn_t arasan_cf_interrupt(int irq, void *dev)
{
	struct arasan_cf_dev *acdev = ((struct ata_host *)dev)->private_data;
	unsigned long flags;
	u32 irqsts;

	irqsts = readl(acdev->vbase + GIRQ_STS);
	if (!(irqsts & GIRQ_CF))
		return IRQ_NONE;

	spin_lock_irqsave(&acdev->host->lock, flags);
	irqsts = readl(acdev->vbase + IRQ_STS);
	writel(irqsts, acdev->vbase + IRQ_STS); /* clear irqs */
	writel(GIRQ_CF, acdev->vbase + GIRQ_STS); /* clear girqs */

	/* handle only relevant interrupts */
	irqsts &= ~IGNORED_IRQS;

	if (irqsts & CARD_DETECT_IRQ) {
		cf_card_detect(acdev, 1);
		spin_unlock_irqrestore(&acdev->host->lock, flags);
		return IRQ_HANDLED;
	}

	if (irqsts & PIO_XFER_ERR_IRQ) {
		acdev->dma_status = ATA_DMA_ERR;
		writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
				acdev->vbase + XFER_CTR);
		spin_unlock_irqrestore(&acdev->host->lock, flags);
		complete(&acdev->cf_completion);
		dev_err(acdev->host->dev, "pio xfer err irq\n");
		return IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&acdev->host->lock, flags);

	if (irqsts & BUF_AVAIL_IRQ) {
		complete(&acdev->cf_completion);
		return IRQ_HANDLED;
	}

	if (irqsts & XFER_DONE_IRQ) {
		struct ata_queued_cmd *qc = acdev->qc;

		/* Send Complete only for write */
		if (qc->tf.flags & ATA_TFLAG_WRITE)
			complete(&acdev->cf_completion);
	}

	return IRQ_HANDLED;
}

static void arasan_cf_freeze(struct ata_port *ap)
{
	struct arasan_cf_dev *acdev = ap->host->private_data;

	/* stop transfer and reset controller */
	writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
			acdev->vbase + XFER_CTR);
	cf_ctrl_reset(acdev);
	acdev->dma_status = ATA_DMA_ERR;

	ata_sff_dma_pause(ap);
	ata_sff_freeze(ap);
}

void arasan_cf_error_handler(struct ata_port *ap)
{
	struct arasan_cf_dev *acdev = ap->host->private_data;

	/*
	 * DMA transfers using an external DMA controller may be scheduled.
	 * Abort them before handling error. Refer data_xfer() for further
	 * details.
	 */
	cancel_work_sync(&acdev->work);
	cancel_delayed_work_sync(&acdev->dwork);
	return ata_sff_error_handler(ap);
}

static void arasan_cf_dma_start(struct arasan_cf_dev *acdev)
{
	u32 xfer_ctr = readl(acdev->vbase + XFER_CTR) & ~XFER_DIR_MASK;
	u32 write = acdev->qc->tf.flags & ATA_TFLAG_WRITE;

	xfer_ctr |= write ? XFER_WRITE : XFER_READ;
	writel(xfer_ctr, acdev->vbase + XFER_CTR);

	acdev->qc->ap->ops->sff_exec_command(acdev->qc->ap, &acdev->qc->tf);
	ata_sff_queue_work(&acdev->work);
}

unsigned int arasan_cf_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct arasan_cf_dev *acdev = ap->host->private_data;

	/* defer PIO handling to sff_qc_issue */
	if (!ata_is_dma(qc->tf.protocol))
		return ata_sff_qc_issue(qc);

	/* select the device */
	ata_wait_idle(ap);
	ata_sff_dev_select(ap, qc->dev->devno);
	ata_wait_idle(ap);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->sff_tf_load(ap, &qc->tf);
		acdev->dma_status = 0;
		acdev->qc = qc;
		arasan_cf_dma_start(acdev);
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	default:
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}

static void arasan_cf_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	struct arasan_cf_dev *acdev = ap->host->private_data;
	u8 pio = adev->pio_mode - XFER_PIO_0;
	unsigned long flags;
	u32 val;

	/* Arasan ctrl supports Mode0 -> Mode6 */
	if (pio > 6) {
		dev_err(ap->dev, "Unknown PIO mode\n");
		return;
	}

	spin_lock_irqsave(&acdev->host->lock, flags);
	val = readl(acdev->vbase + OP_MODE) &
		~(ULTRA_DMA_ENB | MULTI_WORD_DMA_ENB | DRQ_BLOCK_SIZE_MASK);
	writel(val, acdev->vbase + OP_MODE);
	val = readl(acdev->vbase + TM_CFG) & ~TRUEIDE_PIO_TIMING_MASK;
	val |= pio << TRUEIDE_PIO_TIMING_SHIFT;
	writel(val, acdev->vbase + TM_CFG);

	cf_interrupt_enable(acdev, BUF_AVAIL_IRQ | XFER_DONE_IRQ, 0);
	cf_interrupt_enable(acdev, PIO_XFER_ERR_IRQ, 1);
	spin_unlock_irqrestore(&acdev->host->lock, flags);
}

static void arasan_cf_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	struct arasan_cf_dev *acdev = ap->host->private_data;
	u32 opmode, tmcfg, dma_mode = adev->dma_mode;
	unsigned long flags;

	spin_lock_irqsave(&acdev->host->lock, flags);
	opmode = readl(acdev->vbase + OP_MODE) &
		~(MULTI_WORD_DMA_ENB | ULTRA_DMA_ENB);
	tmcfg = readl(acdev->vbase + TM_CFG);

	if ((dma_mode >= XFER_UDMA_0) && (dma_mode <= XFER_UDMA_6)) {
		opmode |= ULTRA_DMA_ENB;
		tmcfg &= ~ULTRA_DMA_TIMING_MASK;
		tmcfg |= (dma_mode - XFER_UDMA_0) << ULTRA_DMA_TIMING_SHIFT;
	} else if ((dma_mode >= XFER_MW_DMA_0) && (dma_mode <= XFER_MW_DMA_4)) {
		opmode |= MULTI_WORD_DMA_ENB;
		tmcfg &= ~TRUEIDE_MWORD_DMA_TIMING_MASK;
		tmcfg |= (dma_mode - XFER_MW_DMA_0) <<
			TRUEIDE_MWORD_DMA_TIMING_SHIFT;
	} else {
		dev_err(ap->dev, "Unknown DMA mode\n");
		spin_unlock_irqrestore(&acdev->host->lock, flags);
		return;
	}

	writel(opmode, acdev->vbase + OP_MODE);
	writel(tmcfg, acdev->vbase + TM_CFG);
	writel(DMA_XFER_MODE, acdev->vbase + XFER_CTR);

	cf_interrupt_enable(acdev, PIO_XFER_ERR_IRQ, 0);
	cf_interrupt_enable(acdev, BUF_AVAIL_IRQ | XFER_DONE_IRQ, 1);
	spin_unlock_irqrestore(&acdev->host->lock, flags);
}

static struct ata_port_operations arasan_cf_ops = {
	.inherits = &ata_sff_port_ops,
	.freeze = arasan_cf_freeze,
	.error_handler = arasan_cf_error_handler,
	.qc_issue = arasan_cf_qc_issue,
	.set_piomode = arasan_cf_set_piomode,
	.set_dmamode = arasan_cf_set_dmamode,
};

static int __devinit arasan_cf_probe(struct platform_device *pdev)
{
	struct arasan_cf_dev *acdev;
	struct arasan_cf_pdata *pdata = dev_get_platdata(&pdev->dev);
	struct ata_host *host;
	struct ata_port *ap;
	struct resource *res;
	irq_handler_t irq_handler = NULL;
	int ret = 0;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
				DRIVER_NAME)) {
		dev_warn(&pdev->dev, "Failed to get memory region resource\n");
		return -ENOENT;
	}

	acdev = devm_kzalloc(&pdev->dev, sizeof(*acdev), GFP_KERNEL);
	if (!acdev) {
		dev_warn(&pdev->dev, "kzalloc fail\n");
		return -ENOMEM;
	}

	/* if irq is 0, support only PIO */
	acdev->irq = platform_get_irq(pdev, 0);
	if (acdev->irq)
		irq_handler = arasan_cf_interrupt;
	else
		pdata->quirk |= CF_BROKEN_MWDMA | CF_BROKEN_UDMA;

	acdev->pbase = res->start;
	acdev->vbase = devm_ioremap_nocache(&pdev->dev, res->start,
			resource_size(res));
	if (!acdev->vbase) {
		dev_warn(&pdev->dev, "ioremap fail\n");
		return -ENOMEM;
	}

#ifdef CONFIG_HAVE_CLK
	acdev->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(acdev->clk)) {
		dev_warn(&pdev->dev, "Clock not found\n");
		return PTR_ERR(acdev->clk);
	}
#endif

	/* allocate host */
	host = ata_host_alloc(&pdev->dev, 1);
	if (!host) {
		ret = -ENOMEM;
		dev_warn(&pdev->dev, "alloc host fail\n");
		goto free_clk;
	}

	ap = host->ports[0];
	host->private_data = acdev;
	acdev->host = host;
	ap->ops = &arasan_cf_ops;
	ap->pio_mask = ATA_PIO6;
	ap->mwdma_mask = ATA_MWDMA4;
	ap->udma_mask = ATA_UDMA6;

	init_completion(&acdev->cf_completion);
	init_completion(&acdev->dma_completion);
	INIT_WORK(&acdev->work, data_xfer);
	INIT_DELAYED_WORK(&acdev->dwork, delayed_finish);
	dma_cap_set(DMA_MEMCPY, acdev->mask);
	acdev->dma_priv = pdata->dma_priv;

	/* Handle platform specific quirks */
	if (pdata->quirk) {
		if (pdata->quirk & CF_BROKEN_PIO) {
			ap->ops->set_piomode = NULL;
			ap->pio_mask = 0;
		}
		if (pdata->quirk & CF_BROKEN_MWDMA)
			ap->mwdma_mask = 0;
		if (pdata->quirk & CF_BROKEN_UDMA)
			ap->udma_mask = 0;
	}
	ap->flags |= ATA_FLAG_PIO_POLLING | ATA_FLAG_NO_ATAPI;

	ap->ioaddr.cmd_addr = acdev->vbase + ATA_DATA_PORT;
	ap->ioaddr.data_addr = acdev->vbase + ATA_DATA_PORT;
	ap->ioaddr.error_addr = acdev->vbase + ATA_ERR_FTR;
	ap->ioaddr.feature_addr = acdev->vbase + ATA_ERR_FTR;
	ap->ioaddr.nsect_addr = acdev->vbase + ATA_SC;
	ap->ioaddr.lbal_addr = acdev->vbase + ATA_SN;
	ap->ioaddr.lbam_addr = acdev->vbase + ATA_CL;
	ap->ioaddr.lbah_addr = acdev->vbase + ATA_CH;
	ap->ioaddr.device_addr = acdev->vbase + ATA_SH;
	ap->ioaddr.status_addr = acdev->vbase + ATA_STS_CMD;
	ap->ioaddr.command_addr = acdev->vbase + ATA_STS_CMD;
	ap->ioaddr.altstatus_addr = acdev->vbase + ATA_ASTS_DCTR;
	ap->ioaddr.ctl_addr = acdev->vbase + ATA_ASTS_DCTR;

	ata_port_desc(ap, "phy_addr %llx virt_addr %p",
			(unsigned long long) res->start, acdev->vbase);

	ret = cf_init(acdev);
	if (ret)
		goto free_clk;

	cf_card_detect(acdev, 0);

	return ata_host_activate(host, acdev->irq, irq_handler, 0,
			&arasan_cf_sht);

free_clk:
#ifdef CONFIG_HAVE_CLK
	clk_put(acdev->clk);
#endif
	return ret;
}

static int __devexit arasan_cf_remove(struct platform_device *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct arasan_cf_dev *acdev = host->ports[0]->private_data;

	ata_host_detach(host);
	cf_exit(acdev);
#ifdef CONFIG_HAVE_CLK
	clk_put(acdev->clk);
#endif

	return 0;
}

#ifdef CONFIG_PM
static int arasan_cf_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct arasan_cf_dev *acdev = host->ports[0]->private_data;

	if (acdev->dma_chan) {
		acdev->dma_chan->device->device_control(acdev->dma_chan,
				DMA_TERMINATE_ALL, 0);
		dma_release_channel(acdev->dma_chan);
	}
	cf_exit(acdev);
	return ata_host_suspend(host, PMSG_SUSPEND);
}

static int arasan_cf_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct arasan_cf_dev *acdev = host->ports[0]->private_data;

	cf_init(acdev);
	ata_host_resume(host);

	return 0;
}

static const struct dev_pm_ops arasan_cf_pm_ops = {
	.suspend = arasan_cf_suspend,
	.resume = arasan_cf_resume,
};
#endif

static struct platform_driver arasan_cf_driver = {
	.probe = arasan_cf_probe,
	.remove = __devexit_p(arasan_cf_remove),
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
#ifdef CONFIG_PM
		.pm = &arasan_cf_pm_ops,
#endif
	},
};

static int __init arasan_cf_init(void)
{
	return platform_driver_register(&arasan_cf_driver);
}
module_init(arasan_cf_init);

static void __exit arasan_cf_exit(void)
{
	platform_driver_unregister(&arasan_cf_driver);
}
module_exit(arasan_cf_exit);

MODULE_AUTHOR("Viresh Kumar <[email protected]>");
MODULE_DESCRIPTION("Arasan ATA Compact Flash driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
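
/*
 * Hypothetical usage sketch (not part of this driver): the controller is
 * probed as a platform device named "arasan_cf" (DRIVER_NAME), so a board
 * file would typically register it with one memory resource, one IRQ
 * resource and a struct arasan_cf_pdata carrying cf_if_clk, quirk and
 * dma_priv. The base address, IRQ number and pdata values below are
 * illustrative assumptions only, kept compiled out.
 */
#if 0
static struct arasan_cf_pdata cf_pdata = {
	.cf_if_clk = CF_IF_CLK_166M,
	.quirk = CF_BROKEN_UDMA,	/* example quirk */
	.dma_priv = NULL,		/* DMA channel filter data, if any */
};

static struct resource cf_resources[] = {
	{
		.start = 0xB2800000,	/* illustrative base address */
		.end = 0xB2800000 + SZ_4K - 1,
		.flags = IORESOURCE_MEM,
	}, {
		.start = 85,		/* illustrative IRQ number */
		.end = 85,
		.flags = IORESOURCE_IRQ,
	},
};

static struct platform_device cf_device = {
	.name = "arasan_cf",
	.id = -1,
	.dev = {
		.platform_data = &cf_pdata,
	},
	.resource = cf_resources,
	.num_resources = ARRAY_SIZE(cf_resources),
};

/* In board init code: platform_device_register(&cf_device); */
#endif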