Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
freebsd
GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/arm/broadcom/bcm2835/bcm2835_sdhci.c
39566 views
1
/*-
2
* SPDX-License-Identifier: BSD-2-Clause
3
*
4
* Copyright (c) 2012 Oleksandr Tymoshenko <[email protected]>
5
* All rights reserved.
6
*
7
* Redistribution and use in source and binary forms, with or without
8
* modification, are permitted provided that the following conditions
9
* are met:
10
* 1. Redistributions of source code must retain the above copyright
11
* notice, this list of conditions and the following disclaimer.
12
* 2. Redistributions in binary form must reproduce the above copyright
13
* notice, this list of conditions and the following disclaimer in the
14
* documentation and/or other materials provided with the distribution.
15
*
16
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26
* SUCH DAMAGE.
27
*
28
*/
29
30
#include <sys/param.h>
31
#include <sys/systm.h>
32
#include <sys/bus.h>
33
#include <sys/conf.h>
34
#include <sys/kernel.h>
35
#include <sys/lock.h>
36
#include <sys/malloc.h>
37
#include <sys/module.h>
38
#include <sys/mutex.h>
39
#include <sys/rman.h>
40
#include <sys/sysctl.h>
41
#include <sys/taskqueue.h>
42
43
#include <machine/bus.h>
44
45
#include <dev/ofw/ofw_bus.h>
46
#include <dev/ofw/ofw_bus_subr.h>
47
48
#include <dev/mmc/bridge.h>
49
#include <dev/mmc/mmcreg.h>
50
#include <dev/mmc/mmc_fdt_helpers.h>
51
52
#include <dev/sdhci/sdhci.h>
53
54
#include "mmcbr_if.h"
55
#include "sdhci_if.h"
56
57
#include "opt_mmccam.h"
58
59
#include "bcm2835_dma.h"
60
#include <arm/broadcom/bcm2835/bcm2835_mbox_prop.h>
61
#ifdef NOTYET
62
#include <arm/broadcom/bcm2835/bcm2835_clkman.h>
63
#endif
64
#include <arm/broadcom/bcm2835/bcm2835_vcbus.h>
65
66
/* Fallback base-clock frequencies (MHz) when neither the firmware mailbox
 * nor the FDT supplies one. */
#define	BCM2835_DEFAULT_SDHCI_FREQ	50
#define	BCM2838_DEFAULT_SDHCI_FREQ	100

/* The controller's data FIFO is serviced in 512-byte units. */
#define	BCM_SDHCI_BUFFER_SIZE		512
/*
 * NUM_DMA_SEGS is the number of DMA segments we want to accommodate on average.
 * We add in a number of segments based on how much we may need to spill into
 * another segment due to crossing page boundaries. e.g. up to PAGE_SIZE, an
 * extra page is needed as we can cross a page boundary exactly once.
 */
#define	NUM_DMA_SEGS		1
#define	NUM_DMA_SPILL_SEGS	\
	((((NUM_DMA_SEGS * BCM_SDHCI_BUFFER_SIZE) - 1) / PAGE_SIZE) + 1)
#define	ALLOCATED_DMA_SEGS	(NUM_DMA_SEGS + NUM_DMA_SPILL_SEGS)
#define	BCM_DMA_MAXSIZE		(NUM_DMA_SEGS * BCM_SDHCI_BUFFER_SIZE)

/* Bytes of the current command's data transfer not yet handed to hardware. */
#define	BCM_SDHCI_SLOT_LEFT(slot)	\
	((slot)->curcmd->data->len - (slot)->offset)

/* Largest whole-buffer-multiple chunk we can DMA next (0 if < one buffer). */
#define	BCM_SDHCI_SEGSZ_LEFT(slot)	\
	min(BCM_DMA_MAXSIZE,		\
	    rounddown(BCM_SDHCI_SLOT_LEFT(slot), BCM_SDHCI_BUFFER_SIZE))

/* SDHCI interrupt bits masked off while a platform DMA transfer runs. */
#define	DATA_PENDING_MASK	(SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL)
#define	DATA_XFER_MASK		(DATA_PENDING_MASK | SDHCI_INT_DATA_END)
91
92
#ifdef DEBUG
/* Debug chatter level; settable via loader tunable and sysctl. */
static int bcm2835_sdhci_debug = 0;

TUNABLE_INT("hw.bcm2835.sdhci.debug", &bcm2835_sdhci_debug);
SYSCTL_INT(_hw_sdhci, OID_AUTO, bcm2835_sdhci_debug, CTLFLAG_RWTUN,
    &bcm2835_sdhci_debug, 0, "bcm2835 SDHCI debug level");

/* Prefix each debug message with the calling function's name. */
#define	dprintf(fmt, args...)					\
	do {							\
		if (bcm2835_sdhci_debug)			\
			printf("%s: " fmt, __func__, ##args);	\
	} while (0)
#else
#define dprintf(fmt, args...)
#endif
107
108
/* Advertise high-speed capability by default; tunable below. */
static int bcm2835_sdhci_hs = 1;
/* Force PIO instead of platform DMA transfers when non-zero; tunable below. */
static int bcm2835_sdhci_pio_mode = 0;
111
/* Per-SoC configuration selected through the FDT compatible string. */
struct bcm_mmc_conf {
	int	clock_id;	/* mailbox clock ID used to query the rate */
	int	clock_src;	/* clkman clock source, -1 if unused */
	int	default_freq;	/* fallback base clock, MHz */
	int	quirks;		/* SDHCI_QUIRK_* flags for this controller */
	int	emmc_dreq;	/* DMA DREQ line pacing writes to the FIFO */
};
118
119
/* BCM2835/2836/2837 (Arasan eMMC) configuration. */
struct bcm_mmc_conf bcm2835_sdhci_conf = {
	.clock_id	= BCM2835_MBOX_CLOCK_ID_EMMC,
	.clock_src	= -1,
	.default_freq	= BCM2835_DEFAULT_SDHCI_FREQ,
	.quirks		= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
	    SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | SDHCI_QUIRK_DONT_SET_HISPD_BIT |
	    SDHCI_QUIRK_MISSING_CAPS,
	.emmc_dreq	= BCM_DMA_DREQ_EMMC,
};
128
129
/* BCM2838/BCM2711 (Raspberry Pi 4) EMMC2 configuration; no DREQ pacing. */
struct bcm_mmc_conf bcm2838_emmc2_conf = {
	.clock_id	= BCM2838_MBOX_CLOCK_ID_EMMC2,
	.clock_src	= -1,
	.default_freq	= BCM2838_DEFAULT_SDHCI_FREQ,
	.quirks		= 0,
	.emmc_dreq	= BCM_DMA_DREQ_NONE,
};
136
137
/* FDT compatible strings mapped to the matching bcm_mmc_conf. */
static struct ofw_compat_data compat_data[] = {
	{"broadcom,bcm2835-sdhci",	(uintptr_t)&bcm2835_sdhci_conf},
	{"brcm,bcm2835-sdhci",		(uintptr_t)&bcm2835_sdhci_conf},
	{"brcm,bcm2835-mmc",		(uintptr_t)&bcm2835_sdhci_conf},
	{"brcm,bcm2711-emmc2",		(uintptr_t)&bcm2838_emmc2_conf},
	{"brcm,bcm2838-emmc2",		(uintptr_t)&bcm2838_emmc2_conf},
	{NULL,				0}
};
145
146
/* Loader tunables for the knobs declared above. */
TUNABLE_INT("hw.bcm2835.sdhci.hs", &bcm2835_sdhci_hs);
TUNABLE_INT("hw.bcm2835.sdhci.pio_mode", &bcm2835_sdhci_pio_mode);
148
149
/* Per-device software state. */
struct bcm_sdhci_softc {
	device_t		sc_dev;
	struct resource *	sc_mem_res;	/* controller MMIO window */
	struct resource *	sc_irq_res;
	bus_space_tag_t		sc_bst;
	bus_space_handle_t	sc_bsh;
	void *			sc_intrhand;
	struct mmc_request *	sc_req;
	struct sdhci_slot	sc_slot;	/* the single slot we drive */
	struct mmc_helper	sc_mmc_helper;	/* FDT regulators/properties */
	int			sc_dma_ch;	/* dedicated bcm283x DMA channel */
	bus_dma_tag_t		sc_dma_tag;
	bus_dmamap_t		sc_dma_map;
	vm_paddr_t		sc_sdhci_buffer_phys; /* PA of SDHCI_BUFFER reg */
	/* Segments of the currently loaded DMA map (see bcm_sdhci_dmacb). */
	bus_addr_t		dmamap_seg_addrs[ALLOCATED_DMA_SEGS];
	bus_size_t		dmamap_seg_sizes[ALLOCATED_DMA_SEGS];
	int			dmamap_seg_count;
	int			dmamap_seg_index; /* next segment to start */
	int			dmamap_status;	  /* busdma load error, if any */
	/* Shadow copies worked around by the 16-bit register emulation. */
	uint32_t		blksz_and_count;
	uint32_t		cmd_and_mode;
	bool			need_update_blk; /* flush blksz before next cmd */
#ifdef NOTYET
	device_t		clkman;
#endif
	struct bcm_mmc_conf *	conf;		/* per-SoC configuration */
};
176
177
/* Local prototypes. */
static int bcm_sdhci_probe(device_t);
static int bcm_sdhci_attach(device_t);
static int bcm_sdhci_detach(device_t);
static void bcm_sdhci_intr(void *);

static int bcm_sdhci_get_ro(device_t, device_t);
static void bcm_sdhci_dma_intr(int ch, void *arg);
static void bcm_sdhci_start_dma(struct sdhci_slot *slot);
185
186
static void
187
bcm_sdhci_dmacb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
188
{
189
struct bcm_sdhci_softc *sc = arg;
190
int i;
191
192
/* Sanity check: we can only ever have one mapping at a time. */
193
KASSERT(sc->dmamap_seg_count == 0, ("leaked DMA segment"));
194
sc->dmamap_status = err;
195
sc->dmamap_seg_count = nseg;
196
197
/* Note nseg is guaranteed to be zero if err is non-zero. */
198
for (i = 0; i < nseg; i++) {
199
sc->dmamap_seg_addrs[i] = segs[i].ds_addr;
200
sc->dmamap_seg_sizes[i] = segs[i].ds_len;
201
}
202
}
203
204
/*
 * Probe: match against the FDT compatible strings in compat_data.
 */
static int
bcm_sdhci_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
		return (ENXIO);

	device_set_desc(dev, "Broadcom 2708 SDHCI controller");

	return (BUS_PROBE_DEFAULT);
}
218
219
static int
220
bcm_sdhci_attach(device_t dev)
221
{
222
struct bcm_sdhci_softc *sc = device_get_softc(dev);
223
int rid, err;
224
phandle_t node;
225
pcell_t cell;
226
u_int default_freq;
227
228
sc->sc_dev = dev;
229
sc->sc_req = NULL;
230
231
sc->conf = (struct bcm_mmc_conf *)ofw_bus_search_compatible(dev,
232
compat_data)->ocd_data;
233
if (sc->conf == 0)
234
return (ENXIO);
235
236
err = bcm2835_mbox_set_power_state(BCM2835_MBOX_POWER_ID_EMMC, TRUE);
237
if (err != 0) {
238
if (bootverbose)
239
device_printf(dev, "Unable to enable the power\n");
240
return (err);
241
}
242
243
default_freq = 0;
244
err = bcm2835_mbox_get_clock_rate(sc->conf->clock_id, &default_freq);
245
if (err == 0) {
246
/* Convert to MHz */
247
default_freq /= 1000000;
248
}
249
if (default_freq == 0) {
250
node = ofw_bus_get_node(sc->sc_dev);
251
if ((OF_getencprop(node, "clock-frequency", &cell,
252
sizeof(cell))) > 0)
253
default_freq = cell / 1000000;
254
}
255
if (default_freq == 0)
256
default_freq = sc->conf->default_freq;
257
258
if (bootverbose)
259
device_printf(dev, "SDHCI frequency: %dMHz\n", default_freq);
260
#ifdef NOTYET
261
if (sc->conf->clock_src > 0) {
262
uint32_t f;
263
sc->clkman = devclass_get_device(
264
devclass_find("bcm2835_clkman"), 0);
265
if (sc->clkman == NULL) {
266
device_printf(dev, "cannot find Clock Manager\n");
267
return (ENXIO);
268
}
269
270
f = bcm2835_clkman_set_frequency(sc->clkman,
271
sc->conf->clock_src, default_freq);
272
if (f == 0)
273
return (EINVAL);
274
275
if (bootverbose)
276
device_printf(dev, "Clock source frequency: %dMHz\n",
277
f);
278
}
279
#endif
280
281
rid = 0;
282
sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
283
RF_ACTIVE);
284
if (!sc->sc_mem_res) {
285
device_printf(dev, "cannot allocate memory window\n");
286
err = ENXIO;
287
goto fail;
288
}
289
290
sc->sc_bst = rman_get_bustag(sc->sc_mem_res);
291
sc->sc_bsh = rman_get_bushandle(sc->sc_mem_res);
292
293
rid = 0;
294
sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
295
RF_ACTIVE | RF_SHAREABLE);
296
if (!sc->sc_irq_res) {
297
device_printf(dev, "cannot allocate interrupt\n");
298
err = ENXIO;
299
goto fail;
300
}
301
302
if (bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_BIO | INTR_MPSAFE,
303
NULL, bcm_sdhci_intr, sc, &sc->sc_intrhand)) {
304
device_printf(dev, "cannot setup interrupt handler\n");
305
err = ENXIO;
306
goto fail;
307
}
308
309
if (!bcm2835_sdhci_pio_mode)
310
sc->sc_slot.opt = SDHCI_PLATFORM_TRANSFER;
311
312
sc->sc_slot.caps = SDHCI_CAN_VDD_330 | SDHCI_CAN_VDD_180;
313
if (bcm2835_sdhci_hs)
314
sc->sc_slot.caps |= SDHCI_CAN_DO_HISPD;
315
sc->sc_slot.caps |= (default_freq << SDHCI_CLOCK_BASE_SHIFT);
316
sc->sc_slot.quirks = sc->conf->quirks;
317
318
sdhci_init_slot(dev, &sc->sc_slot, 0);
319
mmc_fdt_parse(dev, 0, &sc->sc_mmc_helper, &sc->sc_slot.host);
320
321
sc->sc_dma_ch = bcm_dma_allocate(BCM_DMA_CH_ANY);
322
if (sc->sc_dma_ch == BCM_DMA_CH_INVALID)
323
goto fail;
324
325
err = bcm_dma_setup_intr(sc->sc_dma_ch, bcm_sdhci_dma_intr, sc);
326
if (err != 0) {
327
device_printf(dev,
328
"cannot setup dma interrupt handler\n");
329
err = ENXIO;
330
goto fail;
331
}
332
333
/* Allocate bus_dma resources. */
334
err = bus_dma_tag_create(bus_get_dma_tag(dev),
335
1, 0, bcm283x_dmabus_peripheral_lowaddr(),
336
BUS_SPACE_MAXADDR, NULL, NULL,
337
BCM_DMA_MAXSIZE, ALLOCATED_DMA_SEGS, BCM_SDHCI_BUFFER_SIZE,
338
BUS_DMA_ALLOCNOW, NULL, NULL,
339
&sc->sc_dma_tag);
340
341
if (err) {
342
device_printf(dev, "failed allocate DMA tag");
343
goto fail;
344
}
345
346
err = bus_dmamap_create(sc->sc_dma_tag, 0, &sc->sc_dma_map);
347
if (err) {
348
device_printf(dev, "bus_dmamap_create failed\n");
349
goto fail;
350
}
351
352
/* FIXME: Fix along with other BUS_SPACE_PHYSADDR instances */
353
sc->sc_sdhci_buffer_phys = rman_get_start(sc->sc_mem_res) +
354
SDHCI_BUFFER;
355
356
bus_identify_children(dev);
357
bus_attach_children(dev);
358
359
sdhci_start_slot(&sc->sc_slot);
360
361
/* Seed our copies. */
362
sc->blksz_and_count = SDHCI_READ_4(dev, &sc->sc_slot, SDHCI_BLOCK_SIZE);
363
sc->cmd_and_mode = SDHCI_READ_4(dev, &sc->sc_slot, SDHCI_TRANSFER_MODE);
364
365
return (0);
366
367
fail:
368
if (sc->sc_intrhand)
369
bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_intrhand);
370
if (sc->sc_irq_res)
371
bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq_res);
372
if (sc->sc_mem_res)
373
bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res);
374
375
return (err);
376
}
377
378
/*
 * Detach is not supported; the boot disk typically lives here.
 */
static int
bcm_sdhci_detach(device_t dev)
{

	return (EBUSY);
}
384
385
/*
 * Controller interrupt: hand off to the generic sdhci dispatcher.
 */
static void
bcm_sdhci_intr(void *arg)
{
	struct bcm_sdhci_softc *sc = arg;

	sdhci_generic_intr(&sc->sc_slot);
}
392
393
/*
 * mmcbr update_ios: wrap the generic handler to switch the FDT-described
 * card-power regulators.  Supplies are enabled before the generic update
 * on power_up and disabled after it on power_off, so the card is never
 * clocked while unpowered.
 */
static int
bcm_sdhci_update_ios(device_t bus, device_t child)
{
	struct bcm_sdhci_softc *sc;
	struct mmc_ios *ios;
	int rv;

	sc = device_get_softc(bus);
	ios = &sc->sc_slot.host.ios;

	if (ios->power_mode == power_up) {
		if (sc->sc_mmc_helper.vmmc_supply)
			regulator_enable(sc->sc_mmc_helper.vmmc_supply);
		if (sc->sc_mmc_helper.vqmmc_supply)
			regulator_enable(sc->sc_mmc_helper.vqmmc_supply);
	}

	rv = sdhci_generic_update_ios(bus, child);
	if (rv != 0)
		return (rv);

	if (ios->power_mode == power_off) {
		if (sc->sc_mmc_helper.vmmc_supply)
			regulator_disable(sc->sc_mmc_helper.vmmc_supply);
		if (sc->sc_mmc_helper.vqmmc_supply)
			regulator_disable(sc->sc_mmc_helper.vqmmc_supply);
	}

	return (0);
}
423
424
/*
 * No write-protect signal is wired up; report writable.
 */
static int
bcm_sdhci_get_ro(device_t bus, device_t child)
{

	return (0);
}
430
431
static inline uint32_t
432
RD4(struct bcm_sdhci_softc *sc, bus_size_t off)
433
{
434
uint32_t val = bus_space_read_4(sc->sc_bst, sc->sc_bsh, off);
435
return val;
436
}
437
438
/*
 * 32-bit register write helper with the Arasan erratum workaround: after
 * each write, stall for at least two SD-card clock cycles before the next
 * register access can be issued.
 */
static inline void
WR4(struct bcm_sdhci_softc *sc, bus_size_t off, uint32_t val)
{

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, off, val);
	/*
	 * The Arasan HC has a bug where it may lose the content of
	 * consecutive writes to registers that are within two SD-card
	 * clock cycles of each other (a clock domain crossing problem).
	 */
	if (sc->sc_slot.clock > 0)
		DELAY(((2 * 1000000) / sc->sc_slot.clock) + 1);
}
451
452
/*
 * 8-bit register read, emulated: the controller only supports 32-bit
 * accesses, so read the containing word and extract the byte.
 */
static uint8_t
bcm_sdhci_read_1(device_t dev, struct sdhci_slot *slot, bus_size_t off)
{
	struct bcm_sdhci_softc *sc = device_get_softc(dev);
	uint32_t val = RD4(sc, off & ~3);

	return ((val >> (off & 3)*8) & 0xff);
}
460
461
/*
 * 16-bit register read, emulated on top of 32-bit accesses.  For the
 * block size/count and command/mode registers, return the shadow copies
 * maintained by bcm_sdhci_write_2() when they hold pending values, since
 * the hardware registers may not yet reflect them.
 */
static uint16_t
bcm_sdhci_read_2(device_t dev, struct sdhci_slot *slot, bus_size_t off)
{
	struct bcm_sdhci_softc *sc = device_get_softc(dev);
	uint32_t val32;

	/*
	 * Standard 32-bit handling of command and transfer mode, as
	 * well as block size and count.
	 */
	if ((off == SDHCI_BLOCK_SIZE || off == SDHCI_BLOCK_COUNT) &&
	    sc->need_update_blk)
		val32 = sc->blksz_and_count;
	else if (off == SDHCI_TRANSFER_MODE || off == SDHCI_COMMAND_FLAGS)
		val32 = sc->cmd_and_mode;
	else
		val32 = RD4(sc, off & ~3);

	return ((val32 >> (off & 3)*8) & 0xffff);
}
481
482
/*
 * Native 32-bit register read.
 */
static uint32_t
bcm_sdhci_read_4(device_t dev, struct sdhci_slot *slot, bus_size_t off)
{
	struct bcm_sdhci_softc *sc = device_get_softc(dev);

	return RD4(sc, off);
}
489
490
/*
 * Burst read of 'count' 32-bit words from one register (the data FIFO).
 * No WR4-style delay is needed for reads.
 */
static void
bcm_sdhci_read_multi_4(device_t dev, struct sdhci_slot *slot, bus_size_t off,
    uint32_t *data, bus_size_t count)
{
	struct bcm_sdhci_softc *sc = device_get_softc(dev);

	bus_space_read_multi_4(sc->sc_bst, sc->sc_bsh, off, data, count);
}
498
499
/*
 * 8-bit register write, emulated via read-modify-write of the containing
 * 32-bit word.
 */
static void
bcm_sdhci_write_1(device_t dev, struct sdhci_slot *slot, bus_size_t off,
    uint8_t val)
{
	struct bcm_sdhci_softc *sc = device_get_softc(dev);
	uint32_t val32 = RD4(sc, off & ~3);
	val32 &= ~(0xff << (off & 3)*8);
	val32 |= (val << (off & 3)*8);
	WR4(sc, off & ~3, val32);
}
509
510
/*
 * 16-bit register write, emulated via 32-bit accesses with shadowing so
 * that paired 16-bit registers sharing one 32-bit word are not corrupted
 * and the command register write (which triggers command issue) carries
 * the correct transfer mode and block settings.
 */
static void
bcm_sdhci_write_2(device_t dev, struct sdhci_slot *slot, bus_size_t off,
    uint16_t val)
{
	struct bcm_sdhci_softc *sc = device_get_softc(dev);
	uint32_t val32;

	/*
	 * If we have a queued up 16bit value for blk size or count, use and
	 * update the saved value rather than doing any real register access.
	 * If we did not touch either since the last write, then read from
	 * register as at least block count can change.
	 * Similarly, if we are about to issue a command, always use the saved
	 * value for transfer mode as we can never write that without issuing
	 * a command.
	 */
	if ((off == SDHCI_BLOCK_SIZE || off == SDHCI_BLOCK_COUNT) &&
	    sc->need_update_blk)
		val32 = sc->blksz_and_count;
	else if (off == SDHCI_COMMAND_FLAGS)
		val32 = sc->cmd_and_mode;
	else
		val32 = RD4(sc, off & ~3);

	/* Merge the new 16-bit value into its half of the 32-bit word. */
	val32 &= ~(0xffff << (off & 3)*8);
	val32 |= (val << (off & 3)*8);

	if (off == SDHCI_TRANSFER_MODE)
		/* Defer: written to hardware with the command itself. */
		sc->cmd_and_mode = val32;
	else if (off == SDHCI_BLOCK_SIZE || off == SDHCI_BLOCK_COUNT) {
		sc->blksz_and_count = val32;
		sc->need_update_blk = true;
	} else {
		if (off == SDHCI_COMMAND_FLAGS) {
			/* If we saved blk writes, do them now before cmd. */
			if (sc->need_update_blk) {
				WR4(sc, SDHCI_BLOCK_SIZE, sc->blksz_and_count);
				sc->need_update_blk = false;
			}
			/* Always save cmd and mode registers. */
			sc->cmd_and_mode = val32;
		}
		WR4(sc, off & ~3, val32);
	}
}
555
556
/*
 * Native 32-bit register write (with the WR4 erratum delay).
 */
static void
bcm_sdhci_write_4(device_t dev, struct sdhci_slot *slot, bus_size_t off,
    uint32_t val)
{
	struct bcm_sdhci_softc *sc = device_get_softc(dev);
	WR4(sc, off, val);
}
563
564
/*
 * Burst write of 'count' 32-bit words to one register (the data FIFO).
 * Bypasses WR4; the FIFO is paced by the controller, not the erratum.
 */
static void
bcm_sdhci_write_multi_4(device_t dev, struct sdhci_slot *slot, bus_size_t off,
    uint32_t *data, bus_size_t count)
{
	struct bcm_sdhci_softc *sc = device_get_softc(dev);

	bus_space_write_multi_4(sc->sc_bst, sc->sc_bsh, off, data, count);
}
572
573
/*
 * Program and start the DMA channel for the next segment of the loaded
 * map, advancing dmamap_seg_index and the slot's data offset.  Called
 * with the slot mutex held, first from bcm_sdhci_start_dma() and then
 * from the DMA completion interrupt for each remaining segment.
 */
static void
bcm_sdhci_start_dma_seg(struct bcm_sdhci_softc *sc)
{
	struct sdhci_slot *slot;
	vm_paddr_t pdst, psrc;
	int err __diagused, idx, len, sync_op, width;

	slot = &sc->sc_slot;
	mtx_assert(&slot->mtx, MA_OWNED);
	idx = sc->dmamap_seg_index++;
	len = sc->dmamap_seg_sizes[idx];
	slot->offset += len;
	/* Use wide (128-bit) bursts only for nicely aligned lengths. */
	width = (len & 0xf ? BCM_DMA_32BIT : BCM_DMA_128BIT);

	if (slot->curcmd->data->flags & MMC_DATA_READ) {
		/*
		 * Peripherals on the AXI bus do not need DREQ pacing for reads
		 * from the ARM core, so we can safely set this to NONE.
		 */
		bcm_dma_setup_src(sc->sc_dma_ch, BCM_DMA_DREQ_NONE,
		    BCM_DMA_SAME_ADDR, BCM_DMA_32BIT);
		bcm_dma_setup_dst(sc->sc_dma_ch, BCM_DMA_DREQ_NONE,
		    BCM_DMA_INC_ADDR, width);
		psrc = sc->sc_sdhci_buffer_phys;
		pdst = sc->dmamap_seg_addrs[idx];
		sync_op = BUS_DMASYNC_PREREAD;
	} else {
		/*
		 * The ordering here is important, because the last write to
		 * dst/src in the dma control block writes the real dreq value.
		 */
		bcm_dma_setup_src(sc->sc_dma_ch, BCM_DMA_DREQ_NONE,
		    BCM_DMA_INC_ADDR, width);
		bcm_dma_setup_dst(sc->sc_dma_ch, sc->conf->emmc_dreq,
		    BCM_DMA_SAME_ADDR, BCM_DMA_32BIT);
		psrc = sc->dmamap_seg_addrs[idx];
		pdst = sc->sc_sdhci_buffer_phys;
		sync_op = BUS_DMASYNC_PREWRITE;
	}

	/*
	 * When starting a new DMA operation do the busdma sync operation, and
	 * disable SDCHI data interrupts because we'll be driven by DMA
	 * interrupts (or SDHCI error interrupts) until the IO is done.
	 */
	if (idx == 0) {
		bus_dmamap_sync(sc->sc_dma_tag, sc->sc_dma_map, sync_op);

		slot->intmask &= ~DATA_XFER_MASK;
		bcm_sdhci_write_4(sc->sc_dev, slot, SDHCI_SIGNAL_ENABLE,
		    slot->intmask);
	}

	/*
	 * Start the DMA transfer. Only programming errors (like failing to
	 * allocate a channel) cause a non-zero return from bcm_dma_start().
	 */
	err = bcm_dma_start(sc->sc_dma_ch, psrc, pdst, len);
	KASSERT((err == 0), ("bcm2835_sdhci: failed DMA start"));
}
633
634
/*
 * Leave DMA mode: re-enable the SDHCI data interrupts that
 * bcm_sdhci_start_dma_seg() masked off.  Slot mutex must be held.
 */
static void
bcm_sdhci_dma_exit(struct bcm_sdhci_softc *sc)
{
	struct sdhci_slot *slot = &sc->sc_slot;

	mtx_assert(&slot->mtx, MA_OWNED);

	/* Re-enable interrupts */
	slot->intmask |= DATA_XFER_MASK;
	bcm_sdhci_write_4(slot->bus, slot, SDHCI_SIGNAL_ENABLE,
	    slot->intmask);
}
646
647
/*
 * Finish with the current DMA mapping: post-sync according to the
 * transfer direction, unload the map, and reset the segment bookkeeping.
 * No-op when nothing is mapped.
 */
static void
bcm_sdhci_dma_unload(struct bcm_sdhci_softc *sc)
{
	struct sdhci_slot *slot = &sc->sc_slot;

	if (sc->dmamap_seg_count == 0)
		return;
	if ((slot->curcmd->data->flags & MMC_DATA_READ) != 0)
		bus_dmamap_sync(sc->sc_dma_tag, sc->sc_dma_map,
		    BUS_DMASYNC_POSTREAD);
	else
		bus_dmamap_sync(sc->sc_dma_tag, sc->sc_dma_map,
		    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dma_tag, sc->sc_dma_map);

	sc->dmamap_seg_count = 0;
	sc->dmamap_seg_index = 0;
}
665
666
/*
 * DMA channel completion interrupt.  Either start the mapping's next
 * segment, or unload the map and decide whether to begin another DMA
 * round, hand the finished data back to the sdhci framework, or simply
 * drop back to interrupt-driven operation.
 */
static void
bcm_sdhci_dma_intr(int ch, void *arg)
{
	struct bcm_sdhci_softc *sc = (struct bcm_sdhci_softc *)arg;
	struct sdhci_slot *slot = &sc->sc_slot;
	uint32_t reg;

	mtx_lock(&slot->mtx);
	/* Command may have been aborted/errored out from under us. */
	if (slot->curcmd == NULL)
		goto out;
	/*
	 * If there are more segments for the current dma, start the next one.
	 * Otherwise unload the dma map and decide what to do next based on the
	 * status of the sdhci controller and whether there's more data left.
	 */
	if (sc->dmamap_seg_index < sc->dmamap_seg_count) {
		bcm_sdhci_start_dma_seg(sc);
		goto out;
	}

	bcm_sdhci_dma_unload(sc);

	/*
	 * If we had no further segments pending, we need to determine how to
	 * proceed next. If the 'data/space pending' bit is already set and we
	 * can continue via DMA, do so. Otherwise, re-enable interrupts and
	 * return.
	 */
	reg = bcm_sdhci_read_4(slot->bus, slot, SDHCI_INT_STATUS) &
	    DATA_XFER_MASK;
	if ((reg & DATA_PENDING_MASK) != 0 &&
	    BCM_SDHCI_SEGSZ_LEFT(slot) >= BCM_SDHCI_BUFFER_SIZE) {
		/* ACK any pending interrupts */
		bcm_sdhci_write_4(slot->bus, slot, SDHCI_INT_STATUS,
		    DATA_PENDING_MASK);

		bcm_sdhci_start_dma(slot);
		if (slot->curcmd->error != 0) {
			/* We won't recover from this error for this command. */
			bcm_sdhci_dma_unload(sc);
			bcm_sdhci_dma_exit(sc);
			sdhci_finish_data(slot);
		}
	} else if ((reg & SDHCI_INT_DATA_END) != 0) {
		/* Transfer complete: ack status and finish the data phase. */
		bcm_sdhci_dma_exit(sc);
		bcm_sdhci_write_4(slot->bus, slot, SDHCI_INT_STATUS,
		    reg);
		slot->flags &= ~PLATFORM_DATA_STARTED;
		sdhci_finish_data(slot);
	} else {
		/* Remaining tail is handled by SDHCI-driven PIO. */
		bcm_sdhci_dma_exit(sc);
	}
out:
	mtx_unlock(&slot->mtx);
}
721
722
/*
 * Begin a DMA round for the current command: map the next whole-buffer
 * chunk of the data buffer and kick off its first segment.  Called with
 * the slot mutex held.  On mapping failure the command is flagged with
 * MMC_ERR_NO_MEMORY and no DMA is started.
 */
static void
bcm_sdhci_start_dma(struct sdhci_slot *slot)
{
	struct bcm_sdhci_softc *sc = device_get_softc(slot->bus);
	uint8_t *buf;
	size_t left;

	mtx_assert(&slot->mtx, MA_OWNED);

	left = BCM_SDHCI_SEGSZ_LEFT(slot);
	buf = (uint8_t *)slot->curcmd->data->data + slot->offset;
	KASSERT(left != 0,
	    ("%s: DMA handling incorrectly indicated", __func__));

	/*
	 * No need to check segment count here; if we've not yet unloaded
	 * previous segments, we'll catch that in bcm_sdhci_dmacb.
	 */
	if (bus_dmamap_load(sc->sc_dma_tag, sc->sc_dma_map, buf, left,
	    bcm_sdhci_dmacb, sc, BUS_DMA_NOWAIT) != 0 ||
	    sc->dmamap_status != 0) {
		slot->curcmd->error = MMC_ERR_NO_MEMORY;
		return;
	}

	/* DMA start */
	bcm_sdhci_start_dma_seg(sc);
}
750
751
/*
 * sdhci platform hook: report whether this driver will take over the
 * data transfer via DMA (1) or leave it to SDHCI-driven PIO (0).
 */
static int
bcm_sdhci_will_handle_transfer(device_t dev, struct sdhci_slot *slot)
{
#ifdef INVARIANTS
	struct bcm_sdhci_softc *sc = device_get_softc(slot->bus);
#endif

	/*
	 * We don't want to perform DMA in this context -- interrupts are
	 * disabled, and a transaction may already be in progress.
	 */
	if (dumping)
		return (0);

	/*
	 * This indicates that we somehow let a data interrupt slip by into the
	 * SDHCI framework, when it should not have. This really needs to be
	 * caught and fixed ASAP, as it really shouldn't happen.
	 */
	KASSERT(sc->dmamap_seg_count == 0,
	    ("data pending interrupt pushed through SDHCI framework"));

	/*
	 * Do not use DMA for transfers less than our block size. Checking
	 * alignment serves little benefit, as we round transfer sizes down to
	 * a multiple of the block size and push the transfer back to
	 * SDHCI-driven PIO once we're below the block size.
	 */
	if (BCM_SDHCI_SEGSZ_LEFT(slot) < BCM_DMA_BLOCK_SIZE)
		return (0);

	return (1);
}
784
785
/*
 * sdhci platform hook: begin the transfer we claimed in
 * bcm_sdhci_will_handle_transfer().
 */
static void
bcm_sdhci_start_transfer(device_t dev, struct sdhci_slot *slot,
    uint32_t *intmask)
{

	/* DMA transfer FIFO 1KB */
	bcm_sdhci_start_dma(slot);
}
793
794
/*
 * sdhci platform hook: the framework saw SDHCI_INT_DATA_END; release any
 * leftover DMA state and complete the data phase.
 */
static void
bcm_sdhci_finish_transfer(device_t dev, struct sdhci_slot *slot)
{
	struct bcm_sdhci_softc *sc = device_get_softc(slot->bus);

	/*
	 * Clean up. Interrupts are clearly enabled, because we received an
	 * SDHCI_INT_DATA_END to get this far -- just make sure we don't leave
	 * anything laying around.
	 */
	if (sc->dmamap_seg_count != 0) {
		/*
		 * Our segment math should have worked out such that we would
		 * never finish the transfer without having used up all of the
		 * segments. If we haven't, that means we must have erroneously
		 * regressed to SDHCI-driven PIO to finish the operation and
		 * this is certainly caused by developer-error.
		 */
		bcm_sdhci_dma_unload(sc);
	}

	sdhci_finish_data(slot);
}
817
818
/* newbus method table: device, bus, mmcbr, and sdhci accessor interfaces. */
static device_method_t bcm_sdhci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bcm_sdhci_probe),
	DEVMETHOD(device_attach,	bcm_sdhci_attach),
	DEVMETHOD(device_detach,	bcm_sdhci_detach),

	/* Bus interface */
	DEVMETHOD(bus_read_ivar,	sdhci_generic_read_ivar),
	DEVMETHOD(bus_write_ivar,	sdhci_generic_write_ivar),
	DEVMETHOD(bus_add_child,	bus_generic_add_child),

	/* MMC bridge interface */
	DEVMETHOD(mmcbr_update_ios,	bcm_sdhci_update_ios),
	DEVMETHOD(mmcbr_request,	sdhci_generic_request),
	DEVMETHOD(mmcbr_get_ro,		bcm_sdhci_get_ro),
	DEVMETHOD(mmcbr_acquire_host,	sdhci_generic_acquire_host),
	DEVMETHOD(mmcbr_release_host,	sdhci_generic_release_host),

	/* Platform transfer methods */
	DEVMETHOD(sdhci_platform_will_handle,	bcm_sdhci_will_handle_transfer),
	DEVMETHOD(sdhci_platform_start_transfer, bcm_sdhci_start_transfer),
	DEVMETHOD(sdhci_platform_finish_transfer, bcm_sdhci_finish_transfer),
	/* SDHCI registers accessors */
	DEVMETHOD(sdhci_read_1,		bcm_sdhci_read_1),
	DEVMETHOD(sdhci_read_2,		bcm_sdhci_read_2),
	DEVMETHOD(sdhci_read_4,		bcm_sdhci_read_4),
	DEVMETHOD(sdhci_read_multi_4,	bcm_sdhci_read_multi_4),
	DEVMETHOD(sdhci_write_1,	bcm_sdhci_write_1),
	DEVMETHOD(sdhci_write_2,	bcm_sdhci_write_2),
	DEVMETHOD(sdhci_write_4,	bcm_sdhci_write_4),
	DEVMETHOD(sdhci_write_multi_4,	bcm_sdhci_write_multi_4),

	DEVMETHOD_END
};
852
853
static driver_t bcm_sdhci_driver = {
	"sdhci_bcm",
	bcm_sdhci_methods,
	sizeof(struct bcm_sdhci_softc),
};

/* Register on simplebus; depend on the generic sdhci layer, and on the
 * mmc stack when not using MMCCAM. */
DRIVER_MODULE(sdhci_bcm, simplebus, bcm_sdhci_driver, NULL, NULL);
#ifdef NOTYET
MODULE_DEPEND(sdhci_bcm, bcm2835_clkman, 1, 1, 1);
#endif
SDHCI_DEPEND(sdhci_bcm);
#ifndef MMCCAM
MMC_DECLARE_BRIDGE(sdhci_bcm);
#endif
867
868