Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/sound/soc/apple/mca.c
26436 views
1
// SPDX-License-Identifier: GPL-2.0-only
2
//
3
// Apple SoCs MCA driver
4
//
5
// Copyright (C) The Asahi Linux Contributors
6
//
7
// The MCA peripheral is made up of a number of identical units called clusters.
8
// Each cluster has its separate clock parent, SYNC signal generator, carries
9
// four SERDES units and has a dedicated I2S port on the SoC's periphery.
10
//
11
// The clusters can operate independently, or can be combined together in a
12
// configurable manner. We mostly treat them as self-contained independent
13
// units and don't configure any cross-cluster connections except for the I2S
14
// ports. The I2S ports can be routed to any of the clusters (irrespective
15
// of their native cluster). We map this onto ASoC's (DPCM) notion of backend
16
// and frontend DAIs. The 'cluster guts' are frontends which are dynamically
17
// routed to backend I2S ports.
18
//
19
// DAI references in devicetree are resolved to backends. The routing between
20
// frontends and backends is determined by the machine driver in the DAPM paths
21
// it supplies.
22
23
#include <linux/bitfield.h>
24
#include <linux/clk.h>
25
#include <linux/dma-mapping.h>
26
#include <linux/init.h>
27
#include <linux/kernel.h>
28
#include <linux/module.h>
29
#include <linux/of.h>
30
#include <linux/of_clk.h>
31
#include <linux/of_dma.h>
32
#include <linux/platform_device.h>
33
#include <linux/pm_domain.h>
34
#include <linux/regmap.h>
35
#include <linux/reset.h>
36
#include <linux/slab.h>
37
38
#include <sound/core.h>
39
#include <sound/pcm.h>
40
#include <sound/pcm_params.h>
41
#include <sound/soc.h>
42
#include <sound/dmaengine_pcm.h>
43
44
#define USE_RXB_FOR_CAPTURE
45
46
/* Relative to cluster base */
47
#define REG_STATUS 0x0
48
#define STATUS_MCLK_EN BIT(0)
49
#define REG_MCLK_CONF 0x4
50
#define MCLK_CONF_DIV GENMASK(11, 8)
51
52
#define REG_SYNCGEN_STATUS 0x100
53
#define SYNCGEN_STATUS_EN BIT(0)
54
#define REG_SYNCGEN_MCLK_SEL 0x104
55
#define SYNCGEN_MCLK_SEL GENMASK(3, 0)
56
#define REG_SYNCGEN_HI_PERIOD 0x108
57
#define REG_SYNCGEN_LO_PERIOD 0x10c
58
59
#define REG_PORT_ENABLES 0x600
60
#define PORT_ENABLES_CLOCKS GENMASK(2, 1)
61
#define PORT_ENABLES_TX_DATA BIT(3)
62
#define REG_PORT_CLOCK_SEL 0x604
63
#define PORT_CLOCK_SEL GENMASK(11, 8)
64
#define REG_PORT_DATA_SEL 0x608
65
#define PORT_DATA_SEL_TXA(cl) (1 << ((cl)*2))
66
#define PORT_DATA_SEL_TXB(cl) (2 << ((cl)*2))
67
68
#define REG_INTSTATE 0x700
69
#define REG_INTMASK 0x704
70
71
/* Bases of serdes units (relative to cluster) */
72
#define CLUSTER_RXA_OFF 0x200
73
#define CLUSTER_TXA_OFF 0x300
74
#define CLUSTER_RXB_OFF 0x400
75
#define CLUSTER_TXB_OFF 0x500
76
77
#define CLUSTER_TX_OFF CLUSTER_TXA_OFF
78
79
#ifndef USE_RXB_FOR_CAPTURE
80
#define CLUSTER_RX_OFF CLUSTER_RXA_OFF
81
#else
82
#define CLUSTER_RX_OFF CLUSTER_RXB_OFF
83
#endif
84
85
/* Relative to serdes unit base */
86
#define REG_SERDES_STATUS 0x00
87
#define SERDES_STATUS_EN BIT(0)
88
#define SERDES_STATUS_RST BIT(1)
89
#define REG_TX_SERDES_CONF 0x04
90
#define REG_RX_SERDES_CONF 0x08
91
#define SERDES_CONF_NCHANS GENMASK(3, 0)
92
#define SERDES_CONF_WIDTH_MASK GENMASK(8, 4)
93
#define SERDES_CONF_WIDTH_16BIT 0x40
94
#define SERDES_CONF_WIDTH_20BIT 0x80
95
#define SERDES_CONF_WIDTH_24BIT 0xc0
96
#define SERDES_CONF_WIDTH_32BIT 0x100
97
#define SERDES_CONF_BCLK_POL 0x400
98
#define SERDES_CONF_LSB_FIRST 0x800
99
#define SERDES_CONF_UNK1 BIT(12)
100
#define SERDES_CONF_UNK2 BIT(13)
101
#define SERDES_CONF_UNK3 BIT(14)
102
#define SERDES_CONF_NO_DATA_FEEDBACK BIT(15)
103
#define SERDES_CONF_SYNC_SEL GENMASK(18, 16)
104
#define REG_TX_SERDES_BITSTART 0x08
105
#define REG_RX_SERDES_BITSTART 0x0c
106
#define REG_TX_SERDES_SLOTMASK 0x0c
107
#define REG_RX_SERDES_SLOTMASK 0x10
108
#define REG_RX_SERDES_PORT 0x04
109
110
/* Relative to switch base */
111
#define REG_DMA_ADAPTER_A(cl) (0x8000 * (cl))
112
#define REG_DMA_ADAPTER_B(cl) (0x8000 * (cl) + 0x4000)
113
#define DMA_ADAPTER_TX_LSB_PAD GENMASK(4, 0)
114
#define DMA_ADAPTER_TX_NCHANS GENMASK(6, 5)
115
#define DMA_ADAPTER_RX_MSB_PAD GENMASK(12, 8)
116
#define DMA_ADAPTER_RX_NCHANS GENMASK(14, 13)
117
#define DMA_ADAPTER_NCHANS GENMASK(22, 20)
118
119
#define SWITCH_STRIDE 0x8000
120
#define CLUSTER_STRIDE 0x4000
121
122
#define MAX_NCLUSTERS 6
123
124
#define APPLE_MCA_FMTBITS (SNDRV_PCM_FMTBIT_S16_LE | \
125
SNDRV_PCM_FMTBIT_S24_LE | \
126
SNDRV_PCM_FMTBIT_S32_LE)
127
128
struct mca_cluster {
129
int no;
130
__iomem void *base;
131
struct mca_data *host;
132
struct device *pd_dev;
133
struct clk *clk_parent;
134
struct dma_chan *dma_chans[SNDRV_PCM_STREAM_LAST + 1];
135
136
bool port_started[SNDRV_PCM_STREAM_LAST + 1];
137
int port_driver; /* The cluster driving this cluster's port */
138
139
bool clocks_in_use[SNDRV_PCM_STREAM_LAST + 1];
140
struct device_link *pd_link;
141
142
unsigned int bclk_ratio;
143
144
/* Masks etc. picked up via the set_tdm_slot method */
145
int tdm_slots;
146
int tdm_slot_width;
147
unsigned int tdm_tx_mask;
148
unsigned int tdm_rx_mask;
149
};
150
151
struct mca_data {
152
struct device *dev;
153
154
__iomem void *switch_base;
155
156
struct device *pd_dev;
157
struct reset_control *rstc;
158
struct device_link *pd_link;
159
160
/* Mutex for accessing port_driver of foreign clusters */
161
struct mutex port_mutex;
162
163
int nclusters;
164
struct mca_cluster clusters[] __counted_by(nclusters);
165
};
166
167
static void mca_modify(struct mca_cluster *cl, int regoffset, u32 mask, u32 val)
168
{
169
__iomem void *ptr = cl->base + regoffset;
170
u32 newval;
171
172
newval = (val & mask) | (readl_relaxed(ptr) & ~mask);
173
writel_relaxed(newval, ptr);
174
}
175
176
/*
177
* Get the cluster of FE or BE DAI
178
*/
179
static struct mca_cluster *mca_dai_to_cluster(struct snd_soc_dai *dai)
180
{
181
struct mca_data *mca = snd_soc_dai_get_drvdata(dai);
182
/*
183
* FE DAIs are 0 ... nclusters - 1
184
* BE DAIs are nclusters ... 2*nclusters - 1
185
*/
186
int cluster_no = dai->id % mca->nclusters;
187
188
return &mca->clusters[cluster_no];
189
}
190
191
/* called before PCM trigger */
192
static void mca_fe_early_trigger(struct snd_pcm_substream *substream, int cmd,
193
struct snd_soc_dai *dai)
194
{
195
struct mca_cluster *cl = mca_dai_to_cluster(dai);
196
bool is_tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
197
int serdes_unit = is_tx ? CLUSTER_TX_OFF : CLUSTER_RX_OFF;
198
int serdes_conf =
199
serdes_unit + (is_tx ? REG_TX_SERDES_CONF : REG_RX_SERDES_CONF);
200
201
switch (cmd) {
202
case SNDRV_PCM_TRIGGER_START:
203
case SNDRV_PCM_TRIGGER_RESUME:
204
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
205
mca_modify(cl, serdes_conf, SERDES_CONF_SYNC_SEL,
206
FIELD_PREP(SERDES_CONF_SYNC_SEL, 0));
207
mca_modify(cl, serdes_conf, SERDES_CONF_SYNC_SEL,
208
FIELD_PREP(SERDES_CONF_SYNC_SEL, 7));
209
mca_modify(cl, serdes_unit + REG_SERDES_STATUS,
210
SERDES_STATUS_EN | SERDES_STATUS_RST,
211
SERDES_STATUS_RST);
212
/*
213
* Experiments suggest that it takes at most ~1 us
214
* for the bit to clear, so wait 2 us for good measure.
215
*/
216
udelay(2);
217
WARN_ON(readl_relaxed(cl->base + serdes_unit + REG_SERDES_STATUS) &
218
SERDES_STATUS_RST);
219
mca_modify(cl, serdes_conf, SERDES_CONF_SYNC_SEL,
220
FIELD_PREP(SERDES_CONF_SYNC_SEL, 0));
221
mca_modify(cl, serdes_conf, SERDES_CONF_SYNC_SEL,
222
FIELD_PREP(SERDES_CONF_SYNC_SEL, cl->no + 1));
223
break;
224
default:
225
break;
226
}
227
}
228
229
static int mca_fe_trigger(struct snd_pcm_substream *substream, int cmd,
230
struct snd_soc_dai *dai)
231
{
232
struct mca_cluster *cl = mca_dai_to_cluster(dai);
233
bool is_tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
234
int serdes_unit = is_tx ? CLUSTER_TX_OFF : CLUSTER_RX_OFF;
235
236
switch (cmd) {
237
case SNDRV_PCM_TRIGGER_START:
238
case SNDRV_PCM_TRIGGER_RESUME:
239
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
240
mca_modify(cl, serdes_unit + REG_SERDES_STATUS,
241
SERDES_STATUS_EN | SERDES_STATUS_RST,
242
SERDES_STATUS_EN);
243
break;
244
245
case SNDRV_PCM_TRIGGER_STOP:
246
case SNDRV_PCM_TRIGGER_SUSPEND:
247
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
248
mca_modify(cl, serdes_unit + REG_SERDES_STATUS,
249
SERDES_STATUS_EN, 0);
250
break;
251
252
default:
253
return -EINVAL;
254
}
255
256
return 0;
257
}
258
259
static int mca_fe_enable_clocks(struct mca_cluster *cl)
260
{
261
struct mca_data *mca = cl->host;
262
int ret;
263
264
ret = clk_prepare_enable(cl->clk_parent);
265
if (ret) {
266
dev_err(mca->dev,
267
"cluster %d: unable to enable clock parent: %d\n",
268
cl->no, ret);
269
return ret;
270
}
271
272
/*
273
* We can't power up the device earlier than this because
274
* the power state driver would error out on seeing the device
275
* as clock-gated.
276
*/
277
cl->pd_link = device_link_add(mca->dev, cl->pd_dev,
278
DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME |
279
DL_FLAG_RPM_ACTIVE);
280
if (!cl->pd_link) {
281
dev_err(mca->dev,
282
"cluster %d: unable to prop-up power domain\n", cl->no);
283
clk_disable_unprepare(cl->clk_parent);
284
return -EINVAL;
285
}
286
287
writel_relaxed(cl->no + 1, cl->base + REG_SYNCGEN_MCLK_SEL);
288
mca_modify(cl, REG_SYNCGEN_STATUS, SYNCGEN_STATUS_EN,
289
SYNCGEN_STATUS_EN);
290
mca_modify(cl, REG_STATUS, STATUS_MCLK_EN, STATUS_MCLK_EN);
291
292
return 0;
293
}
294
295
static void mca_fe_disable_clocks(struct mca_cluster *cl)
296
{
297
mca_modify(cl, REG_SYNCGEN_STATUS, SYNCGEN_STATUS_EN, 0);
298
mca_modify(cl, REG_STATUS, STATUS_MCLK_EN, 0);
299
300
device_link_del(cl->pd_link);
301
clk_disable_unprepare(cl->clk_parent);
302
}
303
304
static bool mca_fe_clocks_in_use(struct mca_cluster *cl)
305
{
306
struct mca_data *mca = cl->host;
307
struct mca_cluster *be_cl;
308
int stream, i;
309
310
mutex_lock(&mca->port_mutex);
311
for (i = 0; i < mca->nclusters; i++) {
312
be_cl = &mca->clusters[i];
313
314
if (be_cl->port_driver != cl->no)
315
continue;
316
317
for_each_pcm_streams(stream) {
318
if (be_cl->clocks_in_use[stream]) {
319
mutex_unlock(&mca->port_mutex);
320
return true;
321
}
322
}
323
}
324
mutex_unlock(&mca->port_mutex);
325
return false;
326
}
327
328
static int mca_be_prepare(struct snd_pcm_substream *substream,
329
struct snd_soc_dai *dai)
330
{
331
struct mca_cluster *cl = mca_dai_to_cluster(dai);
332
struct mca_data *mca = cl->host;
333
struct mca_cluster *fe_cl;
334
int ret;
335
336
if (cl->port_driver < 0)
337
return -EINVAL;
338
339
fe_cl = &mca->clusters[cl->port_driver];
340
341
/*
342
* Typically the CODECs we are paired with will require clocks
343
* to be present at time of unmute with the 'mute_stream' op
344
* or at time of DAPM widget power-up. We need to enable clocks
345
* here at the latest (frontend prepare would be too late).
346
*/
347
if (!mca_fe_clocks_in_use(fe_cl)) {
348
ret = mca_fe_enable_clocks(fe_cl);
349
if (ret < 0)
350
return ret;
351
}
352
353
cl->clocks_in_use[substream->stream] = true;
354
355
return 0;
356
}
357
358
static int mca_be_hw_free(struct snd_pcm_substream *substream,
359
struct snd_soc_dai *dai)
360
{
361
struct mca_cluster *cl = mca_dai_to_cluster(dai);
362
struct mca_data *mca = cl->host;
363
struct mca_cluster *fe_cl;
364
365
if (cl->port_driver < 0)
366
return -EINVAL;
367
368
/*
369
* We are operating on a foreign cluster here, but since we
370
* belong to the same PCM, accesses should have been
371
* synchronized at ASoC level.
372
*/
373
fe_cl = &mca->clusters[cl->port_driver];
374
if (!mca_fe_clocks_in_use(fe_cl))
375
return 0; /* Nothing to do */
376
377
cl->clocks_in_use[substream->stream] = false;
378
379
if (!mca_fe_clocks_in_use(fe_cl))
380
mca_fe_disable_clocks(fe_cl);
381
382
return 0;
383
}
384
385
static unsigned int mca_crop_mask(unsigned int mask, int nchans)
386
{
387
while (hweight32(mask) > nchans)
388
mask &= ~(1 << __fls(mask));
389
390
return mask;
391
}
392
393
static int mca_configure_serdes(struct mca_cluster *cl, int serdes_unit,
394
unsigned int mask, int slots, int nchans,
395
int slot_width, bool is_tx, int port)
396
{
397
__iomem void *serdes_base = cl->base + serdes_unit;
398
u32 serdes_conf, serdes_conf_mask;
399
400
serdes_conf_mask = SERDES_CONF_WIDTH_MASK | SERDES_CONF_NCHANS;
401
serdes_conf = FIELD_PREP(SERDES_CONF_NCHANS, max(slots, 1) - 1);
402
switch (slot_width) {
403
case 16:
404
serdes_conf |= SERDES_CONF_WIDTH_16BIT;
405
break;
406
case 20:
407
serdes_conf |= SERDES_CONF_WIDTH_20BIT;
408
break;
409
case 24:
410
serdes_conf |= SERDES_CONF_WIDTH_24BIT;
411
break;
412
case 32:
413
serdes_conf |= SERDES_CONF_WIDTH_32BIT;
414
break;
415
default:
416
goto err;
417
}
418
419
serdes_conf_mask |= SERDES_CONF_SYNC_SEL;
420
serdes_conf |= FIELD_PREP(SERDES_CONF_SYNC_SEL, cl->no + 1);
421
422
if (is_tx) {
423
serdes_conf_mask |= SERDES_CONF_UNK1 | SERDES_CONF_UNK2 |
424
SERDES_CONF_UNK3;
425
serdes_conf |= SERDES_CONF_UNK1 | SERDES_CONF_UNK2 |
426
SERDES_CONF_UNK3;
427
} else {
428
serdes_conf_mask |= SERDES_CONF_UNK1 | SERDES_CONF_UNK2 |
429
SERDES_CONF_UNK3 |
430
SERDES_CONF_NO_DATA_FEEDBACK;
431
serdes_conf |= SERDES_CONF_UNK1 | SERDES_CONF_UNK2 |
432
SERDES_CONF_NO_DATA_FEEDBACK;
433
}
434
435
mca_modify(cl,
436
serdes_unit +
437
(is_tx ? REG_TX_SERDES_CONF : REG_RX_SERDES_CONF),
438
serdes_conf_mask, serdes_conf);
439
440
if (is_tx) {
441
writel_relaxed(0xffffffff,
442
serdes_base + REG_TX_SERDES_SLOTMASK);
443
writel_relaxed(~((u32)mca_crop_mask(mask, nchans)),
444
serdes_base + REG_TX_SERDES_SLOTMASK + 0x4);
445
writel_relaxed(0xffffffff,
446
serdes_base + REG_TX_SERDES_SLOTMASK + 0x8);
447
writel_relaxed(~((u32)mask),
448
serdes_base + REG_TX_SERDES_SLOTMASK + 0xc);
449
} else {
450
writel_relaxed(0xffffffff,
451
serdes_base + REG_RX_SERDES_SLOTMASK);
452
writel_relaxed(~((u32)mca_crop_mask(mask, nchans)),
453
serdes_base + REG_RX_SERDES_SLOTMASK + 0x4);
454
writel_relaxed(1 << port,
455
serdes_base + REG_RX_SERDES_PORT);
456
}
457
458
return 0;
459
460
err:
461
dev_err(cl->host->dev,
462
"unsupported SERDES configuration requested (mask=0x%x slots=%d slot_width=%d)\n",
463
mask, slots, slot_width);
464
return -EINVAL;
465
}
466
467
static int mca_fe_startup(struct snd_pcm_substream *substream,
468
struct snd_soc_dai *dai)
469
{
470
struct mca_cluster *cl = mca_dai_to_cluster(dai);
471
unsigned int mask, nchannels;
472
473
if (cl->tdm_slots) {
474
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
475
mask = cl->tdm_tx_mask;
476
else
477
mask = cl->tdm_rx_mask;
478
479
nchannels = hweight32(mask);
480
} else {
481
nchannels = 2;
482
}
483
484
return snd_pcm_hw_constraint_minmax(substream->runtime,
485
SNDRV_PCM_HW_PARAM_CHANNELS,
486
1, nchannels);
487
}
488
489
static int mca_fe_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
490
unsigned int rx_mask, int slots, int slot_width)
491
{
492
struct mca_cluster *cl = mca_dai_to_cluster(dai);
493
494
cl->tdm_slots = slots;
495
cl->tdm_slot_width = slot_width;
496
cl->tdm_tx_mask = tx_mask;
497
cl->tdm_rx_mask = rx_mask;
498
499
return 0;
500
}
501
502
static int mca_fe_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
503
{
504
struct mca_cluster *cl = mca_dai_to_cluster(dai);
505
struct mca_data *mca = cl->host;
506
bool fpol_inv = false;
507
u32 serdes_conf = 0;
508
u32 bitstart;
509
510
if ((fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) !=
511
SND_SOC_DAIFMT_BP_FP)
512
goto err;
513
514
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
515
case SND_SOC_DAIFMT_I2S:
516
fpol_inv = 0;
517
bitstart = 1;
518
break;
519
case SND_SOC_DAIFMT_LEFT_J:
520
fpol_inv = 1;
521
bitstart = 0;
522
break;
523
default:
524
goto err;
525
}
526
527
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
528
case SND_SOC_DAIFMT_NB_IF:
529
case SND_SOC_DAIFMT_IB_IF:
530
fpol_inv ^= 1;
531
break;
532
}
533
534
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
535
case SND_SOC_DAIFMT_NB_NF:
536
case SND_SOC_DAIFMT_NB_IF:
537
serdes_conf |= SERDES_CONF_BCLK_POL;
538
break;
539
}
540
541
if (!fpol_inv)
542
goto err;
543
544
mca_modify(cl, CLUSTER_TX_OFF + REG_TX_SERDES_CONF,
545
SERDES_CONF_BCLK_POL, serdes_conf);
546
mca_modify(cl, CLUSTER_RX_OFF + REG_RX_SERDES_CONF,
547
SERDES_CONF_BCLK_POL, serdes_conf);
548
writel_relaxed(bitstart,
549
cl->base + CLUSTER_TX_OFF + REG_TX_SERDES_BITSTART);
550
writel_relaxed(bitstart,
551
cl->base + CLUSTER_RX_OFF + REG_RX_SERDES_BITSTART);
552
553
return 0;
554
555
err:
556
dev_err(mca->dev, "unsupported DAI format (0x%x) requested\n", fmt);
557
return -EINVAL;
558
}
559
560
static int mca_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio)
561
{
562
struct mca_cluster *cl = mca_dai_to_cluster(dai);
563
564
cl->bclk_ratio = ratio;
565
566
return 0;
567
}
568
569
static int mca_fe_get_port(struct snd_pcm_substream *substream)
570
{
571
struct snd_soc_pcm_runtime *fe = snd_soc_substream_to_rtd(substream);
572
struct snd_soc_pcm_runtime *be;
573
struct snd_soc_dpcm *dpcm;
574
575
be = NULL;
576
for_each_dpcm_be(fe, substream->stream, dpcm) {
577
be = dpcm->be;
578
break;
579
}
580
581
if (!be)
582
return -EINVAL;
583
584
return mca_dai_to_cluster(snd_soc_rtd_to_cpu(be, 0))->no;
585
}
586
587
static int mca_fe_hw_params(struct snd_pcm_substream *substream,
588
struct snd_pcm_hw_params *params,
589
struct snd_soc_dai *dai)
590
{
591
struct mca_cluster *cl = mca_dai_to_cluster(dai);
592
struct mca_data *mca = cl->host;
593
struct device *dev = mca->dev;
594
unsigned int samp_rate = params_rate(params);
595
bool is_tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
596
bool refine_tdm = false;
597
unsigned long bclk_ratio;
598
unsigned int tdm_slots, tdm_slot_width, tdm_mask;
599
u32 regval, pad;
600
int ret, port, nchans_ceiled;
601
602
if (!cl->tdm_slot_width) {
603
/*
604
* We were not given TDM settings from above, set initial
605
* guesses which will later be refined.
606
*/
607
tdm_slot_width = params_width(params);
608
tdm_slots = params_channels(params);
609
refine_tdm = true;
610
} else {
611
tdm_slot_width = cl->tdm_slot_width;
612
tdm_slots = cl->tdm_slots;
613
tdm_mask = is_tx ? cl->tdm_tx_mask : cl->tdm_rx_mask;
614
}
615
616
if (cl->bclk_ratio)
617
bclk_ratio = cl->bclk_ratio;
618
else
619
bclk_ratio = tdm_slot_width * tdm_slots;
620
621
if (refine_tdm) {
622
int nchannels = params_channels(params);
623
624
if (nchannels > 2) {
625
dev_err(dev, "missing TDM for stream with two or more channels\n");
626
return -EINVAL;
627
}
628
629
if ((bclk_ratio % nchannels) != 0) {
630
dev_err(dev, "BCLK ratio (%ld) not divisible by no. of channels (%d)\n",
631
bclk_ratio, nchannels);
632
return -EINVAL;
633
}
634
635
tdm_slot_width = bclk_ratio / nchannels;
636
637
if (tdm_slot_width > 32 && nchannels == 1)
638
tdm_slot_width = 32;
639
640
if (tdm_slot_width < params_width(params)) {
641
dev_err(dev, "TDM slots too narrow (tdm=%u params=%d)\n",
642
tdm_slot_width, params_width(params));
643
return -EINVAL;
644
}
645
646
tdm_mask = (1 << tdm_slots) - 1;
647
}
648
649
port = mca_fe_get_port(substream);
650
if (port < 0)
651
return port;
652
653
ret = mca_configure_serdes(cl, is_tx ? CLUSTER_TX_OFF : CLUSTER_RX_OFF,
654
tdm_mask, tdm_slots, params_channels(params),
655
tdm_slot_width, is_tx, port);
656
if (ret)
657
return ret;
658
659
pad = 32 - params_width(params);
660
661
/*
662
* TODO: Here the register semantics aren't clear.
663
*/
664
nchans_ceiled = min_t(int, params_channels(params), 4);
665
regval = FIELD_PREP(DMA_ADAPTER_NCHANS, nchans_ceiled) |
666
FIELD_PREP(DMA_ADAPTER_TX_NCHANS, 0x2) |
667
FIELD_PREP(DMA_ADAPTER_RX_NCHANS, 0x2) |
668
FIELD_PREP(DMA_ADAPTER_TX_LSB_PAD, pad) |
669
FIELD_PREP(DMA_ADAPTER_RX_MSB_PAD, pad);
670
671
#ifndef USE_RXB_FOR_CAPTURE
672
writel_relaxed(regval, mca->switch_base + REG_DMA_ADAPTER_A(cl->no));
673
#else
674
if (is_tx)
675
writel_relaxed(regval,
676
mca->switch_base + REG_DMA_ADAPTER_A(cl->no));
677
else
678
writel_relaxed(regval,
679
mca->switch_base + REG_DMA_ADAPTER_B(cl->no));
680
#endif
681
682
if (!mca_fe_clocks_in_use(cl)) {
683
/*
684
* Set up FSYNC duty cycle as even as possible.
685
*/
686
writel_relaxed((bclk_ratio / 2) - 1,
687
cl->base + REG_SYNCGEN_HI_PERIOD);
688
writel_relaxed(((bclk_ratio + 1) / 2) - 1,
689
cl->base + REG_SYNCGEN_LO_PERIOD);
690
writel_relaxed(FIELD_PREP(MCLK_CONF_DIV, 0x1),
691
cl->base + REG_MCLK_CONF);
692
693
ret = clk_set_rate(cl->clk_parent, bclk_ratio * samp_rate);
694
if (ret) {
695
dev_err(mca->dev, "cluster %d: unable to set clock parent: %d\n",
696
cl->no, ret);
697
return ret;
698
}
699
}
700
701
return 0;
702
}
703
704
static const struct snd_soc_dai_ops mca_fe_ops = {
705
.startup = mca_fe_startup,
706
.set_fmt = mca_fe_set_fmt,
707
.set_bclk_ratio = mca_set_bclk_ratio,
708
.set_tdm_slot = mca_fe_set_tdm_slot,
709
.hw_params = mca_fe_hw_params,
710
.trigger = mca_fe_trigger,
711
};
712
713
static bool mca_be_started(struct mca_cluster *cl)
714
{
715
int stream;
716
717
for_each_pcm_streams(stream)
718
if (cl->port_started[stream])
719
return true;
720
return false;
721
}
722
723
static int mca_be_startup(struct snd_pcm_substream *substream,
724
struct snd_soc_dai *dai)
725
{
726
struct snd_soc_pcm_runtime *be = snd_soc_substream_to_rtd(substream);
727
struct snd_soc_pcm_runtime *fe;
728
struct mca_cluster *cl = mca_dai_to_cluster(dai);
729
struct mca_cluster *fe_cl;
730
struct mca_data *mca = cl->host;
731
struct snd_soc_dpcm *dpcm;
732
733
fe = NULL;
734
735
for_each_dpcm_fe(be, substream->stream, dpcm) {
736
if (fe && dpcm->fe != fe) {
737
dev_err(mca->dev, "many FE per one BE unsupported\n");
738
return -EINVAL;
739
}
740
741
fe = dpcm->fe;
742
}
743
744
if (!fe)
745
return -EINVAL;
746
747
fe_cl = mca_dai_to_cluster(snd_soc_rtd_to_cpu(fe, 0));
748
749
if (mca_be_started(cl)) {
750
/*
751
* Port is already started in the other direction.
752
* Make sure there isn't a conflict with another cluster
753
* driving the port.
754
*/
755
if (cl->port_driver != fe_cl->no)
756
return -EINVAL;
757
758
cl->port_started[substream->stream] = true;
759
return 0;
760
}
761
762
writel_relaxed(PORT_ENABLES_CLOCKS | PORT_ENABLES_TX_DATA,
763
cl->base + REG_PORT_ENABLES);
764
writel_relaxed(FIELD_PREP(PORT_CLOCK_SEL, fe_cl->no + 1),
765
cl->base + REG_PORT_CLOCK_SEL);
766
writel_relaxed(PORT_DATA_SEL_TXA(fe_cl->no),
767
cl->base + REG_PORT_DATA_SEL);
768
mutex_lock(&mca->port_mutex);
769
cl->port_driver = fe_cl->no;
770
mutex_unlock(&mca->port_mutex);
771
cl->port_started[substream->stream] = true;
772
773
return 0;
774
}
775
776
static void mca_be_shutdown(struct snd_pcm_substream *substream,
777
struct snd_soc_dai *dai)
778
{
779
struct mca_cluster *cl = mca_dai_to_cluster(dai);
780
struct mca_data *mca = cl->host;
781
782
cl->port_started[substream->stream] = false;
783
784
if (!mca_be_started(cl)) {
785
/*
786
* Were we the last direction to shutdown?
787
* Turn off the lights.
788
*/
789
writel_relaxed(0, cl->base + REG_PORT_ENABLES);
790
writel_relaxed(0, cl->base + REG_PORT_DATA_SEL);
791
mutex_lock(&mca->port_mutex);
792
cl->port_driver = -1;
793
mutex_unlock(&mca->port_mutex);
794
}
795
}
796
797
static const struct snd_soc_dai_ops mca_be_ops = {
798
.prepare = mca_be_prepare,
799
.hw_free = mca_be_hw_free,
800
.startup = mca_be_startup,
801
.shutdown = mca_be_shutdown,
802
};
803
804
static int mca_set_runtime_hwparams(struct snd_soc_component *component,
805
struct snd_pcm_substream *substream,
806
struct dma_chan *chan)
807
{
808
struct device *dma_dev = chan->device->dev;
809
struct snd_dmaengine_dai_dma_data dma_data = {};
810
int ret;
811
812
struct snd_pcm_hardware hw;
813
814
memset(&hw, 0, sizeof(hw));
815
816
hw.info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
817
SNDRV_PCM_INFO_INTERLEAVED;
818
hw.periods_min = 2;
819
hw.periods_max = UINT_MAX;
820
hw.period_bytes_min = 256;
821
hw.period_bytes_max = dma_get_max_seg_size(dma_dev);
822
hw.buffer_bytes_max = SIZE_MAX;
823
hw.fifo_size = 16;
824
825
ret = snd_dmaengine_pcm_refine_runtime_hwparams(substream, &dma_data,
826
&hw, chan);
827
828
if (ret)
829
return ret;
830
831
return snd_soc_set_runtime_hwparams(substream, &hw);
832
}
833
834
static int mca_pcm_open(struct snd_soc_component *component,
835
struct snd_pcm_substream *substream)
836
{
837
struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
838
struct mca_cluster *cl = mca_dai_to_cluster(snd_soc_rtd_to_cpu(rtd, 0));
839
struct dma_chan *chan = cl->dma_chans[substream->stream];
840
int ret;
841
842
if (rtd->dai_link->no_pcm)
843
return 0;
844
845
ret = mca_set_runtime_hwparams(component, substream, chan);
846
if (ret)
847
return ret;
848
849
return snd_dmaengine_pcm_open(substream, chan);
850
}
851
852
static int mca_hw_params(struct snd_soc_component *component,
853
struct snd_pcm_substream *substream,
854
struct snd_pcm_hw_params *params)
855
{
856
struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
857
struct dma_chan *chan = snd_dmaengine_pcm_get_chan(substream);
858
struct dma_slave_config slave_config;
859
int ret;
860
861
if (rtd->dai_link->no_pcm)
862
return 0;
863
864
memset(&slave_config, 0, sizeof(slave_config));
865
ret = snd_hwparams_to_dma_slave_config(substream, params,
866
&slave_config);
867
if (ret < 0)
868
return ret;
869
870
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
871
slave_config.dst_port_window_size =
872
min_t(u32, params_channels(params), 4);
873
else
874
slave_config.src_port_window_size =
875
min_t(u32, params_channels(params), 4);
876
877
return dmaengine_slave_config(chan, &slave_config);
878
}
879
880
static int mca_close(struct snd_soc_component *component,
881
struct snd_pcm_substream *substream)
882
{
883
struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
884
885
if (rtd->dai_link->no_pcm)
886
return 0;
887
888
return snd_dmaengine_pcm_close(substream);
889
}
890
891
static int mca_trigger(struct snd_soc_component *component,
892
struct snd_pcm_substream *substream, int cmd)
893
{
894
struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
895
896
if (rtd->dai_link->no_pcm)
897
return 0;
898
899
/*
900
* Before we do the PCM trigger proper, insert an opportunity
901
* to reset the frontend's SERDES.
902
*/
903
mca_fe_early_trigger(substream, cmd, snd_soc_rtd_to_cpu(rtd, 0));
904
905
return snd_dmaengine_pcm_trigger(substream, cmd);
906
}
907
908
static snd_pcm_uframes_t mca_pointer(struct snd_soc_component *component,
909
struct snd_pcm_substream *substream)
910
{
911
struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
912
913
if (rtd->dai_link->no_pcm)
914
return -ENOTSUPP;
915
916
return snd_dmaengine_pcm_pointer(substream);
917
}
918
919
static struct dma_chan *mca_request_dma_channel(struct mca_cluster *cl, unsigned int stream)
920
{
921
bool is_tx = (stream == SNDRV_PCM_STREAM_PLAYBACK);
922
#ifndef USE_RXB_FOR_CAPTURE
923
char *name = devm_kasprintf(cl->host->dev, GFP_KERNEL,
924
is_tx ? "tx%da" : "rx%da", cl->no);
925
#else
926
char *name = devm_kasprintf(cl->host->dev, GFP_KERNEL,
927
is_tx ? "tx%da" : "rx%db", cl->no);
928
#endif
929
return of_dma_request_slave_channel(cl->host->dev->of_node, name);
930
931
}
932
933
static void mca_pcm_free(struct snd_soc_component *component,
934
struct snd_pcm *pcm)
935
{
936
struct snd_soc_pcm_runtime *rtd = snd_pcm_chip(pcm);
937
struct mca_cluster *cl = mca_dai_to_cluster(snd_soc_rtd_to_cpu(rtd, 0));
938
unsigned int i;
939
940
if (rtd->dai_link->no_pcm)
941
return;
942
943
for_each_pcm_streams(i) {
944
struct snd_pcm_substream *substream =
945
rtd->pcm->streams[i].substream;
946
947
if (!substream || !cl->dma_chans[i])
948
continue;
949
950
dma_release_channel(cl->dma_chans[i]);
951
cl->dma_chans[i] = NULL;
952
}
953
}
954
955
956
static int mca_pcm_new(struct snd_soc_component *component,
957
struct snd_soc_pcm_runtime *rtd)
958
{
959
struct mca_cluster *cl = mca_dai_to_cluster(snd_soc_rtd_to_cpu(rtd, 0));
960
unsigned int i;
961
962
if (rtd->dai_link->no_pcm)
963
return 0;
964
965
for_each_pcm_streams(i) {
966
struct snd_pcm_substream *substream =
967
rtd->pcm->streams[i].substream;
968
struct dma_chan *chan;
969
970
if (!substream)
971
continue;
972
973
chan = mca_request_dma_channel(cl, i);
974
975
if (IS_ERR_OR_NULL(chan)) {
976
mca_pcm_free(component, rtd->pcm);
977
978
if (chan && PTR_ERR(chan) == -EPROBE_DEFER)
979
return PTR_ERR(chan);
980
981
dev_err(component->dev, "unable to obtain DMA channel (stream %d cluster %d): %pe\n",
982
i, cl->no, chan);
983
984
if (!chan)
985
return -EINVAL;
986
return PTR_ERR(chan);
987
}
988
989
cl->dma_chans[i] = chan;
990
snd_pcm_set_managed_buffer(substream, SNDRV_DMA_TYPE_DEV_IRAM,
991
chan->device->dev, 512 * 1024 * 6,
992
SIZE_MAX);
993
}
994
995
return 0;
996
}
997
998
static const struct snd_soc_component_driver mca_component = {
999
.name = "apple-mca",
1000
.open = mca_pcm_open,
1001
.close = mca_close,
1002
.hw_params = mca_hw_params,
1003
.trigger = mca_trigger,
1004
.pointer = mca_pointer,
1005
.pcm_construct = mca_pcm_new,
1006
.pcm_destruct = mca_pcm_free,
1007
};
1008
1009
static void apple_mca_release(struct mca_data *mca)
1010
{
1011
int i;
1012
1013
for (i = 0; i < mca->nclusters; i++) {
1014
struct mca_cluster *cl = &mca->clusters[i];
1015
1016
if (!IS_ERR_OR_NULL(cl->clk_parent))
1017
clk_put(cl->clk_parent);
1018
1019
if (!IS_ERR_OR_NULL(cl->pd_dev))
1020
dev_pm_domain_detach(cl->pd_dev, true);
1021
}
1022
1023
if (mca->pd_link)
1024
device_link_del(mca->pd_link);
1025
1026
if (!IS_ERR_OR_NULL(mca->pd_dev))
1027
dev_pm_domain_detach(mca->pd_dev, true);
1028
1029
reset_control_rearm(mca->rstc);
1030
}
1031
1032
static int apple_mca_probe(struct platform_device *pdev)
1033
{
1034
struct mca_data *mca;
1035
struct mca_cluster *clusters;
1036
struct snd_soc_dai_driver *dai_drivers;
1037
struct resource *res;
1038
void __iomem *base;
1039
int nclusters;
1040
int ret, i;
1041
1042
base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
1043
if (IS_ERR(base))
1044
return PTR_ERR(base);
1045
1046
if (resource_size(res) < CLUSTER_STRIDE)
1047
return -EINVAL;
1048
nclusters = (resource_size(res) - CLUSTER_STRIDE) / CLUSTER_STRIDE + 1;
1049
1050
mca = devm_kzalloc(&pdev->dev, struct_size(mca, clusters, nclusters),
1051
GFP_KERNEL);
1052
if (!mca)
1053
return -ENOMEM;
1054
mca->dev = &pdev->dev;
1055
mca->nclusters = nclusters;
1056
mutex_init(&mca->port_mutex);
1057
platform_set_drvdata(pdev, mca);
1058
clusters = mca->clusters;
1059
1060
mca->switch_base =
1061
devm_platform_ioremap_resource(pdev, 1);
1062
if (IS_ERR(mca->switch_base))
1063
return PTR_ERR(mca->switch_base);
1064
1065
mca->rstc = devm_reset_control_get_optional_shared(&pdev->dev, NULL);
1066
if (IS_ERR(mca->rstc))
1067
return PTR_ERR(mca->rstc);
1068
1069
dai_drivers = devm_kzalloc(
1070
&pdev->dev, sizeof(*dai_drivers) * 2 * nclusters, GFP_KERNEL);
1071
if (!dai_drivers)
1072
return -ENOMEM;
1073
1074
mca->pd_dev = dev_pm_domain_attach_by_id(&pdev->dev, 0);
1075
if (IS_ERR(mca->pd_dev))
1076
return -EINVAL;
1077
1078
mca->pd_link = device_link_add(&pdev->dev, mca->pd_dev,
1079
DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME |
1080
DL_FLAG_RPM_ACTIVE);
1081
if (!mca->pd_link) {
1082
ret = -EINVAL;
1083
/* Prevent an unbalanced reset rearm */
1084
mca->rstc = NULL;
1085
goto err_release;
1086
}
1087
1088
reset_control_reset(mca->rstc);
1089
1090
for (i = 0; i < nclusters; i++) {
1091
struct mca_cluster *cl = &clusters[i];
1092
struct snd_soc_dai_driver *fe =
1093
&dai_drivers[mca->nclusters + i];
1094
struct snd_soc_dai_driver *be = &dai_drivers[i];
1095
1096
cl->host = mca;
1097
cl->no = i;
1098
cl->base = base + CLUSTER_STRIDE * i;
1099
cl->port_driver = -1;
1100
cl->clk_parent = of_clk_get(pdev->dev.of_node, i);
1101
if (IS_ERR(cl->clk_parent)) {
1102
dev_err(&pdev->dev, "unable to obtain clock %d: %ld\n",
1103
i, PTR_ERR(cl->clk_parent));
1104
ret = PTR_ERR(cl->clk_parent);
1105
goto err_release;
1106
}
1107
cl->pd_dev = dev_pm_domain_attach_by_id(&pdev->dev, i + 1);
1108
if (IS_ERR(cl->pd_dev)) {
1109
dev_err(&pdev->dev,
1110
"unable to obtain cluster %d PD: %ld\n", i,
1111
PTR_ERR(cl->pd_dev));
1112
ret = PTR_ERR(cl->pd_dev);
1113
goto err_release;
1114
}
1115
1116
fe->id = i;
1117
fe->name =
1118
devm_kasprintf(&pdev->dev, GFP_KERNEL, "mca-pcm-%d", i);
1119
if (!fe->name) {
1120
ret = -ENOMEM;
1121
goto err_release;
1122
}
1123
fe->ops = &mca_fe_ops;
1124
fe->playback.channels_min = 1;
1125
fe->playback.channels_max = 32;
1126
fe->playback.rates = SNDRV_PCM_RATE_8000_192000;
1127
fe->playback.formats = APPLE_MCA_FMTBITS;
1128
fe->capture.channels_min = 1;
1129
fe->capture.channels_max = 32;
1130
fe->capture.rates = SNDRV_PCM_RATE_8000_192000;
1131
fe->capture.formats = APPLE_MCA_FMTBITS;
1132
fe->symmetric_rate = 1;
1133
1134
fe->playback.stream_name =
1135
devm_kasprintf(&pdev->dev, GFP_KERNEL, "PCM%d TX", i);
1136
fe->capture.stream_name =
1137
devm_kasprintf(&pdev->dev, GFP_KERNEL, "PCM%d RX", i);
1138
1139
if (!fe->playback.stream_name || !fe->capture.stream_name) {
1140
ret = -ENOMEM;
1141
goto err_release;
1142
}
1143
1144
be->id = i + nclusters;
1145
be->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "mca-i2s-%d", i);
1146
if (!be->name) {
1147
ret = -ENOMEM;
1148
goto err_release;
1149
}
1150
be->ops = &mca_be_ops;
1151
be->playback.channels_min = 1;
1152
be->playback.channels_max = 32;
1153
be->playback.rates = SNDRV_PCM_RATE_8000_192000;
1154
be->playback.formats = APPLE_MCA_FMTBITS;
1155
be->capture.channels_min = 1;
1156
be->capture.channels_max = 32;
1157
be->capture.rates = SNDRV_PCM_RATE_8000_192000;
1158
be->capture.formats = APPLE_MCA_FMTBITS;
1159
1160
be->playback.stream_name =
1161
devm_kasprintf(&pdev->dev, GFP_KERNEL, "I2S%d TX", i);
1162
be->capture.stream_name =
1163
devm_kasprintf(&pdev->dev, GFP_KERNEL, "I2S%d RX", i);
1164
if (!be->playback.stream_name || !be->capture.stream_name) {
1165
ret = -ENOMEM;
1166
goto err_release;
1167
}
1168
}
1169
1170
ret = snd_soc_register_component(&pdev->dev, &mca_component,
1171
dai_drivers, nclusters * 2);
1172
if (ret) {
1173
dev_err(&pdev->dev, "unable to register ASoC component: %d\n",
1174
ret);
1175
goto err_release;
1176
}
1177
1178
return 0;
1179
1180
err_release:
1181
apple_mca_release(mca);
1182
return ret;
1183
}
1184
1185
static void apple_mca_remove(struct platform_device *pdev)
1186
{
1187
struct mca_data *mca = platform_get_drvdata(pdev);
1188
1189
snd_soc_unregister_component(&pdev->dev);
1190
apple_mca_release(mca);
1191
}
1192
1193
static const struct of_device_id apple_mca_of_match[] = {
1194
{ .compatible = "apple,mca", },
1195
{}
1196
};
1197
MODULE_DEVICE_TABLE(of, apple_mca_of_match);
1198
1199
static struct platform_driver apple_mca_driver = {
1200
.driver = {
1201
.name = "apple-mca",
1202
.of_match_table = apple_mca_of_match,
1203
},
1204
.probe = apple_mca_probe,
1205
.remove = apple_mca_remove,
1206
};
1207
module_platform_driver(apple_mca_driver);
1208
1209
MODULE_AUTHOR("Martin Povišer <[email protected]>");
1210
MODULE_DESCRIPTION("ASoC Apple MCA driver");
1211
MODULE_LICENSE("GPL");
1212
1213