GitHub Repository: torvalds/linux
Path: blob/master/sound/soc/qcom/lpass-platform.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2011,2013-2015 The Linux Foundation. All rights reserved.
 *
 * lpass-platform.c -- ALSA SoC platform driver for QTi LPASS
 */

#include <dt-bindings/sound/qcom,lpass.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <sound/pcm_params.h>
#include <linux/regmap.h>
#include <sound/soc.h>
#include "lpass-lpaif-reg.h"
#include "lpass.h"

#define DRV_NAME "lpass-platform"

#define LPASS_PLATFORM_BUFFER_SIZE	(24 * 2 * 1024)
#define LPASS_PLATFORM_PERIODS		2
#define LPASS_RXTX_CDC_DMA_LPM_BUFF_SIZE	(8 * 1024)
#define LPASS_VA_CDC_DMA_LPM_BUFF_SIZE	(12 * 1024)
#define LPASS_CDC_DMA_REGISTER_FIELDS_MAX	15

static const struct snd_pcm_hardware lpass_platform_pcm_hardware = {
	.info			= SNDRV_PCM_INFO_MMAP |
				  SNDRV_PCM_INFO_MMAP_VALID |
				  SNDRV_PCM_INFO_INTERLEAVED |
				  SNDRV_PCM_INFO_PAUSE |
				  SNDRV_PCM_INFO_RESUME,
	.formats		= SNDRV_PCM_FMTBIT_S16 |
				  SNDRV_PCM_FMTBIT_S24 |
				  SNDRV_PCM_FMTBIT_S32,
	.rates			= SNDRV_PCM_RATE_8000_192000,
	.rate_min		= 8000,
	.rate_max		= 192000,
	.channels_min		= 1,
	.channels_max		= 8,
	.buffer_bytes_max	= LPASS_PLATFORM_BUFFER_SIZE,
	.period_bytes_max	= LPASS_PLATFORM_BUFFER_SIZE /
				  LPASS_PLATFORM_PERIODS,
	.period_bytes_min	= LPASS_PLATFORM_BUFFER_SIZE /
				  LPASS_PLATFORM_PERIODS,
	.periods_min		= LPASS_PLATFORM_PERIODS,
	.periods_max		= LPASS_PLATFORM_PERIODS,
	.fifo_size		= 0,
};

static const struct snd_pcm_hardware lpass_platform_rxtx_hardware = {
	.info			= SNDRV_PCM_INFO_MMAP |
				  SNDRV_PCM_INFO_MMAP_VALID |
				  SNDRV_PCM_INFO_INTERLEAVED |
				  SNDRV_PCM_INFO_PAUSE |
				  SNDRV_PCM_INFO_RESUME,
	.formats		= SNDRV_PCM_FMTBIT_S16 |
				  SNDRV_PCM_FMTBIT_S24 |
				  SNDRV_PCM_FMTBIT_S32,
	.rates			= SNDRV_PCM_RATE_8000_192000,
	.rate_min		= 8000,
	.rate_max		= 192000,
	.channels_min		= 1,
	.channels_max		= 8,
	.buffer_bytes_max	= LPASS_RXTX_CDC_DMA_LPM_BUFF_SIZE,
	.period_bytes_max	= LPASS_RXTX_CDC_DMA_LPM_BUFF_SIZE /
				  LPASS_PLATFORM_PERIODS,
	.period_bytes_min	= LPASS_RXTX_CDC_DMA_LPM_BUFF_SIZE /
				  LPASS_PLATFORM_PERIODS,
	.periods_min		= LPASS_PLATFORM_PERIODS,
	.periods_max		= LPASS_PLATFORM_PERIODS,
	.fifo_size		= 0,
};

static const struct snd_pcm_hardware lpass_platform_va_hardware = {
	.info			= SNDRV_PCM_INFO_MMAP |
				  SNDRV_PCM_INFO_MMAP_VALID |
				  SNDRV_PCM_INFO_INTERLEAVED |
				  SNDRV_PCM_INFO_PAUSE |
				  SNDRV_PCM_INFO_RESUME,
	.formats		= SNDRV_PCM_FMTBIT_S16 |
				  SNDRV_PCM_FMTBIT_S24 |
				  SNDRV_PCM_FMTBIT_S32,
	.rates			= SNDRV_PCM_RATE_8000_192000,
	.rate_min		= 8000,
	.rate_max		= 192000,
	.channels_min		= 1,
	.channels_max		= 8,
	.buffer_bytes_max	= LPASS_VA_CDC_DMA_LPM_BUFF_SIZE,
	.period_bytes_max	= LPASS_VA_CDC_DMA_LPM_BUFF_SIZE /
				  LPASS_PLATFORM_PERIODS,
	.period_bytes_min	= LPASS_VA_CDC_DMA_LPM_BUFF_SIZE /
				  LPASS_PLATFORM_PERIODS,
	.periods_min		= LPASS_PLATFORM_PERIODS,
	.periods_max		= LPASS_PLATFORM_PERIODS,
	.fifo_size		= 0,
};

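/*
 * The helpers below allocate lpaif_dmactl regmap fields for each DMA type
 * (codec RX/TX DMA, VA DMA, generic read/write DMA and HDMI DMA) from the
 * variant-specific register field descriptions.
 */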
static int lpass_platform_alloc_rxtx_dmactl_fields(struct device *dev,
						    struct regmap *map)
{
	struct lpass_data *drvdata = dev_get_drvdata(dev);
	const struct lpass_variant *v = drvdata->variant;
	struct lpaif_dmactl *rd_dmactl, *wr_dmactl;
	int rval;

	rd_dmactl = devm_kzalloc(dev, sizeof(*rd_dmactl), GFP_KERNEL);
	if (!rd_dmactl)
		return -ENOMEM;

	wr_dmactl = devm_kzalloc(dev, sizeof(*wr_dmactl), GFP_KERNEL);
	if (!wr_dmactl)
		return -ENOMEM;

	drvdata->rxtx_rd_dmactl = rd_dmactl;
	drvdata->rxtx_wr_dmactl = wr_dmactl;

	rval = devm_regmap_field_bulk_alloc(dev, map, &rd_dmactl->intf,
					    &v->rxtx_rdma_intf, LPASS_CDC_DMA_REGISTER_FIELDS_MAX);
	if (rval)
		return rval;

	return devm_regmap_field_bulk_alloc(dev, map, &wr_dmactl->intf,
					    &v->rxtx_wrdma_intf, LPASS_CDC_DMA_REGISTER_FIELDS_MAX);
}

static int lpass_platform_alloc_va_dmactl_fields(struct device *dev,
						  struct regmap *map)
{
	struct lpass_data *drvdata = dev_get_drvdata(dev);
	const struct lpass_variant *v = drvdata->variant;
	struct lpaif_dmactl *wr_dmactl;

	wr_dmactl = devm_kzalloc(dev, sizeof(*wr_dmactl), GFP_KERNEL);
	if (!wr_dmactl)
		return -ENOMEM;

	drvdata->va_wr_dmactl = wr_dmactl;
	return devm_regmap_field_bulk_alloc(dev, map, &wr_dmactl->intf,
					    &v->va_wrdma_intf, LPASS_CDC_DMA_REGISTER_FIELDS_MAX);
}

static int lpass_platform_alloc_dmactl_fields(struct device *dev,
					      struct regmap *map)
{
	struct lpass_data *drvdata = dev_get_drvdata(dev);
	const struct lpass_variant *v = drvdata->variant;
	struct lpaif_dmactl *rd_dmactl, *wr_dmactl;
	int rval;

	drvdata->rd_dmactl = devm_kzalloc(dev, sizeof(struct lpaif_dmactl),
					  GFP_KERNEL);
	if (drvdata->rd_dmactl == NULL)
		return -ENOMEM;

	drvdata->wr_dmactl = devm_kzalloc(dev, sizeof(struct lpaif_dmactl),
					  GFP_KERNEL);
	if (drvdata->wr_dmactl == NULL)
		return -ENOMEM;

	rd_dmactl = drvdata->rd_dmactl;
	wr_dmactl = drvdata->wr_dmactl;

	rval = devm_regmap_field_bulk_alloc(dev, map, &rd_dmactl->intf,
					    &v->rdma_intf, 6);
	if (rval)
		return rval;

	return devm_regmap_field_bulk_alloc(dev, map, &wr_dmactl->intf,
					    &v->wrdma_intf, 6);
}

static int lpass_platform_alloc_hdmidmactl_fields(struct device *dev,
						  struct regmap *map)
{
	struct lpass_data *drvdata = dev_get_drvdata(dev);
	const struct lpass_variant *v = drvdata->variant;
	struct lpaif_dmactl *rd_dmactl;

	rd_dmactl = devm_kzalloc(dev, sizeof(struct lpaif_dmactl), GFP_KERNEL);
	if (rd_dmactl == NULL)
		return -ENOMEM;

	drvdata->hdmi_rd_dmactl = rd_dmactl;

	return devm_regmap_field_bulk_alloc(dev, map, &rd_dmactl->bursten,
					    &v->hdmi_rdma_bursten, 8);
}

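/*
 * PCM open: allocate a DMA channel for the stream, remember the substream in
 * the per-interface table and apply the hardware constraints that match the
 * DAI (MI2S/HDMI, codec RX/TX DMA or VA DMA).
 */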
static int lpass_platform_pcmops_open(struct snd_soc_component *component,
				      struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
	const struct lpass_variant *v = drvdata->variant;
	int ret, dma_ch, dir = substream->stream;
	struct lpass_pcm_data *data;
	struct regmap *map;
	unsigned int dai_id = cpu_dai->driver->id;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->i2s_port = cpu_dai->driver->id;
	runtime->private_data = data;

	if (v->alloc_dma_channel)
		dma_ch = v->alloc_dma_channel(drvdata, dir, dai_id);
	else
		dma_ch = 0;

	if (dma_ch < 0) {
		kfree(data);
		return dma_ch;
	}

	switch (dai_id) {
	case MI2S_PRIMARY ... MI2S_QUINARY:
		map = drvdata->lpaif_map;
		drvdata->substream[dma_ch] = substream;
		break;
	case LPASS_DP_RX:
		map = drvdata->hdmiif_map;
		drvdata->hdmi_substream[dma_ch] = substream;
		break;
	case LPASS_CDC_DMA_RX0 ... LPASS_CDC_DMA_RX9:
	case LPASS_CDC_DMA_TX0 ... LPASS_CDC_DMA_TX8:
		map = drvdata->rxtx_lpaif_map;
		drvdata->rxtx_substream[dma_ch] = substream;
		break;
	case LPASS_CDC_DMA_VA_TX0 ... LPASS_CDC_DMA_VA_TX8:
		map = drvdata->va_lpaif_map;
		drvdata->va_substream[dma_ch] = substream;
		break;
	default:
		break;
	}

	data->dma_ch = dma_ch;
	switch (dai_id) {
	case MI2S_PRIMARY ... MI2S_QUINARY:
	case LPASS_DP_RX:
		ret = regmap_write(map, LPAIF_DMACTL_REG(v, dma_ch, dir, data->i2s_port), 0);
		if (ret) {
			kfree(data);
			dev_err(soc_runtime->dev, "error writing to rdmactl reg: %d\n", ret);
			return ret;
		}
		snd_soc_set_runtime_hwparams(substream, &lpass_platform_pcm_hardware);
		runtime->dma_bytes = lpass_platform_pcm_hardware.buffer_bytes_max;
		break;
	case LPASS_CDC_DMA_RX0 ... LPASS_CDC_DMA_RX9:
	case LPASS_CDC_DMA_TX0 ... LPASS_CDC_DMA_TX8:
		snd_soc_set_runtime_hwparams(substream, &lpass_platform_rxtx_hardware);
		runtime->dma_bytes = lpass_platform_rxtx_hardware.buffer_bytes_max;
		snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
		break;
	case LPASS_CDC_DMA_VA_TX0 ... LPASS_CDC_DMA_VA_TX8:
		snd_soc_set_runtime_hwparams(substream, &lpass_platform_va_hardware);
		runtime->dma_bytes = lpass_platform_va_hardware.buffer_bytes_max;
		snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
		break;
	default:
		break;
	}
	ret = snd_pcm_hw_constraint_integer(runtime,
					    SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0) {
		kfree(data);
		dev_err(soc_runtime->dev, "setting constraints failed: %d\n",
			ret);
		return -EINVAL;
	}

	return 0;
}

static int lpass_platform_pcmops_close(struct snd_soc_component *component,
				       struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
	const struct lpass_variant *v = drvdata->variant;
	struct lpass_pcm_data *data;
	unsigned int dai_id = cpu_dai->driver->id;

	data = runtime->private_data;

	switch (dai_id) {
	case MI2S_PRIMARY ... MI2S_QUINARY:
		drvdata->substream[data->dma_ch] = NULL;
		break;
	case LPASS_DP_RX:
		drvdata->hdmi_substream[data->dma_ch] = NULL;
		break;
	case LPASS_CDC_DMA_RX0 ... LPASS_CDC_DMA_RX9:
	case LPASS_CDC_DMA_TX0 ... LPASS_CDC_DMA_TX8:
		drvdata->rxtx_substream[data->dma_ch] = NULL;
		break;
	case LPASS_CDC_DMA_VA_TX0 ... LPASS_CDC_DMA_VA_TX8:
		drvdata->va_substream[data->dma_ch] = NULL;
		break;
	default:
		break;
	}

	if (v->free_dma_channel)
		v->free_dma_channel(drvdata, data->dma_ch, dai_id);

	kfree(data);
	return 0;
}

static struct lpaif_dmactl *__lpass_get_dmactl_handle(const struct snd_pcm_substream *substream,
						       struct snd_soc_component *component)
{
	struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
	struct lpaif_dmactl *dmactl = NULL;

	switch (cpu_dai->driver->id) {
	case MI2S_PRIMARY ... MI2S_QUINARY:
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
			dmactl = drvdata->rd_dmactl;
		else
			dmactl = drvdata->wr_dmactl;
		break;
	case LPASS_DP_RX:
		dmactl = drvdata->hdmi_rd_dmactl;
		break;
	case LPASS_CDC_DMA_RX0 ... LPASS_CDC_DMA_RX9:
		dmactl = drvdata->rxtx_rd_dmactl;
		break;
	case LPASS_CDC_DMA_TX0 ... LPASS_CDC_DMA_TX8:
		dmactl = drvdata->rxtx_wr_dmactl;
		break;
	case LPASS_CDC_DMA_VA_TX0 ... LPASS_CDC_DMA_VA_TX8:
		dmactl = drvdata->va_wr_dmactl;
		break;
	}

	return dmactl;
}

static int __lpass_get_id(const struct snd_pcm_substream *substream,
			  struct snd_soc_component *component)
{
	struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
	struct snd_pcm_runtime *rt = substream->runtime;
	struct lpass_pcm_data *pcm_data = rt->private_data;
	const struct lpass_variant *v = drvdata->variant;
	int id;

	switch (cpu_dai->driver->id) {
	case MI2S_PRIMARY ... MI2S_QUINARY:
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
			id = pcm_data->dma_ch;
		else
			id = pcm_data->dma_ch - v->wrdma_channel_start;
		break;
	case LPASS_DP_RX:
		id = pcm_data->dma_ch;
		break;
	case LPASS_CDC_DMA_RX0 ... LPASS_CDC_DMA_RX9:
		id = pcm_data->dma_ch;
		break;
	case LPASS_CDC_DMA_TX0 ... LPASS_CDC_DMA_TX8:
		id = pcm_data->dma_ch - v->rxtx_wrdma_channel_start;
		break;
	case LPASS_CDC_DMA_VA_TX0 ... LPASS_CDC_DMA_VA_TX8:
		id = pcm_data->dma_ch - v->va_wrdma_channel_start;
		break;
	}

	return id;
}

static struct regmap *__lpass_get_regmap_handle(const struct snd_pcm_substream *substream,
						struct snd_soc_component *component)
{
	struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
	struct regmap *map = NULL;

	switch (cpu_dai->driver->id) {
	case MI2S_PRIMARY ... MI2S_QUINARY:
		map = drvdata->lpaif_map;
		break;
	case LPASS_DP_RX:
		map = drvdata->hdmiif_map;
		break;
	case LPASS_CDC_DMA_RX0 ... LPASS_CDC_DMA_RX9:
	case LPASS_CDC_DMA_TX0 ... LPASS_CDC_DMA_TX8:
		map = drvdata->rxtx_lpaif_map;
		break;
	case LPASS_CDC_DMA_VA_TX0 ... LPASS_CDC_DMA_VA_TX8:
		map = drvdata->va_lpaif_map;
		break;
	}

	return map;
}

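/*
 * hw_params programs the DMA control register fields (burst enable, FIFO
 * watermark, audio interface and words-per-sample count) from the requested
 * sample format and channel count.
 */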
static int lpass_platform_pcmops_hw_params(struct snd_soc_component *component,
					   struct snd_pcm_substream *substream,
					   struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
	struct snd_pcm_runtime *rt = substream->runtime;
	struct lpass_pcm_data *pcm_data = rt->private_data;
	const struct lpass_variant *v = drvdata->variant;
	snd_pcm_format_t format = params_format(params);
	unsigned int channels = params_channels(params);
	unsigned int regval;
	struct lpaif_dmactl *dmactl;
	int id;
	int bitwidth;
	int ret, dma_port = pcm_data->i2s_port + v->dmactl_audif_start;
	unsigned int dai_id = cpu_dai->driver->id;

	dmactl = __lpass_get_dmactl_handle(substream, component);
	id = __lpass_get_id(substream, component);

	bitwidth = snd_pcm_format_width(format);
	if (bitwidth < 0) {
		dev_err(soc_runtime->dev, "invalid bit width given: %d\n",
			bitwidth);
		return bitwidth;
	}

	ret = regmap_fields_write(dmactl->bursten, id, LPAIF_DMACTL_BURSTEN_INCR4);
	if (ret) {
		dev_err(soc_runtime->dev, "error updating bursten field: %d\n", ret);
		return ret;
	}

	ret = regmap_fields_write(dmactl->fifowm, id, LPAIF_DMACTL_FIFOWM_8);
	if (ret) {
		dev_err(soc_runtime->dev, "error updating fifowm field: %d\n", ret);
		return ret;
	}

	switch (dai_id) {
	case LPASS_DP_RX:
		ret = regmap_fields_write(dmactl->burst8, id,
					  LPAIF_DMACTL_BURSTEN_INCR4);
		if (ret) {
			dev_err(soc_runtime->dev, "error updating burst8en field: %d\n", ret);
			return ret;
		}
		ret = regmap_fields_write(dmactl->burst16, id,
					  LPAIF_DMACTL_BURSTEN_INCR4);
		if (ret) {
			dev_err(soc_runtime->dev, "error updating burst16en field: %d\n", ret);
			return ret;
		}
		ret = regmap_fields_write(dmactl->dynburst, id,
					  LPAIF_DMACTL_BURSTEN_INCR4);
		if (ret) {
			dev_err(soc_runtime->dev, "error updating dynbursten field: %d\n", ret);
			return ret;
		}
		break;
	case MI2S_PRIMARY:
	case MI2S_SECONDARY:
	case MI2S_TERTIARY:
	case MI2S_QUATERNARY:
	case MI2S_QUINARY:
		ret = regmap_fields_write(dmactl->intf, id,
					  LPAIF_DMACTL_AUDINTF(dma_port));
		if (ret) {
			dev_err(soc_runtime->dev, "error updating audio interface field: %d\n",
				ret);
			return ret;
		}

		break;
	case LPASS_CDC_DMA_RX0 ... LPASS_CDC_DMA_RX9:
	case LPASS_CDC_DMA_TX0 ... LPASS_CDC_DMA_TX8:
	case LPASS_CDC_DMA_VA_TX0 ... LPASS_CDC_DMA_VA_TX8:
		break;
	default:
		dev_err(soc_runtime->dev, "%s: invalid interface: %d\n", __func__, dai_id);
		break;
	}
	switch (bitwidth) {
	case 16:
		switch (channels) {
		case 1:
		case 2:
			regval = LPAIF_DMACTL_WPSCNT_ONE;
			break;
		case 4:
			regval = LPAIF_DMACTL_WPSCNT_TWO;
			break;
		case 6:
			regval = LPAIF_DMACTL_WPSCNT_THREE;
			break;
		case 8:
			regval = LPAIF_DMACTL_WPSCNT_FOUR;
			break;
		default:
			dev_err(soc_runtime->dev, "invalid PCM config given: bw=%d, ch=%u\n",
				bitwidth, channels);
			return -EINVAL;
		}
		break;
	case 24:
	case 32:
		switch (channels) {
		case 1:
			regval = LPAIF_DMACTL_WPSCNT_ONE;
			break;
		case 2:
			regval = (dai_id == LPASS_DP_RX ?
				  LPAIF_DMACTL_WPSCNT_ONE :
				  LPAIF_DMACTL_WPSCNT_TWO);
			break;
		case 4:
			regval = (dai_id == LPASS_DP_RX ?
				  LPAIF_DMACTL_WPSCNT_TWO :
				  LPAIF_DMACTL_WPSCNT_FOUR);
			break;
		case 6:
			regval = (dai_id == LPASS_DP_RX ?
				  LPAIF_DMACTL_WPSCNT_THREE :
				  LPAIF_DMACTL_WPSCNT_SIX);
			break;
		case 8:
			regval = (dai_id == LPASS_DP_RX ?
				  LPAIF_DMACTL_WPSCNT_FOUR :
				  LPAIF_DMACTL_WPSCNT_EIGHT);
			break;
		default:
			dev_err(soc_runtime->dev, "invalid PCM config given: bw=%d, ch=%u\n",
				bitwidth, channels);
			return -EINVAL;
		}
		break;
	default:
		dev_err(soc_runtime->dev, "invalid PCM config given: bw=%d, ch=%u\n",
			bitwidth, channels);
		return -EINVAL;
	}

	ret = regmap_fields_write(dmactl->wpscnt, id, regval);
	if (ret) {
		dev_err(soc_runtime->dev, "error writing to dmactl reg: %d\n",
			ret);
		return ret;
	}

	return 0;
}

static int lpass_platform_pcmops_hw_free(struct snd_soc_component *component,
					 struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
	struct snd_pcm_runtime *rt = substream->runtime;
	struct lpass_pcm_data *pcm_data = rt->private_data;
	const struct lpass_variant *v = drvdata->variant;
	unsigned int reg;
	int ret;
	struct regmap *map;
	unsigned int dai_id = cpu_dai->driver->id;

	if (is_cdc_dma_port(dai_id))
		return 0;
	map = __lpass_get_regmap_handle(substream, component);

	reg = LPAIF_DMACTL_REG(v, pcm_data->dma_ch, substream->stream, dai_id);
	ret = regmap_write(map, reg, 0);
	if (ret)
		dev_err(soc_runtime->dev, "error writing to rdmactl reg: %d\n",
			ret);

	return ret;
}

static int lpass_platform_pcmops_prepare(struct snd_soc_component *component,
					 struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
	struct snd_pcm_runtime *rt = substream->runtime;
	struct lpass_pcm_data *pcm_data = rt->private_data;
	const struct lpass_variant *v = drvdata->variant;
	struct lpaif_dmactl *dmactl;
	struct regmap *map;
	int ret, id, ch, dir = substream->stream;
	unsigned int dai_id = cpu_dai->driver->id;

	ch = pcm_data->dma_ch;

	dmactl = __lpass_get_dmactl_handle(substream, component);
	id = __lpass_get_id(substream, component);
	map = __lpass_get_regmap_handle(substream, component);

	ret = regmap_write(map, LPAIF_DMABASE_REG(v, ch, dir, dai_id),
			   runtime->dma_addr);
	if (ret) {
		dev_err(soc_runtime->dev, "error writing to rdmabase reg: %d\n",
			ret);
		return ret;
	}

	ret = regmap_write(map, LPAIF_DMABUFF_REG(v, ch, dir, dai_id),
			   (snd_pcm_lib_buffer_bytes(substream) >> 2) - 1);
	if (ret) {
		dev_err(soc_runtime->dev, "error writing to rdmabuff reg: %d\n",
			ret);
		return ret;
	}

	ret = regmap_write(map, LPAIF_DMAPER_REG(v, ch, dir, dai_id),
			   (snd_pcm_lib_period_bytes(substream) >> 2) - 1);
	if (ret) {
		dev_err(soc_runtime->dev, "error writing to rdmaper reg: %d\n",
			ret);
		return ret;
	}

	if (is_cdc_dma_port(dai_id)) {
		ret = regmap_fields_write(dmactl->fifowm, id, LPAIF_DMACTL_FIFOWM_8);
		if (ret) {
			dev_err(soc_runtime->dev, "error writing fifowm field to dmactl reg: %d, id: %d\n",
				ret, id);
			return ret;
		}
	}
	ret = regmap_fields_write(dmactl->enable, id, LPAIF_DMACTL_ENABLE_ON);
	if (ret) {
		dev_err(soc_runtime->dev, "error writing to rdmactl reg: %d\n",
			ret);
		return ret;
	}

	return 0;
}

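/*
 * Trigger enables or disables the DMA channel and (un)masks the per-channel
 * interrupts in the interrupt-enable register of the interface in use.
 */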
static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
					 struct snd_pcm_substream *substream,
					 int cmd)
{
	struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
	struct snd_pcm_runtime *rt = substream->runtime;
	struct lpass_pcm_data *pcm_data = rt->private_data;
	const struct lpass_variant *v = drvdata->variant;
	struct lpaif_dmactl *dmactl;
	struct regmap *map;
	int ret, ch, id;
	unsigned int reg_irqclr = 0, val_irqclr = 0;
	unsigned int reg_irqen = 0, val_irqen = 0, val_mask = 0;
	unsigned int dai_id = cpu_dai->driver->id;

	ch = pcm_data->dma_ch;
	dmactl = __lpass_get_dmactl_handle(substream, component);
	id = __lpass_get_id(substream, component);
	map = __lpass_get_regmap_handle(substream, component);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		ret = regmap_fields_write(dmactl->enable, id,
					  LPAIF_DMACTL_ENABLE_ON);
		if (ret) {
			dev_err(soc_runtime->dev,
				"error writing to rdmactl reg: %d\n", ret);
			return ret;
		}
		switch (dai_id) {
		case LPASS_DP_RX:
			ret = regmap_fields_write(dmactl->dyncclk, id,
						  LPAIF_DMACTL_DYNCLK_ON);
			if (ret) {
				dev_err(soc_runtime->dev,
					"error writing to rdmactl reg: %d\n", ret);
				return ret;
			}
			reg_irqclr = LPASS_HDMITX_APP_IRQCLEAR_REG(v);
			val_irqclr = (LPAIF_IRQ_ALL(ch) |
				      LPAIF_IRQ_HDMI_REQ_ON_PRELOAD(ch) |
				      LPAIF_IRQ_HDMI_METADONE |
				      LPAIF_IRQ_HDMI_SDEEP_AUD_DIS(ch));

			reg_irqen = LPASS_HDMITX_APP_IRQEN_REG(v);
			val_mask = (LPAIF_IRQ_ALL(ch) |
				    LPAIF_IRQ_HDMI_REQ_ON_PRELOAD(ch) |
				    LPAIF_IRQ_HDMI_METADONE |
				    LPAIF_IRQ_HDMI_SDEEP_AUD_DIS(ch));
			val_irqen = (LPAIF_IRQ_ALL(ch) |
				     LPAIF_IRQ_HDMI_REQ_ON_PRELOAD(ch) |
				     LPAIF_IRQ_HDMI_METADONE |
				     LPAIF_IRQ_HDMI_SDEEP_AUD_DIS(ch));
			break;
		case MI2S_PRIMARY:
		case MI2S_SECONDARY:
		case MI2S_TERTIARY:
		case MI2S_QUATERNARY:
		case MI2S_QUINARY:
			reg_irqclr = LPAIF_IRQCLEAR_REG(v, LPAIF_IRQ_PORT_HOST);
			val_irqclr = LPAIF_IRQ_ALL(ch);

			reg_irqen = LPAIF_IRQEN_REG(v, LPAIF_IRQ_PORT_HOST);
			val_mask = LPAIF_IRQ_ALL(ch);
			val_irqen = LPAIF_IRQ_ALL(ch);
			break;
		case LPASS_CDC_DMA_RX0 ... LPASS_CDC_DMA_RX9:
		case LPASS_CDC_DMA_TX0 ... LPASS_CDC_DMA_TX8:
			ret = regmap_fields_write(dmactl->dyncclk, id, LPAIF_DMACTL_DYNCLK_ON);
			if (ret) {
				dev_err(soc_runtime->dev,
					"error writing to rdmactl reg field: %d\n", ret);
				return ret;
			}
			reg_irqclr = LPAIF_RXTX_IRQCLEAR_REG(v, LPAIF_IRQ_PORT_HOST);
			val_irqclr = LPAIF_IRQ_ALL(ch);

			reg_irqen = LPAIF_RXTX_IRQEN_REG(v, LPAIF_IRQ_PORT_HOST);
			val_mask = LPAIF_IRQ_ALL(ch);
			val_irqen = LPAIF_IRQ_ALL(ch);
			break;
		case LPASS_CDC_DMA_VA_TX0 ... LPASS_CDC_DMA_VA_TX8:
			ret = regmap_fields_write(dmactl->dyncclk, id, LPAIF_DMACTL_DYNCLK_ON);
			if (ret) {
				dev_err(soc_runtime->dev,
					"error writing to rdmactl reg field: %d\n", ret);
				return ret;
			}
			reg_irqclr = LPAIF_VA_IRQCLEAR_REG(v, LPAIF_IRQ_PORT_HOST);
			val_irqclr = LPAIF_IRQ_ALL(ch);

			reg_irqen = LPAIF_VA_IRQEN_REG(v, LPAIF_IRQ_PORT_HOST);
			val_mask = LPAIF_IRQ_ALL(ch);
			val_irqen = LPAIF_IRQ_ALL(ch);
			break;
		default:
			dev_err(soc_runtime->dev, "%s: invalid %d interface\n", __func__, dai_id);
			return -EINVAL;
		}

		ret = regmap_write_bits(map, reg_irqclr, val_irqclr, val_irqclr);
		if (ret) {
			dev_err(soc_runtime->dev, "error writing to irqclear reg: %d\n", ret);
			return ret;
		}
		ret = regmap_update_bits(map, reg_irqen, val_mask, val_irqen);
		if (ret) {
			dev_err(soc_runtime->dev, "error writing to irqen reg: %d\n", ret);
			return ret;
		}
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		ret = regmap_fields_write(dmactl->enable, id,
					  LPAIF_DMACTL_ENABLE_OFF);
		if (ret) {
			dev_err(soc_runtime->dev,
				"error writing to rdmactl reg: %d\n", ret);
			return ret;
		}
		switch (dai_id) {
		case LPASS_DP_RX:
			ret = regmap_fields_write(dmactl->dyncclk, id,
						  LPAIF_DMACTL_DYNCLK_OFF);
			if (ret) {
				dev_err(soc_runtime->dev,
					"error writing to rdmactl reg: %d\n", ret);
				return ret;
			}
			reg_irqen = LPASS_HDMITX_APP_IRQEN_REG(v);
			val_mask = (LPAIF_IRQ_ALL(ch) |
				    LPAIF_IRQ_HDMI_REQ_ON_PRELOAD(ch) |
				    LPAIF_IRQ_HDMI_METADONE |
				    LPAIF_IRQ_HDMI_SDEEP_AUD_DIS(ch));
			val_irqen = 0;
			break;
		case MI2S_PRIMARY:
		case MI2S_SECONDARY:
		case MI2S_TERTIARY:
		case MI2S_QUATERNARY:
		case MI2S_QUINARY:
			reg_irqen = LPAIF_IRQEN_REG(v, LPAIF_IRQ_PORT_HOST);
			val_mask = LPAIF_IRQ_ALL(ch);
			val_irqen = 0;
			break;
		case LPASS_CDC_DMA_RX0 ... LPASS_CDC_DMA_RX9:
		case LPASS_CDC_DMA_TX0 ... LPASS_CDC_DMA_TX8:
			ret = regmap_fields_write(dmactl->dyncclk, id, LPAIF_DMACTL_DYNCLK_OFF);
			if (ret) {
				dev_err(soc_runtime->dev,
					"error writing to rdmactl reg field: %d\n", ret);
				return ret;
			}

			reg_irqclr = LPAIF_RXTX_IRQCLEAR_REG(v, LPAIF_IRQ_PORT_HOST);
			val_irqclr = LPAIF_IRQ_ALL(ch);

			reg_irqen = LPAIF_RXTX_IRQEN_REG(v, LPAIF_IRQ_PORT_HOST);
			val_mask = LPAIF_IRQ_ALL(ch);
			val_irqen = LPAIF_IRQ_ALL(ch);
			break;
		case LPASS_CDC_DMA_VA_TX0 ... LPASS_CDC_DMA_VA_TX8:
			ret = regmap_fields_write(dmactl->dyncclk, id, LPAIF_DMACTL_DYNCLK_OFF);
			if (ret) {
				dev_err(soc_runtime->dev,
					"error writing to rdmactl reg field: %d\n", ret);
				return ret;
			}

			reg_irqclr = LPAIF_VA_IRQCLEAR_REG(v, LPAIF_IRQ_PORT_HOST);
			val_irqclr = LPAIF_IRQ_ALL(ch);

			reg_irqen = LPAIF_VA_IRQEN_REG(v, LPAIF_IRQ_PORT_HOST);
			val_mask = LPAIF_IRQ_ALL(ch);
			val_irqen = LPAIF_IRQ_ALL(ch);
			break;
		default:
			dev_err(soc_runtime->dev, "%s: invalid %d interface\n", __func__, dai_id);
			return -EINVAL;
		}

		ret = regmap_update_bits(map, reg_irqen, val_mask, val_irqen);
		if (ret) {
			dev_err(soc_runtime->dev,
				"error writing to irqen reg: %d\n", ret);
			return ret;
		}
		break;
	}

	return 0;
}

static snd_pcm_uframes_t lpass_platform_pcmops_pointer(
		struct snd_soc_component *component,
		struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
	struct snd_pcm_runtime *rt = substream->runtime;
	struct lpass_pcm_data *pcm_data = rt->private_data;
	const struct lpass_variant *v = drvdata->variant;
	unsigned int base_addr, curr_addr;
	int ret, ch, dir = substream->stream;
	struct regmap *map;
	unsigned int dai_id = cpu_dai->driver->id;

	map = __lpass_get_regmap_handle(substream, component);
	ch = pcm_data->dma_ch;

	ret = regmap_read(map,
			  LPAIF_DMABASE_REG(v, ch, dir, dai_id), &base_addr);
	if (ret) {
		dev_err(soc_runtime->dev,
			"error reading from rdmabase reg: %d\n", ret);
		return ret;
	}

	ret = regmap_read(map,
			  LPAIF_DMACURR_REG(v, ch, dir, dai_id), &curr_addr);
	if (ret) {
		dev_err(soc_runtime->dev,
			"error reading from rdmacurr reg: %d\n", ret);
		return ret;
	}

	return bytes_to_frames(substream->runtime, curr_addr - base_addr);
}

static int lpass_platform_cdc_dma_mmap(struct snd_pcm_substream *substream,
				       struct vm_area_struct *vma)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned long size, offset;

	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	size = vma->vm_end - vma->vm_start;
	offset = vma->vm_pgoff << PAGE_SHIFT;
	return io_remap_pfn_range(vma, vma->vm_start,
				  (runtime->dma_addr + offset) >> PAGE_SHIFT,
				  size, vma->vm_page_prot);
}

static int lpass_platform_pcmops_mmap(struct snd_soc_component *component,
				      struct snd_pcm_substream *substream,
				      struct vm_area_struct *vma)
{
	struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
	unsigned int dai_id = cpu_dai->driver->id;

	if (is_cdc_dma_port(dai_id))
		return lpass_platform_cdc_dma_mmap(substream, vma);

	return snd_pcm_lib_default_mmap(substream, vma);
}

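/*
 * Common DMA interrupt handler: acknowledges and services period, xrun and
 * bus-error interrupts for one channel, using the irq-clear register of the
 * interface the substream belongs to.
 */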
static irqreturn_t lpass_dma_interrupt_handler(
			struct snd_pcm_substream *substream,
			struct lpass_data *drvdata,
			int chan, u32 interrupts)
{
	struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
	const struct lpass_variant *v = drvdata->variant;
	irqreturn_t ret = IRQ_NONE;
	int rv;
	unsigned int reg, val, mask;
	struct regmap *map;
	unsigned int dai_id = cpu_dai->driver->id;

	mask = LPAIF_IRQ_ALL(chan);
	switch (dai_id) {
	case LPASS_DP_RX:
		map = drvdata->hdmiif_map;
		reg = LPASS_HDMITX_APP_IRQCLEAR_REG(v);
		val = (LPAIF_IRQ_HDMI_REQ_ON_PRELOAD(chan) |
		       LPAIF_IRQ_HDMI_METADONE |
		       LPAIF_IRQ_HDMI_SDEEP_AUD_DIS(chan));
		break;
	case MI2S_PRIMARY:
	case MI2S_SECONDARY:
	case MI2S_TERTIARY:
	case MI2S_QUATERNARY:
	case MI2S_QUINARY:
		map = drvdata->lpaif_map;
		reg = LPAIF_IRQCLEAR_REG(v, LPAIF_IRQ_PORT_HOST);
		val = 0;
		break;
	case LPASS_CDC_DMA_RX0 ... LPASS_CDC_DMA_RX9:
	case LPASS_CDC_DMA_TX0 ... LPASS_CDC_DMA_TX8:
		map = drvdata->rxtx_lpaif_map;
		reg = LPAIF_RXTX_IRQCLEAR_REG(v, LPAIF_IRQ_PORT_HOST);
		val = 0;
		break;
	case LPASS_CDC_DMA_VA_TX0 ... LPASS_CDC_DMA_VA_TX8:
		map = drvdata->va_lpaif_map;
		reg = LPAIF_VA_IRQCLEAR_REG(v, LPAIF_IRQ_PORT_HOST);
		val = 0;
		break;
	default:
		dev_err(soc_runtime->dev, "%s: invalid %d interface\n", __func__, dai_id);
		return -EINVAL;
	}
	if (interrupts & LPAIF_IRQ_PER(chan)) {
		rv = regmap_write_bits(map, reg, mask, (LPAIF_IRQ_PER(chan) | val));
		if (rv) {
			dev_err(soc_runtime->dev,
				"error writing to irqclear reg: %d\n", rv);
			return IRQ_NONE;
		}
		snd_pcm_period_elapsed(substream);
		ret = IRQ_HANDLED;
	}

	if (interrupts & LPAIF_IRQ_XRUN(chan)) {
		rv = regmap_write_bits(map, reg, mask, (LPAIF_IRQ_XRUN(chan) | val));
		if (rv) {
			dev_err(soc_runtime->dev,
				"error writing to irqclear reg: %d\n", rv);
			return IRQ_NONE;
		}
		dev_warn_ratelimited(soc_runtime->dev, "xrun warning\n");

		snd_pcm_stop_xrun(substream);
		ret = IRQ_HANDLED;
	}

	if (interrupts & LPAIF_IRQ_ERR(chan)) {
		rv = regmap_write_bits(map, reg, mask, (LPAIF_IRQ_ERR(chan) | val));
		if (rv) {
			dev_err(soc_runtime->dev,
				"error writing to irqclear reg: %d\n", rv);
			return IRQ_NONE;
		}
		dev_err(soc_runtime->dev, "bus access error\n");
		snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED);
		ret = IRQ_HANDLED;
	}

	if (interrupts & val) {
		rv = regmap_write(map, reg, val);
		if (rv) {
			dev_err(soc_runtime->dev,
				"error writing to irqclear reg: %d\n", rv);
			return IRQ_NONE;
		}
		ret = IRQ_HANDLED;
	}

	return ret;
}

static irqreturn_t lpass_platform_lpaif_irq(int irq, void *data)
{
	struct lpass_data *drvdata = data;
	const struct lpass_variant *v = drvdata->variant;
	unsigned int irqs;
	int rv, chan;

	rv = regmap_read(drvdata->lpaif_map,
			 LPAIF_IRQSTAT_REG(v, LPAIF_IRQ_PORT_HOST), &irqs);
	if (rv) {
		pr_err("error reading from irqstat reg: %d\n", rv);
		return IRQ_NONE;
	}

	/* Handle per channel interrupts */
	for (chan = 0; chan < LPASS_MAX_DMA_CHANNELS; chan++) {
		if (irqs & LPAIF_IRQ_ALL(chan) && drvdata->substream[chan]) {
			rv = lpass_dma_interrupt_handler(
						drvdata->substream[chan],
						drvdata, chan, irqs);
			if (rv != IRQ_HANDLED)
				return rv;
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t lpass_platform_hdmiif_irq(int irq, void *data)
{
	struct lpass_data *drvdata = data;
	const struct lpass_variant *v = drvdata->variant;
	unsigned int irqs;
	int rv, chan;

	rv = regmap_read(drvdata->hdmiif_map,
			 LPASS_HDMITX_APP_IRQSTAT_REG(v), &irqs);
	if (rv) {
		pr_err("error reading from irqstat reg: %d\n", rv);
		return IRQ_NONE;
	}

	/* Handle per channel interrupts */
	for (chan = 0; chan < LPASS_MAX_HDMI_DMA_CHANNELS; chan++) {
		if (irqs & (LPAIF_IRQ_ALL(chan) | LPAIF_IRQ_HDMI_REQ_ON_PRELOAD(chan) |
			    LPAIF_IRQ_HDMI_METADONE |
			    LPAIF_IRQ_HDMI_SDEEP_AUD_DIS(chan))
			&& drvdata->hdmi_substream[chan]) {
			rv = lpass_dma_interrupt_handler(
						drvdata->hdmi_substream[chan],
						drvdata, chan, irqs);
			if (rv != IRQ_HANDLED)
				return rv;
		}
	}
	return IRQ_HANDLED;
}

static irqreturn_t lpass_platform_rxtxif_irq(int irq, void *data)
{
	struct lpass_data *drvdata = data;
	const struct lpass_variant *v = drvdata->variant;
	unsigned int irqs;
	irqreturn_t rv;
	int chan;

	rv = regmap_read(drvdata->rxtx_lpaif_map,
			 LPAIF_RXTX_IRQSTAT_REG(v, LPAIF_IRQ_PORT_HOST), &irqs);

	/* Handle per channel interrupts */
	for (chan = 0; chan < LPASS_MAX_CDC_DMA_CHANNELS; chan++) {
		if (irqs & LPAIF_IRQ_ALL(chan) && drvdata->rxtx_substream[chan]) {
			rv = lpass_dma_interrupt_handler(
						drvdata->rxtx_substream[chan],
						drvdata, chan, irqs);
			if (rv != IRQ_HANDLED)
				return rv;
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t lpass_platform_vaif_irq(int irq, void *data)
{
	struct lpass_data *drvdata = data;
	const struct lpass_variant *v = drvdata->variant;
	unsigned int irqs;
	irqreturn_t rv;
	int chan;

	rv = regmap_read(drvdata->va_lpaif_map,
			 LPAIF_VA_IRQSTAT_REG(v, LPAIF_IRQ_PORT_HOST), &irqs);

	/* Handle per channel interrupts */
	for (chan = 0; chan < LPASS_MAX_VA_CDC_DMA_CHANNELS; chan++) {
		if (irqs & LPAIF_IRQ_ALL(chan) && drvdata->va_substream[chan]) {
			rv = lpass_dma_interrupt_handler(
						drvdata->va_substream[chan],
						drvdata, chan, irqs);
			if (rv != IRQ_HANDLED)
				return rv;
		}
	}
	return IRQ_HANDLED;
}

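/*
 * Codec DMA streams use the LPASS LPM memory directly; the preallocated
 * buffer is carved out of that region and mapped for the host with memremap.
 */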
static int lpass_platform_prealloc_cdc_dma_buffer(struct snd_soc_component *component,
						  struct snd_pcm *pcm, int dai_id)
{
	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
	struct snd_pcm_substream *substream;
	struct snd_dma_buffer *buf;

	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream)
		substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
	else
		substream = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream;

	buf = &substream->dma_buffer;
	buf->dev.dev = pcm->card->dev;
	buf->private_data = NULL;

	/* Assign Codec DMA buffer pointers */
	buf->dev.type = SNDRV_DMA_TYPE_CONTINUOUS;

	switch (dai_id) {
	case LPASS_CDC_DMA_RX0 ... LPASS_CDC_DMA_RX9:
		buf->bytes = lpass_platform_rxtx_hardware.buffer_bytes_max;
		buf->addr = drvdata->rxtx_cdc_dma_lpm_buf;
		break;
	case LPASS_CDC_DMA_TX0 ... LPASS_CDC_DMA_TX8:
		buf->bytes = lpass_platform_rxtx_hardware.buffer_bytes_max;
		buf->addr = drvdata->rxtx_cdc_dma_lpm_buf + LPASS_RXTX_CDC_DMA_LPM_BUFF_SIZE;
		break;
	case LPASS_CDC_DMA_VA_TX0 ... LPASS_CDC_DMA_VA_TX8:
		buf->bytes = lpass_platform_va_hardware.buffer_bytes_max;
		buf->addr = drvdata->va_cdc_dma_lpm_buf;
		break;
	default:
		break;
	}

	buf->area = (unsigned char * __force)memremap(buf->addr, buf->bytes, MEMREMAP_WC);

	return 0;
}

static int lpass_platform_pcm_new(struct snd_soc_component *component,
				  struct snd_soc_pcm_runtime *soc_runtime)
{
	struct snd_pcm *pcm = soc_runtime->pcm;
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
	unsigned int dai_id = cpu_dai->driver->id;

	size_t size = lpass_platform_pcm_hardware.buffer_bytes_max;

	/*
	 * Lpass codec dma can access only lpass lpm hardware memory.
	 * ioremap is for HLOS to access hardware memory.
	 */
	if (is_cdc_dma_port(dai_id))
		return lpass_platform_prealloc_cdc_dma_buffer(component, pcm, dai_id);

	return snd_pcm_set_fixed_buffer_all(pcm, SNDRV_DMA_TYPE_NONCOHERENT,
					    component->dev, size);
}

static int lpass_platform_pcmops_suspend(struct snd_soc_component *component)
{
	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
	struct regmap *map;

	if (drvdata->hdmi_port_enable) {
		map = drvdata->hdmiif_map;
		regcache_cache_only(map, true);
		regcache_mark_dirty(map);
	}

	map = drvdata->lpaif_map;
	regcache_cache_only(map, true);
	regcache_mark_dirty(map);

	return 0;
}

static int lpass_platform_pcmops_resume(struct snd_soc_component *component)
{
	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
	struct regmap *map;
	int ret;

	if (drvdata->hdmi_port_enable) {
		map = drvdata->hdmiif_map;
		regcache_cache_only(map, false);
		ret = regcache_sync(map);
		if (ret)
			return ret;
	}

	map = drvdata->lpaif_map;
	regcache_cache_only(map, false);

	return regcache_sync(map);
}

static int lpass_platform_copy(struct snd_soc_component *component,
			       struct snd_pcm_substream *substream, int channel,
			       unsigned long pos, struct iov_iter *buf,
			       unsigned long bytes)
{
	struct snd_pcm_runtime *rt = substream->runtime;
	struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
	unsigned int dai_id = cpu_dai->driver->id;
	int ret = 0;

	void __iomem *dma_buf = (void __iomem *) (rt->dma_area + pos +
				channel * (rt->dma_bytes / rt->channels));

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		if (is_cdc_dma_port(dai_id)) {
			if (copy_from_iter_toio(dma_buf, bytes, buf) != bytes)
				ret = -EFAULT;
		} else {
			if (copy_from_iter((void __force *)dma_buf, bytes, buf) != bytes)
				ret = -EFAULT;
		}
	} else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
		if (is_cdc_dma_port(dai_id)) {
			if (copy_to_iter_fromio(dma_buf, bytes, buf) != bytes)
				ret = -EFAULT;
		} else {
			if (copy_to_iter((void __force *)dma_buf, bytes, buf) != bytes)
				ret = -EFAULT;
		}
	}

	return ret;
}

static const struct snd_soc_component_driver lpass_component_driver = {
	.name		= DRV_NAME,
	.open		= lpass_platform_pcmops_open,
	.close		= lpass_platform_pcmops_close,
	.hw_params	= lpass_platform_pcmops_hw_params,
	.hw_free	= lpass_platform_pcmops_hw_free,
	.prepare	= lpass_platform_pcmops_prepare,
	.trigger	= lpass_platform_pcmops_trigger,
	.pointer	= lpass_platform_pcmops_pointer,
	.mmap		= lpass_platform_pcmops_mmap,
	.pcm_construct	= lpass_platform_pcm_new,
	.suspend	= lpass_platform_pcmops_suspend,
	.resume		= lpass_platform_pcmops_resume,
	.copy		= lpass_platform_copy,
};

int asoc_qcom_lpass_platform_register(struct platform_device *pdev)
{
	struct lpass_data *drvdata = platform_get_drvdata(pdev);
	const struct lpass_variant *v = drvdata->variant;
	int ret;

	drvdata->lpaif_irq = platform_get_irq_byname(pdev, "lpass-irq-lpaif");
	if (drvdata->lpaif_irq < 0)
		return -ENODEV;

	/* ensure audio hardware is disabled */
	ret = regmap_write(drvdata->lpaif_map,
			   LPAIF_IRQEN_REG(v, LPAIF_IRQ_PORT_HOST), 0);
	if (ret) {
		dev_err(&pdev->dev, "error writing to irqen reg: %d\n", ret);
		return ret;
	}

	ret = devm_request_irq(&pdev->dev, drvdata->lpaif_irq,
			       lpass_platform_lpaif_irq, IRQF_TRIGGER_RISING,
			       "lpass-irq-lpaif", drvdata);
	if (ret) {
		dev_err(&pdev->dev, "irq request failed: %d\n", ret);
		return ret;
	}

	ret = lpass_platform_alloc_dmactl_fields(&pdev->dev,
						 drvdata->lpaif_map);
	if (ret) {
		dev_err(&pdev->dev,
			"error initializing dmactl fields: %d\n", ret);
		return ret;
	}

	if (drvdata->codec_dma_enable) {
		ret = regmap_write(drvdata->rxtx_lpaif_map,
				   LPAIF_RXTX_IRQEN_REG(v, LPAIF_IRQ_PORT_HOST), 0x0);
		if (ret) {
			dev_err(&pdev->dev, "error writing to rxtx irqen reg: %d\n", ret);
			return ret;
		}
		ret = regmap_write(drvdata->va_lpaif_map,
				   LPAIF_VA_IRQEN_REG(v, LPAIF_IRQ_PORT_HOST), 0x0);
		if (ret) {
			dev_err(&pdev->dev, "error writing to va irqen reg: %d\n", ret);
			return ret;
		}
		drvdata->rxtxif_irq = platform_get_irq_byname(pdev, "lpass-irq-rxtxif");
		if (drvdata->rxtxif_irq < 0)
			return -ENODEV;

		ret = devm_request_irq(&pdev->dev, drvdata->rxtxif_irq,
				       lpass_platform_rxtxif_irq, 0, "lpass-irq-rxtxif", drvdata);
		if (ret) {
			dev_err(&pdev->dev, "rxtx irq request failed: %d\n", ret);
			return ret;
		}

		ret = lpass_platform_alloc_rxtx_dmactl_fields(&pdev->dev,
							      drvdata->rxtx_lpaif_map);
		if (ret) {
			dev_err(&pdev->dev,
				"error initializing rxtx dmactl fields: %d\n", ret);
			return ret;
		}

		drvdata->vaif_irq = platform_get_irq_byname(pdev, "lpass-irq-vaif");
		if (drvdata->vaif_irq < 0)
			return -ENODEV;

		ret = devm_request_irq(&pdev->dev, drvdata->vaif_irq,
				       lpass_platform_vaif_irq, 0, "lpass-irq-vaif", drvdata);
		if (ret) {
			dev_err(&pdev->dev, "va irq request failed: %d\n", ret);
			return ret;
		}

		ret = lpass_platform_alloc_va_dmactl_fields(&pdev->dev,
							    drvdata->va_lpaif_map);
		if (ret) {
			dev_err(&pdev->dev,
				"error initializing va dmactl fields: %d\n", ret);
			return ret;
		}
	}

	if (drvdata->hdmi_port_enable) {
		drvdata->hdmiif_irq = platform_get_irq_byname(pdev, "lpass-irq-hdmi");
		if (drvdata->hdmiif_irq < 0)
			return -ENODEV;

		ret = devm_request_irq(&pdev->dev, drvdata->hdmiif_irq,
				       lpass_platform_hdmiif_irq, 0, "lpass-irq-hdmi", drvdata);
		if (ret) {
			dev_err(&pdev->dev, "irq hdmi request failed: %d\n", ret);
			return ret;
		}
		ret = regmap_write(drvdata->hdmiif_map,
				   LPASS_HDMITX_APP_IRQEN_REG(v), 0);
		if (ret) {
			dev_err(&pdev->dev, "error writing to hdmi irqen reg: %d\n", ret);
			return ret;
		}

		ret = lpass_platform_alloc_hdmidmactl_fields(&pdev->dev,
							     drvdata->hdmiif_map);
		if (ret) {
			dev_err(&pdev->dev,
				"error initializing hdmidmactl fields: %d\n", ret);
			return ret;
		}
	}
	return devm_snd_soc_register_component(&pdev->dev,
					       &lpass_component_driver, NULL, 0);
}
EXPORT_SYMBOL_GPL(asoc_qcom_lpass_platform_register);

MODULE_DESCRIPTION("QTi LPASS Platform Driver");
MODULE_LICENSE("GPL");