GitHub Repository: PojavLauncherTeam/jdk17u
Path: blob/master/src/java.desktop/macosx/native/libjsound/PLATFORM_API_MacOSX_PCM.cpp
/*
 * Copyright (c) 2002, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation. Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

//#define USE_ERROR
//#define USE_TRACE
//#define USE_VERBOSE_TRACE

#include <AudioUnit/AudioUnit.h>
#include <AudioToolbox/AudioConverter.h>
#include <pthread.h>
#include <math.h>
/*
#if !defined(__COREAUDIO_USE_FLAT_INCLUDES__)
#include <CoreAudio/CoreAudioTypes.h>
#else
#include <CoreAudioTypes.h>
#endif
*/

#include "PLATFORM_API_MacOSX_Utils.h"

extern "C" {
#include "Utilities.h"
#include "DirectAudio.h"
#if TARGET_OS_IPHONE
void DAUDIO_RequestRecordPermission();
#endif
}

#if USE_DAUDIO == TRUE


#ifdef USE_TRACE
static void PrintStreamDesc(const AudioStreamBasicDescription *inDesc) {
    TRACE4("ID='%c%c%c%c'", (char)(inDesc->mFormatID >> 24), (char)(inDesc->mFormatID >> 16), (char)(inDesc->mFormatID >> 8), (char)(inDesc->mFormatID));
    TRACE2(", %f Hz, flags=0x%lX", (float)inDesc->mSampleRate, (long unsigned)inDesc->mFormatFlags);
    TRACE2(", %ld channels, %ld bits", (long)inDesc->mChannelsPerFrame, (long)inDesc->mBitsPerChannel);
    TRACE1(", %ld bytes per frame\n", (long)inDesc->mBytesPerFrame);
}
#else
static inline void PrintStreamDesc(const AudioStreamBasicDescription *inDesc) { }
#endif


#define MAX(x, y) ((x) >= (y) ? (x) : (y))
#define MIN(x, y) ((x) <= (y) ? (x) : (y))


// =======================================
// MixerProvider functions implementation

static DeviceList deviceCache;

INT32 DAUDIO_GetDirectAudioDeviceCount() {
#if TARGET_OS_IPHONE
    DAUDIO_RequestRecordPermission();
#endif
    deviceCache.Refresh();
    int count = deviceCache.GetCount();
    if (count > 0) {
        // add "default" device
        count++;
        TRACE1("DAUDIO_GetDirectAudioDeviceCount: returns %d devices\n", count);
    } else {
        TRACE0("DAUDIO_GetDirectAudioDeviceCount: no devices found\n");
    }
    return count;
}

INT32 DAUDIO_GetDirectAudioDeviceDescription(INT32 mixerIndex, DirectAudioDeviceDescription *desc) {
    bool result = true;
    desc->deviceID = 0;
    if (mixerIndex == 0) {
        // default device
        strncpy(desc->name, "Default Audio Device", DAUDIO_STRING_LENGTH);
        strncpy(desc->description, "Default Audio Device", DAUDIO_STRING_LENGTH);
        desc->maxSimulLines = -1;
    } else {
        AudioDeviceID deviceID;
        result = deviceCache.GetDeviceInfo(mixerIndex-1, &deviceID, DAUDIO_STRING_LENGTH,
                desc->name, desc->vendor, desc->description, desc->version);
        if (result) {
            desc->deviceID = (INT32)deviceID;
            desc->maxSimulLines = -1;
        }
    }
    return result ? TRUE : FALSE;
}


void DAUDIO_GetFormats(INT32 mixerIndex, INT32 deviceID, int isSource, void* creator) {
    TRACE3(">>DAUDIO_GetFormats mixerIndex=%d deviceID=0x%x isSource=%d\n", (int)mixerIndex, (int)deviceID, isSource);

    AudioDeviceID audioDeviceID = deviceID == 0 ? GetDefaultDevice(isSource) : (AudioDeviceID)deviceID;

    if (audioDeviceID == 0) {
        return;
    }

    int totalChannels = GetChannelCount(audioDeviceID, isSource);

    if (totalChannels == 0) {
        TRACE0("<<DAUDIO_GetFormats, no streams!\n");
        return;
    }

    if (isSource && totalChannels < 2) {
        // report 2 channels even if only mono is supported
        totalChannels = 2;
    }

    int channels[] = {1, 2, totalChannels};
    int channelsCount = MIN(totalChannels, 3);

    float hardwareSampleRate = GetSampleRate(audioDeviceID, isSource);
    TRACE2(" DAUDIO_GetFormats: got %d channels, sampleRate == %f\n", totalChannels, hardwareSampleRate);

    // any sample rates are supported
    float sampleRate = -1;

    static int sampleBits[] = {8, 16, 24};
    static int sampleBitsCount = sizeof(sampleBits)/sizeof(sampleBits[0]);

    // the last audio format is the default one (used by DataLine.open() if format is not specified)
    // consider as default 16bit PCM stereo (mono if stereo is not supported) with the current sample rate
    int defBits = 16;
    int defChannels = MIN(2, channelsCount);
    float defSampleRate = hardwareSampleRate;
    // don't add default format if sample rate is not specified
    bool addDefault = defSampleRate > 0;

    // TODO: CoreAudio can handle signed/unsigned, little-endian/big-endian
    // TODO: register the formats (to prevent DirectAudio software conversion) - need to fix DirectAudioDevice.createDataLineInfo
    // to avoid software conversions if both signed/unsigned or big-/little-endian are supported
    for (int channelIndex = 0; channelIndex < channelsCount; channelIndex++) {
        for (int bitIndex = 0; bitIndex < sampleBitsCount; bitIndex++) {
            int bits = sampleBits[bitIndex];
            if (addDefault && bits == defBits && channels[channelIndex] != defChannels && sampleRate == defSampleRate) {
                // the format is the default one, don't add it now
                continue;
            }
            DAUDIO_AddAudioFormat(creator,
                bits,                       // sample size in bits
                -1,                         // frame size (auto)
                channels[channelIndex],     // channels
                sampleRate,                 // sample rate
                DAUDIO_PCM,                 // only accept PCM
                bits == 8 ? FALSE : TRUE,   // signed
                bits == 8 ? FALSE           // little-endian for 8bit
                    : UTIL_IsBigEndianPlatform());
        }
    }
    // add default format
    if (addDefault) {
        DAUDIO_AddAudioFormat(creator,
            defBits,                        // 16 bits
            -1,                             // automatically calculate frame size
            defChannels,                    // channels
            defSampleRate,                  // sample rate
            DAUDIO_PCM,                     // PCM
            TRUE,                           // signed
            UTIL_IsBigEndianPlatform());    // native endianness
    }

    TRACE0("<<DAUDIO_GetFormats\n");
}


// =======================================
// Source/Target DataLine functions implementation

// ====
/* 1writer-1reader ring buffer class with flush() support */
class RingBuffer {
public:
    RingBuffer() : pBuffer(NULL), nBufferSize(0) {
        pthread_mutex_init(&lockMutex, NULL);
    }
    ~RingBuffer() {
        Deallocate();
        pthread_mutex_destroy(&lockMutex);
    }

    // extraBytes: number of additionally allocated bytes to prevent data
    // overlapping when the buffer is almost full
    // (required only if Write() can overwrite the buffer)
    bool Allocate(int requestedBufferSize, int extraBytes) {
        int fullBufferSize = requestedBufferSize + extraBytes;
        long powerOfTwo = 1;
        while (powerOfTwo < fullBufferSize) {
            powerOfTwo <<= 1;
        }
        if (powerOfTwo > INT_MAX || fullBufferSize < 0) {
            ERROR0("RingBuffer::Allocate: REQUESTED MEMORY SIZE IS TOO BIG\n");
            return false;
        }
        pBuffer = (Byte*)malloc(powerOfTwo);
        if (pBuffer == NULL) {
            ERROR0("RingBuffer::Allocate: OUT OF MEMORY\n");
            return false;
        }

        nBufferSize = requestedBufferSize;
        nAllocatedBytes = powerOfTwo;
        nPosMask = powerOfTwo - 1;
        nWritePos = 0;
        nReadPos = 0;
        nFlushPos = -1;

        TRACE2("RingBuffer::Allocate: OK, bufferSize=%d, allocated:%d\n", nBufferSize, nAllocatedBytes);
        return true;
    }

    void Deallocate() {
        if (pBuffer) {
            free(pBuffer);
            pBuffer = NULL;
            nBufferSize = 0;
        }
    }

    inline int GetBufferSize() {
        return nBufferSize;
    }

    inline int GetAllocatedSize() {
        return nAllocatedBytes;
    }

    // gets number of bytes available for reading
    int GetValidByteCount() {
        lock();
        INT64 result = nWritePos - (nFlushPos >= 0 ? nFlushPos : nReadPos);
        unlock();
        return result > (INT64)nBufferSize ? nBufferSize : (int)result;
    }

    int Write(void *srcBuffer, int len, bool preventOverflow) {
        lock();
        TRACE2("RingBuffer::Write (%d bytes, preventOverflow=%d)\n", len, preventOverflow ? 1 : 0);
        TRACE2(" writePos = %lld (%d)", (long long)nWritePos, Pos2Offset(nWritePos));
        TRACE2(" readPos=%lld (%d)", (long long)nReadPos, Pos2Offset(nReadPos));
        TRACE2(" flushPos=%lld (%d)\n", (long long)nFlushPos, Pos2Offset(nFlushPos));

        INT64 writePos = nWritePos;
        if (preventOverflow) {
            INT64 avail_read = writePos - (nFlushPos >= 0 ? nFlushPos : nReadPos);
            if (avail_read >= (INT64)nBufferSize) {
                // no space
                TRACE0(" preventOverflow: OVERFLOW => len = 0;\n");
                len = 0;
            } else {
                int avail_write = nBufferSize - (int)avail_read;
                if (len > avail_write) {
                    TRACE2(" preventOverflow: decrease len: %d => %d\n", len, avail_write);
                    len = avail_write;
                }
            }
        }
        unlock();

        if (len > 0) {

            write((Byte *)srcBuffer, Pos2Offset(writePos), len);

            lock();
            TRACE4("--RingBuffer::Write writePos: %lld (%d) => %lld, (%d)\n",
                (long long)nWritePos, Pos2Offset(nWritePos), (long long)nWritePos + len, Pos2Offset(nWritePos + len));
            nWritePos += len;
            unlock();
        }
        return len;
    }

    int Read(void *dstBuffer, int len) {
        lock();
        TRACE1("RingBuffer::Read (%d bytes)\n", len);
        TRACE2(" writePos = %lld (%d)", (long long)nWritePos, Pos2Offset(nWritePos));
        TRACE2(" readPos=%lld (%d)", (long long)nReadPos, Pos2Offset(nReadPos));
        TRACE2(" flushPos=%lld (%d)\n", (long long)nFlushPos, Pos2Offset(nFlushPos));

        applyFlush();
        INT64 avail_read = nWritePos - nReadPos;
        // check for overflow
        if (avail_read > (INT64)nBufferSize) {
            nReadPos = nWritePos - nBufferSize;
            avail_read = nBufferSize;
            TRACE0(" OVERFLOW\n");
        }
        INT64 readPos = nReadPos;
        unlock();

        if (len > (int)avail_read) {
            TRACE2(" RingBuffer::Read - don't have enough data, len: %d => %d\n", len, (int)avail_read);
            len = (int)avail_read;
        }

        if (len > 0) {

            read((Byte *)dstBuffer, Pos2Offset(readPos), len);

            lock();
            if (applyFlush()) {
                // just got flush(), results became obsolete
                TRACE0("--RingBuffer::Read, got Flush, return 0\n");
                len = 0;
            } else {
                TRACE4("--RingBuffer::Read readPos: %lld (%d) => %lld (%d)\n",
                    (long long)nReadPos, Pos2Offset(nReadPos), (long long)nReadPos + len, Pos2Offset(nReadPos + len));
                nReadPos += len;
            }
            unlock();
        } else {
            // underrun!
        }
        return len;
    }

    // returns number of the flushed bytes
    int Flush() {
        lock();
        INT64 flushedBytes = nWritePos - (nFlushPos >= 0 ? nFlushPos : nReadPos);
        nFlushPos = nWritePos;
        unlock();
        return flushedBytes > (INT64)nBufferSize ? nBufferSize : (int)flushedBytes;
    }

private:
    Byte *pBuffer;
    int nBufferSize;
    int nAllocatedBytes;
    INT64 nPosMask;

    pthread_mutex_t lockMutex;

    volatile INT64 nWritePos;
    volatile INT64 nReadPos;
    // Flush() sets nFlushPos value to nWritePos;
    // next Read() sets nReadPos to nFlushPos and resets nFlushPos to -1
    volatile INT64 nFlushPos;

    inline void lock() {
        pthread_mutex_lock(&lockMutex);
    }
    inline void unlock() {
        pthread_mutex_unlock(&lockMutex);
    }

    inline bool applyFlush() {
        if (nFlushPos >= 0) {
            nReadPos = nFlushPos;
            nFlushPos = -1;
            return true;
        }
        return false;
    }

    inline int Pos2Offset(INT64 pos) {
        return (int)(pos & nPosMask);
    }

    void write(Byte *srcBuffer, int dstOffset, int len) {
        int dstEndOffset = dstOffset + len;

        int lenAfterWrap = dstEndOffset - nAllocatedBytes;
        if (lenAfterWrap > 0) {
            // dest. buffer does wrap
            len = nAllocatedBytes - dstOffset;
            memcpy(pBuffer+dstOffset, srcBuffer, len);
            memcpy(pBuffer, srcBuffer+len, lenAfterWrap);
        } else {
            // dest. buffer does not wrap
            memcpy(pBuffer+dstOffset, srcBuffer, len);
        }
    }

    void read(Byte *dstBuffer, int srcOffset, int len) {
        int srcEndOffset = srcOffset + len;

        int lenAfterWrap = srcEndOffset - nAllocatedBytes;
        if (lenAfterWrap > 0) {
            // need to unwrap data
            len = nAllocatedBytes - srcOffset;
            memcpy(dstBuffer, pBuffer+srcOffset, len);
            memcpy(dstBuffer+len, pBuffer, lenAfterWrap);
        } else {
            // source buffer is not wrapped
            memcpy(dstBuffer, pBuffer+srcOffset, len);
        }
    }
};


class Resampler {
private:
    enum {
        kResamplerEndOfInputData = 1 // error to interrupt conversion (end of input data)
    };
public:
    Resampler() : converter(NULL), outBuffer(NULL) { }
    ~Resampler() {
        if (converter != NULL) {
            AudioConverterDispose(converter);
        }
        if (outBuffer != NULL) {
            free(outBuffer);
        }
    }

    // inFormat & outFormat must be interleaved!
    bool Init(const AudioStreamBasicDescription *inFormat, const AudioStreamBasicDescription *outFormat,
            int inputBufferSizeInBytes)
    {
        TRACE0(">>Resampler::Init\n");
        TRACE0(" inFormat: ");
        PrintStreamDesc(inFormat);
        TRACE0(" outFormat: ");
        PrintStreamDesc(outFormat);
        TRACE1(" inputBufferSize: %d bytes\n", inputBufferSizeInBytes);
        OSStatus err;

        if ((outFormat->mFormatFlags & kAudioFormatFlagIsNonInterleaved) != 0 && outFormat->mChannelsPerFrame != 1) {
            ERROR0("Resampler::Init ERROR: outFormat is non-interleaved\n");
            return false;
        }
        if ((inFormat->mFormatFlags & kAudioFormatFlagIsNonInterleaved) != 0 && inFormat->mChannelsPerFrame != 1) {
            ERROR0("Resampler::Init ERROR: inFormat is non-interleaved\n");
            return false;
        }

        memcpy(&asbdIn, inFormat, sizeof(AudioStreamBasicDescription));
        memcpy(&asbdOut, outFormat, sizeof(AudioStreamBasicDescription));

        err = AudioConverterNew(inFormat, outFormat, &converter);

        if (err || converter == NULL) {
            OS_ERROR1(err, "Resampler::Init (AudioConverterNew), converter=%p", converter);
            return false;
        }

        // allocate buffer for output data
        int maximumInFrames = inputBufferSizeInBytes / inFormat->mBytesPerFrame;
        // take into account trailingFrames
        AudioConverterPrimeInfo primeInfo = {0, 0};
        UInt32 sizePrime = sizeof(primeInfo);
        err = AudioConverterGetProperty(converter, kAudioConverterPrimeInfo, &sizePrime, &primeInfo);
        if (err) {
            OS_ERROR0(err, "Resampler::Init (get kAudioConverterPrimeInfo)");
            // ignore the error
        } else {
            // the default primeMethod is kConverterPrimeMethod_Normal, so we need only trailingFrames
            maximumInFrames += primeInfo.trailingFrames;
        }
        float outBufferSizeInFrames = (outFormat->mSampleRate / inFormat->mSampleRate) * ((float)maximumInFrames);
        // to avoid complex calculation just set outBufferSize as double of the calculated value
        outBufferSize = (int)outBufferSizeInFrames * outFormat->mBytesPerFrame * 2;
        // safety check - consider 256 frames as the minimum input buffer
        int minOutSize = 256 * outFormat->mBytesPerFrame;
        if (outBufferSize < minOutSize) {
            outBufferSize = minOutSize;
        }

        outBuffer = malloc(outBufferSize);

        if (outBuffer == NULL) {
            ERROR1("Resampler::Init ERROR: malloc failed (%d bytes)\n", outBufferSize);
            AudioConverterDispose(converter);
            converter = NULL;
            return false;
        }

        TRACE1(" allocated: %d bytes for output buffer\n", outBufferSize);

        TRACE0("<<Resampler::Init: OK\n");
        return true;
    }

    // returns size of the internal output buffer
    int GetOutBufferSize() {
        return outBufferSize;
    }

    // process next part of data (writes resampled data to the ringBuffer without overflow check)
    int Process(void *srcBuffer, int len, RingBuffer *ringBuffer) {
        int bytesWritten = 0;
        TRACE2(">>Resampler::Process: %d bytes, converter = %p\n", len, converter);
        if (converter == NULL) { // sanity check
            bytesWritten = ringBuffer->Write(srcBuffer, len, false);
        } else {
            InputProcData data;
            data.pThis = this;
            data.data = (Byte *)srcBuffer;
            data.dataSize = len;

            OSStatus err;
            do {
                AudioBufferList abl; // by default it contains 1 AudioBuffer
                abl.mNumberBuffers = 1;
                abl.mBuffers[0].mNumberChannels = asbdOut.mChannelsPerFrame;
                abl.mBuffers[0].mDataByteSize = outBufferSize;
                abl.mBuffers[0].mData = outBuffer;

                UInt32 packets = (UInt32)outBufferSize / asbdOut.mBytesPerPacket;

                TRACE2(">>AudioConverterFillComplexBuffer: request %d packets, provide %d bytes buffer\n",
                    (int)packets, (int)abl.mBuffers[0].mDataByteSize);

                err = AudioConverterFillComplexBuffer(converter, ConverterInputProc, &data, &packets, &abl, NULL);

                TRACE2("<<AudioConverterFillComplexBuffer: got %d packets (%d bytes)\n",
                    (int)packets, (int)abl.mBuffers[0].mDataByteSize);
                if (packets > 0) {
                    int bytesToWrite = (int)(packets * asbdOut.mBytesPerPacket);
                    bytesWritten += ringBuffer->Write(abl.mBuffers[0].mData, bytesToWrite, false);
                }

                // if outputBuffer is too small to store all available frames,
                // we get noErr here. In this case just continue the conversion
            } while (err == noErr);

            if (err != kResamplerEndOfInputData) {
                // unexpected error
                OS_ERROR0(err, "Resampler::Process (AudioConverterFillComplexBuffer)");
            }
        }
        TRACE2("<<Resampler::Process: written %d bytes (converted from %d bytes)\n", bytesWritten, len);

        return bytesWritten;
    }

    // resets internal buffers
    void Discontinue() {
        TRACE0(">>Resampler::Discontinue\n");
        if (converter != NULL) {
            AudioConverterReset(converter);
        }
        TRACE0("<<Resampler::Discontinue\n");
    }

private:
    AudioConverterRef converter;

    // buffer for output data
    // note that there is no problem if the buffer is not big enough to store
    // all converted data - it's only a performance issue
    void *outBuffer;
    int outBufferSize;

    AudioStreamBasicDescription asbdIn;
    AudioStreamBasicDescription asbdOut;

    struct InputProcData {
        Resampler *pThis;
        Byte *data;   // data == NULL means we handle Discontinue(false)
        int dataSize; // == 0 if all data was already provided to the converter or we handle Discontinue(false)
    };

    static OSStatus ConverterInputProc(AudioConverterRef inAudioConverter, UInt32 *ioNumberDataPackets,
            AudioBufferList *ioData, AudioStreamPacketDescription **outDataPacketDescription, void *inUserData)
    {
        InputProcData *data = (InputProcData *)inUserData;

        TRACE3(" >>ConverterInputProc: requested %d packets, data contains %d bytes (%d packets)\n",
            (int)*ioNumberDataPackets, (int)data->dataSize, (int)(data->dataSize / data->pThis->asbdIn.mBytesPerPacket));
        if (data->dataSize == 0) {
            // already called & provided all input data
            // interrupt conversion by returning error
            *ioNumberDataPackets = 0;
            TRACE0(" <<ConverterInputProc: returns kResamplerEndOfInputData\n");
            return kResamplerEndOfInputData;
        }

        ioData->mNumberBuffers = 1;
        ioData->mBuffers[0].mNumberChannels = data->pThis->asbdIn.mChannelsPerFrame;
        ioData->mBuffers[0].mDataByteSize = data->dataSize;
        ioData->mBuffers[0].mData = data->data;

        *ioNumberDataPackets = data->dataSize / data->pThis->asbdIn.mBytesPerPacket;

        // all data has been provided to the converter
        data->dataSize = 0;

        TRACE1(" <<ConverterInputProc: returns %d packets\n", (int)(*ioNumberDataPackets));
        return noErr;
    }

};


struct OSX_DirectAudioDevice {
    AudioUnit audioUnit;
    RingBuffer ringBuffer;
    AudioStreamBasicDescription asbd;

    // only for target lines
    UInt32 inputBufferSizeInBytes;
    Resampler *resampler;
    // to detect discontinuity (to reset resampler)
    SInt64 lastWrittenSampleTime;


    OSX_DirectAudioDevice() : audioUnit(NULL), asbd(), resampler(NULL), lastWrittenSampleTime(0) {
    }

    ~OSX_DirectAudioDevice() {
        if (audioUnit) {
            AudioComponentInstanceDispose(audioUnit);
        }
        if (resampler) {
            delete resampler;
        }
    }
};

static AudioUnit CreateOutputUnit(AudioDeviceID deviceID, int isSource)
{
    OSStatus err;
    AudioUnit unit;

    AudioComponentDescription desc;
    desc.componentType = kAudioUnitType_Output;
#if !TARGET_OS_IPHONE
    desc.componentSubType = (deviceID == 0 && isSource) ? kAudioUnitSubType_DefaultOutput : kAudioUnitSubType_HALOutput;
#else
    desc.componentSubType = kAudioUnitSubType_RemoteIO;
#endif
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;

    AudioComponent comp = AudioComponentFindNext(NULL, &desc);
    err = AudioComponentInstanceNew(comp, &unit);

    if (err) {
        OS_ERROR0(err, "CreateOutputUnit:OpenAComponent");
        return NULL;
    }

    if (!isSource) {
        int enableIO = 0;
        err = AudioUnitSetProperty(unit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output,
                0, &enableIO, sizeof(enableIO));
        if (err) {
            OS_ERROR0(err, "SetProperty (output EnableIO)");
        }
        enableIO = 1;
        err = AudioUnitSetProperty(unit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input,
                1, &enableIO, sizeof(enableIO));
        if (err) {
            OS_ERROR0(err, "SetProperty (input EnableIO)");
        }

        if (!deviceID) {
            // get real AudioDeviceID for default input device (macosx current input device)
            deviceID = GetDefaultDevice(isSource);
            if (!deviceID) {
                AudioComponentInstanceDispose(unit);
                return NULL;
            }
        }
    }

    if (deviceID) {
        err = AudioUnitSetProperty(unit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global,
                0, &deviceID, sizeof(deviceID));
        if (err) {
            OS_ERROR0(err, "SetProperty (CurrentDevice)");
            AudioComponentInstanceDispose(unit);
            return NULL;
        }
    }

    return unit;
}

static OSStatus OutputCallback(void *inRefCon,
        AudioUnitRenderActionFlags *ioActionFlags,
        const AudioTimeStamp *inTimeStamp,
        UInt32 inBusNumber,
        UInt32 inNumberFrames,
        AudioBufferList *ioData)
{
    OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)inRefCon;

    int nchannels = ioData->mNumberBuffers; // should always be == 1 (interleaved channels)
    AudioBuffer *audioBuffer = ioData->mBuffers;

    TRACE3(">>OutputCallback: busNum=%d, requested %d frames (%d bytes)\n",
        (int)inBusNumber, (int)inNumberFrames, (int)(inNumberFrames * device->asbd.mBytesPerFrame));
    TRACE3(" abl: %d buffers, buffer[0].channels=%d, buffer.size=%d\n",
        nchannels, (int)audioBuffer->mNumberChannels, (int)audioBuffer->mDataByteSize);

    int bytesToRead = inNumberFrames * device->asbd.mBytesPerFrame;
    if (bytesToRead > (int)audioBuffer->mDataByteSize) {
        TRACE0("--OutputCallback: !!! audioBuffer IS TOO SMALL!!!\n");
        bytesToRead = audioBuffer->mDataByteSize / device->asbd.mBytesPerFrame * device->asbd.mBytesPerFrame;
    }
    int bytesRead = device->ringBuffer.Read(audioBuffer->mData, bytesToRead);
    if (bytesRead < bytesToRead) {
        // not enough data (underrun)
        TRACE2("--OutputCallback: !!! UNDERRUN (read %d bytes of %d)!!!\n", bytesRead, bytesToRead);
        // silence the rest
        memset((Byte*)audioBuffer->mData + bytesRead, 0, bytesToRead-bytesRead);
        bytesRead = bytesToRead;
    }

    audioBuffer->mDataByteSize = (UInt32)bytesRead;
    // SAFETY: set mDataByteSize for all other AudioBuffers in the AudioBufferList to zero
    while (--nchannels > 0) {
        audioBuffer++;
        audioBuffer->mDataByteSize = 0;
    }
    TRACE1("<<OutputCallback (returns %d)\n", bytesRead);

    return noErr;
}

static OSStatus InputCallback(void *inRefCon,
        AudioUnitRenderActionFlags *ioActionFlags,
        const AudioTimeStamp *inTimeStamp,
        UInt32 inBusNumber,
        UInt32 inNumberFrames,
        AudioBufferList *ioData)
{
    OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)inRefCon;

    TRACE4(">>InputCallback: busNum=%d, timeStamp=%lld, %d frames (%d bytes)\n",
        (int)inBusNumber, (long long)inTimeStamp->mSampleTime, (int)inNumberFrames, (int)(inNumberFrames * device->asbd.mBytesPerFrame));

    AudioBufferList abl; // by default it contains 1 AudioBuffer
    abl.mNumberBuffers = 1;
    abl.mBuffers[0].mNumberChannels = device->asbd.mChannelsPerFrame;
    abl.mBuffers[0].mDataByteSize = device->inputBufferSizeInBytes; // assume this is == (inNumberFrames * device->asbd.mBytesPerFrame)
    abl.mBuffers[0].mData = NULL; // request for the audioUnit's buffer

    OSStatus err = AudioUnitRender(device->audioUnit, ioActionFlags, inTimeStamp, inBusNumber, inNumberFrames, &abl);
    if (err) {
        OS_ERROR0(err, "<<InputCallback: AudioUnitRender");
    } else {
        if (device->resampler != NULL) {
            // test for discontinuity
            // AUHAL starts timestamps at zero, so test if the current timestamp is less than the last written
            SInt64 sampleTime = inTimeStamp->mSampleTime;
            if (sampleTime < device->lastWrittenSampleTime) {
                // discontinuity, reset the resampler
                TRACE2(" InputCallback (RESAMPLED), DISCONTINUITY (%f -> %f)\n",
                    (float)device->lastWrittenSampleTime, (float)sampleTime);

                device->resampler->Discontinue();
            } else {
                TRACE2(" InputCallback (RESAMPLED), continuous: lastWrittenSampleTime = %f, sampleTime=%f\n",
                    (float)device->lastWrittenSampleTime, (float)sampleTime);
            }
            device->lastWrittenSampleTime = sampleTime + inNumberFrames;

            int bytesWritten = device->resampler->Process(abl.mBuffers[0].mData, (int)abl.mBuffers[0].mDataByteSize, &device->ringBuffer);
            TRACE2("<<InputCallback (RESAMPLED, saved %d bytes of %d)\n", bytesWritten, (int)abl.mBuffers[0].mDataByteSize);
        } else {
            int bytesWritten = device->ringBuffer.Write(abl.mBuffers[0].mData, (int)abl.mBuffers[0].mDataByteSize, false);
            TRACE2("<<InputCallback (saved %d bytes of %d)\n", bytesWritten, (int)abl.mBuffers[0].mDataByteSize);
        }
    }

    return noErr;
}


static void FillASBDForNonInterleavedPCM(AudioStreamBasicDescription& asbd,
        float sampleRate, int channels, int sampleSizeInBits, bool isFloat, int isSigned, bool isBigEndian)
{
    // FillOutASBDForLPCM cannot produce unsigned integer format
    asbd.mSampleRate = sampleRate;
    asbd.mFormatID = kAudioFormatLinearPCM;
    asbd.mFormatFlags = (isFloat ? kAudioFormatFlagIsFloat : (isSigned ? kAudioFormatFlagIsSignedInteger : 0))
        | (isBigEndian ? (kAudioFormatFlagIsBigEndian) : 0)
        | kAudioFormatFlagIsPacked;
    asbd.mBytesPerPacket = channels * ((sampleSizeInBits + 7) / 8);
    asbd.mFramesPerPacket = 1;
    asbd.mBytesPerFrame = asbd.mBytesPerPacket;
    asbd.mChannelsPerFrame = channels;
    asbd.mBitsPerChannel = sampleSizeInBits;
}

void* DAUDIO_Open(INT32 mixerIndex, INT32 deviceID, int isSource,
        int encoding, float sampleRate, int sampleSizeInBits,
        int frameSize, int channels,
        int isSigned, int isBigEndian, int bufferSizeInBytes)
{
    TRACE3(">>DAUDIO_Open: mixerIndex=%d deviceID=0x%x isSource=%d\n", (int)mixerIndex, (unsigned int)deviceID, isSource);
    TRACE3(" sampleRate=%d sampleSizeInBits=%d channels=%d\n", (int)sampleRate, sampleSizeInBits, channels);
#ifdef USE_TRACE
    {
        AudioDeviceID audioDeviceID = deviceID;
        if (audioDeviceID == 0) {
            // default device
            audioDeviceID = GetDefaultDevice(isSource);
        }
        char name[256];
        OSStatus err = GetAudioObjectProperty(audioDeviceID, kAudioUnitScope_Global, kAudioDevicePropertyDeviceName, 256, &name, 0);
        if (err != noErr) {
            OS_ERROR1(err, " audioDeviceID=0x%x, name is N/A:", (int)audioDeviceID);
        } else {
            TRACE2(" audioDeviceID=0x%x, name=%s\n", (int)audioDeviceID, name);
        }
    }
#endif

    if (encoding != DAUDIO_PCM) {
        ERROR1("<<DAUDIO_Open: ERROR: unsupported encoding (%d)\n", encoding);
        return NULL;
    }
    if (channels <= 0) {
        ERROR1("<<DAUDIO_Open: ERROR: Invalid number of channels=%d!\n", channels);
        return NULL;
    }

    OSX_DirectAudioDevice *device = new OSX_DirectAudioDevice();

    AudioUnitScope scope = isSource ? kAudioUnitScope_Input : kAudioUnitScope_Output;
    int element = isSource ? 0 : 1;
    OSStatus err = noErr;
    int extraBufferBytes = 0;

    device->audioUnit = CreateOutputUnit(deviceID, isSource);

    if (!device->audioUnit) {
        delete device;
        return NULL;
    }

    if (!isSource) {
        AudioDeviceID actualDeviceID = deviceID != 0 ? deviceID : GetDefaultDevice(isSource);
        float hardwareSampleRate = GetSampleRate(actualDeviceID, isSource);
        TRACE2("--DAUDIO_Open: sampleRate = %f, hardwareSampleRate=%f\n", sampleRate, hardwareSampleRate);

        if (fabs(sampleRate - hardwareSampleRate) > 1) {
            device->resampler = new Resampler();

            // request HAL for Float32 with native endianness
            FillASBDForNonInterleavedPCM(device->asbd, hardwareSampleRate, channels, 32, true, false, kAudioFormatFlagsNativeEndian != 0);
        } else {
            sampleRate = hardwareSampleRate; // in case sample rates are not exactly equal
        }
    }

    if (device->resampler == NULL) {
        // no resampling, request HAL for the requested format
        FillASBDForNonInterleavedPCM(device->asbd, sampleRate, channels, sampleSizeInBits, false, isSigned, isBigEndian);
    }

    err = AudioUnitSetProperty(device->audioUnit, kAudioUnitProperty_StreamFormat, scope, element, &device->asbd, sizeof(device->asbd));
    if (err) {
        OS_ERROR0(err, "<<DAUDIO_Open set StreamFormat");
        delete device;
        return NULL;
    }

    AURenderCallbackStruct output;
    output.inputProc = isSource ? OutputCallback : InputCallback;
    output.inputProcRefCon = device;

    err = AudioUnitSetProperty(device->audioUnit,
            isSource
                ? (AudioUnitPropertyID)kAudioUnitProperty_SetRenderCallback
                : (AudioUnitPropertyID)kAudioOutputUnitProperty_SetInputCallback,
            kAudioUnitScope_Global, 0, &output, sizeof(output));
    if (err) {
        OS_ERROR0(err, "<<DAUDIO_Open set RenderCallback");
        delete device;
        return NULL;
    }

    err = AudioUnitInitialize(device->audioUnit);
    if (err) {
        OS_ERROR0(err, "<<DAUDIO_Open UnitInitialize");
        delete device;
        return NULL;
    }

    if (!isSource) {
        // for target lines we need extra bytes in the ringBuffer
        // to prevent collisions when InputCallback overwrites data on overflow
        UInt32 size;
        OSStatus err;

        size = sizeof(device->inputBufferSizeInBytes);
        err = AudioUnitGetProperty(device->audioUnit, kAudioDevicePropertyBufferFrameSize, kAudioUnitScope_Global,
                0, &device->inputBufferSizeInBytes, &size);
        if (err) {
            OS_ERROR0(err, "<<DAUDIO_Open (TargetDataLine)GetBufferSize\n");
            delete device;
            return NULL;
        }
        device->inputBufferSizeInBytes *= device->asbd.mBytesPerFrame; // convert frames to bytes
        extraBufferBytes = (int)device->inputBufferSizeInBytes;
    }

    if (device->resampler != NULL) {
        // resampler output format is a user requested format (== ringBuffer format)
        AudioStreamBasicDescription asbdOut; // ringBuffer format
        FillASBDForNonInterleavedPCM(asbdOut, sampleRate, channels, sampleSizeInBits, false, isSigned, isBigEndian);

        // set resampler input buffer size to the HAL buffer size
        if (!device->resampler->Init(&device->asbd, &asbdOut, (int)device->inputBufferSizeInBytes)) {
            ERROR0("<<DAUDIO_Open: resampler.Init() FAILED.\n");
            delete device;
            return NULL;
        }
        // extra bytes in the ringBuffer (extraBufferBytes) should be equal to the resampler output buffer size
        extraBufferBytes = device->resampler->GetOutBufferSize();
    }

    if (!device->ringBuffer.Allocate(bufferSizeInBytes, extraBufferBytes)) {
        ERROR0("<<DAUDIO_Open: Ring buffer allocation error\n");
        delete device;
        return NULL;
    }

    TRACE0("<<DAUDIO_Open: OK\n");
    return device;
}

int DAUDIO_Start(void* id, int isSource) {
    OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)id;
    TRACE0("DAUDIO_Start\n");

    OSStatus err = AudioOutputUnitStart(device->audioUnit);

    if (err != noErr) {
        OS_ERROR0(err, "DAUDIO_Start");
    }

    return err == noErr ? TRUE : FALSE;
}

int DAUDIO_Stop(void* id, int isSource) {
    OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)id;
    TRACE0("DAUDIO_Stop\n");

    OSStatus err = AudioOutputUnitStop(device->audioUnit);

    return err == noErr ? TRUE : FALSE;
}

void DAUDIO_Close(void* id, int isSource) {
    OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)id;
    TRACE0("DAUDIO_Close\n");

    delete device;
}

int DAUDIO_Write(void* id, char* data, int byteSize) {
    OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)id;
    TRACE1(">>DAUDIO_Write: %d bytes to write\n", byteSize);

    int result = device->ringBuffer.Write(data, byteSize, true);

    TRACE1("<<DAUDIO_Write: %d bytes written\n", result);
    return result;
}

int DAUDIO_Read(void* id, char* data, int byteSize) {
    OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)id;
    TRACE1(">>DAUDIO_Read: %d bytes to read\n", byteSize);

    int result = device->ringBuffer.Read(data, byteSize);

    TRACE1("<<DAUDIO_Read: %d bytes have been read\n", result);
    return result;
}

int DAUDIO_GetBufferSize(void* id, int isSource) {
    OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)id;

    int bufferSizeInBytes = device->ringBuffer.GetBufferSize();

    TRACE1("DAUDIO_GetBufferSize returns %d\n", bufferSizeInBytes);
    return bufferSizeInBytes;
}

int DAUDIO_StillDraining(void* id, int isSource) {
    OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)id;

    int draining = device->ringBuffer.GetValidByteCount() > 0 ? TRUE : FALSE;

    TRACE1("DAUDIO_StillDraining returns %d\n", draining);
    return draining;
}

int DAUDIO_Flush(void* id, int isSource) {
    OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)id;
    TRACE0("DAUDIO_Flush\n");

    device->ringBuffer.Flush();

    return TRUE;
}

int DAUDIO_GetAvailable(void* id, int isSource) {
    OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)id;

    int bytesInBuffer = device->ringBuffer.GetValidByteCount();
    if (isSource) {
        return device->ringBuffer.GetBufferSize() - bytesInBuffer;
    } else {
        return bytesInBuffer;
    }
}

INT64 DAUDIO_GetBytePosition(void* id, int isSource, INT64 javaBytePos) {
    OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)id;
    INT64 position;

    if (isSource) {
        position = javaBytePos - device->ringBuffer.GetValidByteCount();
    } else {
        position = javaBytePos + device->ringBuffer.GetValidByteCount();
    }

    TRACE2("DAUDIO_GetBytePosition returns %lld (javaBytePos = %lld)\n", (long long)position, (long long)javaBytePos);
    return position;
}

void DAUDIO_SetBytePosition(void* id, int isSource, INT64 javaBytePos) {
    // javaBytePos is not needed here (it's available in DAUDIO_GetBytePosition)
}

int DAUDIO_RequiresServicing(void* id, int isSource) {
    return FALSE;
}

void DAUDIO_Service(void* id, int isSource) {
    // unreachable
}

#endif // USE_DAUDIO == TRUE