GitHub Repository: emscripten-core/emscripten
Path: blob/main/src/audio_worklet.js
1
// This file is the main bootstrap script for Wasm Audio Worklets loaded in an
// Emscripten application. Build with -sAUDIO_WORKLET linker flag to enable
// targeting Audio Worklets.

// AudioWorkletGlobalScope does not have a onmessage/postMessage() functionality
// at the global scope, which means that after creating an
// AudioWorkletGlobalScope and loading this script into it, we cannot
// postMessage() information into it like one would do with Web Workers.

// Instead, we must create an AudioWorkletProcessor class, then instantiate a
// Web Audio graph node from it on the main thread. Using its message port and
// the node constructor's "processorOptions" field, we can share the necessary
// bootstrap information from the main thread to the AudioWorkletGlobalScope.

#if MINIMAL_RUNTIME
// Resolves once the Wasm module has been instantiated; the port.onmessage
// handler below awaits it so that no bootstrap/worklet messages are processed
// before the module is ready.
var instantiatePromise;
#endif
19
if (ENVIRONMENT_IS_AUDIO_WORKLET) {
20
21
/**
 * Factory producing the AudioWorkletProcessor subclass that marshals audio
 * data between the Web Audio render thread and the user's Wasm callback.
 * Defined as a factory (rather than a top-level class) so each registered
 * processor can capture its own parameter descriptors.
 *
 * @param {Array=} audioParams - AudioParam descriptors returned from the
 *        static parameterDescriptors getter (only when
 *        AUDIO_WORKLET_SUPPORT_AUDIO_PARAMS is enabled).
 * @returns {function(new:AudioWorkletProcessor)} the processor class to pass
 *          to registerProcessor().
 */
#if AUDIO_WORKLET_SUPPORT_AUDIO_PARAMS
function createWasmAudioWorkletProcessor(audioParams) {
#else
function createWasmAudioWorkletProcessor() {
#endif
  class WasmAudioWorkletProcessor extends AudioWorkletProcessor {
    constructor(args) {
      super();

      // Capture the Wasm function callback to invoke.
      let opts = args.processorOptions;
#if ASSERTIONS
      assert(opts.callback)
      assert(opts.samplesPerChannel)
#endif
      this.callback = {{{ makeDynCall('iipipipp', 'opts.callback') }}};
      this.userData = opts.userData;
      // Then the samples per channel to process, fixed for the lifetime of the
      // context that created this processor. Even though this 'render quantum
      // size' is fixed at 128 samples in the 1.0 spec, it will be variable in
      // the 1.1 spec. It's passed in now, just to prove it's settable, but will
      // eventually be a property of the AudioWorkletGlobalScope (globalThis).
      this.samplesPerChannel = opts.samplesPerChannel;
      this.bytesPerChannel = this.samplesPerChannel * {{{ getNativeTypeSize('float') }}};

      // Prepare the output views; see createOutputViews(). The 'STACK_ALIGN'
      // deduction stops the STACK_OVERFLOW_CHECK failing (since the stack will
      // be full if we allocate all the available space) leaving room for a
      // single AudioSampleFrame as a minimum. There's an arbitrary maximum of
      // 64 frames, for the case where a multi-MB stack is passed.
      this.outputViews = new Array(Math.min(((wwParams.stackSize - {{{ STACK_ALIGN }}}) / this.bytesPerChannel) | 0, /*sensible limit*/ 64));
#if ASSERTIONS
      assert(this.outputViews.length > 0, `AudioWorklet needs more stack allocating (at least ${this.bytesPerChannel})`);
#endif
      this.createOutputViews();

#if ASSERTIONS
      // Explicitly verify this later in process(). Note to self, stackSave is a
      // bit of a misnomer as it simply gets the stack address.
      this.ctorOldStackPtr = stackSave();
#endif
    }

    /**
     * Create up-front as many typed views for marshalling the output data as
     * may be required, allocated at the *top* of the worklet's stack (and whose
     * addresses are fixed).
     */
    createOutputViews() {
      // These are still alloc'd to take advantage of the overflow checks, etc.
      var oldStackPtr = stackSave();
      // viewDataIdx is a HEAPF32 *index* (bytes >> 2), not a byte address.
      var viewDataIdx = {{{ getHeapOffset('stackAlloc(this.outputViews.length * this.bytesPerChannel)', 'float') }}};
#if WEBAUDIO_DEBUG
      console.log(`AudioWorklet creating ${this.outputViews.length} buffer one-time views (for a stack size of ${wwParams.stackSize} at address ${ptrToString(viewDataIdx * 4)})`);
#endif
      // Inserted in reverse so the lowest indices are closest to the stack top
      for (var n = this.outputViews.length - 1; n >= 0; n--) {
        this.outputViews[n] = HEAPF32.subarray(viewDataIdx, viewDataIdx += this.samplesPerChannel);
      }
      // The alloc above was only to reserve/validate the address range; the
      // views keep their fixed offsets after the stack pointer is restored.
      stackRestore(oldStackPtr);
    }

#if AUDIO_WORKLET_SUPPORT_AUDIO_PARAMS
    static get parameterDescriptors() {
      return audioParams;
    }
#endif

    /**
     * Marshals all inputs and parameters to the Wasm memory on the thread's
     * stack, then performs the wasm audio worklet call, and finally marshals
     * audio output data back.
     *
     * @param {Object} parameters
     */
#if AUDIO_WORKLET_SUPPORT_AUDIO_PARAMS
    process(inputList, outputList, parameters) {
#else
    /** @suppress {checkTypes} */
    process(inputList, outputList) {
#endif

#if ALLOW_MEMORY_GROWTH
      // Recreate the output views if the heap has changed
      // TODO: add support for GROWABLE_ARRAYBUFFERS
      if (HEAPF32.buffer != this.outputViews[0].buffer) {
        this.createOutputViews();
      }
#endif

      var numInputs = inputList.length;
      var numOutputs = outputList.length;

      var entry; // reused list entry or index
      var subentry; // reused channel or other array in each list entry or index

      // Calculate the required stack and output buffer views (stack is further
      // split into aligned structs and the raw float data).
      var stackMemoryStruct = (numInputs + numOutputs) * {{{ C_STRUCTS.AudioSampleFrame.__size__ }}};
      var stackMemoryData = 0;
      for (entry of inputList) {
        // entry.length is the number of channels in this input
        stackMemoryData += entry.length;
      }
      stackMemoryData *= this.bytesPerChannel;
      // Collect the total number of output channels (mapped to array views)
      var outputViewsNeeded = 0;
      for (entry of outputList) {
        outputViewsNeeded += entry.length;
      }
      stackMemoryData += outputViewsNeeded * this.bytesPerChannel;
      var numParams = 0;
#if AUDIO_WORKLET_SUPPORT_AUDIO_PARAMS
      for (entry in parameters) {
        ++numParams;
        stackMemoryStruct += {{{ C_STRUCTS.AudioParamFrame.__size__ }}};
        stackMemoryData += parameters[entry].byteLength;
      }
#endif
      var oldStackPtr = stackSave();
#if ASSERTIONS
      assert(oldStackPtr == this.ctorOldStackPtr, 'AudioWorklet stack address has unexpectedly moved');
      assert(outputViewsNeeded <= this.outputViews.length, `Too many AudioWorklet outputs (need ${outputViewsNeeded} but have stack space for ${this.outputViews.length})`);
#endif

      // Allocate the necessary stack space. All pointer variables are in bytes;
      // 'structPtr' starts at the first struct entry (all run sequentially)
      // and is the working start to each record; 'dataPtr' is the same for the
      // audio/params data, starting after *all* the structs.
      // 'structPtr' begins 16-byte aligned, allocated from the internal
      // _emscripten_stack_alloc(), as are the output views, and so to ensure
      // the views fall on the correct addresses (and we finish at stacktop) we
      // request additional bytes, taking this alignment into account, then
      // offset `dataPtr` by the difference.
      var stackMemoryAligned = (stackMemoryStruct + stackMemoryData + 15) & ~15;
      var structPtr = stackAlloc(stackMemoryAligned);
      var dataPtr = structPtr + (stackMemoryAligned - stackMemoryData);
#if ASSERTIONS
      // TODO: look at why stackAlloc isn't tripping the assertions
      assert(stackMemoryAligned <= wwParams.stackSize, `Not enough stack allocated to the AudioWorklet (need ${stackMemoryAligned}, got ${wwParams.stackSize})`);
#endif

      // Copy input audio descriptor structs and data to Wasm (recall, structs
      // first, audio data after). 'inputsPtr' is the start of the C callback's
      // input AudioSampleFrame.
      var /*const*/ inputsPtr = structPtr;
      for (entry of inputList) {
        // Write the AudioSampleFrame struct instance
        {{{ makeSetValue('structPtr', C_STRUCTS.AudioSampleFrame.numberOfChannels, 'entry.length', 'u32') }}};
        {{{ makeSetValue('structPtr', C_STRUCTS.AudioSampleFrame.samplesPerChannel, 'this.samplesPerChannel', 'u32') }}};
        {{{ makeSetValue('structPtr', C_STRUCTS.AudioSampleFrame.data, 'dataPtr', '*') }}};
        structPtr += {{{ C_STRUCTS.AudioSampleFrame.__size__ }}};
        // Marshal the input audio sample data for each audio channel of this input
        for (subentry of entry) {
          HEAPF32.set(subentry, {{{ getHeapOffset('dataPtr', 'float') }}});
          dataPtr += this.bytesPerChannel;
        }
      }

#if AUDIO_WORKLET_SUPPORT_AUDIO_PARAMS
      // Copy parameters descriptor structs and data to Wasm. 'paramsPtr' is the
      // start of the C callback's input AudioParamFrame.
      var /*const*/ paramsPtr = structPtr;
      // 'parameters' keys are "0", "1", ... so this walks them in index order
      // until the first missing key.
      for (entry = 0; subentry = parameters[entry++];) {
        // Write the AudioParamFrame struct instance
        {{{ makeSetValue('structPtr', C_STRUCTS.AudioParamFrame.length, 'subentry.length', 'u32') }}};
        {{{ makeSetValue('structPtr', C_STRUCTS.AudioParamFrame.data, 'dataPtr', '*') }}};
        structPtr += {{{ C_STRUCTS.AudioParamFrame.__size__ }}};
        // Marshal the audio parameters array
        HEAPF32.set(subentry, {{{ getHeapOffset('dataPtr', 'float') }}});
        dataPtr += subentry.length * {{{ getNativeTypeSize('float') }}};
      }
#else
      var paramsPtr = 0;
#endif

      // Copy output audio descriptor structs to Wasm. 'outputsPtr' is the start
      // of the C callback's output AudioSampleFrame. 'dataPtr' will now be
      // aligned with the output views, ending at stacktop (which is why this
      // needs to be last).
      var /*const*/ outputsPtr = structPtr;
      for (entry of outputList) {
        // Write the AudioSampleFrame struct instance
        {{{ makeSetValue('structPtr', C_STRUCTS.AudioSampleFrame.numberOfChannels, 'entry.length', 'u32') }}};
        {{{ makeSetValue('structPtr', C_STRUCTS.AudioSampleFrame.samplesPerChannel, 'this.samplesPerChannel', 'u32') }}};
        {{{ makeSetValue('structPtr', C_STRUCTS.AudioSampleFrame.data, 'dataPtr', '*') }}};
        structPtr += {{{ C_STRUCTS.AudioSampleFrame.__size__ }}};
        // Advance the output pointer to the next output (matching the pre-allocated views)
        dataPtr += this.bytesPerChannel * entry.length;
      }

#if ASSERTIONS
      // If all the maths worked out, we arrived at the original stack address
      console.assert(dataPtr == oldStackPtr, `AudioWorklet stack mismatch (audio data finishes at ${dataPtr} instead of ${oldStackPtr})`);

      // Sanity checks. If these trip the most likely cause, beyond unforeseen
      // stack shenanigans, is that the 'render quantum size' changed after
      // construction (which shouldn't be possible).
      if (numOutputs) {
        // First that the output view addresses match the stack positions
        dataPtr -= this.bytesPerChannel;
        for (entry = 0; entry < outputViewsNeeded; entry++) {
          console.assert(dataPtr == this.outputViews[entry].byteOffset, 'AudioWorklet internal error in addresses of the output array views');
          dataPtr -= this.bytesPerChannel;
        }
        // And that the views' size match the passed in output buffers
        for (entry of outputList) {
          for (subentry of entry) {
            assert(subentry.byteLength == this.bytesPerChannel, `AudioWorklet unexpected output buffer size (expected ${this.bytesPerChannel} got ${subentry.byteLength})`);
          }
        }
      }
#endif

      // Call out to Wasm callback to perform audio processing
      var didProduceAudio = this.callback(numInputs, inputsPtr, numOutputs, outputsPtr, numParams, paramsPtr, this.userData);
      if (didProduceAudio) {
        // Read back the produced audio data to all outputs and their channels.
        // The preallocated 'outputViews' already have the correct offsets and
        // sizes into the stack (recall from createOutputViews() that they run
        // backwards).
        for (entry of outputList) {
          for (subentry of entry) {
            subentry.set(this.outputViews[--outputViewsNeeded]);
          }
        }
      }

      stackRestore(oldStackPtr);

      // Return 'true' to tell the browser to continue running this processor.
      // (Returning 1 or any other truthy value won't work in Chrome)
      return !!didProduceAudio;
    }
  }
  return WasmAudioWorkletProcessor;
}
257
258
#if MIN_FIREFOX_VERSION < 138 || MIN_CHROME_VERSION != TARGET_NOT_SUPPORTED || MIN_SAFARI_VERSION != TARGET_NOT_SUPPORTED
// If this browser does not support the up-to-date AudioWorklet standard
// that has a MessagePort over to the AudioWorklet, then polyfill that by
// a hacky AudioWorkletProcessor that provides the MessagePort.
// Firefox added support in https://hg-edge.mozilla.org/integration/autoland/rev/ab38a1796126f2b3fc06475ffc5a625059af59c1
// Chrome ticket: https://crbug.com/446920095
// Safari ticket: https://webkit.org/b/299386
/**
 * Either the native globalThis.port (on browsers with the up-to-date
 * standard) or a placeholder object that BootstrapMessages swaps for the
 * real MessagePort when it is constructed; onmessage is assigned below
 * either way.
 * @suppress {duplicate, checkTypes}
 */
var port = globalThis.port || {};

// Specify a worklet processor that will be used to receive messages to this
// AudioWorkletGlobalScope. We never connect this initial AudioWorkletProcessor
// to the audio graph to do any audio processing.
class BootstrapMessages extends AudioWorkletProcessor {
  constructor(arg) {
    super();
    startWasmWorker(arg.processorOptions)
    // Listen to messages from the main thread. These messages will ask this
    // scope to create the real AudioWorkletProcessors that call out to Wasm to
    // do audio processing.
    if (!(port instanceof MessagePort)) {
      // Carry over any handler assigned to the placeholder before the real
      // port existed, then rebind 'port' to this processor's MessagePort.
      this.port.onmessage = port.onmessage;
      /** @suppress {checkTypes} */
      port = this.port;
    }
  }

  // No-op, not doing audio processing in this processor. It is just for
  // receiving bootstrap messages. However browsers require it to still be
  // present. It should never be called because we never add a node to the graph
  // with this processor, although it does look like Chrome does still call this
  // function.
  process() {
    // keep this function a no-op. Chrome redundantly wants to call this even
    // though this processor is never added to the graph.
  }
};

// Register the dummy processor that will just receive messages.
registerProcessor('em-bootstrap', BootstrapMessages);
#endif
301
302
// Dispatch messages arriving from the main thread. Three message shapes are
// handled, each keyed by an identifier chosen to never collide with user
// messages: '_boot' (bootstrap the Wasm worker), '_wpn' (register a named
// worklet processor), and '_wsc' (invoke a Wasm function by table index).
port.onmessage = async (msg) => {
#if MINIMAL_RUNTIME
  // Wait for the module instantiation before processing messages.
  await instantiatePromise;
#endif
  let d = msg.data;
  if (d['_boot']) {
    startWasmWorker(d);
#if WEBAUDIO_DEBUG
    console.log('AudioWorklet global scope looks like this:');
    console.dir(globalThis);
#endif
  } else if (d['_wpn']) {
    // '_wpn' is short for 'Worklet Processor Node', using an identifier
    // that will never conflict with user messages
    // Register a real AudioWorkletProcessor that will actually do audio processing.
#if AUDIO_WORKLET_SUPPORT_AUDIO_PARAMS
    registerProcessor(d['_wpn'], createWasmAudioWorkletProcessor(d.audioParams));
#else
    registerProcessor(d['_wpn'], createWasmAudioWorkletProcessor());
#endif
#if WEBAUDIO_DEBUG
    console.log(`Registered a new WasmAudioWorkletProcessor "${d['_wpn']}" with AudioParams: ${d.audioParams}`);
#endif
    // Post a Wasm Call message back telling that we have now registered the
    // AudioWorkletProcessor, and should trigger the user onSuccess callback
    // of the emscripten_create_wasm_audio_worklet_processor_async() call.
    //
    // '_wsc' is short for 'wasm call', using an identifier that will never
    // conflict with user messages.
    //
    // Note: we convert the pointer arg manually here since the call site
    // ($_EmAudioDispatchProcessorCallback) is used with various signatures
    // and we do not know the types in advance.
    port.postMessage({'_wsc': d.callback, args: [d.contextHandle, 1/*EM_TRUE*/, {{{ to64('d.userData') }}}] });
  } else if (d['_wsc']) {
    // Generic Wasm call: look up the function pointer in the table and invoke
    // it with the arguments supplied by the main thread.
    getWasmTableEntry(d['_wsc'])(...d.args);
  };
}
341
342
} // ENVIRONMENT_IS_AUDIO_WORKLET
343
344