// This file is the main bootstrap script for Wasm Audio Worklets loaded in an
// Emscripten application. Build with the -sAUDIO_WORKLET linker flag to enable
// targeting Audio Worklets.

// AudioWorkletGlobalScope does not have onmessage/postMessage() functionality
// at the global scope, which means that after creating an
// AudioWorkletGlobalScope and loading this script into it, we cannot
// postMessage() information into it like one would do with Web Workers.

// Instead, we must create an AudioWorkletProcessor class, then instantiate a
// Web Audio graph node from it on the main thread. Using its message port and
// the node constructor's "processorOptions" field, we can share the necessary
// bootstrap information from the main thread to the AudioWorkletGlobalScope.
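
// As a rough sketch (the main-thread side lives elsewhere, so the option
// fields shown here are illustrative, not the actual implementation), the
// handshake starts out along these lines:
//
//   // Main thread, after audioContext.audioWorklet.addModule(...):
//   var node = new AudioWorkletNode(audioContext, 'em-bootstrap', {
//     processorOptions: {/* Wasm module, memory and stack details, ... */}
//   });
//   node.port.postMessage(/* further bootstrap requests */);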

#if MINIMAL_RUNTIME
var instantiatePromise;
#endif

if (ENVIRONMENT_IS_AUDIO_WORKLET) {

function createWasmAudioWorkletProcessor(audioParams) {
  class WasmAudioWorkletProcessor extends AudioWorkletProcessor {
    constructor(args) {
      super();

      // Capture the Wasm function callback to invoke.
      let opts = args.processorOptions;
#if ASSERTIONS
      assert(opts.callback);
      assert(opts.samplesPerChannel);
#endif
      this.callback = {{{ makeDynCall('iipipipp', 'opts.callback') }}};
      this.userData = opts.userData;
      // Then the samples per channel to process, fixed for the lifetime of the
      // context that created this processor. Even though this 'render quantum
      // size' is fixed at 128 samples in the 1.0 spec, it will be variable in
      // the 1.1 spec. It's passed in now, just to prove it's settable, but will
      // eventually be a property of the AudioWorkletGlobalScope (globalThis).
      this.samplesPerChannel = opts.samplesPerChannel;
      this.bytesPerChannel = this.samplesPerChannel * {{{ getNativeTypeSize('float') }}};

      // Prepare the output views; see createOutputViews(). The 'STACK_ALIGN'
      // deduction stops the STACK_OVERFLOW_CHECK failing (since the stack will
      // be full if we allocate all the available space), leaving room for a
      // single AudioSampleFrame as a minimum. There's an arbitrary maximum of
      // 64 frames, for the case where a multi-MB stack is passed.
      this.outputViews = new Array(Math.min(((wwParams.stackSize - {{{ STACK_ALIGN }}}) / this.bytesPerChannel) | 0, /*sensible limit*/ 64));
#if ASSERTIONS
      console.assert(this.outputViews.length > 0, `AudioWorklet needs more stack allocating (at least ${this.bytesPerChannel})`);
#endif
      this.createOutputViews();

#if ASSERTIONS
      // Explicitly verify this later in process(). Note that stackSave() is a
      // bit of a misnomer, as it simply returns the current stack address.
      this.ctorOldStackPtr = stackSave();
#endif
    }

    /**
     * Create up-front as many typed views for marshalling the output data as
     * may be required, allocated at the *top* of the worklet's stack (and whose
     * addresses are fixed).
     */
    createOutputViews() {
      // These are still alloc'd to take advantage of the overflow checks, etc.
      var oldStackPtr = stackSave();
      var viewDataIdx = {{{ getHeapOffset('stackAlloc(this.outputViews.length * this.bytesPerChannel)', 'float') }}};
#if WEBAUDIO_DEBUG
      console.log(`AudioWorklet creating ${this.outputViews.length} buffer one-time views (for a stack size of ${wwParams.stackSize} at address ${ptrToString(viewDataIdx * 4)})`);
#endif
      // Inserted in reverse so the lowest indices are closest to the stack top
      for (var n = this.outputViews.length - 1; n >= 0; n--) {
        this.outputViews[n] = HEAPF32.subarray(viewDataIdx, viewDataIdx += this.samplesPerChannel);
      }
      stackRestore(oldStackPtr);
    }
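
    // To picture the result of createOutputViews(): with, say, two views of
    // 128 samples each (512 bytes per channel; the sizes are illustrative),
    // the views sit at the very top of the worklet stack, with outputViews[0]
    // nearest the top and the rest of the stack growing down below them:
    //
    //   stack top -> +------------------+
    //                |  outputViews[0]  |  512 bytes
    //                +------------------+
    //                |  outputViews[1]  |  512 bytes
    //                +------------------+
    //                |  rest of stack   |  (grows downwards)
    //                        ...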

    static get parameterDescriptors() {
      return audioParams;
    }

    /**
     * Marshals all inputs and parameters to the Wasm memory on the thread's
     * stack, then performs the Wasm audio worklet call, and finally marshals
     * audio output data back.
     *
     * @param {Object} parameters
     */
    process(inputList, outputList, parameters) {
#if ALLOW_MEMORY_GROWTH
      // Recreate the output views if the heap has changed
      // TODO: add support for GROWABLE_ARRAYBUFFERS
      if (HEAPF32.buffer != this.outputViews[0].buffer) {
        this.createOutputViews();
      }
#endif

      var numInputs = inputList.length;
      var numOutputs = outputList.length;

      var entry; // reused list entry or index
      var subentry; // reused channel or other array in each list entry or index

      // Calculate the required stack and output buffer views (stack is further
      // split into aligned structs and the raw float data).
      var stackMemoryStruct = (numInputs + numOutputs) * {{{ C_STRUCTS.AudioSampleFrame.__size__ }}};
      var stackMemoryData = 0;
      for (entry of inputList) {
        stackMemoryData += entry.length;
      }
      stackMemoryData *= this.bytesPerChannel;
      // Collect the total number of output channels (mapped to array views)
      var outputViewsNeeded = 0;
      for (entry of outputList) {
        outputViewsNeeded += entry.length;
      }
      stackMemoryData += outputViewsNeeded * this.bytesPerChannel;
      var numParams = 0;
      for (entry in parameters) {
        ++numParams;
        stackMemoryStruct += {{{ C_STRUCTS.AudioParamFrame.__size__ }}};
        stackMemoryData += parameters[entry].byteLength;
      }
      var oldStackPtr = stackSave();
#if ASSERTIONS
      console.assert(oldStackPtr == this.ctorOldStackPtr, 'AudioWorklet stack address has unexpectedly moved');
      console.assert(outputViewsNeeded <= this.outputViews.length, `Too many AudioWorklet outputs (need ${outputViewsNeeded} but have stack space for ${this.outputViews.length})`);
#endif

      // Allocate the necessary stack space. All pointer variables are in
      // bytes; 'structPtr' starts at the first struct entry (all run
      // sequentially) and is the working start to each record; 'dataPtr' is
      // the same for the audio/params data, starting after *all* the structs.
      // 'structPtr' begins 16-byte aligned, allocated from the internal
      // _emscripten_stack_alloc(), as are the output views, and so to ensure
      // the views fall on the correct addresses (and we finish at stacktop) we
      // request additional bytes, taking this alignment into account, then
      // offset 'dataPtr' by the difference.
      var stackMemoryAligned = (stackMemoryStruct + stackMemoryData + 15) & ~15;
      var structPtr = stackAlloc(stackMemoryAligned);
      var dataPtr = structPtr + (stackMemoryAligned - stackMemoryData);
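
      // As a worked example of the sums above (with an illustrative 16-byte
      // AudioSampleFrame; the real size comes from C_STRUCTS): one stereo
      // input, one stereo output, no params and 128 samples per channel gives
      // stackMemoryStruct = 2 * 16 = 32 bytes and stackMemoryData =
      // 4 channels * 512 = 2048 bytes, so stackMemoryAligned =
      // (32 + 2048 + 15) & ~15 = 2080, and dataPtr starts at structPtr + 32,
      // i.e. immediately after the two structs.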

      // Copy input audio descriptor structs and data to Wasm (recall, structs
      // first, audio data after). 'inputsPtr' is the start of the C callback's
      // input AudioSampleFrame.
      var /*const*/ inputsPtr = structPtr;
      for (entry of inputList) {
        // Write the AudioSampleFrame struct instance
        {{{ makeSetValue('structPtr', C_STRUCTS.AudioSampleFrame.numberOfChannels, 'entry.length', 'u32') }}};
        {{{ makeSetValue('structPtr', C_STRUCTS.AudioSampleFrame.samplesPerChannel, 'this.samplesPerChannel', 'u32') }}};
        {{{ makeSetValue('structPtr', C_STRUCTS.AudioSampleFrame.data, 'dataPtr', '*') }}};
        structPtr += {{{ C_STRUCTS.AudioSampleFrame.__size__ }}};
        // Marshal the input audio sample data for each audio channel of this input
        for (subentry of entry) {
          HEAPF32.set(subentry, {{{ getHeapOffset('dataPtr', 'float') }}});
          dataPtr += this.bytesPerChannel;
        }
      }

      // Copy parameter descriptor structs and data to Wasm. 'paramsPtr' is the
      // start of the C callback's input AudioParamFrame.
      var /*const*/ paramsPtr = structPtr;
      for (entry = 0; subentry = parameters[entry++];) {
        // Write the AudioParamFrame struct instance
        {{{ makeSetValue('structPtr', C_STRUCTS.AudioParamFrame.length, 'subentry.length', 'u32') }}};
        {{{ makeSetValue('structPtr', C_STRUCTS.AudioParamFrame.data, 'dataPtr', '*') }}};
        structPtr += {{{ C_STRUCTS.AudioParamFrame.__size__ }}};
        // Marshal the audio parameters array
        HEAPF32.set(subentry, {{{ getHeapOffset('dataPtr', 'float') }}});
        dataPtr += subentry.length * {{{ getNativeTypeSize('float') }}};
      }

      // Copy output audio descriptor structs to Wasm. 'outputsPtr' is the start
      // of the C callback's output AudioSampleFrame. 'dataPtr' will now be
      // aligned with the output views, ending at stacktop (which is why this
      // needs to be last).
      var /*const*/ outputsPtr = structPtr;
      for (entry of outputList) {
        // Write the AudioSampleFrame struct instance
        {{{ makeSetValue('structPtr', C_STRUCTS.AudioSampleFrame.numberOfChannels, 'entry.length', 'u32') }}};
        {{{ makeSetValue('structPtr', C_STRUCTS.AudioSampleFrame.samplesPerChannel, 'this.samplesPerChannel', 'u32') }}};
        {{{ makeSetValue('structPtr', C_STRUCTS.AudioSampleFrame.data, 'dataPtr', '*') }}};
        structPtr += {{{ C_STRUCTS.AudioSampleFrame.__size__ }}};
        // Advance the output pointer to the next output (matching the pre-allocated views)
        dataPtr += this.bytesPerChannel * entry.length;
      }

#if ASSERTIONS
      // If all the maths worked out, we arrived at the original stack address
      console.assert(dataPtr == oldStackPtr, `AudioWorklet stack mismatch (audio data finishes at ${dataPtr} instead of ${oldStackPtr})`);

      // Sanity checks. If these trip, the most likely cause, beyond unforeseen
      // stack shenanigans, is that the 'render quantum size' changed after
      // construction (which shouldn't be possible).
      if (numOutputs) {
        // First, that the output view addresses match the stack positions
        dataPtr -= this.bytesPerChannel;
        for (entry = 0; entry < outputViewsNeeded; entry++) {
          console.assert(dataPtr == this.outputViews[entry].byteOffset, 'AudioWorklet internal error in addresses of the output array views');
          dataPtr -= this.bytesPerChannel;
        }
        // And that the views' sizes match the passed-in output buffers
        for (entry of outputList) {
          for (subentry of entry) {
            console.assert(subentry.byteLength == this.bytesPerChannel, `AudioWorklet unexpected output buffer size (expected ${this.bytesPerChannel} got ${subentry.byteLength})`);
          }
        }
      }
#endif

      // Call out to the Wasm callback to perform the audio processing
      var didProduceAudio = this.callback(numInputs, inputsPtr, numOutputs, outputsPtr, numParams, paramsPtr, this.userData);
      if (didProduceAudio) {
        // Read back the produced audio data to all outputs and their channels.
        // The preallocated 'outputViews' already have the correct offsets and
        // sizes into the stack (recall from createOutputViews() that they run
        // backwards).
        for (entry of outputList) {
          for (subentry of entry) {
            subentry.set(this.outputViews[--outputViewsNeeded]);
          }
        }
      }

      stackRestore(oldStackPtr);

      // Return 'true' to tell the browser to continue running this processor.
      // (Returning 1 or any other truthy value won't work in Chrome.)
      return !!didProduceAudio;
    }
  }
  return WasmAudioWorkletProcessor;
}
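
// For reference, the 'iipipipp' signature used for the dynamic call above maps
// to a C process callback along these lines (a sketch reconstructed from the
// marshalling above; see emscripten/webaudio.h for the authoritative typedef):
//
//   bool process(int numInputs, const AudioSampleFrame *inputs,
//                int numOutputs, AudioSampleFrame *outputs,
//                int numParams, const AudioParamFrame *params,
//                void *userData);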

var messagePort;

// Specify a worklet processor that will be used to receive messages to this
// AudioWorkletGlobalScope. We never connect this initial AudioWorkletProcessor
// to the audio graph to do any audio processing.
class BootstrapMessages extends AudioWorkletProcessor {
  constructor(arg) {
    super();
    startWasmWorker(arg.processorOptions);
#if WEBAUDIO_DEBUG
    console.log('AudioWorklet global scope looks like this:');
    console.dir(globalThis);
#endif
    // Listen to messages from the main thread. These messages will ask this
    // scope to create the real AudioWorkletProcessors that call out to Wasm to
    // do audio processing.
    messagePort = this.port;
    /** @suppress {checkTypes} */
    messagePort.onmessage = async (msg) => {
#if MINIMAL_RUNTIME
      // Wait for the module instantiation before processing messages.
      await instantiatePromise;
#endif
      let d = msg.data;
      if (d['_wpn']) {
        // '_wpn' is short for 'Worklet Processor Node', using an identifier
        // that will never conflict with user messages. Register a real
        // AudioWorkletProcessor that will actually do audio processing.
        registerProcessor(d['_wpn'], createWasmAudioWorkletProcessor(d.audioParams));
#if WEBAUDIO_DEBUG
        console.log(`Registered a new WasmAudioWorkletProcessor "${d['_wpn']}" with AudioParams: ${d.audioParams}`);
#endif
        // Post a Wasm Call message back telling that we have now registered
        // the AudioWorkletProcessor, which should trigger the user's onSuccess
        // callback of the emscripten_create_wasm_audio_worklet_processor_async()
        // call.
        //
        // '_wsc' is short for 'wasm call', using an identifier that will never
        // conflict with user messages.
        //
        // Note: we convert the pointer arg manually here since the call site
        // ($_EmAudioDispatchProcessorCallback) is used with various signatures
        // and we do not know the types in advance.
        messagePort.postMessage({'_wsc': d.callback, args: [d.contextHandle, 1/*EM_TRUE*/, {{{ to64('d.userData') }}}] });
      } else if (d['_wsc']) {
        getWasmTableEntry(d['_wsc'])(...d.args);
      }
    };
  }

  // No-op; this processor does no audio processing and is only used to receive
  // the bootstrap messages. However, browsers require process() to still be
  // present. It should never be called, because we never add a node to the
  // graph with this processor, although Chrome does appear to still call it.
  process() {
    // Keep this function a no-op. Chrome redundantly wants to call this even
    // though this processor is never added to the graph.
  }
}

// Register the dummy processor that will just receive messages.
registerProcessor('em-bootstrap', BootstrapMessages);

} // ENVIRONMENT_IS_AUDIO_WORKLET
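
// To summarize the handshake with an illustrative message trace (the processor
// name, pointers and handle values are made up; only the '_wpn'/'_wsc' keys
// and the overall flow come from the code above):
//
//   main thread -> worklet: {'_wpn': 'my-processor', audioParams: [...],
//                            callback: <fn ptr>, contextHandle: <ctx>,
//                            userData: <ptr>}
//   worklet:                registerProcessor('my-processor', ...)
//   worklet -> main thread: {'_wsc': <fn ptr>, args: [<ctx>, 1, <ptr>]}
//   main thread:            dispatches the '_wsc' function pointer, firing the
//                           onSuccess callback passed to
//                           emscripten_create_wasm_audio_worklet_processor_async()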