diff --git a/src/audio_worklet.js b/src/audio_worklet.js index 6ed827af22e7..cfb932578312 100644 --- a/src/audio_worklet.js +++ b/src/audio_worklet.js @@ -33,10 +33,13 @@ function createWasmAudioWorkletProcessor(audioParams) { this.userData = opts['ud']; // Then the samples per channel to process, fixed for the lifetime of the // context that created this processor. Note for when moving to Web Audio - // 1.1: the typed array passed to process() should be the same size as this - // 'render quantum size', and this exercise of passing in the value - // shouldn't be required (to be verified). + // 1.1: the typed array passed to process() should be the same size as + // this 'render quantum size', and this exercise of passing in the value + // shouldn't be required (to be verified with the in/out data lengths). this.samplesPerChannel = opts['sc']; + // Typed views of the output buffers on the worklet's stack, which after + // creation should only change if the audio chain changes. + this.outputViews = []; } static get parameterDescriptors() { @@ -44,6 +47,7 @@ function createWasmAudioWorkletProcessor(audioParams) { } process(inputList, outputList, parameters) { + var time = Date.now(); // Marshal all inputs and parameters to the Wasm memory on the thread stack, // then perform the wasm audio worklet call, // and finally marshal audio output data back. @@ -54,7 +58,7 @@ function createWasmAudioWorkletProcessor(audioParams) { bytesPerChannel = this.samplesPerChannel * 4, stackMemoryNeeded = (numInputs + numOutputs) * {{{ C_STRUCTS.AudioSampleFrame.__size__ }}}, oldStackPtr = stackSave(), - inputsPtr, outputsPtr, outputDataPtr, paramsPtr, + inputsPtr, outputsPtr, outputDataPtr, paramsPtr, requiredViews = 0, didProduceAudio, paramArray; // Calculate how much stack space is needed. 
@@ -62,12 +66,47 @@ function createWasmAudioWorkletProcessor(audioParams) { for (i of outputList) stackMemoryNeeded += i.length * bytesPerChannel; for (i in parameters) stackMemoryNeeded += parameters[i].byteLength + {{{ C_STRUCTS.AudioParamFrame.__size__ }}}, ++numParams; - // Allocate the necessary stack space. - inputsPtr = stackAlloc(stackMemoryNeeded); + // Allocate the necessary stack space (dataPtr is always in bytes, and + // advances as space for structs and data is taken, but note the switching + // between bytes and indices into the various heaps). + dataPtr = stackAlloc(stackMemoryNeeded); + + // But start the output view allocs first, since once views are created we + // want them to always start from the same address. Even if in the next + // process() the outputList is empty, as soon as there are channels again + // the views' addresses will still be the same (and only if more views are + // required we will recreate them). + outputDataPtr = dataPtr; + for (/*which output*/i of outputList) { +#if ASSERTIONS + for (/*which channel*/ j of i) { + console.assert(j.byteLength === bytesPerChannel, `Unexpected AudioWorklet output buffer size (expected ${bytesPerChannel} got ${j.byteLength})`); + } +#endif + // Keep advancing to make room for the output views + dataPtr += bytesPerChannel * i.length; + // How many output views are needed in total? + requiredViews += i.length; + } + // Verify we have enough views (it doesn't matter if we have too many, any + // excess won't be accessed) then also verify the views' start address + // hasn't changed. 
+ if (this.outputViews.length < requiredViews || (this.outputViews.length && this.outputViews[0].byteOffset != outputDataPtr)) { + this.outputViews = []; + k = outputDataPtr >> 2; + for (i of outputList) { + for (j of i) { + this.outputViews.push( + HEAPF32.subarray(k, k += this.samplesPerChannel) + ); + } + } + } // Copy input audio descriptor structs and data to Wasm + inputsPtr = dataPtr; k = inputsPtr >> 2; - dataPtr = inputsPtr + numInputs * {{{ C_STRUCTS.AudioSampleFrame.__size__ }}}; + dataPtr += numInputs * {{{ C_STRUCTS.AudioSampleFrame.__size__ }}}; for (i of inputList) { // Write the AudioSampleFrame struct instance HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.numberOfChannels / 4 }}}] = i.length; @@ -82,17 +121,18 @@ function createWasmAudioWorkletProcessor(audioParams) { } // Copy output audio descriptor structs to Wasm + // TODO: now dataPtr tracks the next address, move this above outputsPtr = dataPtr; k = outputsPtr >> 2; - outputDataPtr = (dataPtr += numOutputs * {{{ C_STRUCTS.AudioSampleFrame.__size__ }}}) >> 2; + dataPtr += numOutputs * {{{ C_STRUCTS.AudioSampleFrame.__size__ }}}; for (i of outputList) { // Write the AudioSampleFrame struct instance HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.numberOfChannels / 4 }}}] = i.length; HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.samplesPerChannel / 4 }}}] = this.samplesPerChannel; - HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.data / 4 }}}] = dataPtr; + HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.data / 4 }}}] = outputDataPtr; k += {{{ C_STRUCTS.AudioSampleFrame.__size__ / 4 }}}; - // Reserve space for the output data - dataPtr += bytesPerChannel * i.length; + // Advance the output pointer to the next output + outputDataPtr += bytesPerChannel * i.length; } // Copy parameters descriptor structs and data to Wasm @@ -112,19 +152,20 @@ function createWasmAudioWorkletProcessor(audioParams) { // Call out to Wasm callback to perform audio processing if (didProduceAudio = this.callbackFunction(numInputs, inputsPtr, 
numOutputs, outputsPtr, numParams, paramsPtr, this.userData)) { // Read back the produced audio data to all outputs and their channels. - // (A garbage-free function TypedArray.copy(dstTypedArray, dstOffset, - // srcTypedArray, srcOffset, count) would sure be handy.. but web does - // not have one, so manually copy all bytes in) + // The 'outputViews' are subarray views into the heap, each with the + // correct offset and size to be copied directly into the output. + k = 0; for (i of outputList) { for (j of i) { - for (k = 0; k < this.samplesPerChannel; ++k) { - j[k] = HEAPF32[outputDataPtr++]; - } + j.set(this.outputViews[k++]); } } } stackRestore(oldStackPtr); + + time = Date.now() - time; + //console.log(time); // Return 'true' to tell the browser to continue running this processor. // (Returning 1 or any other truthy value won't work in Chrome) diff --git a/src/library_webaudio.js b/src/library_webaudio.js index f4269e9759ba..f3b01f633ea5 100644 --- a/src/library_webaudio.js +++ b/src/library_webaudio.js @@ -164,7 +164,10 @@ let LibraryWebAudio = { let audioWorkletCreationFailed = () => { #if WEBAUDIO_DEBUG - console.error(`emscripten_start_wasm_audio_worklet_thread_async() addModule() failed!`); + // Note about Cross-Origin here: a lack of Cross-Origin-Opener-Policy and + // Cross-Origin-Embedder-Policy headers on the server's response will result + // in the worklet file failing to load. + console.error(`emscripten_start_wasm_audio_worklet_thread_async() addModule() failed! Are the Cross-Origin headers being set?`); #endif {{{ makeDynCall('viip', 'callback') }}}(contextHandle, 0/*EM_FALSE*/, userData); };