diff --git a/src/audio_worklet.js b/src/audio_worklet.js index 31297aa5e33c..d2ee130dd646 100644 --- a/src/audio_worklet.js +++ b/src/audio_worklet.js @@ -31,6 +31,8 @@ function createWasmAudioWorkletProcessor(audioParams) { let opts = args.processorOptions; this.callbackFunction = Module['wasmTable'].get(opts['cb']); this.userData = opts['ud']; + // Plus the number of samples to process, fixed for the lifetime of the context that created this processor + this.quantumSize = opts['qs']; } static get parameterDescriptors() { @@ -38,9 +40,6 @@ function createWasmAudioWorkletProcessor(audioParams) { } process(inputList, outputList, parameters) { - // hardcoding this until the Web Audio 1.1 API spec makes it into browsers and we can get the AudioWorkletProcessor's context quantum - const quantumSize = 128; - // Marshal all inputs and parameters to the Wasm memory on the thread stack, // then perform the wasm audio worklet call, // and finally marshal audio output data back. @@ -54,7 +53,7 @@ function createWasmAudioWorkletProcessor(audioParams) { didProduceAudio, paramArray; // Calculate how much stack space is needed. 
- const quantumBytes = quantumSize * 4; + const quantumBytes = this.quantumSize * 4; for (i of inputList) stackMemoryNeeded += i.length * quantumBytes; for (i of outputList) stackMemoryNeeded += i.length * quantumBytes; for (i in parameters) stackMemoryNeeded += parameters[i].byteLength + {{{ C_STRUCTS.AudioParamFrame.__size__ }}}, ++numParams; @@ -68,7 +67,7 @@ function createWasmAudioWorkletProcessor(audioParams) { for (i of inputList) { // Write the AudioSampleFrame struct instance HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.numberOfChannels / 4 }}}] = i.length; - HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.quantumSize / 4 }}}] = quantumSize; + HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.quantumSize / 4 }}}] = this.quantumSize; HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.data / 4 }}}] = dataPtr; k += {{{ C_STRUCTS.AudioSampleFrame.__size__ / 4 }}}; // Marshal the input audio sample data for each audio channel of this input @@ -85,7 +84,7 @@ function createWasmAudioWorkletProcessor(audioParams) { for (i of outputList) { // Write the AudioSampleFrame struct instance HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.numberOfChannels / 4 }}}] = i.length; - HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.quantumSize / 4 }}}] = quantumSize; + HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.quantumSize / 4 }}}] = this.quantumSize; HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.data / 4 }}}] = dataPtr; k += {{{ C_STRUCTS.AudioSampleFrame.__size__ / 4 }}}; // Reserve space for the output data @@ -114,7 +113,7 @@ function createWasmAudioWorkletProcessor(audioParams) { // not have one, so manually copy all bytes in) for (i of outputList) { for (j of i) { - for (k = 0; k < quantumSize; ++k) { + for (k = 0; k < this.quantumSize; ++k) { j[k] = HEAPF32[outputDataPtr++]; } } diff --git a/src/library_webaudio.js b/src/library_webaudio.js index 3d382b9ff08e..04f2aabb5766 100644 --- a/src/library_webaudio.js +++ b/src/library_webaudio.js @@ -37,11 +37,21 @@ let LibraryWebAudio = { // Wasm handle ID. 
$emscriptenGetAudioObject: (objectHandle) => EmAudio[objectHandle], - // emscripten_create_audio_context() does not itself use + // Performs the work of getting the AudioContext's quantum size. + $emscriptenGetContextQuantumSize: (contextHandle) => { + // TODO: in a future release this will be something like: + // return EmAudio[contextHandle].renderQuantumSize || 128; + // It comes with two caveats: the hint needs adding to + // emscripten_create_audio_context() when generating the context, and altering + // the quantum requires a secure context and fallback implementing. Until then we simply use the 1.0 API value: + return 128; + }, + + // emscripten_create_audio_context() does not itself use the // emscriptenGetAudioObject() function, but mark it as a dependency, because // the user will not be able to utilize the node unless they call // emscriptenGetAudioObject() on it on JS side to connect it to the graph, so - // this avoids the user needing to manually do it on the command line. + // this avoids the user needing to manually add the dependency on the command line. emscripten_create_audio_context__deps: ['$emscriptenRegisterAudioObject', '$emscriptenGetAudioObject'], emscripten_create_audio_context: (options) => { let ctx = window.AudioContext || window.webkitAudioContext; @@ -264,6 +274,7 @@ let LibraryWebAudio = { }); }, + emscripten_create_wasm_audio_worklet_node__deps: ['$emscriptenGetContextQuantumSize'], emscripten_create_wasm_audio_worklet_node: (contextHandle, name, options, callback, userData) => { #if ASSERTIONS assert(contextHandle, `Called emscripten_create_wasm_audio_worklet_node() with a null Web Audio Context handle!`); @@ -282,7 +293,11 @@ let LibraryWebAudio = { numberOfInputs: HEAP32[options], numberOfOutputs: HEAP32[options+1], outputChannelCount: HEAPU32[options+2] ?
readChannelCountArray(HEAPU32[options+2]>>2, HEAP32[options+1]) : void 0, - processorOptions: { 'cb': callback, 'ud': userData } + processorOptions: { + 'cb': callback, + 'ud': userData, + 'qs': emscriptenGetContextQuantumSize(contextHandle) + } } : void 0; #if WEBAUDIO_DEBUG @@ -293,14 +308,13 @@ let LibraryWebAudio = { }, #endif // ~AUDIO_WORKLET + emscripten_audio_context_quantum_size__deps: ['$emscriptenGetContextQuantumSize'], emscripten_audio_context_quantum_size: (contextHandle) => { #if ASSERTIONS assert(EmAudio[contextHandle], `Called emscripten_audio_context_quantum_size() with an invalid Web Audio Context handle ${contextHandle}`); assert(EmAudio[contextHandle] instanceof (window.AudioContext || window.webkitAudioContext), `Called emscripten_audio_context_quantum_size() on handle ${contextHandle} that is not an AudioContext, but of type ${EmAudio[contextHandle]}`); #endif - // TODO: once the Web Audio 1.1 API is implemented we can query the context for its renderQuantumSize - // (With two caveats: it needs the hint when generating the context adding, plus it requires a secure context and fallback implementing) - return 128; + return emscriptenGetContextQuantumSize(contextHandle); }, emscripten_audio_node_connect: (source, destination, outputIndex, inputIndex) => {