diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index bf75c754..dca454da 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -84,14 +84,14 @@ jobs: # in particular for RPi, do it locally and workaround... # --------------------------------------------------------- - name: stable - ${{ matrix.settings.target }} - node@20 + name: stable - ${{ matrix.settings.target }} - node@22.1 runs-on: ${{ matrix.settings.host }} steps: - uses: actions/checkout@v4 - name: Setup node uses: actions/setup-node@v4 with: - node-version: 20 + node-version: 22.1 check-latest: true architecture: ${{ matrix.settings.architecture }} diff --git a/.scripts/wpt-harness.mjs b/.scripts/wpt-harness.mjs index 5ff9ad6e..6a72aafb 100644 --- a/.scripts/wpt-harness.mjs +++ b/.scripts/wpt-harness.mjs @@ -1,3 +1,5 @@ +import { Blob } from 'node:buffer'; + import path from 'path'; import wptRunner from 'wpt-runner'; import chalk from 'chalk'; @@ -38,6 +40,13 @@ const rootURL = 'webaudio'; // monkey patch `window` with our web audio API const setup = window => { + // monkey patch innerText with textContent + Object.defineProperty(window.HTMLScriptElement.prototype, 'innerText', { + get: function() { + return this.textContent; + }, + }) + // return; // This is meant to make some idlharness tests pass: // cf. wpt-runnner/testharness/idlharness.js line 1466-1472 // These tests, which assess the descriptor of the classes according to window, @@ -75,6 +84,9 @@ const setup = window => { window.Promise = Promise; window.Event = Event; window.EventTarget = EventTarget; + window.URL = URL; + window.Blob = Blob; + window.SharedArrayBuffer = SharedArrayBuffer; // @note - adding Function this crashes some tests: // the-pannernode-interface/pannernode-setposition-throws.html // the-periodicwave-interface/createPeriodicWaveInfiniteValuesThrows.html diff --git a/Cargo.toml b/Cargo.toml index 6c99c732..d445d509 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,8 +10,10 @@ version = "0.20.0" crate-type = ["cdylib"] [dependencies] -napi = {version="2.15", features=["napi9", "tokio_rt"]} -napi-derive = "2.15" +crossbeam-channel = "0.5.12" +napi = { version="2.15", features=["napi9", "tokio_rt"] } +napi-derive = { version="2.15" } +thread-priority = "1.1.0" web-audio-api = "=0.45.0" # web-audio-api = { path = "../web-audio-api-rs" } diff --git a/examples/audio-worklet-shared-array-buffer.mjs b/examples/audio-worklet-shared-array-buffer.mjs new file mode 100644 index 00000000..91b2dcb3 --- /dev/null +++ b/examples/audio-worklet-shared-array-buffer.mjs @@ -0,0 +1,43 @@ +import path from 'node:path'; + +import { AudioContext, AudioWorkletNode } from '../index.mjs'; +import { sleep } from '@ircam/sc-utils'; + +const latencyHint = process.env.WEB_AUDIO_LATENCY === 'playback' ? 'playback' : 'interactive'; +const audioContext = new AudioContext({ latencyHint }); + +await audioContext.audioWorklet.addModule(path.join('examples', 'worklets', 'array-source.js')); + +// Create a shared float array big enough for 128 floats +let sharedArray = new SharedArrayBuffer(512); +let sharedFloats = new Float32Array(sharedArray); + +async function runSource() { + const src = new AudioWorkletNode(audioContext, 'array-source', { + processorOptions: { sharedFloats }, + }); + src.connect(audioContext.destination); + + console.log("Sawtooth"); + for (let i = 0; i < sharedFloats.length; i++) { + sharedFloats[i] = -1. 
+ i / 64; // create saw + } + await sleep(1); + + console.log("Square"); + for (let i = 0; i < sharedFloats.length; i++) { + sharedFloats[i] = i > 64 ? 1 : -1; + } + await sleep(1); + + src.disconnect(); + + // src goes out of scope and is disconnected, so it should be cleaned up +} + +await runSource(); + +// @todo - this should close the AudioWorkletGlobalScope properly +// before closing the "real" context +console.log('closing'); +await audioContext.close(); diff --git a/examples/audio-worklet.mjs b/examples/audio-worklet.mjs new file mode 100644 index 00000000..309bd296 --- /dev/null +++ b/examples/audio-worklet.mjs @@ -0,0 +1,60 @@ +import path from 'node:path'; + +import { AudioContext, OfflineAudioContext, OscillatorNode, AudioWorkletNode } from '../index.mjs'; +import { sleep } from '@ircam/sc-utils'; + +const latencyHint = process.env.WEB_AUDIO_LATENCY === 'playback' ? 'playback' : 'interactive'; + +const TEST_ONLINE = false; + +const audioContext = TEST_ONLINE + ? new AudioContext({ latencyHint }) + : new OfflineAudioContext(2, 8 * 48000, 48000) + +await audioContext.audioWorklet.addModule(path.join('examples', 'worklets', 'bitcrusher.js')); // relative to cwd +await audioContext.audioWorklet.addModule(path.join('worklets', 'white-noise.js')); // relative path to call site + +const sine = new OscillatorNode(audioContext, { type: 'sawtooth', frequency: 5000 }); +const bitCrusher = new AudioWorkletNode(audioContext, 'bitcrusher', { + processorOptions: { msg: 'hello world' }, +}); + +bitCrusher.port.on('message', (event) => console.log('main recv', event)); +bitCrusher.port.postMessage({ hello: 'from main' }); + +sine + .connect(bitCrusher) + .connect(audioContext.destination); + +const paramBitDepth = bitCrusher.parameters.get('bitDepth'); +const paramReduction = bitCrusher.parameters.get('frequencyReduction'); + +paramBitDepth.setValueAtTime(1, 0); + +paramReduction.setValueAtTime(0.01, 0.); +paramReduction.linearRampToValueAtTime(0.1, 4.); +paramReduction.exponentialRampToValueAtTime(0.01, 8.); + +sine.start(); +sine.stop(8); + +const whiteNoise = new AudioWorkletNode(audioContext, 'white-noise'); +whiteNoise.connect(audioContext.destination); + +if (TEST_ONLINE) { + audioContext.renderCapacity.addEventListener('update', e => { + const { timestamp, averageLoad, peakLoad, underrunRatio } = e; + console.log('AudioRenderCapacityEvent:', { timestamp, averageLoad, peakLoad, underrunRatio }); + }); + audioContext.renderCapacity.start({ updateInterval: 1. 
}); + + await sleep(8); + await audioContext.close(); +} else { + const buffer = await audioContext.startRendering(); + const online = new AudioContext(); + const src = online.createBufferSource(); + src.buffer = buffer; + src.connect(online.destination); + src.start(); +} diff --git a/examples/worklets/array-source.js b/examples/worklets/array-source.js new file mode 100644 index 00000000..d4f2529f --- /dev/null +++ b/examples/worklets/array-source.js @@ -0,0 +1,20 @@ +class ArraySourceProcessor extends AudioWorkletProcessor { + constructor(options) { + super(); + this.sharedFloats = options.processorOptions.sharedFloats; + } + + process(inputs, outputs, parameters) { + const output = outputs[0]; + + output.forEach((channel) => { + for (let i = 0; i < channel.length; i++) { + channel[i] = this.sharedFloats[i]; + } + }); + + return true; + } +} + +registerProcessor('array-source', ArraySourceProcessor); diff --git a/examples/worklets/bitcrusher.js b/examples/worklets/bitcrusher.js new file mode 100644 index 00000000..296823e9 --- /dev/null +++ b/examples/worklets/bitcrusher.js @@ -0,0 +1,77 @@ +class Bitcrusher extends AudioWorkletProcessor { + static get parameterDescriptors() { + return [{ + name: 'bitDepth', + defaultValue: 12, + minValue: 1, + maxValue: 16 + }, { + name: 'frequencyReduction', + defaultValue: 0.5, + minValue: 0, + maxValue: 1 + }]; + } + + constructor(options) { + console.log(`++ in constructor: ${JSON.stringify(options, null, 2)}\n`); + // The initial parameter value can be set by passing |options| + // to the processor's constructor. + super(); + + this._phase = 0; + this._lastSampleValue = 0; + this._msg = options.processorOptions.msg; + + this.port.on('message', event => { + console.log(`++ on message: ${JSON.stringify(event, null, 2)}\n`); + }); + } + + process(inputs, outputs, parameters) { + const input = inputs[0]; + const output = outputs[0]; + const bitDepth = parameters.bitDepth; + const frequencyReduction = parameters.frequencyReduction; + + if (bitDepth.length > 1) { + for (let channel = 0; channel < output.length; ++channel) { + for (let i = 0; i < output[channel].length; ++i) { + let step = Math.pow(0.5, bitDepth[i]); + // Use modulo for indexing to handle the case where + // the length of the frequencyReduction array is 1. + this._phase += frequencyReduction[i % frequencyReduction.length]; + if (this._phase >= 1.0) { + this._phase -= 1.0; + this._lastSampleValue = step * Math.floor(input[channel][i] / step + 0.5); + } + output[channel][i] = this._lastSampleValue; + } + } + } else { + // Because we know bitDepth is constant for this call, + // we can lift the computation of step outside the loop, + // saving many operations. + const step = Math.pow(0.5, bitDepth[0]); + for (let channel = 0; channel < output.length; ++channel) { + for (let i = 0; i < output[channel].length; ++i) { + this._phase += frequencyReduction[i % frequencyReduction.length]; + if (this._phase >= 1.0) { + this._phase -= 1.0; + this._lastSampleValue = step * Math.floor(input[channel][i] / step + 0.5); + } + output[channel][i] = this._lastSampleValue; + } + } + } + + if (Math.random() < 0.005) { + this.port.postMessage({ hello: 'from render', msg: this._msg }); + } + + // No need to return a value; this node's lifetime is dependent only on its + // input connections. 
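+    // (Editor's note: returning `undefined` here, a falsy value, lets the
+    // engine reclaim this processor once its inputs disconnect, whereas
+    // `return true` would keep it alive regardless of connections.)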
+ } +} + +registerProcessor('bitcrusher', Bitcrusher); diff --git a/examples/worklets/white-noise.js b/examples/worklets/white-noise.js new file mode 100644 index 00000000..48083735 --- /dev/null +++ b/examples/worklets/white-noise.js @@ -0,0 +1,15 @@ +class WhiteNoiseProcessor extends AudioWorkletProcessor { + process(inputs, outputs, parameters) { + const output = outputs[0]; + + output.forEach((channel) => { + for (let i = 0; i < channel.length; i++) { + channel[i] = Math.random() * 2 - 1; + } + }); + + return true; + } +} + +registerProcessor('white-noise', WhiteNoiseProcessor); diff --git a/generator/index.mjs b/generator/index.mjs index b88cecd9..b3a229e1 100644 --- a/generator/index.mjs +++ b/generator/index.mjs @@ -14,6 +14,7 @@ import { ESLint } from 'eslint'; // and extended with the list of generatedNodes let supportedNodes = [ 'ScriptProcessorNode', + 'AudioWorkletNode', ]; const generatedNodes = [ @@ -276,13 +277,16 @@ async function beautifyAndLint(pathname, code) { } // Generate files that require the list of generated AudioNode -['index', 'monkey-patch', 'BaseAudioContext'].forEach(src => { +['indexCjs', 'indexMjs', 'BaseAudioContext'].forEach(src => { let input; let output; // index.tmpl.js generates the ES module re-export - if (src === 'index') { - input = path.join(jsTemplates, `${src}.tmpl.mjs`); - output = path.join(process.cwd(), `${src}.mjs`); + if (src === 'indexCjs') { + input = path.join(jsTemplates, `index.tmpl.cjs`); + output = path.join(process.cwd(), `index.cjs`); + } else if (src === 'indexMjs') { + input = path.join(jsTemplates, `index.tmpl.mjs`); + output = path.join(process.cwd(), `index.mjs`); } else { input = path.join(jsTemplates, `${src}.tmpl.js`); output = path.join(jsOutput, `${src}.js`); diff --git a/generator/js/BaseAudioContext.tmpl.js b/generator/js/BaseAudioContext.tmpl.js index 65ffa6ba..299a65e2 100644 --- a/generator/js/BaseAudioContext.tmpl.js +++ b/generator/js/BaseAudioContext.tmpl.js @@ -5,12 +5,16 @@ const { } = require('./lib/utils.js'); const { kNapiObj, + kPrivateConstructor, } = require('./lib/symbols.js'); +const AudioWorklet = require('./AudioWorklet.js'); + module.exports = (jsExport, _nativeBinding) => { class BaseAudioContext extends EventTarget { - #listener = null; + #audioWorklet = null; #destination = null; + #listener = null; constructor(options) { // Make constructor "private" @@ -29,24 +33,23 @@ module.exports = (jsExport, _nativeBinding) => { ...kHiddenProperty, }); - this.#listener = null; // lazily instanciated + this.#audioWorklet = new AudioWorklet({ + [kPrivateConstructor]: true, + workletId: this[kNapiObj].workletId, + sampleRate: this[kNapiObj].sampleRate, + }); + this.#destination = new jsExport.AudioDestinationNode(this, { [kNapiObj]: this[kNapiObj].destination, }); } - get listener() { + get audioWorklet() { if (!(this instanceof BaseAudioContext)) { throw new TypeError("Invalid Invocation: Value of 'this' must be of type 'BaseAudioContext'"); } - if (this.#listener === null) { - this.#listener = new jsExport.AudioListener({ - [kNapiObj]: this[kNapiObj].listener, - }); - } - - return this.#listener; + return this.#audioWorklet; } get destination() { @@ -57,6 +60,20 @@ module.exports = (jsExport, _nativeBinding) => { return this.#destination; } + get listener() { + if (!(this instanceof BaseAudioContext)) { + throw new TypeError("Invalid Invocation: Value of 'this' must be of type 'BaseAudioContext'"); + } + + if (this.#listener === null) { + this.#listener = new jsExport.AudioListener({ + [kNapiObj]: 
this[kNapiObj].listener, + }); + } + + return this.#listener; + } + get sampleRate() { if (!(this instanceof BaseAudioContext)) { throw new TypeError("Invalid Invocation: Value of 'this' must be of type 'BaseAudioContext'"); @@ -73,6 +90,15 @@ module.exports = (jsExport, _nativeBinding) => { return this[kNapiObj].currentTime; } + // @todo - implement in upstream crate + pass to AudioWorkletGlobalScope + // get renderQuantumSize() { + // if (!(this instanceof BaseAudioContext)) { + // throw new TypeError("Invalid Invocation: Value of 'this' must be of type 'BaseAudioContext'"); + // } + + // return this[kNapiObj].renderQuantumSize; + // } + get state() { if (!(this instanceof BaseAudioContext)) { throw new TypeError("Invalid Invocation: Value of 'this' must be of type 'BaseAudioContext'"); @@ -81,9 +107,6 @@ module.exports = (jsExport, _nativeBinding) => { return this[kNapiObj].state; } - // renderQuantumSize - // audioWorklet - get onstatechange() { if (!(this instanceof BaseAudioContext)) { throw new TypeError("Invalid Invocation: Value of 'this' must be of type 'BaseAudioContext'"); diff --git a/generator/js/index.tmpl.cjs b/generator/js/index.tmpl.cjs new file mode 100644 index 00000000..dabd767e --- /dev/null +++ b/generator/js/index.tmpl.cjs @@ -0,0 +1,56 @@ +const nativeBinding = require('./load-native.cjs'); +const jsExport = {}; + +// -------------------------------------------------------------------------- +// Events +// -------------------------------------------------------------------------- +jsExport.OfflineAudioCompletionEvent = require('./js/Events').OfflineAudioCompletionEvent; +jsExport.AudioProcessingEvent = require('./js/Events').AudioProcessingEvent; +jsExport.AudioRenderCapacityEvent = require('./js/Events').AudioRenderCapacityEvent; +// -------------------------------------------------------------------------- +// Create Web Audio API facade +// -------------------------------------------------------------------------- +jsExport.BaseAudioContext = require('./js/BaseAudioContext.js')(jsExport, nativeBinding); +jsExport.AudioContext = require('./js/AudioContext.js')(jsExport, nativeBinding); +jsExport.OfflineAudioContext = require('./js/OfflineAudioContext.js')(jsExport, nativeBinding); + +${d.nodes.map((node) => { +return ` +jsExport.${d.name(node)} = require('./js/${d.name(node)}.js')(jsExport, nativeBinding);` +}).join('')} + +jsExport.AudioNode = require('./js/AudioNode.js'); +jsExport.AudioScheduledSourceNode = require('./js/AudioScheduledSourceNode.js'); +jsExport.AudioParam = require('./js/AudioParam.js'); +jsExport.AudioDestinationNode = require('./js/AudioDestinationNode.js'); +jsExport.AudioListener = require('./js/AudioListener.js'); +jsExport.AudioWorklet = require('./js/AudioWorklet.js'); +jsExport.AudioParamMap = require('./js/AudioParamMap.js'); +jsExport.AudioRenderCapacity = require('./js/AudioRenderCapacity.js'); + +jsExport.PeriodicWave = require('./js/PeriodicWave.js')(jsExport, nativeBinding); +jsExport.AudioBuffer = require('./js/AudioBuffer.js')(jsExport, nativeBinding); + +// -------------------------------------------------------------------------- +// Promisify MediaDevices API +// -------------------------------------------------------------------------- +jsExport.mediaDevices = {}; + +const enumerateDevicesSync = nativeBinding.mediaDevices.enumerateDevices; +jsExport.mediaDevices.enumerateDevices = async function enumerateDevices() { + const list = enumerateDevicesSync(); + return Promise.resolve(list); +}; + +const getUserMediaSync = 
nativeBinding.mediaDevices.getUserMedia; +jsExport.mediaDevices.getUserMedia = async function getUserMedia(options) { + if (options === undefined) { + throw new TypeError('Failed to execute "getUserMedia" on "MediaDevices": audio must be requested'); + } + + const stream = getUserMediaSync(options); + return Promise.resolve(stream); +}; + +module.exports = jsExport; + diff --git a/generator/js/index.tmpl.mjs b/generator/js/index.tmpl.mjs index 177d1ff6..2c636b8e 100644 --- a/generator/js/index.tmpl.mjs +++ b/generator/js/index.tmpl.mjs @@ -21,6 +21,8 @@ export const { AudioParam, AudioDestinationNode, AudioListener, + AudioWorklet, + AudioParamMap, AudioRenderCapacity, PeriodicWave, diff --git a/generator/js/monkey-patch.tmpl.js b/generator/js/monkey-patch.tmpl.js deleted file mode 100644 index f8718dd5..00000000 --- a/generator/js/monkey-patch.tmpl.js +++ /dev/null @@ -1,55 +0,0 @@ -module.exports = function monkeyPatch(nativeBinding) { - let jsExport = {}; - - // -------------------------------------------------------------------------- - // Events - // -------------------------------------------------------------------------- - jsExport.OfflineAudioCompletionEvent = require('./Events').OfflineAudioCompletionEvent; - jsExport.AudioProcessingEvent = require('./Events').AudioProcessingEvent; - jsExport.AudioRenderCapacityEvent = require('./Events').AudioRenderCapacityEvent; - // -------------------------------------------------------------------------- - // Create Web Audio API facade - // -------------------------------------------------------------------------- - jsExport.BaseAudioContext = require('./BaseAudioContext.js')(jsExport, nativeBinding); - jsExport.AudioContext = require('./AudioContext.js')(jsExport, nativeBinding); - jsExport.OfflineAudioContext = require('./OfflineAudioContext.js')(jsExport, nativeBinding); - -${d.nodes.map((node) => { - return ` - jsExport.${d.name(node)} = require('./${d.name(node)}.js')(jsExport, nativeBinding);` -}).join('')} - - jsExport.AudioNode = require('./AudioNode.js'); - jsExport.AudioScheduledSourceNode = require('./AudioScheduledSourceNode.js'); - jsExport.AudioParam = require('./AudioParam.js'); - jsExport.AudioDestinationNode = require('./AudioDestinationNode.js'); - jsExport.AudioListener = require('./AudioListener.js'); - jsExport.AudioRenderCapacity = require('./AudioRenderCapacity.js'); - - jsExport.PeriodicWave = require('./PeriodicWave.js')(jsExport, nativeBinding); - jsExport.AudioBuffer = require('./AudioBuffer.js')(jsExport, nativeBinding); - - // -------------------------------------------------------------------------- - // Promisify MediaDevices API - // -------------------------------------------------------------------------- - jsExport.mediaDevices = {}; - - const enumerateDevicesSync = nativeBinding.mediaDevices.enumerateDevices; - jsExport.mediaDevices.enumerateDevices = async function enumerateDevices() { - const list = enumerateDevicesSync(); - return Promise.resolve(list); - }; - - const getUserMediaSync = nativeBinding.mediaDevices.getUserMedia; - jsExport.mediaDevices.getUserMedia = async function getUserMedia(options) { - if (options === undefined) { - throw new TypeError('Failed to execute "getUserMedia" on "MediaDevices": audio must be requested'); - } - - const stream = getUserMediaSync(options); - return Promise.resolve(stream); - }; - - return jsExport; -}; - diff --git a/generator/rs/lib.tmpl.rs b/generator/rs/lib.tmpl.rs index 1a447cca..4340668f 100644 --- a/generator/rs/lib.tmpl.rs +++ 
b/generator/rs/lib.tmpl.rs @@ -32,6 +32,12 @@ ${d.nodes.map(n => { return ` mod ${d.slug(n)}; use crate::${d.slug(n)}::${d.napiName(n)};`}).join('')} +// AudioWorklet internals +use crate::audio_worklet_node::{ + exit_audio_worklet_global_scope, + run_audio_worklet_global_scope, +}; + // MediaDevices & MediaStream API mod media_streams; use crate::media_streams::NapiMediaStream; @@ -76,6 +82,18 @@ fn init(mut exports: JsObject, env: Env) -> Result<()> { exports.set_named_property("${d.name(n)}", napi_class)?; `}).join('')} + // ---------------------------------------------------------------- + // AudioWorklet utils (internal) + // ---------------------------------------------------------------- + exports.create_named_method( + "run_audio_worklet_global_scope", + run_audio_worklet_global_scope, + )?; + exports.create_named_method( + "exit_audio_worklet_global_scope", + exit_audio_worklet_global_scope, + )?; + // ---------------------------------------------------------------- // MediaStream API & Media Devices API // ---------------------------------------------------------------- @@ -109,10 +127,9 @@ fn init(mut exports: JsObject, env: Env) -> Result<()> { let napi_class = NapiMediaStream::create_js_class(&env)?; store.set_named_property("MediaStream", napi_class)?; - // store the store into instance so that it can be globally accessed + // push store into env instance data so that it can be globally accessed let store_ref = env.create_reference(store)?; env.set_instance_data(store_ref, 0, |mut c| { - // don't have any idea of what this does c.value.unref(c.env).unwrap(); })?; diff --git a/index.cjs b/index.cjs index 652601ce..fa85b4ea 100644 --- a/index.cjs +++ b/index.cjs @@ -1,90 +1,88 @@ -const { platform, arch } = process; +// -------------------------------------------------------------------------- // +// -------------------------------------------------------------------------- // +// // +// // +// // +// ██╗ ██╗ █████╗ ██████╗ ███╗ ██╗██╗███╗ ██╗ ██████╗ // +// ██║ ██║██╔══██╗██╔══██╗████╗ ██║██║████╗ ██║██╔════╝ // +// ██║ █╗ ██║███████║██████╔╝██╔██╗ ██║██║██╔██╗ ██║██║ ███╗ // +// ██║███╗██║██╔══██║██╔══██╗██║╚██╗██║██║██║╚██╗██║██║ ██║ // +// ╚███╔███╔╝██║ ██║██║ ██║██║ ╚████║██║██║ ╚████║╚██████╔╝ // +// ╚══╝╚══╝ ╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═══╝╚═╝╚═╝ ╚═══╝ ╚═════╝ // +// // +// // +// - This file has been generated --------------------------- // +// // +// // +// -------------------------------------------------------------------------- // +// -------------------------------------------------------------------------- // -let nativeBinding = null; -let loadError = null; +const nativeBinding = require('./load-native.cjs'); +const jsExport = {}; -switch (platform) { - case 'win32': - switch (arch) { - case 'x64': - try { - nativeBinding = require('./node-web-audio-api.win32-x64-msvc.node'); - } catch (e) { - loadError = e; - } - break; - case 'arm64': - try { - nativeBinding = require('./node-web-audio-api.win32-arm64-msvc.node'); - } catch (e) { - loadError = e; - } - break; - default: - throw new Error(`Unsupported architecture on Windows: ${arch}`); - } - break; - case 'darwin': - switch (arch) { - case 'x64': - try { - nativeBinding = require('./node-web-audio-api.darwin-x64.node'); - } catch (e) { - loadError = e; - } - break; - case 'arm64': - try { - nativeBinding = require('./node-web-audio-api.darwin-arm64.node'); - } catch (e) { - loadError = e; - } - break; - default: - throw new Error(`Unsupported architecture on macOS: ${arch}`); - } - break; - case 'linux': - switch (arch) 
{ - case 'x64': - try { - nativeBinding = require('./node-web-audio-api.linux-x64-gnu.node'); - } catch (e) { - loadError = e; - } - break; - case 'arm64': - try { - nativeBinding = require('./node-web-audio-api.linux-arm64-gnu.node'); - } catch (e) { - loadError = e; - } - break; - case 'arm': - try { - nativeBinding = require('./node-web-audio-api.linux-arm-gnueabihf.node'); - } catch (e) { - loadError = e; - } - break; - default: - throw new Error(`Unsupported architecture on Linux: ${arch}`); - } - break; - default: - throw new Error(`Unsupported OS: ${platform}, architecture: ${arch}`); -} +// -------------------------------------------------------------------------- +// Events +// -------------------------------------------------------------------------- +jsExport.OfflineAudioCompletionEvent = require('./js/Events').OfflineAudioCompletionEvent; +jsExport.AudioProcessingEvent = require('./js/Events').AudioProcessingEvent; +jsExport.AudioRenderCapacityEvent = require('./js/Events').AudioRenderCapacityEvent; +// -------------------------------------------------------------------------- +// Create Web Audio API facade +// -------------------------------------------------------------------------- +jsExport.BaseAudioContext = require('./js/BaseAudioContext.js')(jsExport, nativeBinding); +jsExport.AudioContext = require('./js/AudioContext.js')(jsExport, nativeBinding); +jsExport.OfflineAudioContext = require('./js/OfflineAudioContext.js')(jsExport, nativeBinding); -if (!nativeBinding) { - if (loadError) { - throw loadError; - } +jsExport.ScriptProcessorNode = require('./js/ScriptProcessorNode.js')(jsExport, nativeBinding); +jsExport.AudioWorkletNode = require('./js/AudioWorkletNode.js')(jsExport, nativeBinding); +jsExport.AnalyserNode = require('./js/AnalyserNode.js')(jsExport, nativeBinding); +jsExport.AudioBufferSourceNode = require('./js/AudioBufferSourceNode.js')(jsExport, nativeBinding); +jsExport.BiquadFilterNode = require('./js/BiquadFilterNode.js')(jsExport, nativeBinding); +jsExport.ChannelMergerNode = require('./js/ChannelMergerNode.js')(jsExport, nativeBinding); +jsExport.ChannelSplitterNode = require('./js/ChannelSplitterNode.js')(jsExport, nativeBinding); +jsExport.ConstantSourceNode = require('./js/ConstantSourceNode.js')(jsExport, nativeBinding); +jsExport.ConvolverNode = require('./js/ConvolverNode.js')(jsExport, nativeBinding); +jsExport.DelayNode = require('./js/DelayNode.js')(jsExport, nativeBinding); +jsExport.DynamicsCompressorNode = require('./js/DynamicsCompressorNode.js')(jsExport, nativeBinding); +jsExport.GainNode = require('./js/GainNode.js')(jsExport, nativeBinding); +jsExport.IIRFilterNode = require('./js/IIRFilterNode.js')(jsExport, nativeBinding); +jsExport.MediaStreamAudioSourceNode = require('./js/MediaStreamAudioSourceNode.js')(jsExport, nativeBinding); +jsExport.OscillatorNode = require('./js/OscillatorNode.js')(jsExport, nativeBinding); +jsExport.PannerNode = require('./js/PannerNode.js')(jsExport, nativeBinding); +jsExport.StereoPannerNode = require('./js/StereoPannerNode.js')(jsExport, nativeBinding); +jsExport.WaveShaperNode = require('./js/WaveShaperNode.js')(jsExport, nativeBinding); + +jsExport.AudioNode = require('./js/AudioNode.js'); +jsExport.AudioScheduledSourceNode = require('./js/AudioScheduledSourceNode.js'); +jsExport.AudioParam = require('./js/AudioParam.js'); +jsExport.AudioDestinationNode = require('./js/AudioDestinationNode.js'); +jsExport.AudioListener = require('./js/AudioListener.js'); +jsExport.AudioWorklet = 
require('./js/AudioWorklet.js');
+jsExport.AudioParamMap = require('./js/AudioParamMap.js');
+jsExport.AudioRenderCapacity = require('./js/AudioRenderCapacity.js');
+
+jsExport.PeriodicWave = require('./js/PeriodicWave.js')(jsExport, nativeBinding);
+jsExport.AudioBuffer = require('./js/AudioBuffer.js')(jsExport, nativeBinding);
-
+// --------------------------------------------------------------------------
+// Promisify MediaDevices API
+// --------------------------------------------------------------------------
+jsExport.mediaDevices = {};
-const monkeyPatch = require('./js/monkey-patch.js');
-nativeBinding = monkeyPatch(nativeBinding);
+const enumerateDevicesSync = nativeBinding.mediaDevices.enumerateDevices;
+jsExport.mediaDevices.enumerateDevices = async function enumerateDevices() {
+  const list = enumerateDevicesSync();
+  return Promise.resolve(list);
+};
+
+const getUserMediaSync = nativeBinding.mediaDevices.getUserMedia;
+jsExport.mediaDevices.getUserMedia = async function getUserMedia(options) {
+  if (options === undefined) {
+    throw new TypeError('Failed to execute "getUserMedia" on "MediaDevices": audio must be requested');
+  }
-module.exports = nativeBinding;
+  const stream = getUserMediaSync(options);
+  return Promise.resolve(stream);
+};
+module.exports = jsExport;
diff --git a/index.mjs b/index.mjs
index 97dc18c8..74549b99 100644
--- a/index.mjs
+++ b/index.mjs
@@ -42,12 +42,15 @@ export const {
   AudioParam,
   AudioDestinationNode,
   AudioListener,
+  AudioWorklet,
+  AudioParamMap,
   AudioRenderCapacity,
   PeriodicWave,
   AudioBuffer,
 
   // generated nodes
   ScriptProcessorNode,
+  AudioWorkletNode,
   AnalyserNode,
   AudioBufferSourceNode,
   BiquadFilterNode,
diff --git a/js/AudioContext.js b/js/AudioContext.js
index 67d7f69f..1f6be81e 100644
--- a/js/AudioContext.js
+++ b/js/AudioContext.js
@@ -11,6 +11,7 @@ const {
   kNapiObj,
   kOnStateChange,
   kOnSinkChange,
+  kWorkletRelease,
 } = require('./lib/symbols.js');
 const {
   propagateEvent,
@@ -214,6 +215,9 @@ module.exports = function(jsExport, nativeBinding) {
       throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'AudioContext\'');
     }
 
+    // Close the audioWorklet first so that `run_audio_worklet_global_scope` exits first.
+    // The other way around would work too thanks to `recv_timeout`, but this ordering is cleaner.
+    await this.audioWorklet[kWorkletRelease]();
     await this[kNapiObj].close();
   }
 
diff --git a/js/AudioParamMap.js b/js/AudioParamMap.js
new file mode 100644
index 00000000..4c67e483
--- /dev/null
+++ b/js/AudioParamMap.js
@@ -0,0 +1,88 @@
+const {
+  kPrivateConstructor,
+} = require('./lib/symbols.js');
+const {
+  kEnumerableProperty,
+} = require('./lib/utils.js');
+
+class AudioParamMap {
+  #parameters = null;
+
+  constructor(options) {
+    if (
+      (typeof options !== 'object') ||
+      options[kPrivateConstructor] !== true
+    ) {
+      throw new TypeError('Illegal constructor');
+    }
+
+    this.#parameters = options.parameters;
+  }
+
+  get size() {
+    return this.#parameters.size;
+  }
+
+  entries() {
+    return this.#parameters.entries();
+  }
+
+  keys() {
+    return this.#parameters.keys();
+  }
+
+  values() {
+    return this.#parameters.values();
+  }
+
+  forEach(func) {
+    return this.#parameters.forEach(func);
+  }
+
+  get(name) {
+    return this.#parameters.get(name);
+  }
+
+  has(name) {
+    return this.#parameters.has(name);
+  }
+}
+
+Object.defineProperties(AudioParamMap, {
+  length: {
+    __proto__: null,
+    writable: false,
+    enumerable: false,
+    configurable: true,
+    value: 0,
+  },
+});
+
+Object.defineProperties(AudioParamMap.prototype, {
+  [Symbol.toStringTag]: {
+    __proto__: null,
+    writable: false,
+    enumerable: false,
+    configurable: true,
+    value: 'AudioParamMap',
+  },
+  [Symbol.iterator]: {
+    value: AudioParamMap.prototype.entries,
+    enumerable: false,
+    configurable: true,
+    writable: true,
+  },
+  size: {
+    __proto__: null,
+    enumerable: true,
+    configurable: true,
+  },
+  entries: kEnumerableProperty,
+  keys: kEnumerableProperty,
+  values: kEnumerableProperty,
+  forEach: kEnumerableProperty,
+  get: kEnumerableProperty,
+  has: kEnumerableProperty,
+});
+
+module.exports = AudioParamMap;
diff --git a/js/AudioWorklet.js b/js/AudioWorklet.js
new file mode 100644
index 00000000..e98455a5
--- /dev/null
+++ b/js/AudioWorklet.js
@@ -0,0 +1,261 @@
+const {
+  resolveObjectURL,
+} = require('node:buffer');
+const fs = require('node:fs').promises;
+const { existsSync } = require('node:fs');
+const path = require('node:path');
+const {
+  Worker,
+  MessageChannel,
+} = require('node:worker_threads');
+
+const {
+  kProcessorRegistered,
+  kGetParameterDescriptors,
+  kCreateProcessor,
+  kPrivateConstructor,
+  kWorkletRelease,
+  kCheckProcessorsCreated,
+} = require('./lib/symbols.js');
+const {
+  kEnumerableProperty,
+} = require('./lib/utils.js');
+
+const caller = require('caller');
+// cf. https://www.npmjs.com/package/node-fetch#commonjs
+const fetch = (...args) => import('node-fetch').then(({ default: fetch }) => fetch(...args));
+
+/**
+ * Retrieve the module code using different resolution strategies:
+ * - file: absolute path, or path relative to cwd
+ * - http(s) URL
+ * - blob: URL
+ * - fallback: relative to the caller site
+ *   + on the filesystem
+ *   + when the caller site is itself a URL (required for wpt, probably no other use case)
+ */
+const resolveModule = async (moduleUrl) => {
+  let code;
+
+  if (existsSync(moduleUrl)) {
+    const pathname = moduleUrl;
+
+    try {
+      const buffer = await fs.readFile(pathname);
+      code = buffer.toString();
+    } catch (err) {
+      throw new Error(`Failed to execute 'addModule' on 'AudioWorklet': ${err.message}`);
+    }
+  } else if (moduleUrl.startsWith('http')) {
+    try {
+      const res = await fetch(moduleUrl);
+      code = await res.text();
+    } catch (err) {
+      throw new Error(`Failed to execute 'addModule' on 'AudioWorklet': ${err.message}`);
+    }
+  } else if (moduleUrl.startsWith('blob:')) {
+    try {
+      const blob = resolveObjectURL(moduleUrl);
+      code = await blob.text();
+    } catch (err) {
+      throw new Error(`Failed to execute 'addModule' on 'AudioWorklet': ${err.message}`);
+    }
+  } else {
+    // get the caller site from the error stack trace
+    const callerSite = caller(2);
+
+    if (callerSite.startsWith('http')) {
+      let url;
+      // handle origin-relative and caller-path-relative URLs
+      if (moduleUrl.startsWith('/')) {
+        const origin = new URL(callerSite).origin;
+        url = origin + moduleUrl;
+      } else {
+        // we know separators are '/'
+        const baseUrl = callerSite.substr(0, callerSite.lastIndexOf('/'));
+        url = baseUrl + '/' + moduleUrl;
+      }
+
+      try {
+        const res = await fetch(url);
+        code = await res.text();
+      } catch (err) {
+        throw new Error(`Failed to execute 'addModule' on 'AudioWorklet': ${err.message}`);
+      }
+    } else {
+      const dirname = callerSite.substr(0, callerSite.lastIndexOf(path.sep));
+      const absDirname = dirname.replace('file://', '');
+      const pathname = path.join(absDirname, moduleUrl);
+
+      if (existsSync(pathname)) {
+        try {
+          const buffer = await fs.readFile(pathname);
+          code = buffer.toString();
+        } catch (err) {
+          throw new Error(`Failed to execute 'addModule' on 'AudioWorklet': ${err.message}`);
+        }
+      } else {
+        throw new Error(`Failed to execute 'addModule' on 'AudioWorklet': Cannot resolve module ${moduleUrl}`);
+      }
+    }
+  }
+
+  return code;
+};
+
+class AudioWorklet {
+  #workletId = null;
+  #sampleRate = null;
+  #port = null;
+  #idPromiseMap = new Map();
+  #promiseId = 0;
+  #workletParamDescriptorsMap = new Map();
+  #pendingCreateProcessors = new Set();
+
+  constructor(options) {
+    if (
+      (typeof options !== 'object') ||
+      options[kPrivateConstructor] !== true
+    ) {
+      throw new TypeError('Illegal constructor');
+    }
+
+    this.#workletId = options.workletId;
+    this.#sampleRate = options.sampleRate;
+  }
+
+  #bindEvents() {
+    this.#port.on('message', event => {
+      switch (event.cmd) {
+        case 'node-web-audio-api:worklet:module-added': {
+          const { promiseId } = event;
+          const { resolve } = this.#idPromiseMap.get(promiseId);
+          this.#idPromiseMap.delete(promiseId);
+          resolve();
+          break;
+        }
+        case 'node-web-audio-api:worklet:processor-registered': {
+          const { name, parameterDescriptors } = event;
+          this.#workletParamDescriptorsMap.set(name, parameterDescriptors);
+          break;
+        }
+        case 'node-web-audio-api:worklet:processor-created': {
+          const { id } = event;
+          this.#pendingCreateProcessors.delete(id);
+          break;
+        }
+      }
+    });
+  }
+
+  get port() {
+    return this.#port;
+  }
+
+  async addModule(moduleUrl) {
+    const code = await resolveModule(moduleUrl);
+
+    // launch the Worker if it does not exist yet
+    if (!this.#port) {
+      await new Promise(resolve => {
+        const workletPathname = path.join(__dirname, 'AudioWorkletGlobalScope.js');
+        this.#port = new Worker(workletPathname, {
+          workerData: {
+            workletId: this.#workletId,
+            sampleRate: this.#sampleRate,
+          },
+        });
+        this.#port.on('online', resolve);
+
+        this.#bindEvents();
+      });
+    }
+
+    const promiseId = this.#promiseId++;
+    // This promise is resolved when the Worker returns the name and
+    // parameterDescriptors from the added module
+    await new Promise((resolve, reject) => {
+      this.#idPromiseMap.set(promiseId, { resolve, reject });
+
+      this.#port.postMessage({
+        cmd: 'node-web-audio-api:worklet:add-module',
+        code,
+        promiseId,
+      });
+    });
+  }
+
+  // For OfflineAudioContext only, check that all processors have been properly
+  // created before the actual `startRendering`
+  async [kCheckProcessorsCreated]() {
+    while (this.#pendingCreateProcessors.size !== 0) {
+      // yield to the event loop so that 'processor-created' messages can be handled
+      await new Promise(resolve => setTimeout(resolve, 0));
+    }
+  }
+
+  [kProcessorRegistered](name) {
+    return Array.from(this.#workletParamDescriptorsMap.keys()).includes(name);
+  }
+
+  [kGetParameterDescriptors](name) {
+    return this.#workletParamDescriptorsMap.get(name);
+  }
+
+  [kCreateProcessor](name, options, id) {
+    this.#pendingCreateProcessors.add(id);
+
+    const { port1, port2 } = new MessageChannel();
+    // @todo - check if some processorOptions must be transferred as well
+    this.#port.postMessage({
+      cmd: 'node-web-audio-api:worklet:create-processor',
+      name,
+      id,
+      options,
+      port: port2,
+    }, [port2]);
+
+    return port1;
+  }
+
+  async [kWorkletRelease]() {
+    if (this.#port) {
+      await new Promise(resolve => {
+        this.#port.on('exit', resolve);
+        this.#port.postMessage({
+          cmd: 'node-web-audio-api:worklet:exit',
+        });
+      });
+    }
+  }
+}
+
+Object.defineProperties(AudioWorklet, {
+  length: {
+    __proto__: null,
+    writable: false,
+    enumerable: false,
+    configurable: true,
+    value: 0,
+  },
+});
+
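// Review note - a minimal sketch of the resolution strategies handled by
// `resolveModule` above (filesystem path, http(s) URL, blob: URL); the module
// path and URL below are hypothetical:
//
//   const ctx = new AudioContext();
//   // 1. absolute path, or path relative to cwd (or, as a fallback, to the caller site)
//   await ctx.audioWorklet.addModule('./worklets/my-processor.js');
//   // 2. remote module, fetched over http(s)
//   await ctx.audioWorklet.addModule('https://example.com/my-processor.js');
//   // 3. blob: URL, resolved through `resolveObjectURL` from node:buffer
//   const blob = new Blob(
//     [`registerProcessor('noop', class extends AudioWorkletProcessor {
//       process() { return true; }
//     });`],
//     { type: 'application/javascript' },
//   );
//   await ctx.audioWorklet.addModule(URL.createObjectURL(blob));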
+Object.defineProperties(AudioWorklet.prototype, {
+  [Symbol.toStringTag]: {
+    __proto__: null,
+    writable: false,
+    enumerable: false,
+    configurable: true,
+    value: 'AudioWorklet',
+  },
+  addModule: kEnumerableProperty,
+  port: kEnumerableProperty,
+});
+
+module.exports = AudioWorklet;
+
diff --git a/js/AudioWorkletGlobalScope.js b/js/AudioWorkletGlobalScope.js
new file mode 100644
index 00000000..3fc2799e
--- /dev/null
+++ b/js/AudioWorkletGlobalScope.js
@@ -0,0 +1,303 @@
+const {
+  parentPort,
+  workerData,
+} = require('node:worker_threads');
+
+const conversions = require('webidl-conversions');
+
+const {
+  exit_audio_worklet_global_scope,
+  run_audio_worklet_global_scope,
+} = require('../load-native.cjs');
+
+const {
+  workletId,
+  sampleRate,
+} = workerData;
+
+const kWorkletQueueTask = Symbol.for('node-web-audio-api:worklet-queue-task');
+const kWorkletCallableProcess = Symbol.for('node-web-audio-api:worklet-callable-process');
+const kWorkletInputs = Symbol.for('node-web-audio-api:worklet-inputs');
+const kWorkletOutputs = Symbol.for('node-web-audio-api:worklet-outputs');
+const kWorkletParams = Symbol.for('node-web-audio-api:worklet-params');
+const kWorkletParamsCache = Symbol.for('node-web-audio-api:worklet-params-cache');
+// const kWorkletOrderedParamNames = Symbol.for('node-web-audio-api:worklet-ordered-param-names');
+
+const nameProcessorCtorMap = new Map();
+const processors = {};
+let pendingProcessorConstructionData = null;
+let loopStarted = false;
+let runLoopImmediateId = null;
+
+function isIterable(obj) {
+  // checks for null and undefined
+  if (obj === null || obj === undefined) {
+    return false;
+  }
+  return typeof obj[Symbol.iterator] === 'function';
+}
+
+// cf. https://stackoverflow.com/a/46759625
+function isConstructor(f) {
+  try {
+    Reflect.construct(String, [], f);
+  } catch (e) {
+    return false;
+  }
+  return true;
+}
+
+function runLoop() {
+  // block until we need to render a quantum
+  run_audio_worklet_global_scope(workletId, processors);
+  // yield to the event loop, and then repeat
+  runLoopImmediateId = setImmediate(runLoop);
+}
+
+// AudioWorkletGlobalScope globals (currentTime is expressed in seconds)
+globalThis.currentTime = 0;
+globalThis.currentFrame = 0;
+globalThis.sampleRate = sampleRate;
+// @todo - implement in upstream crate
+// globalThis.renderQuantumSize = 128;
+
+globalThis.AudioWorkletProcessor = class AudioWorkletProcessor {
+  static get parameterDescriptors() {
+    return [];
+  }
+
+  #port = null;
+
+  constructor() {
+    const {
+      port,
+      numberOfInputs,
+      numberOfOutputs,
+      parameterDescriptors,
+    } = pendingProcessorConstructionData;
+
+    // @todo - Mark [[callable process]] as true; set it to false in the render
+    // quantum if "process" does not exist or if it throws an error
+    this[kWorkletCallableProcess] = true;
+    // @todo - reuse Float32Arrays between calls + freeze arrays
+    this[kWorkletInputs] = new Array(numberOfInputs).fill([]);
+    // @todo - use `outputChannelCount`
+    this[kWorkletOutputs] = new Array(numberOfOutputs).fill([]);
+    // Object to be reused as `process` parameters argument
+    this[kWorkletParams] = {};
+    // Cache of 2 Float32Array (of length 128 and 1) for each param, to be reused on
+    // each process call according to the size of the param for the current render quantum
+    this[kWorkletParamsCache] = {};
+
+    parameterDescriptors.forEach(desc => {
+      this[kWorkletParamsCache][desc.name] = [
+        new Float32Array(128), // should be globalThis.renderQuantumSize
+        new Float32Array(1),
+      ];
+    });
+
+    this.#port = port;
+  }
+
+  get port() {
+    if (!(this instanceof AudioWorkletProcessor)) {
+      throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'AudioWorkletProcessor\'');
+    }
+
+    return this.#port;
+  }
+
+  [kWorkletQueueTask](cmd, err) {
+    this.#port.postMessage({ cmd, err });
+  }
+}
+
+// follow the algorithm from:
+// https://webaudio.github.io/web-audio-api/#dom-audioworkletglobalscope-registerprocessor
+globalThis.registerProcessor = function registerProcessor(name, processorCtor) {
+  const parsedName = conversions['DOMString'](name, {
+    context: `Cannot execute 'registerProcessor' in 'AudioWorkletGlobalScope': name (${name})`,
+  });
+
+  if (parsedName === '') {
+    throw new DOMException(`Cannot execute 'registerProcessor' in 'AudioWorkletGlobalScope': name is empty`, 'NotSupportedError');
+  }
+
+  if (nameProcessorCtorMap.has(name)) {
+    throw new DOMException(`Cannot execute 'registerProcessor' in 'AudioWorkletGlobalScope': A processor with name '${name}' has already been registered in this scope`, 'NotSupportedError');
+  }
+
+  if (!isConstructor(processorCtor)) {
+    throw new TypeError(`Cannot execute 'registerProcessor' in 'AudioWorkletGlobalScope': argument 2 for name '${name}' is not a constructor`);
+  }
+
+  if (typeof processorCtor.prototype !== 'object') {
+    throw new TypeError(`Cannot execute 'registerProcessor' in 'AudioWorkletGlobalScope': argument 2 for name '${name}' is not a valid AudioWorkletProcessor`);
+  }
+
+  // must support Array, Set or iterators
+  let parameterDescriptorsValue = processorCtor.parameterDescriptors;
+
+  if (!isIterable(parameterDescriptorsValue)) {
+    throw new TypeError(`Cannot execute 'registerProcessor' in 'AudioWorkletGlobalScope': Invalid 'parameterDescriptors' for processor '${name}': 'parameterDescriptors' is not iterable`);
+  }
+
+  const paramDescriptors = Array.from(parameterDescriptorsValue);
+  const parsedParamDescriptors = [];
+
+  // Parse AudioParamDescriptor sequence
+  // cf. https://webaudio.github.io/web-audio-api/#AudioParamDescriptor
+  for (let i = 0; i < paramDescriptors.length; i++) {
+    const descriptor = paramDescriptors[i];
+    const parsedDescriptor = {};
+
+    if (typeof descriptor !== 'object' || descriptor === null) {
+      throw new TypeError(`Cannot execute 'registerProcessor' in 'AudioWorkletGlobalScope': Invalid 'parameterDescriptors' for processor '${name}': Element at index ${i} is not an instance of 'AudioParamDescriptor'`);
+    }
+
+    if (descriptor.name === undefined) {
+      throw new TypeError(`Cannot execute 'registerProcessor' in 'AudioWorkletGlobalScope': Invalid 'parameterDescriptors' for processor '${name}': Element at index ${i} is not an instance of 'AudioParamDescriptor'`);
+    }
+
+    parsedDescriptor.name = conversions['DOMString'](descriptor.name, {
+      context: `Cannot execute 'registerProcessor' in 'AudioWorkletGlobalScope': Invalid 'parameterDescriptors' for processor '${name}': Invalid 'name' for 'AudioParamDescriptor' at index ${i}`,
+    });
+
+    if (descriptor.defaultValue !== undefined) {
+      parsedDescriptor.defaultValue = conversions['float'](descriptor.defaultValue, {
+        context: `Cannot execute 'registerProcessor' in 'AudioWorkletGlobalScope': Invalid 'parameterDescriptors' for processor '${name}': Invalid 'defaultValue' for 'AudioParamDescriptor' at index ${i}`,
+      });
+    } else {
+      parsedDescriptor.defaultValue = 0;
+    }
+
+    if (descriptor.maxValue !== undefined) {
+      parsedDescriptor.maxValue = conversions['float'](descriptor.maxValue, {
+        context: `Cannot execute 'registerProcessor' in 'AudioWorkletGlobalScope': Invalid 'parameterDescriptors' for processor '${name}': Invalid 'maxValue' for 'AudioParamDescriptor' at index ${i}`,
+      });
+    } else {
+      parsedDescriptor.maxValue = 3.4028235e38;
+    }
+
+    if (descriptor.minValue !== undefined) {
+      parsedDescriptor.minValue = conversions['float'](descriptor.minValue, {
+        context: `Cannot execute 'registerProcessor' in 'AudioWorkletGlobalScope': Invalid 'parameterDescriptors' for processor '${name}': Invalid 'minValue' for 'AudioParamDescriptor' at index ${i}`,
+      });
+    } else {
+      parsedDescriptor.minValue = -3.4028235e38;
+    }
+
+    if (descriptor.automationRate !== undefined) {
+      if (!['a-rate', 'k-rate'].includes(descriptor.automationRate)) {
+        throw new TypeError(`Cannot execute 'registerProcessor' in 'AudioWorkletGlobalScope': Invalid 'parameterDescriptors' for processor '${name}': The provided value '${descriptor.automationRate}' is not a valid enum value of type AutomationRate for 'AudioParamDescriptor' at index ${i}`);
+      }
+
+      parsedDescriptor.automationRate = conversions['DOMString'](descriptor.automationRate, {
+        context: `Cannot execute 'registerProcessor' in 'AudioWorkletGlobalScope': Invalid 'parameterDescriptors' for processor '${name}': The provided value '${descriptor.automationRate}'`,
+      });
+    } else {
+      parsedDescriptor.automationRate = 'a-rate';
+    }
+
+    parsedParamDescriptors.push(parsedDescriptor);
+  }
+
+  // check for duplicate param names and consistency of min, max and default values
+  const paramNames = [];
+
+  for (let i = 0; i < parsedParamDescriptors.length; i++) {
+    const { name, defaultValue, minValue, maxValue } = parsedParamDescriptors[i];
+
+    if (paramNames.includes(name)) {
+      throw new DOMException(`Cannot execute 'registerProcessor' in 'AudioWorkletGlobalScope': Invalid 'parameterDescriptors' for processor '${name}': 'AudioParamDescriptor' with name '${name}' already declared`, 'NotSupportedError');
+    }
+
+    paramNames.push(name);
+
+    if (!(minValue <= defaultValue && defaultValue <= maxValue)) {
+      throw new DOMException(`Cannot execute 'registerProcessor' in 'AudioWorkletGlobalScope': Invalid 'parameterDescriptors' for processor '${name}': The constraint minValue <= defaultValue <= maxValue is not met`, 'InvalidStateError');
+    }
+  }
+
+  // store constructor
+  nameProcessorCtorMap.set(parsedName, processorCtor);
+  // send param descriptors back to the main thread
+  parentPort.postMessage({
+    cmd: 'node-web-audio-api:worklet:processor-registered',
+    name: parsedName,
+    parameterDescriptors: parsedParamDescriptors,
+  });
+};
+
+
+// @todo - recheck this, not sure this is relevant in our case
+// NOTE: Authors that register an event listener on the "message" event of this
+// port should call close on either end of the MessageChannel (either in the
+// AudioWorklet or the AudioWorkletGlobalScope side) to allow for resources to be collected.
+parentPort.on('exit', () => {
+  process.stdout.write('closing worklet');
+});
+
+parentPort.on('message', event => {
+  console.log(event.cmd + '\n');
+
+  switch (event.cmd) {
+    case 'node-web-audio-api:worklet:init': {
+      const { workletId, processors, promiseId } = event;
+      break;
+    }
+    case 'node-web-audio-api:worklet:exit': {
+      clearImmediate(runLoopImmediateId);
+      // properly exit the audio worklet on the Rust side
+      exit_audio_worklet_global_scope(workletId, processors);
+      // exit process
+      process.exit(0);
+      break;
+    }
+    case 'node-web-audio-api:worklet:add-module': {
+      const { code, promiseId } = event;
+      const func = new Function('AudioWorkletProcessor', 'registerProcessor', code);
+      func(AudioWorkletProcessor, registerProcessor);
+
+      // registered param descriptors have been sent to the main thread, resolve the Promise
+      parentPort.postMessage({
+        cmd: 'node-web-audio-api:worklet:module-added',
+        promiseId,
+      });
+      break;
+    }
+    case 'node-web-audio-api:worklet:create-processor': {
+      const { name, id, options, port } = event;
+      const ctor = nameProcessorCtorMap.get(name);
+
+      // rewrap options of interest for the AudioWorkletProcessor base class
+      pendingProcessorConstructionData = {
+        port,
+        numberOfInputs: options.numberOfInputs,
+        numberOfOutputs: options.numberOfOutputs,
+        parameterDescriptors: ctor.parameterDescriptors,
+      };
+
+      let instance;
+
+      try {
+        instance = new ctor(options);
+      } catch (err) {
+        port.postMessage({ cmd: 'node-web-audio-api:worklet:ctor-error', err });
+      }
+
+      pendingProcessorConstructionData = null;
+      // store in global so that Rust can match the JS processor
+      // with its corresponding NapiAudioWorkletProcessor
+      processors[`${id}`] = instance;
+      // notify the audio worklet back that the processor has finished instantiation
+      parentPort.postMessage({ cmd: 'node-web-audio-api:worklet:processor-created', id });
+
+      if (!loopStarted) {
+        loopStarted = true;
+        setImmediate(runLoop);
+      }
+      break;
+    }
+  }
+});
diff --git a/js/AudioWorkletNode.js b/js/AudioWorkletNode.js
new file mode 100644
index 00000000..5a3b76f0
--- /dev/null
+++ b/js/AudioWorkletNode.js
@@ -0,0 +1,290 @@
+/* eslint-disable no-unused-vars */
+const conversions = require('webidl-conversions');
+const {
+  toSanitizedSequence,
+} = require('./lib/cast.js');
+const {
+  throwSanitizedError,
+} = require('./lib/errors.js');
+const {
+  kNapiObj,
+  kProcessorRegistered,
+  kGetParameterDescriptors,
+  kPrivateConstructor,
+  kCreateProcessor,
+} = require('./lib/symbols.js');
+const {
+  kEnumerableProperty,
+} = require('./lib/utils.js');
+const {
+  propagateEvent,
+} = require('./lib/events.js');
+const {
+  ErrorEvent,
+} = require('./Events.js');
+
+/* eslint-enable no-unused-vars */
+
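// Review note - a minimal processor exercising the `registerProcessor`
// validation implemented in AudioWorkletGlobalScope.js above; omitted
// descriptor fields fall back to the defaults filled in there (minValue /
// maxValue to +/-3.4028235e38, automationRate to 'a-rate'). The processor
// name 'gain' is hypothetical:
//
//   class GainProcessor extends AudioWorkletProcessor {
//     static get parameterDescriptors() {
//       return [{ name: 'gain', defaultValue: 1, minValue: 0, maxValue: 10 }];
//     }
//
//     process(inputs, outputs, parameters) {
//       const input = inputs[0];
//       const output = outputs[0];
//       const gain = parameters.gain;
//
//       for (let ch = 0; ch < output.length; ch++) {
//         for (let i = 0; i < output[ch].length; i++) {
//           // `gain` has one value per frame when automated, a single value otherwise
//           const g = gain.length > 1 ? gain[i] : gain[0];
//           output[ch][i] = (input[ch] ? input[ch][i] : 0) * g;
//         }
//       }
//
//       return true;
//     }
//   }
//
//   registerProcessor('gain', GainProcessor);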
+const AudioNode = require('./AudioNode.js');
+const AudioParamMap = require('./AudioParamMap.js');
+const IMPLEMENTATION_MAX_NUMBER_OF_CHANNELS = 32;
+
+module.exports = (jsExport, nativeBinding) => {
+  class AudioWorkletNode extends AudioNode {
+    #port = null;
+    #parameters = null;
+
+    constructor(context, name, options) {
+      if (arguments.length < 2) {
+        throw new TypeError(`Failed to construct 'AudioWorkletNode': 2 arguments required, but only ${arguments.length} present`);
+      }
+
+      if (!(context instanceof jsExport.BaseAudioContext)) {
+        throw new TypeError(`Failed to construct 'AudioWorkletNode': argument 1 is not of type BaseAudioContext`);
+      }
+
+      const parsedName = conversions['DOMString'](name, {
+        context: `Failed to construct 'AudioWorkletNode': The given 'AudioWorkletProcessor' name`,
+      });
+
+      if (!context.audioWorklet[kProcessorRegistered](parsedName)) {
+        throw new DOMException(`Failed to construct 'AudioWorkletNode': processor '${parsedName}' is not registered in 'AudioWorklet'`, 'InvalidStateError');
+      }
+
+      // parsed version of the options to be passed to NAPI
+      const parsedOptions = {};
+
+      if (options && (typeof options !== 'object' || options === null)) {
+        throw new TypeError('Failed to construct \'AudioWorkletNode\': argument 3 is not of type \'AudioWorkletNodeOptions\'');
+      }
+
+      if (options && options.numberOfInputs !== undefined) {
+        parsedOptions.numberOfInputs = conversions['unsigned long'](options.numberOfInputs, {
+          enforceRange: true,
+          context: `Failed to construct 'AudioWorkletNode': Failed to read the 'numberOfInputs' property from AudioWorkletNodeOptions: The provided value (${options.numberOfInputs})`,
+        });
+      } else {
+        parsedOptions.numberOfInputs = 1;
+      }
+
+      if (options && options.numberOfOutputs !== undefined) {
+        parsedOptions.numberOfOutputs = conversions['unsigned long'](options.numberOfOutputs, {
+          enforceRange: true,
+          context: `Failed to construct 'AudioWorkletNode': Failed to read the 'numberOfOutputs' property from AudioWorkletNodeOptions: The provided value (${options.numberOfOutputs})`,
+        });
+      } else {
+        parsedOptions.numberOfOutputs = 1;
+      }
+
+      // If outputChannelCount exists,
+      // - If any value in outputChannelCount is zero or greater than the implementation’s maximum number of channels, throw a NotSupportedError and abort the remaining steps.
+      // - If the length of outputChannelCount does not equal numberOfOutputs, throw an IndexSizeError and abort the remaining steps.
+      // - If both numberOfInputs and numberOfOutputs are 1, set the channel count of the node output to the one value in outputChannelCount.
+      // - Otherwise set the channel count of the kth output of the node to the kth element of outputChannelCount sequence and return.
+      if (options && options.outputChannelCount !== undefined) {
+        try {
+          parsedOptions.outputChannelCount = toSanitizedSequence(options.outputChannelCount, Uint32Array);
+        } catch (err) {
+          throw new TypeError(`Failed to construct 'AudioWorkletNode': Failed to read the 'outputChannelCount' property from AudioWorkletNodeOptions: The provided value ${err.message}`);
+        }
+
+        parsedOptions.outputChannelCount.forEach((value, index) => {
+          if (value <= 0 || value > IMPLEMENTATION_MAX_NUMBER_OF_CHANNELS) {
+            throw new DOMException(`Failed to construct 'AudioWorkletNode': Invalid 'outputChannelCount' property from AudioWorkletNodeOptions: Value at index ${index} is outside the supported range [1, 32]`, 'NotSupportedError');
+          }
+        });
+
+        if (parsedOptions.numberOfOutputs !== parsedOptions.outputChannelCount.length) {
+          throw new DOMException(`Failed to construct 'AudioWorkletNode': Invalid 'outputChannelCount' property from AudioWorkletNodeOptions: 'outputChannelCount' length (${parsedOptions.outputChannelCount.length}) does not equal 'numberOfOutputs' (${parsedOptions.numberOfOutputs})`, 'IndexSizeError');
+        }
+      } else {
+        // If outputChannelCount does not exist,
+        // - If both numberOfInputs and numberOfOutputs are 1, set the initial channel count of the node output to 1 and return.
+        //   NOTE: For this case, the output channel count will change to computedNumberOfChannels dynamically based on the input and the channelCountMode at runtime.
+        // - Otherwise set the channel count of each output of the node to 1 and return.
+
+        // @note - not sure what this means, let's go simple
+        parsedOptions.outputChannelCount = new Uint32Array(parsedOptions.numberOfOutputs);
+        parsedOptions.outputChannelCount.fill(1);
+      }
+
+      // @todo
+      // - This should be a "record", let's treat it as a raw object for now
+      // - Check if this needs to be checked against the declared `parameterDescriptors`
+      if (options && options.parameterData !== undefined) {
+        if (typeof options.parameterData === 'object' && options.parameterData !== null) {
+          parsedOptions.parameterData = {};
+
+          for (let [key, value] of Object.entries(options.parameterData)) {
+            const parsedKey = conversions['DOMString'](key, {
+              context: `Failed to construct 'AudioWorkletNode': Invalid 'parameterData' property from AudioWorkletNodeOptions: Invalid key (${key})`,
+            });
+
+            const parsedValue = conversions['double'](value, {
+              context: `Failed to construct 'AudioWorkletNode': Invalid 'parameterData' property from AudioWorkletNodeOptions: Invalid value for key ${parsedKey}`,
+            });
+
+            parsedOptions.parameterData[parsedKey] = parsedValue;
+          }
+        } else {
+          throw new TypeError(`Failed to construct 'AudioWorkletNode': Invalid 'parameterData' property from AudioWorkletNodeOptions: 'parameterData' is not an object`);
+        }
+      } else {
+        parsedOptions.parameterData = {};
+      }
+
+      // These ones are for the JS processor
+      if (options && options.processorOptions !== undefined) {
+        if (typeof options.processorOptions === 'object' && options.processorOptions !== null) {
+          parsedOptions.processorOptions = Object.assign({}, options.processorOptions);
+        } else {
+          throw new TypeError(`Failed to construct 'AudioWorkletNode': Invalid 'processorOptions' property from AudioWorkletNodeOptions: 'processorOptions' is not an object`);
+        }
+      } else {
+        parsedOptions.processorOptions = {};
+      }
+
+      // AudioNodeOptions
+      if (options && options.channelCount !== undefined) {
+        parsedOptions.channelCount = conversions['unsigned long'](options.channelCount, {
+          enforceRange: true,
+          context: `Failed to construct 'AudioWorkletNode': Failed to read the 'channelCount' property from AudioWorkletNodeOptions: The provided value '${options.channelCount}'`,
+        });
+
+        // if we delegate this check to Rust, this can poison a Mutex
+        // (probably the `audio_param_descriptor_channel` one)
+        if (parsedOptions.channelCount <= 0 || parsedOptions.channelCount > IMPLEMENTATION_MAX_NUMBER_OF_CHANNELS) {
+          throw new DOMException(`Failed to construct 'AudioWorkletNode': Invalid 'channelCount' property: Number of channels: ${parsedOptions.channelCount} is outside range [1, 32]`, 'NotSupportedError');
+        }
+      }
+
+      if (options && options.channelCountMode !== undefined) {
+        if (!['max', 'clamped-max', 'explicit'].includes(options.channelCountMode)) {
+          throw new TypeError(`Failed to construct 'AudioWorkletNode': Failed to read the 'channelCountMode' property from 'AudioNodeOptions': The provided value '${options.channelCountMode}' is not a valid enum value of type ChannelCountMode`);
+        }
+
+        parsedOptions.channelCountMode = conversions['DOMString'](options.channelCountMode, {
+          context: `Failed to construct 'AudioWorkletNode': Failed to read the 'channelCountMode' property from AudioWorkletNodeOptions: The provided value '${options.channelCountMode}'`,
+        });
+      }
+
+      if (options && options.channelInterpretation !== undefined) {
+        if (!['speakers', 'discrete'].includes(options.channelInterpretation)) {
+          throw new TypeError(`Failed to construct 'AudioWorkletNode': Failed to read the 'channelInterpretation' property from 'AudioNodeOptions': The provided value '${options.channelInterpretation}' is not a valid enum value of type ChannelInterpretation`);
+        }
+
+        parsedOptions.channelInterpretation = conversions['DOMString'](options.channelInterpretation, {
+          context: `Failed to construct 'AudioWorkletNode': Failed to read the 'channelInterpretation' property from AudioWorkletNodeOptions: The provided value '${options.channelInterpretation}'`,
+        });
+      }
+
+      // Create NapiAudioWorkletNode
+      const parameterDescriptors = context.audioWorklet[kGetParameterDescriptors](parsedName);
+      let napiObj;
+
+      try {
+        napiObj = new nativeBinding.AudioWorkletNode(
+          context[kNapiObj],
+          parsedName,
+          parsedOptions,
+          parameterDescriptors,
+        );
+      } catch (err) {
+        throwSanitizedError(err);
+      }
+
+      super(context, {
+        [kNapiObj]: napiObj,
+      });
+
+      let parameters = new Map();
+
+      for (let name in this[kNapiObj].parameters) {
+        const audioParam = new jsExport.AudioParam({
+          [kNapiObj]: this[kNapiObj].parameters[name],
+        });
+
+        parameters.set(name, audioParam);
+      }
+
+      this.#parameters = new AudioParamMap({
+        [kPrivateConstructor]: true,
+        parameters,
+      });
+
+      // Create the JS processor
+      this.#port = context.audioWorklet[kCreateProcessor](
+        parsedName,
+        parsedOptions,
+        napiObj.id,
+      );
+
+      this.#port.on('message', msg => {
+        // dispatch an ErrorEvent named 'processorerror'
+        switch (msg.cmd) {
+          case 'node-web-audio-api:worklet:ctor-error': {
+            const message = `Failed to construct '${parsedName}' AudioWorkletProcessor: ${msg.err.message}`;
+            const event = new ErrorEvent('processorerror', { message, error: msg.err });
+            propagateEvent(this, event);
+            break;
+          }
+          case 'node-web-audio-api:worklet:process-invalid': {
+            const message = `Failed to execute 'process' on '${parsedName}' AudioWorkletProcessor: ${msg.err.message}`;
+            const error = new TypeError(message);
+            error.stack = msg.err.stack.replace(msg.err.message, message);
+
+            const
event = new ErrorEvent('processorerror', { message, error }); + propagateEvent(this, event); + break; + } + case 'node-web-audio-api:worklet:process-error': { + const message = `Failed to execute 'process' on '${parsedName}' AudioWorkletProcessor: ${msg.err.message}`; + const event = new ErrorEvent('processorerror', { message, error: msg.err }); + propagateEvent(this, event); + break; + } + } + }); + } + + get parameters() { + if (!(this instanceof AudioWorkletNode)) { + throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'AudioWorkletNode\''); + } + + return this.#parameters; + } + + get port() { + if (!(this instanceof AudioWorkletNode)) { + throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'AudioWorkletNode\''); + } + + return this.#port; + } + } + + Object.defineProperties(AudioWorkletNode, { + length: { + __proto__: null, + writable: false, + enumerable: false, + configurable: true, + value: 2, + }, + }); + + Object.defineProperties(AudioWorkletNode.prototype, { + [Symbol.toStringTag]: { + __proto__: null, + writable: false, + enumerable: false, + configurable: true, + value: 'AudioWorkletNode', + }, + parameters: kEnumerableProperty, + port: kEnumerableProperty, + }); + + return AudioWorkletNode; +}; diff --git a/js/BaseAudioContext.js b/js/BaseAudioContext.js index 547c9424..22639006 100644 --- a/js/BaseAudioContext.js +++ b/js/BaseAudioContext.js @@ -24,12 +24,16 @@ const { } = require('./lib/utils.js'); const { kNapiObj, + kPrivateConstructor, } = require('./lib/symbols.js'); +const AudioWorklet = require('./AudioWorklet.js'); + module.exports = (jsExport, _nativeBinding) => { class BaseAudioContext extends EventTarget { - #listener = null; + #audioWorklet = null; #destination = null; + #listener = null; constructor(options) { // Make constructor "private" @@ -47,24 +51,23 @@ module.exports = (jsExport, _nativeBinding) => { ...kHiddenProperty, }); - this.#listener = null; // lazily instanciated + this.#audioWorklet = new AudioWorklet({ + [kPrivateConstructor]: true, + workletId: this[kNapiObj].workletId, + sampleRate: this[kNapiObj].sampleRate, + }); + this.#destination = new jsExport.AudioDestinationNode(this, { [kNapiObj]: this[kNapiObj].destination, }); } - get listener() { + get audioWorklet() { if (!(this instanceof BaseAudioContext)) { throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'BaseAudioContext\''); } - if (this.#listener === null) { - this.#listener = new jsExport.AudioListener({ - [kNapiObj]: this[kNapiObj].listener, - }); - } - - return this.#listener; + return this.#audioWorklet; } get destination() { @@ -75,6 +78,20 @@ module.exports = (jsExport, _nativeBinding) => { return this.#destination; } + get listener() { + if (!(this instanceof BaseAudioContext)) { + throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'BaseAudioContext\''); + } + + if (this.#listener === null) { + this.#listener = new jsExport.AudioListener({ + [kNapiObj]: this[kNapiObj].listener, + }); + } + + return this.#listener; + } + get sampleRate() { if (!(this instanceof BaseAudioContext)) { throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'BaseAudioContext\''); @@ -91,6 +108,15 @@ module.exports = (jsExport, _nativeBinding) => { return this[kNapiObj].currentTime; } + // @todo - implement in upstream crate + pass to AudioWorkletGlobalScope + // get renderQuantumSize() { + // if (!(this instanceof BaseAudioContext)) { + // throw new TypeError("Invalid Invocation: Value 
of 'this' must be of type 'BaseAudioContext'"); + // } + + // return this[kNapiObj].renderQuantumSize; + // } + get state() { if (!(this instanceof BaseAudioContext)) { throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'BaseAudioContext\''); @@ -99,9 +125,6 @@ module.exports = (jsExport, _nativeBinding) => { return this[kNapiObj].state; } - // renderQuantumSize - // audioWorklet - get onstatechange() { if (!(this instanceof BaseAudioContext)) { throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'BaseAudioContext\''); diff --git a/js/Events.js b/js/Events.js index 24bc554c..f4205071 100644 --- a/js/Events.js +++ b/js/Events.js @@ -30,7 +30,6 @@ Object.defineProperties(OfflineAudioCompletionEvent.prototype, { configurable: true, value: 'OfflineAudioCompletionEvent', }, - renderedBuffer: kEnumerableProperty, }); @@ -78,7 +77,6 @@ Object.defineProperties(AudioProcessingEvent.prototype, { configurable: true, value: 'AudioProcessingEvent', }, - playbackTime: kEnumerableProperty, inputBuffer: kEnumerableProperty, outputBuffer: kEnumerableProperty, @@ -135,13 +133,98 @@ Object.defineProperties(AudioRenderCapacityEvent.prototype, { configurable: true, value: 'AudioRenderCapacityEvent', }, - timestamp: kEnumerableProperty, averageLoad: kEnumerableProperty, peakLoad: kEnumerableProperty, underrunRatio: kEnumerableProperty, }); +// https://html.spec.whatwg.org/multipage/webappapis.html#errorevent +// interface ErrorEvent : Event { +// constructor(DOMString type, optional ErrorEventInit eventInitDict = {}); + +// readonly attribute DOMString message; +// readonly attribute USVString filename; +// readonly attribute unsigned long lineno; +// readonly attribute unsigned long colno; +// readonly attribute any error; +// }; + +// dictionary ErrorEventInit : EventInit { +// DOMString message = ""; +// USVString filename = ""; +// unsigned long lineno = 0; +// unsigned long colno = 0; +// any error; +// }; +class ErrorEvent extends Event { + #message = ''; + #filename = ''; + #lineno = 0; + #colno = 0; + #error = undefined; + + constructor(type, eventInitDict = {}) { + super(type); + + if (eventInitDict && typeof eventInitDict.message === 'string') { + this.#message = eventInitDict.message; + } + + if (eventInitDict && typeof eventInitDict.filename === 'string') { + this.#filename = eventInitDict.filename; + } + + if (eventInitDict && Number.isFinite(eventInitDict.lineno)) { + this.#lineno = eventInitDict.lineno; + } + + if (eventInitDict && Number.isFinite(eventInitDict.colno)) { + this.#colno = eventInitDict.colno; + } + + if (eventInitDict && eventInitDict.error instanceof Error) { + this.#error = eventInitDict.error; + } + } + + get message() { + return this.#message; + } + + get filename() { + return this.#filename; + } + + get lineno() { + return this.#lineno; + } + + get colno() { + return this.#colno; + } + + get error() { + return this.#error; + } +} + +Object.defineProperties(ErrorEvent.prototype, { + [Symbol.toStringTag]: { + __proto__: null, + writable: false, + enumerable: false, + configurable: true, + value: 'ErrorEvent', + }, + message: kEnumerableProperty, + filename: kEnumerableProperty, + lineno: kEnumerableProperty, + colno: kEnumerableProperty, + error: kEnumerableProperty, +}); + module.exports.OfflineAudioCompletionEvent = OfflineAudioCompletionEvent; module.exports.AudioProcessingEvent = AudioProcessingEvent; module.exports.AudioRenderCapacityEvent = AudioRenderCapacityEvent; +module.exports.ErrorEvent = ErrorEvent; diff --git 
a/js/OfflineAudioContext.js b/js/OfflineAudioContext.js
index 87f953c0..8fb102b2 100644
--- a/js/OfflineAudioContext.js
+++ b/js/OfflineAudioContext.js
@@ -12,8 +12,10 @@ const {
 } = require('./lib/utils.js');
 const {
   kNapiObj,
+  kWorkletRelease,
   kOnStateChange,
   kOnComplete,
+  kCheckProcessorsCreated,
 } = require('./lib/symbols.js');
 
 module.exports = function patchOfflineAudioContext(jsExport, nativeBinding) {
@@ -140,6 +142,9 @@ module.exports = function patchOfflineAudioContext(jsExport, nativeBinding) {
       throw new TypeError(`Invalid Invocation: Value of 'this' must be of type 'OfflineAudioContext'`);
     }
 
+    // ensure all AudioWorkletProcessors have finished their instantiation
+    await this.audioWorklet[kCheckProcessorsCreated]();
+
    let nativeAudioBuffer;
 
    try {
@@ -148,7 +153,10 @@ module.exports = function patchOfflineAudioContext(jsExport, nativeBinding) {
      throwSanitizedError(err);
    }
 
-    // @fixme: workaround the fact that this event seems to be triggered before
+    // release audio worklet, if any
+    await this.audioWorklet[kWorkletRelease]();
+
+    // workaround the fact that this event seems to be triggered before
     // startRendering fulfills and that we want to return the exact same instance
     if (this.#renderedBuffer === null) {
       this.#renderedBuffer = new jsExport.AudioBuffer({ [kNapiObj]: nativeAudioBuffer });
diff --git a/js/lib/symbols.js b/js/lib/symbols.js
index 1f69135f..3e96c38e 100644
--- a/js/lib/symbols.js
+++ b/js/lib/symbols.js
@@ -1,6 +1,11 @@
 module.exports.kNapiObj = Symbol('node-web-audio-api:napi-obj');
 module.exports.kAudioBuffer = Symbol('node-web-audio-api:audio-buffer');
-
+module.exports.kPrivateConstructor = Symbol('node-web-audio-api:private-constructor');
+module.exports.kCreateProcessor = Symbol('node-web-audio-api:create-processor');
+module.exports.kProcessorRegistered = Symbol('node-web-audio-api:processor-registered');
+module.exports.kGetParameterDescriptors = Symbol('node-web-audio-api:get-parameter-descriptors');
+module.exports.kWorkletRelease = Symbol('node-web-audio-api:worklet-release');
+module.exports.kCheckProcessorsCreated = Symbol('node-web-audio-api:check-processor-created');
 // semi-private keys for events listeners
@@ -19,4 +24,3 @@ module.exports.kOnEnded = Symbol.for('node-web-audio-api:onended');
 module.exports.kOnAudioProcess = Symbol.for('node-web-audio-api:onaudioprocess');
 // # AudioRenderCapacity
 module.exports.kOnUpdate = Symbol.for('node-web-audio-api:onupdate');
-
diff --git a/js/monkey-patch.js b/js/monkey-patch.js
deleted file mode 100644
index 67c8cad6..00000000
--- a/js/monkey-patch.js
+++ /dev/null
@@ -1,86 +0,0 @@
-// -------------------------------------------------------------------------- //
-// -------------------------------------------------------------------------- //
-// //
-// //
-// //
-// ██╗ ██╗ █████╗ ██████╗ ███╗ ██╗██╗███╗ ██╗ ██████╗ //
-// ██║ ██║██╔══██╗██╔══██╗████╗ ██║██║████╗ ██║██╔════╝ //
-// ██║ █╗ ██║███████║██████╔╝██╔██╗ ██║██║██╔██╗ ██║██║ ███╗ //
-// ██║███╗██║██╔══██║██╔══██╗██║╚██╗██║██║██║╚██╗██║██║ ██║ //
-// ╚███╔███╔╝██║ ██║██║ ██║██║ ╚████║██║██║ ╚████║╚██████╔╝ //
-// ╚══╝╚══╝ ╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═══╝╚═╝╚═╝ ╚═══╝ ╚═════╝ //
-// //
-// //
-// - This file has been generated --------------------------- //
-// //
-// //
-// -------------------------------------------------------------------------- //
-// -------------------------------------------------------------------------- //
-
-module.exports = function monkeyPatch(nativeBinding) {
-  let jsExport = {};
-
-  //
-------------------------------------------------------------------------- - // Events - // -------------------------------------------------------------------------- - jsExport.OfflineAudioCompletionEvent = require('./Events').OfflineAudioCompletionEvent; - jsExport.AudioProcessingEvent = require('./Events').AudioProcessingEvent; - jsExport.AudioRenderCapacityEvent = require('./Events').AudioRenderCapacityEvent; - // -------------------------------------------------------------------------- - // Create Web Audio API facade - // -------------------------------------------------------------------------- - jsExport.BaseAudioContext = require('./BaseAudioContext.js')(jsExport, nativeBinding); - jsExport.AudioContext = require('./AudioContext.js')(jsExport, nativeBinding); - jsExport.OfflineAudioContext = require('./OfflineAudioContext.js')(jsExport, nativeBinding); - - jsExport.ScriptProcessorNode = require('./ScriptProcessorNode.js')(jsExport, nativeBinding); - jsExport.AnalyserNode = require('./AnalyserNode.js')(jsExport, nativeBinding); - jsExport.AudioBufferSourceNode = require('./AudioBufferSourceNode.js')(jsExport, nativeBinding); - jsExport.BiquadFilterNode = require('./BiquadFilterNode.js')(jsExport, nativeBinding); - jsExport.ChannelMergerNode = require('./ChannelMergerNode.js')(jsExport, nativeBinding); - jsExport.ChannelSplitterNode = require('./ChannelSplitterNode.js')(jsExport, nativeBinding); - jsExport.ConstantSourceNode = require('./ConstantSourceNode.js')(jsExport, nativeBinding); - jsExport.ConvolverNode = require('./ConvolverNode.js')(jsExport, nativeBinding); - jsExport.DelayNode = require('./DelayNode.js')(jsExport, nativeBinding); - jsExport.DynamicsCompressorNode = require('./DynamicsCompressorNode.js')(jsExport, nativeBinding); - jsExport.GainNode = require('./GainNode.js')(jsExport, nativeBinding); - jsExport.IIRFilterNode = require('./IIRFilterNode.js')(jsExport, nativeBinding); - jsExport.MediaStreamAudioSourceNode = require('./MediaStreamAudioSourceNode.js')(jsExport, nativeBinding); - jsExport.OscillatorNode = require('./OscillatorNode.js')(jsExport, nativeBinding); - jsExport.PannerNode = require('./PannerNode.js')(jsExport, nativeBinding); - jsExport.StereoPannerNode = require('./StereoPannerNode.js')(jsExport, nativeBinding); - jsExport.WaveShaperNode = require('./WaveShaperNode.js')(jsExport, nativeBinding); - - jsExport.AudioNode = require('./AudioNode.js'); - jsExport.AudioScheduledSourceNode = require('./AudioScheduledSourceNode.js'); - jsExport.AudioParam = require('./AudioParam.js'); - jsExport.AudioDestinationNode = require('./AudioDestinationNode.js'); - jsExport.AudioListener = require('./AudioListener.js'); - jsExport.AudioRenderCapacity = require('./AudioRenderCapacity.js'); - - jsExport.PeriodicWave = require('./PeriodicWave.js')(jsExport, nativeBinding); - jsExport.AudioBuffer = require('./AudioBuffer.js')(jsExport, nativeBinding); - - // -------------------------------------------------------------------------- - // Promisify MediaDevices API - // -------------------------------------------------------------------------- - jsExport.mediaDevices = {}; - - const enumerateDevicesSync = nativeBinding.mediaDevices.enumerateDevices; - jsExport.mediaDevices.enumerateDevices = async function enumerateDevices() { - const list = enumerateDevicesSync(); - return Promise.resolve(list); - }; - - const getUserMediaSync = nativeBinding.mediaDevices.getUserMedia; - jsExport.mediaDevices.getUserMedia = async function getUserMedia(options) { - if (options === 
undefined) { - throw new TypeError('Failed to execute "getUserMedia" on "MediaDevices": audio must be requested'); - } - - const stream = getUserMediaSync(options); - return Promise.resolve(stream); - }; - - return jsExport; -}; diff --git a/load-native.cjs b/load-native.cjs new file mode 100644 index 00000000..9a1031d0 --- /dev/null +++ b/load-native.cjs @@ -0,0 +1,87 @@ +const { platform, arch } = process; + +let nativeBinding = null; +let loadError = null; + +switch (platform) { + case 'win32': + switch (arch) { + case 'x64': + try { + nativeBinding = require('./node-web-audio-api.win32-x64-msvc.node'); + } catch (e) { + loadError = e; + } + break; + case 'arm64': + try { + nativeBinding = require('./node-web-audio-api.win32-arm64-msvc.node'); + } catch (e) { + loadError = e; + } + break; + default: + throw new Error(`Unsupported architecture on Windows: ${arch}`); + } + break; + case 'darwin': + switch (arch) { + case 'x64': + try { + nativeBinding = require('./node-web-audio-api.darwin-x64.node'); + } catch (e) { + loadError = e; + } + break; + case 'arm64': + try { + nativeBinding = require('./node-web-audio-api.darwin-arm64.node'); + } catch (e) { + loadError = e; + } + break; + default: + throw new Error(`Unsupported architecture on macOS: ${arch}`); + } + break; + case 'linux': + switch (arch) { + case 'x64': + try { + nativeBinding = require('./node-web-audio-api.linux-x64-gnu.node'); + } catch (e) { + loadError = e; + } + break; + case 'arm64': + try { + nativeBinding = require('./node-web-audio-api.linux-arm64-gnu.node'); + } catch (e) { + loadError = e; + } + break; + case 'arm': + try { + nativeBinding = require('./node-web-audio-api.linux-arm-gnueabihf.node'); + } catch (e) { + loadError = e; + } + break; + default: + throw new Error(`Unsupported architecture on Linux: ${arch}`); + } + break; + default: + throw new Error(`Unsupported OS: ${platform}, architecture: ${arch}`); +} + +if (!nativeBinding) { + if (loadError) { + throw loadError; + } + + throw new Error(`Failed to load native binding for OS: ${platform}, architecture: ${arch}`); +} + +module.exports = nativeBinding; + diff --git a/package.json b/package.json index 29b9ef0f..7a3bd81a 100644 --- a/package.json +++ b/package.json @@ -73,6 +73,8 @@ "dependencies": { "@napi-rs/cli": "^2.14.3", "@node-rs/helper": "^1.3.3", + "caller": "^1.1.0", + "node-fetch": "^3.3.2", "webidl-conversions": "^7.0.0" } } diff --git a/src/audio_context.rs b/src/audio_context.rs index 415284b1..94d7f7f9 100644 --- a/src/audio_context.rs +++ b/src/audio_context.rs @@ -9,8 +9,9 @@ use web_audio_api::Event; use crate::*; +/// Napi object wrapping the native AudioContext and the AudioWorklet ID #[derive(Clone)] -pub(crate) struct NapiAudioContext(Arc); +pub(crate) struct NapiAudioContext(Arc, usize); // for debug purpose // impl Drop for NapiAudioContext { @@ -41,6 +42,10 @@ impl NapiAudioContext { pub fn unwrap(&self) -> &AudioContext { &self.0 } + + pub fn worklet_id(&self) -> usize { + self.1 + } } #[js_function(1)] @@ -95,11 +100,12 @@ fn constructor(ctx: CallContext) -> Result { }; let audio_context = AudioContext::new(audio_context_options); + let worklet_id = crate::audio_worklet_node::allocate_process_call_channel(); // ------------------------------------------------- // Wrap context // ------------------------------------------------- - let napi_audio_context = NapiAudioContext(Arc::new(audio_context)); + let napi_audio_context = NapiAudioContext(Arc::new(audio_context), worklet_id); ctx.env.wrap(&mut js_this, napi_audio_context)?; 
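 
     // @note (editor) - sketch of how the `workletId` set below surfaces on the
     // JS side; BaseAudioContext builds its AudioWorklet facade from it:
     //   new AudioWorklet({
     //     [kPrivateConstructor]: true,
     //     workletId: this[kNapiObj].workletId,
     //     sampleRate: this[kNapiObj].sampleRate,
     //   });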
 js_this.define_properties(&[Property::new("Symbol.toStringTag")?
@@ -120,6 +126,9 @@ fn constructor(ctx: CallContext) -> Result<JsUndefined> {
     let js_obj = ctor.new_instance(&[&js_this])?;
     js_this.set_named_property("renderCapacity", &js_obj)?;
 
+    // internal id to retrieve worklet message channel
+    js_this.set_named_property("workletId", ctx.env.create_uint32(worklet_id as u32)?)?;
+
     ctx.env.get_undefined()
 }
diff --git a/src/audio_node.rs b/src/audio_node.rs
index f5d80af5..dca7ea74 100644
--- a/src/audio_node.rs
+++ b/src/audio_node.rs
@@ -200,6 +200,15 @@ macro_rules! audio_node_impl {
             // proper return value is handled on JS side
             ctx.env.get_undefined()
         }
+        "AudioWorkletNode" => {
+            let napi_dest = ctx
+                .env
+                .unwrap::<$crate::audio_worklet_node::NapiAudioWorkletNode>(&js_dest)?;
+            let native_dest = napi_dest.unwrap();
+            native_src.connect_from_output_to_input(native_dest, output, input);
+            // proper return value is handled on JS side
+            ctx.env.get_undefined()
+        }
         "AnalyserNode" => {
             let napi_dest = ctx
                 .env
@@ -442,6 +451,27 @@ macro_rules! audio_node_impl {
                 native_src.disconnect_dest(native_dest);
             }
         }
+        "AudioWorkletNode" => {
+            let napi_dest = ctx
+                .env
+                .unwrap::<$crate::audio_worklet_node::NapiAudioWorkletNode>(&js_dest)?;
+            let native_dest = napi_dest.unwrap();
+
+            if ctx.length == 3 {
+                let output = ctx.get::<JsNumber>(1)?.get_double()? as usize;
+                let input = ctx.get::<JsNumber>(2)?.get_double()? as usize;
+                native_src.disconnect_dest_from_output_to_input(
+                    native_dest,
+                    output,
+                    input
+                );
+            } else if ctx.length == 2 {
+                let output = ctx.get::<JsNumber>(1)?.get_double()? as usize;
+                native_src.disconnect_dest_from_output(native_dest, output);
+            } else {
+                native_src.disconnect_dest(native_dest);
+            }
+        }
         "AnalyserNode" => {
             let napi_dest = ctx
                 .env
diff --git a/src/audio_param.rs b/src/audio_param.rs
index d37959c4..68c48752 100644
--- a/src/audio_param.rs
+++ b/src/audio_param.rs
@@ -75,7 +75,7 @@ fn set_automation_rate(ctx: CallContext) -> Result<JsUndefined> {
     let value = match utf8_str.as_str() {
         "a-rate" => AutomationRate::A,
         "k-rate" => AutomationRate::K,
-        _ => panic!("TypeError - The provided value '{:?}' is not a valid enum value of type AutomationRate.", utf8_str),
+        _ => unreachable!(),
     };
 
     obj.set_automation_rate(value);
diff --git a/src/audio_worklet_node.rs b/src/audio_worklet_node.rs
new file mode 100644
index 00000000..8caa4003
--- /dev/null
+++ b/src/audio_worklet_node.rs
@@ -0,0 +1,788 @@
+use crate::{NapiAudioContext, NapiAudioParam, NapiOfflineAudioContext};
+
+use crossbeam_channel::{self, Receiver, Sender};
+
+use napi::*;
+use napi_derive::js_function;
+
+use web_audio_api::node::{AudioNode, AudioNodeOptions, ChannelCountMode, ChannelInterpretation};
+use web_audio_api::worklet::{
+    AudioParamValues, AudioWorkletGlobalScope, AudioWorkletNode, AudioWorkletNodeOptions,
+    AudioWorkletProcessor,
+};
+use web_audio_api::{AudioParamDescriptor, AutomationRate};
+
+use std::cell::Cell;
+use std::collections::HashMap;
+use std::option::Option;
+use std::sync::atomic::{AtomicBool, AtomicU32, Ordering};
+use std::sync::{Arc, Mutex, OnceLock, RwLock};
+
+/// Unique ID generator for AudioWorkletProcessors
+static INCREMENTING_ID: AtomicU32 = AtomicU32::new(0);
+
+/// Command issued from the render thread to the Worker
+enum WorkletCommand {
+    Drop(u32),
+    Process(ProcessorArguments),
+}
+
+/// Render thread to Worker processor arguments
+struct ProcessorArguments {
+    // processor unique ID
+    id: u32,
+    // processor inputs (unsafely cast to static)
+    inputs: &'static [&'static [&'static [f32]]],
+    // processor outputs (unsafely cast to static)
+    outputs: &'static [&'static [&'static [f32]]],
+    // processor audio params (unsafely cast to static)
+    param_values: &'static [(&'static str, &'static [f32])],
+    // AudioWorkletGlobalScope currentTime
+    current_time: f64,
+    // AudioWorkletGlobalScope currentFrame
+    current_frame: u64,
+    // channel for the tail_time return value
+    tail_time_sender: Sender<bool>,
+}
+
+/// Message channel from the render thread to the Worker
+struct ProcessCallChannel {
+    send: Sender<WorkletCommand>,
+    recv: Receiver<WorkletCommand>,
+    // mark that the worklet has been exited to prevent any further `process` call
+    exited: Arc<AtomicBool>,
+}
+
+/// Global map of ID -> ProcessCallChannel
+///
+/// Every (Offline)AudioContext is assigned a new channel + ID. The ID is passed to the
+/// AudioWorklet Worker and to every AudioNode in the context so they can grab the channel and use
+/// message passing.
+static GLOBAL_PROCESS_CALL_CHANNEL_MAP: RwLock<Vec<ProcessCallChannel>> = RwLock::new(vec![]);
+
+/// Request a new channel + ID for a newly created (Offline)AudioContext
+pub(crate) fn allocate_process_call_channel() -> usize {
+    // Only one process message can be sent at the same time from a given context,
+    // but Drop messages could be sent too, so let's take some room
+    let (send, recv) = crossbeam_channel::bounded(32);
+    let channel = ProcessCallChannel {
+        send,
+        recv,
+        exited: Arc::new(AtomicBool::new(false)),
+    };
+
+    // We need a write-lock to initialize the channel
+    let mut write_lock = GLOBAL_PROCESS_CALL_CHANNEL_MAP.write().unwrap();
+    let id = write_lock.len();
+    write_lock.push(channel);
+
+    id
+}
+
+/// Obtain the WorkletCommand sender for this context ID
+fn process_call_sender(id: usize) -> Sender<WorkletCommand> {
+    // optimistically assume the channel exists and we can use a shared read-lock
+    GLOBAL_PROCESS_CALL_CHANNEL_MAP.read().unwrap()[id]
+        .send
+        .clone()
+}
+
+/// Obtain the WorkletCommand receiver for this context ID
+fn process_call_receiver(id: usize) -> Receiver<WorkletCommand> {
+    // optimistically assume the channel exists and we can use a shared read-lock
+    GLOBAL_PROCESS_CALL_CHANNEL_MAP.read().unwrap()[id]
+        .recv
+        .clone()
+}
+
+/// Obtain the exited flag for this context ID
+fn process_call_exited(id: usize) -> Arc<AtomicBool> {
+    // optimistically assume the channel exists and we can use a shared read-lock
+    GLOBAL_PROCESS_CALL_CHANNEL_MAP.read().unwrap()[id]
+        .exited
+        .clone()
+}
+
+/// Message channel inside the control thread to pass param descriptors of a given AudioWorkletNode
+/// into the static method AudioWorkletProcessor::parameter_descriptors
+struct AudioParamDescriptorsChannel {
+    send: Mutex<Sender<Vec<AudioParamDescriptor>>>,
+    recv: Receiver<Vec<AudioParamDescriptor>>,
+}
+
+/// Generate the AudioParamDescriptorsChannel
+///
+/// It is shared by the whole application, even across different AudioContexts. This is
+/// no issue because it uses a Mutex to prevent concurrent access.
+fn audio_param_descriptor_channel() -> &'static AudioParamDescriptorsChannel {
+    static PAIR: OnceLock<AudioParamDescriptorsChannel> = OnceLock::new();
+    PAIR.get_or_init(|| {
+        let (send, recv) = crossbeam_channel::unbounded();
+        AudioParamDescriptorsChannel {
+            send: Mutex::new(send),
+            recv,
+        }
+    })
+}
+
+thread_local! {
+    /// Denotes if the Worker thread priority has already been upped
+    static HAS_THREAD_PRIO: Cell<bool> = const { Cell::new(false) };
+}
+
+struct WorkletAbruptCompletionResult {
+    cmd: String,
+    err: Error,
+}
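+
+// (editor note) allocate_process_call_channel() returns the index of the freshly
+// pushed channel; process_call_sender / process_call_receiver / process_call_exited
+// then only need that index to look the channel up under a shared read-lock.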
+/// Check that the given JS and Rust input / output layouts are the same, i.e. check
+/// that each input / output has the same number of channels
+///
+/// Note that we don't check the number of inputs / outputs as they are defined
+/// at construction and cannot change
+fn check_same_io_layout(js_io: &JsObject, rs_io: &'static [&'static [&'static [f32]]]) -> bool {
+    for (i, io) in rs_io.iter().enumerate() {
+        if io.len()
+            != js_io
+                .get_element::<JsObject>(i as u32)
+                .unwrap()
+                .get_array_length_unchecked()
+                .unwrap() as usize
+        {
+            return false;
+        }
+    }
+
+    true
+}
+
+/// Recreate the whole JS inputs and outputs data structure. We must start from
+/// scratch because the Arrays are frozen, which prevents us from adding, removing
+/// or modifying items.
+fn rebuild_io_layout(
+    env: &Env,
+    js_io: JsObject,
+    rs_io: &'static [&'static [&'static [f32]]],
+    render_quantum_size: usize,
+) -> JsObject {
+    let mut new_js_io = env.create_array(rs_io.len() as u32).unwrap();
+
+    for (i, io) in rs_io.iter().enumerate() {
+        let mut channels = env.create_array(rs_io[i].len() as u32).unwrap();
+        let old_channels = js_io.get_element::<JsObject>(i as u32).unwrap();
+
+        for j in 0..io.len() {
+            // Try to reuse an existing Float32Array
+            let float32_arr = if old_channels.has_element(j as u32).unwrap() {
+                old_channels.get_element::<JsTypedArray>(j as u32).unwrap()
+            } else {
+                env.create_arraybuffer(render_quantum_size * 4)
+                    .unwrap()
+                    .into_raw()
+                    .into_typedarray(napi::TypedArrayType::Float32, render_quantum_size, 0)
+                    .unwrap()
+            };
+
+            let _ = channels.set(j as u32, float32_arr);
+        }
+
+        let mut channels = channels.coerce_to_object().unwrap();
+        let _ = channels.freeze();
+
+        new_js_io.set(i as u32, channels).unwrap();
+    }
+
+    let mut new_js_io = new_js_io.coerce_to_object().unwrap();
+    let _ = new_js_io.freeze();
+
+    new_js_io
+}
+
+/// Handle an AudioWorkletProcessor::process call in the Worker
+fn process_audio_worklet(env: &Env, processors: &JsObject, args: ProcessorArguments) -> Result<()> {
+    let ProcessorArguments {
+        id,
+        inputs,
+        outputs,
+        param_values,
+        current_time,
+        current_frame,
+        tail_time_sender,
+    } = args;
+
+    let processor = processors.get_named_property::<JsUnknown>(&id.to_string())?;
+
+    // Make sure the processor exists, we might run into race conditions
+    // between the Rust audio thread and the JS Worker thread
+    if processor.get_type()? == ValueType::Undefined {
+        let _ = tail_time_sender.send(true); // make sure we will be called again
+        return Ok(());
+    }
+
+    // fill AudioWorkletGlobalScope
+    let mut global = env.get_global()?;
+    global.set_named_property("currentTime", current_time)?;
+    global.set_named_property("currentFrame", current_frame)?;
+
+    let mut processor = processor.coerce_to_object()?;
+
+    let k_worklet_callable_process =
+        env.symbol_for("node-web-audio-api:worklet-callable-process")?;
+    // return early if the worklet has been tagged as not callable,
+    // @note - maybe this could be guaranteed on the rust side
+    let callable_process = processor
+        .get_property::<JsSymbol, JsBoolean>(k_worklet_callable_process)?
+        .get_value()?;
+
+    if !callable_process {
+        let _ = tail_time_sender.send(false);
+        return Ok(());
+    }
+
+    // This value becomes Some if `process` does not exist or throws an error at execution
+    let mut completion: Option<WorkletAbruptCompletionResult> = None;
+
+    match processor.get_named_property::<JsFunction>("process") {
+        Ok(process_method) => {
+            let k_worklet_inputs = env.symbol_for("node-web-audio-api:worklet-inputs")?;
+            let k_worklet_outputs = env.symbol_for("node-web-audio-api:worklet-outputs")?;
+            let k_worklet_params = env.symbol_for("node-web-audio-api:worklet-params")?;
+            let k_worklet_params_cache =
+                env.symbol_for("node-web-audio-api:worklet-params-cache")?;
+
+            // @todo - get from global
+            let render_quantum_size = 128;
+            let mut js_inputs = processor.get_property::<JsSymbol, JsObject>(k_worklet_inputs)?;
+            let mut js_outputs = processor.get_property::<JsSymbol, JsObject>(k_worklet_outputs)?;
+            let mut js_params = processor.get_property::<JsSymbol, JsObject>(k_worklet_params)?;
+            let js_params_cache =
+                processor.get_property::<JsSymbol, JsObject>(k_worklet_params_cache)?;
+
+            // Check JS input and output, and rebuild JS object if layout changed
+            if !check_same_io_layout(&js_inputs, inputs) {
+                let new_js_inputs = rebuild_io_layout(env, js_inputs, inputs, render_quantum_size);
+                // Store new layout in processor
+                processor.set_property(k_worklet_inputs, new_js_inputs)?;
+                // Override js_inputs with new reference
+                js_inputs = processor.get_property::<JsSymbol, JsObject>(k_worklet_inputs)?;
+            }
+
+            if !check_same_io_layout(&js_outputs, outputs) {
+                let new_js_outputs =
+                    rebuild_io_layout(env, js_outputs, outputs, render_quantum_size);
+                // Store new layout in processor
+                processor.set_property(k_worklet_outputs, new_js_outputs)?;
+                // Override js_outputs with new reference
+                js_outputs = processor.get_property::<JsSymbol, JsObject>(k_worklet_outputs)?;
+            }
+
+            // Copy inputs into JS inputs buffers
+            for (input_number, input) in inputs.iter().enumerate() {
+                let js_input = js_inputs.get_element::<JsObject>(input_number as u32)?;
+
+                for (channel_number, channel) in input.iter().enumerate() {
+                    let js_channel = js_input.get_element::<JsTypedArray>(channel_number as u32)?;
+                    let mut js_channel_value = js_channel.into_value()?;
+                    let js_channel_buffer: &mut [f32] = js_channel_value.as_mut();
+                    js_channel_buffer.copy_from_slice(channel);
+                }
+            }
+
+            // Copy params values into JS params buffers
+            //
+            // @perf - We could rely on the fact that ParameterDescriptors
+            // are ordered maps to avoid sending param names in `param_values`
+            for (name, data) in param_values.iter() {
+                let float32_arr_cache = js_params_cache.get_named_property::<JsObject>(name)?;
+                // retrieve the right Float32Array according to actual param size, i.e. 128 or 1
+                let cache_index = if data.len() == 1 { 1 } else { 0 };
+                let float32_arr = float32_arr_cache.get_element::<JsTypedArray>(cache_index)?;
+                // copy data into underlying ArrayBuffer
+                let mut float32_arr_value = float32_arr.into_value()?;
+                let buffer: &mut [f32] = float32_arr_value.as_mut();
+                buffer.copy_from_slice(data);
+                // get new owned value, as `float32_arr` has been consumed by the `into_value` call
+                let float32_arr = float32_arr_cache.get_element::<JsTypedArray>(cache_index)?;
+                js_params.set_named_property(name, float32_arr)?;
+            }
+
+            let res: Result<JsUnknown> =
+                process_method.apply3(processor, js_inputs, js_outputs, js_params);
+
+            match res {
+                Ok(js_ret) => {
+                    // Grab back new owned values for processor and js_outputs, which
+                    // have been consumed by the `apply` call
+                    let processor = processors.get_named_property::<JsObject>(&id.to_string())?;
+                    let js_outputs =
+                        processor.get_property::<JsSymbol, JsObject>(k_worklet_outputs)?;
+
+                    // copy JS output buffers back into outputs
+                    for (output_number, output) in outputs.iter().enumerate() {
+                        let js_output = js_outputs.get_element::<JsObject>(output_number as u32)?;
+
+                        for (channel_number, channel) in output.iter().enumerate() {
+                            let js_channel =
+                                js_output.get_element::<JsTypedArray>(channel_number as u32)?;
+                            let js_channel_value = js_channel.into_value()?;
+                            let js_channel_buffer: &[f32] = js_channel_value.as_ref();
+
+                            let src = js_channel_buffer.as_ptr();
+                            let dst = channel.as_ptr() as *mut f32;
+
+                            unsafe {
+                                std::ptr::copy_nonoverlapping(src, dst, render_quantum_size);
+                            }
+                        }
+                    }
+
+                    let ret = js_ret.coerce_to_bool()?.get_value()?;
+                    let _ = tail_time_sender.send(ret); // allowed to fail
+                }
+                Err(err) => {
+                    completion = Some(WorkletAbruptCompletionResult {
+                        cmd: "node-web-audio-api:worklet:process-error".to_string(),
+                        err,
+                    });
+                }
+            }
+        }
+        Err(err) => {
+            completion = Some(WorkletAbruptCompletionResult {
+                cmd: "node-web-audio-api:worklet:process-invalid".to_string(),
+                err,
+            });
+        }
+    }
+
+    // Handle eventual errors
+    if let Some(value) = completion {
+        let WorkletAbruptCompletionResult { cmd, err } = value;
+        // Grab back our processor, which may have been consumed by the process apply
+        let mut processor = global.get_named_property::<JsObject>(&id.to_string())?;
+        let k_worklet_queue_task = env.symbol_for("node-web-audio-api:worklet-queue-task")?;
+        // @todo - would be useful to propagate to the rust side too so that the
+        // processor can be removed from the graph (?)
+        let value = env.get_boolean(false)?;
+        processor.set_property(k_worklet_callable_process, value)?;
+        // set active source flag to false, same semantic as tail time
+        // https://webaudio.github.io/web-audio-api/#active-source
+        let _ = tail_time_sender.send(false);
+        // Dispatch processorerror event on main thread
+        let queue_task = processor.get_property::<JsSymbol, JsFunction>(k_worklet_queue_task)?;
+        let js_cmd = env.create_string(&cmd)?;
+        let js_err = env.create_error(err)?;
+        queue_task.apply2(processor, js_cmd, js_err)?;
+    }
+
+    Ok(())
+}
+
+/// The entry point into Rust from the Worker
+#[js_function(2)]
+pub(crate) fn run_audio_worklet_global_scope(ctx: CallContext) -> Result<JsUndefined> {
+    // Set thread priority to highest, if not done already
+    if !HAS_THREAD_PRIO.replace(true) {
+        // allowed to fail
+        let _ = thread_priority::set_current_thread_priority(thread_priority::ThreadPriority::Max);
+    }
+
+    // Obtain the unique worker ID
+    let worklet_id = ctx.get::<JsNumber>(0)?.get_uint32()? as usize;
+    // List of registered processors
+    let processors = ctx.get::<JsObject>(1)?;
+
+    // Poll for incoming commands and yield back to the event loop if there are none.
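+    //
+    // (editor sketch) the JS Worker is assumed to drive this entry point in a
+    // loop roughly like the following — names are illustrative, not the actual
+    // worker source:
+    //   while (running) {
+    //     nativeBinding.run_audio_worklet_global_scope(workletId, processors);
+    //     await new Promise(resolve => setImmediate(resolve)); // yield to the event loop
+    //   }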
+ // recv_timeout is not an option due to realtime safety, see discussion of + // https://github.com/ircam-ismm/node-web-audio-api/pull/124#pullrequestreview-2053515583 + while let Ok(msg) = process_call_receiver(worklet_id).try_recv() { + match msg { + WorkletCommand::Drop(id) => { + let mut processors = ctx.get::(1)?; + processors.delete_named_property(&id.to_string()).unwrap(); + } + WorkletCommand::Process(args) => { + process_audio_worklet(ctx.env, &processors, args)?; + } + } + } + + ctx.env.get_undefined() +} + +#[js_function(1)] +pub(crate) fn exit_audio_worklet_global_scope(ctx: CallContext) -> Result { + // Obtain the unique worker ID + let worklet_id = ctx.get::(0)?.get_uint32()? as usize; + // Flag message channel as exited to prevent any other render call + process_call_exited(worklet_id).store(true, Ordering::SeqCst); + // Handle any pending message from audio thread + if let Ok(WorkletCommand::Process(args)) = process_call_receiver(worklet_id).try_recv() { + let _ = args.tail_time_sender.send(false); + } + + ctx.env.get_undefined() +} + +pub(crate) struct NapiAudioWorkletNode(AudioWorkletNode); + +impl NapiAudioWorkletNode { + pub fn create_js_class(env: &Env) -> Result { + let interface = audio_node_interface![]; + + env.define_class("AudioWorkletNode", constructor, &interface) + } + + pub fn unwrap(&self) -> &AudioWorkletNode { + &self.0 + } +} + +#[js_function(4)] +fn constructor(ctx: CallContext) -> Result { + let mut js_this = ctx.this_unchecked::(); + + let js_audio_context = ctx.get::(0)?; + + // @note - not used, handled in the JS code + // let js_name = ctx.get::(1)?; + + // -------------------------------------------------------- + // Parse options + // -------------------------------------------------------- + let options_js = ctx.get::(2)?; + + let number_of_inputs = options_js + .get_named_property::("numberOfInputs")? + .get_double()? as usize; + + let number_of_outputs = options_js + .get_named_property::("numberOfOutputs")? + .get_double()? as usize; + + let output_channel_count_js = options_js + .get::<&str, JsTypedArray>("outputChannelCount")? + .unwrap(); + let output_channel_count_value = output_channel_count_js.into_value()?; + let output_channel_count_u32: &[u32] = output_channel_count_value.as_ref(); + let output_channel_count: Vec = output_channel_count_u32 + .iter() + .map(|&v| v as usize) + .collect(); + + let mut parameter_data = HashMap::::new(); + let parameter_data_js = options_js.get_named_property::("parameterData")?; + let parameter_keys_js = parameter_data_js.get_all_property_names( + KeyCollectionMode::OwnOnly, + KeyFilter::Enumerable, + KeyConversion::NumbersToStrings, + )?; + let length = parameter_keys_js.get_array_length()?; + + for i in 0..length { + let key_js = parameter_keys_js.get_element::(i)?; + let utf8_key = key_js.into_utf8()?; + let key = utf8_key.into_owned()?; + + let value = parameter_data_js + .get_property::(key_js)? + .get_double()?; + + parameter_data.insert(key, value); + } + + // No `processorOptions` here, they are sent to JS processor + + // -------------------------------------------------------- + // Parse AudioNodeOptions + // -------------------------------------------------------- + let audio_node_options_default = AudioNodeOptions::default(); + + let some_channel_count_js = options_js.get::<&str, JsObject>("channelCount")?; + let channel_count = if let Some(channel_count_js) = some_channel_count_js { + channel_count_js.coerce_to_number()?.get_double()? 
as usize + } else { + audio_node_options_default.channel_count + }; + + let some_channel_count_mode_js = options_js.get::<&str, JsObject>("channelCountMode")?; + let channel_count_mode = if let Some(channel_count_mode_js) = some_channel_count_mode_js { + let channel_count_mode_str = channel_count_mode_js + .coerce_to_string()? + .into_utf8()? + .into_owned()?; + + match channel_count_mode_str.as_str() { + "max" => ChannelCountMode::Max, + "clamped-max" => ChannelCountMode::ClampedMax, + "explicit" => ChannelCountMode::Explicit, + _ => unreachable!(), + } + } else { + audio_node_options_default.channel_count_mode + }; + + let some_channel_interpretation_js = + options_js.get::<&str, JsObject>("channelInterpretation")?; + let channel_interpretation = + if let Some(channel_interpretation_js) = some_channel_interpretation_js { + let channel_interpretation_str = channel_interpretation_js + .coerce_to_string()? + .into_utf8()? + .into_owned()?; + + match channel_interpretation_str.as_str() { + "speakers" => ChannelInterpretation::Speakers, + "discrete" => ChannelInterpretation::Discrete, + _ => unreachable!(), + } + } else { + audio_node_options_default.channel_interpretation + }; + + // -------------------------------------------------------- + // Parse ParameterDescriptors + // -------------------------------------------------------- + let params_js = ctx.get::(3)?; + let length = params_js.get_array_length()? as usize; + let mut rs_params: Vec = Vec::with_capacity(length); + + for i in 0..length { + let param = params_js.get_element::(i.try_into().unwrap())?; + + let js_name = param.get_named_property::("name").unwrap(); + let utf8_name = js_name.into_utf8().unwrap(); + let name = utf8_name.into_owned().unwrap(); + + let min_value = param + .get_named_property::("minValue") + .unwrap() + .get_double() + .unwrap() as f32; + + let max_value = param + .get_named_property::("maxValue") + .unwrap() + .get_double() + .unwrap() as f32; + + let default_value = param + .get_named_property::("defaultValue") + .unwrap() + .get_double() + .unwrap() as f32; + + let js_str = param.get_named_property::("automationRate")?; + let utf8_str = js_str.coerce_to_string()?.into_utf8()?.into_owned()?; + let automation_rate = match utf8_str.as_str() { + "a-rate" => AutomationRate::A, + "k-rate" => AutomationRate::K, + _ => unreachable!(), + }; + + let param_descriptor = AudioParamDescriptor { + name, + min_value, + max_value, + default_value, + automation_rate, + }; + + rs_params.insert(i, param_descriptor); + } + + let audio_context_name = + js_audio_context.get_named_property::("Symbol.toStringTag")?; + let audio_context_str = audio_context_name.into_utf8()?; + + let worklet_id = match audio_context_str.as_str()? 
{ + "AudioContext" => { + let napi_audio_context = ctx.env.unwrap::(&js_audio_context)?; + napi_audio_context.worklet_id() + } + "OfflineAudioContext" => { + let napi_audio_context = ctx + .env + .unwrap::(&js_audio_context)?; + napi_audio_context.worklet_id() + } + &_ => panic!("not supported"), + }; + + // -------------------------------------------------------- + // Create AudioWorkletNodeOptions object + // -------------------------------------------------------- + let id = INCREMENTING_ID.fetch_add(1, Ordering::Relaxed); + let processor_options = NapiAudioWorkletProcessor { + id, + send: process_call_sender(worklet_id), + exited: process_call_exited(worklet_id), + tail_time_channel: crossbeam_channel::bounded(1), + param_values: Vec::with_capacity(32), + }; + + let options = AudioWorkletNodeOptions { + number_of_inputs, + number_of_outputs, + output_channel_count, + parameter_data, + audio_node_options: AudioNodeOptions { + channel_count, + channel_count_mode, + channel_interpretation, + }, + processor_options, + }; + + // -------------------------------------------------------- + // send parameterDescriptors so that NapiAudioWorkletProcessor can retrieve them + // -------------------------------------------------------- + let guard = audio_param_descriptor_channel().send.lock().unwrap(); + guard.send(rs_params).unwrap(); + + // -------------------------------------------------------- + // Create native AudioWorkletNode + // -------------------------------------------------------- + let native_node = match audio_context_str.as_str()? { + "AudioContext" => { + let napi_audio_context = ctx.env.unwrap::(&js_audio_context)?; + let audio_context = napi_audio_context.unwrap(); + AudioWorkletNode::new::(audio_context, options) + } + "OfflineAudioContext" => { + let napi_audio_context = ctx + .env + .unwrap::(&js_audio_context)?; + let audio_context = napi_audio_context.unwrap(); + AudioWorkletNode::new::(audio_context, options) + } + &_ => unreachable!(), + }; + + drop(guard); + + let mut js_parameters = ctx.env.create_object()?; + + for (name, native_param) in native_node.parameters().iter() { + let native_param = native_param.clone(); + let napi_param = NapiAudioParam::new(native_param); + let mut js_obj = NapiAudioParam::create_js_object(ctx.env)?; + ctx.env.wrap(&mut js_obj, napi_param)?; + + js_parameters.set_named_property(name, js_obj)?; + } + + // -------------------------------------------------------- + // Finalize instance creation + // -------------------------------------------------------- + js_this.define_properties(&[ + Property::new("context")? + .with_value(&js_audio_context) + .with_property_attributes(PropertyAttributes::Enumerable), + Property::new("parameters")? + .with_value(&js_parameters) + .with_property_attributes(PropertyAttributes::Enumerable), + Property::new("id")? + .with_value(&ctx.env.create_uint32(id)?) + .with_property_attributes(PropertyAttributes::Enumerable), + // this must be put on the instance and not in the prototype to be reachable + Property::new("Symbol.toStringTag")? + .with_value(&ctx.env.create_string("AudioWorkletNode")?) 
+ .with_property_attributes(PropertyAttributes::Static), + ])?; + + // finalize instance creation + let napi_node = NapiAudioWorkletNode(native_node); + ctx.env.wrap(&mut js_this, napi_node)?; + + ctx.env.get_undefined() +} + +audio_node_impl!(NapiAudioWorkletNode); + +// ------------------------------------------------- +// AudioWorkletNode Interface +// ------------------------------------------------- + +struct NapiAudioWorkletProcessor { + /// Unique id to pair Napi Worklet and JS processor + id: u32, + /// Sender to the JS Worklet + send: Sender, + /// Flag that marks the JS worklet as exited + exited: Arc, + /// tail_time result channel + tail_time_channel: (Sender, Receiver), + /// Reusable Vec for AudioParam values + param_values: Vec<(&'static str, &'static [f32])>, +} + +impl AudioWorkletProcessor for NapiAudioWorkletProcessor { + type ProcessorOptions = NapiAudioWorkletProcessor; + + fn constructor(opts: Self::ProcessorOptions) -> Self { + opts // the opts contain the full processor + } + + fn parameter_descriptors() -> Vec + where + Self: Sized, + { + // Get the values out of thin air, see `audio_param_descriptor_channel()` for details + audio_param_descriptor_channel().recv.recv().unwrap() + } + + fn process<'a, 'b>( + &mut self, + inputs: &'b [&'a [&'a [f32]]], + outputs: &'b mut [&'a mut [&'a mut [f32]]], + params: AudioParamValues<'b>, + scope: &'b AudioWorkletGlobalScope, + ) -> bool { + // Early return if audio thread is still closing while worklet has been exited + if self.exited.load(Ordering::SeqCst) { + return false; + } + + // SAFETY: + // We are transmuting the a' and b' lifetimes to static in order to send them to the Worker + // thread. This should be safe as long as: + // - this function does not return before the Worker has finished using the slices + // - the Worker / JS-code doesn't keep a copy of these slices - fingers crossed on this one + + let inputs: &'static [&'static [&'static [f32]]] = unsafe { std::mem::transmute(inputs) }; + let outputs: &'static [&'static [&'static [f32]]] = unsafe { std::mem::transmute(outputs) }; + + self.param_values.clear(); + self.param_values.extend(params.keys().map(|k| { + let label: &'static str = unsafe { std::mem::transmute(k) }; + let value: &'static [f32] = unsafe { std::mem::transmute(¶ms.get(k)[..]) }; + (label, value) + })); + let param_values: &'static [_] = unsafe { std::mem::transmute(&self.param_values[..]) }; + + // end SAFETY comment + + let item = ProcessorArguments { + id: self.id, + inputs, + outputs, + param_values, + current_time: scope.current_time, + current_frame: scope.current_frame, + tail_time_sender: self.tail_time_channel.0.clone(), + }; + + // send command to Worker + self.send.send(WorkletCommand::Process(item)).unwrap(); + // await result + self.tail_time_channel.1.recv().unwrap() + } +} + +impl Drop for NapiAudioWorkletProcessor { + fn drop(&mut self) { + if !self.exited.load(Ordering::SeqCst) { + self.send.send(WorkletCommand::Drop(self.id)).unwrap(); + } + } +} diff --git a/src/lib.rs b/src/lib.rs index c12429ba..6f486491 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -50,6 +50,8 @@ use crate::offline_audio_context::NapiOfflineAudioContext; mod script_processor_node; use crate::script_processor_node::NapiScriptProcessorNode; +mod audio_worklet_node; +use crate::audio_worklet_node::NapiAudioWorkletNode; mod analyser_node; use crate::analyser_node::NapiAnalyserNode; mod audio_buffer_source_node; @@ -83,6 +85,9 @@ use crate::stereo_panner_node::NapiStereoPannerNode; mod wave_shaper_node; use 
crate::wave_shaper_node::NapiWaveShaperNode; +// AudioWorklet internals +use crate::audio_worklet_node::{exit_audio_worklet_global_scope, run_audio_worklet_global_scope}; + // MediaDevices & MediaStream API mod media_streams; use crate::media_streams::NapiMediaStream; @@ -126,6 +131,9 @@ fn init(mut exports: JsObject, env: Env) -> Result<()> { let napi_class = NapiScriptProcessorNode::create_js_class(&env)?; exports.set_named_property("ScriptProcessorNode", napi_class)?; + let napi_class = NapiAudioWorkletNode::create_js_class(&env)?; + exports.set_named_property("AudioWorkletNode", napi_class)?; + let napi_class = NapiAnalyserNode::create_js_class(&env)?; exports.set_named_property("AnalyserNode", napi_class)?; @@ -174,6 +182,18 @@ fn init(mut exports: JsObject, env: Env) -> Result<()> { let napi_class = NapiWaveShaperNode::create_js_class(&env)?; exports.set_named_property("WaveShaperNode", napi_class)?; + // ---------------------------------------------------------------- + // AudioWorklet utils (internal) + // ---------------------------------------------------------------- + exports.create_named_method( + "run_audio_worklet_global_scope", + run_audio_worklet_global_scope, + )?; + exports.create_named_method( + "exit_audio_worklet_global_scope", + exit_audio_worklet_global_scope, + )?; + // ---------------------------------------------------------------- // MediaStream API & Media Devices API // ---------------------------------------------------------------- @@ -207,10 +227,9 @@ fn init(mut exports: JsObject, env: Env) -> Result<()> { let napi_class = NapiMediaStream::create_js_class(&env)?; store.set_named_property("MediaStream", napi_class)?; - // store the store into instance so that it can be globally accessed + // push store into env instance data so that it can be globally accessed let store_ref = env.create_reference(store)?; env.set_instance_data(store_ref, 0, |mut c| { - // don't have any idea of what this does c.value.unref(c.env).unwrap(); })?; diff --git a/src/offline_audio_context.rs b/src/offline_audio_context.rs index ebec2a94..ba658dde 100644 --- a/src/offline_audio_context.rs +++ b/src/offline_audio_context.rs @@ -9,8 +9,9 @@ use web_audio_api::{Event, OfflineAudioCompletionEvent}; use crate::*; +/// Napi object wrapping the native OfflineAudioContext and the AudioWorklet ID #[derive(Clone)] -pub(crate) struct NapiOfflineAudioContext(Arc); +pub(crate) struct NapiOfflineAudioContext(Arc, usize); // for debug purpose // impl Drop for NapiOfflineAudioContext { @@ -34,6 +35,10 @@ impl NapiOfflineAudioContext { pub fn unwrap(&self) -> &OfflineAudioContext { &self.0 } + + pub fn worklet_id(&self) -> usize { + self.1 + } } #[js_function(3)] @@ -48,11 +53,12 @@ fn constructor(ctx: CallContext) -> Result { let sample_rate = ctx.get::(2)?.get_double()? 
as f32;
 
     let audio_context = OfflineAudioContext::new(number_of_channels, length, sample_rate);
+    let worklet_id = crate::audio_worklet_node::allocate_process_call_channel();
 
     // -------------------------------------------------
     // Wrap context
     // -------------------------------------------------
-    let napi_audio_context = NapiOfflineAudioContext(Arc::new(audio_context));
+    let napi_audio_context = NapiOfflineAudioContext(Arc::new(audio_context), worklet_id);
     ctx.env.wrap(&mut js_this, napi_audio_context)?;
 
     js_this.define_properties(&[
@@ -71,6 +77,9 @@
     let js_obj = ctor.new_instance(&[&js_this])?;
     js_this.set_named_property("destination", &js_obj)?;
 
+    // internal id to retrieve worklet message channel
+    js_this.set_named_property("workletId", ctx.env.create_uint32(worklet_id as u32)?)?;
+
     ctx.env.get_undefined()
 }
diff --git a/tests/AudioWorklet.spec.mjs b/tests/AudioWorklet.spec.mjs
new file mode 100644
index 00000000..49beeefa
--- /dev/null
+++ b/tests/AudioWorklet.spec.mjs
@@ -0,0 +1,67 @@
+import { Blob } from 'node:buffer';
+import { assert } from 'chai';
+import { AudioContext, OscillatorNode, AudioWorkletNode } from '../index.mjs';
+
+const scriptTexts = `
+class FirstProcessor extends AudioWorkletProcessor {
+  process(inputs, outputs, parameters) {
+    const output = outputs[0];
+
+    output.forEach((channel) => {
+      for (let i = 0; i < channel.length; i++) {
+        channel[i] = Math.random() * 2 - 1;
+      }
+    });
+
+    return true;
+  }
+}
+
+registerProcessor('first-processor', FirstProcessor);
+
+class SecondProcessor extends AudioWorkletProcessor {
+  process(inputs, outputs, parameters) {
+    const output = outputs[0];
+
+    output.forEach((channel) => {
+      for (let i = 0; i < channel.length; i++) {
+        channel[i] = Math.random() * 2 - 1;
+      }
+    });
+
+    return true;
+  }
+}
+
+registerProcessor('second-processor', SecondProcessor);
+`;
+
+describe('AudioWorklet', () => {
+  describe('# addModule(moduleUrl)', () => {
+    it(`should support loading from Blob`, async () => {
+      const blob = new Blob([scriptTexts], { type: 'application/javascript' });
+      const objectUrl = URL.createObjectURL(blob);
+
+      const audioContext = new AudioContext();
+      let errored = false;
+
+      try {
+        // should support blobs
+        await audioContext.audioWorklet.addModule(objectUrl);
+
+        const firstProcessor = new AudioWorkletNode(audioContext, 'first-processor');
+        const secondProcessor = new AudioWorkletNode(audioContext, 'second-processor');
+      } catch (err) {
+        errored = true;
+        console.log(err.message);
+      }
+
+      await audioContext.close();
+      assert.isFalse(errored);
+    });
+
+    it.skip(`should support loading from cwd relative path`, async () => {});
+    it.skip(`should support loading from caller relative path`, async () => {});
+    it.skip(`should support loading from url`, async () => {});
+  });
+});
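
// (editor sketch, not part of the suite) - the skipped cases above could be
// fleshed out along these lines, assuming addModule() accepts cwd-relative
// paths as exercised in examples/audio-worklet.mjs:
//
//   it(`should support loading from cwd relative path`, async () => {
//     const audioContext = new AudioContext();
//     await audioContext.audioWorklet.addModule('examples/worklets/bitcrusher.js');
//     const node = new AudioWorkletNode(audioContext, 'bitcrusher');
//     assert.isDefined(node.parameters.get('bitDepth'));
//     await audioContext.close();
//   });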