
Commit

Reword API to make it clearer
cwoffenden committed Oct 16, 2024
1 parent afa356a commit 2efd0f5
Showing 9 changed files with 30 additions and 30 deletions.
4 changes: 2 additions & 2 deletions site/source/docs/api_reference/wasm_audio_worklets.rst
@@ -47,7 +47,7 @@ Once a class type is instantiated on the Web Audio graph and the graph is
running, a C/C++ function pointer callback will be invoked for each 128
samples of the processed audio stream that flows through the node. Newer Web
Audio API specs allow this to be changed, so for future compatibility use the
-``AudioSampleFrame``'s ``quantumSize`` to get the value.
+``AudioSampleFrame``'s ``samplesPerChannel`` to get the value.

This callback will be executed on a dedicated separate audio processing
thread with real-time processing priority. Each Web Audio context will
@@ -159,7 +159,7 @@ which resumes the audio context when the user clicks on the DOM Canvas element t
void *userData)
{
for(int i = 0; i < numOutputs; ++i)
-for(int j = 0; j < outputs[i].quantumSize*outputs[i].numberOfChannels; ++j)
+for(int j = 0; j < outputs[i].samplesPerChannel*outputs[i].numberOfChannels; ++j)
outputs[i].data[j] = emscripten_random() * 0.2 - 0.1; // Warning: scale down audio volume by factor of 0.2, raw noise can be really loud otherwise
return true; // Keep the graph output going
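
(Aside, not part of this commit: to show the renamed field in a complete callback, here is a minimal sketch of a process callback that writes silence to every output channel, using the planar data[channelIndex*samplesPerChannel + i] layout documented in emscripten/webaudio.h further down in this diff. The ProcessSilence name is illustrative only.)

#include <string.h>
#include <stdbool.h>
#include <emscripten/webaudio.h>

bool ProcessSilence(int numInputs, const AudioSampleFrame *inputs,
                    int numOutputs, AudioSampleFrame *outputs,
                    int numParams, const AudioParamFrame *params,
                    void *userData)
{
  for (int o = 0; o < numOutputs; ++o) {
    // Each output holds numberOfChannels planes of samplesPerChannel floats,
    // so channel ch starts at data + ch * samplesPerChannel.
    for (int ch = 0; ch < outputs[o].numberOfChannels; ++ch)
      memset(outputs[o].data + ch * outputs[o].samplesPerChannel, 0,
             outputs[o].samplesPerChannel * sizeof(float));
  }
  return true; // Keep the graph output going
}
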
26 changes: 13 additions & 13 deletions src/audio_worklet.js
@@ -31,12 +31,12 @@ function createWasmAudioWorkletProcessor(audioParams) {
let opts = args.processorOptions;
this.callbackFunction = Module['wasmTable'].get(opts['cb']);
this.userData = opts['ud'];
-// Plus the number of samples to process, fixed for the lifetime of the
+// Then the samples per channel to process, fixed for the lifetime of the
// context that created this processor. Note for when moving to Web Audio
-// 1.1: the typed array passed to process() should be the same size as the
-// the quantum size, and this exercise of passing in the value shouldn't
-// be required (to be verified).
-this.quantumSize = opts['qs'];
+// 1.1: the typed array passed to process() should be the same size as this
+// 'render quantum size', and this exercise of passing in the value
+// shouldn't be required (to be verified).
+this.samplesPerChannel = opts['qs'];
}

static get parameterDescriptors() {
@@ -51,15 +51,15 @@ function createWasmAudioWorkletProcessor(audioParams) {
let numInputs = inputList.length,
numOutputs = outputList.length,
numParams = 0, i, j, k, dataPtr,
-quantumBytes = this.quantumSize * 4,
+bytesPerChannel = this.samplesPerChannel * 4,
stackMemoryNeeded = (numInputs + numOutputs) * {{{ C_STRUCTS.AudioSampleFrame.__size__ }}},
oldStackPtr = stackSave(),
inputsPtr, outputsPtr, outputDataPtr, paramsPtr,
didProduceAudio, paramArray;

// Calculate how much stack space is needed.
-for (i of inputList) stackMemoryNeeded += i.length * quantumBytes;
-for (i of outputList) stackMemoryNeeded += i.length * quantumBytes;
+for (i of inputList) stackMemoryNeeded += i.length * bytesPerChannel;
+for (i of outputList) stackMemoryNeeded += i.length * bytesPerChannel;
for (i in parameters) stackMemoryNeeded += parameters[i].byteLength + {{{ C_STRUCTS.AudioParamFrame.__size__ }}}, ++numParams;

// Allocate the necessary stack space.
@@ -71,13 +71,13 @@
for (i of inputList) {
// Write the AudioSampleFrame struct instance
HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.numberOfChannels / 4 }}}] = i.length;
-HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.quantumSize / 4 }}}] = this.quantumSize;
+HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.samplesPerChannel / 4 }}}] = this.samplesPerChannel;
HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.data / 4 }}}] = dataPtr;
k += {{{ C_STRUCTS.AudioSampleFrame.__size__ / 4 }}};
// Marshal the input audio sample data for each audio channel of this input
for (j of i) {
HEAPF32.set(j, dataPtr>>2);
-dataPtr += quantumBytes;
+dataPtr += bytesPerChannel;
}
}

@@ -88,11 +88,11 @@
for (i of outputList) {
// Write the AudioSampleFrame struct instance
HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.numberOfChannels / 4 }}}] = i.length;
-HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.quantumSize / 4 }}}] = this.quantumSize;
+HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.samplesPerChannel / 4 }}}] = this.samplesPerChannel;
HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.data / 4 }}}] = dataPtr;
k += {{{ C_STRUCTS.AudioSampleFrame.__size__ / 4 }}};
// Reserve space for the output data
-dataPtr += quantumBytes * i.length;
+dataPtr += bytesPerChannel * i.length;
}

// Copy parameters descriptor structs and data to Wasm
@@ -117,7 +117,7 @@ function createWasmAudioWorkletProcessor(audioParams) {
// not have one, so manually copy all bytes in)
for (i of outputList) {
for (j of i) {
-for (k = 0; k < this.quantumSize; ++k) {
+for (k = 0; k < this.samplesPerChannel; ++k) {
j[k] = HEAPF32[outputDataPtr++];
}
}
2 changes: 1 addition & 1 deletion src/library_webaudio.js
@@ -37,7 +37,7 @@ let LibraryWebAudio = {
// Wasm handle ID.
$emscriptenGetAudioObject: (objectHandle) => EmAudio[objectHandle],

-// Performs the work of getting the AudioContext's quantum size.
+// Performs the work of getting the AudioContext's render quantum size.
$emscriptenGetContextQuantumSize: (contextHandle) => {
// TODO: in a future release this will be something like:
// return EmAudio[contextHandle].renderQuantumSize || 128;
2 changes: 1 addition & 1 deletion src/struct_info.json
@@ -1203,7 +1203,7 @@
"structs": {
"AudioSampleFrame": [
"numberOfChannels",
"quantumSize",
"samplesPerChannel",
"data"
],
"AudioParamFrame": [
2 changes: 1 addition & 1 deletion src/struct_info_generated.json
@@ -479,7 +479,7 @@
"__size__": 12,
"data": 8,
"numberOfChannels": 0,
"quantumSize": 4
"samplesPerChannel": 4
},
"EmscriptenBatteryEvent": {
"__size__": 32,
2 changes: 1 addition & 1 deletion src/struct_info_generated_wasm64.json
@@ -479,7 +479,7 @@
"__size__": 16,
"data": 8,
"numberOfChannels": 0,
"quantumSize": 4
"samplesPerChannel": 4
},
"EmscriptenBatteryEvent": {
"__size__": 32,
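
(Aside, not part of this commit: the regenerated offsets above can be sanity-checked from C. A compile-time sketch, assuming a wasm32 build where pointers are 4 bytes:)

#include <stddef.h>
#include <emscripten/webaudio.h>

// These mirror the generated struct_info values used by the JS marshalling code.
_Static_assert(offsetof(AudioSampleFrame, numberOfChannels) == 0, "numberOfChannels at offset 0");
_Static_assert(offsetof(AudioSampleFrame, samplesPerChannel) == 4, "samplesPerChannel at offset 4");
_Static_assert(offsetof(AudioSampleFrame, data) == 8, "data at offset 8");
_Static_assert(sizeof(AudioSampleFrame) == 12, "12 bytes per AudioSampleFrame on wasm32");
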
10 changes: 5 additions & 5 deletions system/include/emscripten/webaudio.h
@@ -103,19 +103,19 @@ typedef int EMSCRIPTEN_AUDIO_WORKLET_NODE_T;

typedef struct AudioSampleFrame
{
-// Number of audio channels to process (multiplied by quantumSize gives the elements in data)
+// Number of audio channels to process (multiplied by samplesPerChannel gives the elements in data)
const int numberOfChannels;
// Number of samples per channel in data
-const int quantumSize;
-// An array of length numberOfChannels*quantumSize elements. Samples are always arranged in a planar fashion,
-// where data[channelIndex*quantumSize+i] locates the data of the i'th sample of channel channelIndex.
+const int samplesPerChannel;
+// An array of length numberOfChannels*samplesPerChannel elements. Samples are always arranged in a planar fashion,
+// where data[channelIndex*samplesPerChannel+i] locates the data of the i'th sample of channel channelIndex.
float *data;
} AudioSampleFrame;

typedef struct AudioParamFrame
{
// Specifies the length of the input array data (in float elements). This will be guaranteed to either have
-// a value of 1, for a parameter valid for the entire frame, or emscripten_audio_context_quantum_size() for a parameter that changes during the frame.
+// a value of 1, for a parameter valid for the entire frame, or emscripten_audio_context_quantum_size() for a parameter that changes per sample during the frame.
int length;
// An array of length specified in 'length'.
float *data;
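
(Aside, not part of this commit: the AudioParamFrame comment above distinguishes a single value per render quantum from one value per sample, and a callback typically branches on length to cover both cases. The ApplyGain name and the in-place gain on the first output below are illustrative assumptions:)

#include <stdbool.h>
#include <emscripten/webaudio.h>

bool ApplyGain(int numInputs, const AudioSampleFrame *inputs,
               int numOutputs, AudioSampleFrame *outputs,
               int numParams, const AudioParamFrame *params,
               void *userData)
{
  if (numOutputs && numParams) {
    AudioSampleFrame *out = &outputs[0];
    for (int ch = 0; ch < out->numberOfChannels; ++ch)
      for (int i = 0; i < out->samplesPerChannel; ++i) {
        // length == 1: one value for the whole render quantum;
        // otherwise one value per sample.
        float gain = (params[0].length == 1) ? params[0].data[0]
                                             : params[0].data[i];
        out->data[ch * out->samplesPerChannel + i] *= gain;
      }
  }
  return true;
}

In practice the gain would be applied to samples generated earlier in the same callback, as in the tone generator test below.
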
10 changes: 5 additions & 5 deletions test/webaudio/audio_worklet_tone_generator.c
@@ -40,12 +40,12 @@ bool ProcessAudio(int numInputs, const AudioSampleFrame *inputs, int numOutputs,

// Produce a sine wave tone of desired frequency to all output channels.
for(int o = 0; o < numOutputs; ++o)
-for(int i = 0; i < outputs[o].quantumSize; ++i)
+for(int i = 0; i < outputs[o].samplesPerChannel; ++i)
{
float s = emscripten_math_sin(phase);
phase += phaseIncrement;
for(int ch = 0; ch < outputs[o].numberOfChannels; ++ch)
-outputs[o].data[ch*outputs[o].quantumSize + i] = s * currentVolume;
+outputs[o].data[ch*outputs[o].samplesPerChannel + i] = s * currentVolume;
}

// Range reduce to keep precision around zero.
@@ -150,11 +150,11 @@ int main() {

EMSCRIPTEN_WEBAUDIO_T context = emscripten_create_audio_context(&attrs);

-// Get the context's quantum size. Once the audio API allows this to be user
-// defined or exposes the hardware's own value, this will be needed to
+// Get the context's render quantum size. Once the audio API allows this to be
+// user defined or exposes the hardware's own value, this will be needed to
// determine the worklet stack size.
int quantumSize = emscripten_audio_context_quantum_size(context);
printf("Context quantum size: %d\n", quantumSize);
printf("Context render quantum size: %d\n", quantumSize);

// and kick off Audio Worklet scope initialization, which shares the Wasm
// Module and Memory to the AudioWorklet scope and initializes its stack.
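
(Aside, not part of this commit: building on the comment above about using the render quantum size to size the worklet stack, here is a hedged sketch of feeding the value into a buffer-size calculation once it is no longer a fixed 128 samples. The stereo channel count and the notion of a per-output scratch buffer are assumptions:)

#include <stdio.h>
#include <emscripten/webaudio.h>

void PrintScratchSize(EMSCRIPTEN_WEBAUDIO_T context)
{
  int samplesPerChannel = emscripten_audio_context_quantum_size(context);
  int assumedChannels = 2; // assumption: stereo output
  size_t scratchBytes = (size_t)samplesPerChannel * assumedChannels * sizeof(float);
  printf("Render quantum: %d samples per channel -> %zu bytes of scratch per output\n",
         samplesPerChannel, scratchBytes);
}
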
2 changes: 1 addition & 1 deletion test/webaudio/audioworklet.c
@@ -40,7 +40,7 @@ bool ProcessAudio(int numInputs, const AudioSampleFrame *inputs, int numOutputs,

// Produce noise in all output channels.
for(int i = 0; i < numOutputs; ++i)
-for(int j = 0; j < outputs[i].quantumSize*outputs[i].numberOfChannels; ++j)
+for(int j = 0; j < outputs[i].samplesPerChannel*outputs[i].numberOfChannels; ++j)
outputs[i].data[j] = (rand() / (float)RAND_MAX * 2.0f - 1.0f) * 0.3f;

// We generated audio and want to keep this processor going. Return false here to shut down.
