<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
<title>Web Audio API - MediaStream Integration</title>
<link rel="stylesheet" href="http://www.w3.org/StyleSheets/TR/W3C-ED" type="text/css" />
<link rel="stylesheet" href="style.css" type="text/css" />
</head>
<body>
<h1>MediaStream Integration</h1>
<p>
<em>This section is informative.</em>
</p>
<p>
The following examples illustrate
<a href="http://dev.w3.org/2011/webrtc/editor/webrtc-20111004.html">WebRTC</a> integration with the
<a href="http://www.w3.org/TR/webaudio">Web Audio API</a>. They are borrowed and modified from Robert O'Callahan's
<a href="http://hg.mozilla.org/users/rocallahan_mozilla.com/specs/raw-file/tip/StreamProcessing/StreamProcessing.html#examples">MediaStream Processing API</a>
proposal.
</p>
<p>
Please note the addition of two new <b>AudioContext</b> methods: createMediaStreamSource() and createMediaStreamDestination().
They still need to be documented more fully, but they provide simple and straightforward integration between MediaStreams and the Web Audio API.
In each example below, <b>context</b> is assumed to be a previously constructed <b>AudioContext</b>.
</p>
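<p>
As a rough sketch of how the two methods fit together (the exact IDL is not specified here; the names, including the stream attribute on the destination node, are inferred from the examples below):
</p>
<div class="block"><div class="blockTitleDiv"><span class="blockTitle">ECMAScript</span></div><div class="blockContent"><pre class="code"><code class="es-code">
// Sketch only: names inferred from the examples below, not normative.
var context = new AudioContext();

// createMediaStreamSource() exposes a MediaStream's audio as an AudioNode:
var source = context.createMediaStreamSource(incomingStream); // incomingStream: some previously obtained MediaStream

// createMediaStreamDestination() exposes an AudioNode's input as a MediaStream
// via its stream attribute, suitable for PeerConnection.addStream():
var destination = context.createMediaStreamDestination();
source.connect(destination);
peerConnection.addStream(destination.stream);
</code></pre></div></div>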
<h2>Examples</h2>
<ol>
<li>Play video with processing effect applied to the audio track
<div class="block"><div class="blockTitleDiv"><span class="blockTitle">ECMAScript</span></div><div class="blockContent"><pre class="code"><code class="es-code">
<video src="foo.webm" id="v" controls></video>
<script>
var audioSource = context.createMediaElementSource(document.getElementById("v"));
var filter = context.createBiquadFilter();
audioSource.connect(filter);
filter.connect(context.destination);
</script>
</code></pre></div>
</div>
</li>
<li>Play video with processing effects mixing in out-of-band audio tracks (in sync)
<div class="block"><div class="blockTitleDiv"><span class="blockTitle">ECMAScript</span></div><div class="blockContent"><pre class="code"><code class="es-code">
<video src="foo.webm" id="v"></video>
<audio src="back.webm" id="back"></audio>
<script>
var videoSource = context.createMediaElementSource(document.getElementById("v"));
var audioSource = context.createMediaElementSource(document.getElementById("back"));
var effect = context.createJavaScriptNode(2048, 2, 2); // bufferSize and channel counts; bufferSize is required
effect.onaudioprocess = customAudioProcessor;
videoSource.connect(effect);
effect.connect(context.destination);
audioSource.connect(context.destination);
function startPlaying() {
    document.getElementById("v").play();
    document.getElementById("back").play();
}
</script>
</code></pre>
</div>
</div>
</li>
<li>Capture microphone input and stream it out to a peer with a processing effect applied to the audio
<div class="block"><div class="blockTitleDiv"><span class="blockTitle">ECMAScript</span></div><div class="blockContent"><pre class="code"><code class="es-code">
<script>
navigator.getUserMedia('audio', gotAudio);
function gotAudio(stream) {
    var microphone = context.createMediaStreamSource(stream);
    var filter = context.createBiquadFilter();
    var peer = context.createMediaStreamDestination();
    microphone.connect(filter);
    filter.connect(peer);
    peerConnection.addStream(peer.stream);
}
</script>
</code></pre></div></div>
</li>
<li>Capture microphone input and visualize it as it is being streamed out to a peer and recorded
<div class="block"><div class="blockTitleDiv"><span class="blockTitle">ECMAScript</span></div><div class="blockContent"><pre class="code"><code class="es-code">
<canvas id="c"></canvas>
<script>
navigator.getUserMedia('audio', gotAudio);
var streamRecorder;
function gotAudio(stream) {
    var microphone = context.createMediaStreamSource(stream);
    var analyser = context.createAnalyser();
    microphone.connect(analyser);
    analyser.connect(context.destination);
    requestAnimationFrame(drawAnimation);
    streamRecorder = stream.record();
    peerConnection.addStream(stream);
}
</script>
</code></pre></div></div>
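<p>
drawAnimation() is left undefined here and in the next example. A minimal sketch, assuming the analyser variable is made visible to it and using the canvas with id "c" declared above, might plot the time-domain waveform like this:
</p>
<div class="block"><div class="blockTitleDiv"><span class="blockTitle">ECMAScript</span></div><div class="blockContent"><pre class="code"><code class="es-code">
var canvasContext = document.getElementById("c").getContext("2d");

function drawAnimation() {
    // Pull the current time-domain samples out of the analyser...
    var waveform = new Uint8Array(analyser.frequencyBinCount);
    analyser.getByteTimeDomainData(waveform);
    // ...and plot them across the full width of the canvas.
    var width = canvasContext.canvas.width;
    var height = canvasContext.canvas.height;
    canvasContext.clearRect(0, 0, width, height);
    canvasContext.beginPath();
    for (var i = 0; i < waveform.length; ++i) {
        var x = i * width / waveform.length;
        var y = waveform[i] * height / 256; // byte samples are 0..255
        if (i == 0) canvasContext.moveTo(x, y);
        else canvasContext.lineTo(x, y);
    }
    canvasContext.stroke();
    requestAnimationFrame(drawAnimation); // schedule the next frame
}
</code></pre></div></div>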
</li>
<li>Capture microphone input, visualize it, mix in another audio track and stream the result to a peer and record
<div class="block"><div class="blockTitleDiv"><span class="blockTitle">ECMAScript</span></div><div class="blockContent"><pre class="code"><code class="es-code">
<canvas id="c"></canvas>
<audio src="back.webm" id="back"></audio>
<script>
navigator.getUserMedia('audio', gotAudio);
var streamRecorder;
function gotAudio(stream) {
    var microphone = context.createMediaStreamSource(stream);
    var backgroundMusic = context.createMediaElementSource(document.getElementById("back"));
    var analyser = context.createAnalyser();
    var mixedOutput = context.createMediaStreamDestination();
    microphone.connect(analyser);
    analyser.connect(mixedOutput);
    backgroundMusic.connect(mixedOutput);
    requestAnimationFrame(drawAnimation);
    streamRecorder = mixedOutput.stream.record();
    peerConnection.addStream(mixedOutput.stream);
}
</script>
</code></pre></div></div>
</li>
<li>Receive audio streams from peers, mix them with spatialization effects, and play
<div class="block"><div class="blockTitleDiv"><span class="blockTitle">ECMAScript</span></div><div class="blockContent"><pre class="code"><code class="es-code">
<audio id="out" autoplay></audio>
<script>
peerConnection.onaddstream = function(event) {
    var peerInput = context.createMediaStreamSource(event.stream);
    var panner = context.createPanner();
    panner.setPosition(x, y, z); // x, y, z are application-supplied spatial coordinates
    peerInput.connect(panner);
    panner.connect(context.destination);
};
</script>
</code></pre></div></div>
</li>
<li>Seamlessly chain from the end of one input stream to another
<div class="block"><div class="blockTitleDiv"><span class="blockTitle">ECMAScript</span></div><div class="blockContent"><pre class="code"><code class="es-code">
<audio src="in1.webm" id="in1" preload></audio>
<audio src="in2.webm" id="in2"></audio>
<script>
var in1 = document.getElementById("in1");
var in2 = document.getElementById("in2");
in1.onloadeddata = function() {
    in1.onended = function() { in2.play(); };
    in1.play();
};
</script>
</code></pre></div></div>
</li>
<li>Seamlessly switch from one input stream to another, e.g. to implement adaptive streaming
<div class="block"><div class="blockTitleDiv"><span class="blockTitle">ECMAScript</span></div><div class="blockContent"><pre class="code"><code class="es-code">
<audio src="in1.webm" id="in1" preload></audio>
<audio src="in2.webm" id="in2"></audio>
<script>
var in1 = document.getElementById("in1");
var in2 = document.getElementById("in2");
var source1 = context.createMediaElementSource(in1);
var source2 = context.createMediaElementSource(in2);
source1.connect(context.destination);
source2.connect(context.destination);
in1.play();
function switchStreams() {
    in2.currentTime = in1.currentTime + 10; // arbitrary, but we should be able to complete the seek within this time
    var switchTime = context.currentTime + 10;
    // The timed pause()/play() below are hypothetical extensions borrowed from the
    // MediaStream Processing proposal; standard media elements take no time argument.
    in1.pause(switchTime);
    in2.play(switchTime);
}
</script>
</code></pre></div></div>
</li>
<li>Synthesize samples from JS data
<div class="block"><div class="blockTitleDiv"><span class="blockTitle">ECMAScript</span></div><div class="blockContent"><pre class="code"><code class="es-code">
<script>
var processor = context.createJavaScriptNode(2048, 2, 2);
processor.onaudioprocess = customAudioProcess;
processor.connect(context.destination);
</script>
</code></pre></div></div>
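<p>
customAudioProcess() is left undefined above. A minimal sketch that fills the output buffers with a 440 Hz sine tone (the frequency and amplitude are illustrative, not from the original example):
</p>
<div class="block"><div class="blockTitleDiv"><span class="blockTitle">ECMAScript</span></div><div class="blockContent"><pre class="code"><code class="es-code">
var phase = 0; // oscillator phase, carried across callbacks

function customAudioProcess(event) {
    var left = event.outputBuffer.getChannelData(0);
    var right = event.outputBuffer.getChannelData(1);
    var phaseIncrement = 2 * Math.PI * 440 / context.sampleRate;
    for (var i = 0; i < left.length; ++i) {
        var sample = 0.25 * Math.sin(phase); // quiet sine tone
        left[i] = sample;
        right[i] = sample;
        phase += phaseIncrement;
    }
}
</code></pre></div></div>
<p>
An effect callback such as customAudioProcessor in example 2 would additionally read from event.inputBuffer before writing the processed result to event.outputBuffer.
</p>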
</li>
<li>Trigger a sound sample to be played through the effects graph ASAP but without causing any blocking
<div class="block"><div class="blockTitleDiv"><span class="blockTitle">ECMAScript</span></div><div class="blockContent"><pre class="code"><code class="es-code">
<script>
var source = context.createBufferSource();
source.buffer = kickDrum909;
source.connect(context.destination);
source.noteOn(0);
</script>
</code></pre></div></div>
</li>
<li>Trigger a sound sample to be played through the effects graph in five seconds
<div class="block"><div class="blockTitleDiv"><span class="blockTitle">ECMAScript</span></div><div class="blockContent"><pre class="code"><code class="es-code">
<script>
var source = context.createBufferSource();
source.buffer = kickDrum909;
var effect = context.createConvolver();
effect.buffer = immenseCathedral;
source.connect(effect);
effect.connect(context.destination);
source.noteOn(context.currentTime + 5);
</script>
</code></pre></div></div>
<p>
Please note that examples 12, 13, and 14 involve no audio processing and thus should work with the
<a href="http://dev.w3.org/2011/webrtc/editor/webrtc-20111004.html">WebRTC API</a> as it is currently designed.
</p>
</li>
<li>Capture video from a camera and analyze it (e.g. face recognition)
<div class="block"><div class="blockTitleDiv"><span class="blockTitle">ECMAScript</span></div><div class="blockContent"><pre class="code"><code class="es-code">
<script>
navigator.getUserMedia('video', gotVideo);
function gotVideo(stream) {
    // createWorkerProcessor() comes from the MediaStream Processing proposal
    // referenced above, not from the Web Audio API.
    stream.createWorkerProcessor(new Worker("face-recognizer.js"));
}
</script>
</code></pre></div></div>
</li>
<li>Capture video, record it to a file and upload the file (e.g. Youtube)
<div class="block"><div class="blockTitleDiv"><span class="blockTitle">ECMAScript</span></div><div class="blockContent"><pre class="code"><code class="es-code">
<script>
navigator.getUserMedia('video', gotVideo);
var streamRecorder;
function gotVideo(stream) {
    streamRecorder = stream.record();
}
function stopRecording() {
    streamRecorder.getRecordedData(gotData);
}
function gotData(blob) {
    var x = new XMLHttpRequest();
    x.open('POST', 'uploadMessage');
    x.send(blob);
}
</script>
</code></pre></div></div>
</li>
<li>Capture video from a canvas, record it to a file then upload
<div class="block"><div class="blockTitleDiv"><span class="blockTitle">ECMAScript</span></div><div class="blockContent"><pre class="code"><code class="es-code">
<canvas width="640" height="480" id="c"></canvas>
<script>
var canvas = document.getElementById("c");
var streamRecorder = canvas.stream.record(); // canvas.stream and record() follow the MediaStream Processing proposal
function stopRecording() {
    streamRecorder.getRecordedData(gotData);
}
function gotData(blob) {
    var x = new XMLHttpRequest();
    x.open('POST', 'uploadMessage');
    x.send(blob);
}
var frame = 0;
function updateCanvas() {
    var ctx = canvas.getContext("2d");
    ctx.clearRect(0, 0, 640, 480);
    ctx.fillText("Frame " + frame, 0, 200);
    ++frame;
}
setInterval(updateCanvas, 30);
</script>
</code></pre></div></div>
</li>
</ol>
</body>
</html>