From ca06ff9c3162c4ee29c68e1ec385e33229e80f8f Mon Sep 17 00:00:00 2001
From: Aleksandar Stojiljkovic
class="mjx-mo" style= "padding-left: 0.267em; padding-right: 0.267em;">−−n class="mjx-mo" style= "padding-left: 0.267em; padding-right: 0.267em;">−−n
d16bit to [0, 1] range:
@@ -635,11 +639,10 @@
d
class="mjx-mo" style=
"padding-left: 0.267em; padding-right: 0.267em;">−−n
+ Use cases like rendering a 3D point cloud, background removal,
+ pattern recognition, and motion recognition are a good fit to be, at
+ least partially, implemented on the GPU. This section explains which
+ APIs to use for some of the listed use cases; the concrete usage
+ examples are provided in the Examples section.
+
+ Upload video frame to WebGL texture
+
  A video element whose source is a MediaStream object
  containing a depth stream track may be uploaded to a WebGL
- texture of format RGB and type UNSIGNED_BYTE. [[WEBGL]]
+ texture of format RGBA or RED and type FLOAT. See also the
+ specification [[WEBGL]] and the upload to float texture example code.
  For each pixel of this WebGL texture, the R component represents
- the lower 8 bits of the 16-bit depth value, the G component
- represents the upper 8 bits of the 16-bit depth value, and the
- value of the B component is not defined.
+ the normalized 16-bit value following the formula:
+ d_float = d_16bit / 65535.0
+
+ Read the data from WebGL texture
+
+ We list here some of the possible approaches. In addition to
+ synchronous readPixels, there are two ways available for asynchronous
+ access.
+
+ Performance of the synchronous readPixels from float texture example in
+ the current implementation suffices for some of the use cases. The
+ reason is that there is no rendering to the float texture bound to the
+ named framebuffer. The readPixels from float texture example extends
+ the upload to float texture example.
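Since the depth frame now lives in a single-channel float texture, a fragment shader can read the normalized depth directly from the R component instead of reconstructing it from two 8-bit channels as the removed shader did. The following GLSL ES 3.00 sketch is illustrative only and not part of the patch; the names u_tex and v_texCoord are assumptions matching the shader setup used by the examples.

// Illustrative sketch (not from the patch): sample the R32F depth texture in a
// WebGL2 fragment shader. dFloat is the normalized value d_16bit / 65535.0.
var depthFragmentShaderSource = `#version 300 es
precision highp float;
in vec2 v_texCoord;
uniform sampler2D u_tex;   // texture unit containing the R32F depth texture
out vec4 fragColor;
void main() {
  float dFloat = texture(u_tex, v_texCoord).r;
  // ... use dFloat here, e.g. visualize the depth map as grayscale ...
  fragColor = vec4(vec3(dFloat), 1.0);
}`;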
+
- WebGL Fragment Shader based post-processing
+ WebGL2: upload to float texture
-// This code sets up a video element from a depth stream, uploads it to a WebGL
-// texture, and samples that texture in the fragment shader, reconstructing the
-// 16-bit depth values from the red and green channels.
+// This code sets up a video element from a depth stream, uploads it to a WebGL2
+// float texture that preserves the full 16-bit precision of the depth map.
navigator.mediaDevices.getUserMedia({
video: {videoKind: {exact: "depth"}}
}).then(function (stream) {
@@ -1256,31 +1344,71 @@
+
// handle gUM error here
});
+// ... initialize WebGL2 context when application starts.
+var gl = canvas.getContext("webgl2");
+// Activate the extension that makes float formats such as R32F color-renderable,
+// which is needed later to attach the texture to a framebuffer and read it back.
+gl.getExtension('EXT_color_buffer_float');
+
// ... later, in the rendering loop ...
+gl.bindTexture(gl.TEXTURE_2D, depthTexture);
gl.texImage2D(
gl.TEXTURE_2D,
0,
- gl.RGB,
- gl.RGB,
- gl.UNSIGNED_BYTE,
- depthVideo
-);
+ gl.R32F,
+ gl.RED,
+ gl.FLOAT,
+ depthVideo);
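// Note (illustrative, not part of the patch): the texImage2D call above assumes
// that depthTexture was created and configured during initialization. Float
// textures are not filterable in WebGL2 without OES_texture_float_linear, so a
// minimal setup would use NEAREST filtering, for example:
var depthTexture = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, depthTexture);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);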
-<script id="fragment-shader" type="x-shader/x-fragment">
- varying vec2 v_texCoord;
- // u_tex points to the texture unit containing the depth texture.
- uniform sampler2D u_tex;
- uniform float far;
- uniform float near;
- void main() {
- vec4 floatColor = texture2D(u_tex, v_texCoord);
- float dn = floatColor.r;
- float depth = 0.;
- depth = far * near / ( far - dn * ( far - near));
- // ...
- }
-</script>
+ WebGL2: readPixels from float texture
+
+
+// This code sets up a named framebuffer, attaches the texture that the depth
+// video is uploaded to as its color attachment and, at rendering time, reads
+// the texture content back into a Float32Array.
+
+// ... initialize framebuffer for reading back the texture...
+var framebuffer = gl.createFramebuffer();
+gl.bindFramebuffer(gl.FRAMEBUFFER, framebuffer);
+gl.framebufferTexture2D(
+ gl.FRAMEBUFFER,
+ gl.COLOR_ATTACHMENT0,
+ gl.TEXTURE_2D,
+ depthTexture,
+ 0);
+
+// ... later, in the rendering loop ...
+gl.bindTexture(gl.TEXTURE_2D, depthTexture);
+gl.texImage2D(
+ gl.TEXTURE_2D,
+ 0,
+ gl.R32F,
+ gl.RED,
+ gl.FLOAT,
+ depthVideo);
+
+var buffer = new Float32Array(depthVideo.videoWidth * depthVideo.videoHeight);
+gl.readPixels(
+ 0,
+ 0,
+ depthVideo.videoWidth,
+ depthVideo.videoHeight,
+ gl.RED,
+ gl.FLOAT,
+ buffer);
+
+
+ Use gl.getParameter(gl.IMPLEMENTATION_COLOR_READ_FORMAT) to check
+ whether readPixels to gl.RED or gl.RGBA float is supported.
+
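The section above notes that, in addition to synchronous readPixels, asynchronous access is also possible, but the patch does not show it. The sketch below is an assumption rather than part of the patch: it illustrates one asynchronous path in WebGL2 using a PIXEL_PACK_BUFFER together with a fence sync object, reusing the framebuffer and depthTexture from the readPixels from float texture example.

// Illustrative sketch (assumption, not from the patch): asynchronous readback of
// the R32F depth texture through a pixel pack buffer and a fence.
var packBuffer = gl.createBuffer();
gl.bindBuffer(gl.PIXEL_PACK_BUFFER, packBuffer);
gl.bufferData(gl.PIXEL_PACK_BUFFER,
    depthVideo.videoWidth * depthVideo.videoHeight * 4,  // one 32-bit float per pixel
    gl.STREAM_READ);

// With a PIXEL_PACK_BUFFER bound, readPixels writes into the buffer on the GPU
// timeline; the last argument is a byte offset into the buffer.
gl.readPixels(
    0,
    0,
    depthVideo.videoWidth,
    depthVideo.videoHeight,
    gl.RED,
    gl.FLOAT,
    0);
var sync = gl.fenceSync(gl.SYNC_GPU_COMMANDS_COMPLETE, 0);
gl.flush();
gl.bindBuffer(gl.PIXEL_PACK_BUFFER, null);

// ... in a later frame, once the fence has been signaled ...
if (gl.getSyncParameter(sync, gl.SYNC_STATUS) === gl.SIGNALED) {
  gl.deleteSync(sync);
  var buffer = new Float32Array(depthVideo.videoWidth * depthVideo.videoHeight);
  gl.bindBuffer(gl.PIXEL_PACK_BUFFER, packBuffer);
  gl.getBufferSubData(gl.PIXEL_PACK_BUFFER, 0, buffer);
  gl.bindBuffer(gl.PIXEL_PACK_BUFFER, null);
  // Multiplying a value by 65535.0 recovers the original 16-bit depth value.
}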
diff --git a/index.src.html b/index.src.html
index 3b2c5a1..ea6425b 100644
--- a/index.src.html
+++ b/index.src.html
@@ -879,17 +879,56 @@
+ Upload video frame to WebGL texture
+
  A video element whose source is a MediaStream object
  containing a depth stream track may be uploaded to a WebGL
- texture of format RGB and type UNSIGNED_BYTE. [[WEBGL]]
+ texture of format RGBA or RED and type FLOAT. See also the
+ specification [[WEBGL]] and the upload to float texture example code.
+
+ Read the data from WebGL texture
+
- WebGL Fragment Shader based post-processing
+ WebGL2: upload to float texture
-// This code sets up a video element from a depth stream, uploads it to a WebGL
-// texture, and samples that texture in the fragment shader, reconstructing the
-// 16-bit depth values from the red and green channels.
+// This code sets up a video element from a depth stream, uploads it to a WebGL2
+// float texture that preserves the full 16-bit precision of the depth map.
navigator.mediaDevices.getUserMedia({
video: {videoKind: {exact: "depth"}}
}).then(function (stream) {
@@ -942,31 +980,71 @@
+
// handle gUM error here
});
+// ... initialize WebGL2 context when application starts.
+var gl = canvas.getContext("webgl2");
+// Activate the extension that makes float formats such as R32F color-renderable,
+// which is needed later to attach the texture to a framebuffer and read it back.
+gl.getExtension('EXT_color_buffer_float');
+
// ... later, in the rendering loop ...
+gl.bindTexture(gl.TEXTURE_2D, depthTexture);
gl.texImage2D(
gl.TEXTURE_2D,
0,
- gl.RGB,
- gl.RGB,
- gl.UNSIGNED_BYTE,
- depthVideo
-);
+ gl.R32F,
+ gl.RED,
+ gl.FLOAT,
+ depthVideo);
-<script id="fragment-shader" type="x-shader/x-fragment">
- varying vec2 v_texCoord;
- // u_tex points to the texture unit containing the depth texture.
- uniform sampler2D u_tex;
- uniform float far;
- uniform float near;
- void main() {
- vec4 floatColor = texture2D(u_tex, v_texCoord);
- float dn = floatColor.r;
- float depth = 0.;
- depth = far * near / ( far - dn * ( far - near));
- // ...
- }
-</script>
+ WebGL2: readPixels from float texture
+
+
+// This code sets up a named framebuffer, attaches the texture that the depth
+// video is uploaded to as its color attachment and, at rendering time, reads
+// the texture content back into a Float32Array.
+
+// ... initialize framebuffer for reading back the texture...
+var framebuffer = gl.createFramebuffer();
+gl.bindFramebuffer(gl.FRAMEBUFFER, framebuffer);
+gl.framebufferTexture2D(
+ gl.FRAMEBUFFER,
+ gl.COLOR_ATTACHMENT0,
+ gl.TEXTURE_2D,
+ depthTexture,
+ 0);
+
+// ... later, in the rendering loop ...
+gl.bindTexture(gl.TEXTURE_2D, depthTexture);
+gl.texImage2D(
+ gl.TEXTURE_2D,
+ 0,
+ gl.R32F,
+ gl.RED,
+ gl.FLOAT,
+ depthVideo);
+
+var buffer = new Float32Array(depthVideo.videoWidth * depthVideo.videoHeight);
+gl.readPixels(
+ 0,
+ 0,
+ depthVideo.videoWidth,
+ depthVideo.videoHeight,
+ gl.RED,
+ gl.FLOAT,
+ buffer);
+
+
+ Use gl.getParameter(gl.IMPLEMENTATION_COLOR_READ_FORMAT) to check
+ whether readPixels to gl.RED or gl.RGBA float is supported.
+
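As a concrete illustration of the note above (a sketch, not part of the patch), an application could query the supported read format for the currently bound framebuffer and fall back to gl.RGBA when gl.RED is not reported:

// Illustrative sketch: pick the readPixels format supported by the implementation.
var readFormat = gl.getParameter(gl.IMPLEMENTATION_COLOR_READ_FORMAT);
var readType = gl.getParameter(gl.IMPLEMENTATION_COLOR_READ_TYPE);
var useRed = (readFormat === gl.RED && readType === gl.FLOAT);
var channels = useRed ? 1 : 4;
var buffer = new Float32Array(depthVideo.videoWidth * depthVideo.videoHeight * channels);
gl.readPixels(
    0,
    0,
    depthVideo.videoWidth,
    depthVideo.videoHeight,
    useRed ? gl.RED : gl.RGBA,
    gl.FLOAT,
    buffer);
// When reading as gl.RGBA, the depth value is the R component of each pixel,
// i.e. every fourth element of the buffer.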