From a49f675643dac9d63861a0ab3b09d6af26d1212f Mon Sep 17 00:00:00 2001
From: gaikwadrahul8
Date: Sat, 27 Apr 2024 19:14:59 +0530
Subject: [PATCH] Fix typos in tfjs-models documentation strings

---
 body-pix/README_Archive.md                          | 2 +-
 body-pix/src/setup_test.ts                          | 2 +-
 body-segmentation/demos/shared/params.js            | 2 +-
 body-segmentation/demos/shared/util.js              | 2 +-
 body-segmentation/src/body_pix/README.md            | 2 +-
 body-segmentation/src/body_pix/impl/setup_test.ts   | 2 +-
 coco-ssd/src/index.ts                               | 4 ++--
 deeplab/README.md                                   | 4 ++--
 deeplab/src/index.ts                                | 4 ++--
 depth-estimation/demos/depth_map/js/gl-class.js     | 2 +-
 depth-estimation/demos/relighting/js/gl-class.js    | 2 +-
 depth-estimation/demos/relighting/js/gl-shaders.js  | 4 ++--
 depth-estimation/src/ar_portrait_depth/estimator.ts | 2 +-
 13 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/body-pix/README_Archive.md b/body-pix/README_Archive.md
index ca0be125da..29b71f1193 100644
--- a/body-pix/README_Archive.md
+++ b/body-pix/README_Archive.md
@@ -141,7 +141,7 @@ const net = await bodyPix.load({
   - `2`. 2 bytes per float. Leads to slightly lower accuracy and 2x model size reduction.
   - `1`. 1 byte per float. Leads to lower accuracy and 4x model size reduction.
 
-  The following table contains the corresponding BodyPix 2.0 model checkpoint sizes (widthout gzip) when using different quantization bytes:
+  The following table contains the corresponding BodyPix 2.0 model checkpoint sizes (without gzip) when using different quantization bytes:
 
   | Architecture       | quantBytes=4 | quantBytes=2 | quantBytes=1 |
   | ------------------ |:------------:|:------------:|:------------:|
diff --git a/body-pix/src/setup_test.ts b/body-pix/src/setup_test.ts
index d345e8884b..6a87d74f7f 100644
--- a/body-pix/src/setup_test.ts
+++ b/body-pix/src/setup_test.ts
@@ -27,7 +27,7 @@ import {setTestEnvs} from '@tensorflow/tfjs-core/dist/jasmine_util';
 // Increase test timeout since we are fetching the model files from GCS.
 jasmine.DEFAULT_TIMEOUT_INTERVAL = 20000;
 
-// Run browser tests againts both the cpu and webgl backends.
+// Run browser tests against both the cpu and webgl backends.
 setTestEnvs([
   // WebGL.
   {
diff --git a/body-segmentation/demos/shared/params.js b/body-segmentation/demos/shared/params.js
index 9388fa939c..20832fd819 100644
--- a/body-segmentation/demos/shared/params.js
+++ b/body-segmentation/demos/shared/params.js
@@ -57,7 +57,7 @@ export const BLAZE_POSE_CONFIG = {
   visualization: 'binaryMask'
 };
 /**
- * This map descripes tunable flags and theior corresponding types.
+ * This map describes tunable flags and their corresponding types.
  *
  * The flags (keys) in the map satisfy the following two conditions:
 * - Is tunable. For example, `IS_BROWSER` and `IS_CHROME` is not tunable,
diff --git a/body-segmentation/demos/shared/util.js b/body-segmentation/demos/shared/util.js
index 3319e2d992..d94baca1d4 100644
--- a/body-segmentation/demos/shared/util.js
+++ b/body-segmentation/demos/shared/util.js
@@ -37,7 +37,7 @@ export function isMobile() {
 async function resetBackend(backendName) {
   const ENGINE = tf.engine();
   if (!(backendName in ENGINE.registryFactory)) {
-    throw new Error(`${backendName} backend is not registed.`);
+    throw new Error(`${backendName} backend is not registered.`);
   }
 
   if (backendName in ENGINE.registry) {
diff --git a/body-segmentation/src/body_pix/README.md b/body-segmentation/src/body_pix/README.md
index 86cb0ded7f..73bb3d55a1 100644
--- a/body-segmentation/src/body_pix/README.md
+++ b/body-segmentation/src/body_pix/README.md
@@ -74,7 +74,7 @@ Pass in `bodySegmentation.SupportedModels.BodyPix` from the
   - `2`. 2 bytes per float. Leads to slightly lower accuracy and 2x model size reduction.
   - `1`. 1 byte per float. Leads to lower accuracy and 4x model size reduction.
 
-  The following table contains the corresponding BodyPix 2.0 model checkpoint sizes (widthout gzip) when using different quantization bytes:
+  The following table contains the corresponding BodyPix 2.0 model checkpoint sizes (without gzip) when using different quantization bytes:
 
   | Architecture       | quantBytes=4 | quantBytes=2 | quantBytes=1 |
   | ------------------ |:------------:|:------------:|:------------:|
diff --git a/body-segmentation/src/body_pix/impl/setup_test.ts b/body-segmentation/src/body_pix/impl/setup_test.ts
index 80b2a4d598..420c841b2e 100644
--- a/body-segmentation/src/body_pix/impl/setup_test.ts
+++ b/body-segmentation/src/body_pix/impl/setup_test.ts
@@ -27,7 +27,7 @@ import { setTestEnvs } from '@tensorflow/tfjs-core/dist/jasmine_util';
 // Increase test timeout since we are fetching the model files from GCS.
 jasmine.DEFAULT_TIMEOUT_INTERVAL = 20000;
 
-// Run browser tests againts both the cpu and webgl backends.
+// Run browser tests against both the cpu and webgl backends.
 setTestEnvs([
   // WebGL.
   {
diff --git a/coco-ssd/src/index.ts b/coco-ssd/src/index.ts
index 76ba66c3f0..607787e92a 100644
--- a/coco-ssd/src/index.ts
+++ b/coco-ssd/src/index.ts
@@ -39,7 +39,7 @@ export interface DetectedObject {
  */
 export interface ModelConfig {
   /**
-   * It determines wich object detection architecture to load. The supported
+   * It determines which object detection architecture to load. The supported
    * architectures are: 'mobilenet_v1', 'mobilenet_v2' and 'lite_mobilenet_v2'.
    * It is default to 'lite_mobilenet_v2'.
    */
@@ -212,7 +212,7 @@ export class ObjectDetection {
 
   /**
    * Detect objects for an image returning a list of bounding boxes with
-   * assocated class and score.
+   * associated class and score.
    *
    * @param img The image to detect objects from. Can be a tensor or a DOM
    * element image, video, or canvas.
diff --git a/deeplab/README.md b/deeplab/README.md
index 9c2ba0900b..1c5890d677 100644
--- a/deeplab/README.md
+++ b/deeplab/README.md
@@ -29,7 +29,7 @@ loadModel()
     console.log(`The predicted classes are ${JSON.stringify(legend)}`));
 ```
 
-By default, calling `load` initalizes the PASCAL variant of the model quantized to 2 bytes.
+By default, calling `load` initializes the PASCAL variant of the model quantized to 2 bytes.
 If you would rather load custom weights, you can pass the URL in the config instead:
@@ -136,7 +136,7 @@ const classify = async (image) => {
 
 ### Producing a Semantic Segmentation Map
 
-To segment an arbitrary image and generate a two-dimensional tensor with class labels assigned to each cell of the grid overlayed on the image (with the maximum number of cells on the side fixed to 513), use the `predict` method of the `SemanticSegmentation` object.
+To segment an arbitrary image and generate a two-dimensional tensor with class labels assigned to each cell of the grid overlaid on the image (with the maximum number of cells on the side fixed to 513), use the `predict` method of the `SemanticSegmentation` object.
 
 #### `model.predict(image)` input
diff --git a/deeplab/src/index.ts b/deeplab/src/index.ts
index e44926ae7b..d91878f492 100644
--- a/deeplab/src/index.ts
+++ b/deeplab/src/index.ts
@@ -112,7 +112,7 @@ export class SemanticSegmentation {
 
   /**
    * Segments an arbitrary image and generates a two-dimensional tensor with
-   * class labels assigned to each cell of the grid overlayed on the image ( the
+   * class labels assigned to each cell of the grid overlaid on the image (the
    * maximum number of cells on the side is fixed to 513).
    *
    * @param input ::
@@ -133,7 +133,7 @@
 
   /**
    * Segments an arbitrary image and generates a two-dimensional tensor with
-   * class labels assigned to each cell of the grid overlayed on the image ( the
+   * class labels assigned to each cell of the grid overlaid on the image (the
    * maximum number of cells on the side is fixed to 513).
    *
    * @param image :: `ImageData | HTMLImageElement | HTMLCanvasElement |
diff --git a/depth-estimation/demos/depth_map/js/gl-class.js b/depth-estimation/demos/depth_map/js/gl-class.js
index 058c46417c..e403556902 100644
--- a/depth-estimation/demos/depth_map/js/gl-class.js
+++ b/depth-estimation/demos/depth_map/js/gl-class.js
@@ -29,7 +29,7 @@ class GlTextureImpl {
   }
 }
 
-// A wrapper class for WebGL texture and its associted framebuffer and utility
+// A wrapper class for WebGL texture and its associated framebuffer and utility
 // functions.
 class GlTextureFramebuffer extends GlTextureImpl {
   constructor(gl, framebuffer, texture, width, height) {
diff --git a/depth-estimation/demos/relighting/js/gl-class.js b/depth-estimation/demos/relighting/js/gl-class.js
index c9a35f5531..a12738ede3 100644
--- a/depth-estimation/demos/relighting/js/gl-class.js
+++ b/depth-estimation/demos/relighting/js/gl-class.js
@@ -29,7 +29,7 @@ class GlTextureImpl {
   }
 }
 
-// A wrapper class for WebGL texture and its associted framebuffer and utility
+// A wrapper class for WebGL texture and its associated framebuffer and utility
 // functions.
 class GlTextureFramebuffer extends GlTextureImpl {
   constructor(gl, framebuffer, texture, width, height) {
diff --git a/depth-estimation/demos/relighting/js/gl-shaders.js b/depth-estimation/demos/relighting/js/gl-shaders.js
index fa5597da42..83b4c482d8 100644
--- a/depth-estimation/demos/relighting/js/gl-shaders.js
+++ b/depth-estimation/demos/relighting/js/gl-shaders.js
@@ -47,7 +47,7 @@ out vec4 out_color;
 #define GetDepth(uv) (texture(uDepth, uv).r)
 #define GetColor(uv) (texture(uColor, uv).rgb)
 
-// Computes the aspect ratio for portait and landscape modes.
+// Computes the aspect ratio for portrait and landscape modes.
 vec2 CalculateAspectRatio(in vec2 size) {
   return pow(size.yy / size, vec2(step(size.x, size.y) * 2.0 - 1.0));
 }
@@ -177,7 +177,7 @@ vec3 RenderMotionLights(in vec2 uv) {
   col = smoothstep(0.0, 0.7, col + 0.05);
   col = pow(col, vec3(1.0 / 1.8));
 
-  // Perceptual light radius propotional to percentage in the screen space.
+  // Perceptual light radius proportional to percentage in the screen space.
   float light_radius = 2.0 * atan(kLightRadius, 2.0 * (1.0 - center.z));
 
   float l = distance(center.xy, normalized_uv);
diff --git a/depth-estimation/src/ar_portrait_depth/estimator.ts b/depth-estimation/src/ar_portrait_depth/estimator.ts
index 37dc8ef95b..a3a3e3f62f 100644
--- a/depth-estimation/src/ar_portrait_depth/estimator.ts
+++ b/depth-estimation/src/ar_portrait_depth/estimator.ts
@@ -128,7 +128,7 @@ class ARPortraitDepthEstimator implements DepthEstimator {
     // Shape after expansion is [1, height, width, 3].
     const batchInput = tf.expandDims(imageResized);
 
-    // Depth prediction (ouput shape is [1, height, width, 1]).
+    // Depth prediction (output shape is [1, height, width, 1]).
     const depth4D = this.estimatorModel.predict(batchInput) as tf.Tensor4D;
 
     // Normalize to user requirements.