diff --git a/.github/workflows/ghpages.yml b/.github/workflows/ghpages.yml
new file mode 100644
index 0000000..3780672
--- /dev/null
+++ b/.github/workflows/ghpages.yml
@@ -0,0 +1,21 @@
+name: Build and Deploy
+on:
+ push:
+ branches:
+ - main
+jobs:
+ build-and-publish-live-demo:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Install and Build
+ run: |
+ npm install
+ npm run build
+ - name: Deploy
+ uses: JamesIves/github-pages-deploy-action@v4
+ with:
+ branch: demo # The branch the action should deploy to.
+ folder: dist # The folder the action should deploy.
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..a547bf3
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,24 @@
+# Logs
+logs
+*.log
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+pnpm-debug.log*
+lerna-debug.log*
+
+node_modules
+dist
+dist-ssr
+*.local
+
+# Editor directories and files
+.vscode/*
+!.vscode/extensions.json
+.idea
+.DS_Store
+*.suo
+*.ntvs*
+*.njsproj
+*.sln
+*.sw?
diff --git a/README.md b/README.md
index bce0aba..2d14777 100644
--- a/README.md
+++ b/README.md
@@ -23,7 +23,7 @@
- We make the implementation of Brainchop freely available, releasing its pure javascript code as open-source. The user interface (UI) provides a web-based end-to-end solution for 3D MRI segmentation. Papaya viewer is integrated with the tool for MRI visualization. In version 1.3.0, Three.js is used for MRI 3D rendering. For more information about Brainchop, please refer to this detailed Wiki and this Blog.
+ We make the implementation of Brainchop freely available, releasing its pure javascript code as open-source. The user interface (UI) provides a web-based end-to-end solution for 3D MRI segmentation. NiiVue viewer is integrated with the tool for MRI visualization. For more information about Brainchop, please refer to this detailed Wiki and this Blog.
For questions or to share ideas, please refer to our Discussions board.
@@ -60,6 +60,24 @@ To see Brainchop in action please click [here](https://neuroneural.github.io/br
+## Hot Reloadable Development
+
+Develop brainchop interactively: whenever you save a modified source file, the web page reloads automatically.
+
+```
+git clone git@github.com:neuroneural/brainchop.git
+cd brainchop
+npm install
+npm run dev
+
+```
+
+## To build
+
+```
+npm run build
+```
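+
+If the project is built with Vite (an assumption suggested by the `dist` output folder, not confirmed here), you can sanity-check the production build locally with Vite's preview server:
+
+```
+npm run preview
+```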
+
## Updates
diff --git a/brainchop-diagnostics.js b/brainchop-diagnostics.js
new file mode 100644
index 0000000..1f89e1a
--- /dev/null
+++ b/brainchop-diagnostics.js
@@ -0,0 +1,200 @@
+export { isChrome, localSystemDetails }
+
+async function detectBrowser() {
+ if (navigator.userAgent.indexOf('OPR/') > -1) {
+ return 'Opera'
+ } else if (navigator.userAgent.indexOf('Edg/') > -1) {
+ return 'Edge'
+ } else if (navigator.userAgent.indexOf('Falkon/') > -1) {
+ return 'Falkon'
+ } else if (navigator.userAgent.indexOf('Chrome/') > -1) {
+ return 'Chrome'
+ } else if (navigator.userAgent.indexOf('Firefox/') > -1) {
+ return 'Firefox'
+ } else if (navigator.userAgent.indexOf('Safari/') > -1) {
+ return 'Safari'
+ } else if (navigator.userAgent.indexOf('MSIE/') > -1 || navigator.userAgent.indexOf('rv:') > -1) {
+ return 'IExplorer'
+ } else {
+ return 'Unknown'
+ }
+}
+
+async function detectBrowserVersion() {
+ if (navigator.userAgent.indexOf('OPR/') > -1) {
+ return parseInt(navigator.userAgent.split('OPR/')[1])
+ } else if (navigator.userAgent.indexOf('Edg/') > -1) {
+ return parseInt(navigator.userAgent.split('Edg/')[1])
+ } else if (navigator.userAgent.indexOf('Falkon/') > -1) {
+ return parseInt(navigator.userAgent.split('Falkon/')[1])
+ } else if (navigator.userAgent.indexOf('Chrome/') > -1) {
+ return parseInt(navigator.userAgent.split('Chrome/')[1])
+ } else if (navigator.userAgent.indexOf('Firefox/') > -1) {
+ return parseInt(navigator.userAgent.split('Firefox/')[1])
+ } else if (navigator.userAgent.indexOf('Safari/') > -1) {
+ return parseInt(navigator.userAgent.split('Safari/')[1])
+  } else if (navigator.userAgent.indexOf('MSIE/') > -1) {
+    return parseInt(navigator.userAgent.split('MSIE/')[1])
+  } else if (navigator.userAgent.indexOf('rv:') > -1) {
+    // IE 11 reports its version after 'rv:' rather than 'MSIE/'
+    return parseInt(navigator.userAgent.split('rv:')[1])
+ } else {
+ return Infinity
+ }
+}
+
+async function detectOperatingSys() {
+ if (navigator.userAgent.indexOf('Win') > -1) {
+ return 'Windows'
+ } else if (navigator.userAgent.indexOf('Mac') > -1) {
+ return 'MacOS'
+ } else if (navigator.userAgent.indexOf('Linux') > -1) {
+ return 'Linux'
+ } else if (navigator.userAgent.indexOf('UNIX') > -1) {
+ return 'UNIX'
+ } else {
+ return 'Unknown'
+ }
+}
+
+async function checkWebGl2(gl) {
+ // const gl = document.createElement('canvas').getContext('webgl2')
+ if (!gl) {
+ if (typeof WebGL2RenderingContext !== 'undefined') {
+ console.log('WebGL2 may be disabled. Please try updating video card drivers')
+ } else {
+ console.log('WebGL2 is not supported')
+ }
+ return false
+ } else {
+ console.log('WebGl2 is enabled')
+ return true
+ }
+}
+
+async function detectGPUVendor(gl) {
+ // const gl = document.createElement('canvas').getContext('webgl')
+ let debugInfo
+ if (gl) {
+ debugInfo = gl.getExtension('WEBGL_debug_renderer_info')
+ if (debugInfo) {
+ const result = gl.getParameter(debugInfo.UNMASKED_VENDOR_WEBGL)
+ // --e.g. : NVIDIA Corporation
+ if (result.indexOf('(') > -1 && result.indexOf(')') > -1) {
+ return result.substring(result.indexOf('(') + 1, result.indexOf(')'))
+ }
+ return result
+ }
+ }
+ return null
+}
+
+async function detectGPUVendor_v0(gl) {
+ // const gl = document.createElement('canvas').getContext('webgl')
+ if (gl) {
+ const debugInfo = gl.getExtension('WEBGL_debug_renderer_info')
+ return debugInfo ? gl.getParameter(debugInfo.UNMASKED_VENDOR_WEBGL) : null
+ } else {
+ return null
+ }
+}
+
+async function detectGPUCardType_v0(gl) {
+ if (gl) {
+    if ((await detectBrowser()) === 'Firefox') {
+ // -- return e.g: "GeForce GTX 980/PCIe/SSE2"
+ return gl.getParameter(gl.RENDERER)
+ }
+
+ const debugInfo = gl.getExtension('WEBGL_debug_renderer_info')
+ return debugInfo ? gl.getParameter(debugInfo.UNMASKED_RENDERER_WEBGL) : null
+ } else {
+ return null
+ }
+}
+
+async function detectGPUCardType(gl) {
+ let debugInfo
+
+ if (gl) {
+    if ((await detectBrowser()) === 'Firefox') {
+ // -- return e.g: "GeForce GTX 980/PCIe/SSE2"
+ return gl.getParameter(gl.RENDERER)
+ }
+
+ debugInfo = gl.getExtension('WEBGL_debug_renderer_info')
+
+ if (debugInfo) {
+ let result = gl.getParameter(debugInfo.UNMASKED_RENDERER_WEBGL)
+ // --e.g. : ANGLE (NVIDIA Corporation, GeForce GTX 1050 Ti/PCIe/SSE2, OpenGL 4.5.0 NVIDIA 390.144) as with Chrome
+ // Or: GeForce GTX 1050 Ti/PCIe/SSE2 as with fireFox
+
+ if (result.indexOf('(') > -1 && result.indexOf(')') > -1 && result.indexOf('(R)') === -1) {
+ result = result.substring(result.indexOf('(') + 1, result.indexOf(')'))
+
+ if (result.split(',').length === 3) {
+ return result.split(',')[1].trim()
+ }
+ }
+
+ return result
+ }
+ }
+ return null
+}
+
+async function getCPUNumCores() {
+ return navigator.hardwareConcurrency
+}
+
+async function isChrome() {
+ return /Chrome/.test(navigator.userAgent) && /Google Inc/.test(navigator.vendor)
+}
+
+async function localSystemDetails(statData, gl = null) {
+ // -- Timing data to collect
+ const today = new Date()
+ if (statData.isModelFullVol) {
+ statData.Brainchop_Ver = 'FullVolume'
+ } else {
+ statData.Brainchop_Ver = 'SubVolumes'
+ }
+
+ /* let geoData = getBrowserLocationInfo()
+ if(geoData) {
+ statData["Country"] = geoData["Country"]
+ statData["State"] = geoData["Region"]
+ statData["City"] = geoData["City"]
+ } else {
+ statData["Country"] = ""
+ statData["State"] = ""
+ statData["City"] = ""
+ } */
+ statData.Total_t = (Date.now() - statData.startTime) / 1000.0
+ delete statData.startTime
+  statData.Date = (today.getMonth() + 1) + '/' + today.getDate() + '/' + today.getFullYear()
+ statData.Browser = await detectBrowser()
+ statData.Browser_Ver = await detectBrowserVersion()
+ statData.OS = await detectOperatingSys()
+ statData.WebGL2 = await checkWebGl2(gl)
+ statData.GPU_Vendor = await detectGPUVendor(gl)
+ statData.GPU_Card = await detectGPUCardType(gl)
+ statData.GPU_Vendor_Full = await detectGPUVendor_v0(gl)
+ statData.GPU_Card_Full = await detectGPUCardType_v0(gl)
+ statData.CPU_Cores = await getCPUNumCores()
+ statData.Which_Brainchop = 'latest'
+ if (await isChrome()) {
+    statData.Heap_Size_MB = (window.performance.memory.totalJSHeapSize / (1024 * 1024)).toFixed(2)
+    statData.Used_Heap_MB = (window.performance.memory.usedJSHeapSize / (1024 * 1024)).toFixed(2)
+    statData.Heap_Limit_MB = (window.performance.memory.jsHeapSizeLimit / (1024 * 1024)).toFixed(2)
+ }
+ if (gl) {
+ console.log('MAX_TEXTURE_SIZE :', gl.getParameter(gl.MAX_TEXTURE_SIZE))
+ console.log('MAX_RENDERBUFFER_SIZE :', gl.getParameter(gl.MAX_RENDERBUFFER_SIZE))
+    // -- Check whether the machine has two graphics cards: e.g. a built-in Intel Iris Pro and a discrete NVIDIA GeForce GT 750M.
+    // -- Check which one the browser uses; if debugInfo is null, the installed discrete GPU is not in use.
+    const debugInfo = gl.getExtension('WEBGL_debug_renderer_info')
+    if (debugInfo) {
+      console.log('VENDOR WEBGL:', gl.getParameter(debugInfo.UNMASKED_VENDOR_WEBGL))
+    }
+ statData.Texture_Size = gl.getParameter(gl.MAX_TEXTURE_SIZE) // --returns the maximum dimension the GPU can address
+ } else {
+ statData.Texture_Size = null
+ }
+ return statData
+}
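+
+// A minimal usage sketch (illustrative only, not part of this module's API):
+//   const gl = document.createElement('canvas').getContext('webgl2')
+//   let statData = { startTime: Date.now(), isModelFullVol: true }
+//   statData = await localSystemDetails(statData, gl)
+//   console.log(statData.Browser, statData.Browser_Ver, statData.GPU_Card)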
diff --git a/brainchop-mainthread.js b/brainchop-mainthread.js
new file mode 100644
index 0000000..dcab2b4
--- /dev/null
+++ b/brainchop-mainthread.js
@@ -0,0 +1,1355 @@
+import * as tf from '@tensorflow/tfjs'
+import { inferenceModelsList } from './brainchop-parameters.js'
+import {
+ addZeroPaddingTo3dTensor,
+ applyMriThreshold,
+ binarizeVolumeDataTensor,
+ convByOutputChannelAndInputSlicing,
+ draw3dObjBoundingVolume,
+ firstLastNonZero3D,
+ generateBrainMask,
+ generateOutputSlicesV2,
+ getAllSlicesDataAsTF3D,
+ getModelNumLayers,
+ getModelNumParameters,
+ isModelChnlLast,
+ load_model,
+ minMaxNormalizeVolumeData,
+ quantileNormalizeVolumeData,
+ removeZeroPaddingFrom3dTensor,
+ resizeWithZeroPadding,
+ SequentialConvLayer
+} from './tensor-utils.js'
+
+async function inferenceFullVolumeSeqCovLayerPhase2(
+ opts,
+ modelEntry,
+ model,
+ slices_3d,
+ num_of_slices,
+ slice_height,
+ slice_width,
+ pipeline1_out,
+ callbackUI,
+ callbackImg,
+ statData,
+ niftiImage
+) {
+  // -- Phase-2: after removing the skull, locate the brain volume and run inference
+
+ console.log(' ---- Start FullVolume Inference with Sequential Conv Layer for phase-II ---- ')
+ // console.log("BOB", callbackUI); console.log("UNCLE",callbackImg); return
+ const quantileNorm = modelEntry.enableQuantileNorm
+ if (quantileNorm) {
+ // Quantile normalize function needs specific models to be used
+ console.log('preModel Quantile normalization enabled')
+ slices_3d = await quantileNormalizeVolumeData(slices_3d)
+ } else {
+    // Min-max normalize the MRI data to the range 0 to 1
+ console.log('preModel Min Max normalization enabled')
+ slices_3d = await minMaxNormalizeVolumeData(slices_3d)
+ }
+
+ let mask_3d
+
+ if (pipeline1_out == null) {
+ // preModel is null
+
+ // Check if thresholding the MRI to remove noisy voxels for better cropping is needed.
+ const autoThresholdValue = modelEntry.autoThreshold
+
+ if (autoThresholdValue > 0 && autoThresholdValue <= 1) {
+ // Filtered MRI from noisy voxel below autoThresholdValue
+ mask_3d = await applyMriThreshold(slices_3d, autoThresholdValue)
+ } else {
+ console.log('No valid crop threshold value')
+ // binarize original image
+ mask_3d = await slices_3d.greater([0]).asType('bool')
+ }
+ } else {
+ mask_3d = await pipeline1_out.greater([0]).asType('bool')
+ // -- pipeline1_out.dispose();
+ }
+ console.log(' mask_3d shape : ', mask_3d.shape)
+ const [row_min, row_max, col_min, col_max, depth_min, depth_max] = await firstLastNonZero3D(mask_3d)
+ mask_3d.dispose()
+  // -- Reference voxel where the cropped volume starts
+ const refVoxel = [row_min, col_min, depth_min]
+  // -- Starting from refVoxel, the size of the bounding volume
+ const boundVolSizeArr = [row_max - row_min + 1, col_max - col_min + 1, depth_max - depth_min + 1]
+
+ // -- Extract 3d object (e.g. brain)
+ const cropped_slices_3d = await slices_3d.slice(
+ [row_min, col_min, depth_min],
+ [row_max - row_min + 1, col_max - col_min + 1, depth_max - depth_min + 1]
+ )
+ slices_3d.dispose()
+
+  // -- Padding size added to the cropped brain
+ const pad = modelEntry.cropPadding
+
+ // Create margin around the bounding volume
+ let cropped_slices_3d_w_pad = await addZeroPaddingTo3dTensor(cropped_slices_3d, [pad, pad], [pad, pad], [pad, pad])
+ console.log(' cropped slices_3d with padding shape: ', cropped_slices_3d_w_pad.shape)
+
+ cropped_slices_3d.dispose()
+
+ if (opts.drawBoundingVolume) {
+ let testVol = await removeZeroPaddingFrom3dTensor(cropped_slices_3d_w_pad, pad, pad, pad)
+ console.log(' outLabelVolume without padding shape : ', testVol.shape)
+
+ testVol = await resizeWithZeroPadding(testVol, num_of_slices, slice_height, slice_width, refVoxel, boundVolSizeArr)
+ console.log(' outLabelVolume final shape after resizing : ', testVol.shape)
+
+ draw3dObjBoundingVolume(tf.unstack(testVol), opts, modelEntry, callbackImg)
+ testVol.dispose()
+
+ return 0
+ }
+
+ statData.Brainchop_Ver = 'FullVolume'
+ // model.then(function (res) {
+ // console.log("--->>>>", opts.drawBoundingVolume); return
+ const res = await model
+ try {
+ let startTime = performance.now()
+ const inferenceStartTime = performance.now()
+ // maxLabelPredicted in whole volume of the brain
+ let maxLabelPredicted = 0
+ const transpose = modelEntry.enableTranspose
+ const delay = modelEntry.inferenceDelay
+ console.log('Inference delay :', delay)
+
+ if (transpose) {
+ cropped_slices_3d_w_pad = await cropped_slices_3d_w_pad.transpose()
+ console.log('Input transposed for pre-model')
+ } else {
+ console.log('Transpose not enabled for pre-model')
+ }
+
+ let i = 1
+ const layersLength = res.layers.length
+ console.log('res.layers.length ', layersLength)
+
+    const isChannelLast = await isModelChnlLast(res)
+ const batchSize = opts.batchSize
+ const numOfChan = opts.numOfChan
+ let adjusted_input_shape
+ // -- Adjust model input shape
+ if (isChannelLast) {
+ res.layers[0].batchInputShape[1] = cropped_slices_3d_w_pad.shape[0]
+ res.layers[0].batchInputShape[2] = cropped_slices_3d_w_pad.shape[1]
+ res.layers[0].batchInputShape[3] = cropped_slices_3d_w_pad.shape[2]
+
+ adjusted_input_shape = [
+ batchSize,
+ res.layers[0].batchInputShape[1],
+ res.layers[0].batchInputShape[2],
+ res.layers[0].batchInputShape[3],
+ numOfChan
+ ]
+ } else {
+ res.layers[0].batchInputShape[2] = cropped_slices_3d_w_pad.shape[0]
+ res.layers[0].batchInputShape[3] = cropped_slices_3d_w_pad.shape[1]
+ res.layers[0].batchInputShape[4] = cropped_slices_3d_w_pad.shape[2]
+
+ adjusted_input_shape = [
+ batchSize,
+ numOfChan,
+ res.layers[0].batchInputShape[2],
+ res.layers[0].batchInputShape[3],
+ res.layers[0].batchInputShape[4]
+ ]
+ }
+
+ console.log(' Model batch input shape : ', res.layers[0].batchInputShape)
+ // -- batchInputShape {Array} input_shape - e.g. [?, D, H, W, Ch] or [?, Ch, D, H, W]
+
+ statData.Input_Shape = JSON.stringify(res.layers[0].batchInputShape)
+ statData.Output_Shape = JSON.stringify(res.output.shape)
+ statData.Channel_Last = isChannelLast
+ statData.Model_Param = await getModelNumParameters(res)
+ statData.Model_Layers = await getModelNumLayers(res)
+ statData.Model = modelEntry.modelName
+ statData.Seq_Conv = modelEntry.enableSeqConv
+ statData.Extra_Info = null
+
+ // Determine the number of output channels in the last layer of the model
+ // e.g. 3, 50, 104
+ const outputLayer = res.layers[res.layers.length - 1]
+ console.log('Output Layer : ', outputLayer)
+
+ const expected_Num_labels = isChannelLast
+ ? outputLayer.outputShape[outputLayer.outputShape.length - 1]
+ : outputLayer.outputShape[1]
+ console.log('Num of output channels : ', expected_Num_labels)
+
+ const curTensor = []
+ curTensor[0] = await cropped_slices_3d_w_pad.reshape(adjusted_input_shape)
+ const timer = window.setInterval(async function () {
+ try {
+ if (res.layers[i].activation.getClassName() !== 'linear') {
+ curTensor[i] = await res.layers[i].apply(curTensor[i - 1])
+ } else {
+ curTensor[i] = await convByOutputChannelAndInputSlicing(
+ curTensor[i - 1],
+ res.layers[i].getWeights()[0],
+ res.layers[i].getWeights()[1],
+ res.layers[i].strides,
+ res.layers[i].padding,
+ res.layers[i].dilationRate,
+ 3
+ ) // important for memory use
+ }
+ tf.dispose(curTensor[i - 1])
+ } catch (err) {
+ const errTxt = 'Your graphics card (e.g. Intel) may not be compatible with WebGL. ' + err.message
+ callbackUI(errTxt, -1, errTxt)
+
+ window.clearInterval(timer)
+ tf.engine().endScope()
+ tf.engine().disposeVariables()
+
+ statData.Inference_t = Infinity
+ statData.Postprocess_t = Infinity
+ statData.Status = 'Fail'
+ statData.Error_Type = err.message
+ statData.Extra_Err_Info = 'Failed while model layer ' + i + ' apply'
+
+ callbackUI('', -1, '', statData)
+
+ return 0
+ }
+
+ console.log('layer output Tensor shape : ', curTensor[i].shape)
+ console.log('layer count params ', res.layers[i].countParams())
+
+ res.layers[i].dispose()
+ curTensor[i - 1].dispose()
+
+ callbackUI('Layer ' + i.toString(), (i + 1) / layersLength)
+ if (tf.memory().unreliable) {
+ const unreliableReasons = 'unreliable reasons :' + tf.memory().reasons
+ callbackUI(unreliableReasons, NaN, unreliableReasons)
+ }
+ if (i === layersLength - 2) {
+ // Stop before the last layer or classification layer.
+
+          window.clearInterval(timer)
+          // Create an instance of SequentialConvLayer.
+          // The second parameter matters for memory use: the larger it is, the more memory the layer consumes.
+          const seqConvLayer = new SequentialConvLayer(res, 10, isChannelLast, callbackUI, false)
+ // Apply the last output tensor to the seq. instance
+ const outputTensor = await seqConvLayer.apply(curTensor[i])
+ callbackUI('seqConvLayer Done')
+ // -- document.getElementById("progressBarChild").style.width = 0 + "%";;
+
+ // Dispose the previous layer input tensor
+ tf.dispose(curTensor[i])
+
+ // You can now use 'outputTensor' as needed
+ console.log(' Output tensor', outputTensor)
+ console.log(' Output tensor shape : ', outputTensor.shape)
+ // Array(3) [ 256, 256, 256 ]
+
+ if (outputTensor.shape.length !== 3) {
+ const msg = 'Output tensor shape should be 3 dims but it is ' + outputTensor.shape.length
+ callbackUI(msg, -1, msg)
+ }
+
+ const Inference_t = ((performance.now() - startTime) / 1000).toFixed(4)
+
+ console.log(' find array max ')
+ const curBatchMaxLabel = await outputTensor.max().dataSync()[0]
+ if (maxLabelPredicted < curBatchMaxLabel) {
+ maxLabelPredicted = curBatchMaxLabel
+ }
+
+ const numSegClasses = maxLabelPredicted + 1
+ console.log('Predicted num of segmentation classes', numSegClasses)
+ statData.Actual_Labels = numSegClasses
+ statData.Expect_Labels = expected_Num_labels
+ statData.NumLabels_Match = numSegClasses === expected_Num_labels
+ if (numSegClasses !== expected_Num_labels) {
+ const msg = 'expected ' + expected_Num_labels + ' labels, but the predicted are ' + numSegClasses
+ callbackUI(msg, -1, msg)
+ }
+
+ // -- Transpose back to original unpadded size
+ let outLabelVolume = outputTensor.reshape([
+ cropped_slices_3d_w_pad.shape[0],
+ cropped_slices_3d_w_pad.shape[1],
+ cropped_slices_3d_w_pad.shape[2]
+ ])
+ tf.dispose(outputTensor)
+
+          // Transpose the output back to match the PyTorch/Keras input/output orientation
+ if (transpose) {
+ console.log('outLabelVolume transposed')
+ outLabelVolume = outLabelVolume.transpose()
+ }
+
+ outLabelVolume = await removeZeroPaddingFrom3dTensor(outLabelVolume, pad, pad, pad)
+ console.log(' outLabelVolume without padding shape : ', outLabelVolume.shape)
+ outLabelVolume = await resizeWithZeroPadding(
+ outLabelVolume,
+ num_of_slices,
+ slice_height,
+ slice_width,
+ refVoxel,
+ boundVolSizeArr
+ )
+ console.log(' outLabelVolume final shape after resizing : ', outLabelVolume.shape)
+
+ const filterOutWithPreMask = modelEntry.filterOutWithPreMask
+          // Clean the skull area wrongly segmented in phase-2.
+ if (pipeline1_out != null && opts.isBrainCropMaskBased && filterOutWithPreMask) {
+ const bin = await binarizeVolumeDataTensor(pipeline1_out)
+ outLabelVolume = await outLabelVolume.mul(bin)
+ }
+
+ startTime = performance.now()
+ // Generate output volume or slices
+ console.log('Generating correct output')
+ let outimg
+ try {
+ const img = await new Uint32Array(outLabelVolume.dataSync())
+ const Vshape = outLabelVolume.shape
+ const Vtype = outLabelVolume.dtype
+ outimg = await generateOutputSlicesV2(
+ img,
+ Vshape,
+ Vtype,
+ num_of_slices,
+ numSegClasses,
+ slice_height,
+ slice_width,
+ modelEntry,
+ opts,
+ niftiImage
+ )
+ console.log(' Phase-2 num of tensors after generateOutputSlicesV2: ', tf.memory().numTensors)
+
+ tf.dispose(outLabelVolume)
+ tf.engine().endScope()
+ tf.engine().disposeVariables()
+ } catch (error) {
+ // -- Timing data to collect
+ tf.engine().endScope()
+ tf.engine().disposeVariables()
+ console.log('Error while generating output: ', error)
+ const msg = 'Failed while generating output due to limited browser memory available'
+ callbackUI(msg, -1, msg)
+
+ statData.Inference_t = Inference_t
+ statData.Postprocess_t = Infinity
+ statData.Status = 'Fail'
+ statData.Error_Type = error.message
+ statData.Extra_Err_Info = 'Failed while generating output'
+
+ callbackUI('', -1, '', statData)
+
+ return 0
+ }
+ const Postprocess_t = ((performance.now() - startTime) / 1000).toFixed(4)
+
+ console.log(
+ 'Processing the whole brain volume in tfjs for multi-class output mask took : ',
+ ((performance.now() - inferenceStartTime) / 1000).toFixed(4) + ' Seconds'
+ )
+
+ // -- Timing data to collect
+ statData.Inference_t = Inference_t
+ statData.Postprocess_t = Postprocess_t
+ statData.Status = 'OK'
+
+ callbackUI('', -1, '', statData)
+ callbackUI('Segmentation finished', 0)
+ callbackImg(outimg, opts, modelEntry)
+ return 0
+ } else {
+ i++
+ }
+ }, delay)
+ } catch (err) {
+ callbackUI(err.message, -1, err.message)
+    console.log(
+      'If the WebGL context is lost, try to restore it by visiting the link ' +
+        'here'
+    )
+ if (tf.memory().unreliable) {
+ const unreliableReasons = 'unreliable reasons :' + tf.memory().reasons
+ callbackUI(unreliableReasons, NaN, unreliableReasons)
+ }
+ }
+}
+
+async function inferenceFullVolumePhase2(
+ model,
+ slices_3d,
+ num_of_slices,
+ slice_height,
+ slice_width,
+ pipeline1_out,
+ modelEntry,
+ statData,
+ opts,
+ callbackImg,
+ callbackUI,
+ niftiImage
+) {
+ let outimg = []
+  // -- Phase-2: after removing the skull, locate the brain volume and run inference
+ console.log(' ---- Start FullVolume inference phase-II ---- ')
+ const quantileNorm = modelEntry.enableQuantileNorm
+ if (quantileNorm) {
+ // Quantile normalize function needs specific models to be used
+ console.log('preModel Quantile normalization enabled')
+ slices_3d = await quantileNormalizeVolumeData(slices_3d)
+ } else {
+    // Min-max normalize the MRI data to the range 0 to 1
+ console.log('preModel Min Max normalization enabled')
+ slices_3d = await minMaxNormalizeVolumeData(slices_3d)
+ }
+ let mask_3d
+ if (pipeline1_out == null) {
+ // preModel is null
+
+ // Check if thresholding the MRI to remove noisy voxels for better cropping is needed.
+ const autoThresholdValue = modelEntry.autoThreshold
+
+ if (autoThresholdValue > 0 && autoThresholdValue <= 1) {
+ // Filtered MRI from noisy voxel below autoThresholdValue
+ mask_3d = await applyMriThreshold(slices_3d, autoThresholdValue)
+ } else {
+ console.log('No valid crop threshold value')
+ // binarize original image
+ mask_3d = await slices_3d.greater([0]).asType('bool')
+ }
+ } else {
+ mask_3d = pipeline1_out.greater([0]).asType('bool')
+ // -- pipeline1_out.dispose()
+ }
+ console.log(' mask_3d shape : ', mask_3d.shape)
+ const [row_min, row_max, col_min, col_max, depth_min, depth_max] = await firstLastNonZero3D(mask_3d)
+ mask_3d.dispose()
+  // -- Reference voxel where the cropped volume starts
+ const refVoxel = [row_min, col_min, depth_min]
+ console.log('refVoxel :', refVoxel)
+
+  // -- Starting from refVoxel, the size of the bounding volume
+ const boundVolSizeArr = [row_max - row_min + 1, col_max - col_min + 1, depth_max - depth_min + 1]
+
+ console.log('boundVolSizeArr :', boundVolSizeArr)
+
+ // -- Extract 3d object (e.g. brain)
+ const cropped_slices_3d = slices_3d.slice(
+ [row_min, col_min, depth_min],
+ [row_max - row_min + 1, col_max - col_min + 1, depth_max - depth_min + 1]
+ )
+
+ slices_3d.dispose()
+
+  // -- Padding size added to the cropped brain
+ const pad = modelEntry.cropPadding
+
+ // Create margin around the bounding volume
+ let cropped_slices_3d_w_pad = await addZeroPaddingTo3dTensor(cropped_slices_3d, [pad, pad], [pad, pad], [pad, pad])
+ console.log(' cropped slices_3d with padding shape: ', cropped_slices_3d_w_pad.shape)
+
+ cropped_slices_3d.dispose()
+
+ // -- Test dim after padding ..
+ // for (let i = 0; i < cropped_slices_3d_w_pad.rank; i++) {
+ // if(cropped_slices_3d_w_pad.shape[i] > 256) {
+ // console.log(" cropped_slices_3d_w_pad > 256 ")
+ // }
+
+ // }
+
+ if (opts.drawBoundingVolume) {
+ let testVol = await removeZeroPaddingFrom3dTensor(cropped_slices_3d_w_pad, pad, pad, pad)
+ console.log(' outLabelVolume without padding shape : ', testVol.shape)
+
+ testVol = await resizeWithZeroPadding(testVol, num_of_slices, slice_height, slice_width, refVoxel, boundVolSizeArr)
+ console.log(' outLabelVolume final shape after resizing : ', testVol.shape)
+ draw3dObjBoundingVolume(tf.unstack(testVol), opts, modelEntry, callbackImg)
+
+ testVol.dispose()
+
+ return 0
+ }
+
+ statData.Brainchop_Ver = 'FullVolume'
+ let startTime = performance.now()
+ let adjusted_input_shape = []
+ const res = await model
+ try {
+ startTime = performance.now()
+ const inferenceStartTime = performance.now()
+ // maxLabelPredicted in whole volume of the brain
+ let maxLabelPredicted = 0
+ const transpose = modelEntry.enableTranspose
+ const delay = modelEntry.inferenceDelay
+ console.log('Inference delay :', delay)
+
+ if (transpose) {
+ cropped_slices_3d_w_pad = cropped_slices_3d_w_pad.transpose()
+ console.log('Input transposed for pre-model')
+ } else {
+ console.log('Transpose not enabled for pre-model')
+ }
+
+ let i = 1
+ const layersLength = res.layers.length
+ console.log('res.layers.length ', layersLength)
+
+ const isChannelLast = await isModelChnlLast(res)
+ const batchSize = opts.batchSize
+ const numOfChan = opts.numOfChan
+
+ // -- Adjust model input shape
+ if (isChannelLast) {
+ res.layers[0].batchInputShape[1] = cropped_slices_3d_w_pad.shape[0]
+ res.layers[0].batchInputShape[2] = cropped_slices_3d_w_pad.shape[1]
+ res.layers[0].batchInputShape[3] = cropped_slices_3d_w_pad.shape[2]
+
+ adjusted_input_shape = [
+ batchSize,
+ res.layers[0].batchInputShape[1],
+ res.layers[0].batchInputShape[2],
+ res.layers[0].batchInputShape[3],
+ numOfChan
+ ]
+ } else {
+ res.layers[0].batchInputShape[2] = cropped_slices_3d_w_pad.shape[0]
+ res.layers[0].batchInputShape[3] = cropped_slices_3d_w_pad.shape[1]
+ res.layers[0].batchInputShape[4] = cropped_slices_3d_w_pad.shape[2]
+
+ adjusted_input_shape = [
+ batchSize,
+ numOfChan,
+ res.layers[0].batchInputShape[2],
+ res.layers[0].batchInputShape[3],
+ res.layers[0].batchInputShape[4]
+ ]
+ }
+
+ console.log(' Model batch input shape : ', res.layers[0].batchInputShape)
+ // -- batchInputShape {Array} input_shape - e.g. [?, D, H, W, Ch] or [?, Ch, D, H, W]
+
+ statData.Input_Shape = JSON.stringify(res.layers[0].batchInputShape)
+ statData.Output_Shape = JSON.stringify(res.output.shape)
+ statData.Channel_Last = isChannelLast
+ statData.Model_Param = await getModelNumParameters(res)
+ statData.Model_Layers = await getModelNumLayers(res)
+ statData.Model = modelEntry.modelName
+ statData.Extra_Info = null
+
+ const curTensor = []
+ curTensor[0] = cropped_slices_3d_w_pad.reshape(adjusted_input_shape)
+ const timer = window.setInterval(async function () {
+ try {
+ curTensor[i] = res.layers[i].apply(curTensor[i - 1])
+ } catch (err) {
+ callbackUI(err.message, -1, err.message)
+ window.clearInterval(timer)
+ tf.engine().endScope()
+ tf.engine().disposeVariables()
+
+ statData.Inference_t = Infinity
+ statData.Postprocess_t = Infinity
+ statData.Status = 'Fail'
+ statData.Error_Type = err.message
+ statData.Extra_Err_Info = 'Failed while model layer ' + i + ' apply'
+
+ callbackUI('', -1, '', statData)
+
+ return 0
+ }
+ callbackUI('Layer ' + i.toString(), (i + 1) / layersLength)
+ console.log('layer output Tensor shape : ', curTensor[i].shape)
+ console.log('layer count params ', res.layers[i].countParams())
+ res.layers[i].dispose()
+ curTensor[i - 1].dispose()
+ if (tf.memory().unreliable) {
+ const unreliableReasons = 'unreliable reasons :' + tf.memory().reasons
+ callbackUI(unreliableReasons, NaN, unreliableReasons)
+ }
+ if (i === layersLength - 1) {
+ window.clearInterval(timer)
+
+ const axis = isChannelLast ? -1 : 1
+ console.log(' find argmax ')
+ console.log('last Tensor shape : ', curTensor[i].shape)
+ // -- curTensor[i].shape e.g. [ 1, 256, 256, 256, 3 ]
+ const expected_Num_labels = isChannelLast ? curTensor[i].shape[4] : curTensor[i].shape[1]
+ let prediction_argmax
+
+ // Try for argMax with model output tensor.
+
+ try {
+ const argMaxTime = performance.now()
+ console.log(' Try tf.argMax for fullVolume ..')
+ prediction_argmax = tf.argMax(curTensor[i], axis)
+ console.log('tf.argMax for fullVolume takes : ', ((performance.now() - argMaxTime) / 1000).toFixed(4))
+ } catch (err1) {
+ // if channel last
+ if (axis === -1) {
+ try {
+ const argMaxLargeTime = performance.now()
+ console.log(' tf.argMax failed .. try argMaxLarge ..')
+ window.alert('tensor2LightBuffer() is not dead code?')
+ window.alert('argMaxLarge() is not dead code?')
+ console.log(
+ 'argMaxLarge for fullVolume takes : ',
+ ((performance.now() - argMaxLargeTime) / 1000).toFixed(4)
+ )
+ } catch (err2) {
+ const errTxt = "argMax buffer couldn't be created due to limited memory resources."
+ callbackUI(errTxt, -1, errTxt)
+
+ window.clearInterval(timer)
+ tf.engine().endScope()
+ tf.engine().disposeVariables()
+
+ statData.Inference_t = Infinity
+ statData.Postprocess_t = Infinity
+ statData.Status = 'Fail'
+ statData.Error_Type = err2.message
+ statData.Extra_Err_Info = 'prediction_argmax from argMaxLarge failed'
+
+ callbackUI('', -1, '', statData)
+
+ return 0
+ }
+ } else {
+ // if channel first ..
+ const errTxt = "argMax buffer couldn't be created due to limited memory resources."
+ callbackUI(errTxt, -1, errTxt)
+
+                if (prediction_argmax) prediction_argmax.dispose()
+
+ window.clearInterval(timer)
+ tf.engine().endScope()
+ tf.engine().disposeVariables()
+
+ statData.Inference_t = Infinity
+ statData.Postprocess_t = Infinity
+ statData.Status = 'Fail'
+ statData.Error_Type = err1.message
+ statData.Extra_Err_Info = 'prediction_argmax from argMaxLarge not support yet channel first'
+
+ callbackUI('', -1, '', statData)
+
+ return 0
+ }
+ }
+
+ console.log(' prediction_argmax shape : ', prediction_argmax.shape)
+ // -- prediction_argmax.shape : [ 1, 256, 256, 256]
+
+ const Inference_t = ((performance.now() - startTime) / 1000).toFixed(4)
+
+ // outputDataBeforArgmx = Array.from(prediction_argmax.dataSync())
+ tf.dispose(curTensor[i])
+ // allPredictions.push({"id": allBatches[j].id, "coordinates": allBatches[j].coordinates, "data": Array.from(prediction_argmax.dataSync()) })
+ const curBatchMaxLabel = await prediction_argmax.max().dataSync()[0]
+ if (maxLabelPredicted < curBatchMaxLabel) {
+ maxLabelPredicted = curBatchMaxLabel
+ }
+
+ const numSegClasses = maxLabelPredicted + 1
+ console.log('numSegClasses', numSegClasses)
+ statData.Actual_Labels = numSegClasses
+ statData.Expect_Labels = expected_Num_labels
+ statData.NumLabels_Match = numSegClasses === expected_Num_labels
+
+ if (numSegClasses !== expected_Num_labels) {
+ // errTxt = "expected " + expected_Num_labels + " labels, but the predicted are " + numSegClasses + ". For possible solutions please refer to FAQ .", "alert-error"
+ const errTxt = 'expected ' + expected_Num_labels + ' labels, but the predicted are ' + numSegClasses
+ callbackUI(errTxt, -1, errTxt)
+ }
+
+ // -- Transpose back to original unpadded size
+ let outLabelVolume = prediction_argmax.reshape([
+ cropped_slices_3d_w_pad.shape[0],
+ cropped_slices_3d_w_pad.shape[1],
+ cropped_slices_3d_w_pad.shape[2]
+ ])
+ tf.dispose(prediction_argmax)
+
+          // Transpose the output back to match the PyTorch/Keras input/output orientation
+ if (transpose) {
+ console.log('outLabelVolume transposed')
+ outLabelVolume = outLabelVolume.transpose()
+ }
+ outLabelVolume = await removeZeroPaddingFrom3dTensor(outLabelVolume, pad, pad, pad)
+ console.log(' outLabelVolume without padding shape : ', outLabelVolume.shape)
+ outLabelVolume = await resizeWithZeroPadding(
+ outLabelVolume,
+ num_of_slices,
+ slice_height,
+ slice_width,
+ refVoxel,
+ boundVolSizeArr
+ )
+ console.log(' outLabelVolume final shape after resizing : ', outLabelVolume.shape)
+
+ const filterOutWithPreMask = modelEntry.filterOutWithPreMask
+          // Clean the skull area wrongly segmented in phase-2.
+ if (pipeline1_out != null && opts.isBrainCropMaskBased && filterOutWithPreMask) {
+            const bin = await binarizeVolumeDataTensor(pipeline1_out)
+ outLabelVolume = outLabelVolume.mul(bin)
+ }
+
+ startTime = performance.now()
+ // Generate output volume or slices
+ console.log('Generating correct output')
+
+ try {
+ const img = new Uint32Array(outLabelVolume.dataSync())
+ const Vshape = outLabelVolume.shape
+ const Vtype = outLabelVolume.dtype
+ tf.dispose(outLabelVolume)
+ tf.engine().endScope()
+ tf.engine().disposeVariables()
+ outimg = await generateOutputSlicesV2(
+ img,
+ Vshape,
+ Vtype,
+ num_of_slices,
+ numSegClasses,
+ slice_height,
+ slice_width,
+ modelEntry,
+ opts,
+ niftiImage
+ )
+ console.log(' Phase-2 num of tensors after generateOutputSlicesV2: ', tf.memory().numTensors)
+ } catch (error) {
+ // -- Timing data to collect
+ tf.engine().endScope()
+ tf.engine().disposeVariables()
+
+ const errTxt = 'Failed while generating output due to limited browser memory available'
+ callbackUI(errTxt, -1, errTxt)
+ statData.Inference_t = Inference_t
+ statData.Postprocess_t = Infinity
+ statData.Status = 'Fail'
+ statData.Error_Type = error.message
+ statData.Extra_Err_Info = 'Failed while generating output'
+
+ callbackUI('', -1, '', statData)
+
+ return 0
+ }
+
+ const Postprocess_t = ((performance.now() - startTime) / 1000).toFixed(4)
+
+ // tf.engine().endScope()
+ tf.engine().disposeVariables()
+
+ console.log(
+ 'Processing the whole brain volume in tfjs for multi-class output mask took : ',
+ ((performance.now() - inferenceStartTime) / 1000).toFixed(4) + ' Seconds'
+ )
+
+ // -- Timing data to collect
+ statData.Inference_t = Inference_t
+ statData.Postprocess_t = Postprocess_t
+ statData.Status = 'OK'
+
+ callbackUI('', -1, '', statData)
+ clearInterval(timer)
+ callbackUI('Segmentation finished', 0)
+ callbackImg(outimg, opts, modelEntry)
+ return 0
+ }
+ i++
+ }, delay)
+ } catch (err) {
+ callbackUI(err.message, -1, err.message)
+    console.log(
+      'If the WebGL context is lost, try to restore it by visiting the link ' +
+        'here'
+    )
+ }
+ // })
+}
+
+async function inferenceFullVolumePhase1(
+ model,
+ slices_3d,
+ num_of_slices,
+ slice_height,
+ slice_width,
+ isModelFullVol,
+ modelEntry,
+ statData,
+ opts,
+ callbackImg,
+ callbackUI,
+ niftiImage
+) {
+ statData.No_SubVolumes = 1
+  // Load the pre-model first; it can be null for models that need no pre-model (e.g. GWM models)
+ if (modelEntry.preModelId) {
+ const preModel = await load_model(opts.rootURL + inferenceModelsList[modelEntry.preModelId - 1].path)
+ const transpose = inferenceModelsList[modelEntry.preModelId - 1].enableTranspose
+ const quantileNorm = inferenceModelsList[modelEntry.preModelId - 1].enableQuantileNorm
+ let preModel_slices_3d = null
+
+    // -- If the pre-model is not null, a slices_3d mask will be generated.
+    // -- The mask removes the skull, sets background noise to 0, and yields a proper brain bounding volume.
+ const slices_3d_mask = null
+
+ if (quantileNorm) {
+ // Quantile normalize function needs specific models to be used
+ console.log('preModel Quantile normalization enabled')
+ preModel_slices_3d = await quantileNormalizeVolumeData(slices_3d)
+ } else {
+      // Min-max normalize the MRI data to the range 0 to 1
+ console.log('preModel Min Max normalization enabled')
+ preModel_slices_3d = await minMaxNormalizeVolumeData(slices_3d)
+ }
+
+    // -- Transpose MRI data to match the PyTorch/Keras input/output orientation
+ // -- Check if pre-model needs transpose..
+ if (transpose) {
+ preModel_slices_3d = await preModel_slices_3d.transpose()
+ console.log('Input transposed for pre-model')
+ } else {
+ console.log('Transpose not enabled for pre-model')
+ }
+
+ statData.Brainchop_Ver = 'PreModel_FV' // e.g. "PreModel_FV"
+
+ // preModel.then(function (res) {
+ const res = await preModel
+
+ try {
+ const inferenceStartTime = performance.now()
+ const preModelObject = res
+
+ // read input shape from model.json object
+ const preModelBatchInputShape = preModelObject.layers[0].batchInputShape
+ console.log(' Pre-Model batch input shape : ', preModelBatchInputShape)
+
+ // -- Verify input shape
+ if (preModelBatchInputShape.length !== 5) {
+ const errTxt = 'The pre-model input shape must be 5D '
+ callbackUI(errTxt, -1, errTxt)
+ return 0
+ }
+
+      const isPreModelChannelLast = await isModelChnlLast(preModelObject)
+ const batchSize = opts.batchSize
+ const numOfChan = opts.numOfChan
+ let batch_D, batch_H, batch_W
+ let preModel_input_shape
+ if (isPreModelChannelLast) {
+ console.log('Pre-Model Channel Last')
+ if (isNaN(preModelBatchInputShape[4]) || preModelBatchInputShape[4] !== 1) {
+ const errTxt = 'The number of channels for pre-model input shape must be 1'
+ callbackUI(errTxt, -1, errTxt)
+ return 0
+ }
+
+ batch_D = preModelBatchInputShape[1]
+ batch_H = preModelBatchInputShape[2]
+ batch_W = preModelBatchInputShape[3]
+
+ preModel_input_shape = [batchSize, batch_D, batch_H, batch_W, numOfChan]
+ } else {
+ console.log('Pre-Model Channel First')
+ if (isNaN(preModelBatchInputShape[1]) || preModelBatchInputShape[1] !== 1) {
+ const errTxt = 'The number of channels for pre-model input shape must be 1'
+ callbackUI(errTxt, -1, errTxt)
+ return 0
+ }
+
+ batch_D = preModelBatchInputShape[2]
+ batch_H = preModelBatchInputShape[3]
+ batch_W = preModelBatchInputShape[4]
+
+ preModel_input_shape = [batchSize, numOfChan, batch_D, batch_H, batch_W]
+ }
+
+ statData.Input_Shape = JSON.stringify(preModel_input_shape)
+ statData.Output_Shape = JSON.stringify(preModelObject.output.shape)
+ statData.Channel_Last = isPreModelChannelLast
+ statData.Model_Param = await getModelNumParameters(preModelObject)
+ statData.Model_Layers = await getModelNumLayers(preModelObject)
+
+ // maxLabelPredicted in whole volume of the brain
+ let maxLabelPredicted = 0
+ const delay = inferenceModelsList[modelEntry.preModelId - 1].inferenceDelay
+
+ let i = 1
+ const layersLength = res.layers.length
+
+ const curTensor = []
+ // -- reshape MRI to model input shape
+ curTensor[0] = preModel_slices_3d.reshape(preModel_input_shape)
+
+ // Dispose the volume
+ tf.dispose(preModel_slices_3d)
+
+ const timer = window.setInterval(async function () {
+ try {
+ curTensor[i] = await res.layers[i].apply(curTensor[i - 1])
+ } catch (err) {
+ const errTxt = 'Your graphics card (e.g. Intel) may not be compatible with WebGL. ' + err.message
+ callbackUI(errTxt, -1, errTxt)
+
+ window.clearInterval(timer)
+ tf.engine().endScope()
+ tf.engine().disposeVariables()
+
+ statData.Inference_t = Infinity
+ statData.Postprocess_t = Infinity
+ statData.Status = 'Fail'
+ statData.Error_Type = err.message
+ statData.Extra_Err_Info = 'PreModel Failed while model layer ' + i + ' apply'
+
+ callbackUI('', -1, '', statData)
+
+ return 0
+ }
+
+ res.layers[i].dispose()
+ curTensor[i - 1].dispose()
+
+ callbackUI('Layer ' + i.toString(), (i + 1) / layersLength)
+ if (tf.memory().unreliable) {
+ const unreliableReasons = 'unreliable reasons :' + tf.memory().reasons
+ callbackUI(unreliableReasons, NaN, unreliableReasons)
+ }
+
+ if (i === layersLength - 1) {
+ window.clearInterval(timer)
+
+ // -- prediction = res.layers[res.layers.length-1].apply(curTensor[i])
+ // -- curTensor[i].print()
+ // -- outputDataBeforArgmx = Array.from(curTensor[i].dataSync())
+
+ const axis = isPreModelChannelLast ? -1 : 1
+ console.log(' find argmax ')
+ console.log('last Tensor shape : ', curTensor[i].shape)
+ // -- curTensor[i].shape : [ 1, 256, 256, 256, 3 ]
+ const expected_Num_labels = isPreModelChannelLast ? curTensor[i].shape[4] : curTensor[i].shape[1]
+ let prediction_argmax
+
+ // Try for argMax with model output tensor.
+
+ try {
+ console.log(' Try tf.argMax for fullVolume ..')
+ prediction_argmax = await tf.argMax(curTensor[i], axis)
+ } catch (err1) {
+ // if channel last
+ if (axis === -1) {
+ try {
+ const argMaxLargeTime = performance.now()
+ console.log(' tf.argMax failed .. try argMaxLarge ..')
+ window.alert('tensor2LightBuffer() is not dead code?')
+ window.alert('argMaxLarge() is not dead code?')
+ console.log(
+ 'argMaxLarge for fullVolume takes : ',
+ ((performance.now() - argMaxLargeTime) / 1000).toFixed(4)
+ )
+ } catch (err2) {
+ const errTxt = "argMax buffer couldn't be created due to limited memory resources."
+ callbackUI(errTxt, -1, errTxt)
+
+                  if (prediction_argmax) prediction_argmax.dispose()
+
+ window.clearInterval(timer)
+ tf.engine().endScope()
+ tf.engine().disposeVariables()
+
+ statData.Inference_t = Infinity
+ statData.Postprocess_t = Infinity
+ statData.Status = 'Fail'
+ statData.Error_Type = err2.message
+ statData.Extra_Err_Info = 'preModel prediction_argmax from argMaxLarge failed'
+
+ callbackUI('', -1, '', statData)
+
+ return 0
+ }
+ } else {
+ // if channel first ..
+ const errTxt = "argMax buffer couldn't be created due to limited memory resources."
+ callbackUI(errTxt, -1, errTxt)
+
+                if (prediction_argmax) prediction_argmax.dispose()
+
+ window.clearInterval(timer)
+ tf.engine().endScope()
+ tf.engine().disposeVariables()
+
+ statData.Inference_t = Infinity
+ statData.Postprocess_t = Infinity
+ statData.Status = 'Fail'
+ statData.Error_Type = err1.message
+ statData.Extra_Err_Info = 'preModel prediction_argmax from argMaxLarge not support yet channel first'
+
+ callbackUI('', -1, '', statData)
+
+ return 0
+ }
+ }
+ console.log(' Pre-model prediction_argmax shape : ', prediction_argmax.shape)
+ // -- prediction_argmax.shape : [ 1, 256, 256, 256]
+
+ const Inference_t = ((performance.now() - inferenceStartTime) / 1000).toFixed(4)
+
+ tf.dispose(curTensor[i])
+
+ console.log(' Pre-model find array max ')
+ const curBatchMaxLabel = await prediction_argmax.max().dataSync()[0]
+ if (maxLabelPredicted < curBatchMaxLabel) {
+ maxLabelPredicted = curBatchMaxLabel
+ }
+
+ const numSegClasses = maxLabelPredicted + 1
+ console.log('Pre-model numSegClasses', numSegClasses)
+
+ statData.Actual_Labels = numSegClasses
+ statData.Expect_Labels = expected_Num_labels
+ statData.NumLabels_Match = numSegClasses === expected_Num_labels
+
+ // -- Transpose back to original unpadded size
+ let outLabelVolume = await prediction_argmax.reshape([num_of_slices, slice_height, slice_width])
+ tf.dispose(prediction_argmax)
+            // Transpose the output back to match the PyTorch/Keras input/output orientation
+ if (transpose) {
+ console.log('Pre-model outLabelVolume transposed')
+ outLabelVolume = outLabelVolume.transpose()
+ }
+ const startTime = performance.now()
+ // Generate output volume or slices
+ console.log('Generating pre-model output')
+ let slices_3d_mask
+ try {
+ const unstackOutVolumeTensor = await tf.unstack(outLabelVolume)
+ slices_3d_mask = await generateBrainMask(
+ unstackOutVolumeTensor,
+ num_of_slices,
+ slice_height,
+ slice_width,
+ modelEntry,
+ opts,
+ callbackUI,
+ callbackImg,
+ false
+ )
+ await tf.dispose(outLabelVolume)
+ console.log(' Phase-1 num of tensors after generateBrainMask: ', tf.memory().numTensors)
+ } catch (error) {
+ // -- Timing data to collect
+ tf.engine().endScope()
+ tf.engine().disposeVariables()
+
+ const errTxt = 'Failed while generating pre-model output due to limited browser memory available'
+ callbackUI(errTxt, -1, errTxt)
+
+ statData.Inference_t = Inference_t
+ statData.Postprocess_t = Infinity
+ statData.Status = 'Fail'
+ statData.Error_Type = error.message
+ statData.Extra_Err_Info = 'Pre-model failed while generating output'
+
+ callbackUI('', -1, '', statData)
+
+ return 0
+ }
+ const Postprocess_t = ((performance.now() - startTime) / 1000).toFixed(4)
+ console.log(
+              'Pre-model processing of the whole brain volume in tfjs for the multi-class output mask took : ',
+ ((performance.now() - inferenceStartTime) / 1000).toFixed(4) + ' Seconds'
+ )
+
+ // -- Timing data to collect
+ statData.Inference_t = Inference_t
+ statData.Postprocess_t = Postprocess_t
+ statData.Status = 'OK'
+
+ callbackUI('', -1, '', statData)
+
+ if (slices_3d_mask == null) {
+              const msg = 'slices_3d_mask failed ...'
+ callbackUI(msg, -1, msg)
+ return 0
+ } else {
+              // -- Phase-2: after removing the skull, locate the brain volume and run inference
+ console.log('--- pre-model done ---')
+ // --mask_3d = slices_3d_mask.greater([0]).asType('bool')
+ // --slices_3d_mask.dispose()
+
+ if (isModelFullVol) {
+ if (modelEntry.enableSeqConv) {
+ // Mask cropping & seq conv
+                  // Non-Atlas models (e.g. GWM) need a sequential convolution layer.
+                  // The sequential convolution layer is used after cropping - slow but reliable on most machines.
+                  console.log('------ Mask Cropping & Seq Convolution ------')
+ await inferenceFullVolumeSeqCovLayerPhase2(
+ opts,
+ modelEntry,
+ model,
+ slices_3d,
+ num_of_slices,
+ slice_height,
+ slice_width,
+ slices_3d_mask,
+ callbackUI,
+ callbackImg,
+ statData,
+ niftiImage
+ )
+ return 0
+ // inferenceFullVolumeSeqCovLayerPhase2(model, slices_3d.transpose(), num_of_slices, slice_height, slice_width, slices_3d_mask)
+ } else {
+ // Mask cropping BUT no seq conv
+              console.log('------ Mask Cropping - NO Seq Convolution ------')
+ await inferenceFullVolumePhase2(
+ model,
+ slices_3d,
+ num_of_slices,
+ slice_height,
+ slice_width,
+ slices_3d_mask,
+ modelEntry,
+ statData,
+ opts,
+ callbackImg,
+ callbackUI,
+ niftiImage
+ )
+ // inferenceFullVolumePhase2(model, slices_3d.transpose(), num_of_slices, slice_height, slice_width, slices_3d_mask)
+ }
+ } else {
+              // -- In version 3.0.0 this function is not used
+ window.alert('inferenceSubVolumes() is not dead code?')
+ }
+ }
+ }
+ i++
+ }, delay)
+ } catch (err) {
+ callbackUI(err.message, -1, err.message)
+    console.log(
+      'If the WebGL context is lost, try to restore it by visiting the link ' +
+        'here'
+    )
+
+ // document.getElementById("webGl2Status").style.backgroundColor = isWebGL2ContextLost() ? "Red" : "Green"
+
+ // document.getElementById("memoryStatus").style.backgroundColor = tf.memory().unreliable ? "Red" : "Green"
+ }
+ // })
+
+ // -- if(...) end
+ } else {
+ // No preModel
+
+    // -- Phase-2: after removing the skull, locate the brain volume and run inference
+ console.log('--- No pre-model is selected ---')
+ console.log('------ Run voxel cropping ------')
+ // -- mask_3d = slices_3d.greater([0]).asType('bool')
+
+ if (isModelFullVol) {
+ if (modelEntry.enableSeqConv) {
+ // Voxel cropping & seq conv
+        // Non-Atlas models (e.g. GWM) need a sequential convolution layer.
+        // The sequential convolution layer is used after cropping - slow but reliable on most machines.
+        console.log('------ Seq Convolution ------')
+ await inferenceFullVolumeSeqCovLayerPhase2(
+ opts,
+ modelEntry,
+ model,
+ slices_3d,
+ num_of_slices,
+ slice_height,
+ slice_width,
+ null,
+ callbackUI,
+ callbackImg,
+ statData,
+ niftiImage
+ )
+ } else {
+ // Voxel cropping BUT no seq conv
+        // todo: the result is not used; could be assigned via "const outimg = await ..."
+ inferenceFullVolumePhase2(
+ model,
+ slices_3d,
+ num_of_slices,
+ slice_height,
+ slice_width,
+ null,
+ modelEntry,
+ statData,
+ opts,
+ callbackImg,
+ callbackUI,
+ niftiImage
+ )
+ }
+ } else {
+      // -- In version 3.0.0 this function is not used
+ window.alert('inferenceSubVolumes() is not dead code?')
+ }
+ }
+}
+
+async function enableProductionMode(textureF16Flag = true) {
+ // -- tf.setBackend('cpu')
+ // -- tf.removeBackend('cpu')
+ // -- Calling enableProdMode() method
+ await tf.enableProdMode()
+ // -- Setting debug mode of the environment
+ tf.env().set('DEBUG', false)
+ tf.env().set('WEBGL_FORCE_F16_TEXTURES', textureF16Flag)
+ // -- set this flag so that textures are deleted when tensors are disposed.
+ tf.env().set('WEBGL_DELETE_TEXTURE_THRESHOLD', 0)
+ // -- tf.env().set('WEBGL_PACK', false)
+ // -- Put ready after sets above
+ await tf.ready()
+ // -- Printing output
+ console.log('tf env() flags :', tf.env().flags)
+ console.log('tf env() features :', tf.env().features)
+ console.log('tf env total features: ', Object.keys(tf.env().features).length)
+ console.log(tf.getBackend())
+}
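+
+// A minimal CPU-fallback sketch, assuming the CPU backend bundled with the full
+// '@tensorflow/tfjs' package imported above is available:
+//   await tf.setBackend('cpu')
+//   await tf.ready()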
+
+export async function runInference(opts, modelEntry, niftiHeader, niftiImage, callbackImg, callbackUI) {
+  const statData = {}
+ statData.startTime = Date.now() // for common webworker/mainthread do not use performance.now()
+ callbackUI('Segmentation started', 0)
+ const startTime = performance.now()
+ const batchSize = opts.batchSize
+ const numOfChan = opts.numOfChan
+ if (isNaN(batchSize) || batchSize !== 1) {
+ const errTxt = 'The batch Size for input shape must be 1'
+ callbackUI(errTxt, -1, errTxt)
+ return 0
+ }
+ if (isNaN(numOfChan) || numOfChan !== 1) {
+ const errTxt = 'The number of channels for input shape must be 1'
+ callbackUI(errTxt, -1, errTxt)
+ return 0
+ }
+ tf.engine().startScope()
+ console.log('Batch size: ', batchSize)
+ console.log('Num of Channels: ', numOfChan)
+ const model = await load_model(opts.rootURL + modelEntry.path)
+ await enableProductionMode(true)
+ statData.TF_Backend = tf.getBackend()
+ const modelObject = model
+ let batchInputShape = []
+ // free global variable of 16777216 voxel
+ // allOutputSlices3DCC1DimArray = []
+ // outputSceneRendered = false
+ // read input shape from model.json object
+ batchInputShape = modelObject.layers[0].batchInputShape
+ console.log(' Model batch input shape : ', batchInputShape)
+ // -- Verify input shape
+ if (batchInputShape.length !== 5) {
+ const errTxt = 'The model input shape must be 5D'
+ callbackUI(errTxt, -1, errTxt)
+ return 0
+ }
+ let batch_D, batch_H, batch_W
+ const slice_width = niftiHeader.dims[1]
+ const slice_height = niftiHeader.dims[2]
+ const num_of_slices = niftiHeader.dims[3]
+ const isChannelLast = await isModelChnlLast(modelObject)
+ if (isChannelLast) {
+ console.log('Model Channel Last')
+ if (isNaN(batchInputShape[4]) || batchInputShape[4] !== 1) {
+ const errTxt = 'The number of channels for input shape must be 1'
+ callbackUI(errTxt, -1, errTxt)
+ return 0
+ }
+ batch_D = batchInputShape[1]
+ batch_H = batchInputShape[2]
+ batch_W = batchInputShape[3]
+ } else {
+ console.log('Model Channel First')
+ if (isNaN(batchInputShape[1]) || batchInputShape[1] !== 1) {
+ const errTxt = 'The number of channels for input shape must be 1'
+ callbackUI(errTxt, -1, errTxt)
+ return 0
+ }
+ batch_D = batchInputShape[2]
+ batch_H = batchInputShape[3]
+ batch_W = batchInputShape[4]
+ }
+ // const input_shape = [batchSize, numOfChan, batch_D, batch_H, batch_W]
+ // //-- Atlas version check
+ // if ( (batch_D > 30) && (batch_H == 256) && (batch_W == 256) ) {
+ // const errTxt = "The subvolume dimension in z-axis shouldn't exceed 30 number of slices for browser limitation"
+ // callbackUI(errTxt, -1, errTxt)
+ // return 0
+ // }
+ // --Check whether the model will make inference at once as FullVolumeModel
+ let isModelFullVol
+ if (batch_D === 256 && batch_H === 256 && batch_W === 256) {
+ isModelFullVol = true
+ } else {
+ isModelFullVol = false
+ }
+ statData.isModelFullVol = isModelFullVol
+ // Model output number of segmentations
+ let slices_3d = await getAllSlicesDataAsTF3D(num_of_slices, niftiHeader, niftiImage)
+ const transpose = modelEntry.enableTranspose
+ const enableCrop = modelEntry.enableCrop
+ if (isModelFullVol) {
+ if (enableCrop) {
+ // FullVolume with Crop option before inference ..
+ // pre-model to mask the volume, can also be null and the cropping will be on the MRI.
+ await inferenceFullVolumePhase1(
+ model,
+ slices_3d,
+ num_of_slices,
+ slice_height,
+ slice_width,
+ isModelFullVol,
+ modelEntry,
+ statData,
+ opts,
+ callbackImg,
+ callbackUI,
+ niftiImage
+ )
+ } else {
+ // Transpose MRI data to be match pytorch/keras input output
+ console.log('Cropping Disabled')
+
+ if (transpose) {
+ slices_3d = slices_3d.transpose()
+ console.log('Input transposed')
+ } else {
+ console.log('Transpose NOT Enabled')
+ }
+
+ const enableSeqConv = modelEntry.enableSeqConv
+
+ if (enableSeqConv) {
+        console.log('Seq Convolution Enabled')
+ window.alert('inferenceFullVolumeSeqCovLayer() is not dead code?')
+ } else {
+        console.log('Seq Convolution Disabled')
+ window.alert('inferenceFullVolume() is not dead code?')
+ }
+ }
+ }
+}
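+
+// A hypothetical invocation sketch (callback signatures inferred from the calls above;
+// note that opts must also carry rootURL, which brainChopOpts does not define):
+//   import { brainChopOpts, inferenceModelsList } from './brainchop-parameters.js'
+//   const callbackUI = (message, progress, modalMsg, statData) => console.log(message, progress, modalMsg, statData)
+//   const callbackImg = (img, opts, modelEntry) => { /* hand the labeled volume to the viewer */ }
+//   await runInference({ ...brainChopOpts, rootURL: '.' }, inferenceModelsList[0], niftiHeader, niftiImage, callbackImg, callbackUI)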
diff --git a/brainchop-parameters.js b/brainchop-parameters.js
new file mode 100644
index 0000000..c39c880
--- /dev/null
+++ b/brainchop-parameters.js
@@ -0,0 +1,380 @@
+export { inferenceModelsList, brainChopOpts }
+
+const brainChopOpts = {
+ // General settings for input shape [batchSize, batch_D, batch_H, batch_W, numOfChan]
+ batchSize: 1, // How many batches are used during each inference iteration
+  numOfChan: 1, // Number of channels in the input shape
+  isColorEnable: true, // If false, grayscale will be enabled
+  isAutoColors: true, // If false, manualColorsRange will be in use
+  bgLabelValue: 0, // Semantic segmentation background label value
+  drawBoundingVolume: false, // Plot the bounding volume used to crop the brain
+  isGPU: true, // Use WebGL/GPU (faster) or CPU (compatibility)
+  isBrainCropMaskBased: true, // If true, the brain mask is used for cropping (and optional display); otherwise brain tissue is used
+  showPhase1Output: false, // Load the phase-1 output (i.e. brain mask or brain tissue) into the viewer
+  isPostProcessEnable: true, // If true, a 3D connected-components filter is applied
+  isContoursViewEnable: false, // If true, 3D contours of the labeled regions are shown
+  browserArrayBufferMaxZDim: 30, // This value depends on the memory available
+  telemetryFlag: false, // Ethical and transparent collection of browser usage while adhering to security and privacy standards
+  chartXaxisStepPercent: 10, // Percent of total labels on the X axis
+ uiSampleName: 'BC_UI_Sample', // Sample name used by interface
+ atlasSelectedColorTable: 'Fire' // Select from ["Hot-and-Cold", "Fire", "Grayscale", "Gold", "Spectrum"]
+}
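+
+// A hypothetical override sketch - spread the defaults and change individual flags
+// (rootURL is an assumed extra field read by the inference code, not defined above):
+//   const opts = { ...brainChopOpts, rootURL: '.', drawBoundingVolume: true }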
+
+// Inference Models, the ids must start from 1 in sequence
+const inferenceModelsList = [
+ {
+ id: 1,
+ type: 'Segmentation',
+ path: '/models/model5_gw_ae/model.json',
+ modelName: '\u26A1 Tissue GWM (light)',
+ colormapPath: './models/model5_gw_ae/colormap3.json',
+    preModelId: null, // Model to run first, e.g. to crop the brain { null, 1, 2, .. }
+    preModelPostProcess: false, // If true, postprocess to remove noisy regions after the preModel inference generates output.
+    isBatchOverlapEnable: false, // Create extra overlapping batches for inference
+    numOverlapBatches: 0, // Number of extra overlapping batches for inference
+    enableTranspose: true, // Keras and tfjs input orientations may need a transposing step to match
+    enableCrop: true, // To speed up inference, crop the brain from the background before feeding it to the model, lowering memory use.
+    cropPadding: 18, // Padding size added to the cropped brain
+    autoThreshold: 0, // Threshold between 0 and 1, used when there is no preModel and the tensor is normalized (min-max or quantile). Removes noisy voxels around the brain.
+    enableQuantileNorm: false, // Some models need quantile normalization.
+    filterOutWithPreMask: false, // Multiply the final output by the premodel output mask to clean noisy areas
+    enableSeqConv: false, // For low-memory systems and low-end configurations, apply the last layer as a sequential convolution
+    textureSize: 0, // Requested texture size for the model; 0 if unknown.
+    warning: null, // Warning message shown when the model is selected.
+    inferenceDelay: 100, // Delay in ms between layer applications in the inference loop.
+ description:
+ 'Gray and white matter segmentation model. Operates on full T1 image in a single pass, but uses only 5 filters per layer. Can work on integrated graphics cards but is barely large enough to provide good accuracy. Still more accurate than the subvolume model.'
+ },
+ {
+ id: 2,
+ type: 'Segmentation',
+ path: '/models/model20chan3cls/model.json',
+ modelName: '\u{1F52A} Tissue GWM (High Acc)',
+ colormapPath: './models/model20chan3cls/colormap.json',
+ preModelId: null, // Model to run first, e.g. to crop the brain { null, 1, 2, .. }
+ preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after the preModel inference generates its output.
+ isBatchOverlapEnable: false, // Create extra overlapping batches for inference
+ numOverlapBatches: 0, // Number of extra overlapping batches for inference
+ enableTranspose: true, // Keras and tfjs input orientations may need a transposing step to be matched
+ enableCrop: true, // To speed up inference, crop the brain from the background before feeding it to the model, lowering memory use.
+ cropPadding: 0, // Padding size added to the cropped brain
+ autoThreshold: 0.2, // Threshold between 0 and 1, applied when there is no preModel and the tensor is normalized either min-max or by quantiles; removes noisy voxels around the brain
+ enableQuantileNorm: true, // Some models need quantile normalization.
+ filterOutWithPreMask: false, // Can be used to multiply the final output with the preModel output mask to clean noisy areas
+ enableSeqConv: false, // For low-memory systems and low-end configurations, enable sequential convolution for the last layer
+ textureSize: 0, // Requested texture size for the model; 0 if unknown.
+ warning:
+ "This model may need a dedicated graphics card. For more info please check Browser Resources.",
+ inferenceDelay: 100, // Delay in ms applied while looping over the layers.
+ description:
+ 'Gray and white matter segmentation model. Operates on the full T1 image in a single pass but needs a dedicated graphics card to operate. Provides the best accuracy, with hard cropping for better speed.'
+ },
+ {
+ id: 3,
+ type: 'Segmentation',
+ path: '/models/model20chan3cls/model.json',
+ modelName: '\u{1F52A} Tissue GWM (High Acc, Low Mem)',
+ colormapPath: './models/model20chan3cls/colormap.json',
+ preModelId: null, // Model to run first, e.g. to crop the brain { null, 1, 2, .. }
+ preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after the preModel inference generates its output.
+ isBatchOverlapEnable: false, // Create extra overlapping batches for inference
+ numOverlapBatches: 0, // Number of extra overlapping batches for inference
+ enableTranspose: true, // Keras and tfjs input orientations may need a transposing step to be matched
+ enableCrop: true, // To speed up inference, crop the brain from the background before feeding it to the model, lowering memory use.
+ cropPadding: 0, // Padding size added to the cropped brain
+ autoThreshold: 0.2, // Threshold between 0 and 1, applied when there is no preModel and the tensor is normalized either min-max or by quantiles; removes noisy voxels around the brain
+ enableQuantileNorm: true, // Some models need quantile normalization.
+ filterOutWithPreMask: false, // Can be used to multiply the final output with the preModel output mask to clean noisy areas
+ enableSeqConv: true, // For low-memory systems and low-end configurations, enable sequential convolution for the last layer
+ textureSize: 0, // Requested texture size for the model; 0 if unknown.
+ warning:
+ "This model may need a dedicated graphics card. For more info please check Browser Resources.",
+ inferenceDelay: 100, // Delay in ms applied while looping over the layers.
+ description:
+ 'Gray and white matter segmentation model. Operates on the full T1 image in a single pass but needs a dedicated graphics card to operate. Provides high accuracy and fits within limited memory, but is slower.'
+ },
+ {
+ id: 4,
+ type: 'Atlas',
+ path: '/models/model30chan18cls/model.json',
+ modelName: '\u{1FA93} Subcortical + GWM (High Mem, Fast)',
+ colormapPath: './models/model30chan18cls/colormap.json',
+ preModelId: null, // Model to run first, e.g. to crop the brain { null, 1, 2, .. }
+ preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after the preModel inference generates its output.
+ isBatchOverlapEnable: false, // Create extra overlapping batches for inference
+ numOverlapBatches: 200, // Number of extra overlapping batches for inference
+ enableTranspose: true, // Keras and tfjs input orientations may need a transposing step to be matched
+ enableCrop: true, // To speed up inference, crop the brain from the background before feeding it to the model, lowering memory use.
+ cropPadding: 0, // Padding size added to the cropped brain
+ autoThreshold: 0.2, // Threshold between 0 and 1, applied when there is no preModel and the tensor is normalized either min-max or by quantiles; removes noisy voxels around the brain
+ enableQuantileNorm: false, // Some models need quantile normalization.
+ filterOutWithPreMask: false, // Can be used to multiply the final output with the preModel output mask to clean noisy areas
+ enableSeqConv: false, // For low-memory systems and low-end configurations, enable sequential convolution for the last layer
+ textureSize: 0, // Requested texture size for the model; 0 if unknown.
+ warning:
+ "This model may need a dedicated graphics card. For more info please check Browser Resources.", // Warning message to show when the model is selected.
+ inferenceDelay: 100, // Delay in ms applied while looping over the layers.
+ description:
+ 'Parcellation of the brain into 17 regions: gray and white matter plus subcortical areas. This is a robust model able to handle a range of data quality, including varying saturation, and even clinical scans. It may work on infant brains, but your mileage may vary.'
+ },
+ {
+ id: 5,
+ type: 'Atlas',
+ path: '/models/model30chan18cls/model.json',
+ modelName: '\u{1FA93} Subcortical + GWM (Low Mem, Slow)',
+ colormapPath: './models/model30chan18cls/colormap.json',
+ preModelId: null, // Model to run first, e.g. to crop the brain { null, 1, 2, .. }
+ preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after the preModel inference generates its output.
+ isBatchOverlapEnable: false, // Create extra overlapping batches for inference
+ numOverlapBatches: 200, // Number of extra overlapping batches for inference
+ enableTranspose: true, // Keras and tfjs input orientations may need a transposing step to be matched
+ enableCrop: true, // To speed up inference, crop the brain from the background before feeding it to the model, lowering memory use.
+ cropPadding: 0, // Padding size added to the cropped brain
+ autoThreshold: 0.2, // Threshold between 0 and 1, applied when there is no preModel and the tensor is normalized either min-max or by quantiles; removes noisy voxels around the brain
+ enableQuantileNorm: false, // Some models need quantile normalization.
+ filterOutWithPreMask: false, // Can be used to multiply the final output with the preModel output mask to clean noisy areas
+ enableSeqConv: true, // For low-memory systems and low-end configurations, enable sequential convolution for the last layer
+ textureSize: 0, // Requested texture size for the model; 0 if unknown.
+ warning:
+ "This model may need a dedicated graphics card. For more info please check Browser Resources.", // Warning message to show when the model is selected.
+ inferenceDelay: 100, // Delay in ms applied while looping over the layers.
+ description:
+ 'Parcellation of the brain into 17 regions: gray and white matter plus subcortical areas. This is a robust model able to handle a range of data quality, including varying saturation, and even clinical scans. It may work on infant brains, but your mileage may vary.'
+ },
+ {
+ id: 6,
+ type: 'Atlas',
+ path: '/models/model18cls/model.json',
+ modelName: '\u{1FA93} Subcortical + GWM (Low Mem, Faster)',
+ colormapPath: './models/model18cls/colormap.json',
+ preModelId: null, // Model to run first, e.g. Brain_Extraction { null, 1, 2, .. }
+ preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after the preModel inference generates its output.
+ isBatchOverlapEnable: false, // Create extra overlapping batches for inference
+ numOverlapBatches: 200, // Number of extra overlapping batches for inference
+ enableTranspose: true, // Keras and tfjs input orientations may need a transposing step to be matched
+ enableCrop: true, // To speed up inference, crop the brain from the background before feeding it to the model, lowering memory use.
+ cropPadding: 0, // Padding size added to the cropped brain
+ autoThreshold: 0.2, // Threshold between 0 and 1, applied when there is no preModel and the tensor is normalized either min-max or by quantiles; removes noisy voxels around the brain
+ enableQuantileNorm: false, // Some models need quantile normalization.
+ filterOutWithPreMask: false, // Can be used to multiply the final output with the preModel output mask to clean noisy areas
+ enableSeqConv: true, // For low-memory systems and low-end configurations, enable sequential convolution for the last layer
+ textureSize: 0, // Requested texture size for the model; 0 if unknown.
+ warning:
+ "This model may need a dedicated graphics card. For more info please check Browser Resources.", // Warning message to show when the model is selected.
+ inferenceDelay: 100, // Delay in ms applied while looping over the layers.
+ description:
+ 'Parcellation of the brain into 17 regions: gray and white matter plus subcortical areas. This is a robust model able to handle a range of data quality, including varying saturation, and even clinical scans. It may work on infant brains, but your mileage may vary.'
+ },
+ {
+ id: 7,
+ type: 'Atlas',
+ path: '/models/model30chan18cls/model.json',
+ modelName: '\u{1F52A}\u{1FA93} Subcortical + GWM (Failsafe, Less Acc)',
+ colormapPath: './models/model30chan18cls/colormap.json',
+ preModelId: 1, // Model to run first, e.g. Brain_Extraction { null, 1, 2, .. }
+ preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after the preModel inference generates its output.
+ isBatchOverlapEnable: false, // Create extra overlapping batches for inference
+ numOverlapBatches: 200, // Number of extra overlapping batches for inference
+ enableTranspose: true, // Keras and tfjs input orientations may need a transposing step to be matched
+ enableCrop: true, // To speed up inference, crop the brain from the background before feeding it to the model, lowering memory use.
+ cropPadding: 0, // Padding size added to the cropped brain
+ autoThreshold: 0, // Threshold between 0 and 1, applied when there is no preModel and the tensor is normalized either min-max or by quantiles; removes noisy voxels around the brain
+ enableQuantileNorm: false, // Some models need quantile normalization.
+ filterOutWithPreMask: false, // Can be used to multiply the final output with the preModel output mask to clean noisy areas
+ enableSeqConv: false, // For low-memory systems and low-end configurations, enable sequential convolution for the last layer
+ textureSize: 0, // Requested texture size for the model; 0 if unknown.
+ warning:
+ "This model may need a dedicated graphics card. For more info please check Browser Resources.", // Warning message to show when the model is selected.
+ inferenceDelay: 100, // Delay in ms applied while looping over the layers.
+ description:
+ 'Parcellation of the brain into 17 regions: gray and white matter plus subcortical areas. This is not a robust model; it may work on low-quality data, including varying saturation, and even clinical scans. It may also work on infant brains, but your mileage may vary.'
+ },
+ {
+ id: 8,
+ type: 'Atlas',
+ path: '/models/model30chan50cls/model.json',
+ modelName: '\u{1F52A} Aparc+Aseg 50 (High Mem, Fast)',
+ colormapPath: './models/model30chan50cls/colormap.json',
+ preModelId: 1, // Model to run first, e.g. to crop the brain { null, 1, 2, .. }
+ preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after the preModel inference generates its output.
+ isBatchOverlapEnable: false, // Create extra overlapping batches for inference
+ numOverlapBatches: 200, // Number of extra overlapping batches for inference
+ enableTranspose: true, // Keras and tfjs input orientations may need a transposing step to be matched
+ enableCrop: true, // To speed up inference, crop the brain from the background before feeding it to the model, lowering memory use.
+ cropPadding: 0, // Padding size added to the cropped brain
+ autoThreshold: 0, // Threshold between 0 and 1, applied when there is no preModel and the tensor is normalized either min-max or by quantiles; removes noisy voxels around the brain
+ enableQuantileNorm: true, // Some models need quantile normalization.
+ filterOutWithPreMask: false, // Can be used to multiply the final output with the preModel output mask to clean noisy areas
+ enableSeqConv: false, // For low-memory systems and low-end configurations, enable sequential convolution for the last layer
+ textureSize: 0, // Requested texture size for the model; 0 if unknown.
+ warning:
+ "This model may need a dedicated graphics card. For more info please check Browser Resources.", // Warning message to show when the model is selected.
+ inferenceDelay: 100, // Delay in ms applied while looping over the layers.
+ description:
+ 'This is a 50-class model that segments the brain into the Aparc+Aseg FreeSurfer atlas, but one where cortical homologues are merged into a single class.'
+ },
+ {
+ id: 9,
+ type: 'Atlas',
+ path: '/models/model30chan50cls/model.json',
+ modelName: '\u{1F52A} Aparc+Aseg 50 (Low Mem, Slow)',
+ colormapPath: './models/model30chan50cls/colormap.json',
+ preModelId: 1, // Model to run first, e.g. to crop the brain { null, 1, 2, .. }
+ preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after the preModel inference generates its output.
+ isBatchOverlapEnable: false, // Create extra overlapping batches for inference
+ numOverlapBatches: 200, // Number of extra overlapping batches for inference
+ enableTranspose: true, // Keras and tfjs input orientations may need a transposing step to be matched
+ enableCrop: true, // To speed up inference, crop the brain from the background before feeding it to the model, lowering memory use.
+ cropPadding: 0, // Padding size added to the cropped brain
+ autoThreshold: 0, // Threshold between 0 and 1, applied when there is no preModel and the tensor is normalized either min-max or by quantiles; removes noisy voxels around the brain
+ enableQuantileNorm: true, // Some models need quantile normalization.
+ filterOutWithPreMask: false, // Can be used to multiply the final output with the preModel output mask to clean noisy areas
+ enableSeqConv: true, // For low-memory systems and low-end configurations, enable sequential convolution for the last layer
+ textureSize: 0, // Requested texture size for the model; 0 if unknown.
+ warning:
+ "This model may need a dedicated graphics card. For more info please check Browser Resources.", // Warning message to show when the model is selected.
+ inferenceDelay: 100, // Delay in ms applied while looping over the layers.
+ description:
+ 'This is a 50-class model that segments the brain into the Aparc+Aseg FreeSurfer atlas, but one where cortical homologues are merged into a single class. The model uses sequential convolution for inference to overcome browser memory limitations, but this leads to longer computation time.'
+ },
+ {
+ id: 10,
+ type: 'Brain_Extraction',
+ path: '/models/model5_gw_ae/model.json',
+ modelName: '\u26A1 Extract the Brain (FAST)',
+ preModelId: null, // Model to run first, e.g. to crop the brain { null, 1, 2, .. }
+ preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after the preModel inference generates its output.
+ isBatchOverlapEnable: false, // Create extra overlapping batches for inference
+ numOverlapBatches: 0, // Number of extra overlapping batches for inference
+ enableTranspose: true, // Keras and tfjs input orientations may need a transposing step to be matched
+ enableCrop: true, // To speed up inference, crop the brain from the background before feeding it to the model, lowering memory use.
+ cropPadding: 18, // Padding size added to the cropped brain
+ autoThreshold: 0, // Threshold between 0 and 1, applied when there is no preModel and the tensor is normalized either min-max or by quantiles; removes noisy voxels around the brain
+ enableQuantileNorm: false, // Some models need quantile normalization.
+ filterOutWithPreMask: false, // Can be used to multiply the final output with the preModel output mask to clean noisy areas
+ enableSeqConv: false, // For low-memory systems and low-end configurations, enable sequential convolution for the last layer
+ textureSize: 0, // Requested texture size for the model; 0 if unknown.
+ warning: null, // Warning message to show when the model is selected.
+ inferenceDelay: 100, // Delay in ms applied while looping over the layers.
+ description:
+ 'Extract the brain fast model operates on the full T1 image in a single pass, but uses only 5 filters per layer. Can work on integrated graphics cards but is barely large enough to provide good accuracy. Still more accurate than the failsafe version.'
+ },
+ {
+ id: 11,
+ type: 'Brain_Extraction',
+ path: '/models/model11_gw_ae/model.json',
+ modelName: '\u{1F52A} Extract the Brain (High Acc, Slow)',
+ preModelId: null, // Model to run first, e.g. to crop the brain { null, 1, 2, .. }
+ preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after the preModel inference generates its output.
+ isBatchOverlapEnable: false, // Create extra overlapping batches for inference
+ numOverlapBatches: 0, // Number of extra overlapping batches for inference
+ enableTranspose: true, // Keras and tfjs input orientations may need a transposing step to be matched
+ enableCrop: true, // To speed up inference, crop the brain from the background before feeding it to the model, lowering memory use.
+ cropPadding: 0, // Padding size added to the cropped brain
+ autoThreshold: 0, // Threshold between 0 and 1, applied when there is no preModel and the tensor is normalized either min-max or by quantiles; removes noisy voxels around the brain
+ enableQuantileNorm: false, // Some models need quantile normalization.
+ filterOutWithPreMask: false, // Can be used to multiply the final output with the preModel output mask to clean noisy areas
+ enableSeqConv: true, // For low-memory systems and low-end configurations, enable sequential convolution for the last layer
+ textureSize: 0, // Requested texture size for the model; 0 if unknown.
+ warning:
+ "This model may need a dedicated graphics card. For more info please check Browser Resources.",
+ inferenceDelay: 100, // Delay in ms applied while looping over the layers.
+ description:
+ 'Extract the brain high-accuracy model operates on the full T1 image in a single pass, but uses only 11 filters per layer. Works on dedicated graphics cards. Still more accurate than the fast version.'
+ },
+ {
+ id: 12,
+ type: 'Brain_Masking',
+ path: '/models/model5_gw_ae/model.json',
+ modelName: '\u26A1 Brain Mask (FAST)',
+ colormapPath: './models/model5_gw_ae/colormap.json',
+ preModelId: null, // Model to run first, e.g. to crop the brain { null, 1, 2, .. }
+ preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after the preModel inference generates its output.
+ isBatchOverlapEnable: false, // Create extra overlapping batches for inference
+ numOverlapBatches: 0, // Number of extra overlapping batches for inference
+ enableTranspose: true, // Keras and tfjs input orientations may need a transposing step to be matched
+ enableCrop: true, // To speed up inference, crop the brain from the background before feeding it to the model, lowering memory use.
+ cropPadding: 17, // Padding size added to the cropped brain
+ autoThreshold: 0, // Threshold between 0 and 1, applied when there is no preModel and the tensor is normalized either min-max or by quantiles; removes noisy voxels around the brain
+ enableQuantileNorm: false, // Some models need quantile normalization.
+ filterOutWithPreMask: false, // Can be used to multiply the final output with the preModel output mask to clean noisy areas
+ enableSeqConv: false, // For low-memory systems and low-end configurations, enable sequential convolution for the last layer
+ textureSize: 0, // Requested texture size for the model; 0 if unknown.
+ warning: null, // Warning message to show when the model is selected.
+ inferenceDelay: 100, // Delay in ms applied while looping over the layers.
+ description:
+ 'This fast masking model operates on the full T1 image in a single pass, but uses only 5 filters per layer. Can work on integrated graphics cards but is barely large enough to provide good accuracy. Still more accurate than the failsafe version.'
+ },
+ {
+ id: 13,
+ type: 'Brain_Masking',
+ path: '/models/model11_gw_ae/model.json',
+ modelName: '\u{1F52A} Brain Mask (High Acc, Low Mem)',
+ preModelId: null, // Model to run first, e.g. to crop the brain { null, 1, 2, .. }
+ preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after the preModel inference generates its output.
+ isBatchOverlapEnable: false, // Create extra overlapping batches for inference
+ numOverlapBatches: 0, // Number of extra overlapping batches for inference
+ enableTranspose: true, // Keras and tfjs input orientations may need a transposing step to be matched
+ enableCrop: true, // To speed up inference, crop the brain from the background before feeding it to the model, lowering memory use.
+ cropPadding: 0, // Padding size added to the cropped brain
+ autoThreshold: 0, // Threshold between 0 and 1, applied when there is no preModel and the tensor is normalized either min-max or by quantiles; removes noisy voxels around the brain
+ enableQuantileNorm: true, // Some models need quantile normalization.
+ filterOutWithPreMask: false, // Can be used to multiply the final output with the preModel output mask to clean noisy areas
+ enableSeqConv: true, // For low-memory systems and low-end configurations, enable sequential convolution for the last layer
+ textureSize: 0, // Requested texture size for the model; 0 if unknown.
+ warning:
+ "This model may need a dedicated graphics card. For more info please check Browser Resources.",
+ inferenceDelay: 100, // Delay in ms applied while looping over the layers.
+ description:
+ 'This masking model operates on the full T1 image in a single pass, but uses 11 filters per layer. Works on dedicated graphics cards. Still more accurate than the fast version.'
+ },
+ {
+ id: 14,
+ type: 'Atlas',
+ path: '/models/model21_104class/model.json',
+ modelName: '\u{1F52A} Aparc+Aseg 104 (High Mem, Fast)',
+ colormapPath: './models/model21_104class/colormap.json',
+ preModelId: 1, // Model to run first, e.g. Brain_Extraction { null, 1, 2, .. }
+ preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after the preModel inference generates its output.
+ isBatchOverlapEnable: false, // Create extra overlapping batches for inference
+ numOverlapBatches: 200, // Number of extra overlapping batches for inference
+ enableTranspose: true, // Keras and tfjs input orientations may need a transposing step to be matched
+ enableCrop: true, // To speed up inference, crop the brain from the background before feeding it to the model, lowering memory use.
+ cropPadding: 0, // Padding size added to the cropped brain
+ autoThreshold: 0, // Threshold between 0 and 1, applied when there is no preModel and the tensor is normalized either min-max or by quantiles; removes noisy voxels around the brain
+ enableQuantileNorm: false, // Some models need quantile normalization.
+ filterOutWithPreMask: false, // Can be used to multiply the final output with the preModel output mask to clean noisy areas
+ enableSeqConv: false, // For low-memory systems and low-end configurations, enable sequential convolution for the last layer
+ textureSize: 0, // Requested texture size for the model; 0 if unknown.
+ warning:
+ "This model may need a dedicated graphics card. For more info please check Browser Resources.", // Warning message to show when the model is selected.
+ inferenceDelay: 100, // Delay in ms applied while looping over the layers.
+ description:
+ 'The FreeSurfer aparc+aseg atlas parcellates the brain into 104 regions. It combines the Desikan-Killiany atlas for cortical areas with segmentation of subcortical regions.'
+ },
+ {
+ id: 15,
+ type: 'Atlas',
+ path: '/models/model21_104class/model.json',
+ modelName: '\u{1F52A} Aparc+Aseg 104 (Low Mem, Slow)',
+ colormapPath: './models/model21_104class/colormap.json',
+ preModelId: 1, // Model to run first, e.g. Brain_Extraction { null, 1, 2, .. }
+ preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after the preModel inference generates its output.
+ isBatchOverlapEnable: false, // Create extra overlapping batches for inference
+ numOverlapBatches: 200, // Number of extra overlapping batches for inference
+ enableTranspose: true, // Keras and tfjs input orientations may need a transposing step to be matched
+ enableCrop: true, // To speed up inference, crop the brain from the background before feeding it to the model, lowering memory use.
+ cropPadding: 0, // Padding size added to the cropped brain
+ autoThreshold: 0, // Threshold between 0 and 1, applied when there is no preModel and the tensor is normalized either min-max or by quantiles; removes noisy voxels around the brain
+ enableQuantileNorm: false, // Some models need quantile normalization.
+ filterOutWithPreMask: false, // Can be used to multiply the final output with the preModel output mask to clean noisy areas
+ enableSeqConv: true, // For low-memory systems and low-end configurations, enable sequential convolution for the last layer
+ textureSize: 0, // Requested texture size for the model; 0 if unknown.
+ warning:
+ "This model may need a dedicated graphics card. For more info please check Browser Resources.", // Warning message to show when the model is selected.
+ inferenceDelay: 100, // Delay in ms applied while looping over the layers.
+ description:
+ 'The FreeSurfer aparc+aseg atlas parcellates the brain into 104 regions. It combines the Desikan-Killiany atlas for cortical areas with segmentation of subcortical regions. The model uses sequential convolution for inference to overcome browser memory limitations, but this leads to longer computation time.'
+ }
+] // inferenceModelsList
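+
+// Lookup sketch (illustrative): ids are 1-based and sequential, so a model
+// entry and its optional pre-model resolve by index, e.g.:
+//   const modelEntry = inferenceModelsList[id - 1]
+//   const preModel = modelEntry.preModelId ? inferenceModelsList[modelEntry.preModelId - 1] : null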
diff --git a/brainchop-webworker.js b/brainchop-webworker.js
new file mode 100644
index 0000000..af17007
--- /dev/null
+++ b/brainchop-webworker.js
@@ -0,0 +1,1356 @@
+import * as tf from '@tensorflow/tfjs'
+import { inferenceModelsList } from './brainchop-parameters.js'
+import {
+ addZeroPaddingTo3dTensor,
+ applyMriThreshold,
+ binarizeVolumeDataTensor,
+ convByOutputChannelAndInputSlicing,
+ draw3dObjBoundingVolume,
+ firstLastNonZero3D,
+ generateBrainMask,
+ generateOutputSlicesV2,
+ getAllSlicesDataAsTF3D,
+ getModelNumLayers,
+ getModelNumParameters,
+ isModelChnlLast,
+ load_model,
+ minMaxNormalizeVolumeData,
+ quantileNormalizeVolumeData,
+ removeZeroPaddingFrom3dTensor,
+ resizeWithZeroPadding,
+ SequentialConvLayer
+} from './tensor-utils.js'
+
+function callbackUI(message = '', progressFrac = -1, modalMessage = '', statData = []) {
+ let statStr = ''
+ if (Object.keys(statData).length > 0) {
+ statStr = JSON.stringify({ ...statData })
+ }
+ self.postMessage({
+ cmd: 'ui',
+ message,
+ progressFrac,
+ modalMessage,
+ statData: statStr
+ })
+}
+
+function callbackImg(img, opts, modelEntry) {
+ self.postMessage({ cmd: 'img', img, opts, modelEntry })
+}
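+
+// Main-thread sketch (illustrative, not part of this file): the UI side is
+// assumed to route these worker messages by their `cmd` field, e.g.:
+//   worker.onmessage = (e) => {
+//     if (e.data.cmd === 'ui') showProgress(e.data.message, e.data.progressFrac) // showProgress is hypothetical
+//     if (e.data.cmd === 'img') renderSegmentation(e.data.img, e.data.opts) // renderSegmentation is hypothetical
+//   }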
+
+async function inferenceFullVolumeSeqCovLayerPhase2(
+ opts,
+ modelEntry,
+ model,
+ slices_3d,
+ num_of_slices,
+ slice_height,
+ slice_width,
+ pipeline1_out,
+ statData,
+ niftiImage
+) {
+ // -- Phase-2: after removing the skull, try to allocate the brain volume and run inference
+
+ console.log(' ---- Start FullVolume Inference with Sequential Conv Layer for phase-II ---- ')
+ const quantileNorm = modelEntry.enableQuantileNorm
+ if (quantileNorm) {
+ // Quantile normalization is used only by specific models
+ console.log('preModel Quantile normalization enabled')
+ slices_3d = await quantileNormalizeVolumeData(slices_3d)
+ } else {
+ // Min-max normalize the MRI data to the range 0 to 1
+ console.log('preModel Min Max normalization enabled')
+ slices_3d = await minMaxNormalizeVolumeData(slices_3d)
+ }
+
+ let mask_3d
+
+ if (pipeline1_out == null) {
+ // preModel is null
+
+ // Check if thresholding the MRI to remove noisy voxels for better cropping is needed.
+ const autoThresholdValue = modelEntry.autoThreshold
+
+ if (autoThresholdValue > 0 && autoThresholdValue <= 1) {
+ // Filter out noisy voxels below autoThresholdValue for better cropping
+ mask_3d = await applyMriThreshold(slices_3d, autoThresholdValue)
+ } else {
+ console.log('No valid crop threshold value')
+ // binarize original image
+ mask_3d = await slices_3d.greater([0]).asType('bool')
+ }
+ } else {
+ mask_3d = await pipeline1_out.greater([0]).asType('bool')
+ // -- pipeline1_out.dispose()
+ }
+
+ console.log(' mask_3d shape : ', mask_3d.shape)
+ const [row_min, row_max, col_min, col_max, depth_min, depth_max] = await firstLastNonZero3D(mask_3d)
+ mask_3d.dispose()
+ // -- Reference voxel where the cropped volume starts
+ const refVoxel = [row_min, col_min, depth_min]
+ // -- Starting from refVoxel, the size of the bounding volume
+ const boundVolSizeArr = [row_max - row_min + 1, col_max - col_min + 1, depth_max - depth_min + 1]
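+ // Worked example (illustrative): if the non-zero extent spans rows 40..215,
+ // columns 30..225 and depths 25..230, then refVoxel = [40, 30, 25] and
+ // boundVolSizeArr = [176, 196, 206] (max - min + 1 per axis).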
+
+ // -- Extract 3d object (e.g. brain)
+ const cropped_slices_3d = await slices_3d.slice(
+ [row_min, col_min, depth_min],
+ [row_max - row_min + 1, col_max - col_min + 1, depth_max - depth_min + 1]
+ )
+ slices_3d.dispose()
+
+ // -- Padding size added to the cropped brain
+ const pad = modelEntry.cropPadding
+
+ // Create margin around the bounding volume
+ let cropped_slices_3d_w_pad = await addZeroPaddingTo3dTensor(cropped_slices_3d, [pad, pad], [pad, pad], [pad, pad])
+ console.log(' cropped slices_3d with padding shape: ', cropped_slices_3d_w_pad.shape)
+
+ cropped_slices_3d.dispose()
+
+ if (opts.drawBoundingVolume) {
+ let testVol = await removeZeroPaddingFrom3dTensor(cropped_slices_3d_w_pad, pad, pad, pad)
+ console.log(' outLabelVolume without padding shape : ', testVol.shape)
+
+ testVol = await resizeWithZeroPadding(testVol, num_of_slices, slice_height, slice_width, refVoxel, boundVolSizeArr)
+ console.log(' outLabelVolume final shape after resizing : ', testVol.shape)
+ draw3dObjBoundingVolume(tf.unstack(testVol), opts, modelEntry, callbackImg)
+ testVol.dispose()
+
+ return 0
+ }
+
+ statData.Brainchop_Ver = 'FullVolume'
+ const res = await model
+ try {
+ let startTime = performance.now()
+ const inferenceStartTime = performance.now()
+ // maxLabelPredicted in whole volume of the brain
+ let maxLabelPredicted = 0
+ const transpose = modelEntry.enableTranspose
+
+ if (transpose) {
+ cropped_slices_3d_w_pad = await cropped_slices_3d_w_pad.transpose()
+ console.log('Input transposed for pre-model')
+ } else {
+ console.log('Transpose not enabled for pre-model')
+ }
+
+ let i = 1
+ const layersLength = res.layers.length
+ console.log('res.layers.length ', layersLength)
+
+ const isChannelLast = await isModelChnlLast(res)
+ const batchSize = opts.batchSize
+ const numOfChan = opts.numOfChan
+ let adjusted_input_shape
+ // -- Adjust model input shape
+ if (isChannelLast) {
+ res.layers[0].batchInputShape[1] = cropped_slices_3d_w_pad.shape[0]
+ res.layers[0].batchInputShape[2] = cropped_slices_3d_w_pad.shape[1]
+ res.layers[0].batchInputShape[3] = cropped_slices_3d_w_pad.shape[2]
+
+ adjusted_input_shape = [
+ batchSize,
+ res.layers[0].batchInputShape[1],
+ res.layers[0].batchInputShape[2],
+ res.layers[0].batchInputShape[3],
+ numOfChan
+ ]
+ } else {
+ res.layers[0].batchInputShape[2] = cropped_slices_3d_w_pad.shape[0]
+ res.layers[0].batchInputShape[3] = cropped_slices_3d_w_pad.shape[1]
+ res.layers[0].batchInputShape[4] = cropped_slices_3d_w_pad.shape[2]
+
+ adjusted_input_shape = [
+ batchSize,
+ numOfChan,
+ res.layers[0].batchInputShape[2],
+ res.layers[0].batchInputShape[3],
+ res.layers[0].batchInputShape[4]
+ ]
+ }
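+
+ // Shape sketch (illustrative): for a cropped volume of, say, 176x196x206
+ // with batchSize 1 and numOfChan 1, adjusted_input_shape becomes
+ // [1, 176, 196, 206, 1] (channel-last) or [1, 1, 176, 196, 206] (channel-first).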
+
+ console.log(' Model batch input shape : ', res.layers[0].batchInputShape)
+ // -- batchInputShape {Array} input_shape - e.g. [?, D, H, W, Ch] or [?, Ch, D, H, W]
+
+ statData.Input_Shape = JSON.stringify(res.layers[0].batchInputShape)
+ statData.Output_Shape = JSON.stringify(res.output.shape)
+ statData.Channel_Last = await isChannelLast
+ statData.Model_Param = await getModelNumParameters(res)
+ statData.Model_Layers = await getModelNumLayers(res)
+ statData.Model = modelEntry.modelName
+ statData.Seq_Conv = modelEntry.enableSeqConv
+ // statData.Extra_Info = null
+
+ // Determine the number of output channels in the last layer of the model
+ // e.g. 3, 50, 104
+ const outputLayer = res.layers[res.layers.length - 1]
+ console.log('Output Layer : ', outputLayer)
+
+ const expected_Num_labels = isChannelLast
+ ? outputLayer.outputShape[outputLayer.outputShape.length - 1]
+ : outputLayer.outputShape[1]
+ console.log('Num of output channels: ', expected_Num_labels)
+
+ const curTensor = []
+ curTensor[0] = await cropped_slices_3d_w_pad.reshape(adjusted_input_shape)
+ while (true) {
+ try {
+ if (res.layers[i].activation.getClassName() !== 'linear') {
+ curTensor[i] = await res.layers[i].apply(curTensor[i - 1])
+ } else {
+ curTensor[i] = await convByOutputChannelAndInputSlicing(
+ curTensor[i - 1],
+ res.layers[i].getWeights()[0],
+ res.layers[i].getWeights()[1],
+ res.layers[i].strides,
+ res.layers[i].padding,
+ res.layers[i].dilationRate,
+ 3
+ ) // important for memory use
+ }
+
+ tf.dispose(curTensor[i - 1])
+ } catch (err) {
+ const errTxt = 'Your graphics card (e.g. Intel) may not be compatible with WebGL. ' + err.message
+ callbackUI(errTxt, -1, errTxt)
+
+ tf.engine().endScope()
+ tf.engine().disposeVariables()
+
+ statData.Inference_t = Infinity
+ statData.Postprocess_t = Infinity
+ statData.Status = 'Fail'
+ statData.Error_Type = err.message
+ statData.Extra_Err_Info = 'Failed while model layer ' + i + ' apply'
+
+ callbackUI('', -1, '', statData)
+
+ return 0
+ }
+
+ console.log('layer output Tensor shape : ', curTensor[i].shape)
+ console.log('layer count params ', res.layers[i].countParams())
+
+ res.layers[i].dispose()
+ curTensor[i - 1].dispose()
+
+ callbackUI('Layer ' + i.toString(), (i + 1) / layersLength)
+ if (tf.memory().unreliable) {
+ const unreliableReasons = 'unreliable reasons :' + tf.memory().reasons
+ callbackUI(unreliableReasons, NaN, unreliableReasons)
+ }
+ if (i === layersLength - 2) {
+ // Stop before the last (classification) layer.
+
+ // Create an instance of SequentialConvLayer.
+ // The second parameter is important for memory use:
+ // the larger it is, the more memory inference needs.
+ const seqConvLayer = new SequentialConvLayer(res, 10, isChannelLast, callbackUI)
+
+ // Apply the last output tensor to the seq. instance
+ let outputTensor = null
+ const profileInfo = await tf.profile(async () => {
+ // Your tensor operations here
+ outputTensor = await seqConvLayer.apply(curTensor[i])
+ })
+ console.log('profileInfo : ', profileInfo)
+
+
+ // Dispose the previous layer input tensor
+ tf.dispose(curTensor[i])
+
+ // You can now use 'outputTensor' as needed
+ console.log(' Output tensor', outputTensor)
+ console.log(' Output tensor shape : ', outputTensor.shape)
+ // Array(3) [ 256, 256, 256 ]
+
+ if (outputTensor.shape.length !== 3) {
+ const msg = 'Output tensor shape should be 3 dims but it is ' + outputTensor.shape.length
+ callbackUI(msg, -1, msg)
+ }
+
+ const Inference_t = ((performance.now() - startTime) / 1000).toFixed(4)
+
+ console.log(' find array max ')
+ const curBatchMaxLabel = (await outputTensor.max().data())[0]
+ if (maxLabelPredicted < curBatchMaxLabel) {
+ maxLabelPredicted = curBatchMaxLabel
+ }
+
+ const numSegClasses = maxLabelPredicted + 1
+ console.log('Predicted num of segmentation classes', numSegClasses)
+ statData.Actual_Labels = numSegClasses
+ statData.Expect_Labels = expected_Num_labels
+ statData.NumLabels_Match = numSegClasses === expected_Num_labels
+ if (numSegClasses !== expected_Num_labels) {
+ const msg = 'expected ' + expected_Num_labels + ' labels, but the predicted are ' + numSegClasses
+ callbackUI(msg, -1, msg)
+ }
+
+ // -- Transpose back to original unpadded size
+ let outLabelVolume = outputTensor.reshape([
+ cropped_slices_3d_w_pad.shape[0],
+ cropped_slices_3d_w_pad.shape[1],
+ cropped_slices_3d_w_pad.shape[2]
+ ])
+ tf.dispose(outputTensor)
+
+ // Transpose MRI data to match the pytorch/keras input/output orientation
+ if (transpose) {
+ console.log('outLabelVolume transposed')
+ outLabelVolume = outLabelVolume.transpose()
+ }
+
+ outLabelVolume = await removeZeroPaddingFrom3dTensor(outLabelVolume, pad, pad, pad)
+ console.log(' outLabelVolume without padding shape : ', outLabelVolume.shape)
+ outLabelVolume = await resizeWithZeroPadding(
+ outLabelVolume,
+ num_of_slices,
+ slice_height,
+ slice_width,
+ refVoxel,
+ boundVolSizeArr
+ )
+ console.log(' outLabelVolume final shape after resizing : ', outLabelVolume.shape)
+
+ // let filterOutWithPreMask = inferenceModelsList[$$("selectModel").getValue() - 1]["filterOutWithPreMask"]
+ const filterOutWithPreMask = modelEntry.filterOutWithPreMask
+ // To clean the skull area wrongly segmented in phase-2.
+ if (pipeline1_out != null && opts.isBrainCropMaskBased && filterOutWithPreMask) {
+ const bin = await binarizeVolumeDataTensor(pipeline1_out)
+ outLabelVolume = await outLabelVolume.mul(bin)
+ }
+
+ startTime = performance.now()
+ // Generate output volume or slices
+ console.log('Generating correct output')
+ let outimg
+ try {
+ const img = new Uint32Array(outLabelVolume.dataSync())
+ const Vshape = outLabelVolume.shape
+ const Vtype = outLabelVolume.dtype
+ outimg = await generateOutputSlicesV2(
+ img,
+ Vshape,
+ Vtype,
+ num_of_slices,
+ numSegClasses,
+ slice_height,
+ slice_width,
+ modelEntry,
+ opts,
+ niftiImage
+ )
+ console.log(' Phase-2 num of tensors after generateOutputSlicesV2: ', tf.memory().numTensors)
+
+ tf.dispose(outLabelVolume)
+ tf.engine().endScope()
+ tf.engine().disposeVariables()
+ } catch (error) {
+ // -- Timing data to collect
+ tf.engine().endScope()
+ tf.engine().disposeVariables()
+ console.log('Error while generating output: ', error)
+ const msg = 'Failed while generating output due to limited browser memory available'
+ callbackUI(msg, -1, msg)
+
+ statData.Inference_t = Inference_t
+ statData.Postprocess_t = Infinity
+ statData.Status = 'Fail'
+ statData.Error_Type = error.message
+ statData.Extra_Err_Info = 'Failed while generating output'
+
+ callbackUI('', -1, '', statData)
+
+ return 0
+ }
+ const Postprocess_t = ((performance.now() - startTime) / 1000).toFixed(4)
+
+ console.log(
+ 'Processing the whole brain volume in tfjs for multi-class output mask took : ',
+ ((performance.now() - inferenceStartTime) / 1000).toFixed(4) + ' Seconds'
+ )
+
+ // -- Timing data to collect
+ statData.Inference_t = Inference_t
+ statData.Postprocess_t = Postprocess_t
+ statData.Status = 'OK'
+
+ callbackUI('', -1, '', statData)
+ callbackUI('Segmentation finished', 0)
+ callbackImg(outimg, opts, modelEntry)
+ return 0
+ } else {
+ i++
+ }
+ }
+ } catch (err) {
+ callbackUI(err.message, -1, err.message)
+ console.log('If the WebGL context is lost, try to restore it and reload the page')
+ if (tf.memory().unreliable) {
+ const unreliableReasons = 'unreliable reasons :' + tf.memory().reasons
+ callbackUI(unreliableReasons, NaN, unreliableReasons)
+ }
+ }
+}
+
+async function inferenceFullVolumePhase2(
+ model,
+ slices_3d,
+ num_of_slices,
+ slice_height,
+ slice_width,
+ pipeline1_out,
+ modelEntry,
+ statData,
+ opts,
+ niftiImage
+) {
+ let outimg = []
+ // -- Phase-2: after removing the skull, try to allocate the brain volume and run inference
+ console.log(' ---- Start FullVolume inference phase-II ---- ')
+ const quantileNorm = modelEntry.enableQuantileNorm
+ if (quantileNorm) {
+ // Quantile normalization is used only by specific models
+ console.log('preModel Quantile normalization enabled')
+ slices_3d = await quantileNormalizeVolumeData(slices_3d)
+ } else {
+ // Min-max normalize the MRI data to the range 0 to 1
+ console.log('preModel Min Max normalization enabled')
+ slices_3d = await minMaxNormalizeVolumeData(slices_3d)
+ }
+ let mask_3d
+ if (pipeline1_out == null) {
+ // preModel is null
+
+ // Check if thresholding the MRI to remove noisy voxels for better cropping is needed.
+ const autoThresholdValue = modelEntry.autoThreshold
+
+ if (autoThresholdValue > 0 && autoThresholdValue <= 1) {
+ // Filter out noisy voxels below autoThresholdValue for better cropping
+ mask_3d = await applyMriThreshold(slices_3d, autoThresholdValue)
+ } else {
+ console.log('No valid crop threshold value')
+ // binarize original image
+ mask_3d = await slices_3d.greater([0]).asType('bool')
+ }
+ } else {
+ mask_3d = await pipeline1_out.greater([0]).asType('bool')
+ // -- pipeline1_out.dispose()
+ }
+ console.log(' mask_3d shape : ', mask_3d.shape)
+ const [row_min, row_max, col_min, col_max, depth_min, depth_max] = await firstLastNonZero3D(mask_3d)
+ mask_3d.dispose()
+ // -- Reference voxel where the cropped volume starts
+ const refVoxel = [row_min, col_min, depth_min]
+ console.log('refVoxel :', refVoxel)
+
+ // -- Starting from refVoxel, the size of the bounding volume
+ const boundVolSizeArr = [row_max - row_min + 1, col_max - col_min + 1, depth_max - depth_min + 1]
+
+ console.log('boundVolSizeArr :', boundVolSizeArr)
+ // -- Extract 3d object (e.g. brain)
+ const cropped_slices_3d = slices_3d.slice(
+ [row_min, col_min, depth_min],
+ [row_max - row_min + 1, col_max - col_min + 1, depth_max - depth_min + 1]
+ )
+
+ slices_3d.dispose()
+
+ // -- Padding size added to the cropped brain
+ const pad = modelEntry.cropPadding
+
+ // Create margin around the bounding volume
+ let cropped_slices_3d_w_pad = await addZeroPaddingTo3dTensor(cropped_slices_3d, [pad, pad], [pad, pad], [pad, pad])
+ console.log(' cropped slices_3d with padding shape: ', cropped_slices_3d_w_pad.shape)
+
+ cropped_slices_3d.dispose()
+
+
+ if (opts.drawBoundingVolume) {
+ let testVol = await removeZeroPaddingFrom3dTensor(cropped_slices_3d_w_pad, pad, pad, pad)
+ console.log(' outLabelVolume without padding shape : ', testVol.shape)
+
+ testVol = await resizeWithZeroPadding(testVol, num_of_slices, slice_height, slice_width, refVoxel, boundVolSizeArr)
+ console.log(' outLabelVolume final shape after resizing : ', testVol.shape)
+ draw3dObjBoundingVolume(tf.unstack(testVol), opts, modelEntry, callbackImg)
+ testVol.dispose()
+
+ return 0
+ }
+
+ statData.Brainchop_Ver = 'FullVolume'
+ let startTime = performance.now()
+ let adjusted_input_shape = []
+ const res = await model
+ try {
+ startTime = performance.now()
+ const inferenceStartTime = performance.now()
+ // maxLabelPredicted in whole volume of the brain
+ let maxLabelPredicted = 0
+ const transpose = modelEntry.enableTranspose
+
+ if (transpose) {
+ cropped_slices_3d_w_pad = cropped_slices_3d_w_pad.transpose()
+ console.log('Input transposed for pre-model')
+ } else {
+ console.log('Transpose not enabled for pre-model')
+ }
+
+ let i = 1
+ const layersLength = res.layers.length
+ console.log('res.layers.length ', layersLength)
+
+ const isChannelLast = await isModelChnlLast(res)
+ const batchSize = opts.batchSize
+ const numOfChan = opts.numOfChan
+
+ // -- Adjust model input shape
+ if (isChannelLast) {
+ res.layers[0].batchInputShape[1] = cropped_slices_3d_w_pad.shape[0]
+ res.layers[0].batchInputShape[2] = cropped_slices_3d_w_pad.shape[1]
+ res.layers[0].batchInputShape[3] = cropped_slices_3d_w_pad.shape[2]
+
+ adjusted_input_shape = [
+ batchSize,
+ res.layers[0].batchInputShape[1],
+ res.layers[0].batchInputShape[2],
+ res.layers[0].batchInputShape[3],
+ numOfChan
+ ]
+ } else {
+ res.layers[0].batchInputShape[2] = cropped_slices_3d_w_pad.shape[0]
+ res.layers[0].batchInputShape[3] = cropped_slices_3d_w_pad.shape[1]
+ res.layers[0].batchInputShape[4] = cropped_slices_3d_w_pad.shape[2]
+
+ adjusted_input_shape = [
+ batchSize,
+ numOfChan,
+ res.layers[0].batchInputShape[2],
+ res.layers[0].batchInputShape[3],
+ res.layers[0].batchInputShape[4]
+ ]
+ }
+
+ console.log(' Model batch input shape : ', res.layers[0].batchInputShape)
+ // -- batchInputShape {Array} input_shape - e.g. [?, D, H, W, Ch] or [?, Ch, D, H, W]
+
+ statData.Input_Shape = JSON.stringify(res.layers[0].batchInputShape)
+ statData.Output_Shape = JSON.stringify(res.output.shape)
+ statData.Channel_Last = await isChannelLast
+ statData.Model_Param = await getModelNumParameters(res)
+ statData.Model_Layers = await getModelNumLayers(res)
+ statData.Model = modelEntry.modelName
+ // statData.Extra_Info = null
+
+ const curTensor = []
+ curTensor[0] = cropped_slices_3d_w_pad.reshape(adjusted_input_shape)
+ // console.log("curTensor[0] :", curTensor[0].dataSync())
+
+ while (true) {
+ try {
+ // -- curTensor[i] = res.layers[i].apply( curTensor[i-1])
+ curTensor[i] = res.layers[i].apply(curTensor[i - 1])
+ } catch (err) {
+ callbackUI(err.message, -1, err.message)
+ tf.engine().endScope()
+ tf.engine().disposeVariables()
+
+ statData.Inference_t = Infinity
+ statData.Postprocess_t = Infinity
+ statData.Status = 'Fail'
+ statData.Error_Type = err.message
+ statData.Extra_Err_Info = 'Failed while model layer ' + i + ' apply'
+
+ callbackUI('', -1, '', statData)
+
+ return 0
+ }
+ callbackUI('Layer ' + i.toString(), (i + 1) / layersLength)
+ console.log('layer output Tensor shape : ', curTensor[i].shape)
+ console.log('layer count params ', res.layers[i].countParams())
+ res.layers[i].dispose()
+ curTensor[i - 1].dispose()
+ if (tf.memory().unreliable) {
+ const unreliableReasons = 'unreliable reasons :' + tf.memory().reasons
+ callbackUI(unreliableReasons, NaN, unreliableReasons)
+ }
+
+ if (i === layersLength - 1) {
+ // prediction = res.layers[res.layers.length-1].apply(curTensor[i])
+ // curTensor[i].print()
+ // outputDataBeforArgmx = Array.from(curTensor[i].dataSync())
+
+ const axis = isChannelLast ? -1 : 1
+ console.log(' find argmax ')
+ console.log('last Tensor shape : ', curTensor[i].shape)
+ // -- curTensor[i].shape e.g. [ 1, 256, 256, 256, 3 ]
+ const expected_Num_labels = isChannelLast ? curTensor[i].shape[4] : curTensor[i].shape[1]
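+ // Axis sketch (illustrative): for a channel-last output of shape
+ // [1, 256, 256, 256, 3], argMax over axis -1 collapses the class dimension,
+ // yielding a label volume of shape [1, 256, 256, 256].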
+ let prediction_argmax
+
+ // Try for argMax with model output tensor.
+
+ try {
+ const argMaxTime = performance.now()
+ console.log(' Try tf.argMax for fullVolume ..')
+ prediction_argmax = tf.argMax(curTensor[i], axis)
+ console.log('tf.argMax for fullVolume takes : ', ((performance.now() - argMaxTime) / 1000).toFixed(4))
+ } catch (err1) {
+ // if channel last
+ if (axis === -1) {
+ try {
+ const argMaxLargeTime = performance.now()
+ console.log(' tf.argMax failed .. try argMaxLarge ..')
+ callbackUI('', -1, 'tensor2LightBuffer() is not dead code?')
+ callbackUI('', -1, 'argMaxLarge() is not dead code?')
+ console.log(
+ 'argMaxLarge for fullVolume takes : ',
+ ((performance.now() - argMaxLargeTime) / 1000).toFixed(4)
+ )
+ } catch (err2) {
+ const errTxt = "argMax buffer couldn't be created due to limited memory resources."
+ callbackUI(errTxt, -1, errTxt)
+
+ tf.engine().endScope()
+ tf.engine().disposeVariables()
+
+ statData.Inference_t = Infinity
+ statData.Postprocess_t = Infinity
+ statData.Status = 'Fail'
+ statData.Error_Type = err2.message
+ statData.Extra_Err_Info = 'prediction_argmax from argMaxLarge failed'
+
+ callbackUI('', -1, '', statData)
+ return 0
+ }
+ } else {
+ // if channel first ..
+ const errTxt = "argMax buffer couldn't be created due to limited memory resources."
+ callbackUI(errTxt, -1, errTxt)
+
+
+ tf.engine().endScope()
+ tf.engine().disposeVariables()
+
+ statData.Inference_t = Infinity
+ statData.Postprocess_t = Infinity
+ statData.Status = 'Fail'
+ statData.Error_Type = err1.message
+ statData.Extra_Err_Info = 'prediction_argmax from argMaxLarge does not yet support channel-first'
+
+ callbackUI('', -1, '', statData)
+
+ return 0
+ }
+ }
+
+ console.log(' prediction_argmax shape : ', prediction_argmax.shape)
+ // -- prediction_argmax.shape : [ 1, 256, 256, 256]
+
+ const Inference_t = ((performance.now() - startTime) / 1000).toFixed(4)
+
+ // outputDataBeforArgmx = Array.from(prediction_argmax.dataSync())
+ tf.dispose(curTensor[i])
+ console.log(' find array max ')
+ const curBatchMaxLabel = (await prediction_argmax.max().data())[0]
+
+ if (maxLabelPredicted < curBatchMaxLabel) {
+ maxLabelPredicted = curBatchMaxLabel
+ }
+
+ const numSegClasses = maxLabelPredicted + 1
+ console.log('numSegClasses', numSegClasses)
+ statData.Actual_Labels = numSegClasses
+ statData.Expect_Labels = expected_Num_labels
+ statData.NumLabels_Match = numSegClasses === expected_Num_labels
+
+ if (numSegClasses !== expected_Num_labels) {
+ // errTxt = "expected " + expected_Num_labels + " labels, but the predicted are " + numSegClasses + ". For possible solutions please refer to FAQ .", "alert-error"
+ const errTxt = 'expected ' + expected_Num_labels + ' labels, but the predicted are ' + numSegClasses
+ callbackUI(errTxt, -1, errTxt)
+ }
+
+ // -- Transpose back to original unpadded size
+ let outLabelVolume = prediction_argmax.reshape([
+ cropped_slices_3d_w_pad.shape[0],
+ cropped_slices_3d_w_pad.shape[1],
+ cropped_slices_3d_w_pad.shape[2]
+ ])
+ tf.dispose(prediction_argmax)
+
+ // Transpose MRI data to match the pytorch/keras input/output orientation
+ if (transpose) {
+ console.log('outLabelVolume transposed')
+ outLabelVolume = outLabelVolume.transpose()
+ }
+ outLabelVolume = await removeZeroPaddingFrom3dTensor(outLabelVolume, pad, pad, pad)
+ console.log(' outLabelVolume without padding shape : ', outLabelVolume.shape)
+ outLabelVolume = await resizeWithZeroPadding(
+ outLabelVolume,
+ num_of_slices,
+ slice_height,
+ slice_width,
+ refVoxel,
+ boundVolSizeArr
+ )
+ console.log(' outLabelVolume final shape after resizing : ', outLabelVolume.shape)
+
+ const filterOutWithPreMask = modelEntry.filterOutWithPreMask
+ // To clean the skull area wrongly segmented in phase-2.
+ if (pipeline1_out != null && opts.isBrainCropMaskBased && filterOutWithPreMask) {
+ const bin = binarizeVolumeDataTensor(pipeline1_out)
+ outLabelVolume = outLabelVolume.mul(bin)
+ }
+
+ startTime = performance.now()
+ // Generate output volume or slices
+ console.log('Generating correct output')
+
+ try {
+ const img = new Uint32Array(outLabelVolume.dataSync())
+ const Vshape = outLabelVolume.shape
+ const Vtype = outLabelVolume.dtype
+ tf.dispose(outLabelVolume)
+ tf.engine().endScope()
+ tf.engine().disposeVariables()
+ outimg = await generateOutputSlicesV2(
+ img,
+ Vshape,
+ Vtype,
+ num_of_slices,
+ numSegClasses,
+ slice_height,
+ slice_width,
+ modelEntry,
+ opts,
+ niftiImage
+ )
+ console.log(' Phase-2 num of tensors after generateOutputSlicesV2: ', tf.memory().numTensors)
+ } catch (error) {
+ // -- Timing data to collect
+ tf.engine().endScope()
+ tf.engine().disposeVariables()
+
+ const errTxt = 'Failed while generating output due to limited browser memory available'
+ callbackUI(errTxt, -1, errTxt)
+ statData.Inference_t = Inference_t
+ statData.Postprocess_t = Infinity
+ statData.Status = 'Fail'
+ statData.Error_Type = error.message
+ statData.Extra_Err_Info = 'Failed while generating output'
+
+ callbackUI('', -1, '', statData)
+
+ return 0
+ }
+
+ const Postprocess_t = ((performance.now() - startTime) / 1000).toFixed(4)
+
+ tf.engine().disposeVariables()
+
+ console.log(
+ 'Processing the whole brain volume in tfjs for multi-class output mask took : ',
+ ((performance.now() - inferenceStartTime) / 1000).toFixed(4) + ' Seconds'
+ )
+
+ // -- Timing data to collect
+ statData.Inference_t = Inference_t
+ statData.Postprocess_t = Postprocess_t
+ statData.Status = 'OK'
+ callbackUI('Segmentation finished', 0)
+ callbackUI('', -1, '', statData)
+ callbackImg(outimg, opts, modelEntry)
+
+ return 0
+ }
+ i++
+ }
+ } catch (err) {
+ callbackUI(err.message, -1, err.message)
+ console.log('If the WebGL context is lost, try to restore it and reload the page')
+ }
+}
+
+async function inferenceFullVolumePhase1(
+ model,
+ slices_3d,
+ num_of_slices,
+ slice_height,
+ slice_width,
+ isModelFullVol,
+ modelEntry,
+ statData,
+ opts,
+ niftiHeader,
+ niftiImage
+) {
+ statData.No_SubVolumes = 1
+ // Load the pre-model for inference first; it can be null if there is no pre-model (e.g. the GWM models)
+ if (modelEntry.preModelId) {
+ const preModel = await load_model(opts.rootURL + inferenceModelsList[modelEntry.preModelId - 1].path)
+ const transpose = inferenceModelsList[modelEntry.preModelId - 1].enableTranspose
+ const quantileNorm = inferenceModelsList[modelEntry.preModelId - 1].enableQuantileNorm
+ let preModel_slices_3d = null
+
+ // -- If the pre-model is not null, a slices_3d mask will be generated.
+ // -- The mask is needed to remove the skull, set background noise to 0, and properly obtain the brain bounding volume
+ const slices_3d_mask = null
+
+ if (quantileNorm) {
+ // Quantile normalization is used only by specific models
+ console.log('preModel Quantile normalization enabled')
+ preModel_slices_3d = await quantileNormalizeVolumeData(slices_3d)
+ } else {
+ // Min-max normalize the MRI data to the range 0 to 1
+ console.log('preModel Min Max normalization enabled')
+ preModel_slices_3d = await minMaxNormalizeVolumeData(slices_3d)
+ }
+
+ // -- Transpose MRI data to match the pytorch/keras input/output orientation
+ // -- Check if pre-model needs transpose..
+ if (transpose) {
+ preModel_slices_3d = preModel_slices_3d.transpose()
+ console.log('Input transposed for pre-model')
+ } else {
+ console.log('Transpose not enabled for pre-model')
+ }
+
+ statData.Brainchop_Ver = 'PreModel_FV' // e.g. "PreModel_FV"
+
+ const res = await preModel
+
+ try {
+ const inferenceStartTime = performance.now()
+ const preModelObject = res
+
+ // read input shape from model.json object
+ const preModelBatchInputShape = preModelObject.layers[0].batchInputShape
+ console.log(' Pre-Model batch input shape : ', preModelBatchInputShape)
+
+ // -- Verify input shape
+ if (preModelBatchInputShape.length !== 5) {
+ const errTxt = 'The pre-model input shape must be 5D'
+ callbackUI(errTxt, -1, errTxt)
+ return 0
+ }
+
+ const isPreModelChannelLast = await isModelChnlLast(preModelObject)
+ const batchSize = opts.batchSize
+ const numOfChan = opts.numOfChan
+ let batch_D, batch_H, batch_W
+ let preModel_input_shape
+ if (isPreModelChannelLast) {
+ console.log('Pre-Model Channel Last')
+ if (isNaN(preModelBatchInputShape[4]) || preModelBatchInputShape[4] !== 1) {
+ const errTxt = 'The number of channels for pre-model input shape must be 1'
+ callbackUI(errTxt, -1, errTxt)
+ return 0
+ }
+
+ batch_D = preModelBatchInputShape[1]
+ batch_H = preModelBatchInputShape[2]
+ batch_W = preModelBatchInputShape[3]
+
+ preModel_input_shape = [batchSize, batch_D, batch_H, batch_W, numOfChan]
+ } else {
+ console.log('Pre-Model Channel First')
+ if (isNaN(preModelBatchInputShape[1]) || preModelBatchInputShape[1] !== 1) {
+ const errTxt = 'The number of channels for pre-model input shape must be 1'
+ callbackUI(errTxt, -1, errTxt)
+ return 0
+ }
+
+ batch_D = preModelBatchInputShape[2]
+ batch_H = preModelBatchInputShape[3]
+ batch_W = preModelBatchInputShape[4]
+
+ preModel_input_shape = [batchSize, numOfChan, batch_D, batch_H, batch_W]
+ }
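+
+ // Shape sketch (illustrative): a pre-model trained on full 256^3 volumes
+ // would report batchInputShape [null, 256, 256, 256, 1] (channel-last),
+ // making preModel_input_shape [1, 256, 256, 256, 1] here.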
+
+ statData.Input_Shape = JSON.stringify(preModel_input_shape)
+ statData.Output_Shape = JSON.stringify(preModelObject.output.shape)
+ statData.Channel_Last = await isPreModelChannelLast
+ statData.Model_Param = await getModelNumParameters(preModelObject)
+ statData.Model_Layers = await getModelNumLayers(preModelObject)
+
+ // maxLabelPredicted in whole volume of the brain
+ let maxLabelPredicted = 0
+
+ let i = 1
+ const layersLength = res.layers.length
+
+ const curTensor = []
+ // -- reshape MRI to model input shape
+ curTensor[0] = preModel_slices_3d.reshape(preModel_input_shape)
+
+ // Dispose the volume
+ tf.dispose(preModel_slices_3d)
+ while (true) {
+ try {
+ curTensor[i] = res.layers[i].apply(curTensor[i - 1])
+ } catch (err) {
+ const errTxt = 'Your graphics card (e.g. Intel) may not be compatible with WebGL. ' + err.message
+ callbackUI(errTxt, -1, errTxt)
+
+ tf.engine().endScope()
+ tf.engine().disposeVariables()
+
+ statData.Inference_t = Infinity
+ statData.Postprocess_t = Infinity
+ statData.Status = 'Fail'
+ statData.Error_Type = err.message
+ statData.Extra_Err_Info = 'PreModel Failed while model layer ' + i + ' apply'
+
+ callbackUI('', -1, '', statData)
+
+ return 0
+ }
+
+ res.layers[i].dispose()
+ curTensor[i - 1].dispose()
+
+ callbackUI('Layer ' + i.toString(), (i + 1) / layersLength)
+ if (tf.memory().unreliable) {
+ const unreliableReasons = 'unreliable reasons: ' + tf.memory().reasons
+ callbackUI(unreliableReasons, NaN, unreliableReasons)
+ }
+
+ if (i === layersLength - 1) {
+ // -- prediction = res.layers[res.layers.length-1].apply(curTensor[i])
+ // -- curTensor[i].print()
+ // -- outputDataBeforArgmx = Array.from(curTensor[i].dataSync())
+
+ const axis = isPreModelChannelLast ? -1 : 1
+ console.log(' find argmax ')
+ console.log('last Tensor shape : ', curTensor[i].shape)
+ // -- curTensor[i].shape : [ 1, 256, 256, 256, 3 ]
+ const expected_Num_labels = isPreModelChannelLast ? curTensor[i].shape[4] : curTensor[i].shape[1]
+ let prediction_argmax
+
+ // Try for argMax with model output tensor.
+
+ try {
+ console.log(' Try tf.argMax for fullVolume ..')
+ prediction_argmax = await tf.argMax(curTensor[i], axis)
+ } catch (err1) {
+ // if channel last
+ if (axis === -1) {
+ try {
+ const argMaxLargeTime = performance.now()
+ console.log(' tf.argMax failed .. try argMaxLarge ..')
+ callbackUI('', -1, 'tensor2LightBuffer() is not dead code?')
+ callbackUI('', -1, 'argMaxLarge() is not dead code?')
+ console.log(
+ 'argMaxLarge for fullVolume takes : ',
+ ((performance.now() - argMaxLargeTime) / 1000).toFixed(4)
+ )
+ } catch (err2) {
+ const errTxt = "argMax buffer couldn't be created due to limited memory resources."
+ callbackUI(errTxt, -1, errTxt)
+
+ if (prediction_argmax) prediction_argmax.dispose() // may be undefined if argMax threw before assignment
+
+ tf.engine().endScope()
+ tf.engine().disposeVariables()
+
+ statData.Inference_t = Infinity
+ statData.Postprocess_t = Infinity
+ statData.Status = 'Fail'
+ statData.Error_Type = err2.message
+ statData.Extra_Err_Info = 'preModel prediction_argmax from argMaxLarge failed'
+
+ callbackUI('', -1, '', statData)
+
+ return 0
+ }
+ } else {
+ // if channel first ..
+ const errTxt = "argMax buffer couldn't be created due to limited memory resources."
+ callbackUI(errTxt, -1, errTxt)
+
+ if (prediction_argmax) prediction_argmax.dispose() // may be undefined if argMax threw before assignment
+
+ tf.engine().endScope()
+ tf.engine().disposeVariables()
+
+ statData.Inference_t = Infinity
+ statData.Postprocess_t = Infinity
+ statData.Status = 'Fail'
+ statData.Error_Type = err1.message
+ statData.Extra_Err_Info = 'preModel prediction_argmax from argMaxLarge does not yet support channel first'
+
+ callbackUI('', -1, '', statData)
+
+ return 0
+ }
+ }
+
+ console.log(' Pre-model prediction_argmax shape : ', prediction_argmax.shape)
+ // -- prediction_argmax.shape : [ 1, 256, 256, 256]
+
+ const Inference_t = ((performance.now() - inferenceStartTime) / 1000).toFixed(4)
+
+ tf.dispose(curTensor[i])
+
+ console.log(' Pre-model find array max ')
+ const curBatchMaxLabel = prediction_argmax.max().dataSync()[0]
+
+ if (maxLabelPredicted < curBatchMaxLabel) {
+ maxLabelPredicted = curBatchMaxLabel
+ }
+
+ const numSegClasses = maxLabelPredicted + 1
+ console.log('Pre-model numSegClasses', numSegClasses)
+
+ statData.Actual_Labels = numSegClasses
+ statData.Expect_Labels = expected_Num_labels
+ statData.NumLabels_Match = numSegClasses === expected_Num_labels
+
+ // -- Transpose back to original unpadded size
+ let outLabelVolume = await prediction_argmax.reshape([num_of_slices, slice_height, slice_width])
+ tf.dispose(prediction_argmax)
+ // Transpose back if the input was transposed for the pre-model
+ if (transpose) {
+ console.log('Pre-model outLabelVolume transposed')
+ outLabelVolume = outLabelVolume.transpose()
+ }
+ const startTime = performance.now()
+ // Generate output volume or slices
+ console.log('Generating pre-model output')
+ let slices_3d_mask
+ try {
+ const unstackOutVolumeTensor = await tf.unstack(outLabelVolume)
+ slices_3d_mask = await generateBrainMask(
+ unstackOutVolumeTensor,
+ num_of_slices,
+ slice_height,
+ slice_width,
+ modelEntry,
+ opts,
+ niftiHeader,
+ niftiImage,
+ false
+ )
+ await tf.dispose(outLabelVolume)
+ console.log(' Phase-1 num of tensors after generateBrainMask: ', tf.memory().numTensors)
+ } catch (error) {
+ // -- Timing data to collect
+ tf.engine().endScope()
+ tf.engine().disposeVariables()
+
+ const errTxt = 'Failed while generating pre-model output due to limited browser memory'
+ callbackUI(errTxt, -1, errTxt)
+
+ statData.Inference_t = Inference_t
+ statData.Postprocess_t = Infinity
+ statData.Status = 'Fail'
+ statData.Error_Type = error.message
+ statData.Extra_Err_Info = 'Pre-model failed while generating output'
+
+ callbackUI('', -1, '', statData)
+
+ return 0
+ }
+ const Postprocess_t = ((performance.now() - startTime) / 1000).toFixed(4)
+ console.log(
+ 'Pre-model processing of the whole brain volume in tfjs for the multi-class output mask took: ',
+ ((performance.now() - inferenceStartTime) / 1000).toFixed(4) + ' Seconds'
+ )
+
+ // -- Timing data to collect
+ statData.Inference_t = Inference_t
+ statData.Postprocess_t = Postprocess_t
+ statData.Status = 'OK'
+
+ callbackUI('', -1, '', statData)
+
+ if (slices_3d_mask == null) {
+ const msg = 'slices_3d_mask failed ...'
+ callbackUI(msg, -1, msg)
+ return 0
+ } else {
+ // -- Phase-2: after removing the skull, try to allocate the brain volume and make inference
+ console.log('--- pre-model done ---')
+ // --mask_3d = slices_3d_mask.greater([0]).asType('bool')
+ // --slices_3d_mask.dispose()
+
+ if (isModelFullVol) {
+ if (modelEntry.enableSeqConv) {
+ // Mask cropping & seq conv
+ // A non-atlas model (e.g. GWM) needs a sequential convolution layer.
+ // The sequential convolution layer is used after cropping - slow but reliable on most machines.
+ console.log('------ Mask Cropping & Seq Convolution ------')
+ await inferenceFullVolumeSeqCovLayerPhase2(
+ opts,
+ modelEntry,
+ model,
+ slices_3d,
+ num_of_slices,
+ slice_height,
+ slice_width,
+ slices_3d_mask,
+ statData,
+ niftiImage
+ )
+ return 0
+ // inferenceFullVolumeSeqCovLayerPhase2(model, slices_3d.transpose(), num_of_slices, slice_height, slice_width, slices_3d_mask)
+ } else {
+ // Mask cropping BUT no seq conv
+ console.log('------ Mask Cropping - NO Seq Convolution ------')
+ await inferenceFullVolumePhase2(
+ model,
+ slices_3d,
+ num_of_slices,
+ slice_height,
+ slice_width,
+ slices_3d_mask,
+ modelEntry,
+ statData,
+ opts,
+ niftiImage
+ )
+ // inferenceFullVolumePhase2(model, slices_3d.transpose(), num_of_slices, slice_height, slice_width, slices_3d_mask)
+ }
+ } else {
+ // -- As of version 3.0.0 this function is not used
+ callbackUI('', -1, 'inferenceSubVolumes() is not dead code?')
+ }
+ }
+ }
+ i++
+ }
+ } catch (err) {
+ callbackUI(err.message, -1, err.message)
+ console.log(
+ 'If the webgl context is lost, try to restore it by visiting the link ' +
+ 'here'
+ )
+
+ // document.getElementById("webGl2Status").style.backgroundColor = isWebGL2ContextLost() ? "Red" : "Green"
+ // document.getElementById("memoryStatus").style.backgroundColor = tf.memory().unreliable ? "Red" : "Green"
+ }
+ // })
+
+ // -- if(...) end
+ } else {
+ // No preModel
+
+ // -- Phase-2: after removing the skull, try to allocate the brain volume and make inference
+ console.log('--- No pre-model is selected ---')
+ console.log('------ Run voxel cropping ------')
+ // -- mask_3d = slices_3d.greater([0]).asType('bool')
+
+ if (isModelFullVol) {
+ if (modelEntry.enableSeqConv) {
+ // Voxel cropping & seq conv
+ // A non-atlas model (e.g. GWM) needs a sequential convolution layer.
+ // The sequential convolution layer is used after cropping - slow but reliable on most machines.
+ console.log('------ Seq Convolution ------')
+ await inferenceFullVolumeSeqCovLayerPhase2(
+ opts,
+ modelEntry,
+ model,
+ slices_3d,
+ num_of_slices,
+ slice_height,
+ slice_width,
+ null,
+ statData,
+ niftiImage
+ )
+ } else {
+ // Voxel cropping BUT no seq conv
+ // TODO: the result is not used (was: const outimg = await ...)
+ inferenceFullVolumePhase2(
+ model,
+ slices_3d,
+ num_of_slices,
+ slice_height,
+ slice_width,
+ null,
+ modelEntry,
+ statData,
+ opts,
+ niftiImage
+ )
+ }
+ } else {
+ // -- As of version 3.0.0 this function is not used
+ callbackUI('', -1, 'inferenceSubVolumes() is not dead code?')
+ }
+ }
+}
+
+async function enableProductionMode(textureF16Flag = true) {
+ // -- tf.setBackend('cpu')
+ tf.setBackend('webgl')
+ // -- tf.removeBackend('cpu')
+ // -- Calling enableProdMode() method
+ await tf.enableProdMode()
+ // -- Setting debug mode of the environment
+ tf.env().set('DEBUG', false)
+ tf.env().set('WEBGL_FORCE_F16_TEXTURES', textureF16Flag)
+ // -- set this flag so that textures are deleted when tensors are disposed.
+ tf.env().set('WEBGL_DELETE_TEXTURE_THRESHOLD', 0)
+ // -- tf.env().set('WEBGL_PACK', false)
+ // -- Put ready after sets above
+ await tf.ready()
+ // -- Printing output
+ console.log('tf env() flags :', tf.env().flags)
+ console.log('tf env() features :', tf.env().features)
+ console.log('tf env total features: ', Object.keys(tf.env().features).length)
+ console.log('tf backend: ', tf.getBackend())
+}
+
+async function runInferenceWW(opts, modelEntry, niftiHeader, niftiImage) {
+ const statData = {}
+ statData.startTime = Date.now() // for common webworker/mainthread do not use performance.now()
+ callbackUI('Segmentation started', 0)
+ const batchSize = opts.batchSize
+ const numOfChan = opts.numOfChan
+ if (isNaN(batchSize) || batchSize !== 1) {
+ const errTxt = 'The batch size for the input shape must be 1'
+ callbackUI(errTxt, -1, errTxt)
+ return 0
+ }
+ if (isNaN(numOfChan) || numOfChan !== 1) {
+ const errTxt = 'The number of channels for input shape must be 1'
+ callbackUI(errTxt, -1, errTxt)
+ return 0
+ }
+ tf.engine().startScope()
+ console.log('Batch size: ', batchSize)
+ console.log('Num of Channels: ', numOfChan)
+ const model = await load_model(opts.rootURL + modelEntry.path)
+ await enableProductionMode(true)
+ statData.TF_Backend = tf.getBackend()
+ const modelObject = model
+ let batchInputShape = []
+ // free the global variable of 16777216 voxels
+ // allOutputSlices3DCC1DimArray = []
+ // outputSceneRendered = false
+ // read input shape from model.json object
+ batchInputShape = modelObject.layers[0].batchInputShape
+ console.log(' Model batch input shape : ', batchInputShape)
+ // -- Verify input shape
+ if (batchInputShape.length !== 5) {
+ const errTxt = 'The model input shape must be 5D'
+ callbackUI(errTxt, -1, errTxt)
+ return 0
+ }
+ let batch_D, batch_H, batch_W
+ const slice_width = niftiHeader.dims[1]
+ const slice_height = niftiHeader.dims[2]
+ const num_of_slices = niftiHeader.dims[3]
+ const isChannelLast = await isModelChnlLast(modelObject)
+ if (isChannelLast) {
+ console.log('Model Channel Last')
+ if (isNaN(batchInputShape[4]) || batchInputShape[4] !== 1) {
+ const errTxt = 'The number of channels for input shape must be 1'
+ callbackUI(errTxt, -1, errTxt)
+ return 0
+ }
+ batch_D = batchInputShape[1]
+ batch_H = batchInputShape[2]
+ batch_W = batchInputShape[3]
+ } else {
+ console.log('Model Channel First')
+ if (isNaN(batchInputShape[1]) || batchInputShape[1] !== 1) {
+ const errTxt = 'The number of channels for input shape must be 1'
+ callbackUI(errTxt, -1, errTxt)
+ return 0
+ }
+ batch_D = batchInputShape[2]
+ batch_H = batchInputShape[3]
+ batch_W = batchInputShape[4]
+ }
+ // const input_shape = [batchSize, numOfChan, batch_D, batch_H, batch_W]
+ // --Check whether the model will make inference at once as FullVolumeModel
+ let isModelFullVol
+ if (batch_D === 256 && batch_H === 256 && batch_W === 256) {
+ isModelFullVol = true
+ } else {
+ isModelFullVol = false
+ }
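+ // A 256^3 batch input means the model can infer the whole conformed volume
+ // in a single pass; other shapes would require sub-volume inference.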
+ statData.isModelFullVol = isModelFullVol
+ // Model output number of segmentations
+ let slices_3d = await getAllSlicesDataAsTF3D(num_of_slices, niftiHeader, niftiImage)
+ const transpose = modelEntry.enableTranspose
+ const enableCrop = modelEntry.enableCrop
+ if (isModelFullVol) {
+ if (enableCrop) {
+ // FullVolume with Crop option before inference ..
+ // pre-model to mask the volume; can also be null, in which case cropping is applied directly to the MRI
+ await inferenceFullVolumePhase1(
+ model,
+ slices_3d,
+ num_of_slices,
+ slice_height,
+ slice_width,
+ isModelFullVol,
+ modelEntry,
+ statData,
+ opts,
+ niftiHeader,
+ niftiImage
+ )
+ } else {
+ // Transpose MRI data to match the pytorch/keras input/output orientation
+ console.log('Cropping Disabled')
+
+ if (transpose) {
+ slices_3d = slices_3d.transpose()
+ console.log('Input transposed')
+ } else {
+ console.log('Transpose NOT Enabled')
+ }
+
+ const enableSeqConv = modelEntry.enableSeqConv
+
+ if (enableSeqConv) {
+ callbackUI('', -1, 'inferenceFullVolumeSeqCovLayer() is not dead code?')
+ } else {
+ callbackUI('', -1, 'inferenceFullVolume() is not dead code?')
+ }
+ }
+ }
+}
+
+self.addEventListener(
+ 'message',
+ function (event) {
+ runInferenceWW(event.data.opts, event.data.modelEntry, event.data.niftiHeader, event.data.niftiImage)
+ },
+ false
+)
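+
+// Main-thread usage sketch (hypothetical worker path; assumes a bundler that
+// supports module workers). The message shape matches the listener above:
+// const worker = new Worker(new URL('./brainchop-webworker.js', import.meta.url), { type: 'module' })
+// worker.postMessage({ opts, modelEntry, niftiHeader, niftiImage })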
diff --git a/bwlabels.js b/bwlabels.js
new file mode 100644
index 0000000..d83dc7b
--- /dev/null
+++ b/bwlabels.js
@@ -0,0 +1,278 @@
+export class BWLabeler {
+ // port of https://github.com/rordenlab/niimath/blob/master/src/bwlabel.c
+ // return voxel address given row A, column B, and slice C
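+ // e.g. with DIM = [256, 256, 256], idx(1, 2, 3) = 3*256*256 + 2*256 + 1 = 197121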
+ idx(A, B, C, DIM) {
+ return C * DIM[0] * DIM[1] + B * DIM[0] + A
+ } // idx()
+
+ // determine if voxels below candidate voxel have already been assigned a label
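+ // bw: binarized volume, il: provisional labels, (r, c, sl): voxel coordinates,
+ // conn: connectivity (6/18/26), tt: translation table, nabo/tn: scratch buffers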
+ check_previous_slice(bw, il, r, c, sl, dim, conn, tt, nabo, tn) {
+ let nr_set = 0
+ if (!sl) {
+ return 0
+ }
+ const val = bw[this.idx(r, c, sl, dim)]
+ if (conn >= 6) {
+ const idx = this.idx(r, c, sl - 1, dim)
+ if (val === bw[idx]) {
+ nabo[nr_set++] = il[idx]
+ }
+ }
+ if (conn >= 18) {
+ if (r) {
+ const idx = this.idx(r - 1, c, sl - 1, dim)
+ if (val === bw[idx]) {
+ nabo[nr_set++] = il[idx]
+ }
+ }
+ if (c) {
+ const idx = this.idx(r, c - 1, sl - 1, dim)
+ if (val === bw[idx]) {
+ nabo[nr_set++] = il[idx]
+ }
+ }
+ if (r < dim[0] - 1) {
+ const idx = this.idx(r + 1, c, sl - 1, dim)
+ if (val === bw[idx]) {
+ nabo[nr_set++] = il[idx]
+ }
+ }
+ if (c < dim[1] - 1) {
+ const idx = this.idx(r, c + 1, sl - 1, dim)
+ if (val === bw[idx]) {
+ nabo[nr_set++] = il[idx]
+ }
+ }
+ }
+ if (conn === 26) {
+ if (r && c) {
+ const idx = this.idx(r - 1, c - 1, sl - 1, dim)
+ if (val === bw[idx]) {
+ nabo[nr_set++] = il[idx]
+ }
+ }
+ if (r < dim[0] - 1 && c) {
+ const idx = this.idx(r + 1, c - 1, sl - 1, dim)
+ if (val === bw[idx]) {
+ nabo[nr_set++] = il[idx]
+ }
+ }
+ if (r && c < dim[1] - 1) {
+ const idx = this.idx(r - 1, c + 1, sl - 1, dim)
+ if (val === bw[idx]) {
+ nabo[nr_set++] = il[idx]
+ }
+ }
+ if (r < dim[0] - 1 && c < dim[1] - 1) {
+ const idx = this.idx(r + 1, c + 1, sl - 1, dim)
+ if (val === bw[idx]) {
+ nabo[nr_set++] = il[idx]
+ }
+ }
+ }
+ if (nr_set) {
+ this.fill_tratab(tt, nabo, nr_set, tn)
+ return nabo[0]
+ } else {
+ return 0
+ }
+ } // check_previous_slice()
+
+ // provisionally label all voxels in volume
+ do_initial_labelling(bw, dim, conn) {
+ const naboPS = new Uint32Array(32)
+ const tn = new Uint32Array(32)
+ let label = 1
+ const kGrowArrayBy = 8192
+ let ttn = kGrowArrayBy
+ let tt = new Uint32Array(ttn).fill(0)
+ const il = new Uint32Array(dim[0] * dim[1] * dim[2]).fill(0)
+ const nabo = new Uint32Array(27)
+ for (let sl = 0; sl < dim[2]; sl++) {
+ for (let c = 0; c < dim[1]; c++) {
+ for (let r = 0; r < dim[0]; r++) {
+ let nr_set = 0
+ const val = bw[this.idx(r, c, sl, dim)]
+ if (val === 0) {
+ continue
+ }
+ nabo[0] = this.check_previous_slice(bw, il, r, c, sl, dim, conn, tt, naboPS, tn)
+ if (nabo[0]) {
+ nr_set += 1
+ }
+ if (conn >= 6) {
+ if (r) {
+ const idx = this.idx(r - 1, c, sl, dim)
+ if (val === bw[idx]) {
+ nabo[nr_set++] = il[idx]
+ }
+ }
+ if (c) {
+ const idx = this.idx(r, c - 1, sl, dim)
+ if (val === bw[idx]) {
+ nabo[nr_set++] = il[idx]
+ }
+ }
+ }
+ if (conn >= 18) {
+ if (c && r) {
+ const idx = this.idx(r - 1, c - 1, sl, dim)
+ if (val === bw[idx]) {
+ nabo[nr_set++] = il[idx]
+ }
+ }
+ if (c && r < dim[0] - 1) {
+ const idx = this.idx(r + 1, c - 1, sl, dim)
+ if (val === bw[idx]) {
+ nabo[nr_set++] = il[idx]
+ }
+ }
+ }
+ if (nr_set) {
+ il[this.idx(r, c, sl, dim)] = nabo[0]
+ this.fill_tratab(tt, nabo, nr_set, tn)
+ } else {
+ il[this.idx(r, c, sl, dim)] = label
+ if (label >= ttn) {
+ ttn += kGrowArrayBy
+ const ext = new Uint32Array(ttn)
+ ext.set(tt)
+ tt = ext
+ }
+ tt[label - 1] = label
+ label++
+ }
+ }
+ }
+ }
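+ // Flatten the translation table: follow each chain of merged labels so every
+ // provisional label maps directly to its root.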
+ for (let i = 0; i < label - 1; i++) {
+ let j = i
+ while (tt[j] !== j + 1) {
+ j = tt[j] - 1
+ }
+ tt[i] = j + 1
+ }
+ return [label - 1, tt, il]
+ } // do_initial_labelling()
+
+ // the translation table unifies regions that received multiple provisional labels
+ fill_tratab(tt, nabo, nr_set, tn) {
+ // let cntr = 0
+ //tn.fill(0)
+ const INT_MAX = 2147483647
+ let ltn = INT_MAX
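+ // Walk each neighbor label up to its root and remember the smallest root seen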
+ for (let i = 0; i < nr_set; i++) {
+ let j = nabo[i]
+ // cntr = 0
+ while (tt[j - 1] !== j) {
+ j = tt[j - 1]
+ /* cntr++
+ if (cntr > 100) {
+ console.log('\nOoh no!!')
+ break
+ } */
+ }
+ tn[i] = j
+ ltn = Math.min(ltn, j)
+ }
+ for (let i = 0; i < nr_set; i++) {
+ tt[tn[i] - 1] = ltn
+ }
+ } // fill_tratab()
+
+ // remove any residual gaps so label numbers are dense rather than sparse
+ translate_labels(il, dim, tt, ttn) {
+ const nvox = dim[0] * dim[1] * dim[2]
+ let ml = 0
+ const l = new Uint32Array(nvox).fill(0)
+ for (let i = 0; i < ttn; i++) {
+ ml = Math.max(ml, tt[i])
+ }
+ const fl = new Uint32Array(ml).fill(0)
+ let cl = 0
+ for (let i = 0; i < nvox; i++) {
+ if (il[i]) {
+ if (!fl[tt[il[i] - 1] - 1]) {
+ cl += 1
+ fl[tt[il[i] - 1] - 1] = cl
+ }
+ l[i] = fl[tt[il[i] - 1] - 1]
+ }
+ }
+ return [cl, l]
+ } // translate_labels()
+
+ // retain only the largest cluster for each region
+ largest_original_cluster_labels(bw, cl, ls) {
+ const nvox = bw.length
+ const ls2bw = new Uint32Array(cl + 1).fill(0)
+ const sumls = new Uint32Array(cl + 1).fill(0)
+ for (let i = 0; i < nvox; i++) {
+ const bwVal = bw[i]
+ const lsVal = ls[i]
+ ls2bw[lsVal] = bwVal
+ sumls[lsVal]++
+ }
+ let mxbw = 0
+ for (let i = 0; i < cl + 1; i++) {
+ const bwVal = ls2bw[i]
+ mxbw = Math.max(mxbw, bwVal)
+ // see if this is largest cluster of this bw-value
+ for (let j = 0; j < cl + 1; j++) {
+ if (j === i) {
+ continue
+ }
+ if (bwVal !== ls2bw[j]) {
+ continue
+ }
+ if (sumls[i] < sumls[j]) {
+ ls2bw[i] = 0
+ } else if (sumls[i] === sumls[j] && i < j) {
+ ls2bw[i] = 0
+ } // ties: arbitrary winner
+ }
+ }
+ const vxs = new Uint32Array(nvox).fill(0)
+ for (let i = 0; i < nvox; i++) {
+ vxs[i] = ls2bw[ls[i]]
+ }
+ return [mxbw, vxs]
+ }
+
+ // given a 3D image, return a clustered label map
+ // for an explanation and optimized C code see
+ // https://github.com/seung-lab/connected-components-3d
+ bwlabel(img, dim, conn = 26, binarize = false, onlyLargestClusterPerClass = false) {
+ const start = Date.now()
+ const nvox = dim[0] * dim[1] * dim[2]
+ const bw = new Uint32Array(nvox).fill(0)
+ if (![6, 18, 26].includes(conn)) {
+ console.log('bwlabel: conn must be 6, 18 or 26.')
+ return [0, bw]
+ }
+ if (dim[0] < 2 || dim[1] < 2 || dim[2] < 1) {
+ console.log('bwlabel: img must be 2 or 3-dimensional')
+ return [0, bw]
+ }
+ if (binarize) {
+ for (let i = 0; i < nvox; i++) {
+ if (img[i] !== 0.0) {
+ bw[i] = 1
+ }
+ }
+ } else {
+ bw.set(img)
+ }
+ let [ttn, tt, il] = this.do_initial_labelling(bw, dim, conn)
+ if (tt === undefined) {
+ tt = new Uint32Array(0)
+ }
+ const [cl, ls] = this.translate_labels(il, dim, tt, ttn)
+ console.log(conn + ' neighbor clustering into ' + cl + ' regions in ' + (Date.now() - start) + 'ms')
+ if (onlyLargestClusterPerClass) {
+ const [nbw, bwMx] = this.largest_original_cluster_labels(bw, cl, ls)
+ return [nbw, bwMx]
+ }
+ return [cl, ls]
+ } // bwlabel()
+}
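+
+// Usage sketch (hypothetical data; the class has no external dependencies):
+// const labeler = new BWLabeler()
+// const dim = [64, 64, 64]
+// const img = new Uint8Array(dim[0] * dim[1] * dim[2]) // e.g. a binary brain mask
+// const [nClusters, labels] = labeler.bwlabel(img, dim, 26, true)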
diff --git a/data/labels.nii.gz b/data/labels.nii.gz
deleted file mode 100644
index e7dbca9..0000000
Binary files a/data/labels.nii.gz and /dev/null differ
diff --git a/data/t1_c.nii.gz b/data/t1_c.nii.gz
deleted file mode 100644
index ed11d44..0000000
Binary files a/data/t1_c.nii.gz and /dev/null differ
diff --git a/experimental_mode/CanvasVersion/MRI Sample/labels.nii.gz b/experimental_mode/CanvasVersion/MRI Sample/labels.nii.gz
deleted file mode 100644
index e7dbca9..0000000
Binary files a/experimental_mode/CanvasVersion/MRI Sample/labels.nii.gz and /dev/null differ
diff --git a/experimental_mode/CanvasVersion/MRI Sample/t1_c.nii.gz b/experimental_mode/CanvasVersion/MRI Sample/t1_c.nii.gz
deleted file mode 100644
index ed11d44..0000000
Binary files a/experimental_mode/CanvasVersion/MRI Sample/t1_c.nii.gz and /dev/null differ
diff --git a/experimental_mode/CanvasVersion/README.md b/experimental_mode/CanvasVersion/README.md
deleted file mode 100644
index 722c0ec..0000000
--- a/experimental_mode/CanvasVersion/README.md
+++ /dev/null
@@ -1,41 +0,0 @@
-# 3D Brain Segmentation v1.0.0
-Mohamed Masoud - Sergey Plis - 2021
-
-
-## Whole Brain Inference at the Browser
-Demo shows segmenation of 3D brain MRI at the browser based on a pretrained MeshNet model.
-
-Main settings
- -input_shape=(1, 38, 38, 38, 1)
-
-
-To run the server:
- -Open a terminal window.
- -Navigate to the directory root directory .
- -Execute the command to start the server.
- For Python 2 run server with free port 80xx (e.g. `python -m SimpleHTTPServer 8020` )
- For Python 3 run server with free port 80xx (e.g. `python -m http.server 8020` )
-
-
-
- -In the browser url
-
- `http://localhost:8020/`
-
-
- - Open browser console by press F12 to see the sample outputs
-
- - click on Browse File button, and navigate to "MRI Sample" folder
-
- - Please DON'T change batch size, it is static to 1 for now.
-
-
-
-
-## Demo
-
-
-![Interface](https://github.com/Mmasoud1/Portfolios/blob/master/ShowMe/BrainInference/Compare2_3DCC.gif)
-
-
-
diff --git a/experimental_mode/CanvasVersion/index.html b/experimental_mode/CanvasVersion/index.html
deleted file mode 100644
index b93a27a..0000000
--- a/experimental_mode/CanvasVersion/index.html
+++ /dev/null
@@ -1,127 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
MeshNet timing with tfjs for whole Brain
-
-
Measure the performance of tfjs when loading a pretrained MeshNet model in the browser to make whole brain inference.
-
-
Select NIfTI file(*.nii, *.nii.gz) :
-
Select NIfTI label file(*.nii, *.nii.gz) :
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Slice :
-
-
-
-
-
-
-
-
-
-
-
Batch Size :
-
Num of Channels :
-
-
-
Num of Classes :
-
Background Label :
-
-
-
-
Num of Overlapped Batches :
-
-
-
-
Metrics:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/experimental_mode/CanvasVersion/js/mainMeshNetFunctions.js b/experimental_mode/CanvasVersion/js/mainMeshNetFunctions.js
deleted file mode 100644
index 97a25c6..0000000
--- a/experimental_mode/CanvasVersion/js/mainMeshNetFunctions.js
+++ /dev/null
@@ -1,1506 +0,0 @@
-/*
-=========================================================
-* 3D xSegmentation Demo - v1.0.0
-=========================================================
-
-* Discription: A user interface for whole brain segmentation
-* Input shape : [1, 38, 38, 38, 1]
-* Model : Meshnet
-*
-* Author: Mohamed Masoud , (Sergey Plis Lab) - 2021
-=========================================================
-
-
-
-=========================================================
- 3D Brain Segmentation
-=========================================================*/
-
-(function(){
-
-
- allOutputSlices = [];
- maxLabel = 0;
- allOutputSlices2DCC = [];
- allOutputSlices3DCC = [];
-
- // Return 1-Dim Array of pixel value, this 1 dim represent one channel
- getSliceData1D = (sliceIdx, niftiHeader, niftiImage) => {
- // get nifti dimensions
- let cols = niftiHeader.dims[1]; // Slice width
- let rows = niftiHeader.dims[2]; // Slice height
-
- let typedData;
-
- if (niftiHeader.datatypeCode === nifti.NIFTI1.TYPE_UINT8) {
- typedData = new Uint8Array(niftiImage);
- } else if (niftiHeader.datatypeCode === nifti.NIFTI1.TYPE_INT16) {
- typedData = new Int16Array(niftiImage);
- } else if (niftiHeader.datatypeCode === nifti.NIFTI1.TYPE_INT32) {
- typedData = new Int32Array(niftiImage);
- } else if (niftiHeader.datatypeCode === nifti.NIFTI1.TYPE_FLOAT32) {
- typedData = new Float32Array(niftiImage);
- } else if (niftiHeader.datatypeCode === nifti.NIFTI1.TYPE_FLOAT64) {
- typedData = new Float64Array(niftiImage);
- } else if (niftiHeader.datatypeCode === nifti.NIFTI1.TYPE_INT8) {
- typedData = new Int8Array(niftiImage);
- } else if (niftiHeader.datatypeCode === nifti.NIFTI1.TYPE_UINT16) {
- typedData = new Uint16Array(niftiImage);
- } else if (niftiHeader.datatypeCode === nifti.NIFTI1.TYPE_UINT32) {
- typedData = new Uint32Array(niftiImage);
- } else {
- return;
- }
-
- // offset to specified slice
- let sliceSize = cols * rows;
-
- let sliceOffset = sliceSize * sliceIdx;
-
- let data1DimArr = [];
-
- // draw pixels
- for (let row = 0; row < rows; row++) {
- let rowOffset = row * cols;
-
- for (let col = 0; col < cols; col++) {
- let offset = sliceOffset + rowOffset + col;
- let value = typedData[offset];
- // Create 1Dim Array of pixel value, this 1 dim represent one channel
- data1DimArr[(rowOffset + col)] = value & 0xFF;
-
- }
- }
-
- return data1DimArr;
- }
-
- // to use with ml5j
- computeConfusionMatrix = (trueLabels, predictedLabels) => {
- const CM = ConfusionMatrix.fromLabels(trueLabels, predictedLabels);
- return CM.getAccuracy();
-
- }
-
- // to use with bci.js
- compConfusionMat = (predictedLabels, trueLabels) => {
- const CM = confusionMatrix(predictedLabels, trueLabels);
- return accuracy(CM);
-
- }
-
- generateColors = (s, l, num_colors) => {
- let colors = []
- let delta = Math.trunc(360 / num_colors)
-
- for (let i = 0; i < num_colors; i++) {
- let h = i * delta
- colors.push("hsla("+ h + "," + s +"%," + l+ "%" + ")")
- }
-
- return colors
- }
-
- getRgbObject = (rgbString) => {
-
- let RGB = {};
- let rgbArray = rgbString;
- rgbArray = rgbArray.replace(/[^\d,]/g, '').split(',');
- let rgbKeys=["r","g","b"];
- RGB=rgbKeys.reduce((obj, key, index) => ({ ...obj, [key]:parseInt(rgbArray[index]) }), {});
- return RGB;
- }
-
- hslToRgb = (hsl) => {
- let sep = hsl.indexOf(",") > -1 ? "," : " ";
- hsl = hsl.substr(5).split(")")[0].split(sep);
-
- if (hsl.indexOf("/") > -1)
- hsl.splice(3,1);
-
- let h = hsl[0],
- s = hsl[1].substr(0,hsl[1].length - 1) / 100,
- l = hsl[2].substr(0,hsl[2].length - 1) / 100;
-
-
-
- let c = (1 - Math.abs(2 * l - 1)) * s,
- x = c * (1 - Math.abs((h / 60) % 2 - 1)),
- m = l - c/2,
- r = 0,
- g = 0,
- b = 0;
- if (0 <= h && h < 60) {
- r = c; g = x; b = 0;
- } else if (60 <= h && h < 120) {
- r = x; g = c; b = 0;
- } else if (120 <= h && h < 180) {
- r = 0; g = c; b = x;
- } else if (180 <= h && h < 240) {
- r = 0; g = x; b = c;
- } else if (240 <= h && h < 300) {
- r = x; g = 0; b = c;
- } else if (300 <= h && h < 360) {
- r = c; g = 0; b = x;
- }
- r = Math.round((r + m) * 255);
- g = Math.round((g + m) * 255);
- b = Math.round((b + m) * 255);
-
-
- return "rgb(" + r + "," + g + "," + b + ")";
- }
-
- // For Dice calculations
- intersect = (ar1, ar2) => {
- const intersection = [];
- for(let i = 0; i < ar1.length ; i++) {
- if(ar1[i] == ar2[i]) {
- intersection.push(ar1[i]);
- }
- }
-
- return intersection;
- }
-
- diceCoefficient = (ar1, ar2) => {
- return ( 2 * intersect(ar1, ar2).length ) / ( ar1.length + ar2.length );
- }
-
-
- drawConfusionMat = async(groundTruthLabels, predictedLabels, elemId) => {
-
- if(elemId == "accuracyTitleFilter3DCC") {
- const values = await tfvis.metrics.confusionMatrix(groundTruthLabels, predictedLabels);
- const data = { values };
- const surface = { name: 'Confusion Matrix 3D CC', tab: 'Charts' };
- tfvis.render.confusionMatrix(surface, data);
- }
- }
-
- calculateAccuracy = async(groundTruthLabels, predictedLabels, elemId) => {
- document.getElementById(elemId).innerHTML = "Accuracy: " +
- ( await tfvis.metrics.accuracy(groundTruthLabels, predictedLabels) ).toFixed(3);
-
- }
-
- drawOutputCanvas = (canvas, sliceIdx, niftiHeader, niftiImage, outputSlices) => {
-
- let n_classes = parseInt(document.getElementById("numOfClassesId").value);
- let isColorEnable = document.getElementById("mriColoring").checked;
- // get nifti dimensions
- let cols = niftiHeader.dims[1];
- let rows = niftiHeader.dims[2];
-
- // set canvas dimensions to nifti slice dimensions
- canvas.width = cols;
- canvas.height = rows;
-
- // make canvas image data
- let ctx = canvas.getContext("2d");
- let canvasImageData = ctx.createImageData(canvas.width, canvas.height);
-
- let colors = generateColors(100, 50, n_classes);
- let bgLabelValue = parseInt(document.getElementById("bgLabelId").value);
-
- for (let pixelIdx = 0; pixelIdx < outputSlices[sliceIdx].length; pixelIdx++) {
- if(isColorEnable) {
- let color = { r: 0, g: 0, b: 0 };
- if(outputSlices[sliceIdx][pixelIdx] != bgLabelValue) {
- color = getRgbObject(hslToRgb(colors[outputSlices[sliceIdx][pixelIdx]]));
- }
- canvasImageData.data[pixelIdx * 4] = color.r & 0xFF;
- canvasImageData.data[pixelIdx * 4 + 1] = color.g & 0xFF;
- canvasImageData.data[pixelIdx * 4 + 2] = color.b & 0xFF;
- canvasImageData.data[pixelIdx * 4 + 3] = 0xFF;
-
- } else {
- let value = Math.ceil(outputSlices[sliceIdx][pixelIdx]*255/(n_classes - 1));
-
- canvasImageData.data[pixelIdx * 4] = value & 0xFF;
- canvasImageData.data[pixelIdx * 4 + 1] = value & 0xFF;
- canvasImageData.data[pixelIdx * 4 + 2] = value & 0xFF;
- canvasImageData.data[pixelIdx * 4 + 3] = 0xFF;
- }
-
- }
-
- ctx.putImageData(canvasImageData, 0, 0);
-
- // console.log("canvasImageData :", canvasImageData.data)
-
- let elemId = null;
-
- if(canvas.id == "outputCanvas") {
- document.getElementById("predTitle").innerHTML = "Model Output";
- elemId = "accuracyTitleModelPred";
- }
-
- if(canvas.id == "out2dCC") {
- document.getElementById("CC2DTitle").innerHTML = "Filter by 2D CC";
- elemId = "accuracyTitleFilter2DCC";
- }
-
- if(canvas.id == "out3dCC") {
- document.getElementById("CC3DTitle").innerHTML = "Filter by 3D CC";
- elemId = "accuracyTitleFilter3DCC";
- }
-
- let gtCanvas = document.getElementById('gtCanvas');
- let ctxGt = gtCanvas.getContext("2d");
-
- let trueLabels = ctxGt.getImageData(0, 0, gtCanvas.width, gtCanvas.height)
-
- // trueLabels.data is Uint8ClampedArray and need to convert to regular array first such that
- // normalArray = Array.prototype.slice.call(trueLabels.data);
-
-
- if(! isColorEnable){
- if(gtLabelLoaded) {
-
- const labels = tf.tensor1d( Array.prototype.slice.call(trueLabels.data) );
- const predictions = tf.tensor1d( Array.prototype.slice.call(canvasImageData.data) );
-
- if(document.getElementById("metricsId").value == "DiceCoef") {
- document.getElementById(elemId).innerHTML = "Dice Coef: " +
- diceCoefficient( Array.prototype.slice.call(trueLabels.data) ,
- Array.prototype.slice.call(canvasImageData.data)
- ).toFixed(4);
- }
-
-
- if(document.getElementById("metricsId").value == "Accuracy") {
- calculateAccuracy(labels, predictions, elemId);
- labels.dispose();
- predictions.dispose();
-
- }
-
-
- if(elemId = "accuracyTitleFilter3DCC") {
- // drawConfusionMat(labels, predictions, elemId);
- }
-
- }
-
- } else {
- document.getElementById(elemId).innerHTML = "";
- }
-
- }
-
-
-
-
- getMaxRegionMaskByContour= (canvasImageData) => { // slice matrix
-
- let mat = cv.matFromImageData(canvasImageData);
-
- let mask = cv.Mat.zeros(mat.cols, mat.rows, cv.CV_8UC3);
-
- let mask_gray = new cv.Mat ();
- let mask_binary = new cv.Mat ();
- let contours = new cv.MatVector();
- let hierarchy = new cv.Mat();
-
- // Grayscale conversion
- cv.cvtColor (mat, mask_gray, cv.COLOR_RGBA2GRAY, 0);
-
- cv.findContours(mask_gray, contours, hierarchy, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE); // cv.CHAIN_APPROX_SIMPLE
-
- let maxContourArea = 0
- let maxContourAreaIdx = -1
- for (let i = 0; i < contours.size(); ++i) {
- let cnt = contours.get(i);
- let area = cv.contourArea(cnt, false)
- if(maxContourArea < area){
- maxContourArea = area;
- maxContourAreaIdx = i;
-
- }
-
- cnt.delete();
- }
-
- let color = new cv.Scalar(255, 255, 255);
- cv.drawContours(mask, contours, maxContourAreaIdx, color, -1); //cv.LINE_8
-
- cv.cvtColor (mask, mask_gray, cv.COLOR_RGBA2GRAY, 0);
- cv.threshold (mask_gray, mask_binary, 0, 255, cv.THRESH_BINARY | cv.THRESH_OTSU);
-
-
- mat.delete();
- mask.delete();
- mask_gray.delete();
-
- contours.delete();
- hierarchy.delete();
-
- return mask_binary.data;
- }
-
-
-
- postProcessSlices = (outputSlices) => {
- let canvas = document.createElement("CANVAS");
-
- // get nifti dimensions
- let cols = niftiHeader.dims[1];
- let rows = niftiHeader.dims[2];
-
- // set canvas dimensions to nifti slice dimensions
- canvas.width = cols;
- canvas.height = rows;
-
- // make canvas image data
- let ctx = canvas.getContext("2d");
-
- let canvasImageData = ctx.createImageData(canvas.width, canvas.height);
-
- let bgLabelValue = parseInt(document.getElementById("bgLabelId").value);
-
- for(let sliceIdx = 0; sliceIdx < outputSlices.length; sliceIdx++) {
-
- for (let pixelIdx = 0; pixelIdx < outputSlices[sliceIdx].length; pixelIdx++) {
-
- let color = { r: 0, g: 0, b: 0 };
- if(outputSlices[sliceIdx][pixelIdx] != bgLabelValue) {
- color = { r: 255, g: 255, b: 255 };
- }
-
- canvasImageData.data[pixelIdx * 4] = color.r & 0xFF;
- canvasImageData.data[pixelIdx * 4 + 1] = color.g & 0xFF;
- canvasImageData.data[pixelIdx * 4 + 2] = color.b & 0xFF;
- canvasImageData.data[pixelIdx * 4 + 3] = 0xFF;
- }
-
- let maskData = getMaxRegionMaskByContour(canvasImageData);
-
- // show slice max area only
- for( let idx = 0; idx < maskData.length; idx += 1) {
-
- if(maskData[idx] == bgLabelValue ) {
- outputSlices[sliceIdx][idx] = 0;
- }
- }
-
- }
-
- return outputSlices;
- }
-
-/////////////******************* 3D Connected Components**************************/////////////////
-
- getBinaryMaskData1D = (sliceData) => { // greyImage is one channel 2D image with values 0-255
-
- let maskBinaryData1D = [];
- for (let idx = 0; idx < sliceData.length; idx++) {
-
- if(sliceData[idx] > 0) {
- maskBinaryData1D[idx] = 1;
- } else {
- maskBinaryData1D[idx] = 0;
- }
- }
-
- return maskBinaryData1D;
- }
-
- getBinaryMaskImage = (greyImage) => { // greyImage is one channel 2D image with values 0-255
- let binaryMaskImage = greyImage.clone(); // from opencvjs
- let value = null;
-
- for (let idx = 0; idx < greyImage.data.length; idx++) {
-
- if(greyImage.data[idx] > 0) {
- value = 255;
- } else {
- value = 0;
- }
-
- binaryMaskImage.data[idx] = value;
- binaryMaskImage.data[idx + 1] = value;
- binaryMaskImage.data[idx + 2] = value;
- binaryMaskImage.data[idx + 3] = 255; // Alpha channel
- }
-
- return binaryMaskImage;
- }
-
- convertBinaryDataTo2D = (binaryData1D, imgHeight, imgWidth) => {
- return tf.tensor(binaryData1D, [imgHeight, imgWidth]).arraySync();
- }
-
-
- getConComponentsFor2D = (binaryMaskData2D, imgHeight, imgWidth) => {
- // initiat label
- let label1D = [];
- resetEquivalenceTable();
- for(let idx = 0; idx < imgHeight * imgWidth; idx++) {
- label1D[idx] = 0;
- }
-
- let label2D = convertBinaryDataTo2D(label1D, imgHeight, imgWidth);
-
- // maxLabel initiation to zero, starting label for 2d and 3d labeling
- maxLabel = 0;
-
- // 1st pass
- for(let row = 0; row < imgHeight; row++) {
- for(let col = 0; col < imgWidth; col++) {
-
- if( binaryMaskData2D[row][col] != 0) {
- label2D[row][col] = checkNeighbors2D(label2D, row, col, maxLabel)
- if(maxLabel < label2D[row][col]) {
- maxLabel = label2D[row][col];
- }
-
- }
- }
- }
-
- // adjust Equivalence table labels such that eqvTabel[3] = 2 && eqvTabel[2] = 1 => eqvTabel[3] = 1
- for(let labelIdx = equivalenceTabel.length - 1; labelIdx > 0; labelIdx = labelIdx-1 ) {
- adjustEquivalenceTable (labelIdx);
- }
-
- // 2nd pass : relabeling the slice after eqvTable adjustment
- for(let row = 0; row < imgHeight; row++) {
- for(let col = 0; col < imgWidth; col++) {
-
- if( label2D[row][col] != 0) {
- label2D[row][col] = equivalenceTabel[label2D[row][col]];
- }
- }
- }
-
- return label2D;
- }
-
-
- getMaxLabelFor2D = (label2D, imgHeight, imgWidth) => {
-
- let maxLabelFor2D = 0;
- for(let row = 0; row < imgHeight; row++) {
- for(let col = 0; col < imgWidth; col++) {
-
- if( label2D[row][col] > maxLabelFor2D) {
- maxLabelFor2D = label2D[row][col];
- }
- }
- }
-
- return maxLabelFor2D;
- }
-
-
- getMaxLabelFor3D = (label3D, sliceHeight, sliceWidth, numSlices) => {
-
- let maxLabelFor3D = 0;
-
- for(let sliceIdx = 0; sliceIdx < numSlices; sliceIdx++ ) {
- for(let row = 0; row < sliceHeight; row++) {
- for(let col = 0; col < sliceWidth; col++) {
-
- if( label3D[sliceIdx][row][col] > maxLabelFor3D) {
- maxLabelFor3D = label3D[sliceIdx][row][col];
- }
- }
- }
- }
-
- return maxLabelFor3D;
- }
-
-
- getMaxVolumeLabel3D = (label3D, sliceHeight, sliceWidth, numSlices) => {
-
- // Initiat connected component volumes to zeros
- let ccVolume = [];
- let maxCCLabel3D = getMaxLabelFor3D(label3D, sliceHeight, sliceWidth, numSlices)
-
- for( let idx = 0; idx < maxCCLabel3D; idx ++) {
- ccVolume[idx] = 0;
- }
-
- for(let sliceIdx = 0; sliceIdx < numSlices; sliceIdx++ ) {
- for(let row = 0; row < sliceHeight; row++) {
- for(let col = 0; col < sliceWidth; col++) {
- ccVolume[label3D[sliceIdx][row][col]] = ccVolume[label3D[sliceIdx][row][col]] +1;
- }
- }
- }
-
- let maxCcVolume = 0;
- let maxCcVolumeLabel = -1;
-
- for( let idx = 1; idx < maxCCLabel3D; idx ++) {
-
- if( maxCcVolume < ccVolume[idx] ) {
- maxCcVolume = ccVolume[idx];
- maxCcVolumeLabel = idx;
- }
- }
-
- return maxCcVolumeLabel;
- }
-
-
-
- getMaxAreaLabel2D = (label2D, imgHeight, imgWidth) => {
-
- // Initiat connected component areas to zeros
- let ccAreas = [];
- let maxCCLabel = getMaxLabelFor2D(label2D, imgHeight, imgWidth)
-
- for( let idx = 0; idx < maxCCLabel; idx ++) {
- ccAreas[idx] = 0;
- }
-
- // Find areas of connected components where ccAreas[0] is for background
- for(let row = 0; row < imgHeight; row++) {
- for(let col = 0; col < imgWidth; col++) {
- ccAreas[label2D[row][col]] = ccAreas[label2D[row][col]] +1;
- }
- }
-
- let maxCcArea = 0;
- let maxCcAreaLabel = -1;
- for( let idx = 1; idx < maxCCLabel; idx ++) {
- if( maxCcArea < ccAreas[idx] ) {
- maxCcArea = ccAreas[idx];
- maxCcAreaLabel = idx;
- }
- }
-
-
- return maxCcAreaLabel;
- }
-
-
- resetEquivalenceTable = () => {
- equivalenceTabel = [];
- equivalenceTabel[0] = 0;
- }
-
- updateEquivalenceTable = (label, newLabel) => {
- equivalenceTabel[label] = newLabel;
- }
-
-
- adjustEquivalenceTable = (labelIdx) => {
-
- if(equivalenceTabel[labelIdx] != labelIdx) {
- equivalenceTabel[labelIdx] = adjustEquivalenceTable(equivalenceTabel[labelIdx]);
- }
-
- return equivalenceTabel[labelIdx];
- }
-
-
- checkNeighbors2D = (label, row, col, maxLabel) => {
-
- if ( label[row][col - 1] && label[row - 1][col]) {
-
- if(label[row][col - 1] == label[row - 1][col]) {
- return label[row ][col - 1];
-
- } else {
-
- let smallerLabel = ( label[row][col - 1] < label[row - 1][col] ) ? label[row][col - 1] : label[row - 1][col];
- let largerLabel = ( label[row][col - 1] > label[row - 1][col] ) ? label[row][col - 1] : label[row - 1][col];
- updateEquivalenceTable(largerLabel, smallerLabel);
- return smallerLabel;
- }
-
- } else if ( label[row ][col - 1] ) {
- return label[row ][col - 1] ;
- } else if ( label[row - 1][col] ) {
- return label[row - 1][col];
- } else {
- updateEquivalenceTable(maxLabel+1, maxLabel+1);
- return maxLabel+1 ;
- }
-
- }
-
- checkNeighbors3D = (label, z_1PixelLabel, row, col, maxLabel) => { //z_1PixelLabel same x,y pixel label of z-1 prev slice
- if ( label[row][col - 1] && label[row - 1][col] && z_1PixelLabel) {
-
- if( (label[row][col - 1] == label[row - 1][col]) && (label[row][col - 1] == z_1PixelLabel) ) {
- return z_1PixelLabel;
-
- } else {
-
- let smallLabel = ( label[row][col - 1] < label[row - 1][col] ) ? label[row][col - 1] : label[row - 1][col];
- let smallestLabel = ( z_1PixelLabel < smallLabel ) ? z_1PixelLabel : smallLabel;
- let largerLabel = ( label[row][col - 1] > label[row - 1][col] ) ? label[row][col - 1] : label[row - 1][col];
- updateEquivalenceTable(largerLabel, smallestLabel);
- updateEquivalenceTable(smallLabel, smallestLabel);
- return smallestLabel;
- }
-
- } else if ( label[row][col - 1] && label[row - 1][col] ) {
-
- if(label[row][col - 1] == label[row - 1][col]) {
- return label[row ][col - 1];
-
- } else {
-
- let smallerLabel = ( label[row][col - 1] < label[row - 1][col] ) ? label[row][col - 1] : label[row - 1][col];
- let largerLabel = ( label[row][col - 1] > label[row - 1][col] ) ? label[row][col - 1] : label[row - 1][col];
- updateEquivalenceTable(largerLabel, smallerLabel);
- return smallerLabel;
- }
-
-
- } else if ( label[row - 1][col] && z_1PixelLabel ) {
-
- if(label[row - 1][col] == z_1PixelLabel) {
- return z_1PixelLabel;
-
- } else {
-
- let smallerLabel = ( z_1PixelLabel < label[row - 1][col] ) ? z_1PixelLabel : label[row - 1][col];
- let largerLabel = ( z_1PixelLabel > label[row - 1][col] ) ? z_1PixelLabel : label[row - 1][col];
- updateEquivalenceTable(largerLabel, smallerLabel);
- return smallerLabel;
- }
-
- } else if ( label[row][col - 1] && z_1PixelLabel ) {
-
- if( label[row][col - 1] == z_1PixelLabel ) {
- return z_1PixelLabel;
-
- } else {
-
- let smallerLabel = ( label[row][col - 1] < z_1PixelLabel ) ? label[row][col - 1] : z_1PixelLabel;
- let largerLabel = ( label[row][col - 1] > z_1PixelLabel ) ? label[row][col - 1] : z_1PixelLabel;
- updateEquivalenceTable(largerLabel, smallerLabel);
- return smallerLabel;
- }
-
- } else if ( label[row ][col - 1] ) {
- return label[row ][col - 1] ;
- } else if ( label[row - 1][col] ) {
- return label[row - 1][col];
- } else if ( z_1PixelLabel) {
- return z_1PixelLabel;
- } else {
- updateEquivalenceTable(maxLabel+1, maxLabel+1);
- return maxLabel+1 ;
- }
- }
-
- getConComponentsFor3DVolume = (outputSlices, sliceHeight, sliceWidth) => {
-
- let binaryMaskData1D = [];
- let binaryMaskData2D = [];
- let label3D = [];
-
- for(let sliceIdx = 0; sliceIdx < outputSlices.length; sliceIdx++) {
-
- binaryMaskData1D[sliceIdx] = getBinaryMaskData1D(outputSlices[sliceIdx]); // binaryMaskData1D has values 0 or 1
-
- binaryMaskData2D[sliceIdx] = convertBinaryDataTo2D(binaryMaskData1D[sliceIdx], sliceHeight, sliceWidth);
-
- if(sliceIdx == 0) {
- label3D[sliceIdx] = getConComponentsFor2D(binaryMaskData2D[sliceIdx], sliceHeight, sliceWidth);
-
- } else {
- label3D[sliceIdx] = getConComponentsFor2Slices(binaryMaskData2D[sliceIdx], label3D[sliceIdx - 1], sliceHeight, sliceWidth);
- }
-
- }
-
- // 3d cc third pass
- for(let sliceIdx = 0; sliceIdx < outputSlices.length; sliceIdx++) {
- let row, col;
- for(row = 0; row < sliceHeight; row++) {
- for(col = 0; col < sliceWidth; col++) {
-
- if( label3D[sliceIdx][row][col] != 0) {
- label3D[sliceIdx][row][col] = equivalenceTabel[label3D[sliceIdx][row][col]];
- }
- }
- }
- }
-
- return label3D;
- }
-
- getConComponentsFor2Slices = (binaryMaskData2D, preSliceLabels, imgHeight, imgWidth) => {
- let label1D = [];
-
- for(let idx = 0; idx < imgHeight * imgWidth; idx++) {
- label1D[idx] = 0;
- }
-
- let label2D = convertBinaryDataTo2D(label1D, imgHeight, imgWidth);
-
- for(let row = 0; row < imgHeight; row++) {
- for(let col = 0; col < imgWidth; col++) {
-
- if( binaryMaskData2D[row][col] != 0) {
- label2D[row][col] = checkNeighbors3D(label2D, preSliceLabels[row][col], row, col, maxLabel)
- if(maxLabel < label2D[row][col]) {
- maxLabel = label2D[row][col];
- }
-
- }
- }
- }
-
- for(let labelIdx = equivalenceTabel.length - 1; labelIdx > 0; labelIdx = labelIdx-1 ) {
- adjustEquivalenceTable (labelIdx);
- }
-
- for(let row = 0; row < imgHeight; row++) {
- for(let col = 0; col < imgWidth; col++) {
-
- if( label2D[row][col] != 0) {
- label2D[row][col] = equivalenceTabel[label2D[row][col]];
- }
- }
- }
-
- return label2D;
- }
-
-
- postProcessSlices3D = (outputSlices) => {
- // get nifti dimensions
- let sliceWidth = niftiHeader.dims[1];
- let sliceHeight = niftiHeader.dims[2];
-
- let bgLabelValue = parseInt(document.getElementById("bgLabelId").value);
-
- let label3D = [];
-
- label3D = getConComponentsFor3DVolume(outputSlices, sliceHeight, sliceWidth);
-
- let maxVolumeLabel = getMaxVolumeLabel3D(label3D, sliceHeight, sliceWidth, outputSlices.length);
-
-
- for(let sliceIdx = 0; sliceIdx < outputSlices.length; sliceIdx++) {
- //Get max volume mask
- let row, col;
- for(row = 0; row < sliceHeight; row++) {
- for(col = 0; col < sliceWidth; col++) {
- if(label3D[sliceIdx][row][col] != maxVolumeLabel) {
- label3D[sliceIdx][row][col] = 0;
- } else {
- label3D[sliceIdx][row][col] = 255;
- }
- }
- }
-
- let pixelIdx;
-
- for(row = 0, pixelIdx = 0; row < sliceHeight; row++) {
- for(col = 0; col < sliceWidth; col++, pixelIdx++) {
-
- if(label3D[sliceIdx][row][col] == 0) {
- outputSlices[sliceIdx][pixelIdx] = 0;
- }
-
- }
- }
- }
-
- return outputSlices;
- }
-
-
-
- postProcessSlices2D = (outputSlices) => {
- // get nifti dimensions
- let sliceWidth = niftiHeader.dims[1];
- let sliceHeight = niftiHeader.dims[2];
-
- let binaryMaskData1D = [];
- let binaryMaskData2D = [];
- let maxAreaLabel;
- let label2D = [];
-
- for(let sliceIdx = 0; sliceIdx < outputSlices.length; sliceIdx++) {
-
- binaryMaskData1D = getBinaryMaskData1D(outputSlices[sliceIdx]); // binaryMaskData1D has values 0 or 1
-
- binaryMaskData2D = convertBinaryDataTo2D(binaryMaskData1D, sliceHeight, sliceWidth);
-
- // labels 2d are starting from 0 and increment by 1 with each new label
- label2D = getConComponentsFor2D(binaryMaskData2D, sliceHeight, sliceWidth);
-
-
- maxAreaLabel = getMaxAreaLabel2D(label2D, sliceHeight, sliceWidth);
-
-
- // Get max area mask
- // It is fine to set label2D to 255 since each slice labels have no effect on other slices labels.
- let row, col;
- for(row = 0; row < sliceHeight; row++) {
- for(col = 0; col < sliceWidth; col++) {
- if(label2D[row][col] != maxAreaLabel){
- label2D[row][col] = 0;
- } else {
- label2D[row][col] = 255;
- }
- }
- }
-
-
- // Remove all areas except largest brain area
- let pixelIdx;
- for(row = 0, pixelIdx = 0; row < sliceHeight; row++) {
- for(col = 0; col < sliceWidth; col++, pixelIdx++) {
- if(label2D[row][col] == 0){
- outputSlices[sliceIdx][pixelIdx] = 0;
- }
- }
- }
- }
-
- return outputSlices;
- }
-
-
-///////////////******************************************************************////////////////////
-
-
- //1- Standard Normal variate using Box-Muller transform.
- randn_bm = () => {
- let u = 0, v = 0;
- while(u === 0) u = Math.random(); //Converting [0,1) to (0,1)
- while(v === 0) v = Math.random();
- return Math.sqrt( -2.0 * Math.log( u ) ) * Math.cos( 2.0 * Math.PI * v );
- }
-
-
- // check whether the proposed subvolumes coords are feasible
- checkInside = (DHW, cubeSides, subCubeSides) => {
- for (let i = 0; i < 3; i++) {
- if ( (Math.sign(DHW[i]) < 0) || ( (DHW[i] + subCubeSides[i]) > cubeSides[i]) ) {
- return false;
- }
- }
-
- return true;
- }
-
-
-
- findCoordsOfAddBrainBatches = (numOfSubCubes, mean, sigma, cubeSides, subCubeSides ) => {
-
- const allCoords = [];
- let coord;
-
- for (let i = 0; i < numOfSubCubes; i++) {
- coord = Array(Math.round(mean[0]+randn_bm()*sigma[0]),
- Math.round(mean[1]+randn_bm()*sigma[1]),
- Math.round(mean[2]+randn_bm()*sigma[2]) );
- if( !checkInside(coord, cubeSides, subCubeSides) ) {
- i--;
- // console.log(coord);
- } else {
- allCoords[i] = coord;
- }
- }
-
- return allCoords;
- }
-
- // Return Tensor with binary 3D volume data 0 or 1
- binarizeVolumeDataTensor = (volumeDataTensor) => {
-
- let alpha = 0;
- return volumeDataTensor.step(alpha); // element-wise: (x > 0 ? 1 : alpha * x ); e.g. Tenosr [0, 0.9, 0.8, -3] => Tensor [0, 1, 1, 0]
- }
-
-
- // Convert tensor to buffer so immutable tensor can be mutable buffer with get() and set()
- tensor2Buffer = (tensor) => {
- return tf.buffer(tensor.shape, tensor.dtype, tensor.dataSync());
- }
-
-
- cubeMoments = (cube3d, threshold) => {
- // mean and variance of a normalized cube data [0, 1]
- let cube = tensor2Buffer(cube3d);
- let coords = [];
- for(let i = 0; i < cube3d.shape[0]; i++) {
- for(let j = 0; j < cube3d.shape[1]; j++) {
- for(let k = 0; k < cube3d.shape[2]; k++) {
- if (cube.get(i,j,k) > threshold) {
- coords.push([i, j, k]);
- }
- }
- }
- }
- let coordsTensor = tf.tensor2d(coords);
- let moments = tf.moments(coordsTensor, 0, false);
- let meanArray = Array.from(tf.round(moments['mean']).dataSync());
- let varArray = Array.from(moments['variance'].dataSync());
- coordsTensor.dispose();
-
- return [meanArray, varArray];
- };
-
-
- // For all MRI volume values > 0 , find the centroid of those data
- findHeadCentroid = (slices_3d, num_of_slices, slice_height, slice_width) => {
- // Threshold tensor volume values to 0 or 1 such that if (voxelVal > 0 ? 1 : 0 )
- let binarizeVolumeTensor = binarizeVolumeDataTensor(slices_3d);
- let binarizeVolumeBuffer = tensor2Buffer(binarizeVolumeTensor);
-
- const grid_coords = [];
- let counter = 0;
-
-
- // Find coordinates of nonzero voxels as (x_i, y_i, z_i) vectors
- for(let depthIdx = 0; depthIdx < num_of_slices; depthIdx += 1) {
- for(let rowIdx = 0; rowIdx < slice_height; rowIdx += 1) {
- for(let colIdx = 0; colIdx < slice_width; colIdx += 1) {
-
- let voxelValue = binarizeVolumeBuffer.get(depthIdx, rowIdx, colIdx);
- if(voxelValue == 1) {
- grid_coords[counter] = Array(depthIdx, rowIdx, colIdx);
- counter += 1;
- }
- }
- }
- }
-
- // Create 2D Tesnor with three columns for depth, row, col index
- let gridCoordsTensor = tf.tensor2d(grid_coords);
- let axis = 0;
-
- let headCentroidTensor = tf.round(gridCoordsTensor.mean(axis));
-
- // Find the Centroid voxel Array [d, h, w]
- let headCentroidArray = Array.from(headCentroidTensor.dataSync());
- tf.dispose(gridCoordsTensor);
- tf.dispose(headCentroidTensor);
-
- return headCentroidArray;
-
- }
-
- // Try to create batches with the volume of slices each of D,H,W sub_volume and focus on brain area for the additional sub_volumes
- sliceVolumeIntoOverlappedBatches = (slices_3d, num_of_slices, slice_height, slice_width, batch_D, batch_H, batch_W, headSubCubesCoords ) => {
-
- let allSlicedBatches = [];
- let batch_id = 1;
-
- for(let depthIdx = 0; depthIdx < num_of_slices; depthIdx += batch_D) {
- for(let rowIdx = 0; rowIdx < slice_height; rowIdx += batch_H) {
- for(let colIdx = 0; colIdx < slice_width; colIdx += batch_W) {
- // for overlap calculations of last batches
- let depthIdxDiff = 0;
- let rowIdxDiff = 0;
- let colIdxDiff = 0;
-
- if((depthIdx + batch_D) > num_of_slices) {
- depthIdxDiff = (depthIdx + batch_D) - num_of_slices;
- }
-
- if((rowIdx + batch_H) > slice_height) {
- rowIdxDiff = (rowIdx + batch_H) - slice_height;
- }
-
- if((colIdx + batch_W) > slice_width) {
- colIdxDiff = (colIdx + batch_W) - slice_width;
- }
-
- let startIndex = [depthIdx - depthIdxDiff, rowIdx - rowIdxDiff, colIdx - colIdxDiff];
- let batch = slices_3d.slice(startIndex, [batch_D, batch_H, batch_W]);
-
- allSlicedBatches.push({id: batch_id , coordinates: startIndex, data: batch});
- batch_id += 1;
- }
- }
- }
-
- // Additional sub_volumes or batches focus around the head centroid
- for(let cubeIdx = 0; cubeIdx < headSubCubesCoords.length; cubeIdx++) {
-
- let startIndex = [headSubCubesCoords[cubeIdx][0], headSubCubesCoords[cubeIdx][1], headSubCubesCoords[cubeIdx][2]];
- let batch = slices_3d.slice(startIndex, [batch_D, batch_H, batch_W]);
- allSlicedBatches.push({id: batch_id , coordinates: startIndex, data: batch});
- batch_id += 1;
- }
-
-
- return allSlicedBatches;
- }
-
- // Try to create batches with the volume of slices each of D,H,W sub_volume with minimum overlap option
- sliceVolumeIntoBatches = (slices_3d, num_of_slices, slice_height, slice_width, batch_D, batch_H, batch_W ) => {
- let allSlicedBatches = [];
- let batch_id = 1;
-
- for(let depthIdx = 0; depthIdx < num_of_slices; depthIdx += batch_D) {
- for(let rowIdx = 0; rowIdx < slice_height; rowIdx += batch_H) {
- for(let colIdx = 0; colIdx < slice_width; colIdx += batch_W) {
- // for overlap calculations of last batches
- let depthIdxDiff = 0;
- let rowIdxDiff = 0;
- let colIdxDiff = 0;
-
- if((depthIdx + batch_D) > num_of_slices) {
- depthIdxDiff = (depthIdx + batch_D) - num_of_slices;
- }
-
- if((rowIdx + batch_H) > slice_height) {
- rowIdxDiff = (rowIdx + batch_H) - slice_height;
- }
-
- if((colIdx + batch_W) > slice_width) {
- colIdxDiff = (colIdx + batch_W) - slice_width;
- }
-
- let startIndex = [depthIdx - depthIdxDiff, rowIdx - rowIdxDiff, colIdx - colIdxDiff];
- let batch = slices_3d.slice(startIndex, [batch_D, batch_H, batch_W]);
-
- allSlicedBatches.push({id: batch_id , coordinates: startIndex, data: batch});
- batch_id += 1;
- }
- }
- }
-
- return allSlicedBatches;
- }
-
- getAllSlicesData1D = (num_of_slices) => {
- let allSlices = [];
- for(let sliceIdx = 0; sliceIdx < num_of_slices; sliceIdx++) {
- let slice = getSliceData1D(sliceIdx, niftiHeader, niftiImage);
- allSlices.push(slice);
- }
-
- return allSlices;
- }
-
- getAllSlices2D = (allSlices, slice_height, slice_width) => {
- let allSlices_2D = [];
- for(let sliceIdx = 0; sliceIdx < allSlices.length; sliceIdx ++){
- allSlices_2D.push(tf.tensor(allSlices[sliceIdx], [slice_height, slice_width]));
- }
-
- return allSlices_2D;
- }
-
- getSlices3D = (allSlices_2D) => {
-
- return tf.stack(allSlices_2D);
-
- }
-
- normalizeVolumeData = (volumeData) => {
- //Normalize the data to the range 0 - 1 using min-max scaling
- const volumeData_Max = volumeData.max();
- const volumeData_Min = volumeData.min();
- const normalizedSlices_3d = volumeData.sub(volumeData_Min).div(volumeData_Max.sub(volumeData_Min));
- return normalizedSlices_3d;
- }
-
- load_model = async() => {
- let modelUrl = './mnm_tfjs_me_test/model.json';
- // let modelUrl = './meshnet_dropout/mnm_dropout/model2.json';
- const Model = await tf.loadLayersModel(modelUrl);
- return Model;
- }
-
-
- findPixelIndex = (allPixels, d, h, w) => {
-
- for( pIndex = 0; pIndex < allPixels.length; pIndex++) {
- if( (allPixels[pIndex]["d"] == d) &&
- (allPixels[pIndex]["h"] == h) &&
- (allPixels[pIndex]["w"] == w) ) {
-
- return pIndex;
- }
-
- }
-
- return null;
- }
-
-
-
-// Find current voxel value of the related seg class buffer, if we have numSegClasses = 3 then we have 3 buffers, one for each seg classes 0, 1, 2
-generateOutputSlicesV2 = (allPredictions, num_of_slices, numSegClasses, slice_height, slice_width, batch_D, batch_H, batch_W) => {
-
- console.log("version 2 num of seg classes: ", numSegClasses);
- // buffer set ( depth, H, W) in order
- let outVolumeBuffer = tf.buffer([num_of_slices, slice_height, slice_width, numSegClasses ], dtype=tf.float32)
- let isPostProcessEnable = document.getElementById("postProcessing").checked;
-
-
- for(batchIdx = 0; batchIdx < allPredictions.length; batchIdx += 1) {
-
- let coord = allPredictions[batchIdx]["coordinates"];
- let pixelValues = allPredictions[batchIdx]["data"];
- let pixelValuesCounter = 0;
-
- for(depthIdx = coord[0]; depthIdx < (batch_D + coord[0]); depthIdx += 1) {
- for(rowIdx = coord[1]; rowIdx < (batch_H + coord[1]); rowIdx += 1) {
- for(colIdx = coord[2]; colIdx < (batch_W + coord[2]); colIdx += 1) {
- // Find current voxel value of the related seg class buffer
- // if we have numSegClasses = 3 then we have 3 buffers, one for each seg classes 0, 1, 2
- let voxelValue = outVolumeBuffer.get(depthIdx, rowIdx, colIdx, pixelValues[pixelValuesCounter] );
- // increment current voxel value by 1 in the current class buffer
- outVolumeBuffer.set(voxelValue + 1, depthIdx, rowIdx, colIdx, pixelValues[pixelValuesCounter] );
-
- pixelValuesCounter += 1;
- }
- }
- }
- }
-
- // convert output buffer to tensor
- let axis = -1; // last axis
- // Set for each voxel the value of the index of the buffer that has the max voxel value, e.g. third buffer with index = 2 (cont..)
- // has max voxel value = 10 then the related voxel in outVolumeTensor will have value of 2
- let outVolumeTensor = tf.argMax(outVolumeBuffer.toTensor(), axis);
-
-
- let unstackOutVolumeTensor = tf.unstack(outVolumeTensor);
- outVolumeTensor.dispose();
-
- console.log("Converting unstack tensors to arrays: ")
-
-
- for(sliceTensorIdx = 0; sliceTensorIdx < unstackOutVolumeTensor.length; sliceTensorIdx++ ) {
- allOutputSlices[sliceTensorIdx] = Array.from(unstackOutVolumeTensor[sliceTensorIdx].dataSync())
- allOutputSlices2DCC[sliceTensorIdx] = Array.from(unstackOutVolumeTensor[sliceTensorIdx].dataSync())
- allOutputSlices3DCC[sliceTensorIdx] = Array.from(unstackOutVolumeTensor[sliceTensorIdx].dataSync())
- }
-
-
-
- if(isPostProcessEnable) {
- // console.log("wait postprocessing slices");
- document.getElementById("postProcessHint").innerHTML = "Post processing status => 2D Connected Comp: " + " In progress".fontcolor("red").bold();
- allOutputSlices2DCC = postProcessSlices(allOutputSlices2DCC); // remove noisy regions using 2d CC
- document.getElementById("postProcessHint").innerHTML = "postprocessing status => 2D Connected Comp: " + " Ok".fontcolor("green").bold() + " => 3D Connected Comp: " + " In progress".fontcolor("red").bold()
- allOutputSlices3DCC = postProcessSlices3D(allOutputSlices3DCC); // remove noisy regions using 3d CC
- document.getElementById("postProcessHint").innerHTML = "Post processing status => 2D Connected Comp: " + " Ok".fontcolor("green").bold() + " => 3D Connected Comp : " + " Ok".fontcolor("green").bold()
- }
-
- // draw output canvas
- let outCanvas = document.getElementById('outputCanvas');
- let output2dCC = document.getElementById('out2dCC');
- let output3dCC = document.getElementById('out3dCC');
- let slider = document.getElementById('sliceNav');
- drawOutputCanvas(outCanvas, slider.value, niftiHeader, niftiImage, allOutputSlices);
- drawOutputCanvas(output2dCC, slider.value, niftiHeader, niftiImage, allOutputSlices2DCC);
- drawOutputCanvas(output3dCC, slider.value, niftiHeader, niftiImage, allOutputSlices3DCC);
- }
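-
- // A minimal sketch (hypothetical, for illustration only; not wired into the UI) of the voting
- // scheme above: each class buffer accumulates votes per voxel, and argMax along the last axis
- // picks the winning class label per voxel.
- demoClassVoting = () => {
- const votes = tf.buffer([1, 1, 1, 3], 'float32'); // a 1x1x1 volume with 3 class buffers
- votes.set(1, 0, 0, 0, 0); // one vote for class 0
- votes.set(2, 0, 0, 0, 2); // two votes for class 2
- return tf.argMax(votes.toTensor(), -1); // [[[2]]] -- class 2 wins
- }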
-
-
- generateOutputSlices = (allPredictions, num_of_slices, slice_height, slice_width, batch_D, batch_H, batch_W) => {
- console.log("version 1");
- // buffer set ( depth, H, W) in order
- let outVolumeBuffer = tf.buffer([num_of_slices, slice_height, slice_width], 'float32')
- let isPostProcessEnable = document.getElementById("postProcessing").checked;
-
-
- for(let batchIdx = 0; batchIdx < allPredictions.length; batchIdx += 1) {
-
- let coord = allPredictions[batchIdx]["coordinates"]
- let pixelValues = allPredictions[batchIdx]["data"]
- let pixelValuesCounter = 0;
-
- for(let depthIdx = coord[0]; depthIdx < (batch_D + coord[0]); depthIdx += 1) {
- for(let rowIdx = coord[1]; rowIdx < (batch_H + coord[1]); rowIdx += 1) {
- for(let colIdx = coord[2]; colIdx < (batch_W + coord[2]); colIdx += 1) {
- outVolumeBuffer.set(pixelValues[pixelValuesCounter], depthIdx, rowIdx, colIdx );
- pixelValuesCounter += 1;
- }
- }
- }
- }
-
- // convert output buffer to tensor
- let outVolumeTensor = outVolumeBuffer.toTensor();
-
- let unstackOutVolumeTensor = tf.unstack(outVolumeTensor)
-
- console.log("Converting unstack tensors to arrays: ")
-
-
- for(let sliceTensorIdx = 0; sliceTensorIdx < unstackOutVolumeTensor.length; sliceTensorIdx++ ) {
- // read each slice back from the GPU once, then copy it into the three output arrays
- const sliceData = unstackOutVolumeTensor[sliceTensorIdx].dataSync();
- allOutputSlices[sliceTensorIdx] = Array.from(sliceData)
- allOutputSlices2DCC[sliceTensorIdx] = Array.from(sliceData)
- allOutputSlices3DCC[sliceTensorIdx] = Array.from(sliceData)
- }
-
-
- if(isPostProcessEnable) {
- // console.log("wait postprocessing slices");
- document.getElementById("postProcessHint").innerHTML = "Post processing status => 2D Connected Comp: " + " In progress".fontcolor("red").bold();
- allOutputSlices2DCC = postProcessSlices(allOutputSlices2DCC); // remove noisy regions using 2d CC
- document.getElementById("postProcessHint").innerHTML = "postprocessing status => 2D Connected Comp: " + " Ok".fontcolor("green").bold() + " => 3D Connected Comp: " + " In progress".fontcolor("red").bold()
- allOutputSlices3DCC = postProcessSlices3D(allOutputSlices3DCC); // remove noisy regions using 3d CC
- document.getElementById("postProcessHint").innerHTML = "Post processing status => 2D Connected Comp: " + " Ok".fontcolor("green").bold() + " => 3D Connected Comp : " + " Ok".fontcolor("green").bold()
- }
-
- // draw output canvas
- let outCanvas = document.getElementById('outputCanvas');
- let output2dCC = document.getElementById('out2dCC');
- let output3dCC = document.getElementById('out3dCC');
- let slider = document.getElementById('sliceNav');
- drawOutputCanvas(outCanvas, slider.value, niftiHeader, niftiImage, allOutputSlices);
- drawOutputCanvas(output2dCC, slider.value, niftiHeader, niftiImage, allOutputSlices2DCC);
- drawOutputCanvas(output3dCC, slider.value, niftiHeader, niftiImage, allOutputSlices3DCC);
- }
-
-
- inputVolumeChange = (val) => {
- document.getElementById("inputVolumeId").innerHTML = "Input Volume Dim: " + "[" + document.getElementById("batchSizeId").value + ", 38, 38, 38, " +
- document.getElementById("numOfChanId").value + "]"
- }
-
- // For future use
- download = (content, fileName, contentType) => {
- var a = document.createElement("a");
- var file = new Blob([content], {type: contentType});
- a.href = URL.createObjectURL(file);
- a.download = fileName;
- a.click();
- }
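-
- // Example usage (a sketch; allPredictions is collected inside runInference below):
- // download(JSON.stringify(allPredictions), "predictions.json", "application/json");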
-
- checkWebGl1 = () => {
-
- const gl = document.createElement('canvas').getContext('webgl');
- if (!gl) {
- if (typeof WebGLRenderingContext !== 'undefined') {
- console.log('WebGL1 may be disabled. Please try updating video card drivers');
- document.getElementById("results").innerHTML += ' WebGL1 status: ' + "Disabled".fontcolor("red").bold() + ' Try updating video card driver';
- } else {
- console.log('WebGL1 is not supported');
- document.getElementById("results").innerHTML += ' WebGL1 status: ' + "Red".fontcolor("red").bold() + ' Not supported';
- }
- } else {
- console.log('WebGL1 is enabled');
- document.getElementById("results").innerHTML += ' WebGL1 status: ' + "Enabled".fontcolor("green").bold();
- }
-
- }
-
- checkWebGl2 = () => {
-
- const gl = document.createElement('canvas').getContext('webgl2');
- if (!gl) {
- if (typeof WebGL2RenderingContext !== 'undefined') {
- console.log('WebGL2 may be disabled. Please try updating video card drivers');
- document.getElementById("results").innerHTML = 'WebGL2 status: ' + "Disabled".fontcolor("red").bold() + ' Try updating video card driver';
- } else {
- console.log('WebGL2 is not supported');
- document.getElementById("results").innerHTML = 'WebGL2 status: ' + "Red".fontcolor("red").bold() + ' Not supported';
- }
-
- checkWebGl1();
- } else {
- console.log('WebGL2 is enabled');
- document.getElementById("results").innerHTML = 'WebGL2 status: ' + "Enabled".fontcolor("green").bold();
- }
-
-
- }
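-
- // A minimal sketch (hypothetical helper, not called anywhere) showing how the backend could be
- // forced to CPU when WebGL2 is unavailable, using the standard tf.js backend API:
- demoSelectBackend = async () => {
- if (!document.createElement('canvas').getContext('webgl2')) {
- await tf.setBackend('cpu'); // fall back to the (slow) CPU backend
- }
- await tf.ready();
- console.log('Using backend: ', tf.getBackend());
- }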
-
- runInference = () => {
-
- let processingFlag = true;
-
- let batchSize = parseInt(document.getElementById("batchSizeId").value);
- let numOfChan = parseInt(document.getElementById("numOfChanId").value);
-
- if (document.getElementById("file").value == "") {
- document.getElementById("results").innerHTML = "No NIfTI file is selected".fontcolor("red");
- processingFlag = false;
- }
-
- if (isNaN(batchSize) || batchSize !== 1) {
- document.getElementById("results").innerHTML = "The batch size must be 1 for this demo".fontcolor("red");
- processingFlag = false;
- }
-
- if (isNaN(numOfChan) || (numOfChan !== 1)) {
- document.getElementById("results").innerHTML = "The number of channels must be 1 for this demo".fontcolor("red");
- processingFlag = false;
- }
-
- if(processingFlag) {
-
- tf.engine().startScope()
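- // startScope() makes the engine track tensors created from here on, so a matching endScope() can free them in bulk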
-
- console.log("Batch size: ", batchSize);
- console.log("Num of Channels: ", numOfChan);
-
- // Subvolume (batch) dimensions required by the inference model input, e.g. 38x38x38
- let batch_D = 38;
- let batch_H = 38;
- let batch_W = 38;
-
- let slice_width = niftiHeader.dims[1];
- let slice_height = niftiHeader.dims[2];
- let num_of_slices = niftiHeader.dims[3];
-
- let isBatchOverlapEnable = document.getElementById("batchOverlapId").checked;
-
-
-
- // let input_shape = [batchSize, 38, 38, 38, numOfChan];
- let input_shape = [batchSize, batch_D, batch_H, batch_W, numOfChan];
-
-
- let allSlices = getAllSlicesData1D(num_of_slices);
-
- let allSlices_2D = getAllSlices2D(allSlices, slice_height, slice_width);
- // get slices_3d tensor
- let slices_3d = getSlices3D(allSlices_2D);
- tf.dispose(allSlices_2D);
- // normalize MRI data to the range [0, 1]
- slices_3d = normalizeVolumeData(slices_3d);
-
- let allBatches = [];
- let headSubCubesCoords = [];
-
-
- if(isBatchOverlapEnable) {
- // number of additional batches focused on the brain/head volume
- let num_of_Overlap_batches = parseInt(document.getElementById("numOverlapBatchesId").value);
- console.log(" num of overlapped batches: ", num_of_Overlap_batches);
-
- // Find the centroid of 3D head volume
- // const headCentroid = findHeadCentroid(slices_3d, num_of_slices, slice_height, slice_width);
-
- // Find the centroid of 3D head volume and the variance
- let cent_var = cubeMoments(slices_3d, 0.5);
- // Mean or centroid
- const headCentroid = cent_var[0];
- console.log(" Head 3D Centroid : ", headCentroid);
- // Variance
- const sigma = cent_var[1];
- console.log(" Head 3D Variance : ", sigma);
-
-
- headSubCubesCoords = findCoordsOfAddBrainBatches(num_of_Overlap_batches,
- new Array(headCentroid[0], headCentroid[1], headCentroid[2]),
- new Array(sigma[0], sigma[1], sigma[2]),
- new Array(num_of_slices, slice_height, slice_width),
- new Array(batch_D, batch_H, batch_W));
-
- allBatches = sliceVolumeIntoOverlappedBatches(slices_3d, num_of_slices, slice_height, slice_width, batch_D, batch_H, batch_W, headSubCubesCoords);
-
- } else {
- // This option covers all slices; where the remaining slices are not enough for a full batch,
- // the batch overlaps with the previous one. e.g. for a 3*5*5 DHW volume and a 2*2*2 batch, 2*3*3 = 18 batches are needed
- let num_of_batches = Math.ceil(slice_width/batch_W) * Math.ceil(slice_height/batch_H) * Math.ceil(num_of_slices/batch_D);
- console.log("Num of Batches for inference: ", num_of_batches);
-
- allBatches = sliceVolumeIntoBatches(slices_3d, num_of_slices, slice_height, slice_width, batch_D, batch_H, batch_W);
- }
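-
- // A minimal sketch (hypothetical helper) factoring out the batch-count arithmetic used above:
- // const numBatches = (D, H, W, bD, bH, bW) => Math.ceil(D/bD) * Math.ceil(H/bH) * Math.ceil(W/bW);
- // numBatches(3, 5, 5, 2, 2, 2) === 18, matching the 3*5*5 example above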
-
- tf.dispose(slices_3d);
- console.log(" sample of a batch for inference : ", Array.from(allBatches[0].data.dataSync()))
-
- console.log(tf.getBackend());
- checkWebGl2();
-
-
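- // Diagnostic fetch (logging only): print the raw model.json so its topology can be inspected in the console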
- fetch('./mnm_tfjs_me_test/model.json')
- .then(response => {
- return response.json();
- })
- .then(data => console.log("fetch results: ", data));
-
-
- let allPredictions = [];
- console.log("predictOnBatch enabled");
-
- model.then(function (res) {
-
- try {
- let startTime = performance.now();
- // maxLabelPredicted in whole volume of the brain
- let maxLabelPredicted = 0;
-
- let j = 0;
- let timer = window.setInterval(function() {
-
- let curTensor = tf.tensor(allBatches[j].data.dataSync(), input_shape);
- // let prediction = res.predict( curTensor );
- let prediction = res.predictOnBatch( curTensor );
- tf.dispose(curTensor);
- let axis = -1; // argMax over the last (class) axis
- let prediction_argmax = tf.argMax(prediction, axis);
- tf.dispose(prediction);
- const predictionData = Array.from(prediction_argmax.dataSync()); // read back from GPU once
- allPredictions.push({"id": allBatches[j].id, "coordinates": allBatches[j].coordinates, "data": predictionData })
- let curBatchMaxLabel = Math.max(...predictionData);
-
- if( maxLabelPredicted < curBatchMaxLabel ) {
- maxLabelPredicted = curBatchMaxLabel;
- }
-
- tf.dispose(prediction_argmax);
-
-
- let memStatus = tf.memory().unreliable ? "Red" : "Green";
- let unreliableReasons = tf.memory().unreliable ? "unreliable reasons: " + tf.memory().reasons.join(", ").fontcolor("red").bold() : "";
- document.getElementById("completed").innerHTML = "Batches completed: " + (j+1) + " / " + allBatches.length +
- // https://js.tensorflow.org/api/latest/#memory
- "