main.js
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT license.
const ort = require('onnxruntime-web');
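// onnxruntime-web runs ONNX models in the browser, using a WebAssembly backend
// by default; the require above is expected to be resolved by a bundler.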
// use an async context to call onnxruntime functions.
async function main() {
    try {
        // create a new session and load the specific model.
        //
        // the model in this example contains a single MatMul node
        // it has 2 inputs: 'a'(float32, 3x4) and 'b'(float32, 4x3)
        // it has 1 output: 'c'(float32, 3x3)
        const session = await ort.InferenceSession.create('./model.onnx');
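        // a second argument can supply session options, e.g. selecting an
        // execution provider:
        // ort.InferenceSession.create('./model.onnx', { executionProviders: ['wasm'] })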
        // prepare inputs. a tensor needs its corresponding TypedArray as data
        const dataA = Float32Array.from([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]);
        const dataB = Float32Array.from([10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120]);
        const tensorA = new ort.Tensor('float32', dataA, [3, 4]);
        const tensorB = new ort.Tensor('float32', dataB, [4, 3]);
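        // tensor data is interpreted in row-major order, so tensorA is the 3x4 matrix
        // [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]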
        // prepare feeds. use model input names as keys.
        const feeds = { a: tensorA, b: tensorB };
        // feed inputs and run
        const results = await session.run(feeds);
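        // for these inputs the MatMul output is the 3x3 matrix
        // c = [700, 800, 900, 1580, 1840, 2100, 2460, 2880, 3300] (row-major)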
        // read from results
        const dataC = results.c.data;
        document.write(`data of result tensor 'c': ${dataC}`);
    } catch (e) {
        document.write(`failed to run inference on the ONNX model: ${e}.`);
    }
}
main();
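// note: a sketch of typical usage, assuming a webpack-style setup as in the
// upstream example: bundle this file, serve the bundle next to model.onnx, and
// load it from an HTML page via a <script> tag.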