diff --git a/README.md b/README.md
index 1fdcef1..55ed708 100644
--- a/README.md
+++ b/README.md
@@ -56,7 +56,14 @@ The following attributes are available to configure your module:
 
 | `model_path` | string | **Required** | | Path to **standalone** model file |
 | `label_path` | string | Optional | | Path to file with class labels. |
 
+### Example configuration
+```json
+{
+  "model_path": "${packages.ml_model.myMLModel}/my_model.pt",
+  "label_path": "${packages.ml_model.myMLModel}/labels.txt"
+}
+```
 
 # Methods
 
diff --git a/meta.json b/meta.json
index c78e0e4..a5d457c 100644
--- a/meta.json
+++ b/meta.json
@@ -1,12 +1,14 @@
 {
   "module_id": "viam:torch-cpu",
   "visibility": "public",
-  "url": "https://github.com/viam-labs/torch",
+  "url": "https://github.com/viam-modules/torch",
   "description": "Viam ML Module service serving PyTorch models.",
   "models": [
     {
       "api": "rdk:service:mlmodel",
-      "model": "viam:mlmodel:torch-cpu"
+      "model": "viam:mlmodel:torch-cpu",
+      "markdown_link": "README.md#example-configuration",
+      "short_description": "An ML Model Service that can run PyTorch models in a standard format"
     }
   ],
   "build": {
@@ -19,4 +21,4 @@
     ]
   },
   "entrypoint": "dist/main"
-}
\ No newline at end of file
+}
diff --git a/src/test_local.py b/src/test_local.py
index 85b286b..bdf7bb1 100644
--- a/src/test_local.py
+++ b/src/test_local.py
@@ -180,4 +180,3 @@ def test_infer_method(self):
 
 if __name__ == "__main__":
     unittest.main()
-
\ No newline at end of file
diff --git a/src/torch_mlmodel_module.py b/src/torch_mlmodel_module.py
index ff8b90b..62cf255 100644
--- a/src/torch_mlmodel_module.py
+++ b/src/torch_mlmodel_module.py
@@ -95,7 +95,11 @@ def get_attribute_from_config(attribute_name: str, default, of_type=None):
         self._metadata = self.inspector.find_metadata(label_file)
 
     async def infer(
-        self, input_tensors: Dict[str, NDArray], *, timeout: Optional[float]
+        self,
+        input_tensors: Dict[str, NDArray],
+        *,
+        extra: Optional[Mapping[str, ValueTypes]],
+        timeout: Optional[float],
     ) -> Dict[str, NDArray]:
         """Take an already ordered input tensor as an array, make an inference on the model,
         and return an output tensor map.
@@ -110,7 +114,12 @@
         """
         return self.torch_model.infer(input_tensors)
 
-    async def metadata(self, *, timeout: Optional[float]) -> Metadata:
+    async def metadata(
+        self,
+        *,
+        extra: Optional[Mapping[str, ValueTypes]],
+        timeout: Optional[float],
+    ) -> Metadata:
         """Get the metadata (such as name, type, expected tensor/array shape, inputs, and outputs)
         associated with the ML model.
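The signature changes in `src/torch_mlmodel_module.py` make `extra` and `timeout` keyword-only parameters with no default values, so callers of `infer` and `metadata` must now pass both explicitly. A minimal sketch of the resulting call pattern follows; the `service` handle and the `"input"` tensor key are hypothetical placeholders, not names taken from this repo:

```python
# Hypothetical usage sketch: `service` stands in for an already configured
# instance of this module's MLModel service, and "input" for whatever input
# tensor name the loaded model actually expects.
import numpy as np


async def run_inference(service) -> None:
    input_tensors = {"input": np.zeros((1, 3, 224, 224), dtype=np.float32)}

    # In the new signatures, `extra` and `timeout` are keyword-only and have
    # no defaults, so both must be supplied (None is a valid value for each).
    outputs = await service.infer(input_tensors, extra=None, timeout=None)
    meta = await service.metadata(extra=None, timeout=None)

    print(meta)
    print({name: tensor.shape for name, tensor in outputs.items()})
```

Presumably the added `extra` parameter brings these methods in line with the keyword-only `extra`/`timeout` convention used elsewhere in the Viam Python SDK; the method bodies ignore `extra`, so passing `extra=None` is sufficient.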