diff --git a/examples/README.md b/examples/README.md
index aef20234..0b4a53df 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -138,7 +138,7 @@ This code sample shows how to get a deployment from the server.
 > geti = Geti(server_config=server_config)
 >
 > # Create deployment for the project, and prepare it for running inference
-> deployment = geti.deploy_project(PROJECT_NAME)
+> deployment = geti.deploy_project(project_name=PROJECT_NAME)
 >
 > # Save deployment on local
 > deployment.save(PATH_TO_DEPLOYMENT)
diff --git a/geti_sdk/demos/demo_projects/coco_demos.py b/geti_sdk/demos/demo_projects/coco_demos.py
index 21237cd1..7b9f1c59 100644
--- a/geti_sdk/demos/demo_projects/coco_demos.py
+++ b/geti_sdk/demos/demo_projects/coco_demos.py
@@ -13,6 +13,7 @@
 # and limitations under the License.
 
 import logging
+import time
 from typing import Optional
 
 from geti_sdk import Geti
@@ -382,6 +383,12 @@ def ensure_trained_example_project(
             number_of_images_to_annotate=45,
             enable_auto_train=True,
         )
+        # Should wait for some time for the job to appear as scheduled before checking if
+        # the project is trained. Auto training is triggered after around 5 seconds.
+        print(
+            "Project created. Waiting for training job to be scheduled. This may take a few seconds."
+        )
+        time.sleep(5)
     else:
         raise ValueError(
             f"The project named `{project_name}` does not exist on the server at "
diff --git a/geti_sdk/demos/demo_projects/utils.py b/geti_sdk/demos/demo_projects/utils.py
index 84509680..8194b4d8 100644
--- a/geti_sdk/demos/demo_projects/utils.py
+++ b/geti_sdk/demos/demo_projects/utils.py
@@ -51,7 +51,9 @@ def ensure_project_is_trained(geti: Geti, project: Project) -> bool:
     )
     # If there are no jobs running for the project, we launch them
     jobs = training_client.get_jobs(project_only=True)
-    running_jobs = [job for job in jobs if job.state == JobState.RUNNING]
+    running_jobs = [
+        job for job in jobs if job.state in [JobState.RUNNING, JobState.SCHEDULED]
+    ]
     tasks = project.get_trainable_tasks()
 
     new_jobs = []
diff --git a/geti_sdk/geti.py b/geti_sdk/geti.py
index 8c017c9c..6c85eb67 100644
--- a/geti_sdk/geti.py
+++ b/geti_sdk/geti.py
@@ -371,7 +371,7 @@ def download_project_data(
         # Download deployment
         if include_deployment:
             logging.info("Creating deployment for project...")
-            self.deploy_project(project, output_folder=target_folder)
+            self.deploy_project(project=project, output_folder=target_folder)
 
         logging.info(f"Project '{project.name}' was downloaded successfully.")
         return project
@@ -1132,7 +1132,8 @@ def upload_and_predict_video(
 
     def deploy_project(
         self,
-        project: Project,
+        project: Optional[Project] = None,
+        project_name: Optional[str] = None,
         output_folder: Optional[Union[str, os.PathLike]] = None,
         models: Optional[Sequence[BaseModel]] = None,
         enable_explainable_ai: bool = False,
@@ -1147,7 +1148,10 @@
        for each task in the project. However, it is possible to specify a
        particular model to use, by passing it in the list of `models` as input to
        this method.
-        :param project: Project object to deploy
+        :param project: Project object to deploy. Either `project` or `project_name`
+            must be specified.
+        :param project_name: Name of the project to deploy. Either `project` or
+            `project_name` must be specified.
         :param output_folder: Path to a folder on local disk to which the Deployment
            should be downloaded. If no path is specified, the deployment will not be
            saved.
@@ -1165,6 +1169,11 @@
            launch an OVMS container serving the models.
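The `geti.py` hunks above make `project` optional on `deploy_project()` and add a `project_name` alternative, guarded so that at least one of the two must be given. A minimal usage sketch of both call styles (a hedged example, not part of the patch; the project name and output path are placeholders, and the client setup mirrors the notebooks touched in this diff):

```python
from geti_sdk import Geti
from geti_sdk.utils import get_server_details_from_env

# Connect to the Intel Geti server using the credentials from the .env file
geti = Geti(server_config=get_server_details_from_env())

# Option 1: deploy by project name, as in the updated README example
deployment = geti.deploy_project(project_name="COCO animal detection demo")

# Option 2: deploy by Project object, e.g. when the project is already at hand
project = geti.get_project(project_name="COCO animal detection demo")
deployment = geti.deploy_project(project=project)

# Either way, the deployment can be saved and used for local inference
deployment.save("deployments/COCO animal detection demo")
deployment.load_inference_models(device="CPU")
```

Passing the `Project` object avoids an extra lookup when the project is already available, which is what `download_project_data()` now does internally via `self.deploy_project(project=project, ...)`.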
:return: Deployment for the project """ + if project is None and project_name is None: + raise ValueError("Either `project` or `project_name` must be specified.") + if project is None: + project = self.project_client.get_project_by_name(project_name=project_name) + deployment_client = self._deployment_clients.get(project.id, None) if deployment_client is None: # Create deployment client and add to cache. diff --git a/notebooks/001_create_project.ipynb b/notebooks/001_create_project.ipynb index 4d2f5dd2..425b6920 100644 --- a/notebooks/001_create_project.ipynb +++ b/notebooks/001_create_project.ipynb @@ -14,15 +14,15 @@ }, { "cell_type": "code", - "execution_count": null, "id": "3cb49dd2-7032-40e0-a4c2-8203ba1072bf", "metadata": {}, - "outputs": [], "source": [ "from geti_sdk.utils import get_server_details_from_env\n", "\n", "geti_server_configuration = get_server_details_from_env()" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -34,15 +34,15 @@ }, { "cell_type": "code", - "execution_count": null, "id": "ba8318f4-3d97-4949-abf0-6cdafe46572e", "metadata": {}, - "outputs": [], "source": [ "from geti_sdk import Geti\n", "\n", "geti = Geti(server_config=geti_server_configuration)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -55,17 +55,17 @@ }, { "cell_type": "code", - "execution_count": null, "id": "081e700d-9e2e-4022-b4d6-dac3adf1d4f8", "metadata": {}, - "outputs": [], "source": [ "from geti_sdk.rest_clients import ProjectClient\n", "\n", "project_client = ProjectClient(session=geti.session, workspace_id=geti.workspace_id)\n", "\n", "projects = project_client.list_projects()" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -89,10 +89,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "30e46136-5283-4e3d-8dcf-7a89bee4de6b", "metadata": {}, - "outputs": [], "source": [ "from geti_sdk.data_models.enums import TaskType\n", "\n", @@ -101,7 +99,9 @@ "for task_type in TaskType:\n", " if task_type.is_trainable:\n", " print(\" \" + str(task_type))" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -138,29 +138,29 @@ }, { "cell_type": "code", - "execution_count": null, "id": "b18c25d1-f53b-495e-a48e-ee89b7f95d4e", "metadata": {}, - "outputs": [], "source": [ "# First set the project parameters. 
Feel free to experiment here!\n", "PROJECT_NAME = \"Segmentation demo\"\n", "PROJECT_TYPE = \"segmentation\"\n", "LABELS = [[\"dog\", \"cat\", \"horse\"]]" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "4941f712-f97b-48ff-a11b-74687d0bb49f", "metadata": {}, - "outputs": [], "source": [ "# Now, use the project client to create the project\n", "project = project_client.create_project(\n", " project_name=PROJECT_NAME, project_type=PROJECT_TYPE, labels=LABELS\n", ")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -173,13 +173,13 @@ }, { "cell_type": "code", - "execution_count": null, "id": "c9d3496b-663d-41d3-b381-5e05b4c8e5b0", "metadata": {}, - "outputs": [], "source": [ "print(project.summary)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -191,13 +191,13 @@ }, { "cell_type": "code", - "execution_count": null, "id": "8786e8df-3ecb-4947-ad71-c8512e0b22ac", "metadata": {}, - "outputs": [], "source": [ "print(project.overview)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -209,16 +209,16 @@ }, { "cell_type": "code", - "execution_count": null, "id": "01cb2ee6-9d45-4d17-aa6f-2ac0330ce369", "metadata": {}, - "outputs": [], "source": [ "task_list = project.get_trainable_tasks()\n", "print(f\"Project '{project.name}' contains {len(task_list)} trainable tasks.\")\n", "for task in task_list:\n", " print(task.summary)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -230,14 +230,14 @@ }, { "cell_type": "code", - "execution_count": null, "id": "71dbcca8-b79e-441d-9143-091c2b08ba35", "metadata": {}, - "outputs": [], "source": [ "project = project_client.get_project(project_name=PROJECT_NAME)\n", "print(project.summary)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -250,10 +250,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "9c20a99c-07b6-4cc2-87fa-abf0ba210031", "metadata": {}, - "outputs": [], "source": [ "PIPELINE_PROJECT_NAME = \"Detection to hierarchical classification demo\"\n", "PIPELINE_PROJECT_TYPE = \"detection_to_classification\"\n", @@ -270,21 +268,23 @@ " {\"name\": \"school bus\", \"parent_id\": \"bus\", \"group\": \"bus\"},\n", " ],\n", "]" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "01d01757-035c-4690-86f0-2f40487a6c52", "metadata": {}, - "outputs": [], "source": [ "pipeline_project = project_client.create_project(\n", " project_name=PIPELINE_PROJECT_NAME,\n", " project_type=PIPELINE_PROJECT_TYPE,\n", " labels=PIPELINE_LABELS,\n", ")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -296,13 +296,13 @@ }, { "cell_type": "code", - "execution_count": null, "id": "71a5b21f-edf6-4be4-8e86-08db768040f2", "metadata": {}, - "outputs": [], "source": [ "print(pipeline_project.summary)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -314,13 +314,13 @@ }, { "cell_type": "code", - "execution_count": null, "id": "d23f29b8-056d-476a-a244-5f7355e00743", "metadata": {}, - "outputs": [], "source": [ "print(pipeline_project.overview)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -339,15 +339,15 @@ }, { "cell_type": "code", - "execution_count": null, "id": "e2a2548a-8f84-4395-bdf7-8a4ebf3447a3", "metadata": {}, - "outputs": [], "source": [ "# Delete the 
simple project\n", "\n", "# project_client.delete_project(project)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -361,22 +361,23 @@ }, { "cell_type": "code", - "execution_count": null, "id": "ab209bce-5195-4f40-9d40-032af0bb5031", "metadata": {}, - "outputs": [], "source": [ "# Delete the pipeline project\n", - "project_client.delete_project(pipeline_project, requires_confirmation=False)" - ] + "\n", + "# project_client.delete_project(pipeline_project, requires_confirmation=False)" + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "fa885dd0-b669-4c83-b6b2-bbde010144cd", "metadata": {}, + "source": [], "outputs": [], - "source": [] + "execution_count": null } ], "metadata": { diff --git a/notebooks/003_upload_and_predict_image.ipynb b/notebooks/003_upload_and_predict_image.ipynb index 630e384a..b60a28cb 100644 --- a/notebooks/003_upload_and_predict_image.ipynb +++ b/notebooks/003_upload_and_predict_image.ipynb @@ -11,14 +11,12 @@ }, { "cell_type": "code", - "execution_count": null, "id": "bc86f115-d96c-463c-962d-6b50d88b330d", "metadata": { "pycharm": { "name": "#%%\n" } }, - "outputs": [], "source": [ "# As usual we will connect to the platform first, using the server details from the .env file\n", "\n", @@ -28,7 +26,9 @@ "geti_server_configuration = get_server_details_from_env()\n", "\n", "geti = Geti(server_config=geti_server_configuration)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -41,15 +41,15 @@ }, { "cell_type": "code", - "execution_count": null, "id": "63108a6c-c99b-4be9-b4fc-eca5556756c6", "metadata": {}, - "outputs": [], "source": [ "from geti_sdk.rest_clients import ImageClient, PredictionClient, ProjectClient\n", "\n", "project_client = ProjectClient(session=geti.session, workspace_id=geti.workspace_id)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -61,13 +61,13 @@ }, { "cell_type": "code", - "execution_count": null, "id": "5b2dbd9b-29b9-4000-8c55-cdb5cfd86463", "metadata": {}, - "outputs": [], "source": [ "PROJECT_NAME = \"COCO animal detection demo\"" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -79,10 +79,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "059ff478-a5da-4363-a281-3ef5ad265151", "metadata": {}, - "outputs": [], "source": [ "project = project_client.get_project(project_name=PROJECT_NAME)\n", "image_client = ImageClient(\n", @@ -91,7 +89,9 @@ "prediction_client = PredictionClient(\n", " session=geti.session, workspace_id=geti.workspace_id, project=project\n", ")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -104,14 +104,14 @@ }, { "cell_type": "code", - "execution_count": null, "id": "2465640a-50ca-46cf-92bb-75dc29c65ab2", "metadata": {}, - "outputs": [], "source": [ "images = image_client.get_all_images()\n", "print(f\"Project '{project.name}' contains {len(images)} images.\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -123,15 +123,15 @@ }, { "cell_type": "code", - "execution_count": null, "id": "5113c81a-5e5f-4109-b781-b057bb7e674d", "metadata": {}, - "outputs": [], "source": [ "from geti_sdk.demos import EXAMPLE_IMAGE_PATH\n", "\n", "image = image_client.upload_image(image=EXAMPLE_IMAGE_PATH)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -143,14 +143,14 @@ }, { "cell_type": "code", - "execution_count": null, 
"id": "a8ff33cf-f693-4c55-95bb-215e5065f99f", "metadata": {}, - "outputs": [], "source": [ "images = image_client.get_all_images()\n", "print(f\"Project '{project.name}' contains {len(images)} images.\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -167,10 +167,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "1b9fc8a2-8020-40ed-927c-ea124d698190", "metadata": {}, - "outputs": [], "source": [ "from geti_sdk.demos import ensure_trained_example_project\n", "\n", @@ -179,7 +177,9 @@ "\n", "# Then, request the prediction\n", "prediction = prediction_client.get_image_prediction(image)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -192,20 +192,18 @@ }, { "cell_type": "code", - "execution_count": null, "id": "3ff32064-a90a-4de7-9bd7-4bc8f1377e6b", "metadata": {}, - "outputs": [], "source": [ "print(prediction.overview)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "1affebae-88b4-46ac-97c5-e8373a880d74", "metadata": {}, - "outputs": [], "source": [ "import cv2\n", "\n", @@ -220,7 +218,9 @@ "image_rgb = cv2.cvtColor(numpy_image, cv2.COLOR_BGR2RGB)\n", "result = visualizer.draw(image_rgb, prediction)\n", "visualizer.show_in_notebook(result)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -233,13 +233,11 @@ }, { "cell_type": "code", - "execution_count": null, "id": "1bb56cd8-35db-45f8-8b2f-64aa36caff1e", "metadata": {}, - "outputs": [], "source": [ "quick_image, quick_prediction = geti.upload_and_predict_image(\n", - " project_name=PROJECT_NAME,\n", + " project=project,\n", " image=EXAMPLE_IMAGE_PATH,\n", " visualise_output=False,\n", " delete_after_prediction=False,\n", @@ -247,7 +245,9 @@ "quick_image_rgb = cv2.cvtColor(quick_image.numpy, cv2.COLOR_BGR2RGB)\n", "quick_result = visualizer.draw(quick_image_rgb, quick_prediction)\n", "visualizer.show_in_notebook(quick_result)" - ] + ], + "outputs": [], + "execution_count": null } ], "metadata": { diff --git a/notebooks/004_create_pipeline_project_from_dataset.ipynb b/notebooks/004_create_pipeline_project_from_dataset.ipynb index 6b5b3df9..0afcee4c 100644 --- a/notebooks/004_create_pipeline_project_from_dataset.ipynb +++ b/notebooks/004_create_pipeline_project_from_dataset.ipynb @@ -13,10 +13,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "35717528-ee7e-41be-af63-7451e6c169f1", "metadata": {}, - "outputs": [], "source": [ "# As usual we will connect to the platform first, using the server details from the .env file\n", "\n", @@ -26,7 +24,9 @@ "geti_server_configuration = get_server_details_from_env()\n", "\n", "geti = Geti(server_config=geti_server_configuration)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -39,15 +39,15 @@ }, { "cell_type": "code", - "execution_count": null, "id": "45ee2b8f-6f79-4916-857b-9af585848d21", "metadata": {}, - "outputs": [], "source": [ "from geti_sdk.demos import get_coco_dataset\n", "\n", "COCO_PATH = get_coco_dataset(dataset_path=None)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -60,10 +60,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "25c1033b-2856-46c0-b513-7b41de0b128e", "metadata": {}, - "outputs": [], "source": [ "from geti_sdk.annotation_readers import DatumAnnotationReader\n", "\n", @@ -73,7 +71,9 @@ "annotation_reader_task_2 = DatumAnnotationReader(\n", " base_data_folder=COCO_PATH, 
annotation_format=\"coco\"\n", ")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -90,14 +90,14 @@ }, { "cell_type": "code", - "execution_count": null, "id": "58d7ab28-11e3-43bd-ba8a-cf111b9c209b", "metadata": {}, - "outputs": [], "source": [ "domestic_animals = [\"dog\", \"cat\", \"horse\"]\n", "wild_animals = [\"elephant\", \"giraffe\"]" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -110,15 +110,15 @@ }, { "cell_type": "code", - "execution_count": null, "id": "3b2b3428-b4c4-4421-83cb-0726502c9fcf", "metadata": {}, - "outputs": [], "source": [ "all_labels = domestic_animals + wild_animals\n", "annotation_reader_task_1.filter_dataset(labels=all_labels, criterion=\"OR\")\n", "annotation_reader_task_1.group_labels(labels_to_group=all_labels, group_name=\"animal\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -131,17 +131,17 @@ }, { "cell_type": "code", - "execution_count": null, "id": "4a7d6e23-799b-4e1c-9d0e-3c7f10812020", "metadata": {}, - "outputs": [], "source": [ "annotation_reader_task_2.filter_dataset(labels=all_labels, criterion=\"OR\")\n", "annotation_reader_task_2.group_labels(\n", " labels_to_group=domestic_animals, group_name=\"domestic\"\n", ")\n", "annotation_reader_task_2.group_labels(labels_to_group=wild_animals, group_name=\"wild\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -158,10 +158,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "e73a6646-1b1d-4997-b986-c8f75bbe0c38", "metadata": {}, - "outputs": [], "source": [ "PROJECT_NAME = \"COCO multitask animal demo\"\n", "PROJECT_TYPE = \"detection_to_classification\"\n", @@ -175,7 +173,9 @@ " number_of_images_to_annotate=90,\n", " enable_auto_train=True,\n", ")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -187,21 +187,21 @@ }, { "cell_type": "code", - "execution_count": null, "id": "142e6932-0b3e-4a32-b5e2-bcc655dd807e", "metadata": {}, - "outputs": [], "source": [ "print(project.summary)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "32050c75-56ec-4489-9a6d-7452a0d80345", "metadata": {}, + "source": [], "outputs": [], - "source": [] + "execution_count": null } ], "metadata": { diff --git a/notebooks/006_reconfigure_task.ipynb b/notebooks/006_reconfigure_task.ipynb index b5dc4fc0..f20db1a3 100644 --- a/notebooks/006_reconfigure_task.ipynb +++ b/notebooks/006_reconfigure_task.ipynb @@ -11,10 +11,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "4ebd0b09-0adb-4197-a3f2-3add32dbe2c9", "metadata": {}, - "outputs": [], "source": [ "# As usual we will connect to the platform first, using the server details from the .env file. 
We will also create a ProjectClient for the server\n", "\n", @@ -27,7 +25,9 @@ "geti = Geti(server_config=geti_server_configuration)\n", "\n", "project_client = ProjectClient(session=geti.session, workspace_id=geti.workspace_id)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -40,16 +40,16 @@ }, { "cell_type": "code", - "execution_count": null, "id": "a3ca66c8-b40e-454d-aaa0-99b937c6e5c1", "metadata": {}, - "outputs": [], "source": [ "PROJECT_NAME = \"COCO multitask animal demo\"\n", "projects = project_client.list_projects()\n", "\n", "project = project_client.get_project(PROJECT_NAME)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -64,17 +64,17 @@ }, { "cell_type": "code", - "execution_count": null, "id": "294b15c7-c8f3-46e0-88f1-9c5b8b94ed0a", "metadata": {}, - "outputs": [], "source": [ "from geti_sdk.rest_clients import ConfigurationClient\n", "\n", "configuration_client = ConfigurationClient(\n", " session=geti.session, workspace_id=geti.workspace_id, project=project\n", ")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -87,14 +87,14 @@ }, { "cell_type": "code", - "execution_count": null, "id": "7508fb8e-d1a2-4b05-8fc4-a8339002fd62", "metadata": {}, - "outputs": [], "source": [ "task = project.get_trainable_tasks()[0]\n", "print(task.summary)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -107,14 +107,14 @@ }, { "cell_type": "code", - "execution_count": null, "id": "5357dbd8-d4b7-417f-94a6-04f30ca91898", "metadata": {}, - "outputs": [], "source": [ "task_configuration = configuration_client.get_task_configuration(task_id=task.id)\n", "print(task_configuration.summary)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -127,13 +127,13 @@ }, { "cell_type": "code", - "execution_count": null, "id": "b87f33ce-dc66-49bb-ba04-e30ee34de3ae", "metadata": {}, - "outputs": [], "source": [ "print(task_configuration.batch_size.overview)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -148,26 +148,26 @@ }, { "cell_type": "code", - "execution_count": null, "id": "faaa9cdf-2e1c-42d6-ac78-3661c8524e17", "metadata": {}, - "outputs": [], "source": [ "# Set batch size to 10\n", "task_configuration.batch_size.value = 10" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "a60d6b7c-36cd-4bb2-b8c8-350245e7d1b3", "metadata": {}, - "outputs": [], "source": [ "# Double the learning rate\n", "old_learning_rate = task_configuration.learning_rate.value\n", "task_configuration.learning_rate.value = 2 * old_learning_rate" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -180,13 +180,13 @@ }, { "cell_type": "code", - "execution_count": null, "id": "9d81328c-88b4-48c2-a0a7-d67ac31a038c", "metadata": {}, - "outputs": [], "source": [ "configuration_client.set_configuration(task_configuration)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -199,22 +199,22 @@ }, { "cell_type": "code", - "execution_count": null, "id": "2a46b005-d8c4-4e36-8032-3f5f342ce2df", "metadata": {}, - "outputs": [], "source": [ "new_task_configuration = configuration_client.get_task_configuration(task_id=task.id)\n", "print(new_task_configuration.summary)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": 
"f4290c2f-9952-4387-8150-b757972d9900", "metadata": {}, + "source": [], "outputs": [], - "source": [] + "execution_count": null } ], "metadata": { diff --git a/notebooks/007_train_project.ipynb b/notebooks/007_train_project.ipynb index 9ae790ba..fd86c0c3 100644 --- a/notebooks/007_train_project.ipynb +++ b/notebooks/007_train_project.ipynb @@ -11,10 +11,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "9b08efaa-e4ad-4a87-9d6c-9c7468a9a557", "metadata": {}, - "outputs": [], "source": [ "# As usual we will connect to the platform first, using the server details from the .env file\n", "\n", @@ -24,7 +22,9 @@ "geti_server_configuration = get_server_details_from_env()\n", "\n", "geti = Geti(server_config=geti_server_configuration)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -37,16 +37,16 @@ }, { "cell_type": "code", - "execution_count": null, "id": "2005dc7b-e319-4875-a6b0-089ec9025739", "metadata": {}, - "outputs": [], "source": [ "from geti_sdk.rest_clients import ProjectClient\n", "\n", "project_client = ProjectClient(session=geti.session, workspace_id=geti.workspace_id)\n", "projects = project_client.list_projects()" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -58,15 +58,15 @@ }, { "cell_type": "code", - "execution_count": null, "id": "71b3bffd-2c07-48eb-85a2-382fe1d0de2f", "metadata": {}, - "outputs": [], "source": [ "PROJECT_NAME = \"COCO multitask animal demo\"\n", "\n", "project = project_client.get_project(project_name=PROJECT_NAME)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -81,17 +81,17 @@ }, { "cell_type": "code", - "execution_count": null, "id": "93022572-a722-4a36-b5f4-af74d41286c0", "metadata": {}, - "outputs": [], "source": [ "from geti_sdk.rest_clients import TrainingClient\n", "\n", "training_client = TrainingClient(\n", " session=geti.session, workspace_id=geti.workspace_id, project=project\n", ")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -104,14 +104,14 @@ }, { "cell_type": "code", - "execution_count": null, "id": "8102e029-f6de-4788-ac0f-5e043b969beb", "metadata": {}, - "outputs": [], "source": [ "task = project.get_trainable_tasks()[0]\n", "print(task.summary)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -124,14 +124,14 @@ }, { "cell_type": "code", - "execution_count": null, "id": "42e93702-5f12-44a7-b2d4-0a0c152b3b21", "metadata": {}, - "outputs": [], "source": [ "available_algorithms = training_client.get_algorithms_for_task(task=task)\n", "print(available_algorithms.summary)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -145,16 +145,16 @@ }, { "cell_type": "code", - "execution_count": null, "id": "ff07e8b9-6a15-49d3-8908-d3a4f8ba81e5", "metadata": {}, - "outputs": [], "source": [ "algorithm = available_algorithms.get_default_for_task_type(task.type)\n", "\n", "print(f\"Default algorithm for `{task.type}` task: `{algorithm.name}`.\\n\")\n", "print(algorithm.overview)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -167,14 +167,14 @@ }, { "cell_type": "code", - "execution_count": null, "id": "9f8281ba-b513-4832-990d-2038a47fc33c", "metadata": {}, - "outputs": [], "source": [ "status = training_client.get_status()\n", "print(status.summary)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -187,16 +187,16 @@ }, { "cell_type": "code", - 
"execution_count": null, "id": "e96169a7-67dd-4734-8372-1dd5c88f4a88", "metadata": {}, - "outputs": [], "source": [ "job = training_client.train_task(\n", " algorithm=algorithm,\n", " task=task,\n", ")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -211,13 +211,13 @@ }, { "cell_type": "code", - "execution_count": null, "id": "1964a851-a2ef-4343-9506-fdca0f0ffbfb", "metadata": {}, - "outputs": [], "source": [ "training_client.monitor_job(job);" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -230,17 +230,17 @@ }, { "cell_type": "code", - "execution_count": null, "id": "8544c368-27a3-4c3f-b319-d6d298eed9c4", "metadata": {}, - "outputs": [], "source": [ "from geti_sdk.rest_clients import ModelClient\n", "\n", "model_client = ModelClient(\n", " session=geti.session, workspace_id=geti.workspace_id, project=project\n", ")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -254,10 +254,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "96194256-990b-4b48-b1b1-d8ca2a679945", "metadata": {}, - "outputs": [], "source": [ "model = model_client.get_model_for_job(job)\n", "\n", @@ -270,15 +268,17 @@ " f\"the model training has failed, you could try restarting the training to \"\n", " f\"see if the problem persists.\"\n", " )" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "fbb7f4c7-6e52-4e91-81be-20167e00fbb2", "metadata": {}, + "source": [], "outputs": [], - "source": [] + "execution_count": null } ], "metadata": { diff --git a/notebooks/008_deploy_project.ipynb b/notebooks/008_deploy_project.ipynb index e5387fe1..3b1ed4cd 100644 --- a/notebooks/008_deploy_project.ipynb +++ b/notebooks/008_deploy_project.ipynb @@ -12,10 +12,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "c9baafb1-4be1-427e-8665-2e9a4d377142", "metadata": {}, - "outputs": [], "source": [ "# As usual we will connect to the platform first, using the server details from the .env file\n", "\n", @@ -25,7 +23,9 @@ "geti_server_configuration = get_server_details_from_env()\n", "\n", "geti = Geti(server_config=geti_server_configuration)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -38,16 +38,16 @@ }, { "cell_type": "code", - "execution_count": null, "id": "608a8fb7-24b8-45ee-aff8-e3044d236756", "metadata": {}, - "outputs": [], "source": [ "from geti_sdk.rest_clients import ProjectClient\n", "\n", "project_client = ProjectClient(session=geti.session, workspace_id=geti.workspace_id)\n", "projects = project_client.list_projects()" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -62,13 +62,13 @@ }, { "cell_type": "code", - "execution_count": null, "id": "8b748042-6d21-471a-8acd-d3f360d543e3", "metadata": {}, - "outputs": [], "source": [ "PROJECT_NAME = \"COCO multitask animal demo\"" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -80,15 +80,15 @@ }, { "cell_type": "code", - "execution_count": null, "id": "ffd5fb89-f4a2-4398-9951-8b1cefa4ab99", "metadata": {}, - "outputs": [], "source": [ "from geti_sdk.demos import ensure_trained_example_project\n", "\n", "ensure_trained_example_project(geti=geti, project_name=PROJECT_NAME);" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -102,13 +102,13 @@ }, { "cell_type": "code", - "execution_count": null, "id": "0d17e98e-ac6c-4353-9282-23c2e20835d3", 
"metadata": {}, - "outputs": [], "source": [ "deployment = geti.deploy_project(project_name=PROJECT_NAME, enable_explainable_ai=True)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -121,13 +121,13 @@ }, { "cell_type": "code", - "execution_count": null, "id": "ab0d01de-6b6a-4174-a033-562034c1374b", "metadata": {}, - "outputs": [], "source": [ "deployment.load_inference_models(device=\"CPU\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -141,10 +141,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "df3dbf77-b18b-4d86-9787-513ee9d1f328", "metadata": {}, - "outputs": [], "source": [ "import time\n", "\n", @@ -162,7 +160,9 @@ "t_elapsed = time.time() - t_start\n", "\n", "print(f\"Running local inference on image took {t_elapsed*1000:.2f} milliseconds\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -180,10 +180,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "2dde3d0c-aa3a-4635-b76c-df5c6f033de2", "metadata": {}, - "outputs": [], "source": [ "from geti_sdk import Visualizer\n", "\n", @@ -191,7 +189,9 @@ "\n", "result = visualizer.draw(numpy_rgb, prediction)\n", "visualizer.show_in_notebook(result)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -203,13 +203,13 @@ }, { "cell_type": "code", - "execution_count": null, "id": "f086979f", "metadata": {}, - "outputs": [], "source": [ "print(prediction.overview)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -225,10 +225,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "f954741c", "metadata": {}, - "outputs": [], "source": [ "t_start = time.time()\n", "prediction_with_saliency_map = deployment.explain(numpy_rgb)\n", @@ -242,7 +240,9 @@ " numpy_rgb, prediction_with_saliency_map, label_name=\"animal\"\n", ")\n", "visualizer.show_in_notebook(result)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -255,17 +255,17 @@ }, { "cell_type": "code", - "execution_count": null, "id": "ffa3c911-af19-418d-8220-c85e4d907453", "metadata": {}, - "outputs": [], "source": [ "import os\n", "\n", "PATH_TO_DEPLOYMENT_FOLDER = os.path.join(\"deployments\", PROJECT_NAME)\n", "\n", "deployment.save(path_to_folder=PATH_TO_DEPLOYMENT_FOLDER)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -278,15 +278,15 @@ }, { "cell_type": "code", - "execution_count": null, "id": "8bbdf5ae-b282-411b-afc3-c6a390cccb9a", "metadata": {}, - "outputs": [], "source": [ "from geti_sdk.deployment import Deployment\n", "\n", "offline_deployment = Deployment.from_folder(PATH_TO_DEPLOYMENT_FOLDER)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -298,13 +298,13 @@ }, { "cell_type": "code", - "execution_count": null, "id": "73fabe19-5a55-451d-a54f-e250c449e9a8", "metadata": {}, - "outputs": [], "source": [ "offline_deployment.load_inference_models(device=\"CPU\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -325,10 +325,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "b8293cec-743f-459a-8634-f0d87b0b7601", "metadata": {}, - "outputs": [], "source": [ "from geti_sdk.rest_clients import ImageClient, PredictionClient\n", "\n", @@ -340,7 +338,9 @@ "prediction_client = PredictionClient(\n", " session=geti.session, workspace_id=geti.workspace_id, project=project\n", ")" - ] + ], + "outputs": [], + 
"execution_count": null }, { "cell_type": "markdown", @@ -352,15 +352,15 @@ }, { "cell_type": "code", - "execution_count": null, "id": "e0ca9c4d-a904-4d9a-bf42-5c84d83cf0a3", "metadata": {}, - "outputs": [], "source": [ "geti_image = image_client.upload_image(numpy_image)\n", "# Load the pixel data to visualize the image later on\n", "geti_image.get_data(geti.session);" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -373,10 +373,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "0322cd4c-4fb1-42c5-9fa9-547d286b7ca9", "metadata": {}, - "outputs": [], "source": [ "from geti_sdk.data_models.enums import PredictionMode\n", "\n", @@ -394,7 +392,9 @@ "\n", "print(f\"Platform prediction completed in {t_elapsed_platform*1000:.1f} milliseconds\")\n", "print(f\"Local prediction completed in {t_elapsed_local*1000:.1f} milliseconds\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -407,10 +407,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "65f55ff1-f2ca-4b2d-9dbe-d4e09d6c2b35", "metadata": {}, - "outputs": [], "source": [ "geti_image_rgb = cv2.cvtColor(geti_image.numpy, cv2.COLOR_BGR2RGB)\n", "platform_result = visualizer.draw(geti_image_rgb, platform_prediction)\n", @@ -418,7 +416,9 @@ "\n", "local_result = visualizer.draw(numpy_rgb, local_prediction)\n", "visualizer.show_in_notebook(local_result)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -431,13 +431,13 @@ }, { "cell_type": "code", - "execution_count": null, "id": "26681cc5-fc42-4412-8ea5-af5d57201c37", "metadata": {}, - "outputs": [], "source": [ "image_client.delete_images([geti_image])" - ] + ], + "outputs": [], + "execution_count": null } ], "metadata": { diff --git a/notebooks/009_download_and_upload_project.ipynb b/notebooks/009_download_and_upload_project.ipynb index 73df1ca9..f0f0bd7c 100644 --- a/notebooks/009_download_and_upload_project.ipynb +++ b/notebooks/009_download_and_upload_project.ipynb @@ -13,10 +13,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "f2b19601-3208-47c8-af74-2527daf33044", "metadata": {}, - "outputs": [], "source": [ "# As usual we will connect to the platform first, using the server details from the .env file. We will also create a ProjectClient for the server\n", "\n", @@ -29,7 +27,9 @@ "geti = Geti(server_config=geti_server_configuration)\n", "\n", "project_client = ProjectClient(session=geti.session, workspace_id=geti.workspace_id)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -42,13 +42,13 @@ }, { "cell_type": "code", - "execution_count": null, "id": "0d6b6c22-4281-4a49-9ecc-8f93903a44e8", "metadata": {}, - "outputs": [], "source": [ "projects = project_client.list_projects()" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -60,13 +60,13 @@ }, { "cell_type": "code", - "execution_count": null, "id": "60de5767-d39f-4ac0-91b0-ff1645e7b0b2", "metadata": {}, - "outputs": [], "source": [ "PROJECT_NAME = \"COCO multitask animal demo\"" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -76,7 +76,7 @@ "## Project download\n", "Now, let's do the project download itself. The `Geti` provides a method `download_project_data()` to do so. It takes the following arguments:\n", "\n", - "- `project_name`: Name of the project to download\n", + "- `project`: The project to download\n", "- `target_folder`: Path of the folder to download to. 
If left empty, a folder named `project_name` will be created in the current directory\n", "- `include_predictions`: True to download predictions for all media, False to not download any predictions\n", "- `include_active_models`: True to download the active models for all tasks in the project, False to not download any models\n", @@ -87,21 +87,22 @@ }, { "cell_type": "code", - "execution_count": null, "id": "da106bf0-41df-40c5-a8b3-60b377397393", "metadata": {}, - "outputs": [], "source": [ "import os\n", "\n", - "project = geti.download_project_data(\n", - " project_name=PROJECT_NAME,\n", + "project = project_client.get_project_by_name(project_name=PROJECT_NAME)\n", + "geti.download_project_data(\n", + " project=project,\n", " target_folder=os.path.join(\"projects\", PROJECT_NAME),\n", " include_predictions=False,\n", " include_active_models=False,\n", " include_deployment=False,\n", ")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -129,17 +130,17 @@ }, { "cell_type": "code", - "execution_count": null, "id": "8cca36fb-690a-4955-b99d-0bdd5f46f691", "metadata": {}, - "outputs": [], "source": [ "uploaded_project = geti.upload_project_data(\n", " target_folder=os.path.join(\"projects\", PROJECT_NAME),\n", " project_name=PROJECT_NAME,\n", " enable_auto_train=False,\n", ")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -151,11 +152,11 @@ }, { "cell_type": "code", - "execution_count": null, "id": "e4474f3e-9aa7-4256-95de-62fbbba2d36f", "metadata": {}, + "source": [], "outputs": [], - "source": [] + "execution_count": null } ], "metadata": { diff --git a/notebooks/010_model_serving.ipynb b/notebooks/010_model_serving.ipynb index 8175bdcd..591f371a 100644 --- a/notebooks/010_model_serving.ipynb +++ b/notebooks/010_model_serving.ipynb @@ -14,12 +14,10 @@ }, { "cell_type": "code", - "execution_count": null, "id": "c9baafb1-4be1-427e-8665-2e9a4d377142", "metadata": { "tags": [] }, - "outputs": [], "source": [ "# As usual we will connect to the platform first, using the server details from the .env file\n", "\n", @@ -29,7 +27,9 @@ "geti_server_configuration = get_server_details_from_env()\n", "\n", "geti = Geti(server_config=geti_server_configuration)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -42,18 +42,18 @@ }, { "cell_type": "code", - "execution_count": null, "id": "608a8fb7-24b8-45ee-aff8-e3044d236756", "metadata": { "tags": [] }, - "outputs": [], "source": [ "from geti_sdk.rest_clients import ProjectClient\n", "\n", "project_client = ProjectClient(session=geti.session, workspace_id=geti.workspace_id)\n", "projects = project_client.list_projects()" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -66,15 +66,15 @@ }, { "cell_type": "code", - "execution_count": null, "id": "8b748042-6d21-471a-8acd-d3f360d543e3", "metadata": { "tags": [] }, - "outputs": [], "source": [ "PROJECT_NAME = \"COCO animal detection demo\"" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -88,17 +88,17 @@ }, { "cell_type": "code", - "execution_count": null, "id": "ffd5fb89-f4a2-4398-9951-8b1cefa4ab99", "metadata": { "tags": [] }, - "outputs": [], "source": [ "from geti_sdk.demos import ensure_trained_example_project\n", "\n", "ensure_trained_example_project(geti=geti, project_name=PROJECT_NAME);" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -110,12 +110,10 @@ }, { "cell_type": "code", - 
"execution_count": null, "id": "0d17e98e-ac6c-4353-9282-23c2e20835d3", "metadata": { "tags": [] }, - "outputs": [], "source": [ "import os\n", "\n", @@ -132,7 +130,9 @@ "deployment = geti.deploy_project(\n", " project_name=PROJECT_NAME, prepare_ovms_config=True, output_folder=output_folder\n", ")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -160,15 +160,15 @@ }, { "cell_type": "code", - "execution_count": null, "id": "84599e9f-4597-4449-b7ec-d33d6132487a", "metadata": { "tags": [] }, - "outputs": [], "source": [ "! docker pull openvino/model_server:latest" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -181,15 +181,15 @@ }, { "cell_type": "code", - "execution_count": null, "id": "7395a5c9-29ae-4778-a884-0b2fbb7d90ba", "metadata": { "tags": [] }, - "outputs": [], "source": [ "ovms_config_path = os.path.join(os.getcwd(), output_folder, \"ovms_models\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -201,15 +201,15 @@ }, { "cell_type": "code", - "execution_count": null, "id": "5171e2ed-4144-4970-9e56-f8447db16a86", "metadata": { "tags": [] }, - "outputs": [], "source": [ "! docker run -d --rm -v {ovms_config_path}:/models -p 9000:9000 openvino/model_server:latest --port 9000 --config_path /models/ovms_model_config.json" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -223,15 +223,15 @@ }, { "cell_type": "code", - "execution_count": null, "id": "2cd18ef9-4eb8-4094-9f47-e58bf051e7bb", "metadata": { "tags": [] }, - "outputs": [], "source": [ "deployment.load_inference_models(device=\"http://localhost:9000\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -247,12 +247,10 @@ }, { "cell_type": "code", - "execution_count": null, "id": "df3dbf77-b18b-4d86-9787-513ee9d1f328", "metadata": { "tags": [] }, - "outputs": [], "source": [ "import time\n", "\n", @@ -270,7 +268,9 @@ "t_elapsed = time.time() - t_start\n", "\n", "print(f\"Running OVMS inference on image took {t_elapsed*1000:.2f} milliseconds\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -288,19 +288,19 @@ }, { "cell_type": "code", - "execution_count": null, "id": "2dde3d0c-aa3a-4635-b76c-df5c6f033de2", "metadata": { "tags": [] }, - "outputs": [], "source": [ "from geti_sdk import Visualizer\n", "\n", "visualizer = Visualizer()\n", "result = visualizer.draw(numpy_image, prediction)\n", "visualizer.show_in_notebook(result)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -313,31 +313,31 @@ }, { "cell_type": "code", - "execution_count": null, "id": "ffa3c911-af19-418d-8220-c85e4d907453", "metadata": { "tags": [] }, - "outputs": [], "source": [ "deployment.load_inference_models(device=\"CPU\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "8bbdf5ae-b282-411b-afc3-c6a390cccb9a", "metadata": { "tags": [] }, - "outputs": [], "source": [ "t_start = time.time()\n", "prediction = deployment.infer(numpy_rgb)\n", "t_elapsed = time.time() - t_start\n", "\n", "print(f\"Running local inference on image took {t_elapsed*1000:.2f} milliseconds\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -355,18 +355,18 @@ }, { "cell_type": "code", - "execution_count": null, "id": "73fabe19-5a55-451d-a54f-e250c449e9a8", "metadata": { "tags": [] }, - "outputs": [], "source": [ "%%timeit -n 10 -r 3\n", 
"\n", "# CPU inference\n", "prediction = deployment.infer(numpy_rgb)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -378,30 +378,30 @@ }, { "cell_type": "code", - "execution_count": null, "id": "62339ec9-c1cf-4a85-86b4-809f35580237", "metadata": { "tags": [] }, - "outputs": [], "source": [ "deployment.load_inference_models(device=\"http://localhost:9000\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "41da1af8-672d-4ccb-ae34-4ed3f44b9be5", "metadata": { "tags": [] }, - "outputs": [], "source": [ "%%timeit -n 10 -r 3\n", "\n", "# OVMS inference\n", "prediction = deployment.infer(numpy_rgb)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -422,15 +422,15 @@ }, { "cell_type": "code", - "execution_count": null, "id": "9171f158-1354-4214-a220-8c86e0487150", "metadata": { "tags": [] }, - "outputs": [], "source": [ "MULTITASK_PROJECT_NAME = \"COCO multitask animal demo\"" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -442,26 +442,24 @@ }, { "cell_type": "code", - "execution_count": null, "id": "b6819f77-de78-4c2b-bcda-57b096dd6278", "metadata": { "tags": [] }, - "outputs": [], "source": [ "mt_project = ensure_trained_example_project(\n", " geti=geti, project_name=MULTITASK_PROJECT_NAME\n", ")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "691904af-5603-4486-86d2-386b86abd298", "metadata": { "tags": [] }, - "outputs": [], "source": [ "safe_mt_project_name = sanitize_filepath(MULTITASK_PROJECT_NAME).replace(\" \", \"_\")\n", "\n", @@ -474,7 +472,9 @@ " prepare_ovms_config=True,\n", " output_folder=mt_output_folder,\n", ")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -488,17 +488,17 @@ }, { "cell_type": "code", - "execution_count": null, "id": "e0ca9c4d-a904-4d9a-bf42-5c84d83cf0a3", "metadata": { "tags": [] }, - "outputs": [], "source": [ "mt_ovms_config_path = os.path.join(os.getcwd(), mt_output_folder, \"ovms_models\")\n", "\n", "! 
docker run -d --rm -v {mt_ovms_config_path}:/models -p 9001:9001 openvino/model_server:latest --port 9001 --config_path /models/ovms_model_config.json" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -511,15 +511,15 @@ }, { "cell_type": "code", - "execution_count": null, "id": "0645f97a-6088-45b3-87f0-f17666d3623e", "metadata": { "tags": [] }, - "outputs": [], "source": [ "multitask_deployment.load_inference_models(device=\"http://localhost:9001\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -531,12 +531,10 @@ }, { "cell_type": "code", - "execution_count": null, "id": "f2accc67-7f40-4b63-8000-590df1acb9dd", "metadata": { "tags": [] }, - "outputs": [], "source": [ "t_start = time.time()\n", "prediction = multitask_deployment.infer(numpy_rgb)\n", @@ -546,7 +544,9 @@ "\n", "result = visualizer.draw(numpy_rgb, prediction)\n", "visualizer.show_in_notebook(result)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -559,18 +559,18 @@ }, { "cell_type": "code", - "execution_count": null, "id": "65f55ff1-f2ca-4b2d-9dbe-d4e09d6c2b35", "metadata": { "tags": [] }, - "outputs": [], "source": [ "%%timeit -n 10 -r 3\n", "\n", "# OVMS inference\n", "prediction = multitask_deployment.infer(numpy_rgb)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -582,15 +582,15 @@ }, { "cell_type": "code", - "execution_count": null, "id": "26681cc5-fc42-4412-8ea5-af5d57201c37", "metadata": { "tags": [] }, - "outputs": [], "source": [ "multitask_deployment.load_inference_models(device=\"CPU\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -602,18 +602,18 @@ }, { "cell_type": "code", - "execution_count": null, "id": "f9e99d51-aed0-44a4-b1cc-a3939370eab7", "metadata": { "tags": [] }, - "outputs": [], "source": [ "%%timeit -n 10 -r 3\n", "\n", "# CPU inference\n", "prediction = multitask_deployment.infer(numpy_rgb)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -640,18 +640,18 @@ }, { "cell_type": "code", - "execution_count": null, "id": "0c5cca6b-fedf-4fe6-bcb2-e3afeadefbbd", "metadata": { "jp-MarkdownHeadingCollapsed": true, "tags": [] }, - "outputs": [], "source": [ "container_ids = ! 
docker ps -q --filter ancestor=openvino/model_server\n", "\n", "print(f\"Found {len(container_ids)} running OVMS containers.\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -666,13 +666,11 @@ }, { "cell_type": "code", - "execution_count": null, "id": "80fc7bf9-1b9d-477a-b9e0-45a4a554c70f", "metadata": { "jp-MarkdownHeadingCollapsed": true, "tags": [] }, - "outputs": [], "source": [ "# Stop each container\n", "for ovms_container_id in container_ids:\n", @@ -682,7 +680,9 @@ " print(f\"OVMS container '{ovms_container_id}' stopped and removed successfully.\")\n", " else:\n", " print(result[0])" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", diff --git a/notebooks/011_benchmarking_models.ipynb b/notebooks/011_benchmarking_models.ipynb index a2151bdb..b0e8a920 100644 --- a/notebooks/011_benchmarking_models.ipynb +++ b/notebooks/011_benchmarking_models.ipynb @@ -12,10 +12,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "8fbb21e7-b203-467d-b136-3a8f6a5879b0", "metadata": {}, - "outputs": [], "source": [ "# As usual we will connect to the platform first, using the server details from the .env file\n", "\n", @@ -25,7 +23,9 @@ "geti_server_configuration = get_server_details_from_env()\n", "\n", "geti = Geti(server_config=geti_server_configuration)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -38,14 +38,14 @@ }, { "cell_type": "code", - "execution_count": null, "id": "1c5670de-61e8-44d9-95dd-dd0a4b88e606", "metadata": {}, - "outputs": [], "source": [ "PROJECT_NAME = \"COCO animal detection demo\"\n", "project = geti.get_project(project_name=PROJECT_NAME)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -64,13 +64,13 @@ }, { "cell_type": "code", - "execution_count": null, "id": "83641870-794d-4cbd-a108-2197d319e163", "metadata": {}, - "outputs": [], "source": [ "algorithms_to_benchmark = [\"MobileNetV2-ATSS\", \"SSD\", \"YOLOX-S\"]" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -83,10 +83,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "7988bff8-fc2b-48f0-84c6-5f26b945f3c1", "metadata": {}, - "outputs": [], "source": [ "from geti_sdk.rest_clients import ImageClient\n", "\n", @@ -94,7 +92,9 @@ " session=geti.session, workspace_id=geti.workspace_id, project=project\n", ")\n", "images = image_client.get_all_images()" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -107,13 +107,13 @@ }, { "cell_type": "code", - "execution_count": null, "id": "e6c3a26b-126e-454e-befe-38f3d477c328", "metadata": {}, - "outputs": [], "source": [ "precision_levels = [\"FP32\", \"FP16\", \"INT8\"]" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -126,10 +126,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "8506f981-cbb9-41c4-8751-cba07592dad4", "metadata": {}, - "outputs": [], "source": [ "from geti_sdk.benchmarking import Benchmarker\n", "\n", @@ -140,7 +138,9 @@ " precision_levels=precision_levels,\n", " benchmark_images=images,\n", ")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -160,16 +160,16 @@ }, { "cell_type": "code", - "execution_count": null, "id": "0463e42c-e9ea-42a0-9b2c-19c9ccd23cd0", "metadata": {}, - "outputs": [], "source": [ "import os\n", "\n", "benchmark_folder = os.path.join(\"benchmarks\", PROJECT_NAME)\n", 
"benchmarker.prepare_benchmark(working_directory=benchmark_folder)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -190,10 +190,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "15680806-5a03-439a-809b-35e791e509a3", "metadata": {}, - "outputs": [], "source": [ "results = benchmarker.run_throughput_benchmark(\n", " working_directory=benchmark_folder,\n", @@ -202,7 +200,9 @@ " frames=100,\n", " repeats=2,\n", ")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -224,10 +224,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "ad8f1f09-2722-41f5-b338-e83e1cf6c11b", "metadata": {}, - "outputs": [], "source": [ "from IPython.display import display\n", "from PIL import Image\n", @@ -241,7 +239,9 @@ " include_online_prediction_for_active_model=True,\n", ")\n", "display(Image.fromarray(prediction_comparison))" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -254,16 +254,16 @@ }, { "cell_type": "code", - "execution_count": null, "id": "ff3e62e5-0b86-4c7d-a16c-49c2ebe1d969", "metadata": {}, - "outputs": [], "source": [ "import pandas as pd\n", "\n", "df = pd.DataFrame(results)\n", "df" - ] + ], + "outputs": [], + "execution_count": null } ], "metadata": { diff --git a/notebooks/012_post_inference_hooks.ipynb b/notebooks/012_post_inference_hooks.ipynb index 04a0d174..0a464f55 100644 --- a/notebooks/012_post_inference_hooks.ipynb +++ b/notebooks/012_post_inference_hooks.ipynb @@ -17,10 +17,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "a844097d-e4f8-4c99-ae85-a01c77f91395", "metadata": {}, - "outputs": [], "source": [ "# As usual we will connect to the platform first, using the server details from the .env file\n", "\n", @@ -30,7 +28,9 @@ "geti_server_configuration = get_server_details_from_env()\n", "\n", "geti = Geti(server_config=geti_server_configuration)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -44,16 +44,16 @@ }, { "cell_type": "code", - "execution_count": null, "id": "1584f03e-4e35-4d24-9a43-e0bbdf0fb78a", "metadata": {}, - "outputs": [], "source": [ "from geti_sdk.demos import ensure_trained_example_project\n", "\n", "PROJECT_NAME = \"COCO animal detection demo\"\n", "project = ensure_trained_example_project(geti=geti, project_name=PROJECT_NAME)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -65,13 +65,13 @@ }, { "cell_type": "code", - "execution_count": null, "id": "2e2ae82f-bc66-4863-b423-82347737f810", "metadata": {}, - "outputs": [], "source": [ - "deployment = geti.deploy_project(PROJECT_NAME)" - ] + "deployment = geti.deploy_project(project=project)" + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -84,25 +84,23 @@ }, { "cell_type": "code", - "execution_count": null, "id": "6d2788bc-fd8d-4363-85b0-cfcf00825ef7", "metadata": {}, - "outputs": [], "source": [ "deployment.load_inference_models()" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "d26688a7-9e2d-4443-acb8-cd90798da662", "metadata": {}, - "outputs": [], "source": [ "import cv2\n", "\n", - "from geti_sdk.demos import EXAMPLE_IMAGE_PATH\n", "from geti_sdk import Visualizer\n", + "from geti_sdk.demos import EXAMPLE_IMAGE_PATH\n", "\n", "numpy_image = cv2.imread(EXAMPLE_IMAGE_PATH)\n", "numpy_rgb = cv2.cvtColor(numpy_image, cv2.COLOR_BGR2RGB)\n", @@ -112,7 +110,9 @@ "visualizer = Visualizer()\n", "result 
= visualizer.draw(numpy_rgb, prediction)\n", "visualizer.show_in_notebook(result)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -139,10 +139,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "302568fe-0d1a-4333-863c-a99f2a94c9b9", "metadata": {}, - "outputs": [], "source": [ "from geti_sdk.post_inference_hooks import (\n", " GetiDataCollection,\n", @@ -165,7 +163,9 @@ "hook = PostInferenceHook( # The Hook attaches the action to the trigger\n", " trigger=trigger, action=action\n", ")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -177,13 +177,13 @@ }, { "cell_type": "code", - "execution_count": null, "id": "edaee43a-a8bc-49ee-a6e8-592a9efe940a", "metadata": {}, - "outputs": [], "source": [ "deployment.add_post_inference_hook(hook)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -195,14 +195,14 @@ }, { "cell_type": "code", - "execution_count": null, "id": "c6f9b972-3411-4345-8951-5f2c9dbcf66c", "metadata": {}, - "outputs": [], "source": [ "prediction = deployment.infer(numpy_rgb)\n", "print(f\"Prediction contains objects with labels: {prediction.get_label_names()}\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -236,10 +236,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "7be449fe-26a0-466b-92f7-78d4477ac255", "metadata": {}, - "outputs": [], "source": [ "from geti_sdk.post_inference_hooks import FileSystemDataCollection, ObjectCountTrigger\n", "\n", @@ -287,7 +285,9 @@ "# Add both hooks to the deployment\n", "deployment.add_post_inference_hook(dogs_hook)\n", "deployment.add_post_inference_hook(no_dogs_hook)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -303,10 +303,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "4065cb88-f208-45f4-bc8e-ee778e0a6b34", "metadata": {}, - "outputs": [], "source": [ "import os\n", "\n", @@ -323,7 +321,9 @@ " os.path.join(path, \"images\", \"val2017\", fn + \".jpg\") for fn in dog_image_filenames\n", "][0:n_images]\n", "print(f\"Selected the first {n_images} images containing dogs from COCO dataset\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -335,10 +335,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "c4fa2040-09e8-40db-9278-558c03781e3d", "metadata": {}, - "outputs": [], "source": [ "import time\n", "\n", @@ -353,7 +351,9 @@ "print(\n", " f\"Inference on {n_images} images with 2 post-inference hooks completed in {t_elapsed:.2f} seconds.\"\n", ")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -385,10 +385,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "e8e5992a-984e-4ccc-84f8-441b150d467d", "metadata": {}, - "outputs": [], "source": [ "# Remove any post-inference hooks\n", "deployment.clear_inference_hooks()\n", @@ -403,7 +401,9 @@ "print(\n", " f\"Inference on {n_images} images without post-inference hooks completed in {t_elapsed:.2f} seconds.\"\n", ")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -424,14 +424,14 @@ }, { "cell_type": "code", - "execution_count": null, "id": "4054c0d3-c6e3-40cf-baf1-a698d2b040bb", "metadata": {}, - "outputs": [], "source": [ "target_folder = os.path.join(\"deployments\", PROJECT_NAME)\n", "deployment.save(target_folder);" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -443,15 +443,15 @@ }, { 
"cell_type": "code", - "execution_count": null, "id": "1f63c0e4-2805-4c5e-b7f6-9ef27d3699a8", "metadata": {}, - "outputs": [], "source": [ "from geti_sdk.deployment import Deployment\n", "\n", "offline_deployment = Deployment.from_folder(target_folder)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -469,10 +469,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "381fa7c3-72ce-47c0-8039-239629e46ee1", "metadata": {}, - "outputs": [], "source": [ "from geti_sdk.post_inference_hooks import AlwaysTrigger\n", "\n", @@ -491,7 +489,9 @@ " limit_action_rate=True,\n", " max_frames_per_second=1,\n", ")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -503,15 +503,15 @@ }, { "cell_type": "code", - "execution_count": null, "id": "329cfc88-5fc2-4c3c-8e66-b6e85d14c608", "metadata": {}, - "outputs": [], "source": [ "offline_deployment.clear_inference_hooks()\n", "offline_deployment.add_post_inference_hook(geti_hook)\n", "offline_deployment.load_inference_models()" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -523,10 +523,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "0c86d82f-c990-486f-981e-2b11347f255f", "metadata": {}, - "outputs": [], "source": [ "image = cv2.imread(EXAMPLE_IMAGE_PATH)\n", "numpy_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n", @@ -539,7 +537,9 @@ "print(\n", " f\"50 inference iterations with rate-limited Geti I/O hook completed in {t_elapsed:.2f} seconds.\"\n", ")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -553,11 +553,11 @@ }, { "cell_type": "code", - "execution_count": null, "id": "852cf6f3-9765-4d61-ab5c-31e79bc7e73d", "metadata": {}, + "source": [], "outputs": [], - "source": [] + "execution_count": null } ], "metadata": { diff --git a/notebooks/013_manage_models.ipynb b/notebooks/013_manage_models.ipynb index 70145f06..4c5211ca 100644 --- a/notebooks/013_manage_models.ipynb +++ b/notebooks/013_manage_models.ipynb @@ -11,9 +11,7 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "# As usual we will connect to the platform first, using the server details from the .env file\n", "\n", @@ -27,7 +25,9 @@ "\n", "# We will also create a ProjectClient for the server\n", "project_client = ProjectClient(session=geti.session, workspace_id=geti.workspace_id)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -39,9 +39,7 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "from geti_sdk.demos import ensure_trained_example_project\n", "\n", @@ -53,7 +51,9 @@ "trainable_tasks = project.get_trainable_tasks()\n", "for task in trainable_tasks:\n", " print(task.summary)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -64,28 +64,28 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "from geti_sdk.rest_clients.model_client import ModelClient\n", "\n", "model_client = ModelClient(\n", " workspace_id=geti.workspace_id, project=project, session=geti.session\n", ")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "active_models = model_client.get_all_active_models()\n", "print(\"Active models:\")\n", "for i, model in enumerate(active_models):\n", " print(f\"Task {i + 1}: \", model.name)" - ] + ], + 
"outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -99,9 +99,7 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "from typing import List\n", "\n", @@ -121,7 +119,9 @@ " print(f\"Default algorithm for {task.title} is {default_algo_name}\\n\")\n", " print(\"Other available algorithms are\")\n", " print(algo_list_for_task[:4].summary)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -137,9 +137,7 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "# Choose a task and algorithm we will use for training\n", "task_number = 0\n", @@ -157,13 +155,13 @@ "print(\n", " f\"We will proceed with training task `{task_to_train.title}` with algorithm `{algo_to_train.name}`\"\n", ")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "from geti_sdk.rest_clients.configuration_client import ConfigurationClient\n", "\n", @@ -174,18 +172,20 @@ "algorithm_hyperparameters = configuration_client.get_for_task_and_algorithm(\n", " task_to_train, algo_to_train\n", ")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "# Update a hyperparameter and make sure the change is reflected in the configuration object\n", "algorithm_hyperparameters.set_parameter_value(parameter_name=\"batch_size\", value=32)\n", "print(algorithm_hyperparameters.summary)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -198,9 +198,7 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "print(f\"Training {task_to_train.title} with {algo_to_train.name} algorithm\\n\")\n", "job = training_client.train_task(\n", @@ -209,7 +207,9 @@ " hyper_parameters=algorithm_hyperparameters,\n", ")\n", "training_client.monitor_job(job);" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -220,15 +220,15 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "active_models = model_client.get_all_active_models()\n", "print(\"Active models:\")\n", "for i, model in enumerate(active_models):\n", " print(f\"Task {i + 1}: \", model.name)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -241,15 +241,15 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "job = model_client.optimize_model(\n", " model=active_models[task_number], optimization_type=\"pot\"\n", ")\n", "_ = model_client.monitor_job(job)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -265,15 +265,15 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "model_groups = model_client.get_all_model_groups()\n", "print(\"Model groups:\")\n", "for i, model_group in enumerate(model_groups):\n", " print(f\"{i + 1}. 
\", model_group.name)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -285,9 +285,7 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "# We will find the newly created group by matching the name of the algorithm we trained\n", "for model_group in model_groups:\n", @@ -298,7 +296,9 @@ "model_summary = trained_algo_model_group.models[0]\n", "model = model_client.update_model_detail(model_summary)\n", "print(model.overview)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -309,12 +309,12 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "_ = model_client.get_latest_model_by_algo_name(algorithm_name=algo_to_train.name)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -327,12 +327,12 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "optimized_model = model.get_optimized_model(optimization_type=\"pot\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -344,15 +344,15 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "optimized_model = model_client.get_latest_optimized_model(\n", " algorithm_name=algo_to_train.name, optimization_type=\"pot\", precision=\"INT8\"\n", ")\n", "print(optimized_model.overview)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -366,12 +366,12 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "_ = model.get_optimized_model(optimization_type=\"pot\", require_xai=True)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -382,12 +382,12 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "model_client.set_active_model(model_groups[0].models[0])" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -401,16 +401,16 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "from geti_sdk.rest_clients.deployment_client import DeploymentClient\n", "\n", "deployment_client = DeploymentClient(\n", " workspace_id=geti.workspace_id, project=project, session=geti.session\n", ")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -422,12 +422,12 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "deployment = deployment_client.deploy_project(models=[optimized_model])" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", diff --git a/notebooks/014_asynchronous_inference.ipynb b/notebooks/014_asynchronous_inference.ipynb index 7a520717..73225b82 100644 --- a/notebooks/014_asynchronous_inference.ipynb +++ b/notebooks/014_asynchronous_inference.ipynb @@ -42,10 +42,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "f90c0f1b-07c4-449a-b926-2dd0839f9f29", "metadata": {}, - "outputs": [], "source": [ "from geti_sdk import Geti\n", "from geti_sdk.utils import get_server_details_from_env\n", @@ -56,7 +54,9 @@ "\n", "PROJECT_NAME = \"COCO multitask animal demo\"\n", "project = geti.get_project(project_name=PROJECT_NAME)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -68,15 +68,17 @@ }, { "cell_type": "code", - "execution_count": null, "id": 
"9c6e4b7f-c735-4def-b4b4-62e51c96f250", "metadata": {}, - "outputs": [], "source": [ "DEPLOYMENT_FOLDER = \"deployments\"\n", "\n", - "deployment = geti.deploy_project(PROJECT_NAME, output_folder=DEPLOYMENT_FOLDER)" - ] + "deployment = geti.deploy_project(\n", + " project_name=PROJECT_NAME, output_folder=DEPLOYMENT_FOLDER\n", + ")" + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -95,10 +97,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "47f25327-eda0-4843-80d3-48b332f4b4a4", "metadata": {}, - "outputs": [], "source": [ "import os\n", "\n", @@ -110,7 +110,9 @@ " max_async_infer_requests=num_cores,\n", " openvino_configuration={\"PERFORMANCE_HINT\": \"THROUGHPUT\"},\n", ")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -129,10 +131,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "4bf35ecf-5cf1-4751-ac6a-d1a8b9fdff3c", "metadata": {}, - "outputs": [], "source": [ "import numpy as np\n", "\n", @@ -158,7 +158,9 @@ " for obj in predicted_objects:\n", " label_mapping = {lab.name: lab.probability for lab in obj.labels}\n", " print(f\" {label_mapping}\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -170,13 +172,13 @@ }, { "cell_type": "code", - "execution_count": null, "id": "de52b3a7-4eb7-4333-bd4a-88d2cdca7acb", "metadata": {}, - "outputs": [], "source": [ "deployment.set_asynchronous_callback(handle_results)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -191,10 +193,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "32bc3285-a288-4efe-94a0-d1579ab598f9", "metadata": {}, - "outputs": [], "source": [ "from geti_sdk.annotation_readers import DatumAnnotationReader\n", "from geti_sdk.demos import get_coco_dataset\n", @@ -209,7 +209,9 @@ " os.path.join(path, \"images\", \"val2017\", fn + \".jpg\") for fn in coco_image_filenames\n", "][0:n_images]\n", "print(f\"Selected {n_images} images from COCO dataset\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -221,10 +223,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "7320912a-c7c1-456a-84ed-d0601a765ab9", "metadata": {}, - "outputs": [], "source": [ "import time\n", "\n", @@ -243,7 +243,9 @@ "print(\n", " f\"Asynchronous mode: Inferred {len(coco_image_filepaths)} images in {t_elapsed_async:.2f} seconds ({len(coco_image_filepaths)/t_elapsed_async:.1f} fps)\"\n", ")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -268,13 +270,13 @@ }, { "cell_type": "code", - "execution_count": null, "id": "c847923a-aff1-463f-b946-781ecc70eae4", "metadata": {}, - "outputs": [], "source": [ "deployment.asynchronous_mode = False" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -295,10 +297,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "8f10c7b8-2113-4a90-9322-01afdcff81f4", "metadata": {}, - "outputs": [], "source": [ "t_start_sync = time.time()\n", "for img_index, image_path in enumerate(coco_image_filepaths):\n", @@ -316,7 +316,9 @@ "print(\n", " f\"Synchronous mode: Inferred {len(coco_image_filepaths)} images in {t_elapsed_sync:.2f} seconds ({len(coco_image_filepaths)/t_elapsed_sync:.1f} fps)\"\n", ")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -329,10 +331,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "dcda95ac-3bbc-49df-8b51-471f01432f86", "metadata": {}, - "outputs": 
[], "source": [ "print(\n", " f\"Sychronous mode: Time elapsed is {t_elapsed_sync:.2f} seconds ({len(coco_image_filepaths)/t_elapsed_sync:.1f} fps)\"\n", @@ -343,7 +343,9 @@ "print(\n", " f\"Asynchronous inference is {t_elapsed_sync/t_elapsed_async:.1f} times faster than synchronous inference.\"\n", ")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -370,10 +372,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "43a1c35f-568a-445c-9639-5950cff006c1", "metadata": {}, - "outputs": [], "source": [ "from typing import List, Tuple\n", "\n", @@ -389,7 +389,9 @@ "\n", "\n", "deployment.set_asynchronous_callback(inference_callback)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -405,10 +407,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "1dfad108-3d89-49e4-b4de-cd3214930a66", "metadata": {}, - "outputs": [], "source": [ "indices_async: List[int] = []\n", "tstart_pure_async = time.time()\n", @@ -426,7 +426,9 @@ "print(\n", " f\"Pure asynchronous mode: Time elapsed is {telapsed_pure_async:.2f} seconds ({len(coco_image_filepaths)/telapsed_pure_async:.1f} fps)\"\n", ")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -438,10 +440,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "4fb404f1-441c-4a5a-94fb-d92dca864c38", "metadata": {}, - "outputs": [], "source": [ "def is_list_sorted(input_list: List[int]):\n", " \"\"\"\n", @@ -452,7 +452,9 @@ "\n", "print(f\"Is the list of indices sorted?: {is_list_sorted(indices_async)}\")\n", "print(f\"The frames were processed in this order:\\n{indices_async}\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -468,10 +470,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "74613ff2-6caf-441b-a9d0-6e8928671e6a", "metadata": {}, - "outputs": [], "source": [ "from geti_sdk.demos import AsyncVideoProcessor\n", "\n", @@ -499,7 +499,9 @@ "print(\n", " f\"AsyncVideoProcessor: Time elapsed is {telapsed_async_vp:.2f} seconds ({len(coco_image_filepaths)/telapsed_async_vp:.1f} fps)\"\n", ")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -511,14 +513,14 @@ }, { "cell_type": "code", - "execution_count": null, "id": "e1355c3d-d94e-4adf-a1cf-f9e7d4a84975", "metadata": {}, - "outputs": [], "source": [ "print(f\"Is the list of indices sorted?: {is_list_sorted(indices_async_vp)}\")\n", "print(f\"The frames were processed in this order:\\n{indices_async_vp}\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -549,11 +551,11 @@ }, { "cell_type": "code", - "execution_count": null, "id": "68a904d0-6736-4deb-b69a-e7447965d225", "metadata": {}, + "source": [], "outputs": [], - "source": [] + "execution_count": null } ], "metadata": { diff --git a/notebooks/use_cases/101_simulate_low_light_product_inspection.ipynb b/notebooks/use_cases/101_simulate_low_light_product_inspection.ipynb index 8a373348..2cc72cfb 100644 --- a/notebooks/use_cases/101_simulate_low_light_product_inspection.ipynb +++ b/notebooks/use_cases/101_simulate_low_light_product_inspection.ipynb @@ -235,7 +235,7 @@ "outputs": [], "source": [ "sc_noisy_image, noisy_prediction = geti.upload_and_predict_image(\n", - " image=new_image_with_noise, project_name=PROJECT_NAME, visualise_output=False\n", + " image=new_image_with_noise, project=project, visualise_output=False\n", ")\n", "\n", "result = visualizer.draw(new_image_with_noise, 
noisy_prediction)\n", @@ -267,7 +267,7 @@ "for alpha in np.arange(start_factor, stop_factor, step):\n", " new_image_with_noise = simulate_low_light_image(image_2, reduction_factor=alpha)\n", " image, prediction = geti.upload_and_predict_image(\n", - " image=new_image_with_noise, project_name=PROJECT_NAME, visualise_output=False\n", + " image=new_image_with_noise, project=project, visualise_output=False\n", " )\n", " predicted_label = prediction.annotations[0].labels[0]\n", " print(\n", diff --git a/tests/nightly/test_nightly_project.py b/tests/nightly/test_nightly_project.py index 0c1a2fed..00b63bf7 100644 --- a/tests/nightly/test_nightly_project.py +++ b/tests/nightly/test_nightly_project.py @@ -160,7 +160,7 @@ def test_deployment( deployment_folder = os.path.join(fxt_temp_directory, project.name) deployment = fxt_geti_no_vcr.deploy_project( - project, + project=project, output_folder=deployment_folder, enable_explainable_ai=True, ) diff --git a/tests/pre-merge/integration/test_geti.py b/tests/pre-merge/integration/test_geti.py index 009716f3..eed5004c 100644 --- a/tests/pre-merge/integration/test_geti.py +++ b/tests/pre-merge/integration/test_geti.py @@ -518,7 +518,7 @@ def test_deployment( for _ in range(n_attempts): try: deployment = fxt_geti.deploy_project( - project, + project=project, output_folder=deployment_folder, enable_explainable_ai=True, ) @@ -576,7 +576,7 @@ def test_post_inference_hooks( project = fxt_project_service.project deployment_folder = os.path.join(fxt_temp_directory, project.name) - deployment = fxt_geti.deploy_project(project) + deployment = fxt_geti.deploy_project(project=project) dataset_name = "Test hooks" # Add a GetiDataCollectionHook