Commit

update fiftyone utils (#146)
* update fiftyone utils

* add fiftyone conversion into demo utils
fcakyon committed Jun 23, 2021
1 parent fa4cfe6 commit 559f4bd
Showing 3 changed files with 123 additions and 32 deletions.
78 changes: 66 additions & 12 deletions demo/inference_for_mmdetection.ipynb
@@ -27,8 +27,7 @@
"metadata": {},
"outputs": [],
"source": [
"!pip install -U sahi\n",
"!pip install -U mmdet mmcv-full"
"!pip install -U torch sahi mmdet mmcv-full"
]
},
{
@@ -77,9 +76,17 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 2,
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"mmdet config file has been downloaded to /home/fatihakyon/dev/obss/sahi/demo/mmdet_configs/v2.11.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py\n"
]
}
],
"source": [
"# download cascade mask rcnn model&config\n",
"model_path = 'models/cascade_mask_rcnn.pth'\n",
@@ -107,9 +114,17 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 3,
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Use load_from_local loader\n"
]
}
],
"source": [
"detection_model = MmdetDetectionModel(\n",
" model_path=model_path,\n",
@@ -145,9 +160,18 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 5,
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/home/fatihakyon/miniconda3/envs/sahi/lib/python3.8/site-packages/mmdet/datasets/utils.py:64: UserWarning: \"ImageToTensor\" pipeline is replaced by \"DefaultFormatBundle\" for batch inference. It is recommended to manually replace it in the test data pipeline in your config file.\n",
" warnings.warn(\n"
]
}
],
"source": [
"result = get_prediction(image, detection_model)"
]
@@ -272,7 +296,7 @@
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
@@ -403,20 +427,50 @@
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [],
"source": [
"object_prediction_list[0].to_imantics_annotation()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"- ObjectPrediction's can be converted to [fiftyone](https://github.com/voxel51/fiftyone) detection format:"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"<imantics.annotation.Annotation at 0x7f96ca34b280>"
"<Detection: {\n",
" 'id': '60d30f040a342ce5d7cd9380',\n",
" 'attributes': BaseDict({}),\n",
" 'tags': BaseList([]),\n",
" 'label': 'car',\n",
" 'bounding_box': BaseList([\n",
" 0.6151685393258427,\n",
" 0.35172413793103446,\n",
" 0.012172284644194757,\n",
" 0.017241379310344827,\n",
" ]),\n",
" 'mask': None,\n",
" 'confidence': 0.9727994203567505,\n",
" 'index': None,\n",
"}>"
]
},
"execution_count": 16,
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"object_prediction_list[0].to_imantics_annotation()"
"object_prediction_list[0].to_fiftyone_detection(image_height=image.shape[0], image_width=image.shape[1])"
]
},
{
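
As a minimal sketch of how the converted predictions could be used beyond a single object (not part of this commit), the whole object_prediction_list from the notebook can be wrapped in a fiftyone.Detections object and attached to a sample; the image path, the "predictions" field name, and the dataset name below are illustrative assumptions.

```python
# Sketch (not from this commit): attach all converted SAHI predictions to a FiftyOne sample.
# Assumes `image` (numpy array) and `object_prediction_list` exist as in the notebook cells above.
import fiftyone as fo

image_path = "demo_data/small-vehicles1.jpeg"  # illustrative path to the demo image

# Convert every ObjectPrediction into a fiftyone.Detection (relative [x, y, w, h] boxes).
detections = [
    prediction.to_fiftyone_detection(image_height=image.shape[0], image_width=image.shape[1])
    for prediction in object_prediction_list
]

# Bundle the detections on a sample and add it to a dataset for browsing in the FiftyOne app.
sample = fo.Sample(filepath=image_path)
sample["predictions"] = fo.Detections(detections=detections)

dataset = fo.Dataset("sahi-mmdet-demo")  # illustrative dataset name
dataset.add_sample(sample)
```
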
61 changes: 45 additions & 16 deletions demo/inference_for_yolov5.ipynb
@@ -27,8 +27,7 @@
"metadata": {},
"outputs": [],
"source": [
"!pip install -U sahi\n",
"!pip install -U yolov5"
"!pip install -U torch sahi yolov5"
]
},
{
@@ -50,7 +49,7 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
@@ -76,7 +75,7 @@
},
{
"cell_type": "code",
"execution_count": 23,
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
@@ -105,7 +104,7 @@
},
{
"cell_type": "code",
"execution_count": 24,
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
@@ -125,7 +124,7 @@
},
{
"cell_type": "code",
"execution_count": 25,
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
@@ -198,7 +197,7 @@
},
{
"cell_type": "code",
"execution_count": 29,
"execution_count": 8,
"metadata": {},
"outputs": [
{
@@ -269,7 +268,7 @@
},
{
"cell_type": "code",
"execution_count": 34,
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
@@ -343,23 +342,23 @@
},
{
"cell_type": "code",
"execution_count": 38,
"execution_count": 11,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'image_id': None,\n",
" 'bbox': [749, 191, 12, 9],\n",
" 'score': 0.66015625,\n",
" 'bbox': [447, 308, 49, 33],\n",
" 'score': 1,\n",
" 'category_id': 2,\n",
" 'category_name': 'car',\n",
" 'segmentation': [],\n",
" 'iscrowd': 0,\n",
" 'area': 108}"
" 'area': 1617}"
]
},
"execution_count": 38,
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
@@ -379,20 +378,50 @@
"cell_type": "code",
"execution_count": 35,
"metadata": {},
"outputs": [],
"source": [
"object_prediction_list[0].to_imantics_annotation()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"- ObjectPrediction's can be converted to [fiftyone](https://github.com/voxel51/fiftyone) detection format:"
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"<imantics.annotation.Annotation at 0x7fe7471a3220>"
"<Detection: {\n",
" 'id': '60d30eaed7d2c7a9c3980b8e',\n",
" 'attributes': BaseDict({}),\n",
" 'tags': BaseList([]),\n",
" 'label': 'car',\n",
" 'bounding_box': BaseList([\n",
" 0.41853932584269665,\n",
" 0.5310344827586206,\n",
" 0.04588014981273408,\n",
" 0.056896551724137934,\n",
" ]),\n",
" 'mask': None,\n",
" 'confidence': 0.9154346585273743,\n",
" 'index': None,\n",
"}>"
]
},
"execution_count": 35,
"execution_count": 18,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"object_prediction_list[0].to_imantics_annotation()"
"object_prediction_list[0].to_fiftyone_detection(image_height=image.shape[0], image_width=image.shape[1])"
]
},
{
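
FiftyOne's Detection stores its bounding_box as [top-left-x, top-left-y, width, height] in relative [0, 1] coordinates, which is why to_fiftyone_detection takes image_height and image_width. A quick check (not part of this commit), using the [447, 308, 49, 33] COCO-style box printed above and assuming the demo image is 1068x580 pixels, as those values imply:

```python
# Sketch: reproduce the relative bounding_box of the Detection above from the pixel-space box.
image_width, image_height = 1068, 580  # assumed demo image size, implied by the outputs above
x, y, w, h = 447, 308, 49, 33          # COCO-format [x, y, width, height] from the cell above

relative_box = [x / image_width, y / image_height, w / image_width, h / image_height]
print(relative_box)  # ~[0.4185, 0.5310, 0.0459, 0.0569], matching the bounding_box shown above
```
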
16 changes: 12 additions & 4 deletions sahi/utils/fiftyon.py → sahi/utils/fiftyone.py
@@ -1,8 +1,11 @@
import os

import fiftyone as fo
from fiftyone.utils.coco import COCODetectionDatasetImporter as BaseCOCODetectionDatasetImporter
from fiftyone.utils.coco import load_coco_detection_annotations
try:
    import fiftyone as fo
    from fiftyone.utils.coco import COCODetectionDatasetImporter as BaseCOCODetectionDatasetImporter
    from fiftyone.utils.coco import load_coco_detection_annotations
except ImportError:
    raise ImportError('Please run "pip install -U fiftyone" to install fiftyone first for fiftyone utilities.')


class COCODetectionDatasetImporter(BaseCOCODetectionDatasetImporter):
@@ -74,9 +77,14 @@ def setup(self):
        self._filenames = self._preprocess_list(filenames)


def launch_fiftyone_app(coco_image_dir: str, coco_json_path: str):
def create_fiftyone_dataset_from_coco_file(coco_image_dir: str, coco_json_path: str):
    coco_importer = COCODetectionDatasetImporter(image_dir=coco_image_dir, json_path=coco_json_path)
    dataset = fo.Dataset.from_importer(coco_importer)
    return dataset


def launch_fiftyone_app(coco_image_dir: str, coco_json_path: str):
    dataset = create_fiftyone_dataset_from_coco_file(coco_image_dir, coco_json_path)
    session = fo.launch_app()
    session.dataset = dataset
    return session
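
A minimal usage sketch for the refactored helpers (not part of this commit; the COCO image directory and annotation paths below are placeholders): create_fiftyone_dataset_from_coco_file builds the dataset on its own, and launch_fiftyone_app composes it with fo.launch_app().

```python
# Sketch: using the helpers from the renamed sahi.utils.fiftyone module (paths are placeholders).
from sahi.utils.fiftyone import create_fiftyone_dataset_from_coco_file, launch_fiftyone_app

coco_image_dir = "datasets/coco/images"            # placeholder image directory
coco_json_path = "datasets/coco/annotations.json"  # placeholder COCO annotation file

# Build a FiftyOne dataset from a COCO annotation file without opening the app.
dataset = create_fiftyone_dataset_from_coco_file(coco_image_dir, coco_json_path)
print(dataset)

# Or build the dataset and open it in the FiftyOne app in one call.
session = launch_fiftyone_app(coco_image_dir, coco_json_path)
session.wait()  # block until the app is closed (useful when running outside a notebook)
```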
