add black and isort check to ci + reformat codebase (#179)
* add black check to ci workflow

* update dev package versions

* add isort check for ci

* reformat with isort

* reformat with updated black and isort

* reformat config files with black

* update readme with new contributing guidelines

* update project toml with isort config
fcakyon committed Jul 28, 2021
1 parent b523d0a commit 007dc92
Showing 35 changed files with 858 additions and 317 deletions.
6 changes: 4 additions & 2 deletions .github/workflows/ci.yml
@@ -71,11 +71,13 @@ jobs:
run: >
pip install mmcv-full==1.3.7 mmdet==2.13.0 yolov5==5.0.6 norfair==0.3.0
- name: Lint with flake8
- name: Lint with flake8, black and isort
run: |
pip install flake8
pip install -e .[dev]
# stop the build if there are Python syntax errors or undefined names
flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
black . --check --config pyproject.toml
isort -c .
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
16 changes: 16 additions & 0 deletions README.md
@@ -405,6 +405,22 @@ mot_video.export(export_dir="mot_gt", type="gt")

All you need to do is create a new class in [model.py](sahi/model.py) that implements the [DetectionModel class](https://github.com/obss/sahi/blob/651f8e6cdb20467815748764bb198dd50241ab2b/sahi/model.py#L10). You can take the [MMDetection wrapper](https://github.com/obss/sahi/blob/651f8e6cdb20467815748764bb198dd50241ab2b/sahi/model.py#L164) or the [YOLOv5 wrapper](https://github.com/obss/sahi/blob/ffa168fc38b75a002a0117f1fdde9470e1a9ce8c/sahi/model.py#L363) as a reference.
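
A minimal sketch of what such a wrapper can look like is given below. Only the method names follow the `DetectionModel` interface visible in the `sahi/model.py` diff of this commit; the fake detector output and the exact `ObjectPrediction` keyword arguments are assumptions, so treat the existing MMDetection and YOLOv5 wrappers as the authoritative reference.

```python
# Hypothetical wrapper sketch: method names mirror the DetectionModel interface,
# but the fake detector and the ObjectPrediction keyword arguments are assumptions.
from typing import List, Optional

import numpy as np

from sahi.model import DetectionModel
from sahi.prediction import ObjectPrediction


class MyDetectionModel(DetectionModel):
    def load_model(self):
        # placeholder "model": a real wrapper would load weights from self.model_path
        # onto self.device here using its framework's own loading API
        self.model = lambda image: [([10, 20, 110, 220], 0.9, 0)]  # one fake detection

    def perform_inference(self, image: np.ndarray, image_size: int = None):
        # run the detector and keep its raw output for later conversion
        self._original_predictions = self.model(image)

    def _create_object_prediction_list_from_original_predictions(
        self,
        shift_amount: Optional[List[int]] = [0, 0],
        full_shape: Optional[List[int]] = None,
    ):
        # translate the framework-specific output into sahi ObjectPrediction objects
        object_prediction_list = []
        for bbox, score, category_id in self._original_predictions:
            object_prediction_list.append(
                ObjectPrediction(
                    bbox=bbox,                  # assumed [xmin, ymin, xmax, ymax]
                    score=score,
                    category_id=category_id,
                    shift_amount=shift_amount,
                    full_shape=full_shape,
                )
            )
        self._object_prediction_list = object_prediction_list
```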

Before opening a PR:

- Install required development packages:

```bash
pip install -U -e .[dev]
```

- Reformat with black and isort:

```bash
black . --config pyproject.toml
isort .
```


## <div align="center">Contributors</div>

<div align="center">
6 changes: 5 additions & 1 deletion pyproject.toml
@@ -1,2 +1,6 @@
[tool.black]
line-length = 120
line-length = 120

[tool.isort]
line_length = 120
profile = "black"
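
Note that `profile = "black"` makes isort emit import blocks black will not rewrite (parenthesized multi-line imports with trailing commas), and `line_length = 120` keeps isort on the same wrap limit as black's `line-length = 120` above, so the two formatters do not undo each other's changes.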
74 changes: 60 additions & 14 deletions sahi/annotation.py
@@ -2,7 +2,7 @@
# Code written by Fatih C Akyon, 2020.

import copy
from typing import List, Optional, Dict
from typing import Dict, List, Optional

import numpy as np

@@ -111,7 +111,11 @@ def __repr__(self):
class Mask:
@classmethod
def from_float_mask(
cls, mask, full_shape=None, mask_threshold: float = 0.5, shift_amount: list = [0, 0],
cls,
mask,
full_shape=None,
mask_threshold: float = 0.5,
shift_amount: list = [0, 0],
):
"""
Args:
@@ -126,11 +130,18 @@ def from_float_mask(
Size of the full image after shifting, should be in the form of [height, width]
"""
bool_mask = mask > mask_threshold
return cls(bool_mask=bool_mask, shift_amount=shift_amount, full_shape=full_shape,)
return cls(
bool_mask=bool_mask,
shift_amount=shift_amount,
full_shape=full_shape,
)

@classmethod
def from_coco_segmentation(
cls, segmentation, full_shape=None, shift_amount: list = [0, 0],
cls,
segmentation,
full_shape=None,
shift_amount: list = [0, 0],
):
"""
Init Mask from coco segmentation representation.
@@ -152,10 +163,17 @@ def from_coco_segmentation(
assert full_shape is not None, "full_shape must be provided"

bool_mask = get_bool_mask_from_coco_segmentation(segmentation, height=full_shape[0], width=full_shape[1])
return cls(bool_mask=bool_mask, shift_amount=shift_amount, full_shape=full_shape,)
return cls(
bool_mask=bool_mask,
shift_amount=shift_amount,
full_shape=full_shape,
)

def __init__(
self, bool_mask=None, full_shape=None, shift_amount: list = [0, 0],
self,
bool_mask=None,
full_shape=None,
shift_amount: list = [0, 0],
):
"""
Args:
@@ -216,7 +234,14 @@ def get_shifted_mask(self):
# Confirm full_shape is specified
assert (self.full_shape_height is not None) and (self.full_shape_width is not None), "full_shape is None"
# init full mask
mask_fullsized = np.full((self.full_shape_height, self.full_shape_width,), 0, dtype="float32",)
mask_fullsized = np.full(
(
self.full_shape_height,
self.full_shape_width,
),
0,
dtype="float32",
)

# arrange starting ending indexes
starting_pixel = [self.shift_x, self.shift_y]
@@ -230,7 +255,11 @@ def get_shifted_mask(self):
: ending_pixel[1] - starting_pixel[1], : ending_pixel[0] - starting_pixel[0]
]

return Mask(mask_fullsized, shift_amount=[0, 0], full_shape=self.full_shape,)
return Mask(
mask_fullsized,
shift_amount=[0, 0],
full_shape=self.full_shape,
)

def to_coco_segmentation(self):
"""
@@ -441,7 +470,10 @@ def from_shapely_annotation(

@classmethod
def from_imantics_annotation(
cls, annotation, shift_amount: Optional[List[int]] = [0, 0], full_shape: Optional[List[int]] = None,
cls,
annotation,
shift_amount: Optional[List[int]] = [0, 0],
full_shape: Optional[List[int]] = None,
):
"""
Creates ObjectAnnotation from imantics.annotation.Annotation
@@ -495,11 +527,18 @@ def __init__(
self.mask = None
self.bbox = BoundingBox(bbox, shift_amount)
else:
self.mask = Mask(bool_mask=bool_mask, shift_amount=shift_amount, full_shape=full_shape,)
self.mask = Mask(
bool_mask=bool_mask,
shift_amount=shift_amount,
full_shape=full_shape,
)
bbox = get_bbox_from_bool_mask(bool_mask)
self.bbox = BoundingBox(bbox, shift_amount)
category_name = category_name if category_name else str(category_id)
self.category = Category(id=category_id, name=category_name,)
self.category = Category(
id=category_id,
name=category_name,
)

self.merged = None

@@ -515,7 +554,9 @@ def to_coco_annotation(self):
)
else:
coco_annotation = CocoAnnotation.from_coco_bbox(
bbox=self.bbox.to_coco_bbox(), category_id=self.category.id, category_name=self.category.name,
bbox=self.bbox.to_coco_bbox(),
category_id=self.category.id,
category_name=self.category.name,
)
return coco_annotation

@@ -532,7 +573,10 @@ def to_coco_prediction(self):
)
else:
coco_prediction = CocoPrediction.from_coco_bbox(
bbox=self.bbox.to_coco_bbox(), category_id=self.category.id, category_name=self.category.name, score=1,
bbox=self.bbox.to_coco_bbox(),
category_id=self.category.id,
category_name=self.category.name,
score=1,
)
return coco_prediction

@@ -545,7 +589,9 @@ def to_shapely_annotation(self):
segmentation=self.mask.to_coco_segmentation(),
)
else:
shapely_annotation = ShapelyAnnotation.from_coco_bbox(bbox=self.bbox.to_coco_bbox(),)
shapely_annotation = ShapelyAnnotation.from_coco_bbox(
bbox=self.bbox.to_coco_bbox(),
)
return shapely_annotation

def to_imantics_annotation(self):
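
As a usage note, the reformatted `Mask` constructors above are typically combined as in the short sketch below. The array sizes and offsets are made-up values, and the `[shift_x, shift_y]` ordering of `shift_amount` is inferred from the `get_shifted_mask` body above.

```python
import numpy as np

from sahi.annotation import Mask

# fake per-pixel scores for a 100x150 slice; values and shapes are made up
float_mask = np.random.rand(100, 150)

mask = Mask.from_float_mask(
    float_mask,
    full_shape=[480, 640],   # [height, width] of the full image
    mask_threshold=0.5,      # scores above this become True in the bool mask
    shift_amount=[20, 30],   # offset of the slice inside the full image, assumed [shift_x, shift_y]
)

# paste the slice-level mask back onto a full-size canvas and export it
full_mask = mask.get_shifted_mask()
coco_segmentation = full_mask.to_coco_segmentation()
```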
38 changes: 28 additions & 10 deletions sahi/model.py
@@ -1,11 +1,12 @@
# OBSS SAHI Tool
# Code written by Fatih C Akyon, 2020.

from typing import Dict, List, Optional, Union

import numpy as np

from sahi.prediction import ObjectPrediction
from sahi.utils.torch import cuda_is_available, empty_cuda_cache
from typing import List, Dict, Optional, Union


class DetectionModel:
@@ -89,7 +90,9 @@ def perform_inference(self, image: np.ndarray, image_size: int = None):
NotImplementedError()

def _create_object_prediction_list_from_original_predictions(
self, shift_amount: Optional[List[int]] = [0, 0], full_shape: Optional[List[int]] = None,
self,
shift_amount: Optional[List[int]] = [0, 0],
full_shape: Optional[List[int]] = None,
):
"""
This function should be implemented in a way that self._original_predictions should
@@ -117,7 +120,9 @@ def _apply_category_remapping(self):
object_prediction.category.id = new_category_id_int

def convert_original_predictions(
self, shift_amount: Optional[List[int]] = [0, 0], full_shape: Optional[List[int]] = None,
self,
shift_amount: Optional[List[int]] = [0, 0],
full_shape: Optional[List[int]] = None,
):
"""
Converts original predictions of the detection model to a list of
@@ -130,7 +135,8 @@ def convert_original_predictions(
Size of the full image after shifting, should be in the form of [height, width]
"""
self._create_object_prediction_list_from_original_predictions(
shift_amount=shift_amount, full_shape=full_shape,
shift_amount=shift_amount,
full_shape=full_shape,
)
if self.category_remapping:
self._apply_category_remapping()
@@ -143,7 +149,9 @@ def object_prediction_list(self):
def original_predictions(self):
return self._original_predictions

def _create_predictions_from_object_prediction_list(object_prediction_list: List[ObjectPrediction],):
def _create_predictions_from_object_prediction_list(
object_prediction_list: List[ObjectPrediction],
):
"""
This function should be implemented in a way that it converts a list of
prediction.ObjectPrediction instance to detection model's original prediction format.
@@ -172,7 +180,11 @@ def load_model(self):
from mmdet.apis import init_detector

# set model
model = init_detector(config=self.config_path, checkpoint=self.model_path, device=self.device,)
model = init_detector(
config=self.config_path,
checkpoint=self.model_path,
device=self.device,
)
self.model = model

# set category_mapping
@@ -239,7 +251,9 @@ def category_names(self):
return self.model.CLASSES

def _create_object_prediction_list_from_original_predictions(
self, shift_amount: Optional[List[int]] = [0, 0], full_shape: Optional[List[int]] = None,
self,
shift_amount: Optional[List[int]] = [0, 0],
full_shape: Optional[List[int]] = None,
):
"""
self._original_predictions is converted to a list of prediction.ObjectPrediction and set to
@@ -294,7 +308,8 @@ def _create_object_prediction_list_from_original_predictions(
self._object_prediction_list = object_prediction_list

def _create_original_predictions_from_object_prediction_list(
self, object_prediction_list: List[ObjectPrediction],
self,
object_prediction_list: List[ObjectPrediction],
):
"""
Converts a list of prediction.ObjectPrediction instance to detection model's original prediction format.
@@ -412,7 +427,9 @@ def category_names(self):
return self.model.names

def _create_object_prediction_list_from_original_predictions(
self, shift_amount: Optional[List[int]] = [0, 0], full_shape: Optional[List[int]] = None,
self,
shift_amount: Optional[List[int]] = [0, 0],
full_shape: Optional[List[int]] = None,
):
"""
self._original_predictions is converted to a list of prediction.ObjectPrediction and set to
@@ -456,7 +473,8 @@ def _create_object_prediction_list_from_original_predictions(
self._object_prediction_list = object_prediction_list

def _create_original_predictions_from_object_prediction_list(
self, object_prediction_list: List[ObjectPrediction],
self,
object_prediction_list: List[ObjectPrediction],
):
"""
Converts a list of prediction.ObjectPrediction instance to detection model's original
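
For orientation, the methods reformatted above make up the standalone-inference flow sketched below. The wrapper class name `MmdetDetectionModel` and its constructor keywords are assumptions not shown in this diff (only its `load_model` body appears above), and the paths and image are placeholders.

```python
import numpy as np

from sahi.model import MmdetDetectionModel  # assumed export name, not shown in this diff

detection_model = MmdetDetectionModel(
    model_path="checkpoints/model.pth",     # placeholder mmdet checkpoint path
    config_path="configs/model_config.py",  # placeholder mmdet config path
    device="cpu",
)
detection_model.load_model()  # explicit load; the constructor may already do this

image = np.zeros((480, 640, 3), dtype=np.uint8)  # placeholder image
detection_model.perform_inference(image)         # stores the raw mmdet output
detection_model.convert_original_predictions(
    shift_amount=[0, 0],      # offset of the slice inside the full image
    full_shape=[480, 640],    # [height, width] of the full image
)
predictions = detection_model.object_prediction_list  # list of ObjectPrediction
```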