Skip to content

Commit

Permalink
fix: dataset.py snake case and remove unused imports
Browse files Browse the repository at this point in the history
  • Loading branch information
PaulHax committed Jun 20, 2024
1 parent 2f72d23 commit 82c6c55
Show file tree
Hide file tree
Showing 3 changed files with 17 additions and 19 deletions.
16 changes: 8 additions & 8 deletions src/nrtk_explorer/app/transforms.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
"""

import logging
from typing import Dict, Sequence
from typing import Dict
import asyncio
from functools import partial
import os
Expand Down Expand Up @@ -32,8 +32,8 @@
)
import nrtk_explorer.test_data
from nrtk_explorer.app.trame_utils import delete_state
from nrtk_explorer.app.image_ids import image_id_to_dataset_id, image_id_to_result_id, DatasetId
from nrtk_explorer.library.dataset import getDataset
from nrtk_explorer.app.image_ids import image_id_to_dataset_id, image_id_to_result_id
from nrtk_explorer.library.dataset import get_dataset


logger = logging.getLogger(__name__)
Expand Down Expand Up @@ -146,7 +146,7 @@ def on_apply_transform(self, *args, **kwargs):
if len(transformed_image_ids) == 0:
return

dataset = getDataset(self.state.current_dataset)
dataset = get_dataset(self.state.current_dataset)

# Erase current annotations
dataset_ids = [image_id_to_dataset_id(id) for id in self.state.source_image_ids]
Expand Down Expand Up @@ -221,7 +221,7 @@ def compute_annotations(self, ids):
return predictions

def on_current_num_elements_change(self, current_num_elements, **kwargs):
dataset = getDataset(self.state.current_dataset)
dataset = get_dataset(self.state.current_dataset)
ids = [img["id"] for img in dataset["images"]]
return self.set_source_images(ids[:current_num_elements])

Expand All @@ -236,7 +236,7 @@ def compute_predictions_source_images(self, old_ids, ids):
if len(ids) == 0:
return

dataset = getDataset(self.state.current_dataset)
dataset = get_dataset(self.state.current_dataset)

annotations = self.compute_annotations(ids)
self.predictions_source_images = convert_from_predictions_to_first_arg(
Expand Down Expand Up @@ -280,7 +280,7 @@ def _update_images(self, selected_ids):

current_dir = os.path.dirname(self.state.current_dataset)

dataset = getDataset(self.state.current_dataset)
dataset = get_dataset(self.state.current_dataset)

for selected_id in selected_ids:
image_index = self.context.image_id_to_index[selected_id]
Expand Down Expand Up @@ -345,7 +345,7 @@ def on_current_dataset_change(self, current_dataset, **kwargs):

self.reset_data()

dataset = getDataset(current_dataset)
dataset = get_dataset(current_dataset)
categories = {}

for category in dataset["categories"]:
Expand Down
18 changes: 9 additions & 9 deletions src/nrtk_explorer/library/dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,18 +27,18 @@ class Dataset(TypedDict):
annotations: List[DatasetAnnotation]


def loadDataset(path: str) -> Dataset:
def load_dataset(path: str) -> Dataset:
    """Read and parse the dataset JSON file at *path*.

    Returns the decoded JSON object (expected to carry the ``categories``,
    ``images`` and ``annotations`` keys of :class:`Dataset`).

    Raises ``OSError`` if the file cannot be opened and
    ``json.JSONDecodeError`` if its contents are not valid JSON.
    """
    # JSON is defined as UTF-8 text; pin the encoding instead of relying on
    # the platform's locale default (which breaks on e.g. Windows cp1252).
    with open(path, encoding="utf-8") as f:
        return json.load(f)


datasetJson: Dataset = {"categories": [], "images": [], "annotations": []}
_datasetPath: str = ""
# Module-level single-entry cache used by get_dataset(): the most recently
# loaded dataset and the path it was loaded from.  An empty dataset and an
# empty path mean "nothing loaded yet".
dataset_json: Dataset = {"categories": [], "images": [], "annotations": []}
dataset_path: str = ""


def getDataset(path: str) -> Dataset:
global datasetJson, _datasetPath
if _datasetPath != path:
_datasetPath = path
datasetJson = loadDataset(_datasetPath)
return datasetJson
def get_dataset(path: str) -> Dataset:
    """Return the dataset stored at *path*.

    The most recent result is cached at module level, so repeated calls
    with the same path do not re-read the file; a different path triggers
    a fresh load and replaces the cached entry.
    """
    global dataset_json, dataset_path
    # Cache hit: same path as last time, reuse the parsed dataset.
    if path == dataset_path:
        return dataset_json
    # Cache miss: remember the new path, then load and cache its contents.
    dataset_path = path
    dataset_json = load_dataset(path)
    return dataset_json
2 changes: 0 additions & 2 deletions src/nrtk_explorer/library/object_detector.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,7 @@
import logging
import operator
import torch
import transformers

from functools import reduce
from typing import Optional

from nrtk_explorer.library import images_manager
Expand Down

0 comments on commit 82c6c55

Please sign in to comment.