
Commit 672507b: Formatted with black
kylevedder committed Jan 10, 2024
1 parent c3fb517 commit 672507b
Showing 29 changed files with 1,579 additions and 1,168 deletions.
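This commit mechanically reformats the repository with the black autoformatter. The recurring rewrites visible in the diffs below: single-quoted strings become double-quoted, long signatures and calls are exploded one argument per line with a trailing comma, long chained expressions are wrapped in parentheses, and redundant blank lines are dropped. As a minimal sketch (assuming a stock `pip install black`; the exact version and line length the repository uses are assumptions, not taken from this commit), the same rewrite can be reproduced through black's Python API:

```python
# Minimal sketch, assuming `pip install black`; version and line-length
# settings are assumptions, not taken from this repository's config.
import black

src = "BACKGROUND_CATEGORIES = ['BACKGROUND']\n"

# format_str applies black's stable style to a source string.
formatted = black.format_str(src, mode=black.FileMode())
print(formatted)  # -> BACKGROUND_CATEGORIES = ["BACKGROUND"]
```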
1 change: 0 additions & 1 deletion bucketed_scene_flow_eval/datasets/__init__.py
@@ -6,7 +6,6 @@
 name_to_class_lookup = {cls.__name__.lower(): cls for cls in importable_classes}
 
 
-
 def construct_dataset(name: str, args: dict):
     name = name.lower()
     if name not in name_to_class_lookup:
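For context, `construct_dataset` resolves datasets by lower-cased class name, so any capitalization of a registered name works. A hypothetical usage sketch; that `WaymoOpenSceneFlow` (changed later in this commit) is registered in `name_to_class_lookup`, and that `args` is forwarded to its constructor, are assumptions not shown in this diff:

```python
# Hypothetical usage sketch; registration and argument forwarding are
# assumptions based on the lookup above, not shown in this diff.
from bucketed_scene_flow_eval.datasets import construct_dataset

dataset = construct_dataset("WaymoOpenSceneFlow", {"root_dir": "/data/waymo_open/val"})
```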
277 changes: 152 additions & 125 deletions bucketed_scene_flow_eval/datasets/argoverse2/argoverse_raw_data.py

Large diffs are not rendered by default.

45 changes: 27 additions & 18 deletions bucketed_scene_flow_eval/datasets/argoverse2/av2_metacategories.py
@@ -1,29 +1,38 @@
-BACKGROUND_CATEGORIES = ['BACKGROUND']
+BACKGROUND_CATEGORIES = ["BACKGROUND"]
 
 ROAD_SIGNS = [
-    'BOLLARD',
-    'CONSTRUCTION_BARREL',
-    'CONSTRUCTION_CONE',
-    'MOBILE_PEDESTRIAN_CROSSING_SIGN',
-    'SIGN',
-    'STOP_SIGN',
-    'MESSAGE_BOARD_TRAILER',
-    'TRAFFIC_LIGHT_TRAILER',
+    "BOLLARD",
+    "CONSTRUCTION_BARREL",
+    "CONSTRUCTION_CONE",
+    "MOBILE_PEDESTRIAN_CROSSING_SIGN",
+    "SIGN",
+    "STOP_SIGN",
+    "MESSAGE_BOARD_TRAILER",
+    "TRAFFIC_LIGHT_TRAILER",
 ]
-PEDESTRIAN_CATEGORIES = [
-    'PEDESTRIAN', 'STROLLER', 'WHEELCHAIR', 'OFFICIAL_SIGNALER'
-]
+PEDESTRIAN_CATEGORIES = ["PEDESTRIAN", "STROLLER", "WHEELCHAIR", "OFFICIAL_SIGNALER"]
 
 WHEELED_VRU = [
-    'BICYCLE', 'BICYCLIST', 'MOTORCYCLE', 'MOTORCYCLIST', 'WHEELED_DEVICE',
-    'WHEELED_RIDER'
+    "BICYCLE",
+    "BICYCLIST",
+    "MOTORCYCLE",
+    "MOTORCYCLIST",
+    "WHEELED_DEVICE",
+    "WHEELED_RIDER",
 ]
 
-CAR = ['REGULAR_VEHICLE']
+CAR = ["REGULAR_VEHICLE"]
 
 OTHER_VEHICLES = [
-    'BOX_TRUCK', 'LARGE_VEHICLE', 'RAILED_VEHICLE', 'TRUCK', 'TRUCK_CAB',
-    'VEHICULAR_TRAILER', 'ARTICULATED_BUS', 'BUS', 'SCHOOL_BUS'
+    "BOX_TRUCK",
+    "LARGE_VEHICLE",
+    "RAILED_VEHICLE",
+    "TRUCK",
+    "TRUCK_CAB",
+    "VEHICULAR_TRAILER",
+    "ARTICULATED_BUS",
+    "BUS",
+    "SCHOOL_BUS",
 ]
 
 METACATAGORIES = {
@@ -33,4 +42,4 @@
     "PEDESTRIAN": PEDESTRIAN_CATEGORIES,
     "WHEELED_VRU": WHEELED_VRU,
     "OTHER_VEHICLES": OTHER_VEHICLES,
-}
\ No newline at end of file
+}
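These metacategory lists drive the bucketed evaluation. A consumer-side sketch (hypothetical, not part of this commit) that inverts `METACATAGORIES` into a leaf-category to metacategory lookup; the import path is taken from the file header above:

```python
# Hypothetical inversion sketch; assumes the module path from the file
# header above and uses only keys visible in this diff.
from bucketed_scene_flow_eval.datasets.argoverse2.av2_metacategories import (
    METACATAGORIES,
)

category_to_metacategory = {
    category: metacategory
    for metacategory, categories in METACATAGORIES.items()
    for category in categories
}

assert category_to_metacategory["STROLLER"] == "PEDESTRIAN"
```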
27 changes: 12 additions & 15 deletions bucketed_scene_flow_eval/datasets/argoverse2/symlink_camera_data.py
@@ -5,38 +5,35 @@
 
 # Get path to missing_cam_frames AV2 and with_cam_frames AV2 copies.
 parser = argparse.ArgumentParser()
-parser.add_argument('missing_cam_frames', type=Path)
-parser.add_argument('with_cam_frames', type=Path)
+parser.add_argument("missing_cam_frames", type=Path)
+parser.add_argument("with_cam_frames", type=Path)
 args = parser.parse_args()
 
-assert args.missing_cam_frames.is_dir(
-), f"{args.missing_cam_frames} is not a directory"
-assert args.with_cam_frames.is_dir(
-), f"{args.with_cam_frames} is not a directory"
+assert args.missing_cam_frames.is_dir(), f"{args.missing_cam_frames} is not a directory"
+assert args.with_cam_frames.is_dir(), f"{args.with_cam_frames} is not a directory"
 
-split_names = ['train', 'val', 'test']
+split_names = ["train", "val", "test"]
 
 for split in split_names:
     missing_frames_dir = args.missing_cam_frames / split
     with_frames_dir = args.with_cam_frames / split
 
     # iterate through directories in missing_frames_dir
-    for missing_dir in tqdm.tqdm(list(missing_frames_dir.iterdir()),
-                                 desc=f"Processing {split} split"):
+    for missing_dir in tqdm.tqdm(
+        list(missing_frames_dir.iterdir()), desc=f"Processing {split} split"
+    ):
         # Corresponding data dir
         with_dir = with_frames_dir / missing_dir.name
         assert missing_dir.is_dir(), f"{missing_dir} is not a directory"
         assert with_dir.is_dir(), f"{with_dir} is not a directory"
 
         # Symlink the "sensors/cameras" directory from with_dir to missing_dir.
         # Remove the "sensors/cameras" directory from missing_dir if it exists.
-        missing_cameras_dir = missing_dir / 'sensors/cameras'
-        with_cameras_dir = with_dir / 'sensors/cameras'
-        assert with_cameras_dir.is_dir(
-        ), f"{with_cameras_dir} is not a directory"
+        missing_cameras_dir = missing_dir / "sensors/cameras"
+        with_cameras_dir = with_dir / "sensors/cameras"
+        assert with_cameras_dir.is_dir(), f"{with_cameras_dir} is not a directory"
 
         if missing_cameras_dir.is_dir():
             shutil.rmtree(missing_cameras_dir)
 
-        missing_cameras_dir.symlink_to(with_cameras_dir,
-                                       target_is_directory=True)
+        missing_cameras_dir.symlink_to(with_cameras_dir, target_is_directory=True)
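As a usage sketch, the script takes the camera-less AV2 copy first and the copy with camera frames second; afterwards each sequence's `sensors/cameras` directory in the first copy should resolve into the second. All paths and the log id below are hypothetical:

```python
# Hypothetical post-run check, e.g. after:
#   python symlink_camera_data.py /data/av2_no_cams /data/av2_with_cams
from pathlib import Path

cams = Path("/data/av2_no_cams/train/some_log_id/sensors/cameras")  # hypothetical log id
assert cams.is_symlink()
assert cams.resolve() == Path("/data/av2_with_cams/train/some_log_id/sensors/cameras")
```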
8 changes: 6 additions & 2 deletions bucketed_scene_flow_eval/datasets/waymoopen/__init__.py
@@ -1,6 +1,10 @@
-from .waymo_supervised_flow import WaymoSupervisedSceneFlowSequenceLoader, WaymoSupervisedSceneFlowSequence, CATEGORY_MAP
+from .waymo_supervised_flow import (
+    WaymoSupervisedSceneFlowSequenceLoader,
+    WaymoSupervisedSceneFlowSequence,
+    CATEGORY_MAP,
+)
 from .dataset import WaymoOpenSceneFlow
 
 __all__ = [
-    'WaymoOpenSceneFlow',
+    "WaymoOpenSceneFlow",
 ]
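Consumer-side, the package keeps re-exporting the same names; a minimal import sketch based on the lines above:

```python
# Minimal import sketch; module paths are taken from the file headers in
# this commit.
from bucketed_scene_flow_eval.datasets.waymoopen import WaymoOpenSceneFlow
from bucketed_scene_flow_eval.datasets.waymoopen.waymo_supervised_flow import (
    WaymoSupervisedSceneFlowSequenceLoader,
    CATEGORY_MAP,
)
```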
124 changes: 77 additions & 47 deletions bucketed_scene_flow_eval/datasets/waymoopen/dataset.py
@@ -18,18 +18,21 @@ class EvalType(enum.Enum):
     BUCKETED_EPE = 3
 
 
-class WaymoOpenSceneFlow():
+class WaymoOpenSceneFlow:
     """
     Wrapper for the Waymo Open dataset.
     It provides iterable access over all problems in the dataset.
     """
-    def __init__(self,
-                 root_dir: Path,
-                 subsequence_length: int = 2,
-                 cache_path: Path = Path("/tmp/"),
-                 eval_type: str = "bucketed_epe",
-                 eval_args=dict()) -> None:
+
+    def __init__(
+        self,
+        root_dir: Path,
+        subsequence_length: int = 2,
+        cache_path: Path = Path("/tmp/"),
+        eval_type: str = "bucketed_epe",
+        eval_args=dict(),
+    ) -> None:
 
         self.root_dir = Path(root_dir)
         self.sequence_loader = WaymoSupervisedSceneFlowSequenceLoader(root_dir)
         self.subsequence_length = subsequence_length
@@ -41,14 +44,21 @@ def __init__(self,
         self.relative_pc_flowed_key = "relative_flowed_pc"
         self.pc_classes_key = "pc_classes"
 
-        self.dataset_to_sequence_subsequence_idx = self._load_dataset_to_sequence_subsequence_idx(
+        self.dataset_to_sequence_subsequence_idx = (
+            self._load_dataset_to_sequence_subsequence_idx()
         )
 
         self.eval_type = EvalType[eval_type.strip().upper()]
         self.eval_args = eval_args
 
     def _load_dataset_to_sequence_subsequence_idx(self):
-        cache_file = self.cache_path / "waymo" / self.root_dir.parent.name / self.root_dir.name / f"dataset_to_sequence_subsequence_idx_cache_len_{self.subsequence_length}.pkl"
+        cache_file = (
+            self.cache_path
+            / "waymo"
+            / self.root_dir.parent.name
+            / self.root_dir.name
+            / f"dataset_to_sequence_subsequence_idx_cache_len_{self.subsequence_length}.pkl"
+        )
         if cache_file.exists():
             return load_pickle(cache_file)
 
@@ -57,9 +67,11 @@ def _load_dataset_to_sequence_subsequence_idx(self):
         dataset_to_sequence_subsequence_idx = []
         for sequence_idx, sequence in enumerate(self.sequence_loader):
             for subsequence_start_idx in range(
-                    len(sequence) - self.subsequence_length + 1):
+                len(sequence) - self.subsequence_length + 1
+            ):
                 dataset_to_sequence_subsequence_idx.append(
-                    (sequence_idx, subsequence_start_idx))
+                    (sequence_idx, subsequence_start_idx)
+                )
 
         print(
             f"Loaded {len(dataset_to_sequence_subsequence_idx)} subsequence pairs. Saving it to {cache_file}"
@@ -71,7 +83,8 @@ def __len__(self):
         return len(self.dataset_to_sequence_subsequence_idx)
 
     def _make_scene_sequence(
-            self, subsequence_frames: List[Dict], seq_id : str) -> RawSceneSequence:
+        self, subsequence_frames: List[Dict], seq_id: str
+    ) -> RawSceneSequence:
         # Build percept lookup. This stores the percepts for the entire sequence, with the
         # global frame being zero'd at the target frame.
         percept_lookup: Dict[Timestamp, RawSceneItem] = {}
@@ -80,33 +93,42 @@ def _make_scene_sequence(
             lidar_to_ego = SE3.identity()
             ego_to_world: SE3 = entry["relative_pose"]
             point_cloud_frame = PointCloudFrame(
-                pc, PoseInfo(lidar_to_ego, ego_to_world))
+                pc, PoseInfo(lidar_to_ego, ego_to_world)
+            )
             percept_lookup[dataset_idx] = RawSceneItem(
-                pc_frame=point_cloud_frame, rgb_frame=None)
+                pc_frame=point_cloud_frame, rgb_frame=None
+            )
 
         return RawSceneSequence(percept_lookup, seq_id)
 
     def _make_query_scene_sequence(
-            self, scene_sequence: RawSceneSequence,
-            subsequence_frames: List[Dict], subsequence_src_index: int,
-            subsequence_tgt_index: int) -> QuerySceneSequence:
+        self,
+        scene_sequence: RawSceneSequence,
+        subsequence_frames: List[Dict],
+        subsequence_src_index: int,
+        subsequence_tgt_index: int,
+    ) -> QuerySceneSequence:
         # Build query scene sequence. This requires enumerating all points in the source frame.
         query_timestamps: List[Timestamp] = [
-            subsequence_src_index, subsequence_tgt_index
+            subsequence_src_index,
+            subsequence_tgt_index,
         ]
         source_entry = subsequence_frames[subsequence_src_index]
 
         pc_points_array = source_entry[self.relative_pc_key].points
 
-        query_particles = QueryParticleLookup(len(pc_points_array),
-                                              subsequence_src_index)
-        return QuerySceneSequence(scene_sequence, query_particles,
-                                  query_timestamps)
+        query_particles = QueryParticleLookup(
+            len(pc_points_array), subsequence_src_index
+        )
+        return QuerySceneSequence(scene_sequence, query_particles, query_timestamps)
 
     def _make_results_scene_sequence(
-            self, query: QuerySceneSequence, subsequence_frames: List[Dict],
-            subsequence_src_index: int,
-            subsequence_tgt_index: int) -> GroundTruthParticleTrajectories:
+        self,
+        query: QuerySceneSequence,
+        subsequence_frames: List[Dict],
+        subsequence_src_index: int,
+        subsequence_tgt_index: int,
+    ) -> GroundTruthParticleTrajectories:
         # Build query scene sequence. This requires enumerating all points in
         # the source frame and the associated flowed points.
 
@@ -115,16 +137,18 @@ def _make_results_scene_sequence(
         target_pc = source_entry[self.relative_pc_flowed_key].points
         pc_class_ids = source_entry[self.pc_classes_key]
         assert len(source_pc) == len(
-            target_pc), "Source and target point clouds must be the same size."
+            target_pc
+        ), "Source and target point clouds must be the same size."
         assert len(source_pc) == len(
             pc_class_ids
         ), f"Source point cloud and class ids must be the same size. Instead got {len(source_pc)} and {len(pc_class_ids)}."
 
-
         particle_trajectories = GroundTruthParticleTrajectories(
             len(source_pc),
             np.array([subsequence_src_index, subsequence_tgt_index]),
-            query.query_particles.query_init_timestamp, CATEGORY_MAP)
+            query.query_particles.query_init_timestamp,
+            CATEGORY_MAP,
+        )
 
         points = np.stack([source_pc, target_pc], axis=1)
         # Stack the [False, False] array len(source_pc) times.
@@ -133,24 +157,24 @@ def _make_results_scene_sequence(
         particle_ids = np.arange(len(source_pc))
         is_valids = np.ones((len(source_pc), 2), dtype=bool)
 
-        particle_trajectories[particle_ids] = (points, is_occluded,
-                                               pc_class_ids, is_valids)
+        particle_trajectories[particle_ids] = (
+            points,
+            is_occluded,
+            pc_class_ids,
+            is_valids,
+        )
 
         return particle_trajectories
 
     def __getitem__(
-        self,
-        dataset_idx,
-        verbose: bool = False
+        self, dataset_idx, verbose: bool = False
     ) -> Tuple[QuerySceneSequence, GroundTruthParticleTrajectories]:
-
         if verbose:
-            print(
-                f"Waymo Open Scene Flow dataset __getitem__({dataset_idx}) start"
-            )
+            print(f"Waymo Open Scene Flow dataset __getitem__({dataset_idx}) start")
 
         sequence_idx, subsequence_start_idx = self.dataset_to_sequence_subsequence_idx[
-            dataset_idx]
+            dataset_idx
+        ]
 
         # Load sequence
         sequence = self.sequence_loader[sequence_idx]
@@ -160,25 +184,31 @@ def __getitem__(
         # Load subsequence
 
         subsequence_frames = [
-            sequence.load(subsequence_start_idx + i,
-                          subsequence_start_idx + in_subsequence_tgt_index)
+            sequence.load(
+                subsequence_start_idx + i,
+                subsequence_start_idx + in_subsequence_tgt_index,
+            )
             for i in range(self.subsequence_length)
         ]
 
         scene_sequence = self._make_scene_sequence(subsequence_frames, sequence.log_id)
 
         query_scene_sequence = self._make_query_scene_sequence(
-            scene_sequence, subsequence_frames, in_subsequence_src_index,
-            in_subsequence_tgt_index)
+            scene_sequence,
+            subsequence_frames,
+            in_subsequence_src_index,
+            in_subsequence_tgt_index,
+        )
 
         results_scene_sequence = self._make_results_scene_sequence(
-            query_scene_sequence, subsequence_frames, in_subsequence_src_index,
-            in_subsequence_tgt_index)
+            query_scene_sequence,
+            subsequence_frames,
+            in_subsequence_src_index,
+            in_subsequence_tgt_index,
+        )
 
         if verbose:
-            print(
-                f"Waymo Open Scene Flow dataset __getitem__({dataset_idx}) end"
-            )
+            print(f"Waymo Open Scene Flow dataset __getitem__({dataset_idx}) end")
 
         return query_scene_sequence, results_scene_sequence
 
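Taken together, a minimal end-to-end sketch of the API reformatted above (the dataset root is hypothetical; `cache_path` defaults to `/tmp/` and `eval_type` to `bucketed_epe` per `__init__`): the dataset maps a flat index to a (sequence, subsequence start) pair, and `__getitem__` returns a query plus its ground-truth trajectories.

```python
from pathlib import Path

from bucketed_scene_flow_eval.datasets.waymoopen import WaymoOpenSceneFlow

# Hypothetical dataset root; constructor defaults are shown in the diff above.
dataset = WaymoOpenSceneFlow(root_dir=Path("/data/waymo_open_flow/val"))

print(len(dataset))  # number of (sequence, subsequence start) pairs
query, ground_truth = dataset[0]  # QuerySceneSequence, GroundTruthParticleTrajectories
```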
The remaining changed files are not rendered.
