Skip to content

Commit

Permalink
update ImageNetVID dataset
Browse files Browse the repository at this point in the history
  • Loading branch information
natlouis committed Aug 12, 2019
1 parent 54d9bae commit c837df7
Showing 1 changed file with 0 additions and 86 deletions.
86 changes: 0 additions & 86 deletions datasets/ImageNetVID.py
Original file line number Diff line number Diff line change
Expand Up @@ -88,7 +88,6 @@ def __getitem__(self, idx):
ret_dict = dict()
ret_dict['data'] = vid_data
annot_dict = dict()
annot_dict['data'] = vid_data #TODO: Delete once done debugging
annot_dict['xmin'] = xmin_data
annot_dict['ymin'] = ymin_data
annot_dict['xmax'] = xmax_data
Expand All @@ -100,88 +99,3 @@ def __getitem__(self, idx):
ret_dict['annots'] = annot_dict

return ret_dict



class PreprocessTrain(object):
    """
    Bundles the augmentation pipeline applied to training clips for this dataset.

    Transforms are composed in order: resize -> (optional crop) -> random
    horizontal flip -> tensor conversion.
    """
    def __init__(self, **kwargs):
        self.transforms = []

        # Every clip is first resized to a common spatial resolution.
        self.transforms.append(pt.ResizeClip(**kwargs))

        # Optional cropping stage, selected via the 'crop_type' kwarg.
        crop_choice = kwargs['crop_type']
        if crop_choice == 'Random':
            self.transforms.append(pt.RandomCropClip(**kwargs))
        elif crop_choice == 'RandomFrame':
            # Applies an independent torchvision RandomCrop to each frame.
            self.transforms.append(pt.ApplyToClip(transform=torchvision.transforms.RandomCrop(**kwargs)))
        elif crop_choice == 'Center':
            self.transforms.append(pt.CenterCropClip(**kwargs))

        # Horizontal flip with probability 0.5, then conversion to tensors.
        self.transforms.append(pt.RandomFlipClip(direction='h', p=0.5, **kwargs))
        self.transforms.append(pt.ToTensorClip(**kwargs))

    def __call__(self, input_data, bbox_data):
        """
        Run a clip and its bounding boxes through every transform in order.

        Args:
            input_data: List of PIL images containing clip frames
            bbox_data:  Numpy array containing bbox coordinates per object per frame

        Return:
            input_data: Pytorch tensor containing the processed clip data
            bbox_data:  Numpy tensor containing the augmented bbox coordinates
        """
        for xform in self.transforms:
            input_data, bbox_data = xform(input_data, bbox_data)

        return input_data, bbox_data


class PreprocessEval(object):
    """
    Bundles the transform pipeline applied to clips for evaluation in this dataset.

    Transforms are composed in order: (optional crop) -> resize -> tensor
    conversion.
    """
    def __init__(self, **kwargs):
        self.transforms = []

        crop_shape = kwargs['crop_shape']
        resize_shape = kwargs['resize_shape']

        # Optional cropping stage, selected via the 'crop_type' kwarg.
        # NOTE(review): unlike PreprocessTrain, this pipeline crops *before*
        # resizing and unpacks the shapes positionally — confirm this ordering
        # and calling convention are intentional.
        crop_choice = kwargs['crop_type']
        if crop_choice == 'Random':
            self.transforms.append(pt.RandomCropClip(*crop_shape))
        elif crop_choice == 'Center':
            self.transforms.append(pt.CenterCropClip(*crop_shape))

        # Resize to the evaluation resolution, then convert to tensors.
        self.transforms.append(pt.ResizeClip(*resize_shape))
        self.transforms.append(pt.ToTensorClip())

    def __call__(self, input_data, bbox_data):
        """
        Run a clip and its bounding boxes through every transform in order.

        Args:
            input_data: List of PIL images containing clip frames
            bbox_data:  Numpy array containing bbox coordinates per object per frame

        Return:
            input_data: Pytorch tensor containing the processed clip data
            bbox_data:  Numpy tensor containing the augmented bbox coordinates
        """
        for xform in self.transforms:
            input_data, bbox_data = xform(input_data, bbox_data)

        return input_data, bbox_data





0 comments on commit c837df7

Please sign in to comment.