Skip to content

Commit

Permalink
request
Browse files Browse the repository at this point in the history
  • Loading branch information
nilsleh committed Jan 27, 2025
1 parent 0c8d112 commit 88676c9
Show file tree
Hide file tree
Showing 4 changed files with 23 additions and 21 deletions.
4 changes: 2 additions & 2 deletions docs/api/datasets.rst
Original file line number Diff line number Diff line change
Expand Up @@ -217,8 +217,8 @@ BioMassters

.. autoclass:: BioMassters

BRIGHT DFC2025
^^^^^^^^^^^^^^
BRIGHT
^^^^^^

.. autoclass:: BRIGHTDFC2025

Expand Down
2 changes: 1 addition & 1 deletion docs/api/datasets/non_geo_datasets.csv
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ Dataset,Task,Source,License,# Samples,# Classes,Size (px),Resolution (m),Bands
`Benin Cashew Plantations`_,S,Airbus Pléiades,"CC-BY-4.0",70,6,"1,122x1,186",10,MSI
`BigEarthNet`_,C,Sentinel-1/2,"CDLA-Permissive-1.0","590,326",19--43,120x120,10,"SAR, MSI"
`BioMassters`_,R,Sentinel-1/2 and Lidar,"CC-BY-4.0",,,256x256, 10, "SAR, MSI"
`BRIGHT DFC2025`_,CD,"MAXAR, NAIP, Capella, Umbra","CC-BY-SA-4.0",3239,4,"0.1-1","RGB,SAR"
`BRIGHT`_,CD,"MAXAR, NAIP, Capella, Umbra","CC-BY-NC-4.0",3239,4,"0.1-1","RGB,SAR"
`CaBuAr`_,CD,Sentinel-2,"OpenRAIL",424,2,512x512,20,MSI
`CaFFe`_,S,"Sentinel-1, TerraSAR-X, TanDEM-X, ENVISAT, ERS-1/2, ALOS PALSAR, and RADARSAT-1","CC-BY-4.0","19092","2 or 4","512x512",6-20,"SAR"
`ChaBuD`_,CD,Sentinel-2,"OpenRAIL",356,2,512x512,10,MSI
Expand Down
12 changes: 6 additions & 6 deletions tests/datasets/test_bright.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,14 +32,14 @@ def dataset(
def test_getitem(self, dataset: BRIGHTDFC2025) -> None:
x = dataset[0]
assert isinstance(x, dict)
assert isinstance(x['pre_image'], torch.Tensor)
assert x['pre_image'].shape[0] == 3
assert isinstance(x['post_image'], torch.Tensor)
assert x['post_image'].shape[0] == 3
assert x['pre_image'].shape[-2:] == x['post_image'].shape[-2:]
assert isinstance(x['image_pre'], torch.Tensor)
assert x['image_pre'].shape[0] == 3
assert isinstance(x['image_post'], torch.Tensor)
assert x['image_post'].shape[0] == 3
assert x['image_pre'].shape[-2:] == x['image_post'].shape[-2:]
if dataset.split != 'test':
assert isinstance(x['mask'], torch.Tensor)
assert x['pre_image'].shape[-2:] == x['mask'].shape[-2:]
assert x['image_pre'].shape[-2:] == x['mask'].shape[-2:]

def test_len(self, dataset: BRIGHTDFC2025) -> None:
if dataset.split == 'train':
Expand Down
26 changes: 14 additions & 12 deletions torchgeo/datasets/bright.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,8 @@
class BRIGHTDFC2025(NonGeoDataset):
"""BRIGHT DFC2025 dataset.
The `BRIGHT <https://github.com/ChenHongruixuan/BRIGHT>`__ dataset consists of bi-temporal high-resolution multimodal images for
The `BRIGHT <https://github.com/ChenHongruixuan/BRIGHT>`__ dataset consists of bi-temporal
high-resolution multimodal images for
building damage assessment. The dataset is part of the 2025 IEEE GRSS Data Fusion Contest.
The pre-disaster images are optical images and the post-disaster images are SAR images, and
targets were manually annotated. The dataset is split into train, val, and test splits, but
Expand Down Expand Up @@ -64,7 +65,7 @@ class BRIGHTDFC2025(NonGeoDataset):

md5 = '2c435bb50345d425390eff59a92134ac'

url = 'https://huggingface.co/datasets/Kullervo/BRIGHT/resolve/50901f05db4acbd141e7c96d719d8317910498fb/dfc25_track2_trainval.zip'
url = 'https://huggingface.co/datasets/torchgeo/bright/resolve/d19972f5e682ad684dcde35529a6afad4c719f1b/dfc25_track2_trainval_with_split.zip'

data_dir = 'dfc25_track2_trainval'

Expand Down Expand Up @@ -126,17 +127,18 @@ def __getitem__(self, index: int) -> dict[str, Tensor]:
index: index to return
Returns:
data and target at that index
data and target at that index; the pre- and post-disaster
images are returned under separate image keys
"""
idx_paths = self.sample_paths[index]

pre_image = self._load_image(idx_paths['pre_image']).float()
post_image = self._load_image(idx_paths['post_image']).float()
image_pre = self._load_image(idx_paths['image_pre']).float()
image_post = self._load_image(idx_paths['image_post']).float()
# https://github.com/ChenHongruixuan/BRIGHT/blob/11b1ffafa4d30d2df2081189b56864b0de4e3ed7/dfc25_benchmark/dataset/make_data_loader.py#L101
# the single-channel SAR post image is repeated to 3 channels
post_image = repeat(post_image, 'c h w -> (repeat c) h w', repeat=3)
image_post = repeat(image_post, 'c h w -> (repeat c) h w', repeat=3)

sample = {'pre_image': pre_image, 'post_image': post_image}
sample = {'image_pre': image_pre, 'image_post': image_post}

if 'target' in idx_paths and self.split != 'test':
target = self._load_image(idx_paths['target']).long()
Expand All @@ -151,7 +153,7 @@ def _get_paths(self) -> list[dict[str, str]]:
"""Get paths to the dataset files based on specified splits.
Returns:
a list of dictionaries containing paths to the pre,post and target images
a list of dictionaries containing paths to the pre, post, and target images
"""
split_file = self.split_files[self.split]

Expand All @@ -166,14 +168,14 @@ def _get_paths(self) -> list[dict[str, str]]:

sample_paths = [
{
'pre_image': os.path.join(
'image_pre': os.path.join(
self.root,
self.data_dir,
dir_split_name,
'pre-event',
f'{sample_id.strip()}_pre_disaster.tif',
),
'post_image': os.path.join(
'image_post': os.path.join(
self.root,
self.data_dir,
dir_split_name,
Expand Down Expand Up @@ -290,10 +292,10 @@ def plot(

fig, axs = plt.subplots(nrows=1, ncols=ncols, figsize=(15, 5))

axs[0].imshow(sample['pre_image'].permute(1, 2, 0) / 255.0)
axs[0].imshow(sample['image_pre'].permute(1, 2, 0) / 255.0)
axs[0].axis('off')

axs[1].imshow(sample['post_image'].permute(1, 2, 0) / 255.0)
axs[1].imshow(sample['image_post'].permute(1, 2, 0) / 255.0)
axs[1].axis('off')

cmap = colors.ListedColormap(self.colormap)
Expand Down

0 comments on commit 88676c9

Please sign in to comment.