# BraTS.py — forked from icerain-alt/brats-unet (151 lines, 4.79 KB)
import os
import torch
from torch.utils.data import Dataset
import random
import numpy as np
from torchvision.transforms import transforms
import h5py
class RandomCrop(object):
    """Randomly crop a sub-volume out of the image/label pair in a sample.

    Args:
        output_size (tuple[int, int, int]): Desired spatial output size (W, H, D).
    """
    def __init__(self, output_size):
        self.output_size = output_size

    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        # image is (C, W, H, D) and label is (W, H, D) — inferred from the slicing below.
        (c, w, h, d) = image.shape
        # `+ 1` makes the upper bound inclusive: np.random.randint raises
        # ValueError when low == high, so volumes whose size exactly equals
        # the crop size used to crash. Offset w - output_size is a valid crop.
        w1 = np.random.randint(0, w - self.output_size[0] + 1)
        h1 = np.random.randint(0, h - self.output_size[1] + 1)
        d1 = np.random.randint(0, d - self.output_size[2] + 1)
        label = label[w1:w1 + self.output_size[0], h1:h1 + self.output_size[1], d1:d1 + self.output_size[2]]
        image = image[:, w1:w1 + self.output_size[0], h1:h1 + self.output_size[1], d1:d1 + self.output_size[2]]
        return {'image': image, 'label': label}
class CenterCrop(object):
    """Crop the central ``output_size`` region out of the image/label pair."""
    def __init__(self, output_size):
        self.output_size = output_size

    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        _, width, height, depth = image.shape
        ow, oh, od = self.output_size
        # Offsets that centre the crop window, rounded to the nearest voxel.
        x0 = int(round((width - ow) / 2.))
        y0 = int(round((height - oh) / 2.))
        z0 = int(round((depth - od) / 2.))
        cropped_label = label[x0:x0 + ow, y0:y0 + oh, z0:z0 + od]
        cropped_image = image[:, x0:x0 + ow, y0:y0 + oh, z0:z0 + od]
        return {'image': cropped_image, 'label': cropped_label}
class RandomRotFlip(object):
    """Apply a random 90-degree rotation and a random flip to a sample.

    The same transform is applied to the channel-first image and to the
    label so they stay spatially aligned.
    """
    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        # Rotate every channel by the same number of quarter turns in the
        # first two spatial axes.
        quarter_turns = np.random.randint(0, 4)
        rotated_channels = [np.rot90(channel, quarter_turns) for channel in image]
        image = np.stack(rotated_channels, axis=0)
        label = np.rot90(label, quarter_turns)
        # Flip along one random spatial axis: 1..3 on the image maps to
        # 0..2 on the channel-less label.
        flip_axis = np.random.randint(1, 4)
        image = np.flip(image, axis=flip_axis).copy()
        label = np.flip(label, axis=flip_axis - 1).copy()
        return {'image': image, 'label': label}
def augment_gaussian_noise(data_sample, noise_variance=(0, 0.1)):
    """Add zero-mean Gaussian noise to *data_sample* and return the result.

    The noise standard deviation is drawn uniformly from *noise_variance*;
    a degenerate interval (low == high) uses that exact value without
    consuming a random draw.
    """
    low, high = noise_variance
    sigma = low if low == high else random.uniform(low, high)
    noise = np.random.normal(0.0, sigma, size=data_sample.shape)
    return data_sample + noise
class GaussianNoise(object):
    """With probability *p*, corrupt the sample's image with Gaussian noise."""
    def __init__(self, noise_variance=(0, 0.1), p=0.5):
        self.prob = p
        self.noise_variance = noise_variance

    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        # One uniform draw decides whether this particular sample gets noise.
        apply_noise = np.random.uniform() < self.prob
        if apply_noise:
            image = augment_gaussian_noise(image, self.noise_variance)
        return {'image': image, 'label': label}
class ToTensor(object):
    """Convert the numpy arrays in a sample to torch tensors (float image, long label)."""
    def __call__(self, sample):
        return {
            'image': torch.from_numpy(sample['image']).float(),
            'label': torch.from_numpy(sample['label']).long(),
        }
class BraTS(Dataset):
    """BraTS dataset backed by one HDF5 file ('image'/'label' datasets) per case.

    Args:
        data_path: Directory containing the .h5 case files.
        file_path: Text file listing one case filename per line.
        transform: Optional callable applied to {'image', 'label'} samples.
    """
    def __init__(self, data_path, file_path, transform=None):
        with open(file_path, 'r') as f:
            self.paths = [os.path.join(data_path, x.strip()) for x in f.readlines()]
        self.transform = transform

    def __getitem__(self, item):
        # Fixed: removed an unresolved git merge conflict (<<<<<<< / >>>>>>>)
        # and the stray debug print it was fighting over; the HDF5 handle is
        # now closed deterministically via a context manager instead of leaking.
        with h5py.File(self.paths[item], 'r') as h5f:
            image = h5f['image'][:]
            label = h5f['label'][:]
        sample = {'image': image, 'label': label}
        if self.transform:
            sample = self.transform(sample)
        return sample['image'], sample['label']

    def __len__(self):
        return len(self.paths)

    def collate(self, batch):
        # Concatenate per-sample (image, label) tuples along dim 0.
        return [torch.cat(v) for v in zip(*batch)]
if __name__ == '__main__':
    from torchvision import transforms

    # Fixed: removed an unresolved git merge conflict; kept the anonymized
    # placeholder paths rather than the machine-specific absolute paths.
    # Replace the /***/ prefix with your local BraTS2021 location.
    data_path = "/***/data_set/BraTS2021/dataset"
    test_txt = "/***/data_set/BraTS2021/test.txt"
    test_set = BraTS(data_path, test_txt, transform=transforms.Compose([
        RandomRotFlip(),
        RandomCrop((160, 160, 128)),
        GaussianNoise(p=0.1),
        ToTensor()
    ]))
    # Smoke-test a single sample.
    image, label = test_set[0]
    print(image.shape)
    print(label.shape)
    print(np.unique(label))