Commit 8670b36
Copied files to updated Ubuntu.
veritas9872 committed Jul 4, 2019
1 parent 5ca7655 commit 8670b36
Showing 50 changed files with 105 additions and 479 deletions.
Empty file modified LICENSE (100644 → 100755)
Empty file modified README.md (100644 → 100755)
Empty file modified conversion_script.py (100644 → 100755)
Empty file modified data/collate_functions.py (100644 → 100755)
Empty file modified data/data_transforms.py (100644 → 100755)
Empty file modified data/input_transforms.py (100644 → 100755)
Empty file modified data/mri_data.py (100644 → 100755)
Empty file modified data/output_transforms.py (100644 → 100755)
Empty file modified data/post_processing.py (100644 → 100755)
Empty file modified data/pre_processing.py (100644 → 100755)
Empty file modified data/test_data_transforms.py (100644 → 100755)
Empty file modified eval/create_submission_file.py (100644 → 100755)
Empty file modified eval/evaluate_recons.py (100644 → 100755)
Empty file modified eval/model_evaluator.py (100644 → 100755)
Empty file modified eval/save_reconstructions.py (100644 → 100755)
482 changes: 78 additions & 404 deletions main.py (100644 → 100755)

Large diffs are not rendered by default.

Empty file modified metrics/combination_losses.py (100644 → 100755)
Empty file modified metrics/custom_losses.py (100644 → 100755)
Empty file modified metrics/my_ssim.py (100644 → 100755)
Empty file modified metrics/ssim.py (100644 → 100755)
57 changes: 7 additions & 50 deletions models/ase_unet.py (100644 → 100755)
@@ -2,48 +2,7 @@
 from torch import nn
 import torch.nn.functional as F
 
-
-class ChannelAttention(nn.Module):
-    def __init__(self, num_chans, reduction=16, use_gap=True, use_gmp=True):
-        super().__init__()
-        self.gap = nn.AdaptiveAvgPool2d(output_size=(1, 1))  # Global Average Pooling.
-        self.gmp = nn.AdaptiveMaxPool2d(output_size=(1, 1))  # Global Maximum Pooling.
-
-        self.use_gap = use_gap
-        self.use_gmp = use_gmp
-
-        self.layer = nn.Sequential(
-            nn.Linear(in_features=num_chans, out_features=num_chans // reduction),
-            nn.ReLU(),
-            nn.Linear(in_features=num_chans // reduction, out_features=num_chans)
-        )
-
-        self.sigmoid = nn.Sigmoid()
-
-    def forward(self, tensor):
-        batch, chans, _, _ = tensor.shape
-        if self.use_gap and self.use_gmp:
-            gap = self.gap(tensor).view(batch, chans)
-            gmp = self.gmp(tensor).view(batch, chans)
-            # Maybe batch-norm the two pooling types to make their scales more similar.
-            # This might make training slower, however.
-            features = self.layer(gap) + self.layer(gmp)
-            att = self.sigmoid(features).view(batch, chans, 1, 1)
-
-        elif self.use_gap:
-            gap = self.gap(tensor).view(batch, chans)
-            features = self.layer(gap)
-            att = self.sigmoid(features).view(batch, chans, 1, 1)
-
-        elif self.use_gmp:
-            gmp = self.gmp(tensor).view(batch, chans)
-            features = self.layer(gmp)
-            att = self.sigmoid(features).view(batch, chans, 1, 1)
-
-        else:
-            att = 1
-
-        return tensor * att
+from models.attention import ChannelAttention
 
 
 class AsymmetricSignalExtractor(nn.Module):
Expand All @@ -60,12 +19,8 @@ def __init__(self, in_chans, out_chans, ext_chans, min_ext_size, max_ext_size, u
conv = nn.Conv2d(in_chans, ext_chans, kernel_size=1, bias=use_bias)
self.ext_layers.append(conv)

# I think that 3x3 kernels do not need the 1x3, 3x1 separation.
if min_ext_size <= 3 <= max_ext_size:
conv = nn.Conv2d(in_chans, ext_chans, kernel_size=3, padding=1, bias=use_bias)
self.ext_layers.append(conv)

min_ext_size = max(min_ext_size, 5)
min_ext_size = max(min_ext_size, 3)
# print(f'min_ext_size: {min_ext_size}') # For debugging
# The cases where the maximum size is smaller than 5 will automatically be dealt with by the for-loop.
for size in range(min_ext_size, max_ext_size + 1, 2):
# Left-right, then up-down. This is because of the sampling pattern.
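For intuition, the loop in the hunk above builds one extractor branch per odd kernel size. The following is a hedged sketch of what one iteration presumably appends, based only on the "left-right, then up-down" comment; the actual layer composition lies outside the displayed hunk, and the channel counts here are illustrative:

```python
import torch
from torch import nn

# Hypothetical sketch of one loop iteration: a 1xN ("left-right") convolution
# followed by an Nx1 ("up-down") convolution, padded to preserve spatial size.
in_chans, ext_chans, size, use_bias = 2, 32, 5, True
branch = nn.Sequential(
    nn.Conv2d(in_chans, ext_chans, kernel_size=(1, size), padding=(0, size // 2), bias=use_bias),
    nn.Conv2d(ext_chans, ext_chans, kernel_size=(size, 1), padding=(size // 2, 0), bias=use_bias),
)
out = branch(torch.randn(1, in_chans, 64, 64))
assert out.shape[2:] == (64, 64)  # spatial dimensions are preserved
```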
Expand Down Expand Up @@ -116,7 +71,7 @@ def forward(self, tensor):

class UnetASE(nn.Module):
def __init__(self, in_chans, out_chans, ext_chans, chans, num_pool_layers,
min_ext_size, max_ext_size, use_ext_bias=True):
min_ext_size, max_ext_size, use_ext_bias=True, use_att=True):

super().__init__()
self.extractor = AsymmetricSignalExtractor(
Expand All @@ -126,6 +81,7 @@ def __init__(self, in_chans, out_chans, ext_chans, chans, num_pool_layers,
self.pool = nn.AvgPool2d(2)
self.interp = Bilinear()
self.input_att = ChannelAttention(num_chans=chans, reduction=16, use_gap=True, use_gmp=True)
self.use_att = use_att
self.down_sample_layers = nn.ModuleList()
ch = chans

Expand All @@ -151,7 +107,8 @@ def forward(self, tensor):
stack = list()
output = self.extractor(tensor)
# Added channel attention to input layer after feature extraction, compression, and ReLU.
output = self.input_att(output)
if self.use_att:
output = self.input_att(output)
stack.append(output)
output = self.pool(output)

Expand Down
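This diff replaces the inline ChannelAttention class with an import, so the attention module is presumably shared across models from models/attention (that file's contents are not part of this commit). A minimal usage sketch, assuming the relocated class keeps the signature shown in the removed code:

```python
import torch
from models.attention import ChannelAttention  # assumed to hold the class removed above, unchanged

att = ChannelAttention(num_chans=64, reduction=16, use_gap=True, use_gmp=True)
x = torch.randn(2, 64, 32, 32)  # (batch, channels, height, width)
y = att(x)  # channel-wise sigmoid re-weighting; output shape matches the input
assert y.shape == x.shape
```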
Empty file modified models/fb_unet_model.py (100644 → 100755)
Empty file modified models/k_unet_model.py (100644 → 100755)
16 changes: 0 additions & 16 deletions models/ks_unet.py (100644 → 100755)
Expand Up @@ -29,22 +29,6 @@
# return outputs


class ChannelAttention(nn.Module):
def __init__(self):
super().__init__()
self.gap = nn.AdaptiveAvgPool2d(1)
self.gmp = nn.AdaptiveMaxPool2d(1)
self.sigmoid = nn.Sigmoid()

def forward(self, tensor):
gap = self.gap(tensor)
gmp = self.gmp(tensor)

# Maybe batch-norm the two pooling types to make their scales more similar.
att = self.sigmoid(gap + gmp)
return tensor * att


class AsymmetricSignalExtractor(nn.Module):
def __init__(self, in_chans, out_chans, ext_chans, min_ext_size, max_ext_size, use_bias=True):
super().__init__()
Expand Down
Empty file modified models/ksse_unet.py (100644 → 100755)
Empty file modified models/new_unet_model.py (100644 → 100755)
Empty file modified models/pp_unet_model.py (100644 → 100755)
Empty file modified models/unet_model.py (100644 → 100755)
Empty file modified requirements.txt (100644 → 100755)
Empty file modified tests/test_my_ssim.py (100644 → 100755)
Empty file modified tests/test_ssim.py (100644 → 100755)
Empty file modified train/README.md (100644 → 100755)
Empty file modified train/metrics.py (100644 → 100755)
Empty file modified train/model_trainers/model_trainer_C2C.py (100644 → 100755)
Empty file modified train/model_trainers/model_trainer_IMG.py (100644 → 100755)
Empty file modified train/model_trainers/model_trainer_K2C.py (100644 → 100755)
5 changes: 3 additions & 2 deletions train/model_trainers/model_trainer_K2CI.py (100644 → 100755)
Expand Up @@ -29,8 +29,9 @@ class ModelTrainerK2CI:
def __init__(self, args, model, optimizer, train_loader, val_loader,
input_train_transform, input_val_transform, output_transform, losses, scheduler=None):

# Allow multiple processes to access tensors on GPU.
multiprocessing.set_start_method(method='spawn')
# Allow multiple processes to access tensors on GPU. Add checking for multiple continuous runs.
if multiprocessing.get_start_method(allow_none=True) is None:
multiprocessing.set_start_method(method='spawn')

self.logger = get_logger(name=__name__, save_file=args.log_path / args.run_name)

Expand Down
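The guard matters because multiprocessing.set_start_method raises a RuntimeError if the start method has already been fixed, which happens when a second trainer is constructed in the same interpreter session. A stand-alone illustration of the pattern (not code from this repository):

```python
import multiprocessing

def ensure_spawn():
    # set_start_method raises RuntimeError on a second call,
    # so only set it if no start method has been chosen yet.
    if multiprocessing.get_start_method(allow_none=True) is None:
        multiprocessing.set_start_method('spawn')

ensure_spawn()
ensure_spawn()  # safe: the second call is a no-op
```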
Empty file modified train/model_trainers/model_trainer_K2I.py (100644 → 100755)
Empty file modified train/model_trainers/model_trainer_K2K.py (100644 → 100755)
Empty file modified train/processing.py (100644 → 100755)
Empty file modified train/subsample.py (100644 → 100755)
Empty file modified train/test_subsample.py (100644 → 100755)
Empty file modified train/trainer.py (100644 → 100755)
Empty file modified train/training.py (100644 → 100755)
Empty file modified train_img.py (100644 → 100755)
24 changes: 17 additions & 7 deletions train_k2ci.py (100644 → 100755)
Expand Up @@ -16,6 +16,16 @@
from metrics.combination_losses import L1CSSIM7


"""
Memo: I have found that there is a great deal of variation in performance when training.
Even under the same settings, the results can be extremely different when using small numbers of samples.
I believe that this is because of the large degree of variation in data quality in the dataset.
Therefore, demonstrating that one method works better than another requires using a large portion of the dataset.
However, this takes a lot of time...
Using small datasets for multiple runs may also prove useful.
"""


def train_img(args):

# Maybe move this to args later.
Expand Down Expand Up @@ -87,7 +97,7 @@ def train_img(args):

model = UnetASE(in_chans=data_chans, out_chans=data_chans, ext_chans=args.chans, chans=args.chans,
num_pool_layers=args.num_pool_layers, min_ext_size=args.min_ext_size,
max_ext_size=args.max_ext_size, use_ext_bias=args.use_ext_bias).to(device)
max_ext_size=args.max_ext_size, use_ext_bias=args.use_ext_bias, use_att=False).to(device)

optimizer = optim.Adam(model.parameters(), lr=args.init_lr)
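The new use_att=False argument disables the input-layer channel attention that models/ase_unet.py now gates behind self.use_att. A hedged stand-alone construction mirroring the call above; the numeric values are illustrative stand-ins for args.*, with only use_att=False taken from the diff:

```python
from models.ase_unet import UnetASE

# Hypothetical values standing in for args.*; min/max extractor sizes match
# the new hyperparameters further down in this file.
model = UnetASE(in_chans=2, out_chans=2, ext_chans=32, chans=32,
                num_pool_layers=4, min_ext_size=1, max_ext_size=15,
                use_ext_bias=True, use_att=False)  # skip ChannelAttention on the input features
```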

Expand Down Expand Up @@ -123,17 +133,17 @@ def train_img(args):
# Variables that occasionally change.
max_images=6, # Maximum number of images to save.
num_workers=1,
init_lr=1.E-3,
init_lr=1E-3,
gpu=1, # Set to None for CPU mode.
max_to_keep=0,
start_slice=10,

# Variables that change frequently.
sample_rate=0.02,
img_lambda=64,
num_epochs=30,
min_ext_size=3,
max_ext_size=11,
sample_rate=0.05,
img_lambda=8,
num_epochs=10,
min_ext_size=1,
max_ext_size=15,
verbose=False,
use_slice_metrics=True, # Using slice metrics causes a 30% increase in training time.
lr_red_epoch=20,
Expand Down
Empty file modified utils/arguments.py (100644 → 100755)
Empty file modified utils/model_summary.py (100644 → 100755)
Empty file modified utils/modelsummary.py (100644 → 100755)
Empty file modified utils/train_utils.py (100644 → 100755)
