Merge pull request #11 from FireShadow05/main
Final Changes
mr-sarthakgupta authored May 26, 2023
2 parents 63012d8 + 250f6f1 commit 89d20bd
Showing 6 changed files with 95 additions and 57 deletions.
65 changes: 54 additions & 11 deletions app.py
@@ -8,19 +8,26 @@
from packaging_class import *
from datetime import datetime
import torch
+from train_denoiser import denoiser
+from integrate_dsrs_denoiser import DenoisedModel
+import utils_image as util
+import numpy as np

app = Flask(__name__)

async def calculate_dsrs_radius(
    denoised_model,
+   de,
    form_used: dict,
    sigma = 0.5,
    Distributon_type = "general-gaussian",
    training = "mnist",
    k = 380,
    N = 100000,
+   in_nc = 3,
    Alpha = 0.05,
    batch = 400,
+   img_size = (32,32)
):
    '''
@@ -37,11 +44,12 @@ async def calculate_dsrs_radius(
    8. Alpha : default = 0.05
    9. batch: default = 400
    '''

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    if form_used["dataset"][0] == "mnist":
        d = 784
        k = 380
        num_classes = 10
+       img_size = (28,28)
    elif form_used["dataset"][0] == "cifar":
        d = 3*32*32
        k = 1530
@@ -65,18 +73,40 @@
        dist1 = "gaussian"
        dist2 = "gaussian"

-   classifier = torch.load(denoised_model, map_location=torch.device('cuda' if torch.cuda.is_available() else "cpu"))
-   print(classifier)
-   secure_model = FinishedModel(classifier, d, k, num_classes, dist1, dist2, float(form_used['sigma'][0]), float(sigma_q), float(form_used['alpha'][0]), num_sampling_min = 100)
-   x = torch.randn((28, 28)).float()
+   final_model = DenoisedModel(de, denoised_model, img_size)
+
+   secure_model = FinishedModel(final_model, d, k, num_classes, dist1, dist2, float(form_used['sigma'][0]), float(sigma_q), float(form_used['alpha'][0]), num_sampling_min = 100)
+   noise_level_img = 15
+   noise_level_model = 15
+   y = torch.randn((32, 32, in_nc)).float()
+   img_L = util.uint2single(y)
+   img_L += np.random.normal(0, noise_level_img/255., img_L.shape)
+   img_L = util.single2tensor4(img_L)
+   img_L = torch.cat((img_L, torch.FloatTensor([noise_level_model/255.]).repeat(1, 1, img_L.shape[2], img_L.shape[3])), dim=1)
+   x = img_L.to(device)
    label = secure_model.label_inference_without_certification(x, int(form_used['N'][0]), 0.01, batch_size = int(form_used['batch_size'][0]))
    logits_old = secure_model.logits_inference_without_certification(x, int(form_used['N'][0]), 0.01, batch_size = int(form_used['batch_size'][0]))
    logits, r = secure_model.inference_and_certification(x, int(form_used['N'][0]), 0.01, batch_size = int(form_used['batch_size'][0]))
    model_id = form_used["model_id"]
-   final_path = f"/final_model_weights/final_model_{model_id}"
-   torch.save(final_model, final_path)
+   final_path = f"final_model_weights/final_model_{model_id}.pth"
+   torch.save(secure_model, final_path)
    return r

+async def train_denoiser(
+   epochs = 1,
+   in_nc = 4,
+   out_nc = 3,
+   nc = [64,128,256,512],
+   nb = 4,
+   pth = None,
+   model_path = None
+):
+   de = denoiser(in_nc = in_nc, out_nc = out_nc, nc = nc, nb = nb)
+   if model_path:
+       de.ld(model_path)
+   de.train_drunet(epochs, pth)
+   return de.drunet.netG

models = [{
    "name":"test1",
    "file_name":"test.pt",
@@ -126,13 +156,26 @@ def calculate_denoised_form():
    print(request.form.get("model_id"))
    model_id = int(request.form.get("model_id"))
    model_dict = models[model_id]

    print("flag1")
    # TODO: Add option to train denoisers
    # p1 = subprocess.Popen(['python', 'train.py','-e', '1', '-n', f'{denoised_model}'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # p1 = subprocess.Popen(['python', 'train.py','--epochs', '1', '--in_nc','2','--out_nc','1','--model_name',f'{model_id}','--dataset','mnist/testSample'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # print("flag")
    # out1, err1 = p1.communicate()
    # print(out1)
    # if err1:
    #     error = 'An error occurred while executing the scripts'
    #     return jsonify({'error': error})
    # print("flag2")
    asyncio.set_event_loop(asyncio.new_event_loop())
    loop = asyncio.get_event_loop()

+   # For now images in trainsets/trainH will be used for training of the denoiser
+   in_nc = 4
+   out_nc = 3
+   if model_dict["dataset"][0] == "mnist":
+       in_nc = 2
+       out_nc = 1
+   de = loop.run_until_complete(train_denoiser(in_nc = in_nc, out_nc = out_nc))
    # Execute the second Bash script

    denoised_model = f"models/{model_dict['file_name']}"
@@ -143,7 +186,7 @@ def calculate_denoised_form():
    # sample results
    asyncio.set_event_loop(asyncio.new_event_loop())
    loop = asyncio.get_event_loop()
-   r = loop.run_until_complete(calculate_dsrs_radius(denoised_model, model_dict))
+   r = loop.run_until_complete(calculate_dsrs_radius(denoised_model, de, model_dict, in_nc = out_nc))
    results = [
        {"confidence Radius": r},
    ]
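For readers tracing the new flow end to end: the route above trains a DRUNet denoiser, wraps it around the stored classifier via DenoisedModel, and certifies the wrapped model. A minimal sketch of the same sequence outside Flask, assuming the train_denoiser and calculate_dsrs_radius signatures from this diff and a hypothetical form_used dict carrying the fields they read:

import asyncio

# Hypothetical request fields mirroring what calculate_dsrs_radius reads.
form_used = {
    "dataset": ["mnist"],
    "sigma": [0.5],
    "alpha": [0.05],
    "N": [100000],
    "batch_size": [400],
    "model_id": 0,
}

loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)

# MNIST is single-channel: the denoiser takes 2 input channels
# (image + noise-level map) and emits 1, matching the route above.
de = loop.run_until_complete(train_denoiser(in_nc=2, out_nc=1))

# The route passes out_nc as in_nc here because the certifier samples
# images with the denoiser's output channel count.
r = loop.run_until_complete(
    calculate_dsrs_radius("models/test.pt", de, form_used, in_nc=1))
print("certified radius:", r)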
Expand Down
5 changes: 4 additions & 1 deletion data/dataset_fdncnn.py
@@ -4,7 +4,7 @@
import cv2
import torch.utils.data as data
import utils_image as util

+from torchvision import transforms

class DatasetFDnCNN(data.Dataset):
"""
@@ -36,6 +36,7 @@ def __getitem__(self, index):
        # -------------------------------------
        # get H image
        # -------------------------------------
+       transform = transforms.Resize((32,32))
        H_path = self.paths_H[index]
        img_H = util.imread_uint(H_path, self.n_channels)
        # cv2.imshow(img_H)
@@ -68,6 +69,7 @@ def __getitem__(self, index):
# HWC to CHW, numpy(uint) to tensor
# ---------------------------------
img_H = util.uint2tensor3(patch_H)
img_H = transform(img_H)
img_L = img_H.clone()

# ---------------------------------
@@ -91,6 +93,7 @@ def __getitem__(self, index):
            # --------------------------------
            """
            img_H = util.uint2single(img_H)
+           img_H = transform(img_H)
            img_L = np.copy(img_H)
            np.random.seed(seed=0)
            img_L += np.random.normal(0, self.sigma_test/255.0, img_L.shape)
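For context on where the new Resize call sits: torchvision's Resize accepts CHW tensors (or PIL images), which is why the training branch applies it right after uint2tensor3 has converted the HWC uint8 patch. A quick shape sketch, with illustrative sizes:

import torch
from torchvision import transforms

transform = transforms.Resize((32, 32))
patch = torch.rand(1, 28, 28)   # a single-channel CHW patch
resized = transform(patch)
print(resized.shape)            # torch.Size([1, 32, 32])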
26 changes: 12 additions & 14 deletions smooth.py
@@ -4,7 +4,7 @@
from math import ceil
from statsmodels.stats.proportion import proportion_confint
from torchvision.transforms import RandomCrop
-#from icecream import ic
+from icecream import ic

from distribution import Distribution

@@ -17,9 +17,8 @@ def sample_noise(model: torch.nn.Module, x: torch.tensor, dist: Distribution, nu
    :param batch_size:
    :return: an ndarray[int] of length num_classes containing the per-class counts
    """
-   model.to('cuda' if torch.cuda.is_available() else 'cpu')
-   if torch.cuda.is_available():
-       x = x.cuda()
+   model.to('cuda')
+   x = x.cuda()
    tot = num
    random_cropper = RandomCrop((x[0].shape[2], x[0].shape[3]))
    with torch.no_grad():
@@ -31,25 +30,25 @@
            this_batch_size = min(batch_size, num)
            num -= this_batch_size*num_crop
            batch = x.repeat((this_batch_size, 1, 1, 1))
-           noise = dist.sample(this_batch_size, cuda=True if torch.cuda.is_available() else False)
-           noise = torch.tensor(noise, device='cuda' if torch.cuda.is_available() else 'cpu').resize_as(batch)
+           noise = dist.sample(this_batch_size, cuda=True)
+           noise = torch.tensor(noise, device='cuda').resize_as(batch)
            for j in range(num_crop):
                predictions = model(random_cropper(batch) + noise).argmax(1)
                counts += np.sum(torch.squeeze(predictions).item() == label)
            # if counts <= 1000 and tot - num >= 4000:
            #     break
    return counts, tot - num

-def get_logits(model: torch.nn.Module, x: torch.tensor, dist: Distribution, num: int, num_classes: int, batch_size: int, num_crop: int = 5):
+def get_logits(model: torch.nn.Module, x: torch.tensor, dist: Distribution, num: int, num_classes: int, batch_size: int, num_crop: int = 1):
""" Sample the base classifier's prediction under noisy corruptions of the input x.
:param x: the input [channel x width x height]
:param num: number of samples to collect
:param batch_size:
:return: an ndarray[int] of length num_classes containing the per-class counts
"""
model.to('cuda' if torch.cuda.is_available() else 'cpu')
if torch.cuda.is_available():
x = x.cuda()
model.to('cuda')
x = x.cuda()
tot = num
random_cropper = RandomCrop((x.shape[-2], x.shape[-1]))
with torch.no_grad():
@@ -60,10 +59,9 @@ def get_logits(model: torch.nn.Module, x: torch.tensor, dist: Distribution, num:
            this_batch_size = min(batch_size, num)
            num -= this_batch_size*num_crop
            batch = x.repeat((this_batch_size, 1, 1, 1))
-           noise = dist.sample(this_batch_size, cuda=True if torch.cuda.is_available() else False)
-           noise = torch.tensor(noise, device='cuda' if torch.cuda.is_available() else "cpu").resize_as(batch)
+           print(this_batch_size)
            for j in range(num_crop):
-               predictions = model(random_cropper(batch) + noise).argmax(1)
+               predictions = model(random_cropper(batch)).argmax(1)
                for idx in predictions:
                    counts[idx.item()] += 1
    return counts, tot - num
@@ -167,7 +165,7 @@ def _sample_noise(self, x: torch.tensor, num: int, batch_size) -> np.ndarray:
                num -= this_batch_size

                batch = x.repeat((this_batch_size, 1, 1, 1))
-               noise = torch.randn_like(batch, device='cuda' if torch.cuda.is_available() else "cpu") * self.sigma
+               noise = torch.randn_like(batch, device='cuda') * self.sigma
                predictions = self.base_classifier(batch + noise).argmax(1)
                counts += self._count_arr(predictions.cpu().numpy(), self.num_classes)
            return counts
40 changes: 18 additions & 22 deletions tests.py
@@ -51,31 +51,27 @@ def test_bunk_radius(self):
        r2 = self.model.bunk_radius_calc(full_info, self.model.dist_name_2, self.model.num_dims, self.model.k, self.model.std_1, self.model.std_2, 'precise')
        self.assertGreater(r2,r1)

-   def test_iss_dsrs(self):
+   def denoiser(self):
+       transform = transforms.Resize((28,28))
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        num_sampling = 20000
        batch_size = 64
        fractional_loss = 0.02
-       nA_1_1, realN_1_1 = smooth.get_logits(model = self.model.denoised_model, x = self.x, dist = self.model.dist_1, num = self.model.num_sampling_min, num_classes = self.model.num_classes, batch_size = batch_size)
-       p1low_1, p1high_1 = smooth.confidence_bound(nA_1_1[nA_1_1.argmax().item()].item(), realN_1_1, self.model.alpha)
-       num_opt = self.model.get_opt_num_sampling(p1low_1, p1high_1, num_sampling, fractional_loss, batch_size, self.model.std_1, self.model.alpha)
-       nA_1_2, realN_1_2 = smooth.get_logits(model = self.model.denoised_model, x = self.x, dist = self.model.dist_1, num = num_opt*batch_size - self.model.num_sampling_min, num_classes = self.model.num_classes, batch_size = batch_size)
-       nA_1 = nA_1_1 + nA_1_2
-       realN_1 = realN_1_1 + realN_1_2
-       p1low_1, p1high_1 = smooth.confidence_bound(nA_1[nA_1.argmax().item()].item(), realN_1, self.model.alpha)
-       nA_2, realN_2 = smooth.get_logits(model = self.model.denoised_model, x = self.x, dist = self.model.dist_2, num = num_opt*batch_size, num_classes = self.model.num_classes, batch_size = batch_size)
-       p2low_2, p2high_2 = smooth.confidence_bound(nA_2[nA_2.argmax().item()].item(), realN_2, self.model.alpha)
-       r1, now_time = self.model.orig_radius_pool_func(p1low_1, self.model.dist_1)
-       full_info = [0, r1, p1low_1, p1high_1, [[p2low_2, p2high_2]]]
-       r2 = self.model.bunk_radius_calc(full_info, self.model.dist_name_2, self.model.num_dims, self.model.k, self.model.std_1, self.model.std_2, 'precise')
-       nA1_1, realN_1_1 = smooth.get_logits(model = self.model.denoised_model, x = self.x, dist = self.model.dist_1, num = self.model.num_sampling_min, num_classes = self.model.num_classes, batch_size = batch_size)
-       nA2_1, realN_2_1 = smooth.get_logits(model = self.model.denoised_model, x = self.x, dist = self.model.dist_2, num = self.model.num_sampling_min, num_classes = self.model.num_classes, batch_size = batch_size)
-       p1low_1, p1high_1 = smooth.confidence_bound(nA1_1[nA1_1.argmax().item()].item(), realN_1_1, self.model.alpha)
-       p2low_2, p2high_2 = smooth.confidence_bound(nA2_1[nA2_1.argmax().item()].item(), realN_2_1, self.model.alpha)
-       r, now_time = self.model.orig_radius_pool_func(p1low_1, self.model.dist_1)
-       full_info = [0, r, p1low_1, p1high_1, [[p2low_2, p2high_2]]]
-       r3 = self.model.bunk_radius_calc(full_info, self.model.dist_name_2, self.model.num_dims, self.model.k, self.model.std_1, self.model.std_2, 'precise')
-       self.assertGreater(r2,r3)

+       model = self.model
+       de = denoiser(in_nc=2, out_nc=1, nc=[64,128,256,512], nb=4)
+       de.ld('drunet_gray.pth')
+       noise_level_img = 15
+       noise_level_model = 15
+       y = torch.randn((32, 32, 1)).float()
+       img_L = util.uint2single(y)
+       img_L += np.random.normal(0, noise_level_img/255., img_L.shape)
+       img_L = util.single2tensor4(img_L)
+       x = img_L.to(device)
+       img_L = torch.cat((img_L, torch.FloatTensor([noise_level_model/255.]).repeat(1, 1, img_L.shape[2], img_L.shape[3])), dim=1)
+       x_denoised = de.drunet.netG.forward(img_L)
+       _, r1 = model.inference_and_certification(transform(x), num_sampling, fractional_loss, batch_size)
+       _, r2 = model.inference_and_certification(transform(x_denoised), num_sampling, fractional_loss, batch_size)
+       self.assertGreater(r2,r1)

if __name__ == "__main__":
    warnings.filterwarnings("ignore")
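The torch.cat in this test (and the matching one in app.py) builds the DRUNet-style input: the noisy image plus one extra channel encoding the assumed noise level. A shape sketch of that construction, with illustrative sizes:

import torch

noise_level_model = 15
img_L = torch.rand(1, 1, 32, 32)   # N x C x H x W, grayscale
level_map = torch.FloatTensor([noise_level_model / 255.]).repeat(
    1, 1, img_L.shape[2], img_L.shape[3])
model_input = torch.cat((img_L, level_map), dim=1)
print(model_input.shape)           # torch.Size([1, 2, 32, 32])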
12 changes: 4 additions & 8 deletions train.py
@@ -9,7 +9,8 @@
# python3 train.py --epochs --dataset --input_dim --output_dim --model_name --model_path --no_of_channels --hidden_state_dim

# Training Objective
-parser.add_argument('--epochs', type=int, help="Number of epochs")
+parser.add_argument('--epochs', type=int,
+                    help="Number of epochs")

# Dataset
parser.add_argument('--dataset', type=str, help='path to dataset of choice')
@@ -21,7 +22,7 @@
parser.add_argument('--model_path', type=str, help="path to model")

# Setting
-parser.add_argument('--nc', type=str, help='number of channels (input array, example input format: "265340,268738,270774,270817") ')
+parser.add_argument('--nc', type=str, help='input should look like "265340,268738,270774,270817" ')
parser.add_argument('--h', type=int, help='dimensions of hidden state')

args = parser.parse_args()
@@ -34,9 +35,4 @@
if args.model_path:
    de.ld(args.model_path)
de.train_drunet(args.epochs, args.dataset)
-de.drunet.save(args.model_name)
-# <<<<<<< dev
-# de.drunet.save(args.model_name)
-# =======
-# de.drunet.save(args.model_name)
-# >>>>>>> main
+de.drunet.save(args.model_name)
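The commented-out subprocess calls in app.py hint at how this script is meant to be driven; a hypothetical invocation matching the flags defined above (the dataset path and names are placeholders):

import subprocess

subprocess.run([
    "python", "train.py",
    "--epochs", "1",
    "--dataset", "trainsets/trainH",
    "--model_name", "my_drunet",
    "--nc", "64,128,256,512",
])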
4 changes: 3 additions & 1 deletion train_denoiser.py
@@ -9,6 +9,7 @@
import torch
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
+from torchvision import transforms


from extra import utils_logger
@@ -81,6 +82,7 @@ def __init__(self, json_path='options/train_drunet.json', in_nc=None, out_nc=None,
        # init_iter_optimizerG, init_path_optimizerG = option.find_last_checkpoint(opt['path']['models'], net_type='optimizerG')
        # opt['path']['pretrained_optimizerG'] = init_path_optimizerG
        # self.current_step = max(init_iter_G, init_iter_optimizerG)
+       self.current_step = 0

        # border = opt['scale']
        # # --<--<--<--<--<--<--<--<--<--<--<--<--<-
@@ -132,7 +134,7 @@ def train_drunet(self, epochs=1000, pth=None, batch_size = 64, num_workers = 8):
        # 2) create_dataloader for train and test
        # ----------------------------------------
        if pth is not None:
-           train_set = FDnCNNdata(dataroot_H=pth)
+           train_set = FDnCNNdata(dataroot_H=pth, n_c = self.opt['netG']['in_nc']-1)
            train_size = int(math.ceil(len(train_set) / batch_size))
            print(train_size)
            if self.opt['dist']:
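A note on the new n_c argument: one of the network's in_nc input channels is the noise-level map added at batch time, so the dataset presumably has to yield images with one channel fewer. A sketch of the relationship, assuming the FDnCNNdata signature used above and the batch_size/num_workers defaults from train_drunet:

from torch.utils.data import DataLoader

in_nc = 2   # grayscale image + noise-level map
train_set = FDnCNNdata(dataroot_H="trainsets/trainH", n_c=in_nc - 1)
train_loader = DataLoader(train_set, batch_size=64, shuffle=True,
                          num_workers=8)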
