-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathgen_diff1.py
161 lines (133 loc) · 9.11 KB
/
gen_diff1.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
'''
usage: python gen_diff1.py -h
'''
from __future__ import print_function
import argparse
#from scipy.misc import imsave
from imageio import imsave
from driving_models import *
from utils import *
from keras.preprocessing.image import *
# read the parameter
# argument parsing
def _coord_pair(value):
    """Parse an 'x,y' or '(x,y)' CLI string into an (int, int) tuple.

    argparse only applies `type=` to command-line strings, so the tuple
    defaults below are passed through untouched.
    """
    parts = value.strip('()').split(',')
    return tuple(int(p) for p in parts)


parser = argparse.ArgumentParser(
    description='Main function for difference-inducing input generation in Driving dataset')
# BUGFIX: default was the list ['blackout'], which never compares equal to the
# string choices tested later ('light'/'occl'/'blackout'), so no transformation
# constraint was ever applied on a default run.
parser.add_argument('--transformation', help="realistic transformation type",
                    default='blackout', choices=['light', 'occl', 'blackout'])
parser.add_argument('--weight_diff', help="weight hyperparm to control differential behavior", default=0.2, type=float)
parser.add_argument('--weight_nc', help="weight hyperparm to control neuron coverage", default=0.2, type=float)
parser.add_argument('--step', help="step size of gradient descent", default=10.0, type=float)
parser.add_argument('--seeds', help="number of seeds of input", default=1, type=int)
parser.add_argument('--grad_iterations', help="number of iterations of gradient descent", default=10, type=int)
parser.add_argument('--threshold', help="threshold for determining neuron activated", default=0.3, type=float)
parser.add_argument('-t', '--target_model', help="target model that we want it predicts differently",
                    choices=[0, 1, 2], default=0, type=int)
# BUGFIX: type=tuple would split a CLI argument like "50,50" into single
# characters; _coord_pair parses it into an integer pair instead.
parser.add_argument('-sp', '--start_point', help="occlusion upper left corner coordinate",
                    default=(0, 0), type=_coord_pair)
parser.add_argument('-occl_size', '--occlusion_size', help="occlusion size",
                    default=(50, 50), type=_coord_pair)
args = parser.parse_args()
# input image dimensions
img_rows, img_cols = 100, 100
input_shape = (img_rows, img_cols, 3)
# define input tensor as a placeholder
input_tensor = Input(shape=input_shape)
# load multiple models sharing same input tensor
# learning phase 0 = inference mode (disables dropout/batch-norm training behavior)
K.set_learning_phase(0)
model1 = Dave_orig(input_tensor=input_tensor, load_weights=True)
model2 = Dave_norminit(input_tensor=input_tensor, load_weights=True)
model3 = Dave_dropout(input_tensor=input_tensor, load_weights=True)
# init coverage table
# one dict per model mapping (layer, neuron index) -> covered flag
# (exact structure defined by init_coverage_tables in utils — see that module)
model_layer_dict1, model_layer_dict2, model_layer_dict3 = init_coverage_tables(model1, model2, model3)
# ==============================================================================================
# start gen inputs
# collect all .jpg seed images from the local final_images directory
# NOTE(review): assumes ./final_images exists and is non-empty — confirm
img_paths = image.list_pictures('./final_images', ext='jpg')
def _report_coverage():
    """Print per-model covered-neuron stats and the averaged coverage ratio."""
    print(bcolors.OKGREEN + 'covered neurons percentage %d neurons %.3f, %d neurons %.3f, %d neurons %.3f'
          % (len(model_layer_dict1), neuron_covered(model_layer_dict1)[2], len(model_layer_dict2),
             neuron_covered(model_layer_dict2)[2], len(model_layer_dict3),
             neuron_covered(model_layer_dict3)[2]) + bcolors.ENDC)
    # averaged coverage = total covered neurons / total neurons over all three models
    averaged_nc = (neuron_covered(model_layer_dict1)[0] + neuron_covered(model_layer_dict2)[0] +
                   neuron_covered(model_layer_dict3)[0]) / float(
        neuron_covered(model_layer_dict1)[1] + neuron_covered(model_layer_dict2)[1] +
        neuron_covered(model_layer_dict3)[1])
    print(bcolors.OKGREEN + 'averaged covered neurons %.3f' % averaged_nc + bcolors.ENDC)


# Defensive normalization: tolerate a list-valued --transformation default
# (the historical default was ['blackout']) so the constraint branches below
# always see a plain string.
transformation = args.transformation[0] if isinstance(args.transformation, (list, tuple)) else args.transformation

for _ in range(args.seeds):
    gen_img = preprocess_image(random.choice(img_paths))
    orig_img = gen_img.copy()
    # first check if input already induces differences
    angle1, angle2, angle3 = model1.predict(gen_img)[0], model2.predict(gen_img)[0], model3.predict(gen_img)[0]
    if angle_diverged(angle1, angle2, angle3):
        print(bcolors.OKGREEN + 'input already causes different outputs: {}, {}, {}'.format(angle1, angle2, angle3) + bcolors.ENDC)
        # update neuron coverage for all three models with this input
        update_coverage(gen_img, model1, model_layer_dict1, args.threshold)
        update_coverage(gen_img, model2, model_layer_dict2, args.threshold)
        update_coverage(gen_img, model3, model_layer_dict3, args.threshold)
        _report_coverage()
        gen_img_deprocessed = deprocess_image(gen_img)
        # save the result to disk
        # NOTE(review): assumes ./generated_final_images/ already exists — confirm
        imsave('./generated_final_images/' + 'already_differ_' + str(angle1) + '.png', gen_img_deprocessed)
        continue

    # all turning angles are roughly the same: try to force them apart.
    # pick one not-yet-covered neuron per model (layer name + channel index)
    layer_name1, index1 = neuron_to_cover(model_layer_dict1)
    layer_name2, index2 = neuron_to_cover(model_layer_dict2)
    layer_name3, index3 = neuron_to_cover(model_layer_dict3)

    # construct joint loss: minimize the target model's output while
    # maximizing the other two, weighted by --weight_diff
    if args.target_model == 0:
        loss1 = -args.weight_diff * K.mean(model1.get_layer('before_prediction').output[..., 0])
        loss2 = K.mean(model2.get_layer('before_prediction').output[..., 0])
        loss3 = K.mean(model3.get_layer('before_prediction').output[..., 0])
    elif args.target_model == 1:
        loss1 = K.mean(model1.get_layer('before_prediction').output[..., 0])
        loss2 = -args.weight_diff * K.mean(model2.get_layer('before_prediction').output[..., 0])
        loss3 = K.mean(model3.get_layer('before_prediction').output[..., 0])
    elif args.target_model == 2:
        loss1 = K.mean(model1.get_layer('before_prediction').output[..., 0])
        loss2 = K.mean(model2.get_layer('before_prediction').output[..., 0])
        loss3 = -args.weight_diff * K.mean(model3.get_layer('before_prediction').output[..., 0])
    # neuron-coverage terms: mean activation of each selected neuron
    loss1_neuron = K.mean(model1.get_layer(layer_name1).output[..., index1])
    loss2_neuron = K.mean(model2.get_layer(layer_name2).output[..., index2])
    loss3_neuron = K.mean(model3.get_layer(layer_name3).output[..., index3])
    layer_output = (loss1 + loss2 + loss3) + args.weight_nc * (loss1_neuron + loss2_neuron + loss3_neuron)
    # for adversarial image generation
    final_loss = K.mean(layer_output)
    # gradient of the joint loss with respect to the input picture
    grads = normalize(K.gradients(final_loss, input_tensor)[0])
    # this function returns the losses and grads given the input picture
    iterate = K.function([input_tensor], [loss1, loss2, loss3, loss1_neuron, loss2_neuron, loss3_neuron, grads])

    # run gradient ascent on the input image to generate an adversarial example
    for iters in range(args.grad_iterations):
        loss_value1, loss_value2, loss_value3, loss_neuron1, loss_neuron2, loss_neuron3, grads_value = iterate(
            [gen_img])
        # constrain the perturbation so it matches a realistic transformation
        if transformation == 'light':
            grads_value = constraint_light(grads_value)
        elif transformation == 'occl':
            grads_value = constraint_occl(grads_value, args.start_point, args.occlusion_size)
        elif transformation == 'blackout':
            grads_value = constraint_black(grads_value)
        gen_img += grads_value * args.step
        angle1, angle2, angle3 = model1.predict(gen_img)[0], model2.predict(gen_img)[0], model3.predict(gen_img)[0]
        if angle_diverged(angle1, angle2, angle3):
            update_coverage(gen_img, model1, model_layer_dict1, args.threshold)
            update_coverage(gen_img, model2, model_layer_dict2, args.threshold)
            update_coverage(gen_img, model3, model_layer_dict3, args.threshold)
            _report_coverage()
            gen_img_deprocessed = deprocess_image(gen_img)
            # NOTE: the original DeepXplore loop also saved the untouched seed
            # image and broke out here; this variant deliberately keeps
            # iterating and only saves the adversarial image.
            imsave('./generated_final_images/{}_{}.jpg'.format(transformation, angle1), gen_img_deprocessed)