diff --git a/Thursday/practical-part1/keras-mnist-bettercnn-vis-maxact3.py b/Thursday/practical-part1/keras-mnist-bettercnn-vis-maxact3.py
index ed65b075..9633d1cb 100644
--- a/Thursday/practical-part1/keras-mnist-bettercnn-vis-maxact3.py
+++ b/Thursday/practical-part1/keras-mnist-bettercnn-vis-maxact3.py
@@ -13,9 +13,8 @@
 step=1
 
 # we're interested in maximising outputs of the 3rd layer:
-layer_output = model.layers[3].output
-
-for i in xrange(0,15):
+layer_output = model.layers[2].output
+for i in range(0,15):
 	# build a loss function that maximizes the activation
 	# of the nth filter of the layer considered
 	loss = K.mean(layer_output[:, :, :, i])
@@ -33,7 +32,7 @@
 	input_img_data = np.random.random((1, 28, 28, 1)) * 0.07 + 0.5
 
 	# run gradient ascent for 50 steps
-	for j in range(50):
+	for j in range(30):
 		loss_value, grads_value = iterate([input_img_data])
 		input_img_data += grads_value * step
diff --git a/Thursday/practical-part1/keras-tutorial.md b/Thursday/practical-part1/keras-tutorial.md
index 11af1208..cdf733b5 100644
--- a/Thursday/practical-part1/keras-tutorial.md
+++ b/Thursday/practical-part1/keras-tutorial.md
@@ -631,7 +631,7 @@
 layer_output = model.layers[3].output
 
 for i in xrange(0,15):
 	# build a loss function that maximizes the activation
 	# of the nth filter of the layer considered
-	loss = K.mean(layer_output[:, i, :, :])
+	loss = K.mean(layer_output[:, :, :, i])
 
 	# compute the gradient of the input picture wrt this loss
 	grads = K.gradients(loss, input_img)[0]
diff --git a/Thursday/practical-part1/keras-tutorial.pdf b/Thursday/practical-part1/keras-tutorial.pdf
index 3a9e5acd..5f6d907b 100644
Binary files a/Thursday/practical-part1/keras-tutorial.pdf and b/Thursday/practical-part1/keras-tutorial.pdf differ
diff --git a/Thursday/practical-part1/maxact.png b/Thursday/practical-part1/maxact.png
index 7e454e59..66a64804 100644
Binary files a/Thursday/practical-part1/maxact.png and b/Thursday/practical-part1/maxact.png differ
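
For context, below is a minimal sketch (not part of the patch) of the activation-maximisation loop the hunks above modify. It assumes a trained channels-last Keras model and the keras.backend API used by the tutorial; the maximise_activations wrapper, its parameters, and the gradient normalisation step are illustrative assumptions and do not appear in the patched files.

import numpy as np
from keras import backend as K

def maximise_activations(model, layer_idx=2, n_filters=15, n_steps=30, step=1.0):
    # Hypothetical helper; `model` is assumed to be the trained MNIST CNN
    # from the tutorial, with channels-last conv outputs.
    input_img = model.input
    # channels-last output shape: (batch, rows, cols, filters), hence indexing [..., i]
    layer_output = model.layers[layer_idx].output

    images = []
    for i in range(n_filters):
        # loss = mean activation of the i-th filter of the chosen layer
        loss = K.mean(layer_output[:, :, :, i])

        # gradient of the loss w.r.t. the input image; normalisation (an
        # addition not shown in the patch) keeps the ascent step stable
        grads = K.gradients(loss, input_img)[0]
        grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)

        iterate = K.function([input_img], [loss, grads])

        # start from a near-grey image with a little noise and climb the gradient
        input_img_data = np.random.random((1, 28, 28, 1)) * 0.07 + 0.5
        for _ in range(n_steps):
            loss_value, grads_value = iterate([input_img_data])
            input_img_data += grads_value * step

        images.append(input_img_data[0, :, :, 0])
    return images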