activations.py (forked from heyxhh/nnet-numpy)
import numpy as np


# ReLU layer
class Relu(object):
    def __init__(self):
        self.X = None

    def __call__(self, X):
        self.X = X
        return self.forward(self.X)

    def forward(self, X):
        return np.maximum(0, X)

    def backward(self, grad_output):
        """
        grad_output: gradient of the loss w.r.t. the ReLU output
        return: gradient of the loss w.r.t. the input input_z
        """
        grad_relu = self.X > 0  # gradient is 1 where input_z > 0, 0 elsewhere
        return grad_relu * grad_output  # * is elementwise multiplication in numpy


# Tanh layer
class Tanh():
    def __init__(self):
        self.X = None

    def __call__(self, X):
        self.X = X
        return self.forward(self.X)

    def forward(self, X):
        return np.tanh(X)

    def backward(self, grad_output):
        grad_tanh = 1 - (np.tanh(self.X)) ** 2  # d tanh(x)/dx = 1 - tanh(x)^2
        return grad_output * grad_tanh


# Sigmoid layer
class Sigmoid():
    def __init__(self):
        self.X = None

    def __call__(self, X):
        self.X = X
        return self.forward(self.X)

    def forward(self, X):
        return self._sigmoid(X)

    def backward(self, grad_output):
        # d sigmoid(x)/dx = sigmoid(x) * (1 - sigmoid(x))
        sigmoid_grad = self._sigmoid(self.X) * (1 - self._sigmoid(self.X))
        return grad_output * sigmoid_grad

    def _sigmoid(self, X):
        return 1.0 / (1 + np.exp(-X))
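

# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal example of how these layers are typically exercised: calling a
# layer runs forward() and caches the input X, and backward() then maps the
# upstream gradient to the gradient w.r.t. X. The sample input and the
# all-ones upstream gradient below are assumptions chosen for illustration.
if __name__ == "__main__":
    X = np.array([[-1.0, 0.5], [2.0, -3.0]])
    upstream_grad = np.ones_like(X)  # stand-in for the gradient from the next layer

    for layer in (Relu(), Tanh(), Sigmoid()):
        out = layer(X)                          # forward pass (caches X)
        grad_X = layer.backward(upstream_grad)  # gradient of the loss w.r.t. X
        print(type(layer).__name__)
        print("output:\n", out)
        print("grad wrt input:\n", grad_X)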