program test_optimizers
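  ! Verifies that each built-in optimizer (plain SGD, SGD with momentum,
  ! SGD with Nesterov momentum, RMSprop, Adam, and Adagrad) can fit a small
  ! dense network to a single input-target pair within num_iterations steps.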
  use nf, only: dense, input, network, rmsprop, sgd, adam, adagrad
  use iso_fortran_env, only: stderr => error_unit

  implicit none

  type(network) :: net(6)
  real, allocatable :: x(:), y(:)
  real, allocatable :: ypred(:)
  integer, parameter :: num_iterations = 1000
  integer :: n
  logical :: ok = .true.
  logical :: converged = .false.

  ! Instantiate a network and copy an instance to the rest of the array
  net(1) = network([input(3), dense(5), dense(2)])
  net(2:) = net(1)

  x = [0.2, 0.4, 0.6]
  y = [0.123456, 0.246802]

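  ! Test plain SGD optimizer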
  do n = 0, num_iterations
    call net(1) % forward(x)
    call net(1) % backward(y)
    call net(1) % update(optimizer=sgd(learning_rate=1.))
    ypred = net(1) % predict(x)
    converged = check_convergence(y, ypred)
    if (converged) exit
  end do

  if (.not. converged) then
    write(stderr, '(a)') 'sgd should converge in simple training.. failed'
    ok = .false.
  end if

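  ! Test SGD with classical momentum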
  converged = .false.

  do n = 0, num_iterations
    call net(2) % forward(x)
    call net(2) % backward(y)
    call net(2) % update(optimizer=sgd(learning_rate=1., momentum=0.9))
    ypred = net(2) % predict(x)
    converged = check_convergence(y, ypred)
    if (converged) exit
  end do

  if (.not. converged) then
    write(stderr, '(a)') &
      'sgd(momentum) should converge in simple training.. failed'
    ok = .false.
  end if

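  ! Test SGD with Nesterov momentum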
  converged = .false.

  do n = 0, num_iterations
    call net(3) % forward(x)
    call net(3) % backward(y)
    call net(3) % update(optimizer=sgd(learning_rate=1., momentum=0.9, nesterov=.true.))
    ypred = net(3) % predict(x)
    converged = check_convergence(y, ypred)
    if (converged) exit
  end do

  if (.not. converged) then
    write(stderr, '(a)') &
      'sgd(nesterov) should converge in simple training.. failed'
    ok = .false.
  end if

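  ! Test RMSprop optimizer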
  ! Resetting convergence flag
  converged = .false.

  do n = 0, num_iterations
    call net(4) % forward(x)
    call net(4) % backward(y)
    call net(4) % update(optimizer=rmsprop(learning_rate=0.01, decay_rate=0.9))
    ypred = net(4) % predict(x)
    converged = check_convergence(y, ypred)
    if (converged) exit
  end do

  if (.not. converged) then
    write(stderr, '(a)') 'rmsprop should converge in simple training.. failed'
    ok = .false.
  end if

  ! Test Adam optimizer
  converged = .false.

  do n = 0, num_iterations
    call net(5) % forward(x)
    call net(5) % backward(y)
    call net(5) % update(optimizer=adam(learning_rate=0.01, beta1=0.9, beta2=0.999))
    ypred = net(5) % predict(x)
    converged = check_convergence(y, ypred)
    if (converged) exit
  end do

  if (.not. converged) then
    write(stderr, '(a)') 'adam should converge in simple training.. failed'
    ok = .false.
  end if

  ! Test Adagrad optimizer
  converged = .false.

  do n = 0, num_iterations
    call net(6) % forward(x)
    call net(6) % backward(y)
    call net(6) % update(optimizer=adagrad(learning_rate=0.01, weight_decay_l2=1e-4, learning_rate_decay=0.99))
    ypred = net(6) % predict(x)
    converged = check_convergence(y, ypred)
    if (converged) exit
  end do

  if (.not. converged) then
    write(stderr, '(a)') 'adagrad should converge in simple training.. failed'
    ok = .false.
  end if

  if (ok) then
    print '(a)', 'test_optimizers: All tests passed.'
  else
    write(stderr, '(a)') 'test_optimizers: One or more tests failed.'
    stop 1
  end if

contains

  pure logical function check_convergence(y, ypred) result(converged)
    ! Check convergence of ypred to y based on RMSE < tolerance.
    real, intent(in) :: y(:), ypred(:)
    real, parameter :: tolerance = 1e-3
    converged = sqrt(sum((ypred - y)**2) / size(y)) < tolerance
  end function check_convergence

end program test_optimizers