From 05cce263547a2c4b370de24ba114475afba312f8 Mon Sep 17 00:00:00 2001
From: Milan Curcic
Date: Thu, 19 May 2022 12:42:09 -0400
Subject: [PATCH 01/17] Add module for the flatten layer; not implemented yet

---
 src/nf_flatten_layer.f90           | 75 ++++++++++++++++++++++++++++++
 src/nf_flatten_layer_submodule.f90 | 36 ++++++++++++++
 2 files changed, 111 insertions(+)
 create mode 100644 src/nf_flatten_layer.f90
 create mode 100644 src/nf_flatten_layer_submodule.f90

diff --git a/src/nf_flatten_layer.f90 b/src/nf_flatten_layer.f90
new file mode 100644
index 00000000..5d9b8fc5
--- /dev/null
+++ b/src/nf_flatten_layer.f90
@@ -0,0 +1,75 @@
+module nf_flatten_layer
+
+  !! This module provides the concrete flatten layer type.
+  !! It is used internally by the layer type.
+  !! It is not intended to be used directly by the user.
+
+  use nf_base_layer, only: base_layer
+
+  implicit none
+
+  private
+  public :: flatten_layer
+
+  type, extends(base_layer) :: flatten_layer
+
+    !! Concrete implementation of a flatten (3-d to 1-d) layer.
+
+    integer :: input_shape
+    integer :: output_size
+
+    real, allocatable :: output(:)
+
+  contains
+
+    procedure :: backward
+    procedure :: forward
+    procedure :: init
+
+  end type flatten_layer
+
+  interface flatten_layer
+    elemental module function flatten_layer_cons() result(res)
+      !! This function returns the `flatten_layer` instance.
+      type(flatten_layer) :: res
+        !! `flatten_layer` instance
+    end function flatten_layer_cons
+  end interface flatten_layer
+
+  interface
+
+    pure module subroutine backward(self, input, gradient)
+      !! Apply the backward gradient descent pass.
+      !! Only weight and bias gradients are updated in this subroutine,
+      !! while the weights and biases themselves are untouched.
+      class(flatten_layer), intent(in out) :: self
+        !! Dense layer instance
+      real, intent(in) :: input(:,:,:)
+        !! Input from the previous layer
+      real, intent(in) :: gradient(:)
+        !! Gradient from the next layer
+    end subroutine backward
+
+    pure module subroutine forward(self, input)
+      !! Propagate forward the layer.
+      !! Calling this subroutine updates the values of a few data components
+      !! of `flatten_layer` that are needed for the backward pass.
+      class(flatten_layer), intent(in out) :: self
+        !! Dense layer instance
+      real, intent(in) :: input(:,:,:)
+        !! Input from the previous layer
+    end subroutine forward
+
+    module subroutine init(self, input_shape)
+      !! Initialize the layer data structures.
+      !!
+      !! This is a deferred procedure from the `base_layer` abstract type.
+      class(flatten_layer), intent(in out) :: self
+        !! Dense layer instance
+      integer, intent(in) :: input_shape(:)
+        !! Shape of the input layer
+    end subroutine init
+
+  end interface
+
+end module nf_flatten_layer
diff --git a/src/nf_flatten_layer_submodule.f90 b/src/nf_flatten_layer_submodule.f90
new file mode 100644
index 00000000..5805f400
--- /dev/null
+++ b/src/nf_flatten_layer_submodule.f90
@@ -0,0 +1,36 @@
+submodule(nf_flatten_layer) nf_flatten_layer_submodule
+
+  !! This module provides the concrete flatten layer type.
+  !! It is used internally by the layer type.
+  !! It is not intended to be used directly by the user.
+
+  use nf_base_layer, only: base_layer
+
+  implicit none
+
+contains
+
+  elemental module function flatten_layer_cons() result(res)
+    type(flatten_layer) :: res
+  end function flatten_layer_cons
+
+
+  pure module subroutine backward(self, input, gradient)
+    class(flatten_layer), intent(in out) :: self
+    real, intent(in) :: input(:,:,:)
+    real, intent(in) :: gradient(:)
+  end subroutine backward
+
+
+  pure module subroutine forward(self, input)
+    class(flatten_layer), intent(in out) :: self
+    real, intent(in) :: input(:,:,:)
+  end subroutine forward
+
+
+  module subroutine init(self, input_shape)
+    class(flatten_layer), intent(in out) :: self
+    integer, intent(in) :: input_shape(:)
+  end subroutine init
+
+end submodule nf_flatten_layer_submodule
From d575c4739b0d33e272afb53a0c159a9cecd19484 Mon Sep 17 00:00:00 2001
From: Milan Curcic
Date: Thu, 19 May 2022 12:53:10 -0400
Subject: [PATCH 02/17] Flatten layer constructor

---
 src/nf.f90                              |  2 +-
 src/nf_layer_constructors.f90           | 23 +++++++-
 src/nf_layer_constructors_submodule.f90 | 73 ++++++++++++++-----------
 3 files changed, 63 insertions(+), 35 deletions(-)

diff --git a/src/nf.f90 b/src/nf.f90
index 474127bf..5a399178 100644
--- a/src/nf.f90
+++ b/src/nf.f90
@@ -1,6 +1,6 @@
 module nf
   use nf_datasets_mnist, only: label_digits, load_mnist
   use nf_layer, only: layer
-  use nf_layer_constructors, only: conv2d, dense, input, maxpool2d
+  use nf_layer_constructors, only: conv2d, dense, flatten, input, maxpool2d
   use nf_network, only: network
 end module nf
diff --git a/src/nf_layer_constructors.f90 b/src/nf_layer_constructors.f90
index eaab6d52..0222a8c5 100644
--- a/src/nf_layer_constructors.f90
+++ b/src/nf_layer_constructors.f90
@@ -7,7 +7,7 @@ module nf_layer_constructors
   implicit none
 
   private
-  public :: conv2d, dense, input, maxpool2d
+  public :: conv2d, dense, flatten, input, maxpool2d
 
   interface input
 
@@ -84,6 +84,27 @@ pure module function dense(layer_size, activation) result(res)
         !! Resulting layer instance
     end function dense
 
+    pure module function flatten() result(res)
+      !! Flatten (3-d -> 1-d) layer constructor.
+      !!
+      !! Use this layer to chain layers with 3-d outputs to layers with 1-d
+      !! inputs. For example, to chain a `conv2d` or a `maxpool2d` layer
+      !! with a `dense` layer for a CNN for classification, place a `flatten`
+      !! layer between them.
+      !!
+      !! A flatten layer must not be the first layer in the network.
+      !!
+      !! Example:
+      !!
+      !! ```
+      !! use nf, only :: flatten, layer
+      !! type(layer) :: flatten_layer
+      !! flatten_layer = flatten()
+      !! ```
+      type(layer) :: res
+        !! Resulting layer instance
+    end function flatten
+
     pure module function conv2d(filters, kernel_size, activation) result(res)
       !! 2-d convolutional layer constructor.
      !!
diff --git a/src/nf_layer_constructors_submodule.f90 b/src/nf_layer_constructors_submodule.f90
index 7fd0637a..8e991901 100644
--- a/src/nf_layer_constructors_submodule.f90
+++ b/src/nf_layer_constructors_submodule.f90
@@ -3,6 +3,7 @@
   use nf_layer, only: layer
   use nf_conv2d_layer, only: conv2d_layer
   use nf_dense_layer, only: dense_layer
+  use nf_flatten_layer, only: flatten_layer
   use nf_input1d_layer, only: input1d_layer
   use nf_input3d_layer, only: input3d_layer
   use nf_maxpool2d_layer, only: maxpool2d_layer
@@ -11,26 +12,26 @@
 
 contains
 
-  pure module function input1d(layer_size) result(res)
-    integer, intent(in) :: layer_size
+  pure module function conv2d(filters, kernel_size, activation) result(res)
+    integer, intent(in) :: filters
+    integer, intent(in) :: kernel_size
+    character(*), intent(in), optional :: activation
     type(layer) :: res
-    res % name = 'input'
-    res % layer_shape = [layer_size]
-    res % input_layer_shape = [integer ::]
-    allocate(res % p, source=input1d_layer(layer_size))
-    res % initialized = .true.
-  end function input1d
+    res % name = 'conv2d'
 
-  pure module function input3d(layer_shape) result(res)
-    integer, intent(in) :: layer_shape(3)
-    type(layer) :: res
-    res % name = 'input'
-    res % layer_shape = layer_shape
-    res % input_layer_shape = [integer ::]
-    allocate(res % p, source=input3d_layer(layer_shape))
-    res % initialized = .true.
-  end function input3d
+    if (present(activation)) then
+      res % activation = activation
+    else
+      res % activation = 'sigmoid'
+    end if
+
+    allocate( &
+      res % p, &
+      source=conv2d_layer(filters, kernel_size, res % activation) &
+    )
+
+  end function conv2d
 
 
   pure module function dense(layer_size, activation) result(res)
@@ -52,27 +53,33 @@ pure module function dense(layer_size, activation) result(res)
 
   end function dense
 
-  pure module function conv2d(filters, kernel_size, activation) result(res)
-    integer, intent(in) :: filters
-    integer, intent(in) :: kernel_size
-    character(*), intent(in), optional :: activation
+  pure module function flatten() result(res)
     type(layer) :: res
+    res % name = 'flatten'
+    allocate(res % p, source=flatten_layer())
+  end function flatten
 
-    res % name = 'conv2d'
 
-    if (present(activation)) then
-      res % activation = activation
-    else
-      res % activation = 'sigmoid'
-    end if
-
-    allocate( &
-      res % p, &
-      source=conv2d_layer(filters, kernel_size, res % activation) &
-    )
+  pure module function input1d(layer_size) result(res)
+    integer, intent(in) :: layer_size
+    type(layer) :: res
+    res % name = 'input'
+    res % layer_shape = [layer_size]
+    res % input_layer_shape = [integer ::]
+    allocate(res % p, source=input1d_layer(layer_size))
+    res % initialized = .true.
-  end function conv2d
+  end function input1d
 
+  pure module function input3d(layer_shape) result(res)
+    integer, intent(in) :: layer_shape(3)
+    type(layer) :: res
+    res % name = 'input'
+    res % layer_shape = layer_shape
+    res % input_layer_shape = [integer ::]
+    allocate(res % p, source=input3d_layer(layer_shape))
+    res % initialized = .true.
+  end function input3d
 
   pure module function maxpool2d(pool_size, stride) result(res)
     integer, intent(in) :: pool_size
 
From cfdfed166d44cae7741c3d9dd02c04a924d11771 Mon Sep 17 00:00:00 2001
From: Milan Curcic
Date: Thu, 19 May 2022 13:15:54 -0400
Subject: [PATCH 03/17] Begin flatten layer tests

---
 test/test_flatten_layer.f90 | 32 ++++++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)
 create mode 100644 test/test_flatten_layer.f90

diff --git a/test/test_flatten_layer.f90 b/test/test_flatten_layer.f90
new file mode 100644
index 00000000..6465ef68
--- /dev/null
+++ b/test/test_flatten_layer.f90
@@ -0,0 +1,32 @@
+program test_flatten_layer
+
+  use iso_fortran_env, only: stderr => error_unit
+  use nf, only: flatten, layer
+  use nf_flatten_layer, only: flatten_layer
+
+  implicit none
+
+  type(layer) :: test_layer
+  real, allocatable :: output(:)
+  logical :: ok = .true.
+
+  test_layer = flatten()
+
+  if (.not. test_layer % name == 'flatten') then
+    ok = .false.
+    write(stderr, '(a)') 'flatten layer has its name set correctly.. failed'
+  end if
+
+  if (test_layer % initialized) then
+    ok = .false.
+    write(stderr, '(a)') 'flatten layer is not initialized yet.. failed'
+  end if
+
+  if (ok) then
+    print '(a)', 'test_flatten_layer: All tests passed.'
+  else
+    write(stderr, '(a)') 'test_flatten_layer: One or more tests failed.'
+    stop 1
+  end if
+
+end program test_flatten_layer
From 1a35e01bffaaff7cf8d7c059806f72e1087cb840 Mon Sep 17 00:00:00 2001
From: Milan Curcic
Date: Thu, 19 May 2022 13:16:04 -0400
Subject: [PATCH 04/17] CMake rules for flatten layer

---
 CMakeLists.txt | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 3ef47b32..2ebd4ce7 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -73,6 +73,8 @@ add_library(neural
   src/nf_datasets_mnist_submodule.f90
   src/nf_dense_layer.f90
   src/nf_dense_layer_submodule.f90
+  src/nf_flatten_layer.f90
+  src/nf_flatten_layer_submodule.f90
   src/nf.f90
   src/nf_input1d_layer.f90
   src/nf_input1d_layer_submodule.f90
@@ -102,7 +104,7 @@ string(REGEX REPLACE "^ | $" "" LIBS "${LIBS}")
 
 # tests
 enable_testing()
-foreach(execid input1d_layer input3d_layer dense_layer conv2d_layer maxpool2d_layer dense_network conv2d_network)
+foreach(execid input1d_layer input3d_layer dense_layer conv2d_layer maxpool2d_layer flatten_layer dense_network conv2d_network)
   add_executable(test_${execid} test/test_${execid}.f90)
   target_link_libraries(test_${execid} neural ${LIBS})
   add_test(test_${execid} bin/test_${execid})
From 315d56b977045470efe808155858dcedc44bd1ca Mon Sep 17 00:00:00 2001
From: Milan Curcic
Date: Thu, 19 May 2022 17:40:44 -0400
Subject: [PATCH 05/17] First stab at forward and backward passes for the flatten layer

---
 src/nf_flatten_layer.f90           | 10 +++++-----
 src/nf_flatten_layer_submodule.f90 | 12 ++++++++++++
 src/nf_layer_submodule.f90         | 30 ++++++++++++++++++++++++++----
 3 files changed, 43 insertions(+), 9 deletions(-)

diff --git a/src/nf_flatten_layer.f90 b/src/nf_flatten_layer.f90
index 5d9b8fc5..38e38098 100644
--- a/src/nf_flatten_layer.f90
+++ b/src/nf_flatten_layer.f90
@@ -15,9 +15,10 @@ module nf_flatten_layer
 
     !! Concrete implementation of a flatten (3-d to 1-d) layer.
 
-    integer :: input_shape
+    integer, allocatable :: input_shape(:)
     integer :: output_size
 
+    real, allocatable :: gradient(:,:,:)
     real, allocatable :: output(:)
 
   contains
@@ -39,11 +40,10 @@ end function flatten_layer_cons
   interface
 
     pure module subroutine backward(self, input, gradient)
-      !! Apply the backward gradient descent pass.
-      !! Only weight and bias gradients are updated in this subroutine,
-      !! while the weights and biases themselves are untouched.
+      !! Apply the backward pass to the flatten layer.
+      !! This is a reshape operation from 1-d gradient to 3-d input.
       class(flatten_layer), intent(in out) :: self
-        !! Dense layer instance
+        !! Flatten layer instance
       real, intent(in) :: input(:,:,:)
         !! Input from the previous layer
diff --git a/src/nf_flatten_layer_submodule.f90 b/src/nf_flatten_layer_submodule.f90
index 5805f400..8423c187 100644
--- a/src/nf_flatten_layer_submodule.f90
+++ b/src/nf_flatten_layer_submodule.f90
@@ -19,18 +19,30 @@ pure module subroutine backward(self, input, gradient)
     class(flatten_layer), intent(in out) :: self
     real, intent(in) :: input(:,:,:)
     real, intent(in) :: gradient(:)
+    self % gradient = reshape(gradient, shape(input))
   end subroutine backward
 
 
   pure module subroutine forward(self, input)
     class(flatten_layer), intent(in out) :: self
     real, intent(in) :: input(:,:,:)
+    self % output = pack(input, .true.)
   end subroutine forward
 
 
   module subroutine init(self, input_shape)
     class(flatten_layer), intent(in out) :: self
     integer, intent(in) :: input_shape(:)
+
+    self % input_shape = input_shape
+    self % output_size = product(input_shape)
+
+    allocate(self % gradient(input_shape(1), input_shape(2), input_shape(3)))
+    self % output = 0
+
+    allocate(self % output(self % output_size))
+    self % output = 0
+
   end subroutine init
 
 end submodule nf_flatten_layer_submodule
diff --git a/src/nf_layer_submodule.f90 b/src/nf_layer_submodule.f90
index fdbda9d2..d1f63e16 100644
--- a/src/nf_layer_submodule.f90
+++ b/src/nf_layer_submodule.f90
@@ -2,15 +2,15 @@
 
   use nf_conv2d_layer, only: conv2d_layer
   use nf_dense_layer, only: dense_layer
+  use nf_flatten_layer, only: flatten_layer
   use nf_input1d_layer, only: input1d_layer
   use nf_input3d_layer, only: input3d_layer
   use nf_maxpool2d_layer, only: maxpool2d_layer
 
-  implicit none
-
 contains
 
   pure module subroutine backward(self, previous, gradient)
+    implicit none
     class(layer), intent(in out) :: self
     class(layer), intent(in) :: previous
     real, intent(in) :: gradient(:)
@@ -35,6 +35,7 @@ end subroutine backward
 
 
   pure module subroutine forward(self, input)
+    implicit none
     class(layer), intent(in out) :: self
     class(layer), intent(in) :: input
 
@@ -74,12 +75,25 @@ pure module subroutine forward(self, input)
           call this_layer % forward(prev_layer % output)
       end select
 
+      type is(flatten_layer)
+
+        ! Input layers permitted: input3d, conv2d, maxpool2d
+        select type(prev_layer => input % p)
+          type is(input3d_layer)
+            call this_layer % forward(prev_layer % output)
+          type is(conv2d_layer)
+            call this_layer % forward(prev_layer % output)
+          type is(maxpool2d_layer)
+            call this_layer % forward(prev_layer % output)
+        end select
+
     end select
 
   end subroutine forward
 
 
   pure module subroutine get_output_1d(self, output)
+    implicit none
     class(layer), intent(in) :: self
     real, allocatable, intent(out) :: output(:)
 
@@ -89,8 +103,10 @@ pure module subroutine get_output_1d(self, output)
         allocate(output, source=this_layer % output)
       type is(dense_layer)
         allocate(output, source=this_layer % output)
+      type is(flatten_layer)
+        allocate(output, source=this_layer % output)
       class default
-        error stop '1-d output can only be read from an input1d or dense layer.'
+        error stop '1-d output can only be read from an input1d, dense, or flatten layer.'
     end select
 
@@ -98,6 +114,7 @@ end subroutine get_output_1d
 
 
   pure module subroutine get_output_3d(self, output)
+    implicit none
     class(layer), intent(in) :: self
     real, allocatable, intent(out) :: output(:,:,:)
 
@@ -118,6 +135,7 @@ end subroutine get_output_3d
 
 
   impure elemental module subroutine init(self, input)
+    implicit none
    class(layer), intent(in out) :: self
     class(layer), intent(in) :: input
 
@@ -128,13 +146,15 @@ impure elemental module subroutine init(self, input)
         call this_layer % init(input % layer_shape)
     end select
 
-    ! The shape of conv2d or maxpool2d layers is not known
+    ! The shape of conv2d, maxpool2d, or flatten layers is not known
     ! until we receive an input layer.
     select type(this_layer => self % p)
       type is(conv2d_layer)
         self % layer_shape = shape(this_layer % output)
      type is(maxpool2d_layer)
        self % layer_shape = shape(this_layer % output)
+      type is(flatten_layer)
+        self % layer_shape = shape(this_layer % output)
     end select
 
     self % input_layer_shape = input % layer_shape
@@ -144,6 +164,7 @@ end subroutine init
 
 
   impure elemental module subroutine print_info(self)
+    implicit none
     class(layer), intent(in) :: self
     print '("Layer: ", a)', self % name
     print '(60("-"))'
@@ -157,6 +178,7 @@ end subroutine print_info
 
 
   impure elemental module subroutine update(self, learning_rate)
+    implicit none
     class(layer), intent(in out) :: self
     real, intent(in) :: learning_rate
 
From 2cb076df44b4583cdd13bd0dbee455ffdcbbef1f Mon Sep 17 00:00:00 2001
From: milancurcic
Date: Thu, 19 May 2022 20:19:57 -0400
Subject: [PATCH 06/17] Fix typo in initialization

---
 src/nf_flatten_layer_submodule.f90 | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/nf_flatten_layer_submodule.f90 b/src/nf_flatten_layer_submodule.f90
index 8423c187..d52e996d 100644
--- a/src/nf_flatten_layer_submodule.f90
+++ b/src/nf_flatten_layer_submodule.f90
@@ -38,7 +38,7 @@ module subroutine init(self, input_shape)
     self % output_size = product(input_shape)
 
     allocate(self % gradient(input_shape(1), input_shape(2), input_shape(3)))
-    self % output = 0
+    self % gradient = 0
 
     allocate(self % output(self % output_size))
     self % output = 0
From c74fb6ad0633b2817f4ab944b42f674d1de021ae Mon Sep 17 00:00:00 2001
From: milancurcic
Date: Thu, 19 May 2022 20:20:17 -0400
Subject: [PATCH 07/17] Integrate flatten layer with network and layer methods

---
 src/nf_layer_submodule.f90   | 43 ++++++++++++++++++++++++------------
 src/nf_network_submodule.f90 |  9 ++++----
 2 files changed, 33 insertions(+), 19 deletions(-)

diff --git a/src/nf_layer_submodule.f90 b/src/nf_layer_submodule.f90
index d1f63e16..36b19670 100644
--- a/src/nf_layer_submodule.f90
+++ b/src/nf_layer_submodule.f90
@@ -16,19 +16,32 @@ pure module subroutine backward(self, previous, gradient)
     real, intent(in) :: gradient(:)
 
     ! Backward pass currently implemented only for dense layers
-    select type(this_layer => self % p); type is(dense_layer)
-
-      ! Previous layer is the input layer to this layer.
-      ! For a backward pass on a dense layer, we must accept either an input layer
-      ! or another dense layer as input.
-      select type(prev_layer => previous % p)
+    select type(this_layer => self % p)
 
-        type is(input1d_layer)
-          call this_layer % backward(prev_layer % output, gradient)
       type is(dense_layer)
-          call this_layer % backward(prev_layer % output, gradient)
 
-      end select
+        ! Upstream layers permitted: input1d, dense, flatten
+        select type(prev_layer => previous % p)
+          type is(input1d_layer)
+            call this_layer % backward(prev_layer % output, gradient)
+          type is(dense_layer)
+            call this_layer % backward(prev_layer % output, gradient)
+          type is(flatten_layer)
+            call this_layer % backward(prev_layer % output, gradient)
+        end select
+
+      type is(flatten_layer)
+
+        ! Downstream layers permitted: input3d, conv2d, maxpool2d
+        select type(prev_layer => previous % p)
+          type is(input3d_layer)
+            call this_layer % backward(prev_layer % output, gradient)
+          type is(conv2d_layer)
+            call this_layer % backward(prev_layer % output, gradient)
+          type is(maxpool2d_layer)
+            call this_layer % backward(prev_layer % output, gradient)
+        end select
+
     end select
 
   end subroutine backward
@@ -43,17 +56,19 @@ pure module subroutine forward(self, input)
 
       type is(dense_layer)
 
-        ! Input layers permitted: input1d, dense
+        ! Upstream layers permitted: input1d, dense, flatten
         select type(prev_layer => input % p)
          type is(input1d_layer)
            call this_layer % forward(prev_layer % output)
          type is(dense_layer)
            call this_layer % forward(prev_layer % output)
+          type is(flatten_layer)
+            call this_layer % forward(prev_layer % output)
         end select
 
      type is(conv2d_layer)
 
-        ! Input layers permitted: input3d, conv2d, maxpool2d
+        ! Upstream layers permitted: input3d, conv2d, maxpool2d
         select type(prev_layer => input % p)
           type is(input3d_layer)
             call this_layer % forward(prev_layer % output)
@@ -65,7 +80,7 @@ pure module subroutine forward(self, input)
 
      type is(maxpool2d_layer)
 
-        ! Input layers permitted: input3d, conv2d, maxpool2d
+        ! Upstream layers permitted: input3d, conv2d, maxpool2d
         select type(prev_layer => input % p)
          type is(input3d_layer)
            call this_layer % forward(prev_layer % output)
@@ -77,7 +92,7 @@ pure module subroutine forward(self, input)
 
      type is(flatten_layer)
 
-        ! Input layers permitted: input3d, conv2d, maxpool2d
+        ! Upstream layers permitted: input3d, conv2d, maxpool2d
         select type(prev_layer => input % p)
           type is(input3d_layer)
             call this_layer % forward(prev_layer % output)
diff --git a/src/nf_network_submodule.f90 b/src/nf_network_submodule.f90
index f2a6c909..643cfdfa 100644
--- a/src/nf_network_submodule.f90
+++ b/src/nf_network_submodule.f90
@@ -30,11 +30,10 @@ module function network_cons(layers) result(res)
 
     !TODO Ensure that the layers are in allowed sequence:
     !TODO   input1d -> dense
     !TODO   dense -> dense
-    !TODO   input3d -> conv2d
-    !TODO   conv2d -> conv2d
-    !TODO   conv2d -> maxpool2d
-    !TODO   maxpool2d -> conv2d
-    !TODO   conv2d -> flatten
+    !TODO   input3d -> conv2d, maxpool2d, flatten
+    !TODO   conv2d -> conv2d, maxpool2d, flatten
+    !TODO   maxpool2d -> conv2d, maxpool2d, flatten
+    !TODO   flatten -> dense
 
     res % layers = layers
 
From eb4764d6f601e340c6af00f0d50bb4f2c39d05d9 Mon Sep 17 00:00:00 2001
From: milancurcic
Date: Thu, 19 May 2022 20:20:32 -0400
Subject: [PATCH 08/17] Add flatten layer to the table

---
 README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/README.md b/README.md
index e94e7905..b561bc4e 100644
--- a/README.md
+++ b/README.md
@@ -30,6 +30,7 @@ Read the paper [here](https://arxiv.org/abs/1902.06714).
 | Dense (fully-connected) | `dense` | `input` (1-d) | 1 | ✅ | ✅ |
 | Convolutional (2-d) | `conv2d` | `input` (3-d), `conv2d`, `maxpool2d` | 3 | ✅ | ❌ |
 | Max-pooling (2-d) | `maxpool2d` | `input` (3-d), `conv2d`, `maxpool2d` | 3 | ✅ | ❌ |
+| Flatten | `flatten` | `input` (3-d), `conv2d`, `maxpool2d` | 1 | ✅ | ✅ |
 
 ## Getting started
 
From 6951a3b29e844a2555730b4b5c4d57f0cdb0a4d4 Mon Sep 17 00:00:00 2001
From: milancurcic
Date: Thu, 19 May 2022 20:20:54 -0400
Subject: [PATCH 09/17] Test the forward pass of a flatten layer

---
 test/test_flatten_layer.f90 | 31 +++++++++++++++++++++++++++++--
 1 file changed, 29 insertions(+), 2 deletions(-)

diff --git a/test/test_flatten_layer.f90 b/test/test_flatten_layer.f90
index 6465ef68..a9e491d4 100644
--- a/test/test_flatten_layer.f90
+++ b/test/test_flatten_layer.f90
@@ -1,12 +1,14 @@
 program test_flatten_layer
 
   use iso_fortran_env, only: stderr => error_unit
-  use nf, only: flatten, layer
+  use nf, only: flatten, input, layer
   use nf_flatten_layer, only: flatten_layer
+  use nf_input3d_layer, only: input3d_layer
 
   implicit none
 
-  type(layer) :: test_layer
+  type(layer) :: test_layer, input_layer
+  real, allocatable :: input_data(:,:,:)
   real, allocatable :: output(:)
   logical :: ok = .true.
 
@@ -22,6 +24,31 @@ program test_flatten_layer
     write(stderr, '(a)') 'flatten layer is not initialized yet.. failed'
   end if
 
+  input_layer = input([1, 2, 2])
+  call test_layer % init(input_layer)
+
+  if (.not. test_layer % initialized) then
+    ok = .false.
+    write(stderr, '(a)') 'flatten layer is now initialized.. failed'
+  end if
+
+  if (.not. all(test_layer % layer_shape == [4])) then
+    ok = .false.
+    write(stderr, '(a)') 'flatten layer has an incorrect output shape.. failed'
+  end if
+
+  select type(this_layer => input_layer % p); type is(input3d_layer)
+    call this_layer % set(reshape(real([1, 2, 3, 4]), [1, 2, 2]))
+  end select
+
+  call test_layer % forward(input_layer)
+  call test_layer % get_output(output)
+
+  if (.not. all(output == [1, 2, 3, 4])) then
+    ok = .false.
+    write(stderr, '(a)') 'flatten layer correctly propagates forward.. failed'
+  end if
+
   if (ok) then
     print '(a)', 'test_flatten_layer: All tests passed.'
   else
     write(stderr, '(a)') 'test_flatten_layer: One or more tests failed.'
     stop 1
From 70704be27db28ec9f75dada26753d50c4bceb846 Mon Sep 17 00:00:00 2001
From: milancurcic
Date: Thu, 19 May 2022 22:29:14 -0400
Subject: [PATCH 10/17] Move flatten source files one directory down

---
 src/{ => nf}/nf_flatten_layer.f90           | 0
 src/{ => nf}/nf_flatten_layer_submodule.f90 | 0
 2 files changed, 0 insertions(+), 0 deletions(-)
 rename src/{ => nf}/nf_flatten_layer.f90 (100%)
 rename src/{ => nf}/nf_flatten_layer_submodule.f90 (100%)

diff --git a/src/nf_flatten_layer.f90 b/src/nf/nf_flatten_layer.f90
similarity index 100%
rename from src/nf_flatten_layer.f90
rename to src/nf/nf_flatten_layer.f90
diff --git a/src/nf_flatten_layer_submodule.f90 b/src/nf/nf_flatten_layer_submodule.f90
similarity index 100%
rename from src/nf_flatten_layer_submodule.f90
rename to src/nf/nf_flatten_layer_submodule.f90
From 09fc9a570d28d8a1cc170af36619052810bf9795 Mon Sep 17 00:00:00 2001
From: Milan Curcic
Date: Fri, 20 May 2022 11:46:54 -0400
Subject: [PATCH 11/17] Fix comment

---
 src/nf/nf_layer_submodule.f90 | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/nf/nf_layer_submodule.f90 b/src/nf/nf_layer_submodule.f90
index 36b19670..dbffb6b1 100644
--- a/src/nf/nf_layer_submodule.f90
+++ b/src/nf/nf_layer_submodule.f90
@@ -15,7 +15,7 @@ pure module subroutine backward(self, previous, gradient)
     class(layer), intent(in) :: previous
     real, intent(in) :: gradient(:)
 
-    ! Backward pass currently implemented only for dense layers
+    ! Backward pass currently implemented only for dense and flatten layers
     select type(this_layer => self % p)
 
From be5ca0a6104fe5c261003f4147987d09014d0cd1 Mon Sep 17 00:00:00 2001
From: Milan Curcic
Date: Fri, 20 May 2022 11:47:37 -0400
Subject: [PATCH 12/17] Test the flatten backward pass

---
 test/test_flatten_layer.f90 | 19 ++++++++++++++++++-
 1 file changed, 18 insertions(+), 1 deletion(-)

diff --git a/test/test_flatten_layer.f90 b/test/test_flatten_layer.f90
index a9e491d4..b8d66928 100644
--- a/test/test_flatten_layer.f90
+++ b/test/test_flatten_layer.f90
@@ -8,7 +8,7 @@ program test_flatten_layer
 
   implicit none
 
   type(layer) :: test_layer, input_layer
-  real, allocatable :: input_data(:,:,:)
+  real, allocatable :: input_data(:,:,:), gradient(:,:,:)
   real, allocatable :: output(:)
   logical :: ok = .true.
 
@@ -37,6 +37,8 @@ program test_flatten_layer
     write(stderr, '(a)') 'flatten layer has an incorrect output shape.. failed'
   end if
 
+  ! Test forward pass - reshaping from 3-d to 1-d
+
   select type(this_layer => input_layer % p); type is(input3d_layer)
     call this_layer % set(reshape(real([1, 2, 3, 4]), [1, 2, 2]))
   end select
 
@@ -49,6 +51,21 @@ program test_flatten_layer
     write(stderr, '(a)') 'flatten layer correctly propagates forward.. failed'
   end if
 
+  ! Test backward pass - reshaping from 1-d to 3-d
+
+  ! Calling backward() will set the values on the gradient component
+  ! input_layer is used only to determine shape
+  call test_layer % backward(input_layer, real([1, 2, 3, 4]))
+
+  select type(this_layer => test_layer % p); type is(flatten_layer)
+    gradient = this_layer % gradient
+  end select
+
+  if (.not. all(gradient == reshape(real([1, 2, 3, 4]), [1, 2, 2]))) then
+    ok = .false.
+    write(stderr, '(a)') 'flatten layer correctly propagates backward.. failed'
+  end if
+
   if (ok) then
     print '(a)', 'test_flatten_layer: All tests passed.'
   else
     write(stderr, '(a)') 'test_flatten_layer: One or more tests failed.'
     stop 1
From d0901992e1b0ae881587288dc627741ce10e5481 Mon Sep 17 00:00:00 2001
From: Milan Curcic
Date: Fri, 20 May 2022 12:50:29 -0400
Subject: [PATCH 13/17] Fix out-of-bounds error for inputs with odd widths and heights

---
 src/nf/nf_maxpool2d_layer_submodule.f90 | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/src/nf/nf_maxpool2d_layer_submodule.f90 b/src/nf/nf_maxpool2d_layer_submodule.f90
index 68aa0152..3105f447 100644
--- a/src/nf/nf_maxpool2d_layer_submodule.f90
+++ b/src/nf/nf_maxpool2d_layer_submodule.f90
@@ -43,15 +43,19 @@ pure module subroutine forward(self, input)
     integer :: i, j, n
     integer :: ii, jj
     integer :: iend, jend
+    integer :: iextent, jextent
     integer :: maxloc_xy(2)
 
     input_width = size(input, dim=2)
-    input_height = size(input, dim=2)
+    input_height = size(input, dim=3)
+
+    iextent = input_width - mod(input_width, self % stride)
+    jextent = input_height - mod(input_height, self % stride)
 
     ! Stride along the width and height of the input image
     stride_over_input: do concurrent( &
-      i = 1:input_width:self % stride, &
-      j = 1:input_height:self % stride &
+      i = 1:iextent:self % stride, &
+      j = 1:jextent:self % stride &
     )
 
       ! Indices of the pooling layer
From ab1faa670a21102dc3d7bdcb93413ed3f0bba4b7 Mon Sep 17 00:00:00 2001
From: Milan Curcic
Date: Fri, 20 May 2022 12:51:08 -0400
Subject: [PATCH 14/17] network % output is now generic for input ranks 1 and 3

---
 src/nf/nf_network.f90           | 38 +++++++++++++++++++++++----------
 src/nf/nf_network_submodule.f90 | 32 +++++++++++++++++++++++++----
 2 files changed, 55 insertions(+), 15 deletions(-)

diff --git a/src/nf/nf_network.f90 b/src/nf/nf_network.f90
index 0bac70c1..d9c90821 100644
--- a/src/nf/nf_network.f90
+++ b/src/nf/nf_network.f90
@@ -17,15 +17,17 @@ module nf_network
 
   contains
 
     procedure :: backward
-    procedure :: output
     procedure :: print_info
     procedure :: train
     procedure :: update
 
     procedure, private :: forward_1d
     procedure, private :: forward_3d
+    procedure, private :: output_1d
+    procedure, private :: output_3d
 
     generic :: forward => forward_1d, forward_3d
+    generic :: output => output_1d, output_3d
 
 end type network
 
@@ -72,6 +74,30 @@ end subroutine forward_3d
 
  end interface forward
 
+  interface output
+
+    module function output_1d(self, input) result(res)
+      !! Return the output of the network given the input 1-d array.
+      class(network), intent(in out) :: self
+        !! Network instance
+      real, intent(in) :: input(:)
+        !! Input data
+      real, allocatable :: res(:)
+        !! Output of the network
+    end function output_1d
+
+    module function output_3d(self, input) result(res)
+      !! Return the output of the network given the input 3-d array.
+      class(network), intent(in out) :: self
+        !! Network instance
+      real, intent(in) :: input(:,:,:)
+        !! Input data
+      real, allocatable :: res(:)
+        !! Output of the network
+    end function output_3d
+
+  end interface output
+
   interface
 
     pure module subroutine backward(self, output)
@@ -85,16 +111,6 @@ pure module subroutine backward(self, output)
         !! Output data
     end subroutine backward
 
-    module function output(self, input) result(res)
-      !! Return the output of the network given the input array.
-      class(network), intent(in out) :: self
-        !! Network instance
-      real, intent(in) :: input(:)
-        !! Input data
-      real, allocatable :: res(:)
-        !! Output of the network
-    end function output
-
     module subroutine print_info(self)
       !! Prints a brief summary of the network and its layers to the screen.
       class(network), intent(in) :: self
 
diff --git a/src/nf/nf_network_submodule.f90 b/src/nf/nf_network_submodule.f90
index 643cfdfa..7d49bec8 100644
--- a/src/nf/nf_network_submodule.f90
+++ b/src/nf/nf_network_submodule.f90
@@ -1,6 +1,7 @@
 submodule(nf_network) nf_network_submodule
 
   use nf_dense_layer, only: dense_layer
+  use nf_flatten_layer, only: flatten_layer
   use nf_input1d_layer, only: input1d_layer
   use nf_input3d_layer, only: input3d_layer
   use nf_layer, only: layer
@@ -114,7 +115,7 @@ pure module subroutine forward_3d(self, input)
 
   end subroutine forward_3d
 
-  module function output(self, input) result(res)
+  module function output_1d(self, input) result(res)
     class(network), intent(in out) :: self
     real, intent(in) :: input(:)
     real, allocatable :: res(:)
@@ -124,11 +125,34 @@ module function output(self, input) result(res)
 
     call self % forward(input)
 
-    select type(output_layer => self % layers(num_layers) % p); type is(dense_layer)
-      res = output_layer % output
+    select type(output_layer => self % layers(num_layers) % p)
+      type is(dense_layer)
+        res = output_layer % output
+      type is(flatten_layer)
+        res = output_layer % output
     end select
 
-  end function output
+  end function output_1d
+
+
+  module function output_3d(self, input) result(res)
+    class(network), intent(in out) :: self
+    real, intent(in) :: input(:,:,:)
+    real, allocatable :: res(:)
+    integer :: num_layers
+
+    num_layers = size(self % layers)
+
+    call self % forward(input)
+
+    select type(output_layer => self % layers(num_layers) % p)
+      type is(dense_layer)
+        res = output_layer % output
+      type is(flatten_layer)
+        res = output_layer % output
+    end select
+
+  end function output_3d
 
 
   module subroutine print_info(self)
From 40a6f33bab7e317bbc4019becdce9d0e8200263d Mon Sep 17 00:00:00 2001
From: Milan Curcic
Date: Fri, 20 May 2022 12:51:30 -0400
Subject: [PATCH 15/17] Add a CNN (forward-only) example

---
 CMakeLists.txt  |  2 +-
 example/cnn.f90 | 32 ++++++++++++++++++++++++++++++++
 2 files changed, 33 insertions(+), 1 deletion(-)
 create mode 100644 example/cnn.f90

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 2ebd4ce7..fa7240ba 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -110,7 +110,7 @@ foreach(execid input1d_layer input3d_layer dense_layer conv2d_layer maxpool2d_la
   add_test(test_${execid} bin/test_${execid})
 endforeach()
 
-foreach(execid mnist simple sine)
+foreach(execid cnn mnist simple sine)
   add_executable(${execid} example/${execid}.f90)
   target_link_libraries(${execid} neural ${LIBS})
 endforeach()
diff --git a/example/cnn.f90 b/example/cnn.f90
new file mode 100644
index 00000000..03c92b03
--- /dev/null
+++ b/example/cnn.f90
@@ -0,0 +1,32 @@
+program cnn
+
+  use nf, only: conv2d, dense, flatten, input, maxpool2d, network
+
+  implicit none
+  type(network) :: net
+  real, allocatable :: x(:,:,:)
+  integer :: n
+
+  print '("Creating a CNN and doing a forward pass")'
+  print '("(backward pass not implemented yet)")'
+  print '(60("="))'
+
+  net = network([ &
+    input([3, 32, 32]), &
+    conv2d(filters=16, kernel_size=3, activation='relu'), & ! (16, 30, 30)
+    maxpool2d(pool_size=2), & ! (16, 15, 15)
+    conv2d(filters=32, kernel_size=3, activation='relu'), & ! (32, 13, 13)
+    maxpool2d(pool_size=2), & ! (32, 6, 6)
+    flatten(), &
+    dense(10) &
+  ])
+
+  ! Print a network summary to the screen
+  call net % print_info()
+
+  allocate(x(3,32,32))
+  call random_number(x)
+
+  print *, 'Output:', net % output(x)
+
+end program cnn
From 063a74e913b114af1f641b4bdc94992c94fbd9a5 Mon Sep 17 00:00:00 2001
From: Milan Curcic
Date: Fri, 20 May 2022 12:54:09 -0400
Subject: [PATCH 16/17] Add CNN to the list of examples

---
 README.md | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 1e235533..b1bd7205 100644
--- a/README.md
+++ b/README.md
@@ -173,9 +173,13 @@ to run the tests.
 The easiest way to get a sense of how to use neural-fortran is to look at
 examples, in increasing level of complexity:
 
-1. [simple](example/simple.f90): Approximating a simple, constant data relationship
+1. [simple](example/simple.f90): Approximating a simple, constant data
+   relationship
 2. [sine](example/sine.f90): Approximating a sine function
-3. [mnist](example/mnist.f90): Hand-written digit recognition using the MNIST dataset
+3. [mnist](example/mnist.f90): Hand-written digit recognition using the MNIST
+   dataset
+4. [cnn](example/cnn.f90): Creating and running forward a simple CNN using
+   `input`, `conv2d`, `maxpool2d`, `flatten`, and `dense` layers.
 
 The examples also show you the extent of the public API that's meant to be
 used in applications, i.e. anything from the `nf` module.
From fa73fb764833760f38f63f85d0e0a7e0e942ffbc Mon Sep 17 00:00:00 2001
From: Milan Curcic
Date: Fri, 20 May 2022 17:38:54 -0400
Subject: [PATCH 17/17] Test that we can chain input3d with dense using flatten

---
 test/test_flatten_layer.f90 | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/test/test_flatten_layer.f90 b/test/test_flatten_layer.f90
index b8d66928..cc780acd 100644
--- a/test/test_flatten_layer.f90
+++ b/test/test_flatten_layer.f90
@@ -1,13 +1,14 @@
 program test_flatten_layer
 
   use iso_fortran_env, only: stderr => error_unit
-  use nf, only: flatten, input, layer
+  use nf, only: dense, flatten, input, layer, network
   use nf_flatten_layer, only: flatten_layer
   use nf_input3d_layer, only: input3d_layer
 
   implicit none
 
   type(layer) :: test_layer, input_layer
+  type(network) :: net
   real, allocatable :: input_data(:,:,:), gradient(:,:,:)
   real, allocatable :: output(:)
   logical :: ok = .true.
@@ -66,6 +67,18 @@ program test_flatten_layer
     write(stderr, '(a)') 'flatten layer correctly propagates backward.. failed'
   end if
 
+  net = network([ &
+    input([1, 28, 28]), &
+    flatten(), &
+    dense(10) &
+  ])
+
+  ! Test that the output layer receives 784 elements in the input
+  if (.not. all(net % layers(3) % input_layer_shape == [784])) then
+    ok = .false.
+    write(stderr, '(a)') 'flatten layer correctly chains input3d to dense.. failed'
+  end if
+
   if (ok) then
     print '(a)', 'test_flatten_layer: All tests passed.'
   else