Merge pull request #75 from milancurcic/flatten-layer
Implement a flatten layer
milancurcic authored May 23, 2022
2 parents 6fda1a5 + fa73fb7 commit c78c078
Showing 13 changed files with 439 additions and 80 deletions.
6 changes: 4 additions & 2 deletions CMakeLists.txt
@@ -74,6 +74,8 @@ add_library(neural
src/nf/nf_datasets_mnist_submodule.f90
src/nf/nf_dense_layer.f90
src/nf/nf_dense_layer_submodule.f90
src/nf/nf_flatten_layer.f90
src/nf/nf_flatten_layer_submodule.f90
src/nf/nf_input1d_layer.f90
src/nf/nf_input1d_layer_submodule.f90
src/nf/nf_input3d_layer.f90
@@ -102,13 +104,13 @@ string(REGEX REPLACE "^ | $" "" LIBS "${LIBS}")

# tests
enable_testing()
foreach(execid input1d_layer input3d_layer dense_layer conv2d_layer maxpool2d_layer dense_network conv2d_network)
foreach(execid input1d_layer input3d_layer dense_layer conv2d_layer maxpool2d_layer flatten_layer dense_network conv2d_network)
add_executable(test_${execid} test/test_${execid}.f90)
target_link_libraries(test_${execid} neural ${LIBS})
add_test(test_${execid} bin/test_${execid})
endforeach()

foreach(execid mnist simple sine)
foreach(execid cnn mnist simple sine)
add_executable(${execid} example/${execid}.f90)
target_link_libraries(${execid} neural ${LIBS})
endforeach()
9 changes: 7 additions & 2 deletions README.md
@@ -30,6 +30,7 @@ Read the paper [here](https://arxiv.org/abs/1902.06714).
| Dense (fully-connected) | `dense` | `input` (1-d) | 1 |||
| Convolutional (2-d) | `conv2d` | `input` (3-d), `conv2d`, `maxpool2d` | 3 |||
| Max-pooling (2-d) | `maxpool2d` | `input` (3-d), `conv2d`, `maxpool2d` | 3 |||
| Flatten | `flatten` | `input` (3-d), `conv2d`, `maxpool2d` | 1 |||

## Getting started

@@ -172,9 +173,13 @@ to run the tests.
The easiest way to get a sense of how to use neural-fortran is to look at
examples, in increasing level of complexity:

1. [simple](example/simple.f90): Approximating a simple, constant data relationship
1. [simple](example/simple.f90): Approximating a simple, constant data
relationship
2. [sine](example/sine.f90): Approximating a sine function
3. [mnist](example/mnist.f90): Hand-written digit recognition using the MNIST dataset
3. [mnist](example/mnist.f90): Hand-written digit recognition using the MNIST
dataset
4. [cnn](example/cnn.f90): Creating a simple CNN and running a forward pass
   using `input`, `conv2d`, `maxpool2d`, `flatten`, and `dense` layers.

The examples also show you the extent of the public API that's meant to be
used in applications, i.e. anything from the `nf` module.
32 changes: 32 additions & 0 deletions example/cnn.f90
@@ -0,0 +1,32 @@
program cnn

use nf, only: conv2d, dense, flatten, input, maxpool2d, network

implicit none
type(network) :: net
real, allocatable :: x(:,:,:)
integer :: n

print '("Creating a CNN and doing a forward pass")'
print '("(backward pass not implemented yet)")'
print '(60("="))'

net = network([ &
input([3, 32, 32]), &
conv2d(filters=16, kernel_size=3, activation='relu'), & ! (16, 30, 30)
maxpool2d(pool_size=2), & ! (16, 15, 15)
conv2d(filters=32, kernel_size=3, activation='relu'), & ! (32, 13, 13)
maxpool2d(pool_size=2), & ! (32, 6, 6)
flatten(), &
dense(10) &
])

! Print a network summary to the screen
call net % print_info()

allocate(x(3,32,32))
call random_number(x)

print *, 'Output:', net % output(x)

end program cnn
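
The shape annotations in the comments above follow from two rules: a valid
(unpadded) convolution with kernel size k maps a spatial dimension n to
n - k + 1, and max-pooling with pool size p divides it by p. A minimal sketch
of that arithmetic, assuming those rules; this program is illustrative only
and not part of the commit:

program check_shapes
  ! Illustrative only: verifies the layer output shapes annotated in
  ! example/cnn.f90, assuming valid convolutions and non-overlapping pooling.
  implicit none
  integer :: n
  n = 32
  n = n - 3 + 1  ! conv2d, kernel_size=3 -> 30
  n = n / 2      ! maxpool2d, pool_size=2 -> 15
  n = n - 3 + 1  ! conv2d, kernel_size=3 -> 13
  n = n / 2      ! maxpool2d, pool_size=2 -> 6
  print *, 'Flattened size:', 32 * n * n  ! 32 channels * 6 * 6 = 1152
end program check_shapes

The flattened size of 1152 is what the final `dense(10)` layer receives as its
input dimension.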
2 changes: 1 addition & 1 deletion src/nf.f90
@@ -2,7 +2,7 @@ module nf
!! User API: everything an application needs to reference directly
use nf_datasets_mnist, only: label_digits, load_mnist
use nf_layer, only: layer
use nf_layer_constructors, only: conv2d, dense, input, maxpool2d
use nf_layer_constructors, only: conv2d, dense, flatten, input, maxpool2d
use nf_network, only: network
use nf_optimizers, only: sgd
end module nf
75 changes: 75 additions & 0 deletions src/nf/nf_flatten_layer.f90
@@ -0,0 +1,75 @@
module nf_flatten_layer

!! This module provides the concrete flatten layer type.
!! It is used internally by the layer type.
!! It is not intended to be used directly by the user.

use nf_base_layer, only: base_layer

implicit none

private
public :: flatten_layer

type, extends(base_layer) :: flatten_layer

!! Concrete implementation of a flatten (3-d to 1-d) layer.

integer, allocatable :: input_shape(:)
integer :: output_size

real, allocatable :: gradient(:,:,:)
real, allocatable :: output(:)

contains

procedure :: backward
procedure :: forward
procedure :: init

end type flatten_layer

interface flatten_layer
elemental module function flatten_layer_cons() result(res)
!! This function returns the `flatten_layer` instance.
type(flatten_layer) :: res
!! `flatten_layer` instance
end function flatten_layer_cons
end interface flatten_layer

interface

pure module subroutine backward(self, input, gradient)
!! Apply the backward pass to the flatten layer.
!! This reshapes the 1-d gradient into the 3-d shape of the input.
class(flatten_layer), intent(in out) :: self
!! Flatten layer instance
real, intent(in) :: input(:,:,:)
!! Input from the previous layer
real, intent(in) :: gradient(:)
!! Gradient from the next layer
end subroutine backward

pure module subroutine forward(self, input)
!! Propagate the input forward through the layer.
!! Calling this subroutine updates the values of a few data components
!! of `flatten_layer` that are needed for the backward pass.
class(flatten_layer), intent(in out) :: self
!! Flatten layer instance
real, intent(in) :: input(:,:,:)
!! Input from the previous layer
end subroutine forward

module subroutine init(self, input_shape)
!! Initialize the layer data structures.
!!
!! This is a deferred procedure from the `base_layer` abstract type.
class(flatten_layer), intent(in out) :: self
!! Flatten layer instance
integer, intent(in) :: input_shape(:)
!! Shape of the input layer
end subroutine init

end interface

end module nf_flatten_layer
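
For illustration, here is a standalone sketch of how the `layer` type drives
this concrete type internally. It is not part of the commit, and as the
docstring above notes, applications are not meant to use this module directly:

program flatten_demo
  ! Illustrative only: exercises the flatten_layer interface defined above.
  use nf_flatten_layer, only: flatten_layer
  implicit none
  type(flatten_layer) :: f
  real :: x(4,5,6)
  f = flatten_layer()
  call f % init([4, 5, 6])  ! allocates output(120) and gradient(4,5,6)
  call random_number(x)
  call f % forward(x)       ! output now holds the 120 flattened values
  print *, f % output_size, size(f % output)  ! both 120
end program flatten_demo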
48 changes: 48 additions & 0 deletions src/nf/nf_flatten_layer_submodule.f90
@@ -0,0 +1,48 @@
submodule(nf_flatten_layer) nf_flatten_layer_submodule

!! This submodule implements the procedures of the flatten layer type
!! defined in the `nf_flatten_layer` module. It is used internally by the
!! layer type and is not intended to be used directly by the user.

use nf_base_layer, only: base_layer

implicit none

contains

elemental module function flatten_layer_cons() result(res)
type(flatten_layer) :: res
end function flatten_layer_cons


pure module subroutine backward(self, input, gradient)
class(flatten_layer), intent(in out) :: self
real, intent(in) :: input(:,:,:)
real, intent(in) :: gradient(:)
self % gradient = reshape(gradient, shape(input))
end subroutine backward


pure module subroutine forward(self, input)
class(flatten_layer), intent(in out) :: self
real, intent(in) :: input(:,:,:)
self % output = pack(input, .true.)
end subroutine forward


module subroutine init(self, input_shape)
class(flatten_layer), intent(in out) :: self
integer, intent(in) :: input_shape(:)

self % input_shape = input_shape
self % output_size = product(input_shape)

allocate(self % gradient(input_shape(1), input_shape(2), input_shape(3)))
self % gradient = 0

allocate(self % output(self % output_size))
self % output = 0

end subroutine init

end submodule nf_flatten_layer_submodule
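
The forward pass relies on `pack(input, .true.)` returning elements in Fortran
array element (column-major) order, and the backward pass relies on `reshape`
being its exact inverse. A small sketch demonstrating the round trip;
illustrative only, not part of the commit:

program flatten_roundtrip
  ! Illustrative only: pack flattens in column-major order;
  ! reshape with the original shape inverts it exactly.
  implicit none
  real :: x(2,3,2), y(12), dx(2,3,2)
  call random_number(x)
  y = pack(x, .true.)        ! forward: 3-d -> 1-d
  dx = reshape(y, shape(x))  ! backward: 1-d -> 3-d
  print *, 'Exact round trip:', all(dx == x)  ! prints T
end program flatten_roundtrip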
23 changes: 22 additions & 1 deletion src/nf/nf_layer_constructors.f90
@@ -7,7 +7,7 @@ module nf_layer_constructors
implicit none

private
public :: conv2d, dense, input, maxpool2d
public :: conv2d, dense, flatten, input, maxpool2d

interface input

@@ -84,6 +84,27 @@ pure module function dense(layer_size, activation) result(res)
!! Resulting layer instance
end function dense

pure module function flatten() result(res)
!! Flatten (3-d -> 1-d) layer constructor.
!!
!! Use this layer to chain layers with 3-d outputs to layers with 1-d
!! inputs. For example, to chain a `conv2d` or a `maxpool2d` layer
!! with a `dense` layer for a CNN for classification, place a `flatten`
!! layer between them.
!!
!! A flatten layer must not be the first layer in the network.
!!
!! Example:
!!
!! ```
!! use nf, only: flatten, layer
!! type(layer) :: flatten_layer
!! flatten_layer = flatten()
!! ```
type(layer) :: res
!! Resulting layer instance
end function flatten

pure module function conv2d(filters, kernel_size, activation) result(res)
!! 2-d convolutional layer constructor.
!!
73 changes: 40 additions & 33 deletions src/nf/nf_layer_constructors_submodule.f90
@@ -3,6 +3,7 @@
use nf_layer, only: layer
use nf_conv2d_layer, only: conv2d_layer
use nf_dense_layer, only: dense_layer
use nf_flatten_layer, only: flatten_layer
use nf_input1d_layer, only: input1d_layer
use nf_input3d_layer, only: input3d_layer
use nf_maxpool2d_layer, only: maxpool2d_layer
@@ -11,26 +12,26 @@

contains

pure module function input1d(layer_size) result(res)
integer, intent(in) :: layer_size
pure module function conv2d(filters, kernel_size, activation) result(res)
integer, intent(in) :: filters
integer, intent(in) :: kernel_size
character(*), intent(in), optional :: activation
type(layer) :: res
res % name = 'input'
res % layer_shape = [layer_size]
res % input_layer_shape = [integer ::]
allocate(res % p, source=input1d_layer(layer_size))
res % initialized = .true.
end function input1d

res % name = 'conv2d'

pure module function input3d(layer_shape) result(res)
integer, intent(in) :: layer_shape(3)
type(layer) :: res
res % name = 'input'
res % layer_shape = layer_shape
res % input_layer_shape = [integer ::]
allocate(res % p, source=input3d_layer(layer_shape))
res % initialized = .true.
end function input3d
if (present(activation)) then
res % activation = activation
else
res % activation = 'sigmoid'
end if

allocate( &
res % p, &
source=conv2d_layer(filters, kernel_size, res % activation) &
)

end function conv2d


pure module function dense(layer_size, activation) result(res)
@@ -52,27 +53,33 @@ pure module function dense(layer_size, activation) result(res)
end function dense


pure module function conv2d(filters, kernel_size, activation) result(res)
integer, intent(in) :: filters
integer, intent(in) :: kernel_size
character(*), intent(in), optional :: activation
pure module function flatten() result(res)
type(layer) :: res
res % name = 'flatten'
allocate(res % p, source=flatten_layer())
end function flatten

res % name = 'conv2d'

if (present(activation)) then
res % activation = activation
else
res % activation = 'sigmoid'
end if

allocate( &
res % p, &
source=conv2d_layer(filters, kernel_size, res % activation) &
)
pure module function input1d(layer_size) result(res)
integer, intent(in) :: layer_size
type(layer) :: res
res % name = 'input'
res % layer_shape = [layer_size]
res % input_layer_shape = [integer ::]
allocate(res % p, source=input1d_layer(layer_size))
res % initialized = .true.
end function input1d

end function conv2d

pure module function input3d(layer_shape) result(res)
integer, intent(in) :: layer_shape(3)
type(layer) :: res
res % name = 'input'
res % layer_shape = layer_shape
res % input_layer_shape = [integer ::]
allocate(res % p, source=input3d_layer(layer_shape))
res % initialized = .true.
end function input3d

pure module function maxpool2d(pool_size, stride) result(res)
integer, intent(in) :: pool_size
(Diffs for the remaining 5 changed files are not shown.)
