Merge pull request #49 from rouson/ford-commits-only
Replace PR #48: FORD commits only
rouson authored Apr 6, 2022
2 parents 9e269ba + 59092ed commit f49d95f
Showing 9 changed files with 127 additions and 90 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -2,3 +2,4 @@
*.mod
build
data/*/*.dat
doc
13 changes: 13 additions & 0 deletions README.md
@@ -15,6 +15,7 @@ Read the paper [here](https://arxiv.org/abs/1902.06714).
- [Training the network](https://github.com/modern-fortran/neural-fortran#training-the-network)
- [Saving and loading from file](https://github.com/modern-fortran/neural-fortran#saving-and-loading-from-file)
- [MNIST training example](https://github.com/modern-fortran/neural-fortran#mnist-training-example)
* [API documentation](https://github.com/modern-fortran/neural-fortran#api-documentation)
* [Contributing](https://github.com/modern-fortran/neural-fortran#contributing)
* [Contributors](https://github.com/modern-fortran/neural-fortran#contributors)
* [Related projects](https://github.com/modern-fortran/neural-fortran#related-projects)
@@ -369,6 +370,18 @@ for example on 16 cores using [OpenCoarrays](https://github.com/sourceryinstitut
$ cafrun -n 16 ./example_mnist
```

## API documentation

API documentation can be generated with [FORD](https://github.com/Fortran-FOSS-Programmers/ford/).
Assuming you have FORD installed on your system, run

```
ford ford.md
```

from the neural-fortran top-level directory to generate the API documentation in doc/html.
Point your browser to doc/html/index.html to read it.
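
FORD extracts documentation from `!!` comments attached to the entities they describe, which is why the source changes in this commit convert plain `!` comments to `!!`. A minimal sketch of the style (hypothetical module, not part of this commit):

```fortran
module mod_example

  !! Module-level docstring: FORD renders this text as the module description.

  implicit none

contains

  pure function square(x) result(res)
    !! Procedure-level docstring: FORD attaches this text to the function.
    real, intent(in) :: x(:)
    real :: res(size(x))
    res = x**2
  end function square

end module mod_example
```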

## Contributing

neural-fortran is currently a proof-of-concept with potential for
23 changes: 23 additions & 0 deletions ford.md
@@ -0,0 +1,23 @@
project:
summary: A parallel neural net microframework
src_dir: src
output_dir: doc/html
preprocess: true
display: public
         protected
         private
source: true
graph: true
md_extensions: markdown.extensions.toc
coloured_edges: true
sort: permission-alpha
extra_mods: iso_fortran_env:https://gcc.gnu.org/onlinedocs/gfortran/ISO_005fFORTRAN_005fENV.html
            iso_c_binding:https://gcc.gnu.org/onlinedocs/gfortran/ISO_005fC_005fBINDING.html#ISO_005fC_005fBINDING
author: Milan Curcic
print_creation_date: true
creation_date: %Y-%m-%d %H:%M %z
project_github: https://github.com/modern-fortran/neural-fortran
project_download: https://github.com/modern-fortran/neural-fortran/releases
github: https://github.com/modern-fortran

{!README.md!}
26 changes: 13 additions & 13 deletions src/mod_activation.f90
@@ -1,6 +1,6 @@
module mod_activation

! A collection of activation functions and their derivatives.
!! A collection of activation functions and their derivatives.

use mod_kinds, only: ik, rk

@@ -26,14 +26,14 @@ end function activation_function
contains

pure function gaussian(x) result(res)
! Gaussian activation function.
!! Gaussian activation function.
real(rk), intent(in) :: x(:)
real(rk) :: res(size(x))
res = exp(-x**2)
end function gaussian

pure function gaussian_prime(x) result(res)
! First derivative of the Gaussian activation function.
!! First derivative of the Gaussian activation function.
real(rk), intent(in) :: x(:)
real(rk) :: res(size(x))
res = -2 * x * gaussian(x)
@@ -47,7 +47,7 @@ pure function relu(x) result(res)
end function relu

pure function relu_prime(x) result(res)
! First derivative of the REctified Linear Unit (RELU) activation function.
!! First derivative of the REctified Linear Unit (RELU) activation function.
real(rk), intent(in) :: x(:)
real(rk) :: res(size(x))
where (x > 0)
@@ -58,21 +58,21 @@ pure function relu_prime(x) result(res)
end function relu_prime

pure function sigmoid(x) result(res)
! Sigmoid activation function.
!! Sigmoid activation function.
real(rk), intent(in) :: x(:)
real(rk) :: res(size(x))
res = 1 / (1 + exp(-x))
end function sigmoid

pure function sigmoid_prime(x) result(res)
! First derivative of the sigmoid activation function.
!! First derivative of the sigmoid activation function.
real(rk), intent(in) :: x(:)
real(rk) :: res(size(x))
res = sigmoid(x) * (1 - sigmoid(x))
end function sigmoid_prime

pure function step(x) result(res)
! Step activation function.
!! Step activation function.
real(rk), intent(in) :: x(:)
real(rk) :: res(size(x))
where (x > 0)
@@ -83,24 +83,24 @@ pure function step(x) result(res)
end function step

pure function step_prime(x) result(res)
! First derivative of the step activation function.
!! First derivative of the step activation function.
real(rk), intent(in) :: x(:)
real(rk) :: res(size(x))
res = 0
end function step_prime

pure function tanhf(x) result(res)
! Tangent hyperbolic activation function.
! Same as the intrinsic tanh, but must be
! defined here so that we can use procedure
! pointer with it.
!! Tangent hyperbolic activation function.
!! Same as the intrinsic tanh, but must be
!! defined here so that we can use a procedure
!! pointer with it.
real(rk), intent(in) :: x(:)
real(rk) :: res(size(x))
res = tanh(x)
end function tanhf

pure function tanh_prime(x) result(res)
! First derivative of the tanh activation function.
!! First derivative of the tanh activation function.
real(rk), intent(in) :: x(:)
real(rk) :: res(size(x))
res = 1 - tanh(x)**2
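
The activation functions above all match the `activation_function` interface (a rank-1 real array in, a same-size array out), which is what lets `layer_type` hold them behind procedure pointers. A usage sketch (hypothetical driver program, not part of this commit; assumes `sigmoid` and `sigmoid_prime` are public):

```fortran
program demo_activation
  !! Hypothetical driver: evaluate an activation function and its derivative.
  use mod_kinds, only: rk
  use mod_activation, only: sigmoid, sigmoid_prime
  implicit none
  real(rk) :: x(3)
  x = [-1._rk, 0._rk, 1._rk]
  print *, sigmoid(x)        ! elementwise 1 / (1 + exp(-x))
  print *, sigmoid_prime(x)  ! elementwise sigmoid(x) * (1 - sigmoid(x))
end program demo_activation
```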
36 changes: 18 additions & 18 deletions src/mod_layer.f90
@@ -1,6 +1,6 @@
module mod_layer

! Defines the layer type and its methods.
!! Defines the layer type and its methods.

use mod_activation
use mod_kinds, only: ik, rk
@@ -12,13 +12,13 @@ module mod_layer
public :: array1d, array2d, db_init, db_co_sum, dw_init, dw_co_sum, layer_type

type :: layer_type
real(rk), allocatable :: a(:) ! activations
real(rk), allocatable :: b(:) ! biases
real(rk), allocatable :: w(:,:) ! weights
real(rk), allocatable :: z(:) ! arg. to activation function
real(rk), allocatable :: a(:) !! activations
real(rk), allocatable :: b(:) !! biases
real(rk), allocatable :: w(:,:) !! weights
real(rk), allocatable :: z(:) !! arg. to activation function
procedure(activation_function), pointer, nopass :: activation => null()
procedure(activation_function), pointer, nopass :: activation_prime => null()
character(len=:), allocatable :: activation_str ! activation character string
character(len=:), allocatable :: activation_str !! activation character string
contains
procedure, public, pass(self) :: set_activation
end type layer_type
@@ -46,9 +46,9 @@ module mod_layer
contains

type(layer_type) function constructor(this_size, next_size) result(layer)
! Layer class constructor. this_size is the number of neurons in the layer.
! next_size is the number of neurons in the next layer, used to allocate
! the weights.
!! Layer class constructor. this_size is the number of neurons in the layer.
!! next_size is the number of neurons in the next layer, used to allocate
!! the weights.
integer(ik), intent(in) :: this_size, next_size
allocate(layer % a(this_size))
allocate(layer % z(this_size))
@@ -59,21 +59,21 @@ type(layer_type) function constructor(this_size, next_size) result(layer)
end function constructor

pure type(array1d) function array1d_constructor(length) result(a)
! Overloads the default type constructor.
!! Overloads the default type constructor.
integer(ik), intent(in) :: length
allocate(a % array(length))
a % array = 0
end function array1d_constructor

pure type(array2d) function array2d_constructor(dims) result(a)
! Overloads the default type constructor.
!! Overloads the default type constructor.
integer(ik), intent(in) :: dims(2)
allocate(a % array(dims(1), dims(2)))
a % array = 0
end function array2d_constructor

pure subroutine db_init(db, dims)
! Initialises biases structure.
!! Initialises biases structure.
type(array1d), allocatable, intent(in out) :: db(:)
integer(ik), intent(in) :: dims(:)
integer(ik) :: n, nm
@@ -86,7 +86,7 @@ pure subroutine db_init(db, dims)
end subroutine db_init

pure subroutine dw_init(dw, dims)
! Initialises weights structure.
!! Initialises weights structure.
type(array2d), allocatable, intent(in out) :: dw(:)
integer(ik), intent(in) :: dims(:)
integer(ik) :: n, nm
@@ -99,7 +99,7 @@ pure subroutine dw_init(dw, dims)
end subroutine dw_init

subroutine db_co_sum(db)
! Performs a collective sum of bias tendencies.
!! Performs a collective sum of bias tendencies.
type(array1d), allocatable, intent(in out) :: db(:)
integer(ik) :: n
do n = 2, size(db)
@@ -110,7 +110,7 @@ subroutine db_co_sum(db)
end subroutine db_co_sum

subroutine dw_co_sum(dw)
! Performs a collective sum of weights tendencies.
!! Performs a collective sum of weights tendencies.
type(array2d), allocatable, intent(in out) :: dw(:)
integer(ik) :: n
do n = 1, size(dw) - 1
@@ -121,9 +121,9 @@ subroutine dw_co_sum(dw)
end subroutine dw_co_sum

pure elemental subroutine set_activation(self, activation)
! Sets the activation function. Input string must match one of
! provided activation functions, otherwise it defaults to sigmoid.
! If activation not present, defaults to sigmoid.
!! Sets the activation function. The input string must match one of the
!! provided activation functions; otherwise it defaults to sigmoid.
!! If activation is not present, it defaults to sigmoid.
class(layer_type), intent(in out) :: self
character(len=*), intent(in) :: activation
select case(trim(activation))
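
A sketch of building a single layer and selecting its activation (hypothetical driver, not part of this commit; assumes the `constructor` above is exposed through a `layer_type` generic interface and that the components are accessible):

```fortran
program demo_layer
  !! Hypothetical driver: a 3-neuron layer whose weights are sized for a
  !! 2-neuron next layer, with tanh as the activation.
  use mod_kinds, only: ik
  use mod_layer, only: layer_type
  implicit none
  type(layer_type) :: hidden
  hidden = layer_type(3_ik, 2_ik)
  call hidden % set_activation('tanh')
  print *, shape(hidden % w)   ! expect 3 2
end program demo_layer
```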
24 changes: 12 additions & 12 deletions src/mod_mnist.f90
@@ -1,9 +1,9 @@
module mod_mnist

! Procedures to work with MNIST dataset, usable with data format
! as provided in this repo and not the original data format (idx).
!! Procedures to work with the MNIST dataset, using the data format
!! provided in this repo rather than the original (idx) format.

use iso_fortran_env, only: real32 ! TODO make MNIST work with arbitrary precision
use iso_fortran_env, only: real32 !! TODO make MNIST work with arbitrary precision
use mod_io, only: read_binary_file
use mod_kinds, only: ik, rk

@@ -16,20 +16,20 @@ module mod_mnist
contains

pure function digits(x)
! Returns an array of 10 reals, with zeros everywhere
! and a one corresponding to the input number, for example:
! digits(0) = [1., 0., 0., 0., 0., 0., 0., 0., 0., 0.]
! digits(1) = [0., 1., 0., 0., 0., 0., 0., 0., 0., 0.]
! digits(6) = [0., 0., 0., 0., 0., 0., 1., 0., 0., 0.]
!! Returns an array of 10 reals, with zeros everywhere
!! and a one corresponding to the input number, for example:
!! digits(0) = [1., 0., 0., 0., 0., 0., 0., 0., 0., 0.]
!! digits(1) = [0., 1., 0., 0., 0., 0., 0., 0., 0., 0.]
!! digits(6) = [0., 0., 0., 0., 0., 0., 1., 0., 0., 0.]
real(rk), intent(in) :: x
real(rk) :: digits(10)
digits = 0
digits(int(x + 1)) = 1
end function digits

pure function label_digits(labels) result(res)
! Converts an array of MNIST labels into a form
! that can be input to the network_type instance.
!! Converts an array of MNIST labels into a form
!! that can be input to the network_type instance.
real(rk), intent(in) :: labels(:)
real(rk) :: res(10, size(labels))
integer(ik) :: i
@@ -40,7 +40,7 @@ end function label_digits

subroutine load_mnist(tr_images, tr_labels, te_images,&
te_labels, va_images, va_labels)
! Loads the MNIST dataset into arrays.
!! Loads the MNIST dataset into arrays.
real(rk), allocatable, intent(in out) :: tr_images(:,:), tr_labels(:)
real(rk), allocatable, intent(in out) :: te_images(:,:), te_labels(:)
real(rk), allocatable, intent(in out), optional :: va_images(:,:), va_labels(:)
@@ -69,7 +69,7 @@ subroutine load_mnist(tr_images, tr_labels, te_images,&
end subroutine load_mnist

subroutine print_image(images, labels, n)
! Prints a single image and label to screen.
!! Prints a single image and label to screen.
real(rk), intent(in) :: images(:,:), labels(:)
integer(ik), intent(in) :: n
real(rk) :: image(28, 28)
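
A sketch of the one-hot encoding these procedures provide (hypothetical driver, not part of this commit; assumes `label_digits` is public):

```fortran
program demo_mnist_labels
  !! Hypothetical driver: one-hot encode two MNIST labels.
  use mod_kinds, only: rk
  use mod_mnist, only: label_digits
  implicit none
  real(rk) :: labels(2), onehot(10, 2)
  labels = [0._rk, 6._rk]
  onehot = label_digits(labels)
  print *, onehot(:, 2)   ! a 1 in position 7 (digit 6), zeros elsewhere
end program demo_mnist_labels
```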