Skip to content

Commit

Permalink
Merge branch 'master' into OSI_Approved_Apache_Software_License
Browse files Browse the repository at this point in the history
  • Loading branch information
Christian-B authored May 26, 2023
2 parents 3cc361d + 6737d2e commit 17faab3
Show file tree
Hide file tree
Showing 19 changed files with 7,770 additions and 20 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/python_actions.yml
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: [3.7, 3.8, 3.9, "3.10", "3.11"]
python-version: [3.8, 3.9, "3.10", "3.11"]

steps:
- name: Checkout
Expand Down
11 changes: 11 additions & 0 deletions c_code/init_i.c
Original file line number Diff line number Diff line change
Expand Up @@ -291,6 +291,17 @@ void var_init (uint reset_examples)
{
example_inx = 0;
}
else
{
if (xcfg.training)
{
example_inx = train_cnt;
}
else
{
example_inx = test_cnt;
}
}

// initialise example counter
example_cnt = 0;
Expand Down
11 changes: 11 additions & 0 deletions c_code/init_s.c
Original file line number Diff line number Diff line change
Expand Up @@ -274,6 +274,17 @@ void var_init (uint reset_examples)
{
example_inx = 0;
}
else
{
if (xcfg.training)
{
example_inx = train_cnt;
}
else
{
example_inx = test_cnt;
}
}

// initialise example counter
example_cnt = 0;
Expand Down
11 changes: 11 additions & 0 deletions c_code/init_t.c
Original file line number Diff line number Diff line change
Expand Up @@ -583,6 +583,17 @@ void var_init (uint reset_examples, uint reset_epochs_trained)
{
example_inx = 0;
}
else
{
if (xcfg.training)
{
example_inx = train_cnt;
}
else
{
example_inx = test_cnt;
}
}

// initialise example counter
example_cnt = 0;
Expand Down
11 changes: 11 additions & 0 deletions c_code/init_w.c
Original file line number Diff line number Diff line change
Expand Up @@ -338,6 +338,17 @@ void var_init (uint init_weights, uint reset_examples)
{
example_inx = 0;
}
else
{
if (xcfg.training)
{
example_inx = train_cnt;
}
else
{
example_inx = test_cnt;
}
}

// initialise example counter
example_cnt = 0;
Expand Down
2 changes: 2 additions & 0 deletions c_code/input.c
Original file line number Diff line number Diff line change
Expand Up @@ -88,6 +88,8 @@ uchar net_stop; // network stop decision

uint epoch; // current training iteration
uint example_cnt; // example count in epoch
uint train_cnt; // latest example trained
uint test_cnt; // latest example tested
uint example_inx; // current example index
uint evt; // current event in example
uint num_events; // number of events in current example
Expand Down
2 changes: 2 additions & 0 deletions c_code/mlp_externs.h
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,8 @@ extern uchar net_stop; // network stop decision

extern uint epoch; // current training/testing iteration
extern uint example_cnt; // example count in epoch
extern uint train_cnt; // latest example trained
extern uint test_cnt; // latest example tested
extern uint example_inx; // current example index
extern uint evt; // current event in example
extern uint max_evt; // the last event reached in the current example
Expand Down
10 changes: 10 additions & 0 deletions c_code/process_i.c
Original file line number Diff line number Diff line change
Expand Up @@ -287,6 +287,16 @@ void i_advance_example (void)
// prepare for next epoch,
epoch++;

// record the last example presented
if (xcfg.training)
{
train_cnt = example_inx;
}
else
{
test_cnt = example_inx;
}

// access network stop flag with interrupts disabled,
uint cpsr = spin1_int_disable ();

Expand Down
10 changes: 10 additions & 0 deletions c_code/process_s.c
Original file line number Diff line number Diff line change
Expand Up @@ -334,6 +334,16 @@ void s_advance_example (void)
// prepare for next epoch,
epoch++;

// record the last example presented
if (xcfg.training)
{
train_cnt = example_inx;
}
else
{
test_cnt = example_inx;
}

// access network stop flag with interrupts disabled,
uint cpsr = spin1_int_disable ();

Expand Down
10 changes: 10 additions & 0 deletions c_code/process_t.c
Original file line number Diff line number Diff line change
Expand Up @@ -421,6 +421,16 @@ void t_advance_example (void)
// prepare for next epoch,
epoch++;

// record the last example presented
if (xcfg.training)
{
train_cnt = example_inx;
}
else
{
test_cnt = example_inx;
}

// check if stage done,
if (tcfg.is_last_output)
{
Expand Down
10 changes: 10 additions & 0 deletions c_code/process_w.c
Original file line number Diff line number Diff line change
Expand Up @@ -688,6 +688,16 @@ void w_advance_example (void)
// prepare for next epoch,
epoch++;

// record the last example presented
if (xcfg.training)
{
train_cnt = example_inx;
}
else
{
test_cnt = example_inx;
}

// reset example count for next epoch,
example_cnt = 0;

Expand Down
2 changes: 2 additions & 0 deletions c_code/sum.c
Original file line number Diff line number Diff line change
Expand Up @@ -57,6 +57,8 @@ uchar net_stop; // network stop decision

uint epoch; // current training iteration
uint example_cnt; // example count in epoch
uint train_cnt; // latest example trained
uint test_cnt; // latest example tested
uint example_inx; // current example index
uint evt; // current event in example
uint num_events; // number of events in current example
Expand Down
2 changes: 2 additions & 0 deletions c_code/threshold.c
Original file line number Diff line number Diff line change
Expand Up @@ -111,6 +111,8 @@ uchar net_stop; // network stop decision

uint epoch; // current training iteration
uint example_cnt; // example count in epoch
uint train_cnt; // latest example trained
uint test_cnt; // latest example tested
uint example_inx; // current example index
uint evt; // current event in example
uint max_evt; // the last event reached in the current example
Expand Down
2 changes: 2 additions & 0 deletions c_code/weight.c
Original file line number Diff line number Diff line change
Expand Up @@ -71,6 +71,8 @@ uchar net_stop; // network stop decision

uint epoch; // current training iteration
uint example_cnt; // example count in epoch
uint train_cnt; // latest example trained
uint test_cnt; // latest example tested
uint example_inx; // current example index
uint evt; // current event in example
uint num_events; // number of events in current example
Expand Down
129 changes: 129 additions & 0 deletions examples/HOW_TO_CREATE_A_MODEL_SCRIPT.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,129 @@
Initial documentation on how to create a PDP2 model as a python script. **Please note that this file is incomplete.**

Creating a model
================

mdl\_name = MLPNetwork (net\_type = ...,
intervals = ...,
ticks\_per\_interval = ...
)

net_type: the type of network
-expects an object of type MLPNetworkTypes: FEED\_FWD, SIMPLE\_REC, RBPTT, CONTINUOUS
intervals: the number of time intervals for which each example will be run
-expects an integer
-default value is 1
ticks_per_interval: the number of ticks, or subdivisions per time interval for CONTINUOUS networks only
-expects an integer
-default value is 1
-for non-continuous networks, this should be set to 1

Adding layers to the network
============================

gp\_name = mdl\_name.group (units = ...,
group_type = [LIST],
input_funcs = [LIST],
output_funcs = [LIST],
label = ...
)

units: the number of units in the layer
-expects an integer
group\_type: specifies whether the layer is input, output, hidden or bias
-expects a list of values of type MLPGroupType: BIAS, INPUT, OUTPUT, HIDDEN
-default value is HIDDEN
input\_funcs: specifies the functions used to compute the input to each unit of the group
-expects a list of values of type MLPInputProcs: IN\_INTEGR, IN\_SOFT\_CLAMP, IN\_NONE
-IN\_INTEGR integrates inputs over time so that they change gradually
-IN\_SOFT\_CLAMP adds a factor to the input which pulls output values towards those of the input
-default value is IN_NONE
output_funcs: specifies the functions to be applied to the output of each unit
-expects a list of values of type MLPOutputProcs: OUT\_LOGISTIC, OUT\_INTEGR, OUT\_HARD\_CLAMP, OUT\_WEAK\_CLAMP, OUT\_BIAS, OUT\_NONE
-OUT\_LOGISTIC computes the outputs of the unit
-OUT\_INTEGR integrates outputs over time so they change gradually
-OUT\_HARD\_CLAMP clamps the output value to the input value
-OUT\_WEAK\_CLAMP shifts the output value towards the input value by a certain amount
-OUT\_BIAS clamps the output of the unit to 1
-bias units have OUT\_BIAS by default
-input groups have OUT\_HARD\_CLAMP by default
-hidden and output groups have OUT\_LOGISTIC by default
-output groups also have OUT\_INTEGR by default unless there is an input integrator
label: takes a string label that can be used to refer to the group

Creating links between the layers
==================================

mdl\_name.link (pre\_link\_group = ...,
post\_link\_group = ...,
label = ...
)
pre\_link\_group: the group from which the link originates
post\_link\_group: the group to which the link projects
label: takes a string label that can be used to refer to this group
-links from bias unit to hidden and output groups are created by default

Create, read in and set parameters for example set
==================================================
//TO DO

Set other network parameters
============================

mdl\_name.set (num_presentations = ...,
train_group_crit = ...,
test_group_crit = ...,
learning_rate = ...,
weight_decay = ...,
momentum = ...)

num\_presentations: the number of times you would like the example set to be presented in total
-initially this sets num\_updates to the value specified in num\_presentations
-default value is MLPConstants.DEF\_NUM\_UPDATES, which is 1
train\_group\_crit: the criterion used to stop training - an example is considered correct when the difference between output and target for each unit in the group is less than this value
test\_group\_crit: the criterion used to stop testing - an example is considered correct when the difference between output and target for each unit in the group is less than this value
learning\_rate: weight changes are scaled by this amount
-default value is MLPConstants.DEF_LEARNING_RATE, which is 0.1
weight\_decay: weights are reduced by this proportion after each weight update
-default value is MLPConstants.DEF_WEIGHT_DECAY, which is 0
momentum: previous weight changes are carried over by this amount into the next step
-default value is MLPConstants.DEF_MOMENTUM, which is 0.9

Set recording option
====================
//TO DO

Read the weights file
=====================
//TO DO

Train the network
=================

mdl\_name.train (update\_function = ...,
num\_examples = ...,
num\_updates = ...,
reset\_examples = ...
)

update\_function: the function to be used when weight updates are calculated
-expects a value of type MLPUpdateFuncs: UPD_STEEPEST, UPD_MOMENTUM, UPD_DOUGSMOMENTUM
-defaults to UPD_DOUGSMOMENTUM
num\_examples: the number of examples to be run before a weight update
num\_updates: the total number of weight updates to be performed
-if num\_examples is set, but num\_updates is not, num\_updates is re-calculated so that the entire example set is presented the number of times specified by num\_presentations
reset\_examples: if the number of examples run during the training phase (i.e. num\_examples * num\_updates) is smaller than the total number of examples in the example set, this parameter determines whether or not the next training phase will start from the next example in the set, or return to the first example
-default value is False

Test the network
================

mdl\_name.test (num\_examples = ...,
reset\_examples = ...
)

num\_examples: the number of examples to be tested
reset\_examples: if num\_examples is smaller than the total number of examples in the example set, this parameter determines whether or not the next testing phase will start from the next example in the set, or return to the first example
-default value is True

25 changes: 14 additions & 11 deletions examples/simple_past_tense/simple_past_tense.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,12 @@
# simple_past_tense
#
# a simple model that learns to associate the present tense of English verbs with
# the appropriate past tense form e.g. aching-ached
# the appropriate past tense form e.g. aching-ached. There are two example files for
# this model - a short one with 40 items (simple_past_tense_40_items.ex) and a longer
# containing 1868 verbs taken from the CELEX corpus (simple_past_tense_1868_items.ex).
# Verbs are represented as strings of consonants and vowels in the form CCCVVCC-VC.
# Each vowel or consonant is coded as a string of 24 bits using the system for coding
# English phonetics developed by Harm (1998).
#
#-----------------------------------------------------------

Expand Down Expand Up @@ -52,8 +57,9 @@
# instantiate network example set
set1 = simple_past_tense.example_set (label = "set1")

# read Lens-style examples file
set1.read_Lens_examples_file ("simple_past_tense.ex")
# read Lens-style examples file - choose the long or the short version
set1.read_Lens_examples_file ("simple_past_tense_40_items.ex")
#set1.read_Lens_examples_file ("simple_past_tense_1868_items.ex")

# set example set parameters
set1.set (grace_time = 1.0,
Expand Down Expand Up @@ -86,14 +92,11 @@
# generate Lens-style output file
simple_past_tense.write_Lens_output_file ("%s_test.out" % model)

# train using steepest descent
simple_past_tense.train (update_function = MLPUpdateFuncs.UPD_STEEPEST)

# generate Lens-style output file
simple_past_tense.write_Lens_output_file ("%s_train.out" % model)

# do a final test of the network
simple_past_tense.test ()
# do 10 loops of training using steepest descent
for x in range (1, 11):
simple_past_tense.train (update_function = MLPUpdateFuncs.UPD_STEEPEST)
# test the network
simple_past_tense.test ()

# generate Lens-style output file
simple_past_tense.write_Lens_output_file ("%s_train_test.out" % model)
Expand Down
Loading

0 comments on commit 17faab3

Please sign in to comment.