Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Multidimensional learning #89

Merged
merged 1 commit into from
Sep 28, 2018
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 7 additions & 1 deletion nengo_loihi/loihi_interface.py
Original file line number Diff line number Diff line change
Expand Up @@ -536,9 +536,14 @@ def create_io_snip(self):
# --- generate custom code
# Determine which cores have learning
n_errors = 0
total_error_len = 0
max_error_len = 0
for core in self.board.chips[0].cores: # TODO: don't assume 1 chip
if core.learning_coreid:
error_len = core.groups[0].n // 2
max_error_len = max(error_len, max_error_len)
n_errors += 1
total_error_len += 2 + error_len

n_outputs = 1
probes = []
Expand All @@ -564,6 +569,7 @@ def create_io_snip(self):
code = template.render(
n_outputs=n_outputs,
n_errors=n_errors,
max_error_len=max_error_len,
cores=cores,
probes=probes,
)
Expand All @@ -590,7 +596,7 @@ def create_io_snip(self):
phase="preLearnMgmt",
)

size = self.snip_max_spikes_per_step * 2 + 1 + n_errors*2
size = self.snip_max_spikes_per_step * 2 + 1 + total_error_len
logger.debug("Creating nengo_io_h2c channel")
self.nengo_io_h2c = self.n2board.createChannel(b'nengo_io_h2c',
"int", size)
Expand Down
9 changes: 3 additions & 6 deletions nengo_loihi/simulator.py
Original file line number Diff line number Diff line change
Expand Up @@ -426,9 +426,6 @@ def run_steps(self, steps):
self.handle_chip2host_communications()

logger.info("Waiting for completion")
self.loihi.nengo_io_h2c.write(1, [0])
self.loihi.nengo_io_h2c.write(1, [0])
self.loihi.nengo_io_h2c.write(1, [0])
self.loihi.wait_for_completion()
logger.info("done")
else:
Expand Down Expand Up @@ -490,7 +487,8 @@ def handle_host2chip_communications(self): # noqa: C901
for sender, receiver in self.host2chip_senders.items():
if isinstance(receiver, splitter.PESModulatoryTarget):
for t, x in sender.queue:
x = int(100 * x) # >128 is an issue on chip
x = (100 * x).astype(int)
x = np.clip(x, -100, 100, out=x)
probe = receiver.target
conn = self.model.probe_conns[probe]
dec_cx = self.model.objs[conn]['decoded']
Expand All @@ -503,7 +501,7 @@ def handle_host2chip_communications(self): # noqa: C901

assert coreid is not None

errors.append([coreid, x])
errors.append([coreid, len(x)] + x.tolist())
del sender.queue[:]

else:
Expand Down Expand Up @@ -535,7 +533,6 @@ def handle_host2chip_communications(self): # noqa: C901
assert spike[0] == 0
msg.extend(spike[1:3])
for error in errors:
assert len(error) == 2
msg.extend(error)
self.loihi.nengo_io_h2c.write(len(msg), msg)

Expand Down
19 changes: 14 additions & 5 deletions nengo_loihi/snips/nengo_io.c.template
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@

#define N_OUTPUTS {{ n_outputs }}
#define N_ERRORS {{ n_errors }}
#define MAX_ERROR_LEN {{ max_error_len }}

int guard_io(runState *s) {
return 1;
Expand All @@ -18,7 +19,9 @@ void nengo_io(runState *s) {
int outChannel = getChannelID("nengo_io_c2h");
int32_t count[1];
int32_t spike[2];
int32_t error[2];
int32_t error_info[2];
int32_t error_data[MAX_ERROR_LEN];
int32_t error_index;
int32_t output[N_OUTPUTS];

if (inChannel == -1 || outChannel == -1) {
Expand All @@ -41,11 +44,17 @@ void nengo_io(runState *s) {
}

// Communicate with learning snip
s->userData[0] = N_ERRORS;
error_index = 1;
for (int i=0; i < N_ERRORS; i++) {
readChannel(inChannel, error, 2);
// printf("send error %d.%d\n", error[0], error[1]);
s->userData[0] = error[0];
s->userData[1] = error[1];
readChannel(inChannel, error_info, 2);
readChannel(inChannel, error_data, error_info[1]);
s->userData[error_index] = error_info[0];
s->userData[error_index + 1] = error_info[1];
for (int j=0; j < error_info[1]; j++) {
s->userData[error_index + 2 + j] = error_data[j];
}
error_index += 2 + error_info[1];
}

output[0] = s->time;
Expand Down
141 changes: 86 additions & 55 deletions nengo_loihi/snips/nengo_learn.c
Original file line number Diff line number Diff line change
Expand Up @@ -6,64 +6,95 @@ int guard_learn(runState *s) {
return 1;
}

void nengo_learn(runState *s) {
int core = s->userData[0];
int error = (signed char) s->userData[1];
// Handles passing learning information to the correct learning rules
// to implement PES learning on Loihi.
//
// The required data is passed to this snip from the standard nengo_io
// snip via the userData structure. The data format is as follows:
//
// 0 : n_errors
// the number of learning signals. This is the same as the number
// of Connections in the original Nengo model that terminate on
// a conn.learning_rule.
//
// This indicates how many copies of the following block there will be.
// 1 : core
// The core id for the weights of the first learning connection
// 2 : n_vals
// The number of error signal dimensions.
// 3..3+n_vals : error_sig
// The error signal, which has been multiplied by 100, rounded to an int,
// and clipped to the [-100, 100] range.

void nengo_learn(runState *s) {
int offset = 1;
int error;
int32_t n_errors = s->userData[0];
int32_t cx_idx;
int32_t core;
int32_t n_vals;
NeuronCore *neuron;
neuron = NEURON_PTR((CoreId) {.id=core});

int cx_idx = 0;
for (int error_index=0; error_index < n_errors; error_index++) {
core = s->userData[offset];
n_vals = s->userData[offset+1];
for (int i=0; i < n_vals; i++) {
error = (signed char) s->userData[offset+2+i];
neuron = NEURON_PTR((CoreId) {.id=core});
cx_idx = i;

if (error > 0) {
neuron->stdp_post_state[cx_idx] = \
(PostTraceEntry) {
.Yspike0 = 0,
.Yspike1 = 0,
.Yspike2 = 0,
.Yepoch0 = abs(error),
.Yepoch1 = 0,
.Yepoch2 = 0,
.Tspike = 0,
.TraceProfile = 3,
.StdpProfile = 1
};
neuron->stdp_post_state[cx_idx+1] = \
(PostTraceEntry) {
.Yspike0 = 0,
.Yspike1 = 0,
.Yspike2 = 0,
.Yepoch0 = abs(error),
.Yepoch1 = 0,
.Yepoch2 = 0,
.Tspike = 0,
.TraceProfile = 3,
.StdpProfile = 0
};
} else {
neuron->stdp_post_state[cx_idx] = \
(PostTraceEntry) {
.Yspike0 = 0,
.Yspike1 = 0,
.Yspike2 = 0,
.Yepoch0 = abs(error),
.Yepoch1 = 0,
.Yepoch2 = 0,
.Tspike = 0,
.TraceProfile = 3,
.StdpProfile = 0
};
neuron->stdp_post_state[cx_idx+1] = \
(PostTraceEntry) {
.Yspike0 = 0,
.Yspike1 = 0,
.Yspike2 = 0,
.Yepoch0 = abs(error),
.Yepoch1 = 0,
.Yepoch2 = 0,
.Tspike = 0,
.TraceProfile = 3,
.StdpProfile = 1
};
if (error > 0) {
neuron->stdp_post_state[cx_idx] = \
(PostTraceEntry) {
.Yspike0 = 0,
.Yspike1 = 0,
.Yspike2 = 0,
.Yepoch0 = abs(error),
.Yepoch1 = 0,
.Yepoch2 = 0,
.Tspike = 0,
.TraceProfile = 3,
.StdpProfile = 1
};
neuron->stdp_post_state[cx_idx+n_vals] = \
(PostTraceEntry) {
.Yspike0 = 0,
.Yspike1 = 0,
.Yspike2 = 0,
.Yepoch0 = abs(error),
.Yepoch1 = 0,
.Yepoch2 = 0,
.Tspike = 0,
.TraceProfile = 3,
.StdpProfile = 0
};
} else {
neuron->stdp_post_state[cx_idx] = \
(PostTraceEntry) {
.Yspike0 = 0,
.Yspike1 = 0,
.Yspike2 = 0,
.Yepoch0 = abs(error),
.Yepoch1 = 0,
.Yepoch2 = 0,
.Tspike = 0,
.TraceProfile = 3,
.StdpProfile = 0
};
neuron->stdp_post_state[cx_idx+n_vals] = \
(PostTraceEntry) {
.Yspike0 = 0,
.Yspike1 = 0,
.Yspike2 = 0,
.Yepoch0 = abs(error),
.Yepoch1 = 0,
.Yepoch2 = 0,
.Tspike = 0,
.TraceProfile = 3,
.StdpProfile = 1
};
}
}
offset += 2 + n_vals;
}
}
81 changes: 59 additions & 22 deletions nengo_loihi/tests/test_learning.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,45 +3,47 @@
import pytest


@pytest.mark.hang
@pytest.mark.parametrize('N', [100, 300])
def test_pes_comm_channel(allclose, Simulator, seed, plt, N):
input_fn = lambda t: np.sin(t*2*np.pi)
@pytest.mark.parametrize('n_neurons', [400, 600])
@pytest.mark.parametrize('dims', [1, 3])
def test_pes_comm_channel(allclose, plt, seed, Simulator, n_neurons, dims):
scale = np.linspace(1, 0, dims + 1)[:-1]
input_fn = lambda t: np.sin(t * 2 * np.pi) * scale

with nengo.Network(seed=seed) as model:
stim = nengo.Node(input_fn)

a = nengo.Ensemble(N, 1)
pre = nengo.Ensemble(n_neurons, dims)
post = nengo.Node(None, size_in=dims)

b = nengo.Node(None, size_in=1, size_out=1)

nengo.Connection(stim, a)
nengo.Connection(stim, pre)
conn = nengo.Connection(
a, b, function=lambda x: 0, synapse=0.01,
pre, post,
function=lambda x: np.zeros(dims),
synapse=0.01,
learning_rule_type=nengo.PES(learning_rate=1e-3))

error = nengo.Node(None, size_in=1)
nengo.Connection(b, error)
error = nengo.Node(None, size_in=dims)
nengo.Connection(post, error)
nengo.Connection(stim, error, transform=-1)
nengo.Connection(error, conn.learning_rule)

p_stim = nengo.Probe(stim)
p_a = nengo.Probe(a, synapse=0.02)
p_b = nengo.Probe(b, synapse=0.02)
p_pre = nengo.Probe(pre, synapse=0.02)
p_post = nengo.Probe(post, synapse=0.02)

with Simulator(model, precompute=False) as sim:
sim.run(5.0)

t = sim.trange()
plt.subplot(211)
plt.plot(t, sim.data[p_stim])
plt.plot(t, sim.data[p_a])
plt.plot(t, sim.data[p_b])
plt.plot(t, sim.data[p_pre])
plt.plot(t, sim.data[p_post])

# --- fit input_fn to output, determine magnitude
# The larger the magnitude is, the closer the output is to the input
x = input_fn(t)[t > 4]
y = sim.data[p_b][t > 4][:, 0]
# The larger the magnitude, the closer the output is to the input
x = np.array([input_fn(tt)[0] for tt in t[t > 4]])
y = sim.data[p_post][t > 4][:, 0]
m = np.linspace(0, 1, 21)
errors = np.abs(y - m[:, None]*x).mean(axis=1)
m_best = m[np.argmin(errors)]
Expand All @@ -51,7 +53,42 @@ def test_pes_comm_channel(allclose, Simulator, seed, plt, N):
plt.plot(t[t > 4], y)
plt.plot(t[t > 4], m_best * x, ':')

assert allclose(
sim.data[p_a][t > 0.1], sim.data[p_stim][t > 0.1], atol=0.2, rtol=0.2)
assert errors.min() < 0.3, "Not able to fit correctly"
assert m_best > (0.3 if N < 150 else 0.6)
assert allclose(sim.data[p_pre][t > 0.1],
sim.data[p_stim][t > 0.1],
atol=0.2,
rtol=0.2)
assert np.min(errors) < 0.3, "Not able to fit correctly"
assert m_best > (0.3 if n_neurons / dims < 150 else 0.6)


def test_multiple_pes(allclose, plt, seed, Simulator):
    """Several PES connections should each learn an independent constant.

    Builds one scalar learned connection per dimension, all driven by a
    shared error node, and checks every output dimension converges to
    its own target value.
    """
    n_errors = 5
    targets = np.linspace(-1, 1, n_errors)

    with nengo.Network(seed=seed) as net:
        pre_ea = nengo.networks.EnsembleArray(200, n_ensembles=n_errors)
        errors = nengo.Node(None, size_in=n_errors)
        output = nengo.Node(None, size_in=n_errors)

        target = nengo.Node(targets)
        # error signal is (output - target) for every dimension
        nengo.Connection(target, errors, transform=-1)
        nengo.Connection(output, errors)

        # one independently learned connection per dimension
        for dim, ens in enumerate(pre_ea.ea_ensembles):
            learn_conn = nengo.Connection(
                ens,
                output[dim],
                learning_rule_type=nengo.PES(learning_rate=1e-3),
            )
            nengo.Connection(errors[dim], learn_conn.learning_rule)

        probe = nengo.Probe(output, synapse=0.1)

    with Simulator(net, precompute=False) as sim:
        sim.run(1.0)
    t = sim.trange()

    plt.plot(t, sim.data[probe])
    for target_val, style in zip(targets, plt.rcParams["axes.prop_cycle"]):
        plt.axhline(target_val, **style)

    # after the learning transient, each dimension should sit on its target
    for dim, target_val in enumerate(targets):
        assert allclose(sim.data[probe][t > 0.8, dim], target_val, atol=0.05)
2 changes: 1 addition & 1 deletion nengo_loihi/tests/test_splitter.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@


@pytest.mark.parametrize("pre_dims", [1, 3])
@pytest.mark.parametrize("post_dims", [1])
@pytest.mark.parametrize("post_dims", [1, 3])
@pytest.mark.parametrize("learn", [True, False])
@pytest.mark.parametrize("use_solver", [True, False])
def test_manual_decoders(
Expand Down