Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Refactor block.py #270

Draft
wants to merge 7 commits into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 15 additions & 0 deletions CHANGES.rst
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,10 @@ Release history
- Added a ``timers`` attribute to ``Simulator`` that tracks the wall time
taken by various parts of the model, including build time and run time.
(`#260 <https://github.com/nengo/nengo-loihi/pull/260>`__)
- Added the ``pop_type`` configuration option to the ``Connection`` config.
See `nengo_loihi.add_params
<https://www.nengo.ai/nengo-loihi/api.html#nengo_loihi.add_params>`__
for details. (`#261 <https://github.com/nengo/nengo-loihi/pull/261>`__)

**Changed**

Expand All @@ -43,11 +47,22 @@ Release history
- Added the ``add_to_container`` argument to ``DecodeNeurons.get_ensemble``,
which makes it easier to add a decode neurons ensemble to a network.
(`#260 <https://github.com/nengo/nengo-loihi/pull/260>`__)
- ``Convolution`` transforms with ``channels_last=True`` now work with outputs
up to 1024 neurons.
(`#261 <https://github.com/nengo/nengo-loihi/pull/261>`__)

**Fixed**

- We no longer create a spike generator if we are communicating through Snips.
(`#260 <https://github.com/nengo/nengo-loihi/pull/260>`__)
- Fixed an issue in which ignored axons were still having an effect in
convolutional networks where not all input pixels are used in the output.
(`#261 <https://github.com/nengo/nengo-loihi/pull/261>`__)
- Fixed an issue that prevented population spikes from being sent to the chip
  when ``precompute=True``. (`#261 <https://github.com/nengo/nengo-loihi/pull/261>`__)
- Fixed a bug preventing making sparse connections to an ensemble.
(`#245 <https://github.com/nengo/nengo-loihi/issues/245>`__,
`#246 <https://github.com/nengo/nengo-loihi/pull/246>`__)

0.10.0 (November 25, 2019)
==========================
Expand Down
109 changes: 62 additions & 47 deletions nengo_loihi/block.py
Original file line number Diff line number Diff line change
Expand Up @@ -282,12 +282,25 @@ def configure_relu(self, tau_ref=0.0, vth=1, dt=0.001):
class Axon:
"""A group of axons targeting a specific Synapse object.

Parameters
----------
n_axons : int
The number of outgoing axons.
target : Synapse
Target synapses for these axons.
compartment_map : array_like (``n_compartments``,)
Indices indicating which target axon each compartment maps to.
If < 0, the corresponding compartment will not be used with these axons.
atoms : array_like (``n_compartments``,)
Atom (weight index) associated with each compartment.

Attributes
----------
compartment_atoms : list of length ``block.n_neurons``
Atom (weight index) associated with each block compartment.
Atom (weight index) associated with each compartment.
compartment_map : list of length ``block.n_neurons``
Index of the axon in ``target`` targeted by each block compartment.
Indices indicating which target axon each compartment maps to.
If < 0, the corresponding compartment will not be used with these axons.
n_axons : int
The number of outgoing axons.
target : Synapse
Expand All @@ -302,33 +315,38 @@ class Spike:

Parameters
----------
axon_id : int
axon_idx : int
The index of the axon within the targeted Synapse object.
atom : int, optional (Default: 0)
An index into the target Synapse weights. This allows spikes
targeting a particular axon to use different weights.
"""

__slots__ = ["axon_id", "atom"]
__slots__ = ["axon_idx", "atom"]

def __init__(self, axon_id, atom=0):
self.axon_id = axon_id
def __init__(self, axon_idx, atom=0):
self.axon_idx = axon_idx
self.atom = atom

def __repr__(self):
return "%s(axon_id=%d, atom=%d)" % (
return "%s(axon_idx=%d, atom=%d)" % (
type(self).__name__,
self.axon_id,
self.axon_idx,
self.atom,
)

def __init__(self, n_axons, label=None):
def __init__(self, n_axons, target, compartment_map, atoms=None, label=None):
self.n_axons = n_axons
self.target = target
self.compartment_map = np.asarray(compartment_map, dtype=int)
self.compartment_atoms = (
np.zeros(self.compartment_map.size, dtype=int)
if atoms is None
else np.asarray(atoms, dtype=int)
)
self.label = label

self.target = None
self.compartment_map = None
self.compartment_atoms = None
assert self.compartment_map.ndim == self.compartment_atoms.ndim == 1

def __str__(self):
return "%s(%s)" % (type(self).__name__, self.label if self.label else "")
Expand All @@ -346,43 +364,15 @@ def axon_slots(self):
"""The total number of axon_cfg slots used by all axons."""
return self.slots_per_axon * self.n_axons

def map_axon(self, compartment_idxs):
return (
self.compartment_map[compartment_idxs]
if self.compartment_map is not None
else compartment_idxs
)

def map_atoms(self, compartment_idxs):
return (
self.compartment_atoms[compartment_idxs]
if self.compartment_atoms is not None
else [0 for _ in compartment_idxs]
)

def map_spikes(self, compartment_idxs):
axon_ids = self.map_axon(compartment_idxs)
atoms = self.map_atoms(compartment_idxs)
axon_ids = self.compartment_map[compartment_idxs]
atoms = self.compartment_atoms[compartment_idxs]

return [
self.Spike(axon_id, atom=atom) if axon_id >= 0 else None
for axon_id, atom in zip(axon_ids, atoms)
]

def set_compartment_axon_map(self, target_axons, atoms=None):
"""Set mapping from compartments to axons in target.

Parameters
----------
target_axons : array_like (``n_compartments``,)
Indices indicating which target axon each compartment maps to.
If < 0, the corresponding compartment will not be used with these
axons.
atoms : array_like (``n_compartments``,)
Atoms to use for each compartment. Use only if ``pop_type != 0``.
"""
self.compartment_map = target_axons
self.compartment_atoms = atoms


class SynapseConfig(Config):
INDEX_BITS_MAP = d(b"WzAsIDYsIDcsIDgsIDksIDEwLCAxMSwgMTJd", "list_int")
Expand Down Expand Up @@ -569,21 +559,31 @@ def __str__(self):
return "%s(%s)" % (type(self).__name__, self.label if self.label else "")

def atom_bits(self):
"""Number of bits needed to represent the atom for incoming spikes."""
max_populations = max(w.shape[0] for w in self.weights)
return int(np.ceil(np.log2(max_populations)))

def atom_bits_extra(self):
atom_bits = self.atom_bits()
assert atom_bits <= d(b"OQ==", int), "Too many atom bits"
return max(atom_bits - d(b"NQ==", int), 0)
"""Number of extra bits needed for the atom for incoming pop16 spikes."""
if self.pop_type == 16:
atom_bits = self.atom_bits()
assert atom_bits <= d(b"OQ==", int), "Too many atom bits"
return max(atom_bits - d(b"NQ==", int), 0)
else:
return 0 # meaningless if pop_type != 16

def axon_bits(self):
"""Number of bits available to represent the target axon on incoming spikes."""
if self.pop_type == 16:
return d(b"MTA=", int) - self.atom_bits_extra()
else:
return d(b"MTI=", int)

def axon_compartment_base(self, axon_idx):
"""Offset for compartment indices for a particular axon.

A return value of ``None`` indicates the axon is unused.
"""
if self.axon_compartment_bases is None:
return 0
base = self.axon_compartment_bases[axon_idx]
Expand All @@ -592,31 +592,37 @@ def axon_compartment_base(self, axon_idx):
return base if base >= 0 else None

def axon_populations(self, axon_idx):
"""Number of populations (atom values) for a particular axon."""
weight_idx = self.axon_weight_idx(axon_idx)
return self.weights[weight_idx].shape[0]

def axon_weight_idx(self, axon_idx):
"""Index of weights in weight array for a particular axon."""
return (
self.axon_to_weight_map[axon_idx]
if self.axon_to_weight_map is not None
else axon_idx
)

def axon_weights_indices(self, axon_idx, atom=0):
"""The weights and indices for a particular axon (and atom, if applicable)."""
weight_idx = self.axon_weight_idx(axon_idx)
w = self.weights[weight_idx]
i = self.indices[weight_idx]
return w[atom, :], i[atom, :]

def bits(self):
"""The total number of bits used by all weights in this Synapse."""
return sum(self.synapse_cfg.bits_per_axon(w.size) for w in self.weights)

def format(self, **kwargs):
"""Modify the SynapseConfig format of this Synapse."""
if self.synapse_cfg is None:
self.synapse_cfg = SynapseConfig()
self.synapse_cfg.set(**kwargs)

def idx_bits(self):
"""The number of index bits required for each weight entry."""
bits = int(np.ceil(np.log2(self.max_ind() + 1)))
assert (
bits <= SynapseConfig.INDEX_BITS_MAP[-1]
Expand All @@ -625,13 +631,19 @@ def idx_bits(self):
return bits

def idxs_per_synapse(self):
"""The number of axon indices (slots) required for each incoming axon."""
return d(b"Mg==", int) if self.learning else d(b"MQ==", int)

def max_abs_weight(self):
"""The maximum absolute value of all the weights in this Synapse."""
return max(np.abs(w).max() if w.size > 0 else -np.inf for w in self.weights)

def max_ind(self):
return max(i.max() if len(i) > 0 else -1 for i in self.indices)
"""The maximum compartment index in weight memory.

Does not include ``axon_compartment_base``.
"""
return max(i.max() if i.size > 0 else -1 for i in self.indices)

def _set_weights_indices(self, weights, indices=None):
weights = [np.array(w, copy=False, dtype=np.float32, ndmin=2) for w in weights]
Expand Down Expand Up @@ -660,6 +672,7 @@ def _set_weights_indices(self, weights, indices=None):
self.indices = indices

def set_weights(self, weights):
"""Set dense or sparse weights on this Synapse."""
if isinstance(weights, scipy.sparse.spmatrix):
csr = weights.tocsr()
weights_by_row, idxs_by_row = [], []
Expand Down Expand Up @@ -689,6 +702,7 @@ def set_weights(self, weights):
def set_learning(
self, learning_rate=1.0, tracing_tau=2, tracing_mag=1.0, wgt_exp=4
):
"""Set the learning parameters for this Synapse."""
assert tracing_tau == int(tracing_tau), "tracing_tau must be integer"

self.learning = True
Expand All @@ -707,6 +721,7 @@ def set_learning(
def set_population_weights(
self, weights, indices, axon_to_weight_map, compartment_bases, pop_type=None
):
"""Set population weights on this Synapse."""
self._set_weights_indices(weights, indices)
self.axon_to_weight_map = axon_to_weight_map
self.axon_compartment_bases = compartment_bases
Expand Down
Loading