Commit 31e8826
Fixed a bug in logging objectives and circuits. Working on template configs some more
alex404 committed Oct 13, 2024
1 parent c35076d commit 31e8826
Showing 5 changed files with 38 additions and 15 deletions.
29 changes: 20 additions & 9 deletions resources/config_templates/user/brain/deep-autoencoder.yaml
@@ -12,10 +12,11 @@ connections:
- ["vision", "retina"] # Input to retina
- ["retina","thalamus"] # Retina to thalamus
- ["thalamus","visual_cortex"] # Thalamus to visual cortex
- ["visual_cortex", "pfc"] # Visual cortex to prefrontal cortex
- ["pfc", "classifier"] # Prefrontal cortex to classifier
- ["pfc", "pfc_decoder"] # pfc to FC decoder
- ["pfc_decoder", "decoder"] # FC decoder to convoluational decoder
- ["visual_cortex", "inferotemporal"] # Visual cortex to inferotemporal cortex
- ["inferotemporal", "prefrontal"]
- ["prefrontal", "classifier"] # Prefrontal cortex to classifier
- ["inferotemporal", "inferotemporal_decoder"] # prefrontal to FC decoder
- ["inferotemporal_decoder", "decoder"] # FC decoder to convoluational decoder

# Define the individual nodes (neural circuits) of the network. Many circuit
# parameters are interpolated from the experiment config.
@@ -50,22 +51,32 @@ circuits:
act_name: ${activation}
layer_names: ["v1"] # Primary Visual Cortex

- # Prefrontal Cortex: high-level cognitive processing
- pfc:
+ # Inferotemporal Cortex: Associations
+ inferotemporal:
_target_: retinal_rl.models.circuits.fully_connected.FullyConnectedEncoder
output_shape:
- - 128 # Size of the latent representation
+ - 64 # Size of the latent representation
hidden_units:
- 64 # Number of hidden units
act_name: ${activation}

# Prefrontal Cortex: high-level cognitive processing
- pfc_decoder:
+ prefrontal:
+ _target_: retinal_rl.models.circuits.fully_connected.FullyConnectedEncoder
+ output_shape:
+ - 32 # Size of the latent representation
+ hidden_units:
+ - 32 # Number of hidden units
+ act_name: ${activation}
+
+ # Prefrontal Cortex: high-level cognitive processing
+ inferotemporal_decoder:
_target_: retinal_rl.models.circuits.fully_connected.FullyConnectedDecoder
- output_shape: "pfc.input_shape" # Size of the latent representation
+ output_shape: "inferotemporal.input_shape" # Size of the latent representation
hidden_units:
- 64 # Number of hidden units
act_name: ${activation}

# Decoder: for reconstructing the input from the latent representation
decoder:
_target_: retinal_rl.models.circuits.convolutional.ConvolutionalDecoder
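For orientation, the rewired connection list above routes vision → retina → thalamus → visual_cortex → inferotemporal, which then fans out into a classification branch (prefrontal → classifier) and a reconstruction branch (inferotemporal_decoder → decoder). A minimal standalone sketch that walks the same edge list (an illustration only, not code from the repository):

```python
# Illustration: the new connection list from deep-autoencoder.yaml as a plain
# edge list, traversed to show the two output branches. The circuit names come
# from the diff above; the traversal helper itself is hypothetical.

from collections import defaultdict

edges = [
    ("vision", "retina"),
    ("retina", "thalamus"),
    ("thalamus", "visual_cortex"),
    ("visual_cortex", "inferotemporal"),
    ("inferotemporal", "prefrontal"),
    ("prefrontal", "classifier"),
    ("inferotemporal", "inferotemporal_decoder"),
    ("inferotemporal_decoder", "decoder"),
]

children = defaultdict(list)
for src, dst in edges:
    children[src].append(dst)

def paths_from(node, prefix=()):
    """Enumerate root-to-leaf paths through the circuit graph."""
    prefix = prefix + (node,)
    if not children[node]:
        yield prefix
    for nxt in children[node]:
        yield from paths_from(nxt, prefix)

for path in paths_from("vision"):
    print(" -> ".join(path))
# vision -> ... -> prefrontal -> classifier                (classification head)
# vision -> ... -> inferotemporal_decoder -> decoder       (reconstruction head)
```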
4 changes: 2 additions & 2 deletions resources/config_templates/user/dataset/cifar10.yaml
@@ -24,8 +24,8 @@ imageset:
- ${eval:"1.5 if ${brightness_noise_transform} else 1"}
- _target_: retinal_rl.datasets.transforms.BlurTransform
blur_range:
- ${eval:"0 if ${blur_noise_transform} else 1"}
- ${eval:"2 if ${blur_noise_transform} else 1"}
- ${eval:"0 if ${blur_noise_transform} else 0"}
- ${eval:"2 if ${blur_noise_transform} else 0"}
apply_normalization: true
fixed_transformation: false
multiplier: 1
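The blur_range endpoints above are produced by an ${eval:...} interpolation, so with blur_noise_transform disabled the range now collapses to [0, 0] rather than [1, 1]. A rough sketch of how such an interpolation could resolve, assuming an eval resolver registered along these lines (the project's actual resolver setup may differ):

```python
# Sketch only: how an ${eval:"..."} interpolation could resolve with OmegaConf.
# The resolver registration below is an assumption, not retinal_rl's code.

from omegaconf import OmegaConf

# Assumed resolver: evaluate the interpolated string as a Python expression.
OmegaConf.register_new_resolver("eval", lambda expr: eval(expr))

yaml_src = """
blur_noise_transform: false
blur_range:
  - ${eval:"0 if ${blur_noise_transform} else 0"}
  - ${eval:"2 if ${blur_noise_transform} else 0"}
"""
cfg = OmegaConf.create(yaml_src)

# With the blur transform disabled, both endpoints now resolve to 0.
print(OmegaConf.to_container(cfg.blur_range, resolve=True))  # [0, 0]
```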
@@ -3,7 +3,7 @@ defaults:
- _self_
- override /dataset: cifar10
- override /brain: shallow-autoencoder
- - override /optimizer: recon-weight
+ - override /optimizer: class-recon

# This is the main entry point for control of a retinal-rl experiment. Variables
# created here will be top-level, and defaults can be set for the various parts
@@ -18,6 +18,7 @@ activation: "elu"

recon_weight_retina: 1
recon_weight_thalamus: 0.99
+ recon_weight_cortex: 0.9

shot_noise_transform: True
contrast_noise_transform: True
@@ -21,6 +21,7 @@ goal:
weight: 1
target_circuits: # Circuit parameters to optimize with this optimizer. We train the retina and the decoder exclusively to maximize reconstruction
- decoder
+ - inferotemporal_decoder
mixed:
min_epoch: 0
max_epoch: 100
@@ -31,6 +32,17 @@ goal:
weight: ${eval:'1-${recon_weight_thalamus}'}
target_circuits: # The thalamus is somewhat sensitive to task losses
- thalamus
+ cortex:
+ min_epoch: 0
+ max_epoch: 100
+ losses:
+ - _target_: retinal_rl.models.loss.ReconstructionLoss
+ weight: ${recon_weight_cortex}
+ - _target_: retinal_rl.classification.loss.ClassificationLoss
+ weight: ${eval:'1-${recon_weight_cortex}'}
+ target_circuits: # Visual cortex and downstream layers are driven by the task
+ - visual_cortex
+ - inferotemporal
class:
min_epoch: 0
max_epoch: 100
@@ -40,6 +52,5 @@ goal:
- _target_: retinal_rl.classification.loss.PercentCorrect
weight: 0
target_circuits: # Visual cortex and downstream layers are driven by the task
- - visual_cortex
- - pfc
+ - prefrontal
- classifier
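The new cortex objective mixes reconstruction and classification with complementary weights (recon_weight_cortex = 0.9, so the classification term gets 1 - 0.9 = 0.1) and applies the update only to the visual_cortex and inferotemporal circuits. A generic PyTorch sketch of that pattern, not the repository's Objective/Goal implementation:

```python
# Generic sketch of a weighted two-loss objective restricted to a subset of
# parameters; the circuit stand-ins and layer shapes below are hypothetical.

import torch
import torch.nn as nn
import torch.nn.functional as F

recon_weight_cortex = 0.9  # value introduced in the experiment config above

# Stand-ins for the two target circuits.
visual_cortex = nn.Linear(32, 16)
inferotemporal = nn.Linear(16, 16)

# Only the target circuits' parameters are handed to this optimizer,
# mirroring the target_circuits list of the cortex objective.
target_params = list(visual_cortex.parameters()) + list(inferotemporal.parameters())
optimizer = torch.optim.Adam(target_params, lr=1e-3)

def cortex_objective(reconstruction, target_image, logits, labels):
    """recon_weight * reconstruction loss + (1 - recon_weight) * classification loss."""
    recon_loss = F.mse_loss(reconstruction, target_image)
    class_loss = F.cross_entropy(logits, labels)
    return recon_weight_cortex * recon_loss + (1.0 - recon_weight_cortex) * class_loss

# Dummy batch and stand-in heads, just to show one update step.
x = torch.randn(8, 32)
labels = torch.randint(0, 10, (8,))
recon_head = nn.Linear(16, 32)   # plays the role of the decoder pathway
class_head = nn.Linear(16, 10)   # plays the role of the classifier

features = inferotemporal(visual_cortex(x))
loss = cortex_objective(recon_head(features), x, class_head(features), labels)

optimizer.zero_grad()
loss.backward()
optimizer.step()  # updates only visual_cortex and inferotemporal parameters
```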
2 changes: 1 addition & 1 deletion retinal_rl/models/goal.py
@@ -75,7 +75,7 @@ def __init__(self, brain: Brain, objective_configs: Dict[str, DictConfig]):
instantiate(obj_config) for obj_config in config.losses
]
logger.info(
f"Initalized objective: {objective}, with losses: {[obj.key_name for obj in self.losses[objective]]}, and target circuits: {[circuit_name for circuit_name in self.target_circuits]}"
f"Initialized objective: {objective}, with losses: {[obj.key_name for obj in self.losses[objective]]}, and target circuits: {[circuit_name for circuit_name in self.target_circuits[objective]]}"
)

def evaluate_objective(
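The one-line fix above indexes target_circuits by the objective name, so each log message lists only that objective's circuits instead of the whole mapping. A stripped-down sketch of the assumed structure (a dict keyed by objective name), not the actual Goal class:

```python
# Minimal sketch of the logging fix: target_circuits is assumed to be a dict
# keyed by objective name, as the corrected f-string implies. The example
# objectives and circuit names below are placeholders.

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

losses = {
    "mixed": ["ReconstructionLoss", "ClassificationLoss"],
    "class": ["ClassificationLoss", "PercentCorrect"],
}
target_circuits = {
    "mixed": ["thalamus"],
    "class": ["prefrontal", "classifier"],
}

for objective in losses:
    # Before the fix, the whole target_circuits dict was logged for every
    # objective; indexing by `objective` logs only that objective's circuits.
    logger.info(
        f"Initialized objective: {objective}, with losses: {losses[objective]}, "
        f"and target circuits: {target_circuits[objective]}"
    )
```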
