diff --git a/.github/workflows/code_check.yml b/.github/workflows/code_check.yml new file mode 100644 index 00000000..dcd58524 --- /dev/null +++ b/.github/workflows/code_check.yml @@ -0,0 +1,48 @@ +name: Code Checking + +on: + push: + paths: + - '**/*.py' + pull_request: + branches: + - master + +env: + singularity_image: oras://ghcr.io/berenslab/retinal-rl:singularity-image-latest + sif_file: retinal-rl_singularity-image-latest.sif + +jobs: + check: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Fetch all branches + run: git fetch --all + + - name: Setup Apptainer + uses: eWaterCycle/setup-apptainer@v2 + with: + apptainer-version: 1.3.0 + - name: Cache Singularity Image + id: cache-singularity + uses: actions/cache@v3 + with: + path: ${{ env.sif_file }} + key: ${{ runner.os }}-singularity-${{ hashFiles('resources/retinal-rl.def') }} + restore-keys: | + ${{ runner.os }}-singularity-${{ hashFiles('resources/retinal-rl.def') }} + ${{ runner.os }}-singularity- + - name: Pull Singularity container + if: steps.cache-singularity.outputs.cache-hit != 'true' + run: | + singularity registry login --username ${{ github.actor }} --password ${{ secrets.GITHUB_TOKEN }} oras://ghcr.io + singularity pull ${{ env.sif_file }} ${{ env.singularity_image }} + + - name: Run Pylint + run: | + singularity exec ${{ env.sif_file }} \ + pylint $(git diff --name-only origin/master...HEAD -- '*.py') diff --git a/.github/workflows/config_scan.yml b/.github/workflows/config_scan.yml new file mode 100644 index 00000000..929ebb68 --- /dev/null +++ b/.github/workflows/config_scan.yml @@ -0,0 +1,36 @@ +name: Scan Configs +on: [pull_request,workflow_dispatch] + +env: + singularity_image: oras://ghcr.io/berenslab/retinal-rl:singularity-image-latest + sif_file: retinal-rl_singularity-image-latest.sif + +jobs: + scan: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Setup Apptainer + uses: 
eWaterCycle/setup-apptainer@v2 + with: + apptainer-version: 1.3.0 + - name: Cache Singularity Image + id: cache-singularity + uses: actions/cache@v3 + with: + path: ${{ env.sif_file }} + key: ${{ runner.os }}-singularity-${{ hashFiles('resources/retinal-rl.def') }} + restore-keys: | + ${{ runner.os }}-singularity-${{ hashFiles('resources/retinal-rl.def') }} + ${{ runner.os }}-singularity- + - name: Pull Singularity container + if: steps.cache-singularity.outputs.cache-hit != 'true' + run: | + singularity registry login --username ${{ github.actor }} --password ${{ secrets.GITHUB_TOKEN }} oras://ghcr.io + singularity pull ${{ env.sif_file }} ${{ env.singularity_image }} + + - name: Scan classification config + run: | + cp -r resources/config_templates/* config/ + singularity exec ${{ env.sif_file }} \ + python main.py -m +experiment=cifar10-class-recon command=scan system.device=cpu \ No newline at end of file diff --git a/.github/workflows/container_build.yml b/.github/workflows/container_build.yml new file mode 100644 index 00000000..b047ef29 --- /dev/null +++ b/.github/workflows/container_build.yml @@ -0,0 +1,35 @@ +name: Build Singularity Container + +on: + schedule: + - cron: '0 2 1 * *' + push: + paths: + - 'resources/retinal-rl.def' + pull_request: + branches: + - master + paths: + - 'resources/retinal-rl.def' + +jobs: + singularity-build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: eWaterCycle/setup-apptainer@v2 + with: + apptainer-version: 1.3.0 + + - name: Build Singularity container + run: apptainer build retinal-rl.sif resources/retinal-rl.def + + - name: Scan classification config / ensure minimal functionality + run: | + cp -r resources/config_templates/* config/ + singularity exec retinal-rl.sif python main.py -m +experiment=cifar10-class-recon command=scan system.device=cpu + + - name: Push to ghcr.io + run: | + singularity registry login --username ${{ github.actor }} --password ${{ secrets.GITHUB_TOKEN }} 
oras://ghcr.io + singularity push retinal-rl.sif oras://ghcr.io/berenslab/retinal-rl:singularity-image-latest \ No newline at end of file diff --git a/.pylintrc b/.pylintrc new file mode 100644 index 00000000..8f1de749 --- /dev/null +++ b/.pylintrc @@ -0,0 +1,2 @@ +[MASTER] +init-hook='import sys; sys.path.append(".")' \ No newline at end of file diff --git a/doom_creator/compile-scenario.py b/doom_creator/compile_scenario.py similarity index 77% rename from doom_creator/compile-scenario.py rename to doom_creator/compile_scenario.py index d6450ea0..395531cc 100644 --- a/doom_creator/compile-scenario.py +++ b/doom_creator/compile_scenario.py @@ -1,3 +1,15 @@ +"""Scenario Compiler for Retinal-RL + +This module provides a utility to construct scenarios for the retinal-rl project. +It merges YAML files from a specified directory into a scenario specification and +subsequently compiles the scenario defined through those yaml files. + +Usage: + python -m exec.compile_scenario [options] [yaml_files...] + +Example: + python -m exec.compile_scenario gathering apples""" + import argparse import os import sys @@ -11,6 +23,13 @@ def make_parser(): + """ + Create and configure an argument parser for the scenario compiler. + Check the parsers description and arguments help for further information. + + Returns: + argparse.ArgumentParser: Configured argument parser for the scenario compiler. + """ # Initialize parser Directories() parser = argparse.ArgumentParser( @@ -22,7 +41,7 @@ def make_parser(): running the first time one should use the --preload flag to download the necessary resources into the --out_dir ('{Directories().CACHE_DIR}'). 
""", - epilog="Example: python -m exec.compile-scenario gathering apples", + epilog="Example: python -m exec.compile_scenario gathering apples", ) # Positional argument for scenario yaml files (required, can be multiple) parser.add_argument( @@ -44,7 +63,7 @@ def make_parser(): parser.add_argument( "--dataset_dir", default=None, - help="source directory of a dataset (for preloading), if you already downloaded it somewhere", + help="source directory of a dataset (for preloading), if already downloaded somewhere", ) parser.add_argument( "--resource_dir", @@ -72,6 +91,15 @@ def make_parser(): def main(): + """ + Main function to parse arguments and execute scenario compilation tasks. + + The function supports various modes of operation including preloading resources, + listing available YAML files, and creating scenarios. It also handles error + checking and warns the user if no actions are specified. + + For a more detailed documentation, check the parser. + """ # Parse args argv = sys.argv[1:] parser = make_parser() @@ -92,7 +120,7 @@ def main(): print(f"Listing contents of {dirs.SCENARIO_YAML_DIR}:") for flnm in os.listdir(dirs.SCENARIO_YAML_DIR): print(flnm) - print(f"If you want to load from a different folder, change this to") + print("If you want to load from a different folder, change this to") if do_make: cfg, needed_types = check_preload(cfg, args.test) any_dataset = False diff --git a/doom_creator/util/_templates/vizdoom.py b/doom_creator/util/_templates/vizdoom.py index 52a05ea5..eb03d178 100644 --- a/doom_creator/util/_templates/vizdoom.py +++ b/doom_creator/util/_templates/vizdoom.py @@ -1,5 +1,13 @@ -def config(scenario_name): - return """\ +""" +This module contains a dummy for a vizdoom config. 
+""" + + +def config(scenario_name: str): + """Returns a config for a vizdoom game, + referencing {scenario_name}.zip as the scenario.""" + + return f"""\ doom_scenario_path = {scenario_name}.zip living_reward = 0.0 @@ -32,6 +40,4 @@ def config(scenario_name): available_game_variables = {{ HEALTH }} mode = PLAYER -""".format( - scenario_name=scenario_name - ) +""" diff --git a/doom_creator/util/templates.py b/doom_creator/util/templates.py index f093ebff..cbd87460 100644 --- a/doom_creator/util/templates.py +++ b/doom_creator/util/templates.py @@ -1 +1,8 @@ +"""Templates for doom scenario creation + +This module provides the templates for acs scripts, decorate +definitions and the overall config (vizdoom) +""" from doom_creator.util._templates import acs, decorate, vizdoom + +__all__ = ['acs', 'decorate', 'vizdoom'] diff --git a/resources/config_templates/user/brain/retinal-classifier.yaml b/resources/config_templates/user/brain/shallow-autoencoder.yaml similarity index 88% rename from resources/config_templates/user/brain/retinal-classifier.yaml rename to resources/config_templates/user/brain/shallow-autoencoder.yaml index 1d65923c..3cf4fc7e 100644 --- a/resources/config_templates/user/brain/retinal-classifier.yaml +++ b/resources/config_templates/user/brain/shallow-autoencoder.yaml @@ -24,8 +24,8 @@ circuits: _target_: retinal_rl.models.circuits.convolutional.ConvolutionalEncoder num_layers: 2 num_channels: [16,32] # Two layers with 16 and 32 channels - kernel_size: ${kernel_size} - stride: ${stride} + kernel_size: 8 + stride: 2 act_name: ${activation} layer_names: ["bipolar", "retinal_ganglion"] # Names inspired by retinal cell types @@ -34,8 +34,8 @@ circuits: _target_: retinal_rl.models.circuits.convolutional.ConvolutionalEncoder num_layers: 1 num_channels: 64 - kernel_size: ${kernel_size} - stride: ${stride} + kernel_size: 5 + stride: 1 act_name: ${activation} layer_names: ["lgn"] # Lateral Geniculate Nucleus @@ -44,8 +44,8 @@ circuits: _target_: 
retinal_rl.models.circuits.convolutional.ConvolutionalEncoder num_layers: 1 num_channels: 64 - kernel_size: ${kernel_size} - stride: ${stride} + kernel_size: 8 + stride: 2 act_name: ${activation} layer_names: ["v1"] # Primary Visual Cortex @@ -53,9 +53,9 @@ circuits: pfc: _target_: retinal_rl.models.circuits.fully_connected.FullyConnectedEncoder output_shape: - - ${latent_size} # Size of the latent representation + - 128 # Size of the latent representation hidden_units: - - ${hidden_units} # Number of hidden units + - 64 # Number of hidden units act_name: ${activation} # Decoder: for reconstructing the input from the latent representation @@ -63,8 +63,8 @@ circuits: _target_: retinal_rl.models.circuits.convolutional.ConvolutionalDecoder num_layers: 3 num_channels: [32,16,3] # For a symmetric encoder, this should be the reverse of the num_channels in the CNN layers up to the point of decoding (in this case, the thalamus) - kernel_size: ${kernel_size} - stride: ${stride} + kernel_size: [5,8,8] + stride: [1,2,2] act_name: ${activation} # Classifier: for categorizing the input into classes diff --git a/resources/config_templates/user/dataset/cifar10-large.yaml b/resources/config_templates/user/dataset/cifar10-decontrast.yaml similarity index 77% rename from resources/config_templates/user/dataset/cifar10-large.yaml rename to resources/config_templates/user/dataset/cifar10-decontrast.yaml index 645607d5..5e0624bf 100644 --- a/resources/config_templates/user/dataset/cifar10-large.yaml +++ b/resources/config_templates/user/dataset/cifar10-decontrast.yaml @@ -11,11 +11,9 @@ imageset: image_rescale_range: [1, 5] noise_transforms: - _target_: retinal_rl.datasets.transforms.ShotNoiseTransform - lambda_range: [0.8, 1.2] + lambda_range: [0.5, 1.5] - _target_: retinal_rl.datasets.transforms.ContrastTransform - contrast_range: [0.5, 1.5] + contrast_range: [0.01, 1.2] apply_normalization: true - # normalization_mean: [0.4914, 0.4822, 0.4465] - # normalization_std: [0.2023, 0.1994, 
0.2010] fixed_transformation: false multiplier: 1 diff --git a/resources/config_templates/user/experiment/cifar10-class-recon.yaml b/resources/config_templates/user/experiment/cifar10-class-recon.yaml index fcc26ded..3c1720c0 100644 --- a/resources/config_templates/user/experiment/cifar10-class-recon.yaml +++ b/resources/config_templates/user/experiment/cifar10-class-recon.yaml @@ -1,25 +1,22 @@ -# This is the main entry point for users to specify their config parameters, and -# should be freely copied and edited. - -# Defaults for the various subconfigs. Can be overriden from the commandline -# with e.g. experiment/brain=new_brain, where new_brain.yaml lives in the brain -# subdirectory +# @package _global_ defaults: - _self_ - - sweep: kernel-size - - dataset: cifar10-large - - brain: retinal-classifier - - optimizer: class-recon + - override /dataset: cifar10-decontrast + - override /brain: shallow-autoencoder + - override /optimizer: recon-weight -# This *must* match the experiment file name -name: cifar10-class-recon +# This is the main entry point for control of a retinal-rl experiment. Variables +# created here will be top-level, and defaults can be set for the various parts +# of an experiment (NB: do not add comments above the defaults list or it will +# break the config system.) framework: classification # This is a free list of parameters that can be interpolated by the subconfigs # in sweep, dataset, brain, and optimizer. A major use for this is interpolating # values in the subconfigs, and then looping over them in a sweep. 
-latent_size: 128 -hidden_units: 64 activation: "elu" -kernel_size: 8 -stride: 2 +activation_sparsity: 0.0001 +weight_decay: 0.0001 +sparse_objective: retinal_rl.models.objective.L1Sparsity +recon_weight_retina: 1 +recon_weight_thalamus: 0.99 \ No newline at end of file diff --git a/resources/config_templates/user/optimizer/class-recon.yaml b/resources/config_templates/user/optimizer/class-recon.yaml deleted file mode 100644 index cdf8fc83..00000000 --- a/resources/config_templates/user/optimizer/class-recon.yaml +++ /dev/null @@ -1,46 +0,0 @@ -# BrainOptimizer config. Each top level defines a particular optimizer. Circuits -# should appear in at most one optimizer. -recon: - optimizer: # torch.optim Class and parameters - _target_: torch.optim.Adam - lr: 0.0003 - min_epoch: 0 # Epoch to start optimizer - max_epoch: 100 # Epoch to stop optimizer - objectives: # Weighted optimizer objectives as defined in retinal-rl - - _target_: retinal_rl.models.objective.ReconstructionObjective - weight: 1 - - _target_: retinal_rl.models.objective.L1Sparsity - weight: 0.001 - target_responses: - - retina - target_circuits: # Circuit parameters to optimize with this optimizer. 
We train the retina and the decoder exclusively to maximize reconstruction - - retina - - decoder -mixed: - optimizer: - _target_: torch.optim.Adam - lr: 0.0003 - min_epoch: 0 - max_epoch: 100 - objectives: - - _target_: retinal_rl.models.objective.ReconstructionObjective - weight: 0.99 - - _target_: retinal_rl.classification.objective.ClassificationObjective - weight: 0.01 - target_circuits: # The thalamus is somewhat sensitive to task objectives - - thalamus -class: - optimizer: - _target_: torch.optim.Adam - lr: 0.0003 - min_epoch: 0 - max_epoch: 100 - objectives: - - _target_: retinal_rl.classification.objective.ClassificationObjective - weight: 1 - - _target_: retinal_rl.classification.objective.PercentCorrect - weight: 0 - target_circuits: # Visual cortex and downstream layers are driven by the task - - visual_cortex - - pfc - - classifier diff --git a/resources/config_templates/user/optimizer/recon-weight.yaml b/resources/config_templates/user/optimizer/recon-weight.yaml new file mode 100644 index 00000000..83bc7e42 --- /dev/null +++ b/resources/config_templates/user/optimizer/recon-weight.yaml @@ -0,0 +1,45 @@ +optimizer: # torch.optim Class and parameters + _target_: torch.optim.Adam + lr: 0.0003 + +goal: + recon: + min_epoch: 0 # Epoch to start optimizer + max_epoch: 100 # Epoch to stop optimizer + losses: # Weighted optimizer losses as defined in retinal-rl + - _target_: retinal_rl.models.loss.ReconstructionLoss + weight: ${recon_weight_retina} + - _target_: retinal_rl.classification.loss.ClassificationLoss + weight: ${eval:'1-${recon_weight_retina}'} + target_circuits: # Circuit parameters to optimize with this optimizer. 
We train the retina and the decoder exclusively to maximize reconstruction + - retina + decode: + min_epoch: 0 # Epoch to start optimizer + max_epoch: 100 # Epoch to stop optimizer + losses: # Weighted optimizer losses as defined in retinal-rl + - _target_: retinal_rl.models.loss.ReconstructionLoss + weight: 1 + target_circuits: # Circuit parameters to optimize with this optimizer. We train the retina and the decoder exclusively to maximize reconstruction + - decoder + mixed: + min_epoch: 0 + max_epoch: 100 + losses: + - _target_: retinal_rl.models.loss.ReconstructionLoss + weight: ${recon_weight_thalamus} + - _target_: retinal_rl.classification.loss.ClassificationLoss + weight: ${eval:'1-${recon_weight_thalamus}'} + target_circuits: # The thalamus is somewhat sensitive to task losses + - thalamus + class: + min_epoch: 0 + max_epoch: 100 + losses: + - _target_: retinal_rl.classification.loss.ClassificationLoss + weight: 1 + - _target_: retinal_rl.classification.loss.PercentCorrect + weight: 0 + target_circuits: # Visual cortex and downstream layers are driven by the task + - visual_cortex + - pfc + - classifier diff --git a/resources/retinal-rl.def b/resources/retinal-rl.def index 6aef9845..c6be54a4 100644 --- a/resources/retinal-rl.def +++ b/resources/retinal-rl.def @@ -58,4 +58,10 @@ From: nvidia/cuda:11.7.1-runtime-ubuntu22.04 # retinal-rl Extra - pip3 install gymnasium==0.28.1 torch==1.13.1 vizdoom==1.2.0 torchvision==0.14.1 matplotlib opentsne pygame pycairo git+https://github.com/pytorch/captum.git torchscan num2words hiyapyco omgifol git+https://github.com/alex404/sample-factory.git@fix-vtrace dpcpp-cpp-rt seaborn hydra-core networkx + pip3 install --no-cache-dir gymnasium==0.28.1 torch==1.13.1 vizdoom==1.2.0 torchvision==0.14.1 matplotlib opentsne pygame pycairo==1.26.1 git+https://github.com/pytorch/captum.git torchscan num2words hiyapyco omgifol git+https://github.com/alex404/sample-factory.git@fix-vtrace dpcpp-cpp-rt seaborn hydra-core networkx pylint + 
+ # Clean up for smaller container size + rm acc159linux-x64.zip + apt clean + rm -rf /var/lib/apt/lists/* + rm -rf /root/.cache/pip \ No newline at end of file