From 6b6fb5ffe4330b895dab3cdc0d8fcc6d2d16ce5f Mon Sep 17 00:00:00 2001
From: Alby M <1473644+Linux-cpp-lisp@users.noreply.github.com>
Date: Mon, 3 May 2021 16:01:22 -0600
Subject: [PATCH 01/16] Enforce explicit PBC

---
 nequip/data/AtomicData.py | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/nequip/data/AtomicData.py b/nequip/data/AtomicData.py
index 7f58b7e9..10b90486 100644
--- a/nequip/data/AtomicData.py
+++ b/nequip/data/AtomicData.py
@@ -5,7 +5,7 @@
 import warnings
 from copy import deepcopy
-from typing import Union, Tuple, Dict
+from typing import Union, Tuple, Dict, Optional
 from collections.abc import Mapping
 
 import numpy as np
@@ -132,7 +132,7 @@ def from_points(
         self_interaction: bool = False,
         strict_self_interaction: bool = True,
         cell=None,
-        pbc: PBC = False,
+        pbc: Optional[PBC] = None,
         **kwargs,
     ):
         """Build neighbor graph from points, optionally with PBC.
@@ -154,6 +154,15 @@
         """
         if pos is None or r_max is None:
             raise ValueError("pos and r_max must be given.")
+
+        if pbc is None:
+            if cell is not None:
+                raise ValueError(
+                    "A cell was provided, but pbc weren't. Please explicitly provide PBC."
+                )
+            # there are no PBC if cell and pbc are not provided
+            pbc = False
+
         if isinstance(pbc, bool):
             pbc = (pbc,) * 3
         else:
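A minimal usage sketch of the API change above (not part of the patch; the positions and cell values are made up for illustration): with this patch, passing a `cell` without an explicit `pbc` raises a ValueError, so callers must state the periodicity they want.

    import torch
    from nequip.data import AtomicData

    pos = torch.randn(4, 3)    # hypothetical positions
    cell = torch.eye(3) * 5.0  # hypothetical cubic cell

    # AtomicData.from_points(pos=pos, r_max=4.0, cell=cell)  # would now raise ValueError
    bulk = AtomicData.from_points(pos=pos, r_max=4.0, cell=cell, pbc=True)
    # per-axis periodicity is also accepted, e.g. for a slab:
    slab = AtomicData.from_points(pos=pos, r_max=4.0, cell=cell, pbc=(True, True, False))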
From 52add38055a7f24b336accdc425fe00ee44cef1d Mon Sep 17 00:00:00 2001
From: Alby M <1473644+Linux-cpp-lisp@users.noreply.github.com>
Date: Mon, 3 May 2021 16:11:22 -0600
Subject: [PATCH 02/16] Atomic writes

---
 nequip/train/trainer.py   | 26 +++++++-------
 nequip/utils/__init__.py  |  2 +-
 nequip/utils/savenload.py | 72 +++++++++++++++++++++++----------------
 3 files changed, 58 insertions(+), 42 deletions(-)

diff --git a/nequip/train/trainer.py b/nequip/train/trainer.py
index d0f0d6bc..5aa7fc36 100644
--- a/nequip/train/trainer.py
+++ b/nequip/train/trainer.py
@@ -33,6 +33,7 @@
     instantiate,
     save_file,
     load_file,
+    atomic_write,
 )
 from .loss import Loss, LossStat
@@ -410,13 +411,14 @@ def save(self, filename, format=None):
             filename=filename,
             enforced_format=format,
         )
-        logger.debug(f"Saving trainer to {filename}")
+        logger.debug(f"Saved trainer to {filename}")
 
-        if hasattr(self.model, "save"):
-            self.model.save(self.last_model_path)
-        else:
-            torch.save(self.model, self.last_model_path)
-        logger.debug(f"Saving last model to to {self.last_model_path}")
+        with atomic_write(self.last_model_path) as write_to:
+            if hasattr(self.model, "save"):
+                self.model.save(write_to)
+            else:
+                torch.save(self.model, write_to)
+        logger.debug(f"Saved last model to {self.last_model_path}")
 
         return filename
@@ -836,21 +838,21 @@ def end_of_epoch_save(self):
                 self.best_val_metrics = val_metrics
                 self.best_epoch = self.iepoch
 
-                save_path = self.best_model_path
-
                 if self.use_ema:
                     # If using EMA, store the EMA validation model
                     # that gave us the good val metrics that made the model "best"
                     # in the first place
                     cm = self.ema.average_parameters()
                 else:
+                    # otherwise, do nothing
                    cm = contextlib.nullcontext()
 
                 with cm:
-                    if hasattr(self.model, "save"):
-                        self.model.save(save_path)
-                    else:
-                        torch.save(self.model, save_path)
+                    with atomic_write(self.best_model_path) as save_path:
+                        if hasattr(self.model, "save"):
+                            self.model.save(save_path)
+                        else:
+                            torch.save(self.model, save_path)
 
                 self.logger.info(
                     f"! Best model {self.best_epoch+1:8d} {self.best_val_metrics:8.3f}"
                 )
diff --git a/nequip/utils/__init__.py b/nequip/utils/__init__.py
index 50adfa04..d9234f69 100644
--- a/nequip/utils/__init__.py
+++ b/nequip/utils/__init__.py
@@ -1,4 +1,4 @@
 from .auto_init import instantiate_from_cls_name, instantiate, dataset_from_config
-from .savenload import save_file, load_file
+from .savenload import save_file, load_file, atomic_write
 from .config import Config
 from .output import Output
diff --git a/nequip/utils/savenload.py b/nequip/utils/savenload.py
index 60e58321..202b6bf5 100644
--- a/nequip/utils/savenload.py
+++ b/nequip/utils/savenload.py
@@ -1,12 +1,30 @@
 """
 utilities that involve file searching and operations (i.e. save/load)
 """
+from typing import Union
 import logging
-
+import contextlib
+from pathlib import Path
 from os import makedirs
 from os.path import isfile, isdir, dirname, realpath
 
 
+@contextlib.contextmanager
+def atomic_write(filename: Union[Path, str]):
+    filename = Path(filename)
+    tmp_path = filename.parent / (f".tmp-{filename.name}~")
+    # Create the temp file
+    open(tmp_path, "w").close()
+    try:
+        # do the IO
+        yield tmp_path
+        # move the temp file to the final output path, which also removes the temp file
+        tmp_path.rename(filename)
+    finally:
+        # clean up
+        tmp_path.unlink(missing_ok=True)
+
+
 def save_file(
     item, supported_formats: dict, filename: str, enforced_format: str = None
 ):
@@ -26,39 +44,35 @@
         enforced_format=enforced_format,
     )
 
-    if format == "json":
-        import json
-
-        with open(filename, "w+") as fout:
-            json.dump(item, fout)
-
-    elif format == "yaml":
-        import yaml
-
-        with open(filename, "w+") as fout:
-            yaml.dump(item, fout)
-
-    elif format == "torch":
-        import torch
-
-        torch.save(item, filename)
-
-    elif format == "pickle":
-        import pickle
-
-        with open(filename, "wb") as fout:
-            pickle.save(item, fout)
-
-    elif format == "npz":
-        import numpy as np
-
-        np.savez(filename, item)
-
-    else:
-        raise NotImplementedError(
-            f"Output format {format} not supported:"
-            f" try from {supported_formats.keys()}"
-        )
+    with atomic_write(filename) as write_to:
+        if format == "json":
+            import json
+
+            with open(write_to, "w+") as fout:
+                json.dump(item, fout)
+        elif format == "yaml":
+            import yaml
+
+            with open(write_to, "w+") as fout:
+                yaml.dump(item, fout)
+        elif format == "torch":
+            import torch
+
+            torch.save(item, write_to)
+        elif format == "pickle":
+            import pickle
+
+            with open(write_to, "wb") as fout:
+                pickle.dump(item, fout)
+        elif format == "npz":
+            import numpy as np
+
+            np.savez(write_to, item)
+        else:
+            raise NotImplementedError(
+                f"Output format {format} not supported:"
+                f" try from {supported_formats.keys()}"
+            )
 
     return filename
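A minimal usage sketch of the new `atomic_write` context manager (not part of the patch; the file name is illustrative): the manager yields a temporary path and only renames it over the real file once the `with` block completes, so an interrupted save cannot leave a truncated checkpoint behind.

    import torch
    from nequip.utils import atomic_write

    with atomic_write("last_model.pth") as tmp_path:
        torch.save({"weights": torch.zeros(3)}, tmp_path)
    # last_model.pth now exists; the hidden .tmp-...~ file is cleaned up either way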
From e33f7bb21ab0fcafad1333a25b0889af646e4be5 Mon Sep 17 00:00:00 2001
From: Alby M <1473644+Linux-cpp-lisp@users.noreply.github.com>
Date: Mon, 3 May 2021 16:13:41 -0600
Subject: [PATCH 03/16] Changelog

---
 CHANGELOG.md | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index d96f5972..4e83a540 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,6 +6,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 Most recent change on the bottom.
 
+## [Unreleased]
+### Changed
+- Training now uses atomic file writes to avoid corruption if interrupted
+
 ## [0.2.1] - 2021-05-03
 ### Fixed
 - `load_deployed_model` now correctly loads all metadata

From cf3be116e1152b985ef0c0601c23d45e58359b87 Mon Sep 17 00:00:00 2001
From: Alby M <1473644+Linux-cpp-lisp@users.noreply.github.com>
Date: Tue, 4 May 2021 10:26:16 -0600
Subject: [PATCH 04/16] Fix bessel GPU

---
 nequip/nn/radial_basis.py | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/nequip/nn/radial_basis.py b/nequip/nn/radial_basis.py
index d0fc3bf2..21d04e06 100644
--- a/nequip/nn/radial_basis.py
+++ b/nequip/nn/radial_basis.py
@@ -1,5 +1,6 @@
+import math
+
 import torch
-import numpy as np
 from torch import nn
@@ -31,12 +32,13 @@ def __init__(self, r_max, num_basis=8, trainable=True):
         self.r_max = float(r_max)
         self.prefactor = 2.0 / self.r_max
 
-        self.bessel_weights = torch.linspace(
-            start=1.0, end=num_basis, steps=num_basis
-        ) * torch.Tensor([np.pi])
-
+        bessel_weights = (
+            torch.linspace(start=1.0, end=num_basis, steps=num_basis) * math.pi
+        )
         if self.trainable:
-            self.bessel_weights = nn.Parameter(self.bessel_weights)
+            self.bessel_weights = nn.Parameter(bessel_weights)
+        else:
+            self.register_buffer("bessel_weights", bessel_weights)
 
     def forward(self, x):
         """
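A standalone toy sketch of why the fix above works (not part of the patch): tensors registered via `register_buffer` are tracked by `Module.to()`/`.cuda()`, while plain tensor attributes are silently left on their original device, which is what broke the non-trainable `BesselBasis` on GPU.

    import torch
    from torch import nn

    class Toy(nn.Module):
        def __init__(self):
            super().__init__()
            self.plain = torch.ones(3)                  # ignored by .to()
            self.register_buffer("buf", torch.ones(3))  # moved like a parameter

    toy = Toy()
    if torch.cuda.is_available():
        toy.to("cuda")
        print(toy.plain.device)  # cpu  -- the old failure mode
        print(toy.buf.device)    # cuda:0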
From c8cf4261190749280e45e64dd91e7bd05dfa5030 Mon Sep 17 00:00:00 2001
From: Alby M <1473644+Linux-cpp-lisp@users.noreply.github.com>
Date: Tue, 4 May 2021 14:37:13 -0600
Subject: [PATCH 05/16] device for load_deployed_model

---
 nequip/scripts/deploy.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/nequip/scripts/deploy.py b/nequip/scripts/deploy.py
index a1ec2182..32ec7582 100644
--- a/nequip/scripts/deploy.py
+++ b/nequip/scripts/deploy.py
@@ -25,7 +25,7 @@
 
 def load_deployed_model(
-    model_path: Union[pathlib.Path, str]
+    model_path: Union[pathlib.Path, str], device: Union[str, torch.device] = "cpu"
 ) -> Tuple[torch.jit.ScriptModule, Dict[str, str]]:
     r"""Load a deployed model.
@@ -37,7 +37,7 @@
     """
     metadata = {k: "" for k in _ALL_METADATA_KEYS}
     try:
-        model = torch.jit.load(model_path, _extra_files=metadata)
+        model = torch.jit.load(model_path, map_location=device, _extra_files=metadata)
     except RuntimeError as e:
         raise ValueError(
             f"{model_path} does not seem to be a deployed NequIP model file. Did you forget to deploy it using `nequip-deploy`? \n\n(Underlying error: {e})"

From db477f86a67db3c98abda4e7c95e44d32ec3f731 Mon Sep 17 00:00:00 2001
From: Alby M <1473644+Linux-cpp-lisp@users.noreply.github.com>
Date: Tue, 4 May 2021 22:40:55 -0600
Subject: [PATCH 06/16] Allow skipping sub builders

---
 nequip/utils/auto_init.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/nequip/utils/auto_init.py b/nequip/utils/auto_init.py
index cd2b5292..497c4ced 100644
--- a/nequip/utils/auto_init.py
+++ b/nequip/utils/auto_init.py
@@ -208,9 +208,13 @@ def instantiate(
     search_keys = [key for key in init_args if key + "_kwargs" in config.allow_list()]
     for key in search_keys:
         sub_builder = init_args[key]
+        if sub_builder is None:
+            # if the builder is None, skip it
+            continue
+
         if not (callable(sub_builder) or inspect.isclass(sub_builder)):
             raise ValueError(
-                f"Builder for submodule `{key}` must be a callable or a class, got `{builder!r}` instead."
+                f"Builder for submodule `{key}` must be a callable or a class, got `{sub_builder!r}` instead."
             )
 
         # add double check to avoid cycle

From 73732427ffef026260a8d873536aa48db275f515 Mon Sep 17 00:00:00 2001
From: Alby M <1473644+Linux-cpp-lisp@users.noreply.github.com>
Date: Tue, 4 May 2021 23:59:18 -0600
Subject: [PATCH 07/16] extra fixed fields in all circumstances

---
 CHANGELOG.md           | 3 +++
 nequip/data/dataset.py | 2 ++
 2 files changed, 5 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4e83a540..bfba1359 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -10,6 +10,9 @@ Most recent change on the bottom.
 ### Changed
 - Training now uses atomic file writes to avoid corruption if interrupted
 
+### Fixed
+- Dataset `extra_fixed_fields` are now added even if `get_data()` returns `AtomicData` objects
+
 ## [0.2.1] - 2021-05-03
 ### Fixed
 - `load_deployed_model` now correctly loads all metadata
diff --git a/nequip/data/dataset.py b/nequip/data/dataset.py
index 4f582cc0..37859fe8 100644
--- a/nequip/data/dataset.py
+++ b/nequip/data/dataset.py
@@ -200,6 +200,8 @@ def process(self):
                 if key in data_list[0]:
                     fixed_fields[key] = data_list[0][key]
 
+            fixed_fields.update(self.extra_fixed_fields)
+
         elif len(data) == 2:
 
             # It's fields and fixed_fields
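A minimal usage sketch of the `device` argument introduced in patch 05 (not part of the patch; the file name is illustrative): the deployed TorchScript model and its metadata can now be loaded straight onto the target device.

    import torch
    from nequip.scripts.deploy import load_deployed_model

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model, metadata = load_deployed_model("deployed_model.pth", device=device)
    # `metadata` carries deployment information such as the cutoff radius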
From c758b5d9596009ca4eeaf4342e03df7293fdff50 Mon Sep 17 00:00:00 2001
From: Alby M <1473644+Linux-cpp-lisp@users.noreply.github.com>
Date: Wed, 5 May 2021 13:22:05 -0600
Subject: [PATCH 08/16] Initialization (#28)

* rename

* Fix minimal yaml irreps

* update test for skipped subbuilders

* Basic weight init

* Xavier

* changelog

* fix test
---
 CHANGELOG.md                    |  3 ++
 configs/example.yaml            |  9 +++++-
 configs/minimal.yaml            |  9 ++++--
 nequip/scripts/train.py         | 32 +++++++++++---------
 nequip/utils/initialization.py  | 53 +++++++++++++++++++++++++++++++++
 nequip/utils/uniform_init.py    | 23 --------------
 tests/model/test_eng_force.py   |  4 +--
 tests/utils/test_instantiate.py |  2 +-
 tests/utils/test_weight_init.py | 11 ++++---
 9 files changed, 99 insertions(+), 47 deletions(-)
 create mode 100644 nequip/utils/initialization.py
 delete mode 100644 nequip/utils/uniform_init.py

diff --git a/CHANGELOG.md b/CHANGELOG.md
index bfba1359..76328b1b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,6 +7,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 Most recent change on the bottom.
 
 ## [Unreleased]
+### Added
+- More flexible model initialization
+
 ### Changed
 - Training now uses atomic file writes to avoid corruption if interrupted
diff --git a/configs/example.yaml b/configs/example.yaml
index 9743cc9a..281d6e08 100644
--- a/configs/example.yaml
+++ b/configs/example.yaml
@@ -28,7 +28,6 @@
 invariant_layers: 1
 invariant_neurons: 8          # number of hidden neurons in radial function, again keep this small for MD applications, 8 - 32, smaller is faster
 avg_num_neighbors: null       # number of neighbors to divide by, None => no normalization.
 use_sc: true                  # use self-connection or not, usually gives big improvement
-model_uniform_init: false     # whether to use uniform (instead of normal) initialization for e3nn operations
 
 # to specify different parameters for each convolutional layer, try examples below
 # layer1_use_sc: true         # use "layer{i}_" prefix to specify parameters for only one of the layer,
 #                               priority for different definition:
 #                               invariant_neurons < InteractionBlock_invariant_neurons < layer{i}_invariant_neurons
 
+# how to initialize the weights of the model:
+# this can be the importable name of any function that can be `model.apply`ed to initialize some weights in the model. NequIP provides a number of useful initializers:
+model_initializers:
+ - nequip.utils.initialization.uniform_initialize_fcs
+ - nequip.utils.initialization.uniform_initialize_tps
+ - nequip.utils.initialization.orthogonal_initialize_linears
+# - nequip.utils.initialization.uniform_initialize_linears
+
 # whether to apply a shift and scale, defined per-species, to the atomic energies
 PerSpeciesScaleShift_enable: false
 # if the PerSpeciesScaleShift is enabled, whether the shifts and scales are trainable
diff --git a/configs/minimal.yaml b/configs/minimal.yaml
index bfafca2b..f3a792c3 100644
--- a/configs/minimal.yaml
+++ b/configs/minimal.yaml
@@ -7,10 +7,15 @@ seed: 0
 num_basis: 8
 r_max: 4.0
 irreps_edge_sh: 0e + 1o
-conv_to_output_hidden_irreps_out: 16x0o + 16x0e + 16x1o + 16x1e + 16x2o + 16x2e
-feature_irreps_hidden: 16x0o + 16x0e
+conv_to_output_hidden_irreps_out: 16x0e
+feature_irreps_hidden: 16x0o + 16x0e + 16x1o + 16x1e + 16x2o + 16x2e
 model_uniform_init: false
 
+model_initializers:
+ - nequip.utils.initialization.uniform_initialize_fcs
+ - nequip.utils.initialization.uniform_initialize_tps
+ - nequip.utils.initialization.orthogonal_initialize_linears
+
 # data
 dataset: aspirin
 dataset_file_name: benchmark_data/aspirin_ccsd-train.npz
diff --git a/nequip/scripts/train.py b/nequip/scripts/train.py
index 2a05a075..e6e9b39b 100644
--- a/nequip/scripts/train.py
+++ b/nequip/scripts/train.py
@@ -1,4 +1,5 @@
 """ Train a network."""
+from typing import Union, Callable
 import logging
 import argparse
 import yaml
@@ -23,7 +24,7 @@
     wandb_resume=False,
     compile_model=False,
     model_builder="nequip.models.ForceModel",
-    model_uniform_init=False,
+    model_initializers=[],
     dataset_statistics_stride=1,
     default_dtype="float32",
     verbose="INFO",
@@ -64,6 +65,17 @@ def parse_command_line(args=None):
     return config
 
 
+def _load_callable(obj: Union[str, Callable]) -> Callable:
+    if callable(obj):
+        pass
+    elif isinstance(obj, str):
+        obj = yaml.load(f"!!python/name:{obj}", Loader=yaml.Loader)
+    else:
+        raise TypeError
+    assert callable(obj), f"{obj} isn't callable"
+    return obj
+
+
 def fresh_start(config):
     # = Set global state =
     if config.model_debug_mode:
@@ -137,22 +149,14 @@ def fresh_start(config):
     config.update(dict(allowed_species=allowed_species))
 
     # = Build a model =
-    model_builder = config.model_builder
-    if callable(model_builder):
-        pass
-    elif isinstance(model_builder, str):
-        model_builder = yaml.load(f"!!python/name:{model_builder}", Loader=yaml.Loader)
-    else:
-        raise TypeError
-    assert callable(model_builder), f"Model builder {model_builder} isn't callable"
+    model_builder = _load_callable(config.model_builder)
     core_model = model_builder(**dict(config))
 
     # = Reinit if wanted =
-    if config.model_uniform_init:
-        from nequip.utils.uniform_init import uniform_initialize
-
-        with torch.no_grad():
-            core_model.apply(uniform_initialize)
+    with torch.no_grad():
+        for initer in config.model_initializers:
+            initer = _load_callable(initer)
+            core_model.apply(initer)
 
     # = Determine shifts, scales =
     # This is a bit awkward, but necessary for there to be a value
diff --git a/nequip/utils/initialization.py b/nequip/utils/initialization.py
new file mode 100644
index 00000000..39d98694
--- /dev/null
+++ b/nequip/utils/initialization.py
@@ -0,0 +1,53 @@
+import math
+
+import torch
+
+import e3nn.o3
+import e3nn.nn
+
+
+# == Uniform init ==
+def unit_uniform_init_(t: torch.Tensor):
+    t.uniform_(-math.sqrt(3), math.sqrt(3))
+
+
+def uniform_initialize_fcs(mod: torch.nn.Module):
+    if isinstance(mod, e3nn.nn.FullyConnectedNet):
+        for w in mod.weights:
+            unit_uniform_init_(w)
+    # no need to do torch.nn.Linear, which is uniform by default
+
+
+def uniform_initialize_linears(mod: torch.nn.Module):
+    if isinstance(mod, e3nn.o3.Linear) and mod.internal_weights:
+        unit_uniform_init_(mod.weight)
+
+
+def uniform_initialize_tps(mod: torch.nn.Module):
+    if isinstance(mod, e3nn.o3.TensorProduct) and mod.internal_weights:
+        unit_uniform_init_(mod.weight)
+
+
+# == Xavier ==
+def xavier_initialize_fcs(mod: torch.nn.Module):
+    if isinstance(mod, e3nn.nn.FullyConnectedNet):
+        for w in mod.weights:
+            # in FC:
+            # h_in, _h_out = W.shape
+            # W = W / h_in**0.5
+            torch.nn.init.xavier_uniform_(w, gain=w.shape[0] ** 0.5)
+    elif isinstance(mod, torch.nn.Linear):
+        torch.nn.init.xavier_uniform_(mod.weight)
+
+
+# == Orthogonal ==
+def unit_orthogonal_init_(t: torch.Tensor):
+    """Orthogonal init scaled so that W^T W = N * I, i.e. unit second moment"""
+    assert t.ndim == 2
+    torch.nn.init.orthogonal_(t, gain=math.sqrt(max(t.shape)))
+
+
+def orthogonal_initialize_linears(mod: torch.nn.Module):
+    if isinstance(mod, e3nn.o3.Linear) and mod.internal_weights:
+        for w in mod.weight_views():
+            unit_orthogonal_init_(w)
diff --git a/nequip/utils/uniform_init.py b/nequip/utils/uniform_init.py
deleted file mode 100644
index ca5affd5..00000000
--- a/nequip/utils/uniform_init.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import math
-
-import torch
-
-import e3nn.o3
-import e3nn.nn
-
-
-def unit_uniform_init_(t: torch.Tensor):
-    t.uniform_(-math.sqrt(3), math.sqrt(3))
-
-
-def uniform_initialize(
-    mod: torch.nn.Module, do_fc: bool = True, do_linear: bool = True, do_tp: bool = True
-) -> None:
-    if do_fc and isinstance(mod, e3nn.nn.FullyConnectedNet):
-        for w in mod.weights:
-            unit_uniform_init_(w)
-    elif do_linear and isinstance(mod, e3nn.o3.Linear) and mod.internal_weights:
-        unit_uniform_init_(mod.weight)
-    elif do_tp and isinstance(mod, e3nn.o3.TensorProduct) and mod.internal_weights:
-        unit_uniform_init_(mod.weight)
-    return
diff --git a/tests/model/test_eng_force.py b/tests/model/test_eng_force.py
index dd147124..6b1417ac 100644
--- a/tests/model/test_eng_force.py
+++ b/tests/model/test_eng_force.py
@@ -13,7 +13,7 @@
 from nequip.data import AtomicDataDict, AtomicData
 from nequip.models import EnergyModel, ForceModel
 from nequip.nn import GraphModuleMixin, AtomwiseLinear
-from nequip.utils.uniform_init import uniform_initialize
+from nequip.utils.initialization import uniform_initialize_linears
 from nequip.utils.test import assert_AtomicData_equivariant
@@ -102,7 +102,7 @@ def test_weight_init(self, model, atomic_batch, device):
         out_orig = instance(data)[out_field]
 
         with torch.no_grad():
-            instance.apply(uniform_initialize)
+            instance.apply(uniform_initialize_linears)
         out_unif = instance(data)[out_field]
         assert not torch.allclose(out_orig, out_unif)
diff --git a/tests/utils/test_instantiate.py b/tests/utils/test_instantiate.py
index 67380836..1762d51e 100644
--- a/tests/utils/test_instantiate.py
+++ b/tests/utils/test_instantiate.py
@@ -223,7 +223,7 @@ def __init__(self, thing_kwargs={}):
 
 
 class BadKwargs2:
-    def __init__(self, thing=None, thing_kwargs={}):
+    def __init__(self, thing="a string", thing_kwargs={}):
         pass
diff --git a/tests/utils/test_weight_init.py b/tests/utils/test_weight_init.py
index 7f25d895..b5e2a013 100644
--- a/tests/utils/test_weight_init.py
+++ b/tests/utils/test_weight_init.py
@@ -1,9 +1,12 @@
+import pytest
+
 import torch
 
-from nequip.utils.uniform_init import unit_uniform_init_
+from nequip.utils.initialization import unit_uniform_init_, unit_orthogonal_init_
 
 
-def test_unif_init():
-    t = torch.empty(10_000)
-    unit_uniform_init_(t)
+@pytest.mark.parametrize("init_func_", [unit_uniform_init_, unit_orthogonal_init_])
+def test_2mom(init_func_):
+    t = torch.empty(1000, 100)
+    init_func_(t)
     assert (t.square().mean() - 1.0).abs() <= 0.1
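A toy sketch of how these initializers are applied (not part of the patch), mirroring the `core_model.apply(initer)` loop in `train.py` above; the e3nn `FullyConnectedNet` here stands in for a real NequIP model. `Module.apply` visits every submodule, and each initializer only touches the module types it recognizes.

    import torch
    from e3nn.nn import FullyConnectedNet
    from nequip.utils.initialization import uniform_initialize_fcs

    net = FullyConnectedNet([16, 32, 32, 1], torch.nn.functional.silu)
    with torch.no_grad():
        net.apply(uniform_initialize_fcs)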
From c06849c40e5b9698668b0eaf9c63d01cc6eed4e2 Mon Sep 17 00:00:00 2001
From: Alby M <1473644+Linux-cpp-lisp@users.noreply.github.com>
Date: Fri, 7 May 2021 12:28:41 -0600
Subject: [PATCH 09/16] explicit simplify() for irreps_mid

---
 nequip/data/dataset.py          |  2 ++
 nequip/nn/_interaction_block.py | 11 +++++++++--
 2 files changed, 11 insertions(+), 2 deletions(-)

diff --git a/nequip/data/dataset.py b/nequip/data/dataset.py
index 37859fe8..bdc4f737 100644
--- a/nequip/data/dataset.py
+++ b/nequip/data/dataset.py
@@ -378,6 +378,8 @@ def statistics(
 class NpzDataset(AtomicInMemoryDataset):
     """Load data from an npz file.
 
+    To avoid loading unneeded data, keys are ignored by default unless they are in ``key_mapping``, ``npz_keys``, or ``npz_fixed_fields``.
+
     Args:
         file_name (str): file name of the npz file
         key_mapping (Dict[str, str]): mapping of npz keys to ``AtomicData`` keys
diff --git a/nequip/nn/_interaction_block.py b/nequip/nn/_interaction_block.py
index 4aabfcfb..c7d0887a 100644
--- a/nequip/nn/_interaction_block.py
+++ b/nequip/nn/_interaction_block.py
@@ -89,9 +89,12 @@ def __init__(
                 irreps_mid.append((mul, ir_out))
                 instructions.append((i, j, k, "uvu", True))
 
+        # We sort the output irreps of the tensor product so that we can simplify them
+        # when they are provided to the second o3.Linear
         irreps_mid = o3.Irreps(irreps_mid)
         irreps_mid, p, _ = irreps_mid.sort()
 
+        # Permute the output indexes of the instructions to match the sorted irreps:
         instructions = [
             (i_in1, i_in2, p[i_out], mode, train)
             for i_in1, i_in2, i_out, mode, train in instructions
@@ -106,7 +109,7 @@
             internal_weights=False,
         )
 
-        # init_irreps confirmed that the edge embeddding is all invariant scalars
+        # init_irreps already confirmed that the edge embedding is all invariant scalars
         self.fc = FullyConnectedNet(
             [self.irreps_in[AtomicDataDict.EDGE_EMBEDDING_KEY].num_irreps]
             + invariant_layers * [invariant_neurons]
@@ -117,7 +120,11 @@
         self.tp = tp
 
         self.linear_2 = Linear(
+            # irreps_mid has uncoalesced irreps because of the uvu instructions,
+            # but there's no reason to treat them separately for the Linear.
+            # Note that normalization of o3.Linear changes if irreps are coalesced
+            # (likely for the better)
-            irreps_in=irreps_mid,
+            irreps_in=irreps_mid.simplify(),
             irreps_out=feature_irreps_out,
             internal_weights=True,
             shared_weights=True,
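A short sketch of what `simplify()` does to an uncoalesced irreps list (not part of the patch; this is standard e3nn API):

    from e3nn import o3

    # "uvu" tensor product paths can emit the same irrep several times:
    irreps_mid = o3.Irreps("8x0e + 8x0e + 8x1o")
    print(irreps_mid.simplify())  # 16x0e+8x1o -- consecutive identical irreps merged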
From abd9c80fda70ed2a772f727efd9e1f14429bd3a5 Mon Sep 17 00:00:00 2001
From: Simon Batzner
Date: Fri, 7 May 2021 14:48:02 -0400
Subject: [PATCH 10/16] update chemical_embedding + how to set dimension

---
 configs/example.yaml  | 24 ++++++++++++------------
 nequip/models/_eng.py |  2 +-
 2 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/configs/example.yaml b/configs/example.yaml
index 281d6e08..5343ad1b 100644
--- a/configs/example.yaml
+++ b/configs/example.yaml
@@ -9,8 +9,7 @@ run_name: example-run
 seed: 0                       # random number seed for numpy and torch
 restart: false                # set True for a restarted run
 append: false                 # set True if a restarted run should append to the previous log file
-
-default_dtype: float32        # type of float, e.g. float32 and float64
+default_dtype: float32        # type of float, e.g. float32 and float64
 
 # network
 compile_model: False          # whether to compile the constructed model to TorchScript
 num_basis: 8
 r_max: 4.0                    # cutoff radius
 irreps_edge_sh: 0e + 1o + 2e  # irreps of the spherical harmonics used for edges. If a single integer, indicates the full SH up to L_max=that_integer
 conv_to_output_hidden_irreps_out: 16x0e                              # irreps used in hidden layer of output block
+chemical_embedding_irreps_out: 32x0e
+feature_irreps_hidden: 32x0o + 32x0e + 16x1o + 16x1e + 8x2o + 8x2e  # irreps used for hidden features, here we go up to lmax=2, with even and odd parities
 BesselBasis_trainable: true   # set true to train the bessel weights
 nonlinearity_type: gate       # may be 'gate' or 'norm', 'gate' is recommended
-num_layers: 3                 # number of interaction blocks, we found 5-6 to work best
+num_layers: 6                 # number of interaction blocks, we found 5-6 to work best
 resnet: false                 # set True to make interaction block a resnet-style update
 PolynomialCutoff_p: 6         # p-value used in polynomial cutoff function
 invariant_layers: 1           # number of radial layers, we found it important to keep this small, 1 or 2
 invariant_neurons: 8          # number of hidden neurons in radial function, again keep this small for MD applications, 8 - 32, smaller is faster
 avg_num_neighbors: null       # number of neighbors to divide by, None => no normalization.
 use_sc: true                  # use self-connection or not, usually gives big improvement
 
 # to specify different parameters for each convolutional layer, try examples below
-# layer1_use_sc: true         # use "layer{i}_" prefix to specify parameters for only one of the layer,
-#                               priority for different definition:
-#                               invariant_neurons < InteractionBlock_invariant_neurons < layer{i}_invariant_neurons
+# layer1_use_sc: true         # use "layer{i}_" prefix to specify parameters for only one of the layer,
+#                               priority for different definition:
+#                               invariant_neurons < InteractionBlock_invariant_neurons < layer{i}_invariant_neurons
 
 # how to initialize the weights of the model:
 # this can be the importable name of any function that can be `model.apply`ed to initialize some weights in the model. NequIP provides a number of useful initializers:
-model_initializers:
- - nequip.utils.initialization.uniform_initialize_fcs
- - nequip.utils.initialization.uniform_initialize_tps
- - nequip.utils.initialization.orthogonal_initialize_linears
-# - nequip.utils.initialization.uniform_initialize_linears
+#model_initializers:
+# - nequip.utils.initialization.uniform_initialize_fcs
+# - nequip.utils.initialization.uniform_initialize_tps
+# - nequip.utils.initialization.orthogonal_initialize_linears
+## - nequip.utils.initialization.uniform_initialize_linears
 
 # whether to apply a shift and scale, defined per-species, to the atomic energies
 PerSpeciesScaleShift_enable: false
diff --git a/nequip/models/_eng.py b/nequip/models/_eng.py
index eac9fa76..ec17a7b6 100644
--- a/nequip/models/_eng.py
+++ b/nequip/models/_eng.py
@@ -33,7 +33,7 @@ def EnergyModel(**shared_params) -> SequentialGraphNetwork:
         "spharm_edges": SphericalHarmonicEdgeAttrs,
         "radial_basis": RadialBasisEdgeEncoding,
         # -- Embed features --
-        "feature_embedding": AtomwiseLinear,
+        "chemical_embedding": AtomwiseLinear,
     }
 
     # add convnet layers
From c7cb4ad0f8698d3188edb95f4af654ca7ccc3f22 Mon Sep 17 00:00:00 2001
From: Simon Batzner
Date: Fri, 7 May 2021 15:42:09 -0500
Subject: [PATCH 11/16] MD (#29)

* add md

* add nose hoover thermostat + update md script

* langevin --> NH

* add md

* add nose hoover thermostat + update md script

* Update .gitignore

* update to nh

* loading

* update paths

* black + changelog

* Cleanup

* Use argparse

* Separate energy and length units

Co-authored-by: Alby M <1473644+Linux-cpp-lisp@users.noreply.github.com>
---
 .gitignore                           |   1 +
 CHANGELOG.md                         |   4 +-
 nequip/dynamics/__init__.py          |   0
 nequip/dynamics/nequip_calculator.py |  72 +++++++++++
 nequip/dynamics/nosehoover.py        | 115 ++++++++++++++++++
 nequip/scripts/run_md.py             | 175 +++++++++++++++++++++++++++
 6 files changed, 366 insertions(+), 1 deletion(-)
 create mode 100644 nequip/dynamics/__init__.py
 create mode 100644 nequip/dynamics/nequip_calculator.py
 create mode 100644 nequip/dynamics/nosehoover.py
 create mode 100644 nequip/scripts/run_md.py

diff --git a/.gitignore b/.gitignore
index 65abae72..c183223e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,4 @@
+md_runs/
 simon_configs/
 .idea/
 .vscode/
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 76328b1b..a7458731 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,9 +9,11 @@ Most recent change on the bottom.
 ## [Unreleased]
 ### Added
 - More flexible model initialization
+- Add MD w/ Nequip-ASE-calculator + run-MD script w/ custom Nose-Hoover
 
 ### Changed
 - Training now uses atomic file writes to avoid corruption if interrupted
+- `feature_embedding` -> `chemical_embedding` in default models
 
 ### Fixed
 - Dataset `extra_fixed_fields` are now added even if `get_data()` returns `AtomicData` objects
@@ -20,4 +22,4 @@
 ### Fixed
 - `load_deployed_model` now correctly loads all metadata
 
-## [0.2.0] - 2021-04-30
\ No newline at end of file
+## [0.2.0] - 2021-04-30
diff --git a/nequip/dynamics/__init__.py b/nequip/dynamics/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/nequip/dynamics/nequip_calculator.py b/nequip/dynamics/nequip_calculator.py
new file mode 100644
index 00000000..3e58ccfd
--- /dev/null
+++ b/nequip/dynamics/nequip_calculator.py
@@ -0,0 +1,72 @@
+from typing import Union
+import torch
+
+from ase.calculators.calculator import Calculator, all_changes
+
+from nequip.data import AtomicData, AtomicDataDict
+import nequip.scripts.deploy
+
+
+class NequIPCalculator(Calculator):
+    """NequIP ASE Calculator."""
+
+    implemented_properties = ["energy", "forces"]
+
+    def __init__(
+        self,
+        model: torch.jit.ScriptModule,
+        r_max: float,
+        device: Union[str, torch.device],
+        energy_units_to_eV: float = 1.0,
+        length_units_to_A: float = 1.0,
+        **kwargs
+    ):
+        Calculator.__init__(self, **kwargs)
+        self.results = {}
+        self.model = model
+        self.r_max = r_max
+        self.device = device
+        self.energy_units_to_eV = energy_units_to_eV
+        self.length_units_to_A = length_units_to_A
+
+    @classmethod
+    def from_deployed_model(
+        cls, model_path, device: Union[str, torch.device] = "cpu", **kwargs
+    ):
+        # load model
+        model, metadata = nequip.scripts.deploy.load_deployed_model(
+            model_path=model_path, device=device
+        )
+        r_max = float(metadata[nequip.scripts.deploy.R_MAX_KEY])
+
+        # build nequip calculator
+        return cls(model=model, r_max=r_max, device=device, **kwargs)
+
+    def calculate(self, atoms=None, properties=["energy"], system_changes=all_changes):
+        """
+        Calculate properties.
+
+        :param atoms: ase.Atoms object
+        :param properties: [str], properties to be computed, used by ASE internally
+        :param system_changes: [str], system changes since last calculation, used by ASE internally
+        :return:
+        """
+        # call to base-class to set atoms attribute
+        Calculator.calculate(self, atoms)
+
+        # prepare data
+        data = AtomicData.from_ase(atoms=atoms, r_max=self.r_max)
+
+        data = data.to(self.device)
+
+        # predict + extract data
+        out = self.model(AtomicData.to_AtomicDataDict(data))
+        forces = out[AtomicDataDict.FORCE_KEY].detach().cpu().numpy()
+        energy = out[AtomicDataDict.TOTAL_ENERGY_KEY].detach().cpu().item()
+
+        # store results
+        self.results = {
+            "energy": energy * self.energy_units_to_eV,
+            # force has units eng / len:
+            "forces": forces * (self.energy_units_to_eV / self.length_units_to_A),
+        }
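A minimal sketch of using the new calculator in a standard ASE workflow (not part of the patch; the file names are illustrative):

    from ase.io import read
    from nequip.dynamics.nequip_calculator import NequIPCalculator

    atoms = read("structure.xyz", index=0)
    calc = NequIPCalculator.from_deployed_model(model_path="deployed.pth", device="cpu")
    atoms.set_calculator(calc)
    print(atoms.get_potential_energy())  # eV, given correct unit conversion factors
    print(atoms.get_forces())            # eV/A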
diff --git a/nequip/dynamics/nosehoover.py b/nequip/dynamics/nosehoover.py
new file mode 100644
index 00000000..04827870
--- /dev/null
+++ b/nequip/dynamics/nosehoover.py
@@ -0,0 +1,115 @@
+""" Custom Nose-Hoover NVT thermostat based on ASE.
+
+This code was originally written by Jonathan Mailoa based on these notes:
+
+    https://www2.ph.ed.ac.uk/~dmarendu/MVP/MVP03.pdf
+
+It was then adapted by Simon Batzner to be used within ASE. Parts of the overall outline of the class are also based on the Langevin class in ASE.
+"""
+
+import numpy as np
+
+from ase.md.md import MolecularDynamics
+from ase.md.velocitydistribution import Stationary, ZeroRotation
+from ase import units
+
+
+class NoseHoover(MolecularDynamics):
+    """Nose-Hoover (constant N, V, T) molecular dynamics.
+
+    Usage: NoseHoover(atoms, timestep, temperature, nvt_q)
+
+    atoms
+        The list of atoms.
+
+    timestep
+        The time step.
+
+    temperature
+        Target temperature of the MD run in [K]
+
+    nvt_q
+        Q in the Nose-Hoover equations
+
+    Example Usage:
+
+        nvt_dyn = NoseHoover(
+            atoms=atoms,
+            timestep=0.5 * units.fs,
+            temperature=300.,
+            nvt_q=334.
+        )
+    """
+
+    def __init__(
+        self,
+        atoms,
+        timestep,
+        temperature,
+        nvt_q,
+        trajectory=None,
+        logfile=None,
+        loginterval=1,
+        append_trajectory=False,
+    ):
+        # set angular and com momentum to zero, necessary for nose-hoover dynamics.
+        ZeroRotation(atoms)
+        Stationary(atoms)
+
+        # thermostat parameters
+        self.temp = temperature
+        self.nvt_q = nvt_q
+        self.dt = timestep
+        self.dtdt = np.power(self.dt, 2)
+        self.nvt_bath = 0.0
+
+        self.natoms = len(atoms)
+
+        MolecularDynamics.__init__(
+            self,
+            atoms,
+            timestep,
+            trajectory,
+            logfile,
+            loginterval,
+            append_trajectory=append_trajectory,
+        )
+
+    def step(self):
+        """Perform a MD step."""
+        masses = self.atoms.get_masses()
+
+        modified_acc = (
+            self.atoms.get_forces() / masses[:, np.newaxis]
+            - self.nvt_bath * self.atoms.get_velocities()
+        )
+        pos_fullstep = (
+            self.atoms.get_positions()
+            + self.dt * self.atoms.get_velocities()
+            + 0.5 * self.dtdt * modified_acc
+        )
+        vel_halfstep = self.atoms.get_velocities() + 0.5 * self.dt * modified_acc
+
+        self.atoms.set_positions(pos_fullstep)
+
+        e_kin_diff = 0.5 * (
+            np.sum(masses * np.sum(self.atoms.get_velocities() ** 2, axis=1))
+            - (3 * self.natoms + 1) * units.kB * self.temp
+        )
+
+        nvt_bath_halfstep = self.nvt_bath + 0.5 * self.dt * e_kin_diff / self.nvt_q
+        e_kin_diff_halfstep = 0.5 * (
+            np.sum(masses * np.sum(vel_halfstep ** 2, axis=1))
+            - (3 * self.natoms + 1) * units.kB * self.temp
+        )
+        self.nvt_bath = (
+            nvt_bath_halfstep + 0.5 * self.dt * e_kin_diff_halfstep / self.nvt_q
+        )
+        self.atoms.set_velocities(
+            (
+                vel_halfstep
+                + 0.5 * self.dt * (self.atoms.get_forces() / masses[:, np.newaxis])
+            )
+            / (1 + 0.5 * self.dt * self.nvt_bath)
+        )
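For readers following `step()` above: it implements the half-step Nose-Hoover scheme from the linked notes, roughly (a summary derived from the code, not part of the patch)

    a_mod      = F(t)/m - xi(t) * v(t)
    r(t+dt)    = r(t) + dt * v(t) + (dt^2/2) * a_mod
    v(t+dt/2)  = v(t) + (dt/2) * a_mod
    xi(t+dt/2) = xi(t)      + (dt / 2Q) * dE(v(t))
    xi(t+dt)   = xi(t+dt/2) + (dt / 2Q) * dE(v(t+dt/2))
    v(t+dt)    = (v(t+dt/2) + (dt/2) * F(t+dt)/m) / (1 + (dt/2) * xi(t+dt))

where xi is the bath variable (`self.nvt_bath`), Q is `nvt_q`, and dE(v) = (1/2) sum_i m_i v_i^2 - (3N+1) k_B T / 2 is the deviation of the kinetic energy from its target.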
diff --git a/nequip/scripts/run_md.py b/nequip/scripts/run_md.py
new file mode 100644
index 00000000..c67e4553
--- /dev/null
+++ b/nequip/scripts/run_md.py
@@ -0,0 +1,175 @@
+import logging
+import os
+import time
+import argparse
+import numpy as np
+
+import torch
+
+from ase import units
+from ase.io import read, write
+from ase.md.velocitydistribution import MaxwellBoltzmannDistribution
+from ase.md.velocitydistribution import Stationary, ZeroRotation
+
+import nequip
+from nequip.dynamics.nequip_calculator import NequIPCalculator
+from nequip.scripts.deploy import load_deployed_model
+from nequip.dynamics.nosehoover import NoseHoover
+
+
+def save_to_xyz(atoms, logdir, prefix=""):
+    """
+    Save structure to extended xyz file.
+
+    :param atoms: ase.Atoms object to save
+    :param logdir: str, path/to/logging/directory
+    :param prefix: str, prefix to use for storing xyz files
+    """
+    write(
+        filename=os.path.join(os.path.join(logdir, "xyz_strucs/"), prefix + ".xyz"),
+        images=atoms,
+        format="extxyz",
+        append=True,
+    )
+
+
+def write_ase_md_config(curr_atoms, curr_step, dt):
+    """Write time, positions, forces, and atomic kinetic energies to log file.
+
+    :param curr_atoms: ase.Atoms object, current system to log
+    :param curr_step: int, current step / frame in MD simulation
+    :param dt: float, MD time step
+    """
+    parsed_temperature = curr_atoms.get_temperature()
+
+    # frame
+    log_txt = "-------------------- \n-Frame: {}".format(str(curr_step))
+    log_txt += " Simulation Time: {:.6f}\t Temperature: {:.8f} K\n\n".format(
+        dt * curr_step, parsed_temperature
+    )
+
+    # header
+    log_txt += "El \t\t\t\t"
+    log_txt += "Position [A] \t\t\t\t\t\t\t\t "
+    log_txt += "Predicted Force [eV/A]\n"
+
+    forces = curr_atoms.get_forces()
+    atomic_numbers = curr_atoms.get_atomic_numbers()
+    positions = curr_atoms.get_positions()
+
+    # write atom by atom
+    for i in range(len(curr_atoms)):
+        log_txt += "{}\t ".format(str(atomic_numbers[i]))
+
+        for j in range(3):
+            log_txt += "{:.8f} \t".format(positions[i][j])
+
+        log_txt += "\t\t"
+
+        for j in range(3):
+            log_txt += "{:.8f} \t".format(forces[i][j])
+        log_txt += "\n"
+
+    logging.info(log_txt)
+
+
+def main(args=None):
+    parser = argparse.ArgumentParser(
+        description="Run Nose-Hoover MD using a deployed NequIP model."
+    )
+    parser.add_argument("model", help="The deployed NequIP model.", type=str)
+    parser.add_argument(
+        "initial_xyz", help="Initial positions in XYZ format.", type=str
+    )
+    parser.add_argument("logdir", help="Output directory.", type=str)
+    parser.add_argument("--seed", help="Seed for PRNGs.", type=int, default=0)
+    parser.add_argument(
+        "--log-frequency", help="Log every n steps.", type=int, default=1000
+    )
+    parser.add_argument(
+        "--save-frequency", help="Save every n steps.", type=int, default=1000
+    )
+    parser.add_argument(
+        "--energy-units-to-eV",
+        help="Conversion factor from model energy units into eV",
+        type=float,
+        default=1.0,
+    )
+    parser.add_argument(
+        "--length-units-to-A",
+        help="Conversion factor from model length units into Angstrom",
+        type=float,
+        default=1.0,
+    )
+    parser.add_argument(
+        "--temperature", help="Temperature (K)", type=float, default=300.0
+    )
+    parser.add_argument("--dt", help="Timestep (fs)", type=float, default=1.0)
+    parser.add_argument(
+        "--n-steps", help="Number of steps to run", type=int, default=500000
+    )
+    parser.add_argument("--nvt-q", type=float, default=43.06225052549201)
+    args = parser.parse_args(args=args)
+
+    logfilename = os.path.join(args.logdir, f"ase_md_run_{time.time()}.log")
+
+    np.random.seed(args.seed)
+    torch.manual_seed(args.seed)
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+    if not os.path.exists(args.logdir):
+        os.makedirs(args.logdir)
+        os.makedirs(os.path.join(args.logdir, "xyz_strucs"))
+
+    logging.basicConfig(filename=logfilename, format="%(message)s", level=logging.INFO)
+
+    # load atoms
+    atoms = read(args.initial_xyz, index=0)
+
+    # build nequip calculator
+    calc = NequIPCalculator.from_deployed_model(
+        model_path=args.model,
+        device=device,
+        energy_units_to_eV=args.energy_units_to_eV,
+        length_units_to_A=args.length_units_to_A,
+    )
+
+    atoms.set_calculator(calc=calc)
+
+    # set starting temperature
+    MaxwellBoltzmannDistribution(atoms=atoms, temp=args.temperature * units.kB)
+
+    ZeroRotation(atoms)
+    Stationary(atoms)
+
+    nvt_dyn = NoseHoover(
+        atoms=atoms,
+        timestep=args.dt * units.fs,
+        temperature=args.temperature,
+        nvt_q=args.nvt_q,
+    )
+
+    # log first frame
+    logging.info(
+        f"\n\nStarting dynamics with Nose-Hoover Thermostat with nvt_q: {args.nvt_q}\n\n"
+    )
+    write_ase_md_config(curr_atoms=atoms, curr_step=0, dt=args.dt)
+    logging.info(f"COM [A]: {atoms.get_center_of_mass()}\n")
+
+    save_to_xyz(atoms, logdir=args.logdir, prefix="nvt_")
+
+    for i in range(1, args.n_steps):
+        nvt_dyn.run(steps=1)
+
+        if not i % args.log_frequency:
+            write_ase_md_config(curr_atoms=atoms, curr_step=i, dt=args.dt)
+
+            logging.info(f"COM [A]: {atoms.get_center_of_mass()}\n")
+
+        # append current structure to xyz file
+        if not i % args.save_frequency:
+            save_to_xyz(atoms, logdir=args.logdir, prefix="nvt_")
+
+    print("finished...")
+
+
+if __name__ == "__main__":
+    main()
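A sketch of driving the new MD script (not part of the patch; file names are illustrative): `main()` takes an argv-style list, so it can be invoked from Python as well as from the command line.

    from nequip.scripts.run_md import main

    main([
        "deployed.pth", "initial.xyz", "md_runs",
        "--temperature", "300", "--dt", "0.5", "--n-steps", "10000",
    ])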
From e29db916efd64124bf156dd63d35d67a0e8a4bb2 Mon Sep 17 00:00:00 2001
From: Alby M <1473644+Linux-cpp-lisp@users.noreply.github.com>
Date: Fri, 7 May 2021 16:44:28 -0600
Subject: [PATCH 12/16] rename in test

---
 tests/model/test_eng_force.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/tests/model/test_eng_force.py b/tests/model/test_eng_force.py
index 6b1417ac..ae198435 100644
--- a/tests/model/test_eng_force.py
+++ b/tests/model/test_eng_force.py
@@ -36,7 +36,7 @@
     allowed_species=ALLOWED_SPECIES,
     irreps_edge_sh="0e + 1o",
     r_max=4,
-    feature_embedding_irreps_out="8x0e + 8x0o + 8x1e + 8x1o",
+    chemical_embedding_irreps_out="8x0e + 8x0o + 8x1e + 8x1o",
     irreps_mid_output_block="2x0e",
     feature_irreps_hidden="4x0e + 4x1o",
 )
@@ -142,15 +142,15 @@ def test_jit(self, model, atomic_batch, device):
 
     def test_submods(self):
         model = EnergyModel(**minimal_config2)
-        assert isinstance(model.feature_embedding, AtomwiseLinear)
-        true_irreps = o3.Irreps(minimal_config2["feature_embedding_irreps_out"])
+        assert isinstance(model.chemical_embedding, AtomwiseLinear)
+        true_irreps = o3.Irreps(minimal_config2["chemical_embedding_irreps_out"])
         assert (
-            model.feature_embedding.irreps_out[model.feature_embedding.out_field]
+            model.chemical_embedding.irreps_out[model.chemical_embedding.out_field]
             == true_irreps
         )
         # Make sure it propagates
         assert (
-            model.layer0_convnet.irreps_in[model.feature_embedding.out_field]
+            model.layer0_convnet.irreps_in[model.chemical_embedding.out_field]
             == true_irreps
         )

From 9c582342a2e166cc805d56e26e3fa0a1c4a9403d Mon Sep 17 00:00:00 2001
From: Alby M <1473644+Linux-cpp-lisp@users.noreply.github.com>
Date: Fri, 7 May 2021 16:44:35 -0600
Subject: [PATCH 13/16] Simon's README updates

---
 README.md | 12 ++----------
 1 file changed, 2 insertions(+), 10 deletions(-)

diff --git a/README.md b/README.md
index 99462c5b..850f7d0e 100644
--- a/README.md
+++ b/README.md
@@ -35,7 +35,7 @@ pip install git+https://github.com/rusty1s/pytorch_geometric.git
 pip install git+https://github.com/e3nn/e3nn.git
 ```
 
-* Install [`opt_einsum_fx`](https://github.com/Linux-cpp-lisp/opt_einsum_fx) for optimized `e3nn` operations:
+* Install [`opt_einsum_fx`](https://github.com/Linux-cpp-lisp/opt_einsum_fx) for optimized `e3nn` operations --- this is very important for performance:
 
 ```bash
 $ git clone https://github.com/Linux-cpp-lisp/opt_einsum_fx.git
@@ -55,7 +55,7 @@ $ pip install .
 $ pip install -U git+https://github.com/Linux-cpp-lisp/pytorch_ema
 ```
 
-* We use [Weights&Biases](https://wandb.ai) to keep track of experiments. This is not a strict requirement, you can use our software without this, but it may make your life easier. If you want to use it, create an account [here](https://wandb.ai) and install it:
+* We use [Weights&Biases](https://wandb.ai) to keep track of experiments. This is not a strict requirement, you can use our package without this, but it may make your life easier. If you want to use it, create an account [here](https://wandb.ai) and install it:
 
 ```
 pip install wandb
@@ -78,14 +78,6 @@ pip install pytest
 pytest ./tests
 ```
 
-One some platforms, the installation may complain about the scikit learn installation. If that's the case, specifically install the following scikit-learn version:
-
-```
-pip install -U scikit-learn==0.23.0
-```
-
-That should fix it.
-
 ### Tutorial
 
 The best way to learn how to use NequIP is [through the tutorial notebook hosted here](https://deepnote.com/project/2412ca93-7ad1-4458-972c-5d5add5a667e)
From 2f96767b24b717c214bb53567df546dfae16654d Mon Sep 17 00:00:00 2001
From: Alby M <1473644+Linux-cpp-lisp@users.noreply.github.com>
Date: Fri, 7 May 2021 16:45:45 -0600
Subject: [PATCH 14/16] no deps

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 850f7d0e..2843cb2a 100644
--- a/README.md
+++ b/README.md
@@ -32,7 +32,7 @@ pip install git+https://github.com/rusty1s/pytorch_geometric.git
 * Install [e3nn](https://github.com/e3nn/e3nn):
 
 ```
-pip install git+https://github.com/e3nn/e3nn.git
+pip install --no-deps git+https://github.com/e3nn/e3nn.git
 ```

From 1d1adceb7be721cd3ac881cdfbf6f21a0fed4c4f Mon Sep 17 00:00:00 2001
From: Alby M <1473644+Linux-cpp-lisp@users.noreply.github.com>
Date: Fri, 7 May 2021 16:49:05 -0600
Subject: [PATCH 15/16] Bump version

---
 CHANGELOG.md       | 4 +++-
 nequip/_version.py | 2 +-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index a7458731..8c824031 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,13 +7,15 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 Most recent change on the bottom.
 
 ## [Unreleased]
+
+## [0.3.0] - 2021-05-07
 ### Added
 - More flexible model initialization
 - Add MD w/ Nequip-ASE-calculator + run-MD script w/ custom Nose-Hoover
 
 ### Changed
 - Training now uses atomic file writes to avoid corruption if interrupted
-- `feature_embedding` -> `chemical_embedding` in default models
+- `feature_embedding` renamed to `chemical_embedding` in default models
 
 ### Fixed
 - Dataset `extra_fixed_fields` are now added even if `get_data()` returns `AtomicData` objects
diff --git a/nequip/_version.py b/nequip/_version.py
index bf2a4433..355845c6 100644
--- a/nequip/_version.py
+++ b/nequip/_version.py
@@ -2,4 +2,4 @@
 # See Python packaging guide
 # https://packaging.python.org/guides/single-sourcing-package-version/
 
-__version__ = "0.2.1"
+__version__ = "0.3.0"
From 04a1f07d3c4f4b049f2ddd908dcdf805b5c6ce06 Mon Sep 17 00:00:00 2001
From: Alby M <1473644+Linux-cpp-lisp@users.noreply.github.com>
Date: Fri, 7 May 2021 16:52:07 -0600
Subject: [PATCH 16/16] CHANGELOG

---
 CHANGELOG.md | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8c824031..6d257816 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -10,14 +10,17 @@ Most recent change on the bottom.
 
 ## [0.3.0] - 2021-05-07
 ### Added
+- Sub-builders can be skipped in `instantiate` by setting them to `None`
 - More flexible model initialization
 - Add MD w/ Nequip-ASE-calculator + run-MD script w/ custom Nose-Hoover
 
 ### Changed
+- PBC must be explicit if a cell is provided
 - Training now uses atomic file writes to avoid corruption if interrupted
 - `feature_embedding` renamed to `chemical_embedding` in default models
 
 ### Fixed
+- `BesselBasis` now works on GPU when `trainable=False`
 - Dataset `extra_fixed_fields` are now added even if `get_data()` returns `AtomicData` objects
 
 ## [0.2.1] - 2021-05-03