From 5150b0ceaf61ec3c39c1f1eb0e9a0d619e0b967f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20R=C3=BCger?= Date: Tue, 12 Mar 2024 13:56:59 +0100 Subject: [PATCH] format source code with black -l 120 -t py38 --- __init__.py | 28 +- __version__.py | 2 +- core/Cluster.py | 142 ++-- core/ClusterExpansion.py | 160 +++-- core/ElementaryReaction.py | 292 ++++---- core/Lattice.py | 636 +++++++++-------- core/LatticeState.py | 319 +++++---- core/Mechanism.py | 253 +++---- core/ParametersBase.py | 57 +- core/RKFLoader.py | 654 ++++++++++-------- core/Settings.py | 186 ++--- core/Species.py | 214 +++--- core/SpeciesList.py | 108 +-- core/ZacrosJob.py | 389 ++++++----- core/ZacrosParametersScanJob.py | 124 ++-- core/ZacrosResults.py | 601 ++++++++-------- core/ZacrosSteadyStateJob.py | 508 ++++++++------ doc/source/conf.py | 20 +- examples/CO+Pt111/CO+Pt111.py | 67 +- .../CoveragesAndReactionRate.ipynb | 48 +- .../CoveragesAndReactionRate.py | 67 +- ...CoveragesAndReactionRate_ViewResults.ipynb | 86 +-- .../CoveragesAndReactionRate_ViewResults.py | 118 ++-- .../LangmuirHinshelwood.py | 22 +- examples/LangmuirHinshelwood/SteadyState.py | 49 +- .../PhaseTransition-SteadyState.py | 64 +- examples/ReuterScheffler/PhaseTransition.py | 67 +- examples/ReuterScheffler/ReuterScheffler.py | 22 +- examples/ReuterScheffler/SteadyState.py | 37 +- examples/ReuterScheffler/check.py | 6 +- .../WaterGasShiftOnPt111.py | 300 ++++---- .../PhaseTransitions-ADP+cover.py | 121 ++-- .../PhaseTransitions-ADP.ipynb | 122 ++-- .../ZiffGulariBarshad/PhaseTransitions-ADP.py | 137 ++-- .../PhaseTransitions-ADP_ViewResults.py | 16 +- .../PhaseTransitions-SteadyState-ADP.py | 117 ++-- .../PhaseTransitions-SteadyState.ipynb | 63 +- .../PhaseTransitions-SteadyState.py | 82 +-- .../ZiffGulariBarshad/PhaseTransitions-v2.py | 88 ++- .../ZiffGulariBarshad/PhaseTransitions.ipynb | 101 ++- .../ZiffGulariBarshad/PhaseTransitions.py | 112 ++- examples/ZiffGulariBarshad/SteadyState.ipynb | 57 +- examples/ZiffGulariBarshad/SteadyState.py | 66 +- .../SteadyState_ViewResults.py | 37 +- .../ZiffGulariBarshad/ZiffGulariBarshad.py | 81 ++- examples/intro/intro.py | 78 ++- examples/intro/intro0.py | 50 +- models/LangmuirHinshelwood.py | 116 ++-- models/ReuterScheffler.py | 371 ++++++---- models/ZiffGulariBarshad.py | 74 +- setup.py | 60 +- tests/test_Cluster.py | 76 +- tests/test_ElementaryReaction.py | 58 +- tests/test_Lattice.py | 159 +++-- tests/test_LatticeInitialState.py | 72 +- tests/test_Mechanism.py | 58 +- tests/test_Parameters.py | 23 +- tests/test_RKFLoader.py | 74 +- tests/test_Settings.py | 14 +- tests/test_Species.py | 6 +- tests/test_SpeciesList.py | 32 +- tests/test_ZacrosJob.py | 113 ++- tests/test_ZacrosJob_restart.py | 91 +-- tests/test_ZacrosParametersScanJob.py | 69 +- ...test_ZacrosParametersScanSteadyStateJob.py | 77 ++- tests/test_ZacrosResults.py | 176 +++-- tests/test_ZacrosSteadyStateJob.py | 46 +- tests/test_post_process.py | 41 +- utils/compareReports.py | 34 +- 69 files changed, 4734 insertions(+), 4080 deletions(-) diff --git a/__init__.py b/__init__.py index 1254f7f..40f3f2f 100644 --- a/__init__.py +++ b/__init__.py @@ -3,57 +3,59 @@ from .__version__ import __version__ + def __autoimport(path, folders): import os from os.path import join as opj - is_module = lambda x: x.endswith('.py') and not x.startswith('__init__') + + is_module = lambda x: x.endswith(".py") and not x.startswith("__init__") ret = [] for folder in folders: - for dirpath, dirnames, filenames in os.walk(opj(path,folder)): + for dirpath, dirnames, 
filenames in os.walk(opj(path, folder)): modules = [os.path.splitext(f)[0] for f in filter(is_module, filenames)] relpath = os.path.relpath(dirpath, path).split(os.sep) for module in modules: - imp = '.'.join(relpath + [module]) - tmp = __import__(imp, globals=globals(), fromlist=['*'], level=1) - if hasattr(tmp, '__all__'): + imp = ".".join(relpath + [module]) + tmp = __import__(imp, globals=globals(), fromlist=["*"], level=1) + if hasattr(tmp, "__all__"): ret += tmp.__all__ for name in tmp.__all__: globals()[name] = vars(tmp)[name] return ret -__all__ = __autoimport(__path__[0], ['core']) +__all__ = __autoimport(__path__[0], ["core"]) -def init(path=None, folder=None, config_settings:Dict=None): +def init(path=None, folder=None, config_settings: Dict = None): """Initializes pyZacros and PLAMS environment. It internally calls the scm.plams.init() method.""" - scm.plams.init( path=path, folder=folder, config_settings=config_settings ) + scm.plams.init(path=path, folder=folder, config_settings=config_settings) def finish(otherJM=None): """Wait for all threads to finish and clean the environment. It internally calls the scm.plams.finish() method.""" - scm.plams.finish( otherJM=otherJM ) + scm.plams.finish(otherJM=otherJM) def load(filename): """Load previously saved job from ``.dill`` file. It internally calls the scm.plams.load() method.""" - return scm.plams.load( filename ) + return scm.plams.load(filename) def load_all(path, jobmanager=None): """Load all jobs from *path*. It internally calls the scm.plams.load_all() method.""" - return scm.plams.load_all( path=path, jobmanager=jobmanager ) + return scm.plams.load_all(path=path, jobmanager=jobmanager) def delete_job(job): """Remove *job* from its corresponding |JobManager| and delete the job folder from the disk. Mark *job* as 'deleted'. It internally calls the scm.plams.delete_job() method.""" - scm.plams.load_all( job=job ) + scm.plams.load_all(job=job) def log(message, level=0): """Log *message* with verbosity *level*. It internally calls the scm.plams.log() method.""" - scm.plams.load_all( message=message, level=level ) + scm.plams.load_all(message=message, level=level) def workdir(): diff --git a/__version__.py b/__version__.py index 58d478a..c68196d 100644 --- a/__version__.py +++ b/__version__.py @@ -1 +1 @@ -__version__ = '1.2.0' +__version__ = "1.2.0" diff --git a/core/Cluster.py b/core/Cluster.py index fe92f33..3fe5d21 100644 --- a/core/Cluster.py +++ b/core/Cluster.py @@ -1,7 +1,8 @@ from .Species import * from .SpeciesList import SpeciesList -__all__ = ['Cluster'] +__all__ = ["Cluster"] + class Cluster: """ @@ -16,41 +17,38 @@ class Cluster: * ``label`` -- If None, a unique label is generated based on its composition. """ - def __init__(self, species, - site_types = None, - entity_number = None, - neighboring = None, - multiplicity = 1, - energy = 0.000, - label = None): + def __init__( + self, species, site_types=None, entity_number=None, neighboring=None, multiplicity=1, energy=0.000, label=None + ): """ Creates a new Cluster object """ self.species = species # e.g. 
[ Species("H*",1), Species("H*",1) ] - self.sites = len([ sp for sp in species if sp == Species.UNSPECIFIED or sp.is_adsorbed() ]) + self.sites = len([sp for sp in species if sp == Species.UNSPECIFIED or sp.is_adsorbed()]) - if( site_types is not None ): - if( not ( all([ type(st)==int for st in site_types ]) or all([ type(st)==str for st in site_types ]) ) ): - msg = "\n### ERROR ### ElementaryReaction.__init__.\n" + if site_types is not None: + if not (all([type(st) == int for st in site_types]) or all([type(st) == str for st in site_types])): + msg = "\n### ERROR ### ElementaryReaction.__init__.\n" msg += " Inconsistent type for site_types. It should be a list of int or str\n" raise NameError(msg) - self.site_types = site_types # e.g. [ "f", "f" ], [ 0, 0 ] + self.site_types = site_types # e.g. [ "f", "f" ], [ 0, 0 ] else: - self.site_types = self.sites*[ 0 ] + self.site_types = self.sites * [0] - self.neighboring = neighboring # e.g. [ (0,1) ] - self.multiplicity = multiplicity # e.g. 2 - self.energy = energy # Units eV + self.neighboring = neighboring # e.g. [ (0,1) ] + self.multiplicity = multiplicity # e.g. 2 + self.energy = energy # Units eV self.entity_number = entity_number - if( entity_number is None ): self.entity_number = SpeciesList.default_entity_numbers( self.sites, self.species ) + if entity_number is None: + self.entity_number = SpeciesList.default_entity_numbers(self.sites, self.species) - #TODO Make a way to check denticity consistency - #if( sum([s.denticity for s in self.species]) != self.sites ): - #msg = "\n### ERROR ### Cluster.__init__.\n" - #msg += "Inconsistent dimensions for species or site_types\n" - #raise NameError(msg) + # TODO Make a way to check denticity consistency + # if( sum([s.denticity for s in self.species]) != self.sites ): + # msg = "\n### ERROR ### Cluster.__init__.\n" + # msg += "Inconsistent dimensions for species or site_types\n" + # raise NameError(msg) self.__userLabel = label self.__label = None @@ -60,44 +58,40 @@ def __init__(self, species, self.__composition = {} for item in species: - if( item != Species.UNSPECIFIED ): + if item != Species.UNSPECIFIED: self.__mass += item.mass() else: continue - for symbol,n in item.composition().items(): - if( not symbol in self.__composition ): + for symbol, n in item.composition().items(): + if not symbol in self.__composition: self.__composition[symbol] = n else: self.__composition[symbol] += n - def __len__(self): """ Returns the number of species inside the cluster """ return len(self.species) - def __eq__(self, other): """ Returns True if both objects have the same label. 
Otherwise returns False * ``other`` -- """ - if( self.__label == other.__label ): + if self.__label == other.__label: return True else: return False - def __hash__(self): """ Returns a hash based on the label """ return hash(self.__label) - def __updateLabel(self): """ Updates the attribute 'label' @@ -108,17 +102,17 @@ def __updateLabel(self): self.__label = "" for i in range(len(self.species)): - if( self.species[i] != Species.UNSPECIFIED ): + if self.species[i] != Species.UNSPECIFIED: self.__label += self.species[i].symbol else: self.__label += "&" - if( len(self.entity_number)>1 ): - self.__label += str(self.entity_number[i]+1) + if len(self.entity_number) > 1: + self.__label += str(self.entity_number[i] + 1) self.__label += str(self.site_types[i]) if self.neighboring is not None: - if(len(self.neighboring) > 0): + if len(self.neighboring) > 0: self.__label += ":" # For neighboring nodes are sorted @@ -127,134 +121,134 @@ def __updateLabel(self): lNeighboring = list(self.neighboring[i]) lNeighboring.sort() self.__label += str(tuple(lNeighboring)).replace(" ", "") - if( i != len(self.neighboring)-1): + if i != len(self.neighboring) - 1: self.__label += "," - def label(self): """ Returns the label of the cluster """ - if( self.__label is None ): + if self.__label is None: self.__updateLabel() return self.__label - def __str__(self): """ Translates the object to a string """ - output = "cluster " + self.__label +"\n" + output = "cluster " + self.__label + "\n" - if( self.sites != 0 ): - output += " sites " + str(self.sites)+"\n" + if self.sites != 0: + output += " sites " + str(self.sites) + "\n" if self.neighboring is not None and len(self.neighboring) > 0: output += " neighboring " for i in range(len(self.neighboring)): - output += str(self.neighboring[i][0]+1)+"-"+str(self.neighboring[i][1]+1) - if( i != len(self.neighboring)-1 ): + output += str(self.neighboring[i][0] + 1) + "-" + str(self.neighboring[i][1] + 1) + if i != len(self.neighboring) - 1: output += " " output += "\n" site_identate = {} - output += " lattice_state"+"\n" + output += " lattice_state" + "\n" for i in range(self.sites): - if( self.entity_number[i] not in site_identate ): - site_identate[ self.entity_number[i] ] = 0 + if self.entity_number[i] not in site_identate: + site_identate[self.entity_number[i]] = 0 else: - site_identate[ self.entity_number[i] ] = site_identate[ self.entity_number[i] ] + 1 + site_identate[self.entity_number[i]] = site_identate[self.entity_number[i]] + 1 - if( self.species[i] != Species.UNSPECIFIED ): - if( site_identate[ self.entity_number[i] ] >= self.species[i].denticity ): - msg = "\n### ERROR ### Cluster.__str__.\n" - msg += "Inconsistent of denticity value for "+self.species[i].symbol+"\n" + if self.species[i] != Species.UNSPECIFIED: + if site_identate[self.entity_number[i]] >= self.species[i].denticity: + msg = "\n### ERROR ### Cluster.__str__.\n" + msg += "Inconsistent of denticity value for " + self.species[i].symbol + "\n" raise NameError(msg) - output += " "+str(self.entity_number[i]+1)+" "+self.species[i].symbol+" "+str(site_identate[self.entity_number[i]]+1)+"\n" + output += ( + " " + + str(self.entity_number[i] + 1) + + " " + + self.species[i].symbol + + " " + + str(site_identate[self.entity_number[i]] + 1) + + "\n" + ) else: output += " & & &\n" output += " site_types " for i in range(len(self.site_types)): - if( type(self.site_types[i]) == int ): - output += str(self.site_types[i]+1) - elif( type(self.site_types[i]) == str ): + if type(self.site_types[i]) == int: + output += 
str(self.site_types[i] + 1) + elif type(self.site_types[i]) == str: output += self.site_types[i] - if( i != len(self.site_types)-1 ): + if i != len(self.site_types) - 1: output += " " output += "\n" - output += " graph_multiplicity "+str(self.multiplicity)+"\n" + output += " graph_multiplicity " + str(self.multiplicity) + "\n" - output += " cluster_eng "+("%12.5e"%self.energy)+"\n" + output += " cluster_eng " + ("%12.5e" % self.energy) + "\n" output += "end_cluster" return output - def composition(self): """ Returns a dictionary containing the number of atoms of each kind. """ return self.__composition - def mass(self): """ Returns the mass of the cluster in Da """ return self.__mass - def gas_species(self): """Returns the gas species.""" species = [] for sp in self.species: - if( sp != Species.UNSPECIFIED and sp.kind == Species.GAS ): - species.append( sp ) + if sp != Species.UNSPECIFIED and sp.kind == Species.GAS: + species.append(sp) - species = SpeciesList( species ) + species = SpeciesList(species) species.remove_duplicates() return species - def surface_species(self): """Returns the surface species.""" species = [] for sp in self.species: - if( sp != Species.UNSPECIFIED and sp.kind == Species.SURFACE ): - species.append( sp ) + if sp != Species.UNSPECIFIED and sp.kind == Species.SURFACE: + species.append(sp) - species = SpeciesList( species ) + species = SpeciesList(species) species.remove_duplicates() return species - def site_types_set(self): """ Returns the set of the sites types """ return set(self.site_types) - - def replace_site_types( self, site_types_old, site_types_new ): + def replace_site_types(self, site_types_old, site_types_new): """ Replaces the site types names * ``site_types_old`` -- List of strings containing the old site_types to be replaced * ``site_types_new`` -- List of strings containing the new site_types which would replace old site_types_old. """ - assert( len(site_types_old) == len(site_types_new) ) + assert len(site_types_old) == len(site_types_new) for i in range(len(site_types_old)): for j in range(len(self.site_types)): - if( self.site_types[j] == site_types_old[i] ): + if self.site_types[j] == site_types_old[i]: self.site_types[j] = site_types_new[i] self.__updateLabel() diff --git a/core/ClusterExpansion.py b/core/ClusterExpansion.py index 2493d3e..289b118 100644 --- a/core/ClusterExpansion.py +++ b/core/ClusterExpansion.py @@ -4,9 +4,10 @@ from .SpeciesList import * from .Cluster import * -__all__ = ['ClusterExpansion'] +__all__ = ["ClusterExpansion"] -class ClusterExpansion( UserList ): + +class ClusterExpansion(UserList): """ Creates a new ClusterExpansion object which is formally a list of Clusters. It implements all python list operations. @@ -15,104 +16,109 @@ class ClusterExpansion( UserList ): * ``surface_species`` -- Surface species. It is required if the option ``fileName`` was used. 
""" - def __init__(self, data=[], fileName=None, surface_species=None ): - super(ClusterExpansion, self).__init__( data ) + def __init__(self, data=[], fileName=None, surface_species=None): + super(ClusterExpansion, self).__init__(data) if fileName is not None: if surface_species is not None: - self.__fromZacrosFile( fileName, surface_species ) + self.__fromZacrosFile(fileName, surface_species) else: - raise Exception( "Error: Parameter surface_species is required to load the ClusterExpansion from a zacros input file" ) + raise Exception( + "Error: Parameter surface_species is required to load the ClusterExpansion from a zacros input file" + ) # Duplicates are automatically removed. copy_data = self.data self.data = [] for cl in copy_data: - if( cl not in self.data ): - self.data.append( cl ) - + if cl not in self.data: + self.data.append(cl) def __fromZacrosFile(self, fileName, surface_species): """ Creates a Mechanism from a Zacros input file energetics_input.dat """ - if not os.path.isfile( fileName ): - raise Exception( "Trying to load a file that doen't exist: "+fileName ) + if not os.path.isfile(fileName): + raise Exception("Trying to load a file that doen't exist: " + fileName) - with open( fileName, "r" ) as inp: + with open(fileName, "r") as inp: file_content = inp.readlines() - file_content = [line.split("#")[0] for line in file_content if line.split("#")[0].strip()] # Removes empty lines and comments + file_content = [ + line.split("#")[0] for line in file_content if line.split("#")[0].strip() + ] # Removes empty lines and comments nline = 0 - while( nline < len(file_content) ): + while nline < len(file_content): tokens = file_content[nline].split() - if( tokens[0].lower() == "cluster" ): + if tokens[0].lower() == "cluster": parameters = {} - if( len(tokens) < 2 ): - raise Exception( "Error: Format inconsistent in section cluster. Label not found!" ) + if len(tokens) < 2: + raise Exception("Error: Format inconsistent in section cluster. Label not found!") parameters["label"] = tokens[1] nline += 1 - while( nline < len(file_content) ): + while nline < len(file_content): tokens = file_content[nline].split() - if( tokens[0] == "end_cluster" ): + if tokens[0] == "end_cluster": break - def process_neighboring( sv ): + def process_neighboring(sv): output = [] for pair in sv: - a,b = pair.split("-") - output.append( (int(a)-1,int(b)-1) ) + a, b = pair.split("-") + output.append((int(a) - 1, int(b) - 1)) return output - def process_site_types( sv ): + def process_site_types(sv): output = [] for i in range(len(sv)): - if( sv[i].isdigit() ): - output.append( int(sv[i])-1 ) + if sv[i].isdigit(): + output.append(int(sv[i]) - 1) else: - output.append( sv[i] ) + output.append(sv[i]) return output - def process_variant( sv ): - raise Exception( "ZacrosJob.__recreate_energetics_input. Option 'cluster%variant' is not supported yet!" ) + def process_variant(sv): + raise Exception( + "ZacrosJob.__recreate_energetics_input. Option 'cluster%variant' is not supported yet!" 
+ ) cases = { - "sites" : lambda sv: parameters.setdefault("sites", int(sv[0])), - "site_types" : lambda sv: parameters.setdefault("site_types", process_site_types(sv)), - "graph_multiplicity" : lambda sv: parameters.setdefault("multiplicity", int(sv[0])), - "cluster_eng" : lambda sv: parameters.setdefault("energy", float(sv[0])), - "neighboring" : lambda sv: parameters.setdefault("neighboring", process_neighboring(sv)), - "variant" : lambda sv: parameters.setdefault("variant", process_variant(sv)) + "sites": lambda sv: parameters.setdefault("sites", int(sv[0])), + "site_types": lambda sv: parameters.setdefault("site_types", process_site_types(sv)), + "graph_multiplicity": lambda sv: parameters.setdefault("multiplicity", int(sv[0])), + "cluster_eng": lambda sv: parameters.setdefault("energy", float(sv[0])), + "neighboring": lambda sv: parameters.setdefault("neighboring", process_neighboring(sv)), + "variant": lambda sv: parameters.setdefault("variant", process_variant(sv)), } - cases.get( tokens[0], lambda sv: None )( tokens[1:] ) + cases.get(tokens[0], lambda sv: None)(tokens[1:]) - if( tokens[0] == "lattice_state" ): + if tokens[0] == "lattice_state": parameters["lattice_state"] = [] isites = 0 - while( nline < len(file_content) ): + while nline < len(file_content): nline += 1 tokens = file_content[nline].split() - if( isites == parameters["sites"] ): + if isites == parameters["sites"]: break - if( len(tokens) < 3 ): - raise Exception( "Error: Format inconsistent in section lattice_state!" ) + if len(tokens) < 3: + raise Exception("Error: Format inconsistent in section lattice_state!") - if( tokens[0]+tokens[1]+tokens[2] != "&&&" ): - entity_number = int(tokens[0])-1 + if tokens[0] + tokens[1] + tokens[2] != "&&&": + entity_number = int(tokens[0]) - 1 species_name = tokens[1] dentate_number = int(tokens[2]) - parameters["lattice_state"].append( [ entity_number, species_name, dentate_number ] ) + parameters["lattice_state"].append([entity_number, species_name, dentate_number]) isites += 1 else: @@ -121,39 +127,39 @@ def process_variant( sv ): parameters["species"] = [] parameters["entity_number"] = [] site_identate = {} - for entity_number,species_name,dentate_number in parameters["lattice_state"]: - if( entity_number not in site_identate ): - site_identate[ entity_number ] = 0 + for entity_number, species_name, dentate_number in parameters["lattice_state"]: + if entity_number not in site_identate: + site_identate[entity_number] = 0 else: - site_identate[ entity_number ] = site_identate[ entity_number ] + 1 + site_identate[entity_number] = site_identate[entity_number] + 1 - #TODO Find a way to check consistency of dentate_number + # TODO Find a way to check consistency of dentate_number loc_id = -1 - for i,sp in enumerate(surface_species): - if( sp.symbol == species_name and site_identate[ entity_number ]+1 == dentate_number ): + for i, sp in enumerate(surface_species): + if sp.symbol == species_name and site_identate[entity_number] + 1 == dentate_number: loc_id = i break - if( loc_id == -1 ): - raise Exception( "Error: Species "+species_name+" was not defined in the simulation_input.txt file!" ) + if loc_id == -1: + raise Exception( + "Error: Species " + species_name + " was not defined in the simulation_input.txt file!" 
+ ) - parameters["species"].append( surface_species[loc_id] ) - parameters["entity_number"].append( entity_number ) + parameters["species"].append(surface_species[loc_id]) + parameters["entity_number"].append(entity_number) del parameters["sites"] del parameters["lattice_state"] - self.append( Cluster( **parameters ) ) + self.append(Cluster(**parameters)) nline += 1 - def append(self, item): """ Appends a cluster to the end of the sequence. Appends a cluster to the end of the sequence. Notice that duplicate items are not accepted. In case of duplicity, the new cluster is just ignored. """ - self.insert( len(self), item ) - + self.insert(len(self), item) def extend(self, other): """ @@ -162,8 +168,7 @@ def extend(self, other): * ``other`` -- Iterable with the clusters to extend the cluster expansion. """ for item in other: - self.append( item ) - + self.append(item) def insert(self, i, item): """ @@ -173,82 +178,75 @@ def insert(self, i, item): * ``item`` -- The cluster to be inserted in the list. """ for cl in self: - if( cl.label() == item.label() ): + if cl.label() == item.label(): return super(ClusterExpansion, self).insert(i, item) - def __str__(self): """ Translates the object to a string in Zacros input files format. """ - output = "energetics\n\n" + output = "energetics\n\n" for cl in self: - output += str(cl)+"\n\n" + output += str(cl) + "\n\n" output += "end_energetics" return output - def gas_species(self): """Returns the gas species.""" species = [] for cl in self: - species.extend( cl.gas_species() ) + species.extend(cl.gas_species()) - species = SpeciesList( species ) + species = SpeciesList(species) species.remove_duplicates() return species - def surface_species(self): """Returns the surface species.""" species = [] for cl in self: - species.extend( cl.surface_species() ) + species.extend(cl.surface_species()) - species = SpeciesList( species ) + species = SpeciesList(species) species.remove_duplicates() return species - - def site_types_set( self ): + def site_types_set(self): """ Returns the set of the sites types """ site_types = set() for cl in self: - site_types.update( cl.site_types_set() ) + site_types.update(cl.site_types_set()) return site_types - - def replace_site_types( self, site_types_old, site_types_new ): + def replace_site_types(self, site_types_old, site_types_new): """ Replaces the site types names * ``site_types_old`` -- List of strings containing the old site_types to be replaced * ``site_types_new`` -- List of strings containing the new site_types which would replace old site_types_old. 
""" - assert( len(site_types_old) == len(site_types_new) ) + assert len(site_types_old) == len(site_types_new) for cl in self: - cl.replace_site_types( site_types_old, site_types_new ) - + cl.replace_site_types(site_types_old, site_types_new) - def find( self, label ): + def find(self, label): """ Returns the list of clusters where the substring ``label`` is found in the clusters' label """ return [cl for cl in self if cl.label().find(label) != -1] - - def find_one( self, label ): + def find_one(self, label): """ Returns the first cluster where the substring ``label`` is found in the cluster's label """ diff --git a/core/ElementaryReaction.py b/core/ElementaryReaction.py index 84a6fdc..9bcf529 100644 --- a/core/ElementaryReaction.py +++ b/core/ElementaryReaction.py @@ -1,7 +1,8 @@ from collections import Counter from .SpeciesList import SpeciesList -__all__ = ['ElementaryReaction'] +__all__ = ["ElementaryReaction"] + class ElementaryReaction: """ @@ -21,279 +22,316 @@ class ElementaryReaction: * ``label`` -- If None, a unique label is generated based on the label of its initial and final states. """ - def __init__(self, - initial, - final, - site_types = None, - initial_entity_number = None, - final_entity_number = None, - neighboring = None, - reversible = True, - pre_expon = 0.0, - pe_ratio = 0.0, - activation_energy = 0.0, - prox_factor = None, - label = None): - - if( ( type(initial) != SpeciesList and type(initial) != list ) - or ( type(final) != SpeciesList and type(final) != list ) ): - msg = "\n### ERROR ### ElementaryReaction.__init__.\n" + def __init__( + self, + initial, + final, + site_types=None, + initial_entity_number=None, + final_entity_number=None, + neighboring=None, + reversible=True, + pre_expon=0.0, + pe_ratio=0.0, + activation_energy=0.0, + prox_factor=None, + label=None, + ): + + if (type(initial) != SpeciesList and type(initial) != list) or ( + type(final) != SpeciesList and type(final) != list + ): + msg = "\n### ERROR ### ElementaryReaction.__init__.\n" msg += " Inconsistent type for initial or final\n" raise NameError(msg) - sites_initial = len([ sp for sp in initial if sp.is_adsorbed() ]) - sites_final = len([ sp for sp in final if sp.is_adsorbed() ]) - if( sites_initial != sites_final ): - msg = "\n### ERROR ### ElementaryReaction.__init__.\n" + sites_initial = len([sp for sp in initial if sp.is_adsorbed()]) + sites_final = len([sp for sp in final if sp.is_adsorbed()]) + if sites_initial != sites_final: + msg = "\n### ERROR ### ElementaryReaction.__init__.\n" msg += "Inconsistent number of surface sites between initial and final\n" - msg += "sites_initial="+str([ sp.symbol for sp in initial if sp.is_adsorbed() ])+"\n" - msg += "sites_final="+str([ sp.symbol for sp in final if sp.is_adsorbed() ])+"\n" + msg += "sites_initial=" + str([sp.symbol for sp in initial if sp.is_adsorbed()]) + "\n" + msg += "sites_final=" + str([sp.symbol for sp in final if sp.is_adsorbed()]) + "\n" raise NameError(msg) self.sites = sites_initial - if( site_types is not None ): - if( not ( all([ type(st)==int for st in site_types ]) or all([ type(st)==str for st in site_types ]) ) ): - msg = "\n### ERROR ### ElementaryReaction.__init__.\n" + if site_types is not None: + if not (all([type(st) == int for st in site_types]) or all([type(st) == str for st in site_types])): + msg = "\n### ERROR ### ElementaryReaction.__init__.\n" msg += " Inconsistent type for site_types. It should be a list of int or str\n" raise NameError(msg) - self.site_types = site_types # e.g. 
[ "f", "f" ], [ 0, 0 ] + self.site_types = site_types # e.g. [ "f", "f" ], [ 0, 0 ] else: - self.site_types = self.sites*[ 0 ] + self.site_types = self.sites * [0] - self.neighboring = neighboring # e.g. [ (0,1) ] + self.neighboring = neighboring # e.g. [ (0,1) ] self.initial = initial - if( type(initial) == list ): self.initial = SpeciesList(initial) - self.__initial_gas = SpeciesList( [sp for sp in self.initial if sp.is_gas()] ) - self.__initial_adsorbed = SpeciesList( [sp for sp in self.initial if sp.is_adsorbed()] ) + if type(initial) == list: + self.initial = SpeciesList(initial) + self.__initial_gas = SpeciesList([sp for sp in self.initial if sp.is_gas()]) + self.__initial_adsorbed = SpeciesList([sp for sp in self.initial if sp.is_adsorbed()]) self.initial_entity_number = initial_entity_number - if( initial_entity_number is None ): - self.initial_entity_number = SpeciesList.default_entity_numbers( self.sites, self.__initial_adsorbed ) + if initial_entity_number is None: + self.initial_entity_number = SpeciesList.default_entity_numbers(self.sites, self.__initial_adsorbed) self.final = final - if( type(final) == list ): self.final = SpeciesList(final) - self.__final_gas = SpeciesList( [sp for sp in self.final if sp.is_gas()] ) - self.__final_adsorbed = SpeciesList( [sp for sp in self.final if sp.is_adsorbed()] ) + if type(final) == list: + self.final = SpeciesList(final) + self.__final_gas = SpeciesList([sp for sp in self.final if sp.is_gas()]) + self.__final_adsorbed = SpeciesList([sp for sp in self.final if sp.is_adsorbed()]) self.final_entity_number = final_entity_number - if( final_entity_number is None ): - self.final_entity_number = SpeciesList.default_entity_numbers( self.sites, self.__final_adsorbed ) + if final_entity_number is None: + self.final_entity_number = SpeciesList.default_entity_numbers(self.sites, self.__final_adsorbed) self.reversible = reversible self.pre_expon = pre_expon self.pe_ratio = pe_ratio - self.activation_energy = activation_energy # e.g. 0.200 + self.activation_energy = activation_energy # e.g. 0.200 self.prox_factor = prox_factor - #if( self.sites != sum([s.denticity for s in self.initial]) - #or self.sites != sum([s.denticity for s in self.final]) ): - #msg = "\n### ERROR ### ElementaryReaction.__init__.\n" - #msg += " Inconsistent dimensions for sites, initial or final\n" - #raise NameError(msg) + # if( self.sites != sum([s.denticity for s in self.initial]) + # or self.sites != sum([s.denticity for s in self.final]) ): + # msg = "\n### ERROR ### ElementaryReaction.__init__.\n" + # msg += " Inconsistent dimensions for sites, initial or final\n" + # raise NameError(msg) # If adsorbed species have 'mass==0.0', the user didn't use a chemical formula for its symbol, # or the job was reconstructed from a Zacros input file. 
So, it doesn't make any sense to # check mass conservation check_mass = True - if( abs( self.initial.mass( self.initial_entity_number ) ) < 1e-6 or abs(self.final.mass( self.final_entity_number )) < 1e-6 ): + if ( + abs(self.initial.mass(self.initial_entity_number)) < 1e-6 + or abs(self.final.mass(self.final_entity_number)) < 1e-6 + ): check_mass = False - if( abs( self.initial.mass( self.initial_entity_number ) - self.final.mass( self.final_entity_number ) ) > 1e-6 and check_mass ): - msg = "\n### ERROR ### ElementaryReaction.__init__.\n" + if ( + abs(self.initial.mass(self.initial_entity_number) - self.final.mass(self.final_entity_number)) > 1e-6 + and check_mass + ): + msg = "\n### ERROR ### ElementaryReaction.__init__.\n" msg += " The mass is not conserved during the reaction\n" - msg += " initial:mass("+str([sp.symbol for sp in self.initial])+")="+str(self.initial.mass(self.initial_entity_number)) - msg += ", final:mass("+str([sp.symbol for sp in self.final])+")="+str(self.final.mass(self.final_entity_number))+"\n" - msg += " initial:entity_number="+str(self.initial_entity_number) - msg += ", final:entity_number="+str(self.final_entity_number)+"\n" + msg += ( + " initial:mass(" + + str([sp.symbol for sp in self.initial]) + + ")=" + + str(self.initial.mass(self.initial_entity_number)) + ) + msg += ( + ", final:mass(" + + str([sp.symbol for sp in self.final]) + + ")=" + + str(self.final.mass(self.final_entity_number)) + + "\n" + ) + msg += " initial:entity_number=" + str(self.initial_entity_number) + msg += ", final:entity_number=" + str(self.final_entity_number) + "\n" raise NameError(msg) self.__userLabel = label self.__label = None self.__updateLabel() - def __eq__(self, other): """ Returns True if both objects have the same label. Otherwise returns False * ``other`` -- """ - if( self.__label == other.__label ): + if self.__label == other.__label: return True else: return False - def __hash__(self): """ Returns a hash based on the label """ return hash(self.__label) - @staticmethod - def __getSpeciesListFullName( species, entity_number, site_types ): + def __getSpeciesListFullName(species, entity_number, site_types): label = "" for i in range(len(species)): label += species[i].symbol - if( len(entity_number) > 1 ): - label += str(entity_number[i]+1) + if len(entity_number) > 1: + label += str(entity_number[i] + 1) label += str(site_types[i]) return label - def __updateLabel(self): """ Updates the attribute 'label' """ - if( self.__userLabel is not None ): + if self.__userLabel is not None: self.__label = self.__userLabel return - initialLabel = ElementaryReaction.__getSpeciesListFullName( self.__initial_adsorbed, self.initial_entity_number, self.site_types ) + initialLabel = ElementaryReaction.__getSpeciesListFullName( + self.__initial_adsorbed, self.initial_entity_number, self.site_types + ) - if( len(self.initial.gas_species()) > 0 ): + if len(self.initial.gas_species()) > 0: initialLabel += ":" + SpeciesList(self.initial.gas_species()).label() - finalLabel = ElementaryReaction.__getSpeciesListFullName( self.__final_adsorbed, self.final_entity_number, self.site_types ) + finalLabel = ElementaryReaction.__getSpeciesListFullName( + self.__final_adsorbed, self.final_entity_number, self.site_types + ) - if( len(self.final.gas_species()) > 0 ): + if len(self.final.gas_species()) > 0: finalLabel += ":" + SpeciesList(self.final.gas_species()).label() self.__label = "" - if( self.reversible ): + if self.reversible: # Reaction labels in lexicographical order - if( initialLabel > finalLabel 
): - self.__label = initialLabel+"<->"+finalLabel + if initialLabel > finalLabel: + self.__label = initialLabel + "<->" + finalLabel else: - self.__label = finalLabel+"<->"+initialLabel + self.__label = finalLabel + "<->" + initialLabel else: - self.__label = initialLabel+"->"+finalLabel + self.__label = initialLabel + "->" + finalLabel # For neighboring nodes are sorted if self.neighboring is not None: for i in range(len(self.neighboring)): - if(i==0): self.__label += ";" + if i == 0: + self.__label += ";" lNeighboring = list(self.neighboring[i]) lNeighboring.sort() self.__label += str(tuple(lNeighboring)).replace(" ", "") - if( i != len(self.neighboring)-1): + if i != len(self.neighboring) - 1: self.__label += "," - def label(self): """ Returns the label of the cluster """ - if( self.__label is None ): + if self.__label is None: self.__updateLabel() return self.__label - def __str__(self): """ Translates the object to a string """ - if( self.reversible ): - output = "reversible_step " + self.__label +"\n" + if self.reversible: + output = "reversible_step " + self.__label + "\n" else: - output = "step " + self.__label +"\n" + output = "step " + self.__label + "\n" - initial_gas_species = SpeciesList( self.initial.gas_species() ) - final_gas_species = SpeciesList( self.final.gas_species() ) + initial_gas_species = SpeciesList(self.initial.gas_species()) + final_gas_species = SpeciesList(self.final.gas_species()) - if( len(initial_gas_species) != 0 or len(final_gas_species) != 0 ): + if len(initial_gas_species) != 0 or len(final_gas_species) != 0: output += " gas_reacs_prods " - gas_species_freqs = Counter( [ s.symbol for s in initial_gas_species ] ) + gas_species_freqs = Counter([s.symbol for s in initial_gas_species]) - i=0 - for symbol,freq in gas_species_freqs.items(): - output += symbol+" "+str(-freq) - if( i != len(gas_species_freqs)-1 ): + i = 0 + for symbol, freq in gas_species_freqs.items(): + output += symbol + " " + str(-freq) + if i != len(gas_species_freqs) - 1: output += " " i += 1 - gas_species_freqs = Counter( [ s.symbol for s in final_gas_species ] ) + gas_species_freqs = Counter([s.symbol for s in final_gas_species]) - i=0 - for symbol,freq in gas_species_freqs.items(): - output += symbol+" "+str(freq) - if( i != len(gas_species_freqs)-1 ): + i = 0 + for symbol, freq in gas_species_freqs.items(): + output += symbol + " " + str(freq) + if i != len(gas_species_freqs) - 1: output += " " i += 1 output += "\n" - if(self.sites != 0): - output += " sites " + str(self.sites)+"\n" + if self.sites != 0: + output += " sites " + str(self.sites) + "\n" if self.neighboring is not None and len(self.neighboring) > 0: output += " neighboring " for i in range(len(self.neighboring)): - output += str(self.neighboring[i][0]+1) + "-" + str(self.neighboring[i][1]+1) - if(i != len(self.neighboring)-1): + output += str(self.neighboring[i][0] + 1) + "-" + str(self.neighboring[i][1] + 1) + if i != len(self.neighboring) - 1: output += " " output += "\n" - output += " initial"+"\n" + output += " initial" + "\n" site_identate = {} for i in range(self.sites): - if( self.initial_entity_number[i] not in site_identate ): - site_identate[ self.initial_entity_number[i] ] = 0 + if self.initial_entity_number[i] not in site_identate: + site_identate[self.initial_entity_number[i]] = 0 else: - site_identate[ self.initial_entity_number[i] ] = site_identate[ self.initial_entity_number[i] ] + 1 + site_identate[self.initial_entity_number[i]] = site_identate[self.initial_entity_number[i]] + 1 - if( site_identate[ 
self.initial_entity_number[i] ] >= self.initial[i].denticity ): - msg = "\n### ERROR ### ElementaryReaction.__str__.\n" - msg += "Inconsistent of denticity value for "+self.initial[i].symbol+" (pos="+str(i)+")\n" - msg += "self.initial_entity_number = "+str(self.initial_entity_number)+"\n" + if site_identate[self.initial_entity_number[i]] >= self.initial[i].denticity: + msg = "\n### ERROR ### ElementaryReaction.__str__.\n" + msg += "Inconsistent of denticity value for " + self.initial[i].symbol + " (pos=" + str(i) + ")\n" + msg += "self.initial_entity_number = " + str(self.initial_entity_number) + "\n" raise NameError(msg) - output += " "+str(self.initial_entity_number[i]+1)+" "+self.initial[i].symbol+" "+str(site_identate[self.initial_entity_number[i]]+1)+"\n" - - output += " final"+"\n" + output += ( + " " + + str(self.initial_entity_number[i] + 1) + + " " + + self.initial[i].symbol + + " " + + str(site_identate[self.initial_entity_number[i]] + 1) + + "\n" + ) + + output += " final" + "\n" site_identate = {} for i in range(self.sites): - if( self.final_entity_number[i] not in site_identate ): - site_identate[ self.final_entity_number[i] ] = 0 + if self.final_entity_number[i] not in site_identate: + site_identate[self.final_entity_number[i]] = 0 else: - site_identate[ self.final_entity_number[i] ] = site_identate[ self.final_entity_number[i] ] + 1 + site_identate[self.final_entity_number[i]] = site_identate[self.final_entity_number[i]] + 1 - if( site_identate[ self.final_entity_number[i] ] >= self.final[i].denticity ): - msg = "\n### ERROR ### ElementaryReaction.__str__.\n" - msg += "Inconsistent of denticity value for "+self.final[i].symbol+" (pos="+str(i)+")\n" - msg += "self.final_entity_number = "+str(self.final_entity_number)+"\n" + if site_identate[self.final_entity_number[i]] >= self.final[i].denticity: + msg = "\n### ERROR ### ElementaryReaction.__str__.\n" + msg += "Inconsistent of denticity value for " + self.final[i].symbol + " (pos=" + str(i) + ")\n" + msg += "self.final_entity_number = " + str(self.final_entity_number) + "\n" raise NameError(msg) - output += " "+str(self.final_entity_number[i]+1)+" "+self.final[i].symbol+" "+str(site_identate[self.final_entity_number[i]]+1)+"\n" + output += ( + " " + + str(self.final_entity_number[i] + 1) + + " " + + self.final[i].symbol + + " " + + str(site_identate[self.final_entity_number[i]] + 1) + + "\n" + ) output += " site_types " for i in range(len(self.site_types)): - if( type(self.site_types[i]) == int ): - output += str(self.site_types[i]+1) - elif( type(self.site_types[i]) == str ): + if type(self.site_types[i]) == int: + output += str(self.site_types[i] + 1) + elif type(self.site_types[i]) == str: output += self.site_types[i] - if(i != len(self.site_types)-1): + if i != len(self.site_types) - 1: output += " " output += "\n" - output += " pre_expon "+("%12.5e"%self.pre_expon)+"\n" + output += " pre_expon " + ("%12.5e" % self.pre_expon) + "\n" if self.reversible: - output += " pe_ratio "+("%12.5e"%self.pe_ratio)+"\n" + output += " pe_ratio " + ("%12.5e" % self.pe_ratio) + "\n" - output += " activ_eng "+("%12.5e"%self.activation_energy)+"\n" + output += " activ_eng " + ("%12.5e" % self.activation_energy) + "\n" if self.prox_factor is not None: - output += " prox_factor "+("%12.5e"%self.prox_factor)+"\n" + output += " prox_factor " + ("%12.5e" % self.prox_factor) + "\n" if self.reversible: output += "end_reversible_step" @@ -302,26 +340,24 @@ def __str__(self): return output - def site_types_set(self): """ Returns the set of the 
sites types """ return set(self.site_types) - - def replace_site_types( self, site_types_old, site_types_new ): + def replace_site_types(self, site_types_old, site_types_new): """ Replaces the site types names * ``site_types_old`` -- List of strings containing the old site_types to be replaced * ``site_types_new`` -- List of strings containing the new site_types which would replace old site_types_old. """ - assert( len(site_types_old) == len(site_types_new) ) + assert len(site_types_old) == len(site_types_new) for i in range(len(site_types_old)): for j in range(len(self.site_types)): - if( self.site_types[j] == site_types_old[i] ): + if self.site_types[j] == site_types_old[i]: self.site_types[j] = site_types_new[i] self.__updateLabel() diff --git a/core/Lattice.py b/core/Lattice.py index 32ad3a6..40151c4 100644 --- a/core/Lattice.py +++ b/core/Lattice.py @@ -3,7 +3,8 @@ import os import math -__all__ = ['Lattice'] +__all__ = ["Lattice"] + class Lattice: """ @@ -145,13 +146,13 @@ class Lattice: HEXAGONAL = 6 # Neighboring_structure - SELF = (0,0) - NORTH = (0,1) - NORTHEAST = (1,1) - EAST = (1,0) - SOUTHEAST = (1,-1) + SELF = (0, 0) + NORTH = (0, 1) + NORTHEAST = (1, 1) + EAST = (1, 0) + SOUTHEAST = (1, -1) - __NeighboringToStr = { SELF:"self", NORTH:"north", NORTHEAST:"northeast", EAST:"east", SOUTHEAST:"southeast" } + __NeighboringToStr = {SELF: "self", NORTH: "north", NORTHEAST: "northeast", EAST: "east", SOUTHEAST: "southeast"} def __init__(self, **kwargs): self.cell_vectors = None @@ -162,22 +163,22 @@ def __init__(self, **kwargs): self.__origin = None # Default Lattices - if( "lattice_type" in kwargs and - "lattice_constant" in kwargs and - "repeat_cell" in kwargs ): + if "lattice_type" in kwargs and "lattice_constant" in kwargs and "repeat_cell" in kwargs: self.__origin = Lattice.__FROM_DEFAULT self.__lattice_type_default = None self.__lattice_constant_default = None self.__repeat_cell_default = None - self.__fromDefaultLattices( kwargs["lattice_type"], kwargs["lattice_constant"], kwargs["repeat_cell"] ) + self.__fromDefaultLattices(kwargs["lattice_type"], kwargs["lattice_constant"], kwargs["repeat_cell"]) # Unit-Cell-Defined Periodic Lattices - elif( "cell_vectors" in kwargs and - "repeat_cell" in kwargs and - "site_types" in kwargs and - "site_coordinates" in kwargs and - "neighboring_structure" in kwargs ): + elif ( + "cell_vectors" in kwargs + and "repeat_cell" in kwargs + and "site_types" in kwargs + and "site_coordinates" in kwargs + and "neighboring_structure" in kwargs + ): self.__origin = Lattice.__FROM_UNIT_CELL self.__cell_vectors_unit_cell = None self.__repeat_cell_unit_cell = None @@ -185,32 +186,40 @@ def __init__(self, **kwargs): self.__site_coordinates_unit_cell = None self.__neighboring_structure_unit_cell = None - self.__fromUnitCellDefined( kwargs["cell_vectors"], kwargs["repeat_cell"], kwargs["site_types"], - kwargs["site_coordinates"], kwargs["neighboring_structure"] ) + self.__fromUnitCellDefined( + kwargs["cell_vectors"], + kwargs["repeat_cell"], + kwargs["site_types"], + kwargs["site_coordinates"], + kwargs["neighboring_structure"], + ) # Explicitly Defined Custom Lattices - elif( "site_types" in kwargs and - "site_coordinates" in kwargs and - "nearest_neighbors" in kwargs ): + elif "site_types" in kwargs and "site_coordinates" in kwargs and "nearest_neighbors" in kwargs: self.__origin = Lattice.__FROM_EXPLICIT - self.__fromExplicitlyDefined( kwargs["site_types"], kwargs["site_coordinates"], - kwargs["nearest_neighbors"], 
cell_vectors=kwargs.get("cell_vectors") ) + self.__fromExplicitlyDefined( + kwargs["site_types"], + kwargs["site_coordinates"], + kwargs["nearest_neighbors"], + cell_vectors=kwargs.get("cell_vectors"), + ) # From a zacros file lattice_input.dat - elif( "fileName" in kwargs ): - self.__fromZacrosFile( kwargs["fileName"] ) + elif "fileName" in kwargs: + self.__fromZacrosFile(kwargs["fileName"]) else: - msg = "\nError: The constructor for Lattice with the parameters:"+str(kwargs)+" is not implemented!\n" + msg = "\nError: The constructor for Lattice with the parameters:" + str(kwargs) + " is not implemented!\n" msg += " Available options:\n" msg += " - Lattice( lattice_type, lattice_constant, repeat_cell )\n" - msg += " - Lattice( cell_vectors, repeat_cell, site_types, site_coordinates, neighboring_structure )\n" + msg += ( + " - Lattice( cell_vectors, repeat_cell, site_types, site_coordinates, neighboring_structure )\n" + ) msg += " - Lattice( site_types, site_coordinates, nearest_neighbors, cell_vectors=None )\n" msg += " - Lattice( fileName )\n" raise Exception(msg) - def __fromDefaultLattices(self, lattice_type, lattice_constant, repeat_cell): """ Creates a default Lattice @@ -219,50 +228,52 @@ def __fromDefaultLattices(self, lattice_type, lattice_constant, repeat_cell): self.__lattice_constant_default = lattice_constant self.__repeat_cell_default = repeat_cell - if( self.__lattice_type_default == Lattice.TRIANGULAR ): + if self.__lattice_type_default == Lattice.TRIANGULAR: - cell_vectors = [[lattice_constant*math.sqrt(3.0), 0.0],[0.0,3.0*lattice_constant]] + cell_vectors = [[lattice_constant * math.sqrt(3.0), 0.0], [0.0, 3.0 * lattice_constant]] site_types = ["StTp1", "StTp1", "StTp1", "StTp1"] - site_coordinates = [[0.0, 0.0],[1.0/2.0, 1.0/6.0],[1.0/2.0, 1.0/2.0],[0.0, 2.0/3.0]] - neighboring_structure=[ [(0,1), Lattice.SELF], - [(1,2), Lattice.SELF], - [(2,3), Lattice.SELF], - [(1,0), Lattice.EAST], - [(2,3), Lattice.EAST], - [(3,0), Lattice.NORTH] ] + site_coordinates = [[0.0, 0.0], [1.0 / 2.0, 1.0 / 6.0], [1.0 / 2.0, 1.0 / 2.0], [0.0, 2.0 / 3.0]] + neighboring_structure = [ + [(0, 1), Lattice.SELF], + [(1, 2), Lattice.SELF], + [(2, 3), Lattice.SELF], + [(1, 0), Lattice.EAST], + [(2, 3), Lattice.EAST], + [(3, 0), Lattice.NORTH], + ] self.__fromUnitCellDefined(cell_vectors, repeat_cell, site_types, site_coordinates, neighboring_structure) - elif( self.__lattice_type_default == Lattice.RECTANGULAR ): + elif self.__lattice_type_default == Lattice.RECTANGULAR: - cell_vectors = [[lattice_constant, 0.0],[0.0,lattice_constant]] + cell_vectors = [[lattice_constant, 0.0], [0.0, lattice_constant]] site_types = ["StTp1"] site_coordinates = [[0.0, 0.0]] - neighboring_structure=[ [(0,0), Lattice.NORTH], - [(0,0), Lattice.EAST] ] + neighboring_structure = [[(0, 0), Lattice.NORTH], [(0, 0), Lattice.EAST]] self.__fromUnitCellDefined(cell_vectors, repeat_cell, site_types, site_coordinates, neighboring_structure) - elif( self.__lattice_type_default == Lattice.HEXAGONAL ): + elif self.__lattice_type_default == Lattice.HEXAGONAL: - cell_vectors = [[lattice_constant*math.sqrt(3.0), 0.0],[0.0,lattice_constant]] + cell_vectors = [[lattice_constant * math.sqrt(3.0), 0.0], [0.0, lattice_constant]] site_types = ["StTp1", "StTp1"] - site_coordinates = [[0.0, 0.0],[0.5, 0.5]] - neighboring_structure=[ [(0,0), Lattice.NORTH], - [(0,1), Lattice.SELF], - [(1,0), Lattice.NORTH], - [(1,1), Lattice.NORTH], - [(1,0), Lattice.NORTHEAST], - [(1,0), Lattice.EAST] ] + site_coordinates = [[0.0, 0.0], [0.5, 0.5]] + 
neighboring_structure = [ + [(0, 0), Lattice.NORTH], + [(0, 1), Lattice.SELF], + [(1, 0), Lattice.NORTH], + [(1, 1), Lattice.NORTH], + [(1, 0), Lattice.NORTHEAST], + [(1, 0), Lattice.EAST], + ] self.__fromUnitCellDefined(cell_vectors, repeat_cell, site_types, site_coordinates, neighboring_structure) - def __fromUnitCellDefined(self, cell_vectors, repeat_cell, site_types, site_coordinates, neighboring_structure): """ Creates a Unit-Cell-Defined periodic Lattice """ - assert( len(site_types) == len(site_coordinates) ) + assert len(site_types) == len(site_coordinates) self.__cell_vectors_unit_cell = cell_vectors self.__repeat_cell_unit_cell = repeat_cell @@ -271,18 +282,18 @@ def __fromUnitCellDefined(self, cell_vectors, repeat_cell, site_types, site_coor self.__neighboring_structure_unit_cell = neighboring_structure ncellsites = len(site_types) - ncells = repeat_cell[0]*repeat_cell[1] - nsites = ncells*ncellsites + ncells = repeat_cell[0] * repeat_cell[1] + nsites = ncells * ncellsites - self.cell_vectors = [ [repeat_cell[0]*a,repeat_cell[1]*b] for a,b in cell_vectors ] - self.site_coordinates = nsites*[None] - self.site_types = nsites*[None] - self.nearest_neighbors = nsites*[None] + self.cell_vectors = [[repeat_cell[0] * a, repeat_cell[1] * b] for a, b in cell_vectors] + self.site_coordinates = nsites * [None] + self.site_types = nsites * [None] + self.nearest_neighbors = nsites * [None] def getcellnumber(i, j): - if( i < 0 or j < 0 or i >= repeat_cell[0] or j >= repeat_cell[1] ): + if i < 0 or j < 0 or i >= repeat_cell[0] or j >= repeat_cell[1]: return None - return i*repeat_cell[1] + j + return i * repeat_cell[1] + j v1 = cell_vectors[0] v2 = cell_vectors[1] @@ -290,45 +301,43 @@ def getcellnumber(i, j): for i in range(repeat_cell[0]): for j in range(repeat_cell[1]): - id_cell = i*repeat_cell[1] + j # cell counter + id_cell = i * repeat_cell[1] + j # cell counter - xcellpos = i*v1[0] + j*v2[0] # cell position x - ycellpos = i*v1[1] + j*v2[1] # cell position y + xcellpos = i * v1[0] + j * v2[0] # cell position x + ycellpos = i * v1[1] + j * v2[1] # cell position y for k in range(ncellsites): - id_site = ncellsites*id_cell + k + id_site = ncellsites * id_cell + k # x-y coordinates of the site - xsite = site_coordinates[k][0]*v1[0] + site_coordinates[k][1]*v2[0] + xcellpos - ysite = site_coordinates[k][0]*v1[1] + site_coordinates[k][1]*v2[1] + ycellpos + xsite = site_coordinates[k][0] * v1[0] + site_coordinates[k][1] * v2[0] + xcellpos + ysite = site_coordinates[k][0] * v1[1] + site_coordinates[k][1] * v2[1] + ycellpos - self.site_coordinates[id_site] = [xsite,ysite] + self.site_coordinates[id_site] = [xsite, ysite] self.site_types[id_site] = site_types[k] # neighboring structure - for (id_1,id_2),lDisp in neighboring_structure: # ldisp=latteral displacements + for (id_1, id_2), lDisp in neighboring_structure: # ldisp=latteral displacements - if( id_1 == k ): - id_cell_2 = getcellnumber(i+lDisp[0],j+lDisp[1]) - if( id_cell_2 is not None ): - id_2_shifted = ncellsites*id_cell_2 + id_2 + if id_1 == k: + id_cell_2 = getcellnumber(i + lDisp[0], j + lDisp[1]) + if id_cell_2 is not None: + id_2_shifted = ncellsites * id_cell_2 + id_2 - if( self.nearest_neighbors[id_site] is None ): + if self.nearest_neighbors[id_site] is None: self.nearest_neighbors[id_site] = set() - self.nearest_neighbors[id_site].add( id_2_shifted ) - if( id_2 == k ): - id_cell_1 = getcellnumber(i-lDisp[0],j-lDisp[1]) - if( id_cell_1 is not None ): - id_1_shifted = ncellsites*id_cell_1 + id_1 + 
self.nearest_neighbors[id_site].add(id_2_shifted) + if id_2 == k: + id_cell_1 = getcellnumber(i - lDisp[0], j - lDisp[1]) + if id_cell_1 is not None: + id_1_shifted = ncellsites * id_cell_1 + id_1 - if ( self.nearest_neighbors[id_site] is None ): + if self.nearest_neighbors[id_site] is None: self.nearest_neighbors[id_site] = set() - self.nearest_neighbors[id_site].add( id_1_shifted ) - - + self.nearest_neighbors[id_site].add(id_1_shifted) def __fromExplicitlyDefined(self, site_types, site_coordinates, nearest_neighbors, cell_vectors=None): """ @@ -339,153 +348,171 @@ def __fromExplicitlyDefined(self, site_types, site_coordinates, nearest_neighbor self.nearest_neighbors = nearest_neighbors self.cell_vectors = cell_vectors - def __fromZacrosFile(self, fileName): """ Creates a Lattice from a Zacros input file lattice_input.dat """ - if not os.path.isfile( fileName ): - raise Exception( "Trying to load a file that doen't exist: "+fileName ) + if not os.path.isfile(fileName): + raise Exception("Trying to load a file that doen't exist: " + fileName) - with open( fileName, "r" ) as inp: + with open(fileName, "r") as inp: file_content = inp.readlines() - file_content = [line.split("#")[0] for line in file_content if line.split("#")[0].strip()] # Removes empty lines and comments + file_content = [ + line.split("#")[0] for line in file_content if line.split("#")[0].strip() + ] # Removes empty lines and comments nline = 0 - while( nline < len(file_content) ): + while nline < len(file_content): tokens = file_content[nline].split() - if( tokens[0].lower() == "lattice" and tokens[1].lower() == "default_choice" ): + if tokens[0].lower() == "lattice" and tokens[1].lower() == "default_choice": nline += 1 tokens = file_content[nline].split() - if( len(tokens) < 4 ): - raise Exception( "Format Error in line "+str(nline)+" of file "+ZacrosJob._filenames['lattice'] ) + if len(tokens) < 4: + raise Exception( + "Format Error in line " + str(nline) + " of file " + ZacrosJob._filenames["lattice"] + ) cases = { - "triangular_periodic" : Lattice.TRIANGULAR, - "rectangular_periodic" : Lattice.RECTANGULAR, - "hexagonal_periodic" : Lattice.HEXAGONAL + "triangular_periodic": Lattice.TRIANGULAR, + "rectangular_periodic": Lattice.RECTANGULAR, + "hexagonal_periodic": Lattice.HEXAGONAL, } - lattice_type = cases.get( tokens[0].lower(), None ) + lattice_type = cases.get(tokens[0].lower(), None) - if( lattice_type is None ): - raise Exception( "Error: Keyword "+tokens[0]+" in file "+ZacrosJob._filenames['lattice']+" is not supported!" ) + if lattice_type is None: + raise Exception( + "Error: Keyword " + + tokens[0] + + " in file " + + ZacrosJob._filenames["lattice"] + + " is not supported!" 
+ ) lattice_constant = float(tokens[1]) - repeat_cell = ( int(tokens[2]), int(tokens[3]) ) + repeat_cell = (int(tokens[2]), int(tokens[3])) - parameters = { "lattice_type":lattice_type, - "lattice_constant":lattice_constant, - "repeat_cell":repeat_cell } + parameters = { + "lattice_type": lattice_type, + "lattice_constant": lattice_constant, + "repeat_cell": repeat_cell, + } - self.__init__( **parameters ) + self.__init__(**parameters) - if( tokens[0] == "lattice" and tokens[1] == "periodic_cell" ): + if tokens[0] == "lattice" and tokens[1] == "periodic_cell": nline += 1 parameters = {} - while( nline < len(file_content) ): + while nline < len(file_content): tokens = file_content[nline].split() cases = { - "repeat_cell" : lambda sv: parameters.setdefault("repeat_cell", (int(sv[0]),int(sv[1])) ), - "n_site_types" : lambda sv: parameters.setdefault("n_site_types", int(sv[0])), - "site_type_names" : lambda sv: parameters.setdefault("site_type_names", sv), - "n_cell_sites" : lambda sv: parameters.setdefault("n_cell_sites", int(sv[0])), - "site_types" : lambda sv: parameters.setdefault("site_types", sv), + "repeat_cell": lambda sv: parameters.setdefault("repeat_cell", (int(sv[0]), int(sv[1]))), + "n_site_types": lambda sv: parameters.setdefault("n_site_types", int(sv[0])), + "site_type_names": lambda sv: parameters.setdefault("site_type_names", sv), + "n_cell_sites": lambda sv: parameters.setdefault("n_cell_sites", int(sv[0])), + "site_types": lambda sv: parameters.setdefault("site_types", sv), } - cases.get( tokens[0], lambda sv: None )( tokens[1:] ) + cases.get(tokens[0], lambda sv: None)(tokens[1:]) - if( tokens[0] == "cell_vectors" ): - parameters["cell_vectors"] = 2*[None] - for n in [0,1]: + if tokens[0] == "cell_vectors": + parameters["cell_vectors"] = 2 * [None] + for n in [0, 1]: nline += 1 tokens = file_content[nline].split() - parameters["cell_vectors"][n] = [ float(tokens[i]) for i in [0,1] ] + parameters["cell_vectors"][n] = [float(tokens[i]) for i in [0, 1]] - elif( tokens[0] == "site_coordinates" ): + elif tokens[0] == "site_coordinates": # WARNING. Here, I'm assuming that n_cell_sites is defined before site_coordinates - parameters["site_coordinates"] = parameters["n_cell_sites"]*[None] + parameters["site_coordinates"] = parameters["n_cell_sites"] * [None] for n in range(parameters["n_cell_sites"]): nline += 1 tokens = file_content[nline].split() - parameters["site_coordinates"][n] = [ float(tokens[i]) for i in [0,1] ] + parameters["site_coordinates"][n] = [float(tokens[i]) for i in [0, 1]] - elif( tokens[0] == "neighboring_structure" ): + elif tokens[0] == "neighboring_structure": parameters["neighboring_structure"] = [] - while( nline < len(file_content) ): + while nline < len(file_content): nline += 1 tokens = file_content[nline].split() - if( tokens[0] == "end_neighboring_structure" ): + if tokens[0] == "end_neighboring_structure": break cases = { - "self" : Lattice.SELF, - "north" : Lattice.NORTH, - "northeast" : Lattice.NORTHEAST, - "east" : Lattice.EAST, - "southeast" : Lattice.SOUTHEAST + "self": Lattice.SELF, + "north": Lattice.NORTH, + "northeast": Lattice.NORTHEAST, + "east": Lattice.EAST, + "southeast": Lattice.SOUTHEAST, } - value = cases.get( tokens[1] ) + value = cases.get(tokens[1]) - if( value is None ): - raise Exception( "Error: Keyword "+tokens[1]+" in file "+ZacrosJob._filenames['lattice']+" is not supported!" 
) + if value is None: + raise Exception( + "Error: Keyword " + + tokens[1] + + " in file " + + ZacrosJob._filenames["lattice"] + + " is not supported!" + ) - parameters["neighboring_structure"].append( [ tuple( int(a)-1 for a in tokens[0].split("-") ), value ] ) + parameters["neighboring_structure"].append( + [tuple(int(a) - 1 for a in tokens[0].split("-")), value] + ) nline += 1 - self.__init__( **parameters ) + self.__init__(**parameters) - if( tokens[0] == "lattice" and tokens[1] == "explicit" ): + if tokens[0] == "lattice" and tokens[1] == "explicit": nline += 1 parameters = {} - while( nline < len(file_content) ): + while nline < len(file_content): tokens = file_content[nline].split() cases = { - "n_sites" : lambda sv: parameters.setdefault("n_sites", int(sv[0])), - "max_coord" : lambda sv: parameters.setdefault("max_coord", int(sv[0])), - "n_site_types" : lambda sv: parameters.setdefault("n_site_types", int(sv[0])), - "site_type_names" : lambda sv: parameters.setdefault("site_type_names", sv) + "n_sites": lambda sv: parameters.setdefault("n_sites", int(sv[0])), + "max_coord": lambda sv: parameters.setdefault("max_coord", int(sv[0])), + "n_site_types": lambda sv: parameters.setdefault("n_site_types", int(sv[0])), + "site_type_names": lambda sv: parameters.setdefault("site_type_names", sv), } - cases.get( tokens[0], lambda sv: None )( tokens[1:] ) + cases.get(tokens[0], lambda sv: None)(tokens[1:]) - if( tokens[0] == "lattice_structure" ): + if tokens[0] == "lattice_structure": parameters["site_types"] = [] parameters["site_coordinates"] = [] parameters["nearest_neighbors"] = [] - while( nline < len(file_content) ): + while nline < len(file_content): nline += 1 tokens = file_content[nline].split() - if( tokens[0] == "end_lattice_structure" ): + if tokens[0] == "end_lattice_structure": break - if( len(tokens) < 5 ): - raise Exception( "Error: Format inconsistent in section lattice_structure!" ) + if len(tokens) < 5: + raise Exception("Error: Format inconsistent in section lattice_structure!") - parameters["site_coordinates"].append( [ float(tokens[1]), float(tokens[2]) ] ) - parameters["site_types"].append( tokens[3] ) - parameters["nearest_neighbors"].append( [ int(tokens[i])-1 for i in range(5,len(tokens)) ] ) + parameters["site_coordinates"].append([float(tokens[1]), float(tokens[2])]) + parameters["site_types"].append(tokens[3]) + parameters["nearest_neighbors"].append([int(tokens[i]) - 1 for i in range(5, len(tokens))]) nline += 1 - self.__init__( **parameters ) + self.__init__(**parameters) nline += 1 - - def add_site_type( self, site_type, coordinates, precision=0.01 ): + def add_site_type(self, site_type, coordinates, precision=0.01): """ Adds a new site only if this is not already included in the lattice. It returns the id of the site @@ -496,39 +523,37 @@ def add_site_type( self, site_type, coordinates, precision=0.01 ): or not contained on the list of sites. Default: 0.01 """ locId = None - for i,(s,(x,y)) in enumerate(zip(self.site_types,self.site_coordinates)): + for i, (s, (x, y)) in enumerate(zip(self.site_types, self.site_coordinates)): - if( math.sqrt( (x-coordinates[0])**2 + (y-coordinates[1])**2 ) < precision ): + if math.sqrt((x - coordinates[0]) ** 2 + (y - coordinates[1]) ** 2) < precision: locId = i - if( s != site_type ): - msg = "### Error ### RKFLoader.add_site_type(). 
Trying to add a site that already exists with a different label\n" - msg += " (s_old,s_new) = ("+str(s)+","+str(site_type)+")\n" - msg += " coords_old = "+str([x,y])+"\n" - msg += " coords_new = "+str(coordinates)+"\n" - raise Exception( msg ) + if s != site_type: + msg = "### Error ### RKFLoader.add_site_type(). Trying to add a site that already exists with a different label\n" + msg += " (s_old,s_new) = (" + str(s) + "," + str(site_type) + ")\n" + msg += " coords_old = " + str([x, y]) + "\n" + msg += " coords_new = " + str(coordinates) + "\n" + raise Exception(msg) if locId is None: - self.site_types.append( site_type ) - self.site_coordinates.append( coordinates ) + self.site_types.append(site_type) + self.site_coordinates.append(coordinates) self.__origin = Lattice.__FROM_EXPLICIT - locId = len(self.site_types)-1 + locId = len(self.site_types) - 1 return locId - - def add_nearest_neighbor( self, id_site, id_neighbor ): + def add_nearest_neighbor(self, id_site, id_neighbor): """ Adds a new nearest-neighbor item to the lattice, e.g. (1,3) * ``id_site`` -- Site id, e.g. 1 * ``id_neighbor`` -- id of the new site neighbor, e.g. 3 """ - self.nearest_neighbors[ id_site ].append( id_neighbor ) + self.nearest_neighbors[id_site].append(id_neighbor) self.__origin = Lattice.__FROM_EXPLICIT - - def extend( self, other, precision=0.1, cell_vectors_precision=0.01 ): + def extend(self, other, precision=0.1, cell_vectors_precision=0.01): """ Extends the sites and corresponding neighboring information by appending the equivalent items from another lattice. @@ -539,35 +564,37 @@ def extend( self, other, precision=0.1, cell_vectors_precision=0.01 ): """ for i in range(len(self.cell_vectors)): for j in range(len(self.cell_vectors[0])): - if( self.cell_vectors[i][j] - other.cell_vectors[i][j] > cell_vectors_precision ): + if self.cell_vectors[i][j] - other.cell_vectors[i][j] > cell_vectors_precision: raise Exception("### Error ### RKFLoader.extend(). 
Lattices not compatible") - #-------------------------------------------- + # -------------------------------------------- # Merging the general attributes - #-------------------------------------------- + # -------------------------------------------- mapping = {} - for old_id,(site_type,coordinates,neighbors) in enumerate(zip(other.site_types,other.site_coordinates,other.nearest_neighbors)): - new_id = self.add_site_type( site_type, coordinates, precision ) + for old_id, (site_type, coordinates, neighbors) in enumerate( + zip(other.site_types, other.site_coordinates, other.nearest_neighbors) + ): + new_id = self.add_site_type(site_type, coordinates, precision) mapping[old_id] = new_id - if( new_id > len(self.nearest_neighbors)-1 ): - self.nearest_neighbors.append( set() ) + if new_id > len(self.nearest_neighbors) - 1: + self.nearest_neighbors.append(set()) - for old_id,nearest_neighbors in enumerate(other.nearest_neighbors): - if nearest_neighbors is None: continue + for old_id, nearest_neighbors in enumerate(other.nearest_neighbors): + if nearest_neighbors is None: + continue for id in nearest_neighbors: - self.nearest_neighbors[mapping[old_id]].add( mapping[id] ) + self.nearest_neighbors[mapping[old_id]].add(mapping[id]) - #self.__origin = other.__origin - #self.__cell_vectors_unit_cell = other.__cell_vectors_unit_cell - #self.__repeat_cell_unit_cell = other.__repeat_cell_unit_cell - #self.__site_types_unit_cell.extend( other.__site_types_unit_cell ) - #self.__site_coordinates_unit_cell.extend( other.__site_coordinates_unit_cell ) - #self.__neighboring_structure_unit_cell.extend( other.__neighboring_structure_unit_cell ) + # self.__origin = other.__origin + # self.__cell_vectors_unit_cell = other.__cell_vectors_unit_cell + # self.__repeat_cell_unit_cell = other.__repeat_cell_unit_cell + # self.__site_types_unit_cell.extend( other.__site_types_unit_cell ) + # self.__site_coordinates_unit_cell.extend( other.__site_coordinates_unit_cell ) + # self.__neighboring_structure_unit_cell.extend( other.__neighboring_structure_unit_cell ) self.__origin = Lattice.__FROM_EXPLICIT - #self.__origin = Lattice.__FROM_UNIT_CELL - + # self.__origin = Lattice.__FROM_UNIT_CELL def plot(self, pause=-1, show=True, color=None, ax=None, close=False, show_sites_ids=False, file_name=None): """ @@ -588,182 +615,198 @@ def plot(self, pause=-1, show=True, color=None, ax=None, close=False, show_sites except ImportError as e: return # module doesn't exist, deal with it. 
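# Usage sketch (hedged) for the Lattice pieces reformatted in this hunk: the constructor is
# called with the same keywords the lattice_input.dat parser forwards to __init__
# (lattice_type, lattice_constant, repeat_cell), and plot() follows the signature shown above.
# The "scm.pyzacros" import path and all numeric values are illustrative assumptions, not
# content of this patch.
import scm.pyzacros as pz

# One of the three built-in default lattices (TRIANGULAR / RECTANGULAR / HEXAGONAL).
latt = pz.Lattice(lattice_type=pz.Lattice.TRIANGULAR,
                  lattice_constant=2.77,   # placeholder lattice constant (angstrom)
                  repeat_cell=(8, 8))      # repetitions of the unit cell along each vector

print(latt)                                # serializes to the Zacros "lattice ... end_lattice" block
latt.plot(show=False, show_sites_ids=False, file_name="lattice.png")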
- if( ax is None ): - fig,ax = plt.subplots() + if ax is None: + fig, ax = plt.subplots() - if( self.cell_vectors is not None ): + if self.cell_vectors is not None: v1 = self.cell_vectors[0] v2 = self.cell_vectors[1] - xvalues = [0.0,v1[0], v1[0]+v2[0], v2[0], 0.0] - yvalues = [0.0,v1[1], v1[1]+v2[1], v2[1], 0.0] - - lcolor = color if color is not None else 'k' - ax.plot(xvalues, yvalues, color=lcolor, linestyle='dashed', linewidth=1, zorder=1) - - x_len = abs(v2[0]-v1[0]) - ax.set_xlim( [ 0.0-0.1*x_len, v1[0]+v2[0]+0.1*x_len ] ) - y_len = abs(v2[1]-v1[1]) - ax.set_ylim( [ 0.0-0.1*y_len, v1[1]+v2[1]+0.1*y_len ] ) - - if( x_len > y_len ): - ax.set_aspect(1.8*y_len/x_len) - elif( y_len > x_len ): - ax.set_aspect(1.8*x_len/y_len) - elif( x_len == y_len ): + xvalues = [0.0, v1[0], v1[0] + v2[0], v2[0], 0.0] + yvalues = [0.0, v1[1], v1[1] + v2[1], v2[1], 0.0] + + lcolor = color if color is not None else "k" + ax.plot(xvalues, yvalues, color=lcolor, linestyle="dashed", linewidth=1, zorder=1) + + x_len = abs(v2[0] - v1[0]) + ax.set_xlim([0.0 - 0.1 * x_len, v1[0] + v2[0] + 0.1 * x_len]) + y_len = abs(v2[1] - v1[1]) + ax.set_ylim([0.0 - 0.1 * y_len, v1[1] + v2[1] + 0.1 * y_len]) + + if x_len > y_len: + ax.set_aspect(1.8 * y_len / x_len) + elif y_len > x_len: + ax.set_aspect(1.8 * x_len / y_len) + elif x_len == y_len: ax.set_aspect(1.0) v1 = self.__cell_vectors_unit_cell[0] v2 = self.__cell_vectors_unit_cell[1] - xvalues = [0.0,v1[0], v1[0]+v2[0], v2[0], 0.0] - yvalues = [0.0,v1[1], v1[1]+v2[1], v2[1], 0.0] + xvalues = [0.0, v1[0], v1[0] + v2[0], v2[0], 0.0] + yvalues = [0.0, v1[1], v1[1] + v2[1], v2[1], 0.0] - lcolor = color if color is not None else 'k' - ax.plot(xvalues, yvalues, color=lcolor, linestyle='solid', linewidth=3, zorder=1) + lcolor = color if color is not None else "k" + ax.plot(xvalues, yvalues, color=lcolor, linestyle="solid", linewidth=3, zorder=1) - #ax.set_xlabel('x ($\AA$)') - #ax.set_ylabel('y ($\AA$)') + # ax.set_xlabel('x ($\AA$)') + # ax.set_ylabel('y ($\AA$)') - ax.set_xlabel('x (ang.)') - ax.set_ylabel('y (ang.)') + ax.set_xlabel("x (ang.)") + ax.set_ylabel("y (ang.)") - #markers = ['o', '.', ',', 'x', '+', 'v', '^', '<', '>', 's', 'd'] - markers = ['o', 's', 'v', '^', '+', '^'] - colors = ['r', 'g', 'b', 'm', 'c', 'k'] + # markers = ['o', '.', ',', 'x', '+', 'v', '^', '<', '>', 's', 'd'] + markers = ["o", "s", "v", "^", "+", "^"] + colors = ["r", "g", "b", "m", "c", "k"] - for i,st_i in enumerate(sorted(list(set(self.site_types)))): - xvalues = [ x for (x,y),st in zip(self.site_coordinates,self.site_types) if st==st_i ] - yvalues = [ y for (x,y),st in zip(self.site_coordinates,self.site_types) if st==st_i ] + for i, st_i in enumerate(sorted(list(set(self.site_types)))): + xvalues = [x for (x, y), st in zip(self.site_coordinates, self.site_types) if st == st_i] + yvalues = [y for (x, y), st in zip(self.site_coordinates, self.site_types) if st == st_i] lcolor = color if color is not None else colors[i] - ax.scatter(xvalues, yvalues, color=lcolor, marker=markers[i], - s=440/math.sqrt(len(self.site_coordinates)), zorder=2, label=st_i) - - if( show_sites_ids ): - for i,(x,y) in enumerate(self.site_coordinates): - plt.annotate(str(i), (x,y), ha='center', va='center', zorder=100) - - - for i,ineigh in enumerate(self.nearest_neighbors): - if( ineigh is None ): continue + ax.scatter( + xvalues, + yvalues, + color=lcolor, + marker=markers[i], + s=440 / math.sqrt(len(self.site_coordinates)), + zorder=2, + label=st_i, + ) + + if show_sites_ids: + for i, (x, y) in 
enumerate(self.site_coordinates): + plt.annotate(str(i), (x, y), ha="center", va="center", zorder=100) + + for i, ineigh in enumerate(self.nearest_neighbors): + if ineigh is None: + continue for k in ineigh: - xvalues = np.array([ self.site_coordinates[i][0], self.site_coordinates[k][0] ]) - yvalues = np.array([ self.site_coordinates[i][1], self.site_coordinates[k][1] ]) - - norm = math.sqrt( (xvalues[0]-xvalues[1])**2 + (yvalues[0]-yvalues[1])**2 ) - - if( self.cell_vectors is not None ): - if( norm > np.linalg.norm(1.5*np.array(v1)) ): continue - if( norm > np.linalg.norm(1.5*np.array(v2)) ): continue - - lcolor = color if color is not None else 'k' - ax.plot(xvalues, yvalues, color=lcolor, linestyle='solid', - linewidth=1.5/math.sqrt(len(self.site_coordinates)), zorder=1) - - ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) + xvalues = np.array([self.site_coordinates[i][0], self.site_coordinates[k][0]]) + yvalues = np.array([self.site_coordinates[i][1], self.site_coordinates[k][1]]) + + norm = math.sqrt((xvalues[0] - xvalues[1]) ** 2 + (yvalues[0] - yvalues[1]) ** 2) + + if self.cell_vectors is not None: + if norm > np.linalg.norm(1.5 * np.array(v1)): + continue + if norm > np.linalg.norm(1.5 * np.array(v2)): + continue + + lcolor = color if color is not None else "k" + ax.plot( + xvalues, + yvalues, + color=lcolor, + linestyle="solid", + linewidth=1.5 / math.sqrt(len(self.site_coordinates)), + zorder=1, + ) + + ax.legend(loc="center left", bbox_to_anchor=(1, 0.5)) plt.tight_layout() - if( file_name is not None ): - plt.savefig( file_name ) + if file_name is not None: + plt.savefig(file_name) - if( show ): - if( pause == -1 ): + if show: + if pause == -1: plt.show() else: - plt.pause( pause ) + plt.pause(pause) - if( close ): + if close: plt.close("all") - def __str__(self): """ Translate the object to a string following the Zacros input files format """ output = "" - if( self.__origin == Lattice.__FROM_DEFAULT ): + if self.__origin == Lattice.__FROM_DEFAULT: output += "lattice default_choice\n" - if( self.__lattice_type_default == Lattice.TRIANGULAR ): + if self.__lattice_type_default == Lattice.TRIANGULAR: output += " triangular_periodic" - elif( self.__lattice_type_default == Lattice.RECTANGULAR ): + elif self.__lattice_type_default == Lattice.RECTANGULAR: output += " rectangular_periodic" - elif( self.__lattice_type_default == Lattice.HEXAGONAL ): + elif self.__lattice_type_default == Lattice.HEXAGONAL: output += " hexagonal_periodic" else: - raise Exception("Error: Default lattice not implemented yet! ("+self.__lattice_type_default+")") + raise Exception("Error: Default lattice not implemented yet! 
(" + self.__lattice_type_default + ")") - output += " "+str(self.__lattice_constant_default) - output += " "+str(self.__repeat_cell_default[0]) - output += " "+str(self.__repeat_cell_default[1])+"\n" + output += " " + str(self.__lattice_constant_default) + output += " " + str(self.__repeat_cell_default[0]) + output += " " + str(self.__repeat_cell_default[1]) + "\n" output += "end_lattice" - elif( self.__origin == Lattice.__FROM_UNIT_CELL ): + elif self.__origin == Lattice.__FROM_UNIT_CELL: output += "lattice periodic_cell\n" output += " cell_vectors\n" for i in range(2): for j in range(2): - output += " "+("%.8f"%self.__cell_vectors_unit_cell[i][j]) + output += " " + ("%.8f" % self.__cell_vectors_unit_cell[i][j]) output += "\n" - output += " repeat_cell "+str(str(self.__repeat_cell_unit_cell)[1:-1]).replace(',', '')+"\n" + output += " repeat_cell " + str(str(self.__repeat_cell_unit_cell)[1:-1]).replace(",", "") + "\n" n_cell_sites = len(self.__site_coordinates_unit_cell) site_types = list(set(self.__site_types_unit_cell)) site_types.sort() - output += " n_site_types "+str(len(site_types))+"\n" - output += " site_type_names "+str(' '.join(str(x) for x in site_types))+"\n" - output += " n_cell_sites "+str(n_cell_sites)+"\n" - output += " site_types "+str(' '.join(str(x) for x in self.__site_types_unit_cell))+"\n" + output += " n_site_types " + str(len(site_types)) + "\n" + output += " site_type_names " + str(" ".join(str(x) for x in site_types)) + "\n" + output += " n_cell_sites " + str(n_cell_sites) + "\n" + output += " site_types " + str(" ".join(str(x) for x in self.__site_types_unit_cell)) + "\n" output += " site_coordinates\n" for i in range(n_cell_sites): for j in range(2): - output += " "+("%.8f"%self.__site_coordinates_unit_cell[i][j]) + output += " " + ("%.8f" % self.__site_coordinates_unit_cell[i][j]) output += "\n" output += " neighboring_structure\n" for i in range(len(self.__neighboring_structure_unit_cell)): - output += " "+str('-'.join(str(self.__neighboring_structure_unit_cell[i][0][j]+1) for j in range(2))) - output += " "+Lattice.__NeighboringToStr[self.__neighboring_structure_unit_cell[i][1]]+"\n" + output += " " + str( + "-".join(str(self.__neighboring_structure_unit_cell[i][0][j] + 1) for j in range(2)) + ) + output += " " + Lattice.__NeighboringToStr[self.__neighboring_structure_unit_cell[i][1]] + "\n" output += " end_neighboring_structure\n" output += "end_lattice" - elif( self.__origin == Lattice.__FROM_EXPLICIT ): + elif self.__origin == Lattice.__FROM_EXPLICIT: output += "lattice explicit\n" - if( self.cell_vectors is not None ): + if self.cell_vectors is not None: output += " cell_vectors\n" for i in range(2): for j in range(2): - output += " "+("%.8f"%self.cell_vectors[i][j]) + output += " " + ("%.8f" % self.cell_vectors[i][j]) output += "\n" - output += " n_sites "+str(len(self.site_types))+"\n" - output += " max_coord "+str(max([ len(neighbors) for neighbors in self.nearest_neighbors ]))+"\n" + output += " n_sites " + str(len(self.site_types)) + "\n" + output += " max_coord " + str(max([len(neighbors) for neighbors in self.nearest_neighbors])) + "\n" site_types = list(set(self.site_types)) site_types.sort() - output += " n_site_types "+str(len(site_types))+"\n" - output += " site_type_names "+str(' '.join(str(x) for x in site_types))+"\n" + output += " n_site_types " + str(len(site_types)) + "\n" + output += " site_type_names " + str(" ".join(str(x) for x in site_types)) + "\n" output += " lattice_structure\n" for i in range(len(self.site_types)): - output += " 
"+"%4d"%(i+1) - output += " "+"%15.8f"%self.site_coordinates[i][0]+" "+"%15.8f"%self.site_coordinates[i][1] - output += " "+"%10s"%self.site_types[i] - output += " "+"%4d"%len(self.nearest_neighbors[i]) + output += " " + "%4d" % (i + 1) + output += " " + "%15.8f" % self.site_coordinates[i][0] + " " + "%15.8f" % self.site_coordinates[i][1] + output += " " + "%10s" % self.site_types[i] + output += " " + "%4d" % len(self.nearest_neighbors[i]) for j in self.nearest_neighbors[i]: - output += "%6d"%(j+1) + output += "%6d" % (j + 1) output += "\n" @@ -773,55 +816,56 @@ def __str__(self): return output - - def number_of_sites( self ): + def number_of_sites(self): """ Returns the total number of sites """ return len(self.site_types) - - def site_types_set( self ): + def site_types_set(self): """ Returns the set of the sites types """ - if( self.__origin == Lattice.__FROM_DEFAULT ): + if self.__origin == Lattice.__FROM_DEFAULT: return set([0]) else: return set(self.site_types) - - def set_repeat_cell( self, repeat_cell ): + def set_repeat_cell(self, repeat_cell): """ Set the parameter repeat_cell and update all internal information * ``repeat_cell`` -- The number of repetitions of the unit cell in the directions of unit vectors. *e.g.* ``(10,10)`` """ - if( self.__origin == Lattice.__FROM_DEFAULT ): - self.__fromDefaultLattices( self.__lattice_type_default, self.__lattice_constant_default, repeat_cell ) - - elif( self.__origin == Lattice.__FROM_UNIT_CELL ): - self.__fromUnitCellDefined( self.__cell_vectors_unit_cell, repeat_cell, self.__site_types_unit_cell, - self.__site_coordinates_unit_cell, self.__neighboring_structure_unit_cell ) - - elif( self.__origin == Lattice.__FROM_EXPLICIT ): + if self.__origin == Lattice.__FROM_DEFAULT: + self.__fromDefaultLattices(self.__lattice_type_default, self.__lattice_constant_default, repeat_cell) + + elif self.__origin == Lattice.__FROM_UNIT_CELL: + self.__fromUnitCellDefined( + self.__cell_vectors_unit_cell, + repeat_cell, + self.__site_types_unit_cell, + self.__site_coordinates_unit_cell, + self.__neighboring_structure_unit_cell, + ) + + elif self.__origin == Lattice.__FROM_EXPLICIT: pass - - def replace_site_types( self, site_types_old, site_types_new ): + def replace_site_types(self, site_types_old, site_types_new): """ Replaces the site types names * ``site_types_old`` -- List of strings containing the old site_types to be replaced * ``site_types_new`` -- List of strings containing the new site_types which would replace old site_types_old. 
""" - assert( len(site_types_old) == len(site_types_new) ) + assert len(site_types_old) == len(site_types_new) for i in range(len(site_types_old)): for j in range(len(self.site_types)): - if( self.site_types[j] == site_types_old[i] ): + if self.site_types[j] == site_types_old[i]: self.site_types[j] = site_types_new[i] for j in range(len(self.__site_types_unit_cell)): - if( self.__site_types_unit_cell[j] == site_types_old[i] ): + if self.__site_types_unit_cell[j] == site_types_old[i]: self.__site_types_unit_cell[j] = site_types_new[i] diff --git a/core/LatticeState.py b/core/LatticeState.py index 7105000..3607448 100644 --- a/core/LatticeState.py +++ b/core/LatticeState.py @@ -6,7 +6,8 @@ from .SpeciesList import * from .Lattice import * -__all__ = ['LatticeState'] +__all__ = ["LatticeState"] + class LatticeState: """ @@ -25,71 +26,71 @@ def __init__(self, lattice, surface_species, initial=True, add_info=None): self.lattice = lattice self.add_info = add_info - if( type(surface_species) != SpeciesList and type(surface_species) != list ): - msg = "\n### ERROR ### LatticeState.__init__.\n" + if type(surface_species) != SpeciesList and type(surface_species) != list: + msg = "\n### ERROR ### LatticeState.__init__.\n" msg += " Inconsistent type for surface_species\n" raise Exception(msg) self.surface_species = surface_species - if( type(surface_species) == list ): + if type(surface_species) == list: self.surface_species = SpeciesList(surface_species) - if( len( self.surface_species.gas_species() ) > 0 ): - msg = "\n### ERROR ### LatticeState.__init__.\n" + if len(self.surface_species.gas_species()) > 0: + msg = "\n### ERROR ### LatticeState.__init__.\n" msg += " LatticeState doesn't accept gas surface_species\n" raise Exception(msg) self.initial = initial - self.__adsorbed_on_site = lattice.number_of_sites()*[ None ] - self.__entity_number = lattice.number_of_sites()*[ None ] + self.__adsorbed_on_site = lattice.number_of_sites() * [None] + self.__entity_number = lattice.number_of_sites() * [None] self.__next_entity_number = 0 self.__speciesNumbers = {} for sp in self.surface_species: self.__speciesNumbers[sp] = 0 - def __str__(self): """ Translates the object to a string """ - if( self.initial ): - output = "initial_state"+"\n" + if self.initial: + output = "initial_state" + "\n" else: - output = "state"+"\n" + output = "state" + "\n" - if( self.surface_species is not None ): output += " # species "+(" ".join([sp.symbol for sp in self.surface_species]))+"\n" + if self.surface_species is not None: + output += " # species " + (" ".join([sp.symbol for sp in self.surface_species])) + "\n" - if( len(self.__speciesNumbers) > 0 ): + if len(self.__speciesNumbers) > 0: output += " # species_numbers\n" - for sp,nsites in self.__speciesNumbers.items(): - output += " # - "+sp.symbol+" "+str(nsites)+"\n" + for sp, nsites in self.__speciesNumbers.items(): + output += " # - " + sp.symbol + " " + str(nsites) + "\n" processed_entity_number = {} - for id_site,sp in enumerate(self.__adsorbed_on_site): - if( sp is not None and self.__entity_number[id_site] not in processed_entity_number ): - entity_pos = [ str(i+1) for i,v in enumerate(self.__entity_number) if v==self.__entity_number[id_site] ] + for id_site, sp in enumerate(self.__adsorbed_on_site): + if sp is not None and self.__entity_number[id_site] not in processed_entity_number: + entity_pos = [ + str(i + 1) for i, v in enumerate(self.__entity_number) if v == self.__entity_number[id_site] + ] - if( len(entity_pos)>0 ): - output += " seed_on_sites "+sp.symbol+" 
"+' '.join(entity_pos)+"\n" + if len(entity_pos) > 0: + output += " seed_on_sites " + sp.symbol + " " + " ".join(entity_pos) + "\n" - processed_entity_number[ self.__entity_number[id_site] ] = 1 + processed_entity_number[self.__entity_number[id_site]] = 1 - if( self.initial ): + if self.initial: output += "end_initial_state" else: output += "end_state" return output - def empty(self): """ Returns True if the state is empty """ - return ( len(self.__adsorbed_on_site) == self.__adsorbed_on_site.count(None) ) - + return len(self.__adsorbed_on_site) == self.__adsorbed_on_site.count(None) def number_of_filled_sites(self): """ @@ -97,30 +98,27 @@ def number_of_filled_sites(self): """ return len(self.__adsorbed_on_site) - def _next_entity_number(self): entity = self.__next_entity_number self.__next_entity_number += 1 return entity - def _adsorbed_on_site(self): return self.__adsorbed_on_site - def _updateSpeciesNumbers(self): for sp in self.surface_species: self.__speciesNumbers[sp] = 0 for sp in self.__adsorbed_on_site: - if( sp is None ): continue + if sp is None: + continue - if( sp not in self.__speciesNumbers ): + if sp not in self.__speciesNumbers: self.__speciesNumbers[sp] = 1 else: self.__speciesNumbers[sp] += 1 - def fill_site(self, site_number, species, update_species_numbers=True): """ Fills the ``site_number`` site with the species ``species`` @@ -130,52 +128,59 @@ def fill_site(self, site_number, species, update_species_numbers=True): * ``update_species_numbers`` -- Forces to update the statistics about the number of species adsorbed in the lattice. For better performance, it would be wise to set it to False if a massive number of species are going to be added (one by one) using this function. """ lSpecies = None - if( isinstance(species, str) ): + if isinstance(species, str): for sp in self.surface_species: - if( sp.symbol == species ): + if sp.symbol == species: lSpecies = sp break - elif( isinstance(species, Species) ): + elif isinstance(species, Species): lSpecies = species else: - msg = "\n### ERROR ### LatticeState.fill_sites.\n" + msg = "\n### ERROR ### LatticeState.fill_sites.\n" msg += " Inconsistent type for species. It should be type str or Species.\n" - msg += " Expected: Species|str. Obtained: "+str(type(species))+"\n" + msg += " Expected: Species|str. 
Obtained: " + str(type(species)) + "\n" raise Exception(msg) - if( isinstance(site_number,int) ): + if isinstance(site_number, int): site_number = [site_number] - if( not ( isinstance(site_number,list) or isinstance(site_number,tuple) ) ): - msg = "\n### ERROR ### LatticeState.fill_site.\n" + if not (isinstance(site_number, list) or isinstance(site_number, tuple)): + msg = "\n### ERROR ### LatticeState.fill_site.\n" msg += " Inconsistent values for species denticity and dimensions of site_number\n" msg += " denticity>1 but site_number is not an instance of list or tuple\n" raise Exception(msg) - #if( len(site_number) != lSpecies.denticity ): - #msg = "\n### ERROR ### LatticeState.fill_site.\n" - #msg += " Inconsistent values for species denticity and dimensions of site_number\n" - #msg += " site_number should have the `denticity` number of elements\n" - #raise Exception(msg) + # if( len(site_number) != lSpecies.denticity ): + # msg = "\n### ERROR ### LatticeState.fill_site.\n" + # msg += " Inconsistent values for species denticity and dimensions of site_number\n" + # msg += " site_number should have the `denticity` number of elements\n" + # raise Exception(msg) - if( any([self.__adsorbed_on_site[site] is not None for site in site_number]) ): - msg = "\n### ERROR ### LatticeState.fill_site.\n" + if any([self.__adsorbed_on_site[site] is not None for site in site_number]): + msg = "\n### ERROR ### LatticeState.fill_site.\n" msg += " site is already filled\n" raise Exception(msg) connected = [site_number[0]] to_check = [site_number[0]] - while( to_check and len(connected) < lSpecies.denticity ): + while to_check and len(connected) < lSpecies.denticity: new_check = [] for site in to_check: - new_check.extend(list(filter(lambda x: x in site_number and x not in connected and x not in to_check,self.lattice.nearest_neighbors[site]))) + new_check.extend( + list( + filter( + lambda x: x in site_number and x not in connected and x not in to_check, + self.lattice.nearest_neighbors[site], + ) + ) + ) to_check = list(set(new_check)) connected.extend(to_check) - #if( len(connected) != lSpecies.denticity ): - #msg = "\n### ERROR ### LatticeState.fill_site.\n" - #msg += " sites are not neighboring\n" - #raise Exception(msg) + # if( len(connected) != lSpecies.denticity ): + # msg = "\n### ERROR ### LatticeState.fill_site.\n" + # msg += " sites are not neighboring\n" + # raise Exception(msg) entity_number = self._next_entity_number() @@ -183,10 +188,9 @@ def fill_site(self, site_number, species, update_species_numbers=True): self.__adsorbed_on_site[site] = lSpecies self.__entity_number[site] = entity_number - if( update_species_numbers ): + if update_species_numbers: self._updateSpeciesNumbers() - def fill_sites_random(self, site_name, species, coverage, neighboring=None): """ Fills the named sites ``site_name`` randomly with the species ``species`` by keeping a @@ -200,94 +204,111 @@ def fill_sites_random(self, site_name, species, coverage, neighboring=None): * ``neighboring`` -- Neighboring relations associated to the sites ``site_name``, e.g., ``[[0,2],[1,2]]``. 
""" lSpecies = None - if( isinstance(species, str) ): + if isinstance(species, str): for sp in self.surface_species: - if( sp.symbol == species ): + if sp.symbol == species: lSpecies = sp break - elif( isinstance(species, Species) ): + elif isinstance(species, Species): lSpecies = species else: - msg = "\n### ERROR ### LatticeState.fill_sites_random.\n" + msg = "\n### ERROR ### LatticeState.fill_sites_random.\n" msg += " Inconsistent type for species. It should be type str or Species\n" raise Exception(msg) - if ( isinstance(site_name, str) or isinstance(site_name, int) ): + if isinstance(site_name, str) or isinstance(site_name, int): site_name = [site_name] - if ( lSpecies.denticity != len(site_name) ): + if lSpecies.denticity != len(site_name): msg = "\n### ERROR ### LatticeState.fill_sites_random.\n" msg += " Inconsistent amount of site_name with species denticity\n" raise Exception(msg) neighboring_order = [] - if ( lSpecies.denticity > 1 ): - if ( neighboring == None ): - neighboring = [[x-1,x] for x in range(1,lSpecies.denticity)] + if lSpecies.denticity > 1: + if neighboring == None: + neighboring = [[x - 1, x] for x in range(1, lSpecies.denticity)] neighboring_order = range(lSpecies.denticity) else: connected = [0] to_check = [0] # We need to check if the sites are neighboring, and generate an ordering to generate the subgraphs from the site_names # E.g. when denticity == 3 and neighboring=[[0,2],[1,2]], it should generate as site_name[0], site_name[2], site_name[1] - while ( to_check and len(connected) < lSpecies.denticity ): + while to_check and len(connected) < lSpecies.denticity: new_check = [] for site in to_check: - neighbor_pairs = list(filter(lambda x: site in x,neighboring)) + neighbor_pairs = list(filter(lambda x: site in x, neighboring)) neighbors = [x[0] if x[1] == site else x[1] for x in neighbor_pairs] - new_check.extend(list(filter(lambda x: x not in connected and x not in to_check,neighbors))) + new_check.extend(list(filter(lambda x: x not in connected and x not in to_check, neighbors))) to_check = list(set(new_check)) connected.extend(to_check) - if( len(connected) != lSpecies.denticity ): + if len(connected) != lSpecies.denticity: msg = "\n### ERROR ### LatticeState.fill_sites_random.\n" msg += " neighboring sites not connected.\n" raise Exception(msg) neighboring_order = connected - neighboring = [[x[0],x[1]] if connected.index(x[0]) < connected.index(x[1]) else [x[1],x[0]] for x in neighboring] + neighboring = [ + [x[0], x[1]] if connected.index(x[0]) < connected.index(x[1]) else [x[1], x[0]] for x in neighboring + ] total_available_conf = [] - empty_sites = list(filter(lambda x: self.__adsorbed_on_site[x] is None and self.lattice.site_types[x] == site_name[0],range(self.lattice.number_of_sites()))) + empty_sites = list( + filter( + lambda x: self.__adsorbed_on_site[x] is None and self.lattice.site_types[x] == site_name[0], + range(self.lattice.number_of_sites()), + ) + ) for site_number_i in empty_sites: available_conf = [[site_number_i]] for identicity in neighboring_order[1:]: new_conf = [] neighbors = list(filter(lambda x: x[1] == identicity, neighboring)) for conf in available_conf: - nearest_neighbors = [self.lattice.nearest_neighbors[conf[neighboring_order.index(x[0])]] for x in neighbors] - if (not nearest_neighbors): + nearest_neighbors = [ + self.lattice.nearest_neighbors[conf[neighboring_order.index(x[0])]] for x in neighbors + ] + if not nearest_neighbors: continue - nearest_neighbors = set.intersection(*map(set,nearest_neighbors)) - 
new_conf.extend([conf + [x] for x in list(filter(lambda x: - self.__adsorbed_on_site[x] is None and - x not in conf and - self.lattice.site_types[x] == site_name[identicity], - nearest_neighbors))]) + nearest_neighbors = set.intersection(*map(set, nearest_neighbors)) + new_conf.extend( + [ + conf + [x] + for x in list( + filter( + lambda x: self.__adsorbed_on_site[x] is None + and x not in conf + and self.lattice.site_types[x] == site_name[identicity], + nearest_neighbors, + ) + ) + ] + ) available_conf = new_conf total_available_conf.extend(available_conf) target_sites = set([item for sublist in total_available_conf for item in sublist]) - if( len(target_sites) == 0 ): - msg = "\n### ERROR ### LatticeState.fill_sites_random.\n" - msg += " site_name="+str(site_name)+" not found\n" + if len(target_sites) == 0: + msg = "\n### ERROR ### LatticeState.fill_sites_random.\n" + msg += " site_name=" + str(site_name) + " not found\n" raise Exception(msg) - n_sites_to_fill = round(len(target_sites)*coverage) - random.shuffle( total_available_conf ) + n_sites_to_fill = round(len(target_sites) * coverage) + random.shuffle(total_available_conf) filled_sites = {} available_conf = [] for conf in total_available_conf: - if( len(filled_sites) >= n_sites_to_fill ): + if len(filled_sites) >= n_sites_to_fill: break if any((site in filled_sites for site in conf)): continue for site in conf: - filled_sites[ site ] = True - available_conf.append( conf ) + filled_sites[site] = True + available_conf.append(conf) entity_number = self._next_entity_number() for conf in available_conf: @@ -298,21 +319,19 @@ def fill_sites_random(self, site_name, species, coverage, neighboring=None): self._updateSpeciesNumbers() - if (lSpecies not in self.__speciesNumbers): + if lSpecies not in self.__speciesNumbers: return 0.0 - actual_coverage = self.__speciesNumbers[lSpecies]/len(target_sites) + actual_coverage = self.__speciesNumbers[lSpecies] / len(target_sites) return actual_coverage - - def fill_all_sites( self, site_name, species ): + def fill_all_sites(self, site_name, species): """ Fills all available named sites ``site_name`` with the species ``species``. * ``site_name`` -- Name of the sites to be filled, e.g., ``["fcc","hcp"]`` * ``species`` -- Species to be used to fill the site, e.g., ``Species("O2*")``, or ``"O2*"``. """ - self.fill_sites_random( site_name, species, coverage=1.0 ) - + self.fill_sites_random(site_name, species, coverage=1.0) def coverage_fractions(self): """ @@ -322,12 +341,11 @@ def coverage_fractions(self): for sp in self.surface_species: fractions[sp.symbol] = 0.0 - for sp,nsites in self.__speciesNumbers.items(): - fractions[sp.symbol] = float(nsites)/self.lattice.number_of_sites() + for sp, nsites in self.__speciesNumbers.items(): + fractions[sp.symbol] = float(nsites) / self.lattice.number_of_sites() return fractions - def plot(self, pause=-1, show=True, ax=None, close=False, show_sites_ids=False, file_name=None): """ Uses matplotlib to visualize the lattice state @@ -344,37 +362,38 @@ def plot(self, pause=-1, show=True, ax=None, close=False, show_sites_ids=False, except ImportError as e: return # module doesn't exist, deal with it. 
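# Usage sketch (hedged) for the LatticeState API reformatted above: build a one-site periodic
# lattice, seed it randomly with an adsorbate through fill_sites_random(), and inspect the
# resulting coverage. Method signatures follow this hunk; the site-type name "fcc", the species
# symbol "CO*", the cell geometry and the "scm.pyzacros" import path are illustrative assumptions.
import scm.pyzacros as pz

latt = pz.Lattice(cell_vectors=[[2.77, 0.0], [0.0, 2.77]],
                  repeat_cell=(10, 10),
                  site_types=["fcc"],
                  site_coordinates=[[0.0, 0.0]],
                  neighboring_structure=[[(0, 0), pz.Lattice.NORTH],
                                         [(0, 0), pz.Lattice.EAST]])

co_ads = pz.Species("CO*")                 # adsorbed species; gas species are rejected by __init__

state = pz.LatticeState(latt, surface_species=[co_ads])
reached = state.fill_sites_random(site_name="fcc", species="CO*", coverage=0.5)

print(reached)                             # actual coverage achieved (at most the requested 0.5)
print(state.coverage_fractions())          # per-species fraction of all lattice sites
state.plot(show=False, file_name="state.png")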
- if( ax is None ): - fig,ax = plt.subplots() + if ax is None: + fig, ax = plt.subplots() - #markers = ['o', '.', ',', 'x', '+', 'v', '^', '<', '>', 's', 'd'] - markers = ['o', 's', 'v', '^', 'x', 's', 'd', '+'] - colors = ['r', 'g', 'b', 'c', 'm', 'y', 'k', '#eeefff'] + # markers = ['o', '.', ',', 'x', '+', 'v', '^', '<', '>', 's', 'd'] + markers = ["o", "s", "v", "^", "x", "s", "d", "+"] + colors = ["r", "g", "b", "c", "m", "y", "k", "#eeefff"] symbols = [sp if sp is None else sp.symbol for sp in self.__adsorbed_on_site] items = list(filter(None.__ne__, set(self.__adsorbed_on_site))) - if( self.add_info is not None ): + if self.add_info is not None: ax.set_title("t = {:.3g} s".format(self.add_info.get("time"))) - #-------------------------------- + # -------------------------------- # Plots the lattice - #-------------------------------- - self.lattice.plot( show=False, ax=ax, close=False, color='0.8', show_sites_ids=show_sites_ids ) + # -------------------------------- + self.lattice.plot(show=False, ax=ax, close=False, color="0.8", show_sites_ids=show_sites_ids) - #-------------------------------- + # -------------------------------- # Plots the species - #-------------------------------- + # -------------------------------- site_types = sorted(list(set(self.lattice.site_types))) - for i,sym_i in enumerate([item.symbol for item in items]): + for i, sym_i in enumerate([item.symbol for item in items]): - if( all([sym is None for sym in symbols]) ): continue + if all([sym is None for sym in symbols]): + continue - ids_nn = [ pos for pos,v in enumerate(symbols) if v is not None ] - s_ids_nn = numpy.argsort( [ symbols[k] for k in ids_nn ] ) - sorted_ids = [ ids_nn[k] for k in s_ids_nn ] - sorted_ids = sorted_ids + [ k for k in range(len(symbols)) if k not in sorted_ids ] + ids_nn = [pos for pos, v in enumerate(symbols) if v is not None] + s_ids_nn = numpy.argsort([symbols[k] for k in ids_nn]) + sorted_ids = [ids_nn[k] for k in s_ids_nn] + sorted_ids = sorted_ids + [k for k in range(len(symbols)) if k not in sorted_ids] xvalues = [] yvalues = [] @@ -385,50 +404,62 @@ def plot(self, pause=-1, show=True, ax=None, close=False, show_sites_ids=False, site_type = self.lattice.site_types[sid] sym = symbols[sid] - if( sym == sym_i ): - xvalues.append( x ) - yvalues.append( y ) + if sym == sym_i: + xvalues.append(x) + yvalues.append(y) lSpecies = None for sp in self.surface_species: - if( sp.symbol == sym_i ): + if sp.symbol == sym_i: lSpecies = sp break - imarkers.append( site_types.index(site_type) ) - - if( len(xvalues)>0 ): - #ax.scatter(xvalues, yvalues, color=colors[i], marker=markers[imarkers[i]], s=100, zorder=4, label=sym_i) - #ax.scatter(xvalues, yvalues, color=colors[i], marker=markers[imarkers[i]], s=100/5, zorder=4, label=sym_i) - ax.scatter(xvalues, yvalues, color=colors[i], marker=markers[imarkers[i]], - s=450/math.sqrt(len(self.lattice.site_coordinates)), zorder=4, label=sym_i) - - #------------------------------------------------- + imarkers.append(site_types.index(site_type)) + + if len(xvalues) > 0: + # ax.scatter(xvalues, yvalues, color=colors[i], marker=markers[imarkers[i]], s=100, zorder=4, label=sym_i) + # ax.scatter(xvalues, yvalues, color=colors[i], marker=markers[imarkers[i]], s=100/5, zorder=4, label=sym_i) + ax.scatter( + xvalues, + yvalues, + color=colors[i], + marker=markers[imarkers[i]], + s=450 / math.sqrt(len(self.lattice.site_coordinates)), + zorder=4, + label=sym_i, + ) + + # ------------------------------------------------- # Plots the links for species with 
denticity > 1 - #------------------------------------------------- - for id_site,sp in enumerate(self.__adsorbed_on_site): - if( sp is not None ): - entity_pos = [ i for i,v in enumerate(self.__entity_number) if v==self.__entity_number[id_site] ] + # ------------------------------------------------- + for id_site, sp in enumerate(self.__adsorbed_on_site): + if sp is not None: + entity_pos = [i for i, v in enumerate(self.__entity_number) if v == self.__entity_number[id_site]] - if( len(entity_pos)>0 ): + if len(entity_pos) > 0: for id_site_2 in self.lattice.nearest_neighbors[id_site]: - if( id_site_2 in entity_pos ): + if id_site_2 in entity_pos: coords_i = self.lattice.site_coordinates[id_site] coords_j = self.lattice.site_coordinates[id_site_2] - ax.plot([coords_i[0],coords_j[0]], - [coords_i[1],coords_j[1]], - color=colors[items.index(sp)], linestyle='solid', linewidth=5, zorder=4) - - ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) - - if( file_name is not None ): - plt.savefig( file_name ) - - if( show ): - if( pause == -1 ): + ax.plot( + [coords_i[0], coords_j[0]], + [coords_i[1], coords_j[1]], + color=colors[items.index(sp)], + linestyle="solid", + linewidth=5, + zorder=4, + ) + + ax.legend(loc="center left", bbox_to_anchor=(1, 0.5)) + + if file_name is not None: + plt.savefig(file_name) + + if show: + if pause == -1: plt.show() else: - plt.pause( pause ) + plt.pause(pause) - if( close ): + if close: plt.close("all") diff --git a/core/Mechanism.py b/core/Mechanism.py index d174354..01764c8 100644 --- a/core/Mechanism.py +++ b/core/Mechanism.py @@ -4,9 +4,10 @@ from .SpeciesList import * from .ElementaryReaction import * -__all__ = ['Mechanism'] +__all__ = ["Mechanism"] -class Mechanism( UserList ): + +class Mechanism(UserList): """ Creates a new Mechanism object. If no argument is given, the constructor creates a new empty mechanism. @@ -16,258 +17,269 @@ class Mechanism( UserList ): * ``surface_species`` -- Surface species. It is required if the option ``fileName`` was used. """ - def __init__( self, data=[], fileName=None, gas_species=None, surface_species=None ): - super(Mechanism, self).__init__( data ) + def __init__(self, data=[], fileName=None, gas_species=None, surface_species=None): + super(Mechanism, self).__init__(data) if fileName is not None: if gas_species is not None and surface_species is not None: - self.__fromZacrosFile( fileName, gas_species, surface_species ) + self.__fromZacrosFile(fileName, gas_species, surface_species) else: - raise Exception( "Error: Parameters gas_species and surface_species are requiered to load the Mechanism from a zacros input file" ) + raise Exception( + "Error: Parameters gas_species and surface_species are requiered to load the Mechanism from a zacros input file" + ) # Duplicates are automatically removed. 
copy = self.data self.data = [] for er in copy: - if( er not in self.data ): - self.data.append( er ) - + if er not in self.data: + self.data.append(er) def __fromZacrosFile(self, fileName, gas_species, surface_species): """ Creates a Mechanism from a Zacros input file mechanism_input.dat """ - if not os.path.isfile( fileName ): - raise Exception( "Trying to load a file that doen't exist: "+fileName ) + if not os.path.isfile(fileName): + raise Exception("Trying to load a file that doen't exist: " + fileName) - with open( fileName, "r" ) as inp: + with open(fileName, "r") as inp: file_content = inp.readlines() - file_content = [line.split("#")[0] for line in file_content if line.split("#")[0].strip()] # Removes empty lines and comments + file_content = [ + line.split("#")[0] for line in file_content if line.split("#")[0].strip() + ] # Removes empty lines and comments nline = 0 - while( nline < len(file_content) ): + while nline < len(file_content): tokens = file_content[nline].split() - if( tokens[0].lower() == "reversible_step" or tokens[0].lower() == "step" ): + if tokens[0].lower() == "reversible_step" or tokens[0].lower() == "step": parameters = {} - if( len(tokens) < 2 ): - raise Exception( "Error: Format inconsistent in section reversible_step/step. Label not found!" ) + if len(tokens) < 2: + raise Exception("Error: Format inconsistent in section reversible_step/step. Label not found!") parameters["label"] = tokens[1] - if( tokens[0].lower() == "reversible_step" ): + if tokens[0].lower() == "reversible_step": parameters["reversible"] = True - elif( tokens[0].lower() == "step" ): + elif tokens[0].lower() == "step": parameters["reversible"] = False nline += 1 - while( nline < len(file_content) ): + while nline < len(file_content): tokens = file_content[nline].split() - if( tokens[0] == "end_reversible_step" or tokens[0] == "end_step" ): + if tokens[0] == "end_reversible_step" or tokens[0] == "end_step": break - def process_gas_reacs_prods( sv ): + def process_gas_reacs_prods(sv): output = [] - for i in range(len(sv)-1): - output.append( (sv[i],int(sv[i+1])) ) + for i in range(len(sv) - 1): + output.append((sv[i], int(sv[i + 1]))) return output - def process_neighboring( sv ): + def process_neighboring(sv): output = [] for pair in sv: - a,b = pair.split("-") - output.append( (int(a)-1,int(b)-1) ) + a, b = pair.split("-") + output.append((int(a) - 1, int(b) - 1)) return output - def process_site_types( sv ): + def process_site_types(sv): output = [] for i in range(len(sv)): - if( sv[i].isdigit() ): - output.append( int(sv[i])-1 ) + if sv[i].isdigit(): + output.append(int(sv[i]) - 1) else: - output.append( sv[i] ) + output.append(sv[i]) return output cases = { - "gas_reacs_prods" : lambda sv: parameters.setdefault("gas_reacs_prods", process_gas_reacs_prods(sv) ), - "sites" : lambda sv: parameters.setdefault("sites", int(sv[0])), - "neighboring" : lambda sv: parameters.setdefault("neighboring", process_neighboring(sv)), - "site_types" : lambda sv: parameters.setdefault("site_types", process_site_types(sv)), - "pre_expon" : lambda sv: parameters.setdefault("pre_expon", float(sv[0])), - "pe_ratio" : lambda sv: parameters.setdefault("pe_ratio", float(sv[0])), - "activ_eng" : lambda sv: parameters.setdefault("activation_energy", float(sv[0])), - "prox_factor" : lambda sv: parameters.setdefault("prox_factor", float(sv[0])), + "gas_reacs_prods": lambda sv: parameters.setdefault( + "gas_reacs_prods", process_gas_reacs_prods(sv) + ), + "sites": lambda sv: parameters.setdefault("sites", int(sv[0])), 
+ "neighboring": lambda sv: parameters.setdefault("neighboring", process_neighboring(sv)), + "site_types": lambda sv: parameters.setdefault("site_types", process_site_types(sv)), + "pre_expon": lambda sv: parameters.setdefault("pre_expon", float(sv[0])), + "pe_ratio": lambda sv: parameters.setdefault("pe_ratio", float(sv[0])), + "activ_eng": lambda sv: parameters.setdefault("activation_energy", float(sv[0])), + "prox_factor": lambda sv: parameters.setdefault("prox_factor", float(sv[0])), } - cases.get( tokens[0], lambda sv: None )( tokens[1:] ) + cases.get(tokens[0], lambda sv: None)(tokens[1:]) - if( tokens[0] == "initial" ): + if tokens[0] == "initial": parameters["initial"] = [] site_identate = {} isites = 0 - while( nline < len(file_content) ): + while nline < len(file_content): nline += 1 tokens = file_content[nline].split() - if( isites == parameters["sites"] ): + if isites == parameters["sites"]: break - if( len(tokens) < 3 ): - raise Exception( "Error: Format inconsistent in section reversible_step/step!" ) + if len(tokens) < 3: + raise Exception("Error: Format inconsistent in section reversible_step/step!") - if( tokens[0]+tokens[1]+tokens[2] != "&&&" ): + if tokens[0] + tokens[1] + tokens[2] != "&&&": entity_number = int(tokens[0]) species_name = tokens[1] dentate_number = int(tokens[2]) loc_id = None - for i,sp in enumerate(surface_species): - if( entity_number not in site_identate ): - site_identate[ entity_number ] = 0 + for i, sp in enumerate(surface_species): + if entity_number not in site_identate: + site_identate[entity_number] = 0 - #TODO Find a way to check consistency of dentate_number + # TODO Find a way to check consistency of dentate_number - if( sp.symbol == species_name and site_identate[ entity_number ]+1 == dentate_number ): - site_identate[ entity_number ] = site_identate[ entity_number ] + 1 + if sp.symbol == species_name and site_identate[entity_number] + 1 == dentate_number: + site_identate[entity_number] = site_identate[entity_number] + 1 loc_id = i break - if( loc_id is None ): - raise Exception( "Error: Species "+species_name+" not found! See mechanism initial: "+parameters["label"] ) + if loc_id is None: + raise Exception( + "Error: Species " + + species_name + + " not found! See mechanism initial: " + + parameters["label"] + ) - parameters["initial"].append( surface_species[loc_id] ) + parameters["initial"].append(surface_species[loc_id]) isites += 1 - if( "gas_reacs_prods" in parameters ): - for spn,k in parameters["gas_reacs_prods"]: - if( k == -1 ): + if "gas_reacs_prods" in parameters: + for spn, k in parameters["gas_reacs_prods"]: + if k == -1: loc_id = None - for i,sp in enumerate(gas_species): - if( spn == sp.symbol ): + for i, sp in enumerate(gas_species): + if spn == sp.symbol: loc_id = i break - if( loc_id is None ): - raise Exception( "Error: Gas species "+species_name+" not found!" ) + if loc_id is None: + raise Exception("Error: Gas species " + species_name + " not found!") - parameters["initial"].append( gas_species[loc_id] ) + parameters["initial"].append(gas_species[loc_id]) - if( tokens[0] == "final" ): + if tokens[0] == "final": parameters["final"] = [] site_identate = {} isites = 0 - while( nline < len(file_content) ): + while nline < len(file_content): nline += 1 tokens = file_content[nline].split() - if( isites == parameters["sites"] ): + if isites == parameters["sites"]: break - if( len(tokens) < 3 ): - raise Exception( "Error: Format inconsistent in section lattice_state!" 
) + if len(tokens) < 3: + raise Exception("Error: Format inconsistent in section lattice_state!") - if( tokens[0]+tokens[1]+tokens[2] != "&&&" ): + if tokens[0] + tokens[1] + tokens[2] != "&&&": entity_number = int(tokens[0]) species_name = tokens[1] dentate_number = int(tokens[2]) loc_id = None - for i,sp in enumerate(surface_species): - if( entity_number not in site_identate ): - site_identate[ entity_number ] = 0 + for i, sp in enumerate(surface_species): + if entity_number not in site_identate: + site_identate[entity_number] = 0 - #TODO Find a way to check consistency of dentate_number + # TODO Find a way to check consistency of dentate_number - if( sp.symbol == species_name and site_identate[ entity_number ]+1 == dentate_number ): - site_identate[ entity_number ] = site_identate[ entity_number ] + 1 + if sp.symbol == species_name and site_identate[entity_number] + 1 == dentate_number: + site_identate[entity_number] = site_identate[entity_number] + 1 loc_id = i break - if( loc_id is None ): - raise Exception( "Error: Species "+species_name+" not found! See mechanism final: "+parameters["label"] ) + if loc_id is None: + raise Exception( + "Error: Species " + + species_name + + " not found! See mechanism final: " + + parameters["label"] + ) - parameters["final"].append( surface_species[loc_id] ) + parameters["final"].append(surface_species[loc_id]) isites += 1 - if( "gas_reacs_prods" in parameters ): - for spn,k in parameters["gas_reacs_prods"]: - if( k == 1 ): + if "gas_reacs_prods" in parameters: + for spn, k in parameters["gas_reacs_prods"]: + if k == 1: loc_id = None - for i,sp in enumerate(gas_species): - if( spn == sp.symbol ): + for i, sp in enumerate(gas_species): + if spn == sp.symbol: loc_id = i break - if( loc_id is None ): - raise Exception( "Error: Gas species "+species_name+" not found!" ) + if loc_id is None: + raise Exception("Error: Gas species " + species_name + " not found!") - parameters["final"].append( gas_species[loc_id] ) + parameters["final"].append(gas_species[loc_id]) else: nline += 1 del parameters["sites"] - if( "gas_reacs_prods" in parameters ): del parameters["gas_reacs_prods"] + if "gas_reacs_prods" in parameters: + del parameters["gas_reacs_prods"] - rxn = ElementaryReaction( **parameters ) + rxn = ElementaryReaction(**parameters) - self.append( rxn ) + self.append(rxn) nline += 1 - def append(self, item): """ Append item to the end of the sequence """ - self.insert( len(self), item ) - + self.insert(len(self), item) def extend(self, other): """ Extend sequence by appending elements from the iterable """ for item in other: - self.append( item ) - + self.append(item) def insert(self, i, item): """ Insert value before index """ for erxn in self: - if( erxn.label() == item.label() ): + if erxn.label() == item.label(): return super(Mechanism, self).insert(i, item) - - def __str__( self ): + def __str__(self): """ Translates the object to a string """ - output = "mechanism"+"\n\n" + output = "mechanism" + "\n\n" for i in range(len(self)): output += str(self[i]) - if( i != len(self)-1 ): + if i != len(self) - 1: output += "\n\n" output += "\n\n" output += "end_mechanism" return output - - def surface_species( self ): + def surface_species(self): """ Returns the surface species list. 
""" @@ -275,17 +287,16 @@ def surface_species( self ): for erxn in self: for s in erxn.initial: - if( s.is_adsorbed() ): - species.append( s ) + if s.is_adsorbed(): + species.append(s) for s in erxn.final: - if( s.is_adsorbed() ): - species.append( s ) + if s.is_adsorbed(): + species.append(s) species.remove_duplicates() return species - - def gas_species( self ): + def gas_species(self): """ Returns the gas species list. """ @@ -293,55 +304,49 @@ def gas_species( self ): for erxn in self: for s in erxn.initial: - if( s.is_gas() ): - species.append( s ) + if s.is_gas(): + species.append(s) for s in erxn.final: - if( s.is_gas() ): - species.append( s ) + if s.is_gas(): + species.append(s) species.remove_duplicates() return species - def species(self): """Returns the adsorbed species.""" return self.surface_species() - - def site_types_set( self ): + def site_types_set(self): """ Returns the set of the sites types """ site_types = set() for erxn in self: - site_types.update( erxn.site_types_set() ) + site_types.update(erxn.site_types_set()) return site_types - - def replace_site_types( self, site_types_old, site_types_new ): + def replace_site_types(self, site_types_old, site_types_new): """ Replaces the site types names * ``site_types_old`` -- List of strings containing the old site_types to be replaced * ``site_types_new`` -- List of strings containing the new site_types which would replace old site_types_old. """ - assert( len(site_types_old) == len(site_types_new) ) + assert len(site_types_old) == len(site_types_new) for erxn in self: - erxn.replace_site_types( site_types_old, site_types_new ) + erxn.replace_site_types(site_types_old, site_types_new) - - def find( self, label ): + def find(self, label): """ Returns the list of reactions where the substring ``label`` is found in the reactions' label """ return [rxn for rxn in self if rxn.label().find(label) != -1] - - def find_one( self, label ): + def find_one(self, label): """ Returns the first reaction where the substring ``label`` is found in the reaction's label """ return next(rxn for rxn in self if rxn.label().find(label) != -1) - diff --git a/core/ParametersBase.py b/core/ParametersBase.py index 68d9d12..87ddf6a 100644 --- a/core/ParametersBase.py +++ b/core/ParametersBase.py @@ -1,10 +1,12 @@ import numpy from .Settings import * + class ParameterBase: """ Creates a new ParameterBase object. """ + INDEPENDENT = 0 DEPENDENT = 1 @@ -13,8 +15,7 @@ def __init__(self, name_in_settings, kind, values): self.kind = kind self.values = values - - def name2setitem(self, dummy_var='$var_value'): + def name2setitem(self, dummy_var="$var_value"): """ Converts the attribute ``name_in_settings`` in a string that may be combined with the python ``eval`` function to set new items in ``Settings`` objects. @@ -37,12 +38,12 @@ def name2setitem(self, dummy_var='$var_value'): This is just a tricky way to do ``sett.section.molar_fraction.CO = 3.0``, but it is particularly convenient in some punctual cases. 
""" - tokens = self.name_in_settings.split('.') + tokens = self.name_in_settings.split(".") output = "" - for i,token in enumerate(tokens): - if i != len(tokens)-1: - output += "[\'"+token+"\']" - return output+".__setitem__(\'"+tokens[-1]+"\',"+dummy_var+")" + for i, token in enumerate(tokens): + if i != len(tokens) - 1: + output += "['" + token + "']" + return output + ".__setitem__('" + tokens[-1] + "'," + dummy_var + ")" class ParametersBase(dict): @@ -57,22 +58,21 @@ class ParametersBase(dict): parameters.add( 'x_CO', 'molar_fraction.CO', [0.40, 0.50] ) parameters.add( 'x_O2', 'molar_fraction.O2', lambda params: 1.0-params['x_CO'] ) """ + def __init__(self, *args, **kwargs): dict.__init__(self, *args, **kwargs) self._generator = ParametersBase.zipGenerator - def __str__(self): """ Translates the object to a string """ output = "" - for index,values in self.values().items(): - output += str(index)+": "+str(values)+"\n" + for index, values in self.values().items(): + output += str(index) + ": " + str(values) + "\n" return output - def add(self, name, name_in_settings, values): """ Adds a new parameter @@ -86,52 +86,49 @@ def add(self, name, name_in_settings, values): it is considered ``DEPENDENT``. In the latter case, possible values rest on the values of other parameters, which can be accessed through the lambda function parameter ``params``. """ - if type(values) not in [list,numpy.ndarray] and not callable(values): - msg = "\n### ERROR ### ParametersBase.add.\n" + if type(values) not in [list, numpy.ndarray] and not callable(values): + msg = "\n### ERROR ### ParametersBase.add.\n" msg += " Parameter 'values' should be a 'list', 'numpy.ndarray', or a 'lambda function'.\n" raise Exception(msg) kind = None - if type(values) in [list,numpy.ndarray]: + if type(values) in [list, numpy.ndarray]: kind = ParameterBase.INDEPENDENT elif callable(values): kind = ParameterBase.DEPENDENT - self.__setitem__( name, ParameterBase( name_in_settings, kind, values ) ) - + self.__setitem__(name, ParameterBase(name_in_settings, kind, values)) def set_generator(self, generator): self._generator = generator - def values(self): settings = Settings() - indices,parameters_values,settings_list = self._generator( settings, self ) + indices, parameters_values, settings_list = self._generator(settings, self) return parameters_values - @staticmethod - def zipGenerator( reference_settings, parameters ): + def zipGenerator(reference_settings, parameters): independent_params = [] size = None - for name,item in parameters.items(): + for name, item in parameters.items(): if item.kind == ParameterBase.INDEPENDENT: - independent_params.append( item.values ) + independent_params.append(item.values) if size is None: size = len(item.values) elif size != len(item.values): - msg = "\n### ERROR ### ParametersBase.zipGenerator().\n" + msg = "\n### ERROR ### ParametersBase.zipGenerator().\n" msg += " All parameter in 'generator_parameters' should be lists of the same size.\n" raise Exception(msg) if size == 0: - msg = "\n### ERROR ### ParametersBase.zipGenerator().\n" + msg = "\n### ERROR ### ParametersBase.zipGenerator().\n" msg += " All parameter in 'generator_parameters' should be lists with at least one element.\n" raise Exception(msg) - indices = list( range(size) ) + indices = list(range(size)) parameters_values = {} settings_list = {} @@ -139,19 +136,19 @@ def zipGenerator( reference_settings, parameters ): settings_idx = reference_settings.copy() params = {} - for i,(name,item) in enumerate(parameters.items()): + for i, 
(name, item) in enumerate(parameters.items()): if item.kind == ParameterBase.INDEPENDENT: value = independent_params[i][idx] - eval('settings_idx'+item.name2setitem().replace('$var_value',str(value))) + eval("settings_idx" + item.name2setitem().replace("$var_value", str(value))) params[name] = value - for i,(name,item) in enumerate(parameters.items()): + for i, (name, item) in enumerate(parameters.items()): if item.kind == ParameterBase.DEPENDENT: value = item.values(params) - eval('settings_idx'+item.name2setitem().replace('$var_value',str(value))) + eval("settings_idx" + item.name2setitem().replace("$var_value", str(value))) params[name] = value parameters_values[idx] = params settings_list[idx] = settings_idx - return indices,parameters_values,settings_list + return indices, parameters_values, settings_list diff --git a/core/RKFLoader.py b/core/RKFLoader.py index 4d847b3..a96de1c 100644 --- a/core/RKFLoader.py +++ b/core/RKFLoader.py @@ -10,11 +10,12 @@ from .Mechanism import * from .Lattice import * -__all__ = ['RKFLoader'] +__all__ = ["RKFLoader"] + class RKFLoader: - def __init__( self, results=None ): + def __init__(self, results=None): """ Creates a new RKFLoader object @@ -24,11 +25,10 @@ def __init__( self, results=None ): self.mechanism = Mechanism() self.lattice = None - if( results is not None ): - self.__deriveLatticeAndMechanism( results ) - + if results is not None: + self.__deriveLatticeAndMechanism(results) - def __deriveLatticeAndMechanism( self, results ): + def __deriveLatticeAndMechanism(self, results): """ Parses the .rkf file from AMS. Basically it loads the energy landscape and the binding-sites lattice. @@ -36,7 +36,8 @@ def __deriveLatticeAndMechanism( self, results ): """ import scm.plams import networkx as nx - #import matplotlib.pyplot as plt + + # import matplotlib.pyplot as plt eV = 0.0367493088244753 angs = 1.88972612456506 @@ -44,18 +45,20 @@ def __deriveLatticeAndMechanism( self, results ): self.clusterExpansion = ClusterExpansion() self.mechanism = Mechanism() - #----------------------------- begin reading rkf ----------------------------------- + # ----------------------------- begin reading rkf ----------------------------------- rkf_skeleton = results.get_rkf_skeleton() nLatticeVectors = results.readrkf("Molecule", "nLatticeVectors") latticeVectors = results.readrkf("Molecule", "LatticeVectors") - latticeVectors = [ [latticeVectors[3*i+j]/angs for j in range(nLatticeVectors) ] for i in range(nLatticeVectors) ] + latticeVectors = [ + [latticeVectors[3 * i + j] / angs for j in range(nLatticeVectors)] for i in range(nLatticeVectors) + ] regions = results.readrkf("InputMolecule", "EngineAtomicInfo").split("\0") referenceRegion = results.readrkf("EnergyLandscape", "referenceRegionLabel").strip() nStates = results.readrkf("EnergyLandscape", "nStates") - fileNames = results.readrkf("EnergyLandscape", "fileNames").replace(".rkf","").split("\0") + fileNames = results.readrkf("EnergyLandscape", "fileNames").replace(".rkf", "").split("\0") counts = results.readrkf("EnergyLandscape", "counts") isTS = results.readrkf("EnergyLandscape", "isTS") reactants = results.readrkf("EnergyLandscape", "reactants") @@ -63,159 +66,190 @@ def __deriveLatticeAndMechanism( self, results ): prefactorsFromReactant = results.readrkf("EnergyLandscape", "prefactorsFromReactant") prefactorsFromProduct = results.readrkf("EnergyLandscape", "prefactorsFromProduct") - if type(counts) != list: counts = [ counts ] - if type(isTS) != list: isTS = [ isTS ] - if type(reactants) != list: 
reactants = [ reactants ] - if type(products) != list: products = [ products ] - if type(prefactorsFromReactant) != list: prefactorsFromReactant = [ prefactorsFromReactant ] - if type(prefactorsFromProduct) != list: prefactorsFromProduct = [ prefactorsFromProduct ] + if type(counts) != list: + counts = [counts] + if type(isTS) != list: + isTS = [isTS] + if type(reactants) != list: + reactants = [reactants] + if type(products) != list: + products = [products] + if type(prefactorsFromReactant) != list: + prefactorsFromReactant = [prefactorsFromReactant] + if type(prefactorsFromProduct) != list: + prefactorsFromProduct = [prefactorsFromProduct] # Fix ids from Fortran to python - reactants = [ idState-1 for idState in reactants ] - products = [ idState-1 for idState in products ] + reactants = [idState - 1 for idState in reactants] + products = [idState - 1 for idState in products] nFragments = 0 - if( "nFragments" in rkf_skeleton["EnergyLandscape"] ): + if "nFragments" in rkf_skeleton["EnergyLandscape"]: nFragments = results.readrkf("EnergyLandscape", "nFragments") fragmentsEnergies = results.readrkf("EnergyLandscape", "fragmentsEnergies") fragmentsRegions = results.readrkf("EnergyLandscape", "fragmentsRegions").split("\0") - fragmentsFileNames = results.readrkf("EnergyLandscape", "fragmentsFileNames").replace(".rkf","").split("\0") + fragmentsFileNames = ( + results.readrkf("EnergyLandscape", "fragmentsFileNames").replace(".rkf", "").split("\0") + ) nFStates = 0 - if( "nFStates" in rkf_skeleton["EnergyLandscape"] ): + if "nFStates" in rkf_skeleton["EnergyLandscape"]: nFStates = results.readrkf("EnergyLandscape", "nFStates") - fStatesEnergy = nFStates*[ None ] - fStatesNFragments = nFStates*[ None ] - fStatesComposition = nFStates*[ None ] - fStatesNConnections = nFStates*[ None ] - fStatesConnections = nFStates*[ None ] - fStatesAdsorptionPrefactors = nFStates*[ None ] - fStatesDesorptionPrefactors = nFStates*[ None ] + fStatesEnergy = nFStates * [None] + fStatesNFragments = nFStates * [None] + fStatesComposition = nFStates * [None] + fStatesNConnections = nFStates * [None] + fStatesConnections = nFStates * [None] + fStatesAdsorptionPrefactors = nFStates * [None] + fStatesDesorptionPrefactors = nFStates * [None] for i in range(nFStates): - fStatesEnergy[i] = results.readrkf("EnergyLandscape", "fStatesEnergy("+str(i+1)+")") - fStatesNFragments[i] = results.readrkf("EnergyLandscape", "fStatesNFragments("+str(i+1)+")") - fStatesComposition[i] = results.readrkf("EnergyLandscape", "fStatesComposition("+str(i+1)+")") - fStatesNConnections[i] = results.readrkf("EnergyLandscape", "fStatesNConnections("+str(i+1)+")") - fStatesConnections[i] = results.readrkf("EnergyLandscape", "fStatesConnections("+str(i+1)+")") - fStatesAdsorptionPrefactors[i] = results.readrkf("EnergyLandscape", "fStatesAdsorptionPrefactors("+str(i+1)+")") - fStatesDesorptionPrefactors[i] = results.readrkf("EnergyLandscape", "fStatesDesorptionPrefactors("+str(i+1)+")") - - if type(fStatesComposition[i]) != list: fStatesComposition[i] = [ fStatesComposition[i] ] - if type(fStatesConnections[i]) != list: fStatesConnections[i] = [ fStatesConnections[i] ] - if type(fStatesAdsorptionPrefactors[i]) != list: fStatesAdsorptionPrefactors[i] = [ fStatesAdsorptionPrefactors[i] ] - if type(fStatesDesorptionPrefactors[i]) != list: fStatesDesorptionPrefactors[i] = [ fStatesDesorptionPrefactors[i] ] + fStatesEnergy[i] = results.readrkf("EnergyLandscape", "fStatesEnergy(" + str(i + 1) + ")") + fStatesNFragments[i] = 
results.readrkf("EnergyLandscape", "fStatesNFragments(" + str(i + 1) + ")") + fStatesComposition[i] = results.readrkf("EnergyLandscape", "fStatesComposition(" + str(i + 1) + ")") + fStatesNConnections[i] = results.readrkf("EnergyLandscape", "fStatesNConnections(" + str(i + 1) + ")") + fStatesConnections[i] = results.readrkf("EnergyLandscape", "fStatesConnections(" + str(i + 1) + ")") + fStatesAdsorptionPrefactors[i] = results.readrkf( + "EnergyLandscape", "fStatesAdsorptionPrefactors(" + str(i + 1) + ")" + ) + fStatesDesorptionPrefactors[i] = results.readrkf( + "EnergyLandscape", "fStatesDesorptionPrefactors(" + str(i + 1) + ")" + ) + + if type(fStatesComposition[i]) != list: + fStatesComposition[i] = [fStatesComposition[i]] + if type(fStatesConnections[i]) != list: + fStatesConnections[i] = [fStatesConnections[i]] + if type(fStatesAdsorptionPrefactors[i]) != list: + fStatesAdsorptionPrefactors[i] = [fStatesAdsorptionPrefactors[i]] + if type(fStatesDesorptionPrefactors[i]) != list: + fStatesDesorptionPrefactors[i] = [fStatesDesorptionPrefactors[i]] # Fix ids from Fortran to python - fStatesComposition[i] = [ max(0,idFragment-1) for idFragment in fStatesComposition[i] ] - fStatesConnections[i] = [ max(0,idState-1) for idState in fStatesConnections[i] ] + fStatesComposition[i] = [max(0, idFragment - 1) for idFragment in fStatesComposition[i]] + fStatesConnections[i] = [max(0, idState - 1) for idState in fStatesConnections[i]] energyReferenceLabels = [] energyReferenceValues = [] - if( "energyReferenceLabels" in rkf_skeleton["EnergyLandscape"] and "energyReferenceValues" in rkf_skeleton["EnergyLandscape"] ): + if ( + "energyReferenceLabels" in rkf_skeleton["EnergyLandscape"] + and "energyReferenceValues" in rkf_skeleton["EnergyLandscape"] + ): energyReferenceLabels = results.readrkf("EnergyLandscape", "energyReferenceLabels").split("\0") energyReferenceValues = results.readrkf("EnergyLandscape", "energyReferenceValues") - if type(energyReferenceValues) != list: energyReferenceValues = [ energyReferenceValues ] + if type(energyReferenceValues) != list: + energyReferenceValues = [energyReferenceValues] - if( "BindingSites" not in rkf_skeleton ): - msg = "\n### ERROR ### RKFLoader.__deriveLatticeAndMechanism.\n" + if "BindingSites" not in rkf_skeleton: + msg = "\n### ERROR ### RKFLoader.__deriveLatticeAndMechanism.\n" msg += " The BindingSites section is required to derive the lattice\n" raise NameError(msg) nSites = results.readrkf("BindingSites", "nSites") - if( referenceRegion != results.readrkf("BindingSites", "ReferenceRegionLabel").strip() ): - msg = "\n### ERROR ### RKFLoader.__deriveLatticeAndMechanism.\n" - msg += " The reference region from the EnergyLandscape is different than for the BindingSites\n" + if referenceRegion != results.readrkf("BindingSites", "ReferenceRegionLabel").strip(): + msg = "\n### ERROR ### RKFLoader.__deriveLatticeAndMechanism.\n" + msg += ( + " The reference region from the EnergyLandscape is different than for the BindingSites\n" + ) raise NameError(msg) labels = results.readrkf("BindingSites", "Labels").split() coords = results.readrkf("BindingSites", "Coords") - coords = [ [coords[3*i+j]/angs for j in range(3) ] for i in range(nSites) ] + coords = [[coords[3 * i + j] / angs for j in range(3)] for i in range(nSites)] coordsFrac = results.readrkf("BindingSites", "CoordsFrac") - coordsFrac = [ [coordsFrac[3*i+j] for j in range(3) ] for i in range(nSites) ] + coordsFrac = [[coordsFrac[3 * i + j] for j in range(3)] for i in range(nSites)] nConnections = 
results.readrkf("BindingSites", "nConnections") fromSites = results.readrkf("BindingSites", "FromSites") toSites = results.readrkf("BindingSites", "ToSites") latticeDisplacements = results.readrkf("BindingSites", "LatticeDisplacements") - latticeDisplacements = [ [latticeDisplacements[nLatticeVectors*i+j] for j in range(nLatticeVectors) ] for i in range(nConnections) ] + latticeDisplacements = [ + [latticeDisplacements[nLatticeVectors * i + j] for j in range(nLatticeVectors)] for i in range(nConnections) + ] nParentStates = results.readrkf("BindingSites", "nParentStates") parentStatesRaw = results.readrkf("BindingSites", "ParentStates") parentAtomsRaw = results.readrkf("BindingSites", "ParentAtoms") - if type(fromSites) != list: fromSites = [ fromSites ] - if type(toSites) != list: toSites = [ toSites ] - if type(nParentStates) != list: nParentStates = [ nParentStates ] - if type(parentStatesRaw) != list: parentStatesRaw = [ parentStatesRaw ] - if type(parentAtomsRaw) != list: parentAtomsRaw = [ parentAtomsRaw ] + if type(fromSites) != list: + fromSites = [fromSites] + if type(toSites) != list: + toSites = [toSites] + if type(nParentStates) != list: + nParentStates = [nParentStates] + if type(parentStatesRaw) != list: + parentStatesRaw = [parentStatesRaw] + if type(parentAtomsRaw) != list: + parentAtomsRaw = [parentAtomsRaw] - #----------------------------- end reading rkf ----------------------------------- + # ----------------------------- end reading rkf ----------------------------------- energyReference = {} - for i,label in enumerate(energyReferenceLabels): + for i, label in enumerate(energyReferenceLabels): energyReference[label.strip()] = energyReferenceValues[i] # Fix ids from Fortran to python - fromSites = [ max(0,idSite-1) for idSite in fromSites ] - toSites = [ max(0,idSite-1) for idSite in toSites ] - parentStatesRaw = [ max(0,i-1) for i in parentStatesRaw ] + fromSites = [max(0, idSite - 1) for idSite in fromSites] + toSites = [max(0, idSite - 1) for idSite in toSites] + parentStatesRaw = [max(0, i - 1) for i in parentStatesRaw] - assert( len(fromSites) == len(toSites) ) + assert len(fromSites) == len(toSites) latticeGraph = nx.Graph() for i in range(nSites): - latticeGraph.add_node( i ) + latticeGraph.add_node(i) for i in range(len(fromSites)): - latticeGraph.add_edge( fromSites[i], toSites[i] ) + latticeGraph.add_edge(fromSites[i], toSites[i]) latticeSPaths = dict(nx.all_pairs_shortest_path(latticeGraph)) - def getLatticeRxnSubgraph( bs_from, bs_to ): + def getLatticeRxnSubgraph(bs_from, bs_to): path = [] for bs1 in bs_from: - if not bs1 in latticeSPaths: continue + if not bs1 in latticeSPaths: + continue for bs2 in bs_to: - if not bs2 in latticeSPaths[bs1]: continue - path.extend( latticeSPaths[bs1][bs2] ) + if not bs2 in latticeSPaths[bs1]: + continue + path.extend(latticeSPaths[bs1][bs2]) - return latticeGraph.subgraph( path ) + return latticeGraph.subgraph(path) - #G1 = getLatticeRxnSubgraph( [4, 7], [8, 11] ) + # G1 = getLatticeRxnSubgraph( [4, 7], [8, 11] ) - #subax1 = plt.subplot(121) - #nx.draw(latticeGraph, with_labels=True, font_weight='bold') - #subax2 = plt.subplot(122) - #nx.draw(G1, with_labels=True, font_weight='bold') - #plt.show() + # subax1 = plt.subplot(121) + # nx.draw(latticeGraph, with_labels=True, font_weight='bold') + # subax2 = plt.subplot(122) + # nx.draw(G1, with_labels=True, font_weight='bold') + # plt.show() - #exit(-1) + # exit(-1) # It is not neccessary for fix the parentAtomsRaw, because PLAMS # uses also 1-based indices for its atoms - 
#parentAtomsRaw = [ max(0,i-1) for i in parentAtomsRaw ] + # parentAtomsRaw = [ max(0,i-1) for i in parentAtomsRaw ] - state2Molecule = nStates*[ None ] - state2Energy = nStates*[ None ] - for i,fileName in enumerate(fileNames): + state2Molecule = nStates * [None] + state2Energy = nStates * [None] + for i, fileName in enumerate(fileNames): mol = results.get_molecule("Molecule", file=fileNames[i]) amsResults = results.read_rkf_section("AMSResults", file=fileNames[i]) state2Molecule[i] = mol - state2Energy[i] = amsResults["Energy"]/eV + state2Energy[i] = amsResults["Energy"] / eV - #vibrations = results.read_rkf_section("Vibrations", file=fileNames[i]) - #print("Frequencies = ", vibrations["Frequencies[cm-1]"]) + # vibrations = results.read_rkf_section("Vibrations", file=fileNames[i]) + # print("Frequencies = ", vibrations["Frequencies[cm-1]"]) # parentStates and parentAtoms elements are actually vectors for each binding site # Here I split the vectors in that way and disable original variables - parentStates = nSites*[ None ] - parentAtoms = nSites*[ None ] + parentStates = nSites * [None] + parentAtoms = nSites * [None] for idSite in range(nSites): idStart = sum(nParentStates[0:idSite]) - idEnd = sum(nParentStates[0:idSite])+nParentStates[idSite] + idEnd = sum(nParentStates[0:idSite]) + nParentStates[idSite] parentStates[idSite] = parentStatesRaw[idStart:idEnd] parentAtoms[idSite] = parentAtomsRaw[idStart:idEnd] @@ -223,7 +257,7 @@ def getLatticeRxnSubgraph( bs_from, bs_to ): parentStatesRaw = None parentAtomsRaw = None - #-------------------------------------------------------------------------------------- + # -------------------------------------------------------------------------------------- # The next lines feed the array state2BindingSites. # The binding sites for the reactants or products are already defined # The binding sites for a TS are the union of the ones from its reactants and products @@ -231,31 +265,31 @@ def getLatticeRxnSubgraph( bs_from, bs_to ): # TS ----> R --> bsR1, bsR2, ... # | # +--> P --> bsP1, bsP2, ... 
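        # For example (illustrative indices, cf. the getLatticeRxnSubgraph call commented out above):
        # a TS whose reactant has binding sites [4, 7] and whose product has binding sites [8, 11]
        # ends up with state2BindingSites[idTS] == [4, 7, 8, 11].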
- #-------------------------------------------------------------------------------------- + # -------------------------------------------------------------------------------------- - state2BindingSites = nStates*[ None ] - attachedMoleculeData = {} # attachedMoleculeData[idState][idSite] + state2BindingSites = nStates * [None] + attachedMoleculeData = {} # attachedMoleculeData[idState][idSite] # Loop over the binding sites to load the binding sites for each local minima # Notice that the parent-states of the binding sites are always local minima for idSite in range(nSites): # Loop over the parent states of each binding site - for i,idState in enumerate(parentStates[idSite]): - if( state2BindingSites[ idState ] is None ): - state2BindingSites[ idState ] = [] - attachedMoleculeData[ idState ] = {} + for i, idState in enumerate(parentStates[idSite]): + if state2BindingSites[idState] is None: + state2BindingSites[idState] = [] + attachedMoleculeData[idState] = {} - state2BindingSites[ idState ].append( idSite ) + state2BindingSites[idState].append(idSite) - #------------------------------------------------------------------- + # ------------------------------------------------------------------- # This block finds the attached molecule to the binding site idSite attachedAtom = parentAtoms[idSite][i] adsorbate = scm.plams.Molecule() - for j,atom in enumerate(state2Molecule[idState]): - if( regions[j] != "region="+referenceRegion ): - adsorbate.add_atom( atom ) + for j, atom in enumerate(state2Molecule[idState]): + if regions[j] != "region=" + referenceRegion: + adsorbate.add_atom(atom) adsorbate.guess_bonds() adsorbateMols = adsorbate.separate() @@ -266,88 +300,94 @@ def getLatticeRxnSubgraph( bs_from, bs_to ): r1 = numpy.array(atom.coords) r2 = numpy.array(state2Molecule[idState][attachedAtom].coords) - if( all( abs(r1-r2) < 1e-6 ) ): - attachedMoleculeData[idState][idSite] = { 'formula':sMol.get_formula(), - 'cm':numpy.array(sMol.get_center_of_mass()), - 'mol':sMol } + if all(abs(r1 - r2) < 1e-6): + attachedMoleculeData[idState][idSite] = { + "formula": sMol.get_formula(), + "cm": numpy.array(sMol.get_center_of_mass()), + "mol": sMol, + } loc = 1 break - if( loc == 1 ): + if loc == 1: break - #------------------------------------------------------------------- + # ------------------------------------------------------------------- # Loop over the TSs to load their binding sites # Notice that the previous loop didn't include any TS # Here we defined attached molecules to a TS as the union of both the molecules attached to its reactants and products for idState in range(nStates): - if( not isTS[idState] ): continue + if not isTS[idState]: + continue idReactant = reactants[idState] idProduct = products[idState] # We only accept complete TSs - if( idReactant == -1 or idProduct == -1 ): + if idReactant == -1 or idProduct == -1: continue # Loop over the binding sites of the reactant - for i,idSite in enumerate(state2BindingSites[idReactant]): - if( state2BindingSites[ idState ] is None ): - state2BindingSites[ idState ] = [] + for i, idSite in enumerate(state2BindingSites[idReactant]): + if state2BindingSites[idState] is None: + state2BindingSites[idState] = [] - state2BindingSites[ idState ].append( idSite ) + state2BindingSites[idState].append(idSite) # Loop over the binding sites of the products - for i,idSite in enumerate(state2BindingSites[idProduct]): - if( state2BindingSites[ idState ] is None ): - state2BindingSites[ idState ] = [] + for i, idSite in 
enumerate(state2BindingSites[idProduct]): + if state2BindingSites[idState] is None: + state2BindingSites[idState] = [] - state2BindingSites[ idState ].append( idSite ) + state2BindingSites[idState].append(idSite) - attachedMoleculeData[idState] = { **attachedMoleculeData[idReactant], **attachedMoleculeData[idProduct] } + attachedMoleculeData[idState] = {**attachedMoleculeData[idReactant], **attachedMoleculeData[idProduct]} - def getProperties( idState ): + def getProperties(idState): - speciesNames = len(G1_nodes)*[ "" ] - entityNumber = len(G1_nodes)*[ -1 ] - for i,bs1 in enumerate(G1_nodes): - if( bs1 in attachedMoleculeData[idState].keys() ): - speciesName1 = attachedMoleculeData[idState][bs1]['formula'] - centerOfMass1 = attachedMoleculeData[idState][bs1]['cm'] + speciesNames = len(G1_nodes) * [""] + entityNumber = len(G1_nodes) * [-1] + for i, bs1 in enumerate(G1_nodes): + if bs1 in attachedMoleculeData[idState].keys(): + speciesName1 = attachedMoleculeData[idState][bs1]["formula"] + centerOfMass1 = attachedMoleculeData[idState][bs1]["cm"] speciesNames[i] = speciesName1 - for j,bs2 in enumerate(G1_nodes): - if( j<=i ): continue + for j, bs2 in enumerate(G1_nodes): + if j <= i: + continue - if( bs2 in attachedMoleculeData[idState].keys() ): - speciesName2 = attachedMoleculeData[idState][bs2]['formula'] - centerOfMass2 = attachedMoleculeData[idState][bs2]['cm'] + if bs2 in attachedMoleculeData[idState].keys(): + speciesName2 = attachedMoleculeData[idState][bs2]["formula"] + centerOfMass2 = attachedMoleculeData[idState][bs2]["cm"] - if( speciesName1 == speciesName2 - and numpy.linalg.norm( centerOfMass1-centerOfMass2 ) < 0.5 ): - entityNumber[i] = max(entityNumber)+1 - entityNumber[j] = entityNumber[i] + if speciesName1 == speciesName2 and numpy.linalg.norm(centerOfMass1 - centerOfMass2) < 0.5: + entityNumber[i] = max(entityNumber) + 1 + entityNumber[j] = entityNumber[i] - if( entityNumber[i] == -1 ): - entityNumber[i] = max(entityNumber)+1 + if entityNumber[i] == -1: + entityNumber[i] = max(entityNumber) + 1 - denticity = len(speciesNames)*[1] + denticity = len(speciesNames) * [1] if entityNumber.count(-1) != len(G1_nodes): - denticity = [ entityNumber.count(entityNumber[i]) if entityNumber[i] != -1 else 1 for i in range(len(speciesNames)) ] - species = SpeciesList( [ Species(f+"*"*denticity[i],denticity[i]) for i,f in enumerate(speciesNames) ] ) + denticity = [ + entityNumber.count(entityNumber[i]) if entityNumber[i] != -1 else 1 + for i in range(len(speciesNames)) + ] + species = SpeciesList([Species(f + "*" * denticity[i], denticity[i]) for i, f in enumerate(speciesNames)]) - return species,entityNumber + return species, entityNumber - def getPropertiesForCluster( species, entityNumber ): + def getPropertiesForCluster(species, entityNumber): # This section remove the empty adsorption sites which are not needed for clusters data = {} - data['site_types'] = [] - data['entity_number'] = [] - data['neighboring'] = [] - data['species'] = [] + data["site_types"] = [] + data["entity_number"] = [] + data["neighboring"] = [] + data["species"] = [] nonEmptySites = [] - for i,bs in enumerate(G1_nodes): - if( len(species[i].symbol.replace('*','')) != 0 ): + for i, bs in enumerate(G1_nodes): + if len(species[i].symbol.replace("*", "")) != 0: nonEmptySites.append(bs) if len(nonEmptySites) > 1: @@ -355,78 +395,78 @@ def getPropertiesForCluster( species, entityNumber ): path = [] for bs1 in nonEmptySites: for bs2 in nonEmptySites: - if( bs1 != bs2 ): - path.extend( G1_shortest_paths[bs1][bs2] ) + if 
bs1 != bs2: + path.extend(G1_shortest_paths[bs1][bs2]) - G2 = G1.subgraph( path ) + G2 = G1.subgraph(path) G2_nodes = list(G2.nodes()) - G2_edges = [ sorted([G2_nodes.index(pair[0]),G2_nodes.index(pair[1])]) for pair in G2.edges() ] + G2_edges = [sorted([G2_nodes.index(pair[0]), G2_nodes.index(pair[1])]) for pair in G2.edges()] for bs in G2.nodes(): old_id = G1_nodes.index(bs) - data['site_types'].append( site_types[old_id] ) - data['entity_number'].append( entityNumber[old_id] ) - data['species'].append( species[old_id] ) + data["site_types"].append(site_types[old_id]) + data["entity_number"].append(entityNumber[old_id]) + data["species"].append(species[old_id]) - data['entity_number'] = [ data['entity_number'][i]-min(data['entity_number']) for i in range(len(data['entity_number'])) ] - data['neighboring'] = G2_edges + data["entity_number"] = [ + data["entity_number"][i] - min(data["entity_number"]) for i in range(len(data["entity_number"])) + ] + data["neighboring"] = G2_edges else: - if len(site_types)>0 and len(entityNumber)>0 and len(species)>0: - data['site_types'].append( site_types[0] ) - data['entity_number'].append( entityNumber[0] ) - data['species'].append( species[0] ) - data['neighboring'] = [] + if len(site_types) > 0 and len(entityNumber) > 0 and len(species) > 0: + data["site_types"].append(site_types[0]) + data["entity_number"].append(entityNumber[0]) + data["species"].append(species[0]) + data["neighboring"] = [] return data - - def getMultiplicity( data ): + def getMultiplicity(data): mult = 1 - if( len(data['site_types'])==2 and - data['site_types'][0] == data['site_types'][1] and - data['species'][0].symbol == data['species'][1].symbol and - len(data['neighboring'])==1 and - data['neighboring'][0] in [[0,1],[1,0]] ): + if ( + len(data["site_types"]) == 2 + and data["site_types"][0] == data["site_types"][1] + and data["species"][0].symbol == data["species"][1].symbol + and len(data["neighboring"]) == 1 + and data["neighboring"][0] in [[0, 1], [1, 0]] + ): mult = 2 return mult + def getFormationEnergy(idState): + fenergy = state2Energy[idState] # In eV - def getFormationEnergy( idState ): - fenergy = state2Energy[idState] # In eV - - if( len(energyReference) > 0 ): - for i,atom in enumerate(state2Molecule[idState]): - if( regions[i] != "region="+referenceRegion ): + if len(energyReference) > 0: + for i, atom in enumerate(state2Molecule[idState]): + if regions[i] != "region=" + referenceRegion: fenergy -= energyReference[atom.symbol] - if( referenceRegion in regions ): - fenergy -= energyReference[referenceRegion]/eV + if referenceRegion in regions: + fenergy -= energyReference[referenceRegion] / eV return fenergy + def getGasFormationEnergy(mol, energy): + fenergy = energy # In eV - def getGasFormationEnergy( mol, energy ): - fenergy = energy # In eV - - if( len(energyReference) > 0 ): + if len(energyReference) > 0: for atom in mol: - fenergy -= energyReference[atom.symbol]/eV + fenergy -= energyReference[atom.symbol] / eV return fenergy - # Loop over the TSs to find the species # Each TS defines an ElementaryReaction and at the same time it defines # two Clusters from reactants and products. 
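        # In short, for every complete TS below: clusterReactant and clusterProduct are built from the
        # binding sites of reactants[idTS] and products[idTS], and an ElementaryReaction with
        # activation_energy = E(TS) - E(reactant) and pe_ratio = prefactorR/prefactorP (guarded against
        # a zero denominator) is appended to self.mechanism.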
for idState in range(nStates): - if( isTS[idState] ): + if isTS[idState]: idTS = idState # We only accept complete TSs - if( reactants[idTS] == -1 or products[idTS] == -1 ): + if reactants[idTS] == -1 or products[idTS] == -1: continue # Locates the reactant and product @@ -436,187 +476,204 @@ def getGasFormationEnergy( mol, energy ): prefactorP = prefactorsFromProduct[idTS] # Filters the connection specifically for this TS - G1 = getLatticeRxnSubgraph( state2BindingSites[idReactant], state2BindingSites[idProduct] ) + G1 = getLatticeRxnSubgraph(state2BindingSites[idReactant], state2BindingSites[idProduct]) G1_nodes = sorted(list(G1.nodes())) - G1_edges = [ sorted([G1_nodes.index(pair[0]),G1_nodes.index(pair[1])]) for pair in G1.edges() ] + G1_edges = [sorted([G1_nodes.index(pair[0]), G1_nodes.index(pair[1])]) for pair in G1.edges()] G1_shortest_paths = dict(nx.all_pairs_shortest_path(G1)) - site_types = [ labels[j] for j in G1_nodes ] + site_types = [labels[j] for j in G1_nodes] - #-------------------------------------------------------------------- + # -------------------------------------------------------------------- # Reactant - speciesReactant, entityNumber = getProperties( idReactant ) - cluster_data = getPropertiesForCluster( speciesReactant, entityNumber ) - - clusterReactant = Cluster( site_types=cluster_data['site_types'], - entity_number=cluster_data['entity_number'], - neighboring=cluster_data['neighboring'], - species=cluster_data['species'], - multiplicity=getMultiplicity(cluster_data), - energy=getFormationEnergy(idReactant) ) + speciesReactant, entityNumber = getProperties(idReactant) + cluster_data = getPropertiesForCluster(speciesReactant, entityNumber) + + clusterReactant = Cluster( + site_types=cluster_data["site_types"], + entity_number=cluster_data["entity_number"], + neighboring=cluster_data["neighboring"], + species=cluster_data["species"], + multiplicity=getMultiplicity(cluster_data), + energy=getFormationEnergy(idReactant), + ) entityNumberReactant = entityNumber - #-------------------------------------------------------------------- + # -------------------------------------------------------------------- # Product - speciesProduct,entityNumber = getProperties( idProduct ) - cluster_data = getPropertiesForCluster( speciesProduct, entityNumber ) - - clusterProduct = Cluster( site_types=cluster_data['site_types'], - entity_number=cluster_data['entity_number'], - neighboring=cluster_data['neighboring'], - species=cluster_data['species'], - multiplicity=getMultiplicity(cluster_data), - energy=getFormationEnergy(idReactant) ) + speciesProduct, entityNumber = getProperties(idProduct) + cluster_data = getPropertiesForCluster(speciesProduct, entityNumber) + + clusterProduct = Cluster( + site_types=cluster_data["site_types"], + entity_number=cluster_data["entity_number"], + neighboring=cluster_data["neighboring"], + species=cluster_data["species"], + multiplicity=getMultiplicity(cluster_data), + energy=getFormationEnergy(idReactant), + ) entityNumberProduct = entityNumber - #-------------------------------------------------------------------- + # -------------------------------------------------------------------- # Reaction - entityNumberReactant = entityNumberReactant if entityNumberReactant.count(-1) != len(entityNumberReactant) else None - entityNumberProduct = entityNumberProduct if entityNumberProduct.count(-1) != len(entityNumberProduct) else None - activationEnergy = state2Energy[idTS]-state2Energy[idReactant] - - pe_ratio = prefactorR/max(1e-7,prefactorP) + 
entityNumberReactant = ( + entityNumberReactant if entityNumberReactant.count(-1) != len(entityNumberReactant) else None + ) + entityNumberProduct = ( + entityNumberProduct if entityNumberProduct.count(-1) != len(entityNumberProduct) else None + ) + activationEnergy = state2Energy[idTS] - state2Energy[idReactant] + + pe_ratio = prefactorR / max(1e-7, prefactorP) reversible = True if pe_ratio > 1e-6 else False - reaction = ElementaryReaction( site_types=site_types, - initial_entity_number=entityNumberReactant, - final_entity_number=entityNumberProduct, - neighboring=G1_edges, - initial=speciesReactant, - final=speciesProduct, - reversible=reversible, - pre_expon=prefactorR, - pe_ratio=pe_ratio, - activation_energy=activationEnergy ) - - #self.clusterExpansion.extend( [clusterReactant, clusterProduct] ) - self.mechanism.append( reaction ) + reaction = ElementaryReaction( + site_types=site_types, + initial_entity_number=entityNumberReactant, + final_entity_number=entityNumberProduct, + neighboring=G1_edges, + initial=speciesReactant, + final=speciesProduct, + reversible=reversible, + pre_expon=prefactorR, + pe_ratio=pe_ratio, + activation_energy=activationEnergy, + ) + + # self.clusterExpansion.extend( [clusterReactant, clusterProduct] ) + self.mechanism.append(reaction) # Loop over the Fragmented states to find the species and reactions for idFState in range(nFStates): - energy = fStatesEnergy[idFState]/eV + energy = fStatesEnergy[idFState] / eV nFragments = fStatesNFragments[idFState] composition = fStatesComposition[idFState] # Loop over the associated connected states - for pos,idState in enumerate(fStatesConnections[idFState]): + for pos, idState in enumerate(fStatesConnections[idFState]): prefactorAdsorption = fStatesAdsorptionPrefactors[idFState][pos] prefactorDesorption = fStatesDesorptionPrefactors[idFState][pos] - G1 = getLatticeRxnSubgraph( state2BindingSites[idState], state2BindingSites[idState] ) + G1 = getLatticeRxnSubgraph(state2BindingSites[idState], state2BindingSites[idState]) G1_nodes = sorted(list(G1.nodes())) - G1_edges = [ [G1_nodes.index(pair[0]),G1_nodes.index(pair[1])] for pair in G1.edges() ] + G1_edges = [[G1_nodes.index(pair[0]), G1_nodes.index(pair[1])] for pair in G1.edges()] G1_shortest_paths = dict(nx.all_pairs_shortest_path(G1)) - site_types = [ labels[j] for j in G1_nodes ] + site_types = [labels[j] for j in G1_nodes] - #-------------------------------------------------------------------- + # -------------------------------------------------------------------- # State - speciesState, entityNumber = getProperties( idState ) - cluster_data = getPropertiesForCluster( speciesState, entityNumber ) - - clusterState = Cluster( site_types=cluster_data['site_types'], - entity_number=cluster_data['entity_number'], - neighboring=cluster_data['neighboring'], - species=cluster_data['species'], - multiplicity=getMultiplicity(cluster_data), - energy=getFormationEnergy(idState) ) - - #-------------------------------------------------------------------- + speciesState, entityNumber = getProperties(idState) + cluster_data = getPropertiesForCluster(speciesState, entityNumber) + + clusterState = Cluster( + site_types=cluster_data["site_types"], + entity_number=cluster_data["entity_number"], + neighboring=cluster_data["neighboring"], + species=cluster_data["species"], + multiplicity=getMultiplicity(cluster_data), + energy=getFormationEnergy(idState), + ) + + # -------------------------------------------------------------------- # Fragmented State - speciesFState = SpeciesList( [ 
Species("*") for f in cluster_data['site_types'] ] ) + speciesFState = SpeciesList([Species("*") for f in cluster_data["site_types"]]) for idFragment in composition: - if( fragmentsRegions[idFragment] == "active" ): + if fragmentsRegions[idFragment] == "active": mol = results.get_molecule("Molecule", file=fragmentsFileNames[idFragment]) amsResults = results.read_rkf_section("AMSResults", file=fragmentsFileNames[idFragment]) - speciesFState.append( Species( mol.get_formula(), gas_energy=getGasFormationEnergy( mol, amsResults["Energy"]/eV ) ) ) + speciesFState.append( + Species(mol.get_formula(), gas_energy=getGasFormationEnergy(mol, amsResults["Energy"] / eV)) + ) - #-------------------------------------------------------------------- + # -------------------------------------------------------------------- # Reaction # X_gas --> X* - #TODO Here we are assuming that there is no a TS between the fragmented state and the state. - - pe_ratio = prefactorAdsorption/prefactorDesorption - activationEnergy = 0.0 if state2Energy[idState]= 2 ) + assert len(latticeVectors) >= 2 - neighboring_structure = nConnections*[ None ] + neighboring_structure = nConnections * [None] for i in range(nConnections): ld = latticeDisplacements[i] first = None - if( ld[0] >= 0 and ld[1] >= 0 ): - first = (fromSites[i],toSites[i]) + if ld[0] >= 0 and ld[1] >= 0: + first = (fromSites[i], toSites[i]) else: - first = (toSites[i],fromSites[i]) - ld = [ abs(v) for v in ld ] + first = (toSites[i], fromSites[i]) + ld = [abs(v) for v in ld] second = None - if( tuple(ld[0:2]) == (0,0) ): + if tuple(ld[0:2]) == (0, 0): second = Lattice.SELF - elif( tuple(ld[0:2]) == (0,1) ): + elif tuple(ld[0:2]) == (0, 1): second = Lattice.NORTH - elif( tuple(ld[0:2]) == (1,0) ): + elif tuple(ld[0:2]) == (1, 0): second = Lattice.EAST - elif( tuple(ld[0:2]) == (1,1) ): + elif tuple(ld[0:2]) == (1, 1): second = Lattice.NORTHEAST else: - raise NameError("Unknown case for LD="+str(ld[0:2])) + raise NameError("Unknown case for LD=" + str(ld[0:2])) - if( first is None or second is None ): continue + if first is None or second is None: + continue # (1,1):"northeast", <<< #TODO I don't understand this case # (1,-1):"southeast" <<< #TODO I don't understand this case - neighboring_structure[i] = [first,second] + neighboring_structure[i] = [first, second] - if( None in neighboring_structure ): + if None in neighboring_structure: raise NameError("Neighboring structure incomplete") # Here we omit the z-axis. In the future, we should make a 2D projection of # the 3D lattice instead. This is necessary to be able to study adsorption on nanoclusters. - self.lattice = Lattice( cell_vectors=[ [v[0],v[1]] for v in latticeVectors[0:2] ], - repeat_cell=(1,1), # Default value. - site_types=labels, - site_coordinates=coordsFrac, - neighboring_structure=neighboring_structure) - - - def replace_site_types( self, site_types_old, site_types_new ): + self.lattice = Lattice( + cell_vectors=[[v[0], v[1]] for v in latticeVectors[0:2]], + repeat_cell=(1, 1), # Default value. + site_types=labels, + site_coordinates=coordsFrac, + neighboring_structure=neighboring_structure, + ) + + def replace_site_types(self, site_types_old, site_types_new): """ Replaces the site types names * ``site_types_old`` -- List of strings containing the old site_types to be replaced * ``site_types_new`` -- List of strings containing the new site_types which would replace old site_types_old. 
""" - assert( len(site_types_old) == len(site_types_new) ) - - self.lattice.replace_site_types( site_types_old, site_types_new ) - self.mechanism.replace_site_types( site_types_old, site_types_new ) - self.clusterExpansion.replace_site_types( site_types_old, site_types_new ) + assert len(site_types_old) == len(site_types_new) + self.lattice.replace_site_types(site_types_old, site_types_new) + self.mechanism.replace_site_types(site_types_old, site_types_new) + self.clusterExpansion.replace_site_types(site_types_old, site_types_new) @staticmethod - def merge( rkf_loaders, bs_precision=0.5 ): + def merge(rkf_loaders, bs_precision=0.5): """ Merges a list of rkf_loader into a single one @@ -625,19 +682,16 @@ def merge( rkf_loaders, bs_precision=0.5 ): final_loader = RKFLoader() - for i,loader in enumerate(rkf_loaders): + for i, loader in enumerate(rkf_loaders): for cluster in loader.clusterExpansion: - final_loader.clusterExpansion.append( cluster ) + final_loader.clusterExpansion.append(cluster) for elementaryStep in loader.mechanism: - final_loader.mechanism.append( elementaryStep ) + final_loader.mechanism.append(elementaryStep) if final_loader.lattice is None: final_loader.lattice = loader.lattice else: - final_loader.lattice.extend( loader.lattice, precision=bs_precision ) + final_loader.lattice.extend(loader.lattice, precision=bs_precision) return final_loader - - - diff --git a/core/Settings.py b/core/Settings.py index 86c27ea..97dd82d 100644 --- a/core/Settings.py +++ b/core/Settings.py @@ -4,9 +4,10 @@ import scm.plams -__all__ = ['Settings'] +__all__ = ["Settings"] -class Settings( scm.plams.Settings ): + +class Settings(scm.plams.Settings): """ Automatic multi-level dictionary. Subclass of the PLAMS class `scm.plams.Settings <../../plams/components/settings.html>`_. This dictionary can contain any kind of information stored in key-value pairs. Be aware that no check of the rightness of the key/values is done at this level. This object is used just as a container. The verification of the physical meaning of the key-value pairs is done at the ZacrosJob class. The following is an example of use adapted to zacros: @@ -27,96 +28,105 @@ class Settings( scm.plams.Settings ): def __init__(self, *args, **kwargs): dict.__init__(self, *args, **kwargs) - - def __str__( self ): + def __str__(self): """ Translates the object to a string """ - output = "" - - if( 'random_seed' in self ): output += "random_seed " + "%10s"%self.get('random_seed')+"\n" - output += "temperature " + "%10s"%self.get('temperature')+"\n" - output += "pressure " + "%10s"%self.get('pressure')+"\n\n" - - for option in ['snapshots', 'process_statistics', 'species_numbers', 'energetics_lists', 'process_lists']: - if( option in self ): - pair = self[option] - add = None - - if( pair[0] in ['event', 'elemevent', 'time', 'logtime', 'realtime'] ): - - # e.g. sett.snapshots = ('time', 1e1*dt) - if( len(pair) == 2 ): - key,value = pair - - # e.g. sett.on_sites_seeding_reportevent_report = ('time', 1e1*dt, 'on') - elif( - len(pair) == 3 - and pair[2] in ['on', 'off'] - ): - key,value,add = pair[0],pair[1],pair[2] - - # e.g. sett.snapshots = ('logtime', 1e1*dt, 0.1) - elif( len(pair) == 3 ): - key,value = pair[0],(pair[1],pair[2]) - - # e.g. sett.on_sites_seeding_reportevent_report = ('logtime', 1e-08*dt, 1.5, 'on') - elif( - len(pair) == 4 - and pair[3] in ['on', 'off'] - ): - key,value,add = pair[0],(pair[1],pair[2]),pair[3] - - # e.g. 
sett.process_lists = ('time', 1e1*dt, 'select_event_type', [1,2]) - elif( - len(pair) == 4 - and pair[2] in ['select_cluster_type', 'select_event_type'] - and type(pair[3]) == list - ): - key,value,add = pair[0],pair[1],(pair[2],pair[3]) - - # e.g. sett.process_lists = ('logtime', 1e-08*dt, 1.5, 'select_event_type', [1,2]) - elif( - len(pair) == 5 - and pair[3] in ['select_cluster_type', 'select_event_type'] - and type(pair[4]) == list - ): - key,value,add = pair[0],(pair[1],pair[2]),(pair[3],pair[4]) - - else: - msg = "\n### ERROR ### keyword "+option+" in settings.\n" - msg += " Its value should be a pair (key,value[,value1]).\n" - msg += " Possible options for key: 'event', 'elemevent', 'time', 'logtime', 'realtime'\n" - msg += " Possible options for value: , , , (,), \n" - msg += " Given value: "+str(pair)+"\n" + output = "" + + if "random_seed" in self: + output += "random_seed " + "%10s" % self.get("random_seed") + "\n" + output += "temperature " + "%10s" % self.get("temperature") + "\n" + output += "pressure " + "%10s" % self.get("pressure") + "\n\n" + + for option in ["snapshots", "process_statistics", "species_numbers", "energetics_lists", "process_lists"]: + if option in self: + pair = self[option] + add = None + + if pair[0] in ["event", "elemevent", "time", "logtime", "realtime"]: + + # e.g. sett.snapshots = ('time', 1e1*dt) + if len(pair) == 2: + key, value = pair + + # e.g. sett.on_sites_seeding_reportevent_report = ('time', 1e1*dt, 'on') + elif len(pair) == 3 and pair[2] in ["on", "off"]: + key, value, add = pair[0], pair[1], pair[2] + + # e.g. sett.snapshots = ('logtime', 1e1*dt, 0.1) + elif len(pair) == 3: + key, value = pair[0], (pair[1], pair[2]) + + # e.g. sett.on_sites_seeding_reportevent_report = ('logtime', 1e-08*dt, 1.5, 'on') + elif len(pair) == 4 and pair[3] in ["on", "off"]: + key, value, add = pair[0], (pair[1], pair[2]), pair[3] + + # e.g. sett.process_lists = ('time', 1e1*dt, 'select_event_type', [1,2]) + elif ( + len(pair) == 4 + and pair[2] in ["select_cluster_type", "select_event_type"] + and type(pair[3]) == list + ): + key, value, add = pair[0], pair[1], (pair[2], pair[3]) + + # e.g. 
sett.process_lists = ('logtime', 1e-08*dt, 1.5, 'select_event_type', [1,2]) + elif ( + len(pair) == 5 + and pair[3] in ["select_cluster_type", "select_event_type"] + and type(pair[4]) == list + ): + key, value, add = pair[0], (pair[1], pair[2]), (pair[3], pair[4]) + + else: + msg = "\n### ERROR ### keyword " + option + " in settings.\n" + msg += " Its value should be a pair (key,value[,value1]).\n" + msg += " Possible options for key: 'event', 'elemevent', 'time', 'logtime', 'realtime'\n" + msg += " Possible options for value: , , , (,), \n" + msg += " Given value: " + str(pair) + "\n" + raise NameError(msg) + + if key == "logtime": + if len(value) != 2: + msg = "\n### ERROR ### keyword '" + option + " on " + key + "' in settings.\n" + msg += " Its value should be a pair of reals (,).\n" + msg += " Given value: " + str(value) + "\n" raise NameError(msg) - if( key == 'logtime' ): - if( len(value) != 2 ): - msg = "\n### ERROR ### keyword '"+option+" on "+key+"' in settings.\n" - msg += " Its value should be a pair of reals (,).\n" - msg += " Given value: "+str(value)+"\n" - raise NameError(msg) - - output += "%-20s"%option + " " + "on "+ key + " " + str(float(value[0])) + " " + str(float(value[1])) - elif( key == 'event' or key == 'elemevent' ): - output += "%-20s"%option + " " + "on "+ key + " " + str(int(value)) - else: - output += "%-20s"%option + " " + "on "+ key + " " + str(float(value)) - - if( add is not None ): - if( type(add) == tuple and type(add[1]) == list ): - output += "%20s"%add[0] + " " + " ".join([str(v) for v in add[1]]) - - output += "\n" - - if( 'on_sites_seeding_report' in self ): output += "on_sites_seeding_report " + str(self.get(('on_sites_seeding_report')))+"\n" - if( 'event_report' in self ): output += "event_report " + str(self.get(('event_report')))+"\n" - - if( 'max_steps' in self ): output += "max_steps " + str(self.get(('max_steps')))+"\n" - if( 'max_time' in self ): output += "max_time " + str(self.get(('max_time')))+"\n" - if( 'wall_time' in self ): output += "wall_time " + str(self.get(('wall_time')))+"\n" - - if( 'override_array_bounds' in self ): output += "override_array_bounds " + str(self.get(('override_array_bounds')))+"\n" + output += ( + "%-20s" % option + + " " + + "on " + + key + + " " + + str(float(value[0])) + + " " + + str(float(value[1])) + ) + elif key == "event" or key == "elemevent": + output += "%-20s" % option + " " + "on " + key + " " + str(int(value)) + else: + output += "%-20s" % option + " " + "on " + key + " " + str(float(value)) + + if add is not None: + if type(add) == tuple and type(add[1]) == list: + output += "%20s" % add[0] + " " + " ".join([str(v) for v in add[1]]) + + output += "\n" + + if "on_sites_seeding_report" in self: + output += "on_sites_seeding_report " + str(self.get(("on_sites_seeding_report"))) + "\n" + if "event_report" in self: + output += "event_report " + str(self.get(("event_report"))) + "\n" + + if "max_steps" in self: + output += "max_steps " + str(self.get(("max_steps"))) + "\n" + if "max_time" in self: + output += "max_time " + str(self.get(("max_time"))) + "\n" + if "wall_time" in self: + output += "wall_time " + str(self.get(("wall_time"))) + "\n" + + if "override_array_bounds" in self: + output += "override_array_bounds " + str(self.get(("override_array_bounds"))) + "\n" return output diff --git a/core/Species.py b/core/Species.py index 4ea553a..cc7f9dd 100644 --- a/core/Species.py +++ b/core/Species.py @@ -1,6 +1,7 @@ import chemparse -__all__ = ['Species'] +__all__ = ["Species"] + class Species: """ 
@@ -15,74 +16,74 @@ class Species: # Mass of the most common isotope in Da __ATOMIC_MASS = { - "H":1.0078, - "HE":4.0026, - "LI":7.0160, - "BE":9.0122, - "B":11.0093, - "C":12.0000, - "N":14.0031, - "O":15.9949, - "F":18.9984, - "NE":19.9924, - "NA":22.9898, - "MG":23.9850, - "AL":26.9815, - "SI":27.9769, - "P":30.9738, - "S":31.9721, - "CL":34.9689, - "AR":39.9624, - "K":38.9637, - "CA":39.9626, - "SC":44.9559, - "TI":47.9479, - "V":50.9440, - "CR":51.9405, - "MN":54.9380, - "FE":55.9349, - "CO":58.9332, - "NI":57.9353, - "CU":62.9296, - "ZN":63.9291, - "GA":68.9256, - "GE":73.9212, - "AS":74.9216, - "SE":79.9165, - "BR":78.9183, - "KR":83.9115, - "RB":84.9118, - "SR":87.9056, - "Y":88.9058, - "ZR":89.9047, - "NB":92.9064, - "RH":102.9055, - "AG":106.9051, - "IN":114.9039, - "SB":120.9038, - "I":126.9045, - "CS":132.9055, - "BA":137.9052, - "LA":138.9064, - "CE":139.9054, - "PR":140.9077, - "EU":152.9212, - "TB":158.9254, - "HO":164.9303, - "TM":168.9342, - "LU":174.9408, - "HF":179.9466, - "TA":180.9480, - "RE":186.9558, - "OS":191.9615, - "IR":192.9629, - "AU":196.9666, - "TL":204.9744, - "PB":207.9767, - "BI":208.9804, - "TH":232.0381, - "PA":231.0359, - "U":238.0508 + "H": 1.0078, + "HE": 4.0026, + "LI": 7.0160, + "BE": 9.0122, + "B": 11.0093, + "C": 12.0000, + "N": 14.0031, + "O": 15.9949, + "F": 18.9984, + "NE": 19.9924, + "NA": 22.9898, + "MG": 23.9850, + "AL": 26.9815, + "SI": 27.9769, + "P": 30.9738, + "S": 31.9721, + "CL": 34.9689, + "AR": 39.9624, + "K": 38.9637, + "CA": 39.9626, + "SC": 44.9559, + "TI": 47.9479, + "V": 50.9440, + "CR": 51.9405, + "MN": 54.9380, + "FE": 55.9349, + "CO": 58.9332, + "NI": 57.9353, + "CU": 62.9296, + "ZN": 63.9291, + "GA": 68.9256, + "GE": 73.9212, + "AS": 74.9216, + "SE": 79.9165, + "BR": 78.9183, + "KR": 83.9115, + "RB": 84.9118, + "SR": 87.9056, + "Y": 88.9058, + "ZR": 89.9047, + "NB": 92.9064, + "RH": 102.9055, + "AG": 106.9051, + "IN": 114.9039, + "SB": 120.9038, + "I": 126.9045, + "CS": 132.9055, + "BA": 137.9052, + "LA": 138.9064, + "CE": 139.9054, + "PR": 140.9077, + "EU": 152.9212, + "TB": 158.9254, + "HO": 164.9303, + "TM": 168.9342, + "LU": 174.9408, + "HF": 179.9466, + "TA": 180.9480, + "RE": 186.9558, + "OS": 191.9615, + "IR": 192.9629, + "AU": 196.9666, + "TL": 204.9744, + "PB": 207.9767, + "BI": 208.9804, + "TH": 232.0381, + "PA": 231.0359, + "U": 238.0508, } SURFACE = 0 @@ -90,8 +91,7 @@ class Species: UNSPECIFIED = -1 - def __init__(self, symbol, denticity = None, - gas_energy = None, kind = None, mass = None ): + def __init__(self, symbol, denticity=None, gas_energy=None, kind=None, mass=None): """ Creates a new Species object. 
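
        Example with illustrative values:

            CO_gas = Species( 'CO', gas_energy=0.0 )  # gas-phase species: no '*' in the symbol
            CO_ads = Species( 'CO*' )                  # surface species: denticity inferred from '*' (here 1)
            empty  = Species( '*' )                    # an empty adsorption site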
""" @@ -99,112 +99,108 @@ def __init__(self, symbol, denticity = None, self.gas_energy = gas_energy self.denticity = denticity - if( denticity is None ): + if denticity is None: self.denticity = symbol.count("*") self.kind = kind - if( kind is None ): - if(len(self.symbol) > 1 and self.symbol.find("*") != -1 and self.denticity == 0): - msg = "\n### ERROR ### Species.__init__.\n" + if kind is None: + if len(self.symbol) > 1 and self.symbol.find("*") != -1 and self.denticity == 0: + msg = "\n### ERROR ### Species.__init__.\n" msg += "Inconsistent symbol and denticity\n" raise NameError(msg) - if(self.symbol.find("*") == -1 and self.denticity != 0): - msg = "\n### ERROR ### Species.__init__.\n" + if self.symbol.find("*") == -1 and self.denticity != 0: + msg = "\n### ERROR ### Species.__init__.\n" msg += "Denticity given for a gas species\n" msg += "Did you forget to add * in the species label?\n" raise NameError(msg) self.kind = Species.GAS - if( self.symbol.find("*") != -1 ): + if self.symbol.find("*") != -1: self.kind = Species.SURFACE - if( self.kind != Species.GAS and self.gas_energy is not None ): - msg = "\n### ERROR ### Species.__init__.\n" + if self.kind != Species.GAS and self.gas_energy is not None: + msg = "\n### ERROR ### Species.__init__.\n" msg += "Parameter gas_energy cannot be associated with a surface species\n" raise NameError(msg) - if( self.kind == Species.GAS and self.gas_energy is None ): + if self.kind == Species.GAS and self.gas_energy is None: self.gas_energy = 0.0 self.__composition = {} self.__mass = 0.0 - if( mass is None ): - self.__composition = chemparse.parse_formula( symbol.replace("*","") ) + if mass is None: + self.__composition = chemparse.parse_formula(symbol.replace("*", "")) - if( not all( [ key.upper() in Species.__ATOMIC_MASS.keys() for key in self.__composition.keys() ] ) ): + if not all([key.upper() in Species.__ATOMIC_MASS.keys() for key in self.__composition.keys()]): - if( self.kind == Species.GAS ): - msg = "\n### ERROR ### Species.__init__.\n" + if self.kind == Species.GAS: + msg = "\n### ERROR ### Species.__init__.\n" msg += "The parameter 'mass' is required for gas species if the symbol doesn't correspond to a chemical formula\n" raise NameError(msg) - elif( self.kind == Species.SURFACE ): - msg = "Species symbol ("+self.symbol+") does not correspond to a chemical formula. Using default mass=0.0" + elif self.kind == Species.SURFACE: + msg = ( + "Species symbol (" + + self.symbol + + ") does not correspond to a chemical formula. Using default mass=0.0" + ) print(msg) else: - for s,n in self.__composition.items(): - self.__mass += n*Species.__ATOMIC_MASS[s.upper()] + for s, n in self.__composition.items(): + self.__mass += n * Species.__ATOMIC_MASS[s.upper()] else: self.__mass = mass - - def __eq__( self, other ): + def __eq__(self, other): """ Returns True if both objects have the same symbol. Otherwise returns False * ``other`` -- """ - if( type(other) == int and other == Species.UNSPECIFIED ): + if type(other) == int and other == Species.UNSPECIFIED: return False - if( self.symbol == other.symbol ): + if self.symbol == other.symbol: return True else: return False - def __hash__(self): """ Returns a hash based on the symbol. """ return hash(self.symbol) - - def __str__( self ): + def __str__(self): """ Translates the object to a string """ - output = self.symbol + output = self.symbol return output - - def is_adsorbed( self ): + def is_adsorbed(self): """ Returns True if the name of the species has the character '*'. 
""" - return ( self.kind == Species.SURFACE ) + return self.kind == Species.SURFACE - - def is_gas( self ): + def is_gas(self): """ Returns True if the name of the species has no the character '*'. """ - return ( self.kind == Species.GAS ) - + return self.kind == Species.GAS - def composition( self ): + def composition(self): """ Returns a dictionary containing the number of atoms of each kind. """ return self.__composition - - def mass( self ): + def mass(self): """ Returns the mass of the species in Da. """ return self.__mass - diff --git a/core/SpeciesList.py b/core/SpeciesList.py index 87fcddc..be29614 100644 --- a/core/SpeciesList.py +++ b/core/SpeciesList.py @@ -3,7 +3,8 @@ from .Species import * -__all__ = ['SpeciesList'] +__all__ = ["SpeciesList"] + class SpeciesList(UserList): """ @@ -12,93 +13,98 @@ class SpeciesList(UserList): * ``data`` -- List of Species to initially include. """ - def __init__( self, data=[] ): - super(SpeciesList, self).__init__( data ) + def __init__(self, data=[]): + super(SpeciesList, self).__init__(data) self.__label = None self.__updateLabel() - def __hash__(self): """ Returns a hash based on the label """ return hash(self.__label) - - def __str__( self ): + def __str__(self): """ Translates the object to a string """ gasSpecies = [] adsorbedSpecies = [] - for i,sp in enumerate(self): - if( sp.is_adsorbed() ): - if( sp.symbol != "*" and sp.symbol != "**" and sp.symbol != "***" and sp.symbol != "****" ): + for i, sp in enumerate(self): + if sp.is_adsorbed(): + if sp.symbol != "*" and sp.symbol != "**" and sp.symbol != "***" and sp.symbol != "****": adsorbedSpecies.append(i) else: gasSpecies.append(i) output = "" - if( len(gasSpecies) > 0 ): - output = "n_gas_species "+str(len(gasSpecies))+"\n" + if len(gasSpecies) > 0: + output = "n_gas_species " + str(len(gasSpecies)) + "\n" output += "gas_specs_names " for i in gasSpecies: - output += " %20s"%self[i].symbol + output += " %20s" % self[i].symbol output += "\n" output += "gas_energies " for i in gasSpecies: - output += " %20.10e"%self[i].gas_energy + output += " %20.10e" % self[i].gas_energy output += "\n" output += "gas_molec_weights " for i in gasSpecies: - output += " %20.10e"%self[i].mass() + output += " %20.10e" % self[i].mass() output += "\n" - if( len(adsorbedSpecies) > 0 ): - output += "n_surf_species "+str(len(adsorbedSpecies))+"\n" + if len(adsorbedSpecies) > 0: + output += "n_surf_species " + str(len(adsorbedSpecies)) + "\n" output += "surf_specs_names " for i in adsorbedSpecies: - if( self[i].symbol != "*" and self[i].symbol != "**" and self[i].symbol != "***" and self[i].symbol != "****" ): - output += "%10s"%self[i].symbol + if ( + self[i].symbol != "*" + and self[i].symbol != "**" + and self[i].symbol != "***" + and self[i].symbol != "****" + ): + output += "%10s" % self[i].symbol output += "\n" output += "surf_specs_dent " for i in adsorbedSpecies: - if( self[i].symbol != "*" and self[i].symbol != "**" and self[i].symbol != "***" and self[i].symbol != "****" ): - output += "%10s"%self[i].denticity + if ( + self[i].symbol != "*" + and self[i].symbol != "**" + and self[i].symbol != "***" + and self[i].symbol != "****" + ): + output += "%10s" % self[i].denticity return output - def gas_species(self): """Returns the gas species.""" output = [] for sp in self: - if( sp.is_gas() ): - output.append( sp ) - - return SpeciesList( output ) + if sp.is_gas(): + output.append(sp) + return SpeciesList(output) def surface_species(self): """Returns the adsorbed species.""" output = [] for sp in self: - 
if( sp.is_adsorbed() ): - output.append( sp ) + if sp.is_adsorbed(): + output.append(sp) - return SpeciesList( output ) + return SpeciesList(output) - - def mass( self, entity_numbers ): + def mass(self, entity_numbers): """ Returns the total mass as the sum of its all species in Da. @@ -106,38 +112,35 @@ def mass( self, entity_numbers ): """ mass = 0.0 mapped_entity = {} - for i,sp in enumerate(self.surface_species()): - if( entity_numbers[i] not in mapped_entity ): + for i, sp in enumerate(self.surface_species()): + if entity_numbers[i] not in mapped_entity: mass += sp.mass() - mapped_entity[ entity_numbers[i] ] = 1 + mapped_entity[entity_numbers[i]] = 1 - for i,sp in enumerate(self.gas_species()): + for i, sp in enumerate(self.gas_species()): mass += sp.mass() return mass - - def __updateLabel( self ): + def __updateLabel(self): """ Updates the attribute 'label' """ self.__label = "" for i in range(len(self)): self.__label += self[i].symbol - if(i != len(self)-1): + if i != len(self) - 1: self.__label += "," - - def label( self ): + def label(self): """ Returns the label of the cluster """ - if( self.label is None ): + if self.label is None: self.__updateLabel() return self.__label - def remove_duplicates(self): """ Removes duplicate species. Two species are considered the same if they have the same symbol. @@ -146,33 +149,32 @@ def remove_duplicates(self): self.data = [] for sp in copy_self: - if( sp not in self.data ): - self.data.append( sp ) + if sp not in self.data: + self.data.append(sp) self.__updateLabel() - @staticmethod - def default_entity_numbers( nsites, species ): + def default_entity_numbers(nsites, species): """ Calculates the list of entity numbers assuming that species with the same symbol belong to the same entity. * ``nsites`` -- * ``species`` -- """ - entity_number = nsites*[ None ] + entity_number = nsites * [None] id_map = {} for i in range(nsites): - if( i==0 ): - id_map[ species[i] ] = i + if i == 0: + id_map[species[i]] = i else: - if( species[i] not in id_map ): - id_map[ species[i] ] = max( id_map.values() ) + 1 + if species[i] not in id_map: + id_map[species[i]] = max(id_map.values()) + 1 else: - if( species[0:i+1].count(species[i]) > species[i].denticity ): - id_map[ species[i] ] = max( id_map.values() ) + 1 + if species[0 : i + 1].count(species[i]) > species[i].denticity: + id_map[species[i]] = max(id_map.values()) + 1 - entity_number[i] = id_map[ species[i] ] + entity_number[i] = id_map[species[i]] return entity_number diff --git a/core/ZacrosJob.py b/core/ZacrosJob.py index 2e2933b..ab9049e 100644 --- a/core/ZacrosJob.py +++ b/core/ZacrosJob.py @@ -16,19 +16,21 @@ from .Mechanism import * from .Settings import * -__all__ = ['ZacrosJob', 'ZacrosExecutableNotFoundError'] +__all__ = ["ZacrosJob", "ZacrosExecutableNotFoundError"] -class ZacrosExecutableNotFoundError( Exception ): + +class ZacrosExecutableNotFoundError(Exception): """Exception raised if zacros executable is not found in path Attributes: command -- zacros command """ + def __init__(self, command): - super().__init__( "Zacros executable ("+command+") not found in $PATH" ) + super().__init__("Zacros executable (" + command + ") not found in $PATH") -class ZacrosJob( scm.plams.SingleJob ): +class ZacrosJob(scm.plams.SingleJob): """ Create a new ZacrosJob object. 
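
    A minimal usage sketch (illustrative; it assumes ``sett``, ``lat``, ``mech``, and ``ce`` were built
    beforehand as Settings, Lattice, Mechanism, and ClusterExpansion objects):

        job = ZacrosJob( settings=sett, lattice=lat, mechanism=mech, cluster_expansion=ce )
        results = job.run()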
@@ -41,19 +43,19 @@ class ZacrosJob( scm.plams.SingleJob ): * ``restart`` -- ZacrosJob object from which the calculation will be restarted """ - _command = os.environ["AMSBIN"]+'/zacros' if 'AMSBIN' in os.environ else 'zacros.x' + _command = os.environ["AMSBIN"] + "/zacros" if "AMSBIN" in os.environ else "zacros.x" _result_type = ZacrosResults _filenames = { - 'simulation': 'simulation_input.dat', - 'lattice': 'lattice_input.dat', - 'energetics': 'energetics_input.dat', - 'mechanism': 'mechanism_input.dat', - 'state': 'state_input.dat', - 'restart': 'restart.inf', - 'run': 'slurm.run', - 'err': 'std.err', - 'out': 'std.out'} - + "simulation": "simulation_input.dat", + "lattice": "lattice_input.dat", + "energetics": "energetics_input.dat", + "mechanism": "mechanism_input.dat", + "state": "state_input.dat", + "restart": "restart.inf", + "run": "slurm.run", + "err": "std.err", + "out": "std.out", + } def __init__(self, lattice, mechanism, cluster_expansion, initial_state=None, restart=None, **kwargs): @@ -66,15 +68,15 @@ def check_molar_fraction(settings=Settings, species_list=SpeciesList): :parm settings: Settings object with the main settings of the KMC calculation. """ - list_of_species = [ sp.symbol for sp in species_list.gas_species() ] + list_of_species = [sp.symbol for sp in species_list.gas_species()] section = settings.molar_fraction - if( "molar_fraction" in settings ): + if "molar_fraction" in settings: # Check if the molar fraction is assigned to a gas species: for key in settings.molar_fraction.keys(): if key not in list_of_species: - msg = "\n### ERROR ### check_molar_fraction_labels.\n" + msg = "\n### ERROR ### check_molar_fraction_labels.\n" msg += "molar fraction defined for a non-gas species." raise NameError(msg) @@ -86,38 +88,40 @@ def check_molar_fraction(settings=Settings, species_list=SpeciesList): for key in list_of_species: section += {key: 0.000} - if( 'settings' not in kwargs ): - msg = "\n### ERROR ### ZacrosJob.__init__.\n" + if "settings" not in kwargs: + msg = "\n### ERROR ### ZacrosJob.__init__.\n" msg += " Parameter 'settings' is required by the ZacrosJob constructor.\n" raise NameError(msg) - if( 'molecule' in kwargs ): + if "molecule" in kwargs: print("Warning: parameter 'molecule' is not used by the ZacrosJob constructor'") - del kwargs['molecule'] + del kwargs["molecule"] scm.plams.SingleJob.__init__(self, molecule=None, **kwargs) self.lattice = lattice self.mechanism = mechanism - if( type(mechanism) == list ): self.mechanism = Mechanism(mechanism) + if type(mechanism) == list: + self.mechanism = Mechanism(mechanism) - #if( set(map(type, self.mechanism.site_types_set())) == {int} ) + # if( set(map(type, self.mechanism.site_types_set())) == {int} ) - if( not self.mechanism.site_types_set().issubset( self.lattice.site_types_set() ) ): - msg = "\n### ERROR ### ZacrosJob.__init__.\n" + if not self.mechanism.site_types_set().issubset(self.lattice.site_types_set()): + msg = "\n### ERROR ### ZacrosJob.__init__.\n" msg += " Inconsistent site types found between lattice and mechanism.\n" - msg += " lattice="+str(self.lattice.site_types_set())+"\n" - msg += " mechanism="+str(self.mechanism.site_types_set())+"\n" + msg += " lattice=" + str(self.lattice.site_types_set()) + "\n" + msg += " mechanism=" + str(self.mechanism.site_types_set()) + "\n" raise NameError(msg) self.cluster_expansion = cluster_expansion - if( type(cluster_expansion) == list ): self.cluster_expansion = ClusterExpansion(cluster_expansion) + if type(cluster_expansion) == list: + 
self.cluster_expansion = ClusterExpansion(cluster_expansion) - if( not self.cluster_expansion.site_types_set().issubset( self.lattice.site_types_set() ) ): - msg = "\n### ERROR ### ZacrosJob.__init__.\n" + if not self.cluster_expansion.site_types_set().issubset(self.lattice.site_types_set()): + msg = "\n### ERROR ### ZacrosJob.__init__.\n" msg += " Inconsistent site types found between lattice and cluster_expansion.\n" - msg += " lattice="+str(self.lattice.site_types_set())+"\n" - msg += " cluster_expansion="+str(self.cluster_expansion.site_types_set())+"\n" + msg += " lattice=" + str(self.lattice.site_types_set()) + "\n" + msg += " cluster_expansion=" + str(self.cluster_expansion.site_types_set()) + "\n" raise NameError(msg) self.initial_state = initial_state @@ -125,15 +129,14 @@ def check_molar_fraction(settings=Settings, species_list=SpeciesList): self._restart_file_content = None self.restart = None - if( restart is not None ): + if restart is not None: self.restart = restart - restart_file = os.path.join(restart.path, ZacrosJob._filenames['restart']) + restart_file = os.path.join(restart.path, ZacrosJob._filenames["restart"]) with open(restart_file, "r") as depFile: self._restart_file_content = depFile.readlines() check_molar_fraction(self.settings, self.mechanism.gas_species()) - def get_input(self): """ It should generate the Zacros input file. But Zacros has several @@ -144,13 +147,12 @@ def get_input(self): """ return str(self) - def get_simulation_input(self): """ Return a string with the content of simulation_input.dat. """ - def get_molar_fractions( settings, species_list ): + def get_molar_fractions(settings, species_list): """ Get molar fractions using the correct order of list_gas_species. @@ -183,104 +185,98 @@ def get_molar_fractions( settings, species_list ): list_of_molar_fractions.clear() tuple_tmp = [i[0] for i in total_list] molar_tmp = [i[1] for i in total_list] - for i in [ sp.symbol for sp in species_list.gas_species() ]: + for i in [sp.symbol for sp in species_list.gas_species()]: for j, k in enumerate(tuple_tmp): if i == k: list_of_molar_fractions.append(molar_tmp[j]) return list_of_molar_fractions - output = str(self.settings)+"\n" + output = str(self.settings) + "\n" gasSpecies = self.mechanism.gas_species() - gasSpecies.extend( self.cluster_expansion.gas_species() ) + gasSpecies.extend(self.cluster_expansion.gas_species()) gasSpecies.remove_duplicates() - if( len(gasSpecies) == 0 ): - output += "n_gas_species "+str(len(gasSpecies))+"\n\n" + if len(gasSpecies) == 0: + output += "n_gas_species " + str(len(gasSpecies)) + "\n\n" else: output += str(gasSpecies) molar_frac_list = get_molar_fractions(self.settings, gasSpecies) surfaceSpecies = self.mechanism.species() - surfaceSpecies.extend( self.cluster_expansion.surface_species() ) + surfaceSpecies.extend(self.cluster_expansion.surface_species()) surfaceSpecies.remove_duplicates() - if( len(molar_frac_list)>0 ): - output += "gas_molar_fracs " + ''.join([" %20.10e"%elem for elem in molar_frac_list]) + "\n\n" - output += str(surfaceSpecies)+"\n" + if len(molar_frac_list) > 0: + output += "gas_molar_fracs " + "".join([" %20.10e" % elem for elem in molar_frac_list]) + "\n\n" + output += str(surfaceSpecies) + "\n" output += "\n" output += "finish" return output - def get_lattice_input(self): """ Return a string with the content of the lattice_input.dat file. """ return str(self.lattice) - def get_energetics_input(self): """ Return a string with the content of the energetics_input.dat file. 
""" return str(self.cluster_expansion) - def get_mechanism_input(self): """ Returns a string with the content of the mechanism_input.dat file """ return str(self.mechanism) - def get_initial_state_input(self): """ Returns a string with the content of the state_input.dat file """ output = "" - if( self.initial_state is not None ): + if self.initial_state is not None: output = str(self.initial_state) return output - def get_restart_input(self): """ Returns a string with the content of the restart.inf file """ output = "" - if( self._restart_file_content is not None ): + if self._restart_file_content is not None: for line in self._restart_file_content: output += line return output - def check(self): """ Look for the normal termination signal in the output. Note, that it does not mean your calculation was successful! """ - lines = self.results.grep_file(self.results._filenames['general'], pattern='> Normal termination <') + lines = self.results.grep_file(self.results._filenames["general"], pattern="> Normal termination <") return len(lines) > 0 - def surface_poisoned(self): """ Returns true in the case the "Warning code 801002" is find in the output. """ - lines = self.results.grep_file(self.results._filenames['general'], pattern='Warning code 801002 .* this may indicate that the surface is poisoned') + lines = self.results.grep_file( + self.results._filenames["general"], + pattern="Warning code 801002 .* this may indicate that the surface is poisoned", + ) return len(lines) > 0 - def restart_aborted(self): """ Returns true in the case the "Restart aborted:" is find in the output. """ - lines = self.results.grep_file(self.results._filenames['general'], pattern='Restart aborted:') + lines = self.results.grep_file(self.results._filenames["general"], pattern="Restart aborted:") return len(lines) > 0 - def get_runscript(self): """ Generate a runscript for slurm @@ -288,46 +284,46 @@ def get_runscript(self): ``name`` is taken from the class attribute ``_command``. ``-n`` flag is added if ``settings.runscript.nproc`` exists. ``[>jobname.out]`` is used based on ``settings.runscript.stdout_redirect``. 
""" path = shutil.which(self._command) - if( path is None ): raise ZacrosExecutableNotFoundError( self._command ) + if path is None: + raise ZacrosExecutableNotFoundError(self._command) s = self.settings.runscript - ret = '#!/bin/bash\n' - ret += '\n' - ret += 'export OMP_NUM_THREADS='+str(s.get('nproc',1)) - ret += '\n' + ret = "#!/bin/bash\n" + ret += "\n" + ret += "export OMP_NUM_THREADS=" + str(s.get("nproc", 1)) + ret += "\n" ret += path - if( self._restart_file_content is not None and 'restart' in self.settings ): - if( 'max_time' in self.settings['restart'] ): - ret += ' --max_time='+str(self.settings.restart.max_time) - if( 'max_steps' in self.settings['restart'] ): - ret += ' --max_steps='+str(self.settings.restart.max_steps) - if( 'wall_time' in self.settings['restart'] ): - ret += ' --wall_time='+str(self.settings.restart.wall_time) + if self._restart_file_content is not None and "restart" in self.settings: + if "max_time" in self.settings["restart"]: + ret += " --max_time=" + str(self.settings.restart.max_time) + if "max_steps" in self.settings["restart"]: + ret += " --max_steps=" + str(self.settings.restart.max_steps) + if "wall_time" in self.settings["restart"]: + ret += " --wall_time=" + str(self.settings.restart.wall_time) if s.stdout_redirect: - ret += ' >"{}"'.format(ZacrosJob._filenames['out']) - ret += '\n' + ret += ' >"{}"'.format(ZacrosJob._filenames["out"]) + ret += "\n" return ret - def _get_ready(self): """ Create inputs and runscript files in the job folder. Filenames correspond to entries in the `_filenames` attribute """ - simulation = os.path.join(self.path, ZacrosJob._filenames['simulation']) - lattice = os.path.join(self.path, ZacrosJob._filenames['lattice']) - energetics = os.path.join(self.path, ZacrosJob._filenames['energetics']) - mechanism = os.path.join(self.path, ZacrosJob._filenames['mechanism']) - state = os.path.join(self.path, ZacrosJob._filenames['state']) - restart = os.path.join(self.path, ZacrosJob._filenames['restart']) + simulation = os.path.join(self.path, ZacrosJob._filenames["simulation"]) + lattice = os.path.join(self.path, ZacrosJob._filenames["lattice"]) + energetics = os.path.join(self.path, ZacrosJob._filenames["energetics"]) + mechanism = os.path.join(self.path, ZacrosJob._filenames["mechanism"]) + state = os.path.join(self.path, ZacrosJob._filenames["state"]) + restart = os.path.join(self.path, ZacrosJob._filenames["restart"]) - runfile = os.path.join(self.path, ZacrosJob._filenames['run']) - #err = os.path.join(self.path, ZacrosJob._filenames['err']) - #out = os.path.join(self.path, ZacrosJob._filenames['out']) + runfile = os.path.join(self.path, ZacrosJob._filenames["run"]) + # err = os.path.join(self.path, ZacrosJob._filenames['err']) + # out = os.path.join(self.path, ZacrosJob._filenames['out']) with open(simulation, "w") as inp: inp.write(self.get_simulation_input()) @@ -345,11 +341,11 @@ def _get_ready(self): with open(state, "w") as inp: inp.write(self.get_initial_state_input()) - if( self._restart_file_content is not None ): - with open(restart, 'w') as inp: + if self._restart_file_content is not None: + with open(restart, "w") as inp: inp.write(self.get_restart_input()) - with open(runfile, 'w') as run: + with open(runfile, "w") as run: run.write(self.get_runscript()) os.chmod(runfile, os.stat(runfile).st_mode | stat.S_IEXEC) @@ -360,49 +356,48 @@ def __str__(self): """ output = "" - output += "---------------------------------------------------------------------"+"\n" - output += ZacrosJob._filenames['simulation']+"\n" - 
output += "---------------------------------------------------------------------"+"\n" + output += "---------------------------------------------------------------------" + "\n" + output += ZacrosJob._filenames["simulation"] + "\n" + output += "---------------------------------------------------------------------" + "\n" output += self.get_simulation_input() output += "\n" - output += "---------------------------------------------------------------------"+"\n" - output += ZacrosJob._filenames['lattice']+"\n" - output += "---------------------------------------------------------------------"+"\n" + output += "---------------------------------------------------------------------" + "\n" + output += ZacrosJob._filenames["lattice"] + "\n" + output += "---------------------------------------------------------------------" + "\n" output += self.get_lattice_input() output += "\n" - output += "---------------------------------------------------------------------"+"\n" - output += ZacrosJob._filenames['energetics']+"\n" - output += "---------------------------------------------------------------------"+"\n" + output += "---------------------------------------------------------------------" + "\n" + output += ZacrosJob._filenames["energetics"] + "\n" + output += "---------------------------------------------------------------------" + "\n" output += self.get_energetics_input() output += "\n" - output += "---------------------------------------------------------------------"+"\n" - output += ZacrosJob._filenames['mechanism']+"\n" - output += "---------------------------------------------------------------------"+"\n" + output += "---------------------------------------------------------------------" + "\n" + output += ZacrosJob._filenames["mechanism"] + "\n" + output += "---------------------------------------------------------------------" + "\n" output += self.get_mechanism_input() - if( self.initial_state is not None ): + if self.initial_state is not None: output += "\n" - output += "---------------------------------------------------------------------"+"\n" - output += ZacrosJob._filenames['state']+"\n" - output += "---------------------------------------------------------------------"+"\n" + output += "---------------------------------------------------------------------" + "\n" + output += ZacrosJob._filenames["state"] + "\n" + output += "---------------------------------------------------------------------" + "\n" output += self.get_initial_state_input() - if( self._restart_file_content is not None ): + if self._restart_file_content is not None: output += "\n" - output += "---------------------------------------------------------------------"+"\n" - output += ZacrosJob._filenames['restart']+"\n" - output += "---------------------------------------------------------------------"+"\n" + output += "---------------------------------------------------------------------" + "\n" + output += ZacrosJob._filenames["restart"] + "\n" + output += "---------------------------------------------------------------------" + "\n" for line in self._restart_file_content: output += line return output - @staticmethod - def __recreate_simulation_input( path ): + def __recreate_simulation_input(path): """ Recreates the simulation input for the corresponding job based on file 'simulation_input.dat' present in the job folder. This method is used by :func:~scm.pyzacros.ZacrosJob.load_external. 
@@ -410,115 +405,125 @@ def __recreate_simulation_input( path ): """ sett = Settings() - with open( path+"/"+ZacrosJob._filenames['simulation'], "r" ) as inp: + with open(path + "/" + ZacrosJob._filenames["simulation"], "r") as inp: file_content = inp.readlines() - file_content = [line for line in file_content if line.strip()] # Removes empty lines + file_content = [line for line in file_content if line.strip()] # Removes empty lines for line in file_content: tokens = line.split() - if( len(tokens)<2 ): continue + if len(tokens) < 2: + continue - def process_scheme( sv ): - if( sv[1]=="event" ): - if( len(sv)<3 ): - return sv[1],1 + def process_scheme(sv): + if sv[1] == "event": + if len(sv) < 3: + return sv[1], 1 else: - return sv[1],int(sv[2]) - elif( sv[1]=="elemevent" ): - return sv[1],int(sv[2]) - elif( sv[1]=="time" ): - return sv[1],float(sv[2]) - elif( sv[1]=="logtime" ): - return sv[1],float(sv[2]),float(sv[3]) - elif( sv[1]=="realtime" ): - return sv[1],float(sv[2]) + return sv[1], int(sv[2]) + elif sv[1] == "elemevent": + return sv[1], int(sv[2]) + elif sv[1] == "time": + return sv[1], float(sv[2]) + elif sv[1] == "logtime": + return sv[1], float(sv[2]), float(sv[3]) + elif sv[1] == "realtime": + return sv[1], float(sv[2]) else: - raise Exception( "Error: Keyword "+str(sv)+" in file "+ZacrosJob._filenames['simulation']+" is not supported!" ) + raise Exception( + "Error: Keyword " + + str(sv) + + " in file " + + ZacrosJob._filenames["simulation"] + + " is not supported!" + ) # Specific conversion rules cases = { - "random_seed" : lambda sv: sett.setdefault("random_seed", int(sv[0])), - "temperature" : lambda sv: sett.setdefault("temperature", float(sv[0])), - "pressure" : lambda sv: sett.setdefault("pressure", float(sv[0])), - - "n_gas_species" : lambda sv: sett.setdefault("n_gas_species", int(sv[0])), - "gas_specs_names" : lambda sv: sett.setdefault("gas_specs_names", sv), - "gas_energies" : lambda sv: sett.setdefault("gas_energies", [float(a) for a in sv]), - "gas_molec_weights" : lambda sv: sett.setdefault("gas_molec_weights", [float(a) for a in sv]), - "gas_molar_fracs" : lambda sv: sett.setdefault("gas_molar_fracs", [float(a) for a in sv]), - - "n_surf_species" : lambda sv: sett.setdefault("n_surf_species", int(sv[0])), - "surf_specs_names" : lambda sv: sett.setdefault("surf_specs_names", sv), - "surf_specs_dent" : lambda sv: sett.setdefault("surf_specs_dent", [int(a) for a in sv]), - - "snapshots" : lambda sv: sett.setdefault("snapshots", process_scheme(sv) ), - "process_statistics" : lambda sv: sett.setdefault("process_statistics", process_scheme(sv) ), - "species_numbers" : lambda sv: sett.setdefault("species_numbers", process_scheme(sv) ), - "event_report" : lambda sv: sett.setdefault("event_report", sv[0]), - "max_steps" : lambda sv: sett.setdefault("max_steps", sv[0] if sv[0]=='infinity' else int(sv[0])), - "max_time" : lambda sv: sett.setdefault("max_time", float(sv[0])), - "wall_time" : lambda sv: sett.setdefault("wall_time", int(sv[0])), - - "override_array_bounds" : lambda sv: sett.setdefault("override_array_bounds", ' '.join(sv)) + "random_seed": lambda sv: sett.setdefault("random_seed", int(sv[0])), + "temperature": lambda sv: sett.setdefault("temperature", float(sv[0])), + "pressure": lambda sv: sett.setdefault("pressure", float(sv[0])), + "n_gas_species": lambda sv: sett.setdefault("n_gas_species", int(sv[0])), + "gas_specs_names": lambda sv: sett.setdefault("gas_specs_names", sv), + "gas_energies": lambda sv: sett.setdefault("gas_energies", [float(a) for a in 
sv]), + "gas_molec_weights": lambda sv: sett.setdefault("gas_molec_weights", [float(a) for a in sv]), + "gas_molar_fracs": lambda sv: sett.setdefault("gas_molar_fracs", [float(a) for a in sv]), + "n_surf_species": lambda sv: sett.setdefault("n_surf_species", int(sv[0])), + "surf_specs_names": lambda sv: sett.setdefault("surf_specs_names", sv), + "surf_specs_dent": lambda sv: sett.setdefault("surf_specs_dent", [int(a) for a in sv]), + "snapshots": lambda sv: sett.setdefault("snapshots", process_scheme(sv)), + "process_statistics": lambda sv: sett.setdefault("process_statistics", process_scheme(sv)), + "species_numbers": lambda sv: sett.setdefault("species_numbers", process_scheme(sv)), + "event_report": lambda sv: sett.setdefault("event_report", sv[0]), + "max_steps": lambda sv: sett.setdefault("max_steps", sv[0] if sv[0] == "infinity" else int(sv[0])), + "max_time": lambda sv: sett.setdefault("max_time", float(sv[0])), + "wall_time": lambda sv: sett.setdefault("wall_time", int(sv[0])), + "override_array_bounds": lambda sv: sett.setdefault("override_array_bounds", " ".join(sv)), } - value = cases.get( tokens[0], lambda sv: None )( tokens[1:] ) - - if( value is None ): - raise Exception( "Error: Keyword "+tokens[0]+" in file "+ZacrosJob._filenames['simulation']+" is not supported!" ) - - if( sett.get("gas_molar_fracs") is not None ): + value = cases.get(tokens[0], lambda sv: None)(tokens[1:]) + + if value is None: + raise Exception( + "Error: Keyword " + + tokens[0] + + " in file " + + ZacrosJob._filenames["simulation"] + + " is not supported!" + ) + + if sett.get("gas_molar_fracs") is not None: # Special case molar_fractions sett["molar_fraction"] = {} - for i,spn in enumerate(sett["gas_specs_names"]): + for i, spn in enumerate(sett["gas_specs_names"]): sett["molar_fraction"][spn] = sett["gas_molar_fracs"][i] del sett["gas_molar_fracs"] return sett - @staticmethod - def __recreate_lattice_input( path ): + def __recreate_lattice_input(path): """ Recreates the lattice input for the corresponding job based on file 'lattice_input.dat' present in the job folder. This method is used by :func:~scm.pyzacros.ZacrosJob.load_external. Returns a :func:~scm.pyzacros.Lattice object. """ - return Lattice( fileName=path+"/"+ZacrosJob._filenames['lattice'] ) - + return Lattice(fileName=path + "/" + ZacrosJob._filenames["lattice"]) @staticmethod - def __recreate_energetics_input( path, gas_species, surface_species ): + def __recreate_energetics_input(path, gas_species, surface_species): """ Recreates the energetics input for the corresponding job based on file 'energetics_input.dat' present in the job folder. This method is used by :func:~scm.pyzacros.ZacrosJob.load_external. Returns a list of :func:~scm.pyzacros.Cluster objects. """ - return ClusterExpansion( fileName=path+"/"+ZacrosJob._filenames['energetics'], surface_species=surface_species ) - + return ClusterExpansion( + fileName=path + "/" + ZacrosJob._filenames["energetics"], surface_species=surface_species + ) @staticmethod - def __recreate_mechanism_input( path, gas_species, surface_species ): + def __recreate_mechanism_input(path, gas_species, surface_species): """ Recreates the mechanism input for the corresponding job based on file 'mechanism_input.dat' present in the job folder. This method is used by :func:~scm.pyzacros.ZacrosJob.load_external. Returns a :func:~scm.pyzacros.Mechanism object. 
""" - return Mechanism( fileName=path+"/"+ZacrosJob._filenames['mechanism'], gas_species=gas_species, surface_species=surface_species ) - + return Mechanism( + fileName=path + "/" + ZacrosJob._filenames["mechanism"], + gas_species=gas_species, + surface_species=surface_species, + ) @staticmethod - def __recreate_initial_state_input( lattice ): + def __recreate_initial_state_input(lattice): """ Recreates the initial state input for the corresponding job based on file 'initial_state_input.dat' present in the job folder. This method is used by :func:~scm.pyzacros.ZacrosJob.load_external. Returns a :func:~scm.pyzacros.LatticeState object """ - raise Exception( "Error: __recreate_initial_state_input function is not implmented yet!" ) + raise Exception("Error: __recreate_initial_state_input function is not implmented yet!") lattice_state = LatticeState() return lattice_state - @classmethod def load_external(cls, path, settings=None, finalize=False, restart=None): """ @@ -535,32 +540,48 @@ def load_external(cls, path, settings=None, finalize=False, restart=None): * ``finalize`` -- If ``finalize`` is ``False``, the status of the returned job is *copied* and results will be restored too. Otherwise, results will not be restored, so the job will need to be executed again. * ``restart`` -- Selects the ``ZacrosJob`` to restart from """ - if( not os.path.isdir(path) ): - raise FileNotFoundError('Path {} does not exist, cannot load from it.'.format(path)) + if not os.path.isdir(path): + raise FileNotFoundError("Path {} does not exist, cannot load from it.".format(path)) path = os.path.abspath(path) jobname = os.path.basename(path) - sett = ZacrosJob.__recreate_simulation_input( path ) + sett = ZacrosJob.__recreate_simulation_input(path) gas_species = SpeciesList() for i in range(len(sett["gas_specs_names"])): - gas_species.append( Species( symbol=sett["gas_specs_names"][i], gas_energy=sett["gas_energies"][i], kind=Species.GAS, mass=sett["gas_molec_weights"][i] ) ) + gas_species.append( + Species( + symbol=sett["gas_specs_names"][i], + gas_energy=sett["gas_energies"][i], + kind=Species.GAS, + mass=sett["gas_molec_weights"][i], + ) + ) surface_species = SpeciesList() - surface_species.append( Species( "*", 1 ) ) # Empty adsorption site + surface_species.append(Species("*", 1)) # Empty adsorption site for i in range(len(sett["surf_specs_names"])): - surface_species.append( Species( symbol=sett["surf_specs_names"][i], denticity=sett["surf_specs_dent"][i], kind=Species.SURFACE ) ) - - lattice = ZacrosJob.__recreate_lattice_input( path ) - cluster_expansion = ZacrosJob.__recreate_energetics_input( path, gas_species, surface_species ) - mechanism = ZacrosJob.__recreate_mechanism_input( path, gas_species, surface_species ) - initial_state= None #TODO - - job = cls( settings=sett, lattice=lattice, mechanism=mechanism, cluster_expansion=cluster_expansion, initial_state=initial_state, name=jobname ) + surface_species.append( + Species(symbol=sett["surf_specs_names"][i], denticity=sett["surf_specs_dent"][i], kind=Species.SURFACE) + ) + + lattice = ZacrosJob.__recreate_lattice_input(path) + cluster_expansion = ZacrosJob.__recreate_energetics_input(path, gas_species, surface_species) + mechanism = ZacrosJob.__recreate_mechanism_input(path, gas_species, surface_species) + initial_state = None # TODO + + job = cls( + settings=sett, + lattice=lattice, + mechanism=mechanism, + cluster_expansion=cluster_expansion, + initial_state=initial_state, + name=jobname, + ) job.path = path - job.status = 'copied' + job.status = 
"copied" job.results.collect() if finalize: diff --git a/core/ZacrosParametersScanJob.py b/core/ZacrosParametersScanJob.py index fada934..ef1d35e 100644 --- a/core/ZacrosParametersScanJob.py +++ b/core/ZacrosParametersScanJob.py @@ -12,10 +12,10 @@ from .ZacrosSteadyStateJob import * from .ParametersBase import * -__all__ = ['ZacrosParametersScanJob', 'ZacrosParametersScanResults'] +__all__ = ["ZacrosParametersScanJob", "ZacrosParametersScanResults"] -class ZacrosParametersScanResults( scm.plams.Results ): +class ZacrosParametersScanResults(scm.plams.Results): """ A Class for handling ZacrosParametersScanJob Results. """ @@ -34,7 +34,6 @@ def indices(self): """ return self.job._indices - def children_results(self, child_id=None): """ Returns the children results in a dictionary form. @@ -50,14 +49,13 @@ def children_results(self, child_id=None): if child_id is None: output = {} - for pos,idx in enumerate(self.job._indices): + for pos, idx in enumerate(self.job._indices): output[idx] = self.job.children[idx].results return output else: return self.job.children[child_id].results - def turnover_frequency(self, nbatch=20, confidence=0.99, ignore_nbatch=1, update=None): """ Return a list with values related to the calculation of the turnover frequency for the gas species. @@ -84,30 +82,35 @@ def turnover_frequency(self, nbatch=20, confidence=0.99, ignore_nbatch=1, update else: output = [] - for pos,idx in enumerate(self.job._indices): + for pos, idx in enumerate(self.job._indices): params = self.job._parameters_values[idx] - if pos==0 and isinstance(self.job.children[idx],ZacrosSteadyStateJob): + if pos == 0 and isinstance(self.job.children[idx], ZacrosSteadyStateJob): nbatch = self.job.children[idx].nbatch confidence = self.job.children[idx].confidence ignore_nbatch = self.job.children[idx].ignore_nbatch - TOFs,errors,ratio,converged = self.job.children[idx].results.turnover_frequency( nbatch=nbatch, - confidence=confidence, - ignore_nbatch=ignore_nbatch ) + TOFs, errors, ratio, converged = self.job.children[idx].results.turnover_frequency( + nbatch=nbatch, confidence=confidence, ignore_nbatch=ignore_nbatch + ) if update: - output[pos]['turnover_frequency'] = TOFs - output[pos]['turnover_frequency_error'] = errors - output[pos]['turnover_frequency_ratio'] = ratio - output[pos]['turnover_frequency_converged'] = converged + output[pos]["turnover_frequency"] = TOFs + output[pos]["turnover_frequency_error"] = errors + output[pos]["turnover_frequency_ratio"] = ratio + output[pos]["turnover_frequency_converged"] = converged else: - output.append( {**params, 'turnover_frequency':TOFs, 'turnover_frequency_error':errors, - 'turnover_frequency_converged':converged} ) + output.append( + { + **params, + "turnover_frequency": TOFs, + "turnover_frequency_error": errors, + "turnover_frequency_converged": converged, + } + ) return output - def average_coverage(self, last=5, update=None): """ Return a list with values related to the calculation of the average coverage for the adsorbed species. 
@@ -127,19 +130,19 @@ def average_coverage(self, last=5, update=None): else: output = [] - for pos,idx in enumerate(self.job._indices): + for pos, idx in enumerate(self.job._indices): params = self.job._parameters_values[idx] - acf = self.job.children[idx].results.average_coverage( last=last ) + acf = self.job.children[idx].results.average_coverage(last=last) if update: - output[pos]['average_coverage'] = acf + output[pos]["average_coverage"] = acf else: - output.append( {**params, 'average_coverage':acf} ) + output.append({**params, "average_coverage": acf}) return output -class ZacrosParametersScanJob( scm.plams.MultiJob ): +class ZacrosParametersScanJob(scm.plams.MultiJob): """ Creates a new ZacrosParametersScanJob object. This class is a job that is a container for other jobs, called children jobs and it is an extension of the `PLAMS.MultiJob <../../plams/components/jobs.html#multijobs>`_. Children are copies of a reference @@ -155,6 +158,7 @@ class Parameter(ParameterBase): """ Creates a new Parameter object specifically tailored for ZacrosParametersScanJob """ + def __init__(self, name_in_settings, kind, values): super().__init__(self, name_in_settings, kind, values) @@ -162,54 +166,65 @@ class Parameters(ParametersBase): """ Creates a new Parameters object specifically tailored for ZacrosParametersScanJob """ + def __init__(self, *args, **kwargs): super().__init__(self, *args, **kwargs) - _result_type = ZacrosParametersScanResults - def __init__(self, reference, parameters=None, **kwargs): scm.plams.MultiJob.__init__(self, children=OrderedDict(), **kwargs) self._indices = None self._parameters_values = None - if isinstance(reference,ZacrosJob): - self._indices,self._parameters_values,settings_list = parameters._generator( reference.settings, parameters ) - elif isinstance(reference,ZacrosSteadyStateJob): - self._indices,self._parameters_values,settings_list = parameters._generator( reference._reference.settings, parameters ) + if isinstance(reference, ZacrosJob): + self._indices, self._parameters_values, settings_list = parameters._generator( + reference.settings, parameters + ) + elif isinstance(reference, ZacrosSteadyStateJob): + self._indices, self._parameters_values, settings_list = parameters._generator( + reference._reference.settings, parameters + ) else: - msg = "\n### ERROR ### ZacrosParametersScanJob.__init__.\n" + msg = "\n### ERROR ### ZacrosParametersScanJob.__init__.\n" msg += " Parameter 'reference' should be a ZacrosJob or ZacrosSteadyStateJob object.\n" raise Exception(msg) - for i,(idx,settings_idx) in enumerate(settings_list.items()): + for i, (idx, settings_idx) in enumerate(settings_list.items()): - new_name = "ps_cond"+"%03d"%i + new_name = "ps_cond" + "%03d" % i - if isinstance(reference,ZacrosJob): + if isinstance(reference, ZacrosJob): - job = ZacrosJob( settings=settings_idx, lattice=reference.lattice, mechanism=reference.mechanism, \ - cluster_expansion=reference.cluster_expansion, initial_state=reference.initial_state, \ - restart=reference.restart, name=new_name ) + job = ZacrosJob( + settings=settings_idx, + lattice=reference.lattice, + mechanism=reference.mechanism, + cluster_expansion=reference.cluster_expansion, + initial_state=reference.initial_state, + restart=reference.restart, + name=new_name, + ) - elif isinstance(reference,ZacrosSteadyStateJob): + elif isinstance(reference, ZacrosSteadyStateJob): new_reference = copy.copy(reference._reference) new_reference.settings = settings_idx - job = ZacrosSteadyStateJob( settings=reference.settings, 
reference=new_reference, - parameters=reference._parameters, name=new_name ) - - self.children[ idx ] = job + job = ZacrosSteadyStateJob( + settings=reference.settings, + reference=new_reference, + parameters=reference._parameters, + name=new_name, + ) + self.children[idx] = job def check(self): return all([job.ok() for job in self.children.values()]) - @staticmethod - def zipGenerator( reference_settings, parameters ): + def zipGenerator(reference_settings, parameters): """ This function combines the values of the parameters one-to-one following the order as they were defined @@ -235,11 +250,10 @@ def zipGenerator( reference_settings, parameters ): 2: {'x_CO': 0.5, 'x_O2': 0.5} 3: {'x_CO': 0.75, 'x_O2': 0.25} """ - return ZacrosParametersScanJob.Parameters.zipGenerator( reference_settings, parameters ) - + return ZacrosParametersScanJob.Parameters.zipGenerator(reference_settings, parameters) @staticmethod - def meshgridGenerator( reference_settings, parameters ): + def meshgridGenerator(reference_settings, parameters): """ This function combines the values of the parameters creating an `n-`dimensional rectangular grid, being `n` the number of parameters. Meshgrid generator is inspired by ``numpy.meshgrid`` function. @@ -274,17 +288,17 @@ def meshgridGenerator( reference_settings, parameters ): """ independent_params = [] - for name,item in parameters.items(): + for name, item in parameters.items(): if item.kind == ZacrosParametersScanJob.Parameter.INDEPENDENT: - independent_params.append( item.values ) + independent_params.append(item.values) if len(item.values) == 0: - msg = "\n### ERROR ### ZacrosParametersScanJob.meshgridGenerator().\n" + msg = "\n### ERROR ### ZacrosParametersScanJob.meshgridGenerator().\n" msg += " All parameter in 'generator_parameters' should be lists with at least one element.\n" raise Exception(msg) - mesh = numpy.meshgrid( *independent_params, sparse=False ) + mesh = numpy.meshgrid(*independent_params, sparse=False) - indices = [ tuple(idx) for idx in numpy.ndindex(mesh[0].shape) ] + indices = [tuple(idx) for idx in numpy.ndindex(mesh[0].shape)] parameters_values = {} settings_list = {} @@ -292,19 +306,19 @@ def meshgridGenerator( reference_settings, parameters ): settings_idx = reference_settings.copy() params = {} - for i,(name,item) in enumerate(parameters.items()): + for i, (name, item) in enumerate(parameters.items()): if item.kind == ZacrosParametersScanJob.Parameter.INDEPENDENT: value = mesh[i][idx] - eval('settings_idx'+item.name2setitem().replace('$var_value',str(value))) + eval("settings_idx" + item.name2setitem().replace("$var_value", str(value))) params[name] = value - for i,(name,item) in enumerate(parameters.items()): + for i, (name, item) in enumerate(parameters.items()): if item.kind == ZacrosParametersScanJob.Parameter.DEPENDENT: value = item.values(params) - eval('settings_idx'+item.name2setitem().replace('$var_value',str(value))) + eval("settings_idx" + item.name2setitem().replace("$var_value", str(value))) params[name] = value parameters_values[idx] = params settings_list[idx] = settings_idx - return indices,parameters_values,settings_list + return indices, parameters_values, settings_list diff --git a/core/ZacrosResults.py b/core/ZacrosResults.py index 85decc5..75385c7 100644 --- a/core/ZacrosResults.py +++ b/core/ZacrosResults.py @@ -15,56 +15,60 @@ from .LatticeState import * from .Settings import * -__all__ = ['ZacrosResults'] +__all__ = ["ZacrosResults"] -class ZacrosResults( scm.plams.Results ): + +class 
ZacrosResults(scm.plams.Results): """ A Class for handling Zacros Results. """ _filenames = { - 'general': 'general_output.txt', - 'history': 'history_output.txt', - 'lattice': 'lattice_output.txt', - 'procstat': 'procstat_output.txt', - 'specnum': 'specnum_output.txt', - 'restart': 'restart.inf', - 'err': 'std.err', - 'out': 'std.out'} - + "general": "general_output.txt", + "history": "history_output.txt", + "lattice": "lattice_output.txt", + "procstat": "procstat_output.txt", + "specnum": "specnum_output.txt", + "restart": "restart.inf", + "err": "std.err", + "out": "std.out", + } def get_zacros_version(self): """ Returns the zacros's version from the 'general_output.txt' file. """ - if( self.job.restart is None ): - lines = self.grep_file(self._filenames['general'], pattern='ZACROS') + if self.job.restart is None: + lines = self.grep_file(self._filenames["general"], pattern="ZACROS") - if( len(lines) > 0 ): + if len(lines) > 0: zversion = lines[0].split()[2] else: - lines = self.grep_file(self._filenames['restart'], pattern='Version') - zversion = float(lines[0].split()[1])/1e5 + lines = self.grep_file(self._filenames["restart"], pattern="Version") + zversion = float(lines[0].split()[1]) / 1e5 else: - lines = self.grep_file(self._filenames['restart'], pattern='Version') - zversion = float(lines[0].split()[1])/1e5 + lines = self.grep_file(self._filenames["restart"], pattern="Version") + zversion = float(lines[0].split()[1]) / 1e5 return float(zversion) - def get_reaction_network(self): """ Returns the reactions from the 'general_output.txt' file. """ - lines = self.get_file_chunk(self._filenames['general'], begin="Reaction network:", end="Finished reading mechanism input.") + lines = self.get_file_chunk( + self._filenames["general"], begin="Reaction network:", end="Finished reading mechanism input." + ) reaction_network = {} for line in lines: - if( not line.strip() or line.find("A(Tini)") == -1 ): continue - reaction_network[ line.split()[1].replace(':','') ] = line[line.find("Reaction:")+len("Reaction:"):].strip().replace(' ',' ') + if not line.strip() or line.find("A(Tini)") == -1: + continue + reaction_network[line.split()[1].replace(":", "")] = ( + line[line.find("Reaction:") + len("Reaction:") :].strip().replace(" ", " ") + ) return reaction_network - def provided_quantities_names(self): """ Returns the provided quantities headers from the ``specnum_output.txt`` file in a list. @@ -85,15 +89,14 @@ def provided_quantities_names(self): [ 'Entry', 'Nevents', 'Time', 'Temperature', 'Energy', 'O*', 'CO*', 'O2', 'CO', 'CO2' ] """ quantities = None - if( self.job.restart is None ): - lines = self.awk_file(self._filenames['specnum'],script='(NR==1){print $0}') + if self.job.restart is None: + lines = self.awk_file(self._filenames["specnum"], script="(NR==1){print $0}") names = lines[0].split() else: names = self.job.restart.results.provided_quantities_names() return names - def provided_quantities(self): """ Returns the provided quantities from the ``specnum_output.txt`` file in a form of a dictionary. 
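A hedged sketch of reading results through ZacrosResults, continuing from the job built in the earlier sketch. Running requires the zacros executable; the init()/finish() workflow and the assumption that the returned TOFs are a dictionary keyed by gas species come from the package's examples, while the keys 'Time' and 'CO2' follow the header example given above.

    pz.init()
    results = job.run()
    pz.finish()

    if job.ok():
        pq = results.provided_quantities()      # dict keyed by the specnum_output.txt headers
        print(pq["Time"][-5:], pq["CO2"][-5:])  # last few times and CO2 molecule counts

        # CO2 production per lattice site, plus a turnover-frequency estimate
        # (TOFs assumed to be a dict keyed by gas species, as in the package examples).
        data = results.molecule_numbers(["CO2"], normalize_per_site=True)
        TOFs, errors, ratio, converged = results.turnover_frequency()
        print(TOFs["CO2"], converged)
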
@@ -127,7 +130,7 @@ def provided_quantities(self): quantities = None names = None - if( self.job.restart is None ): + if self.job.restart is None: names = self.provided_quantities_names() quantities = {} @@ -137,146 +140,147 @@ def provided_quantities(self): quantities = self.job.restart.results.provided_quantities() names = list(quantities.keys()) - if( self.job.restart is None ): - lines = self.awk_file(self._filenames['specnum'],script='(NR>1){print $0}') + if self.job.restart is None: + lines = self.awk_file(self._filenames["specnum"], script="(NR>1){print $0}") else: - lines = self.awk_file(self._filenames['specnum'],script='{print $0}') + lines = self.awk_file(self._filenames["specnum"], script="{print $0}") for line in lines: - for i,token in enumerate(line.split()): + for i, token in enumerate(line.split()): # Specific conversion rules cases = { - "Time" : lambda sv: float(sv), - "Temperature" : lambda sv: float(sv), - "Energy" : lambda sv: float(sv) + "Time": lambda sv: float(sv), + "Temperature": lambda sv: float(sv), + "Energy": lambda sv: float(sv), } # Notice that by default values are considered integers - value = cases.get( names[i], lambda sv: int(sv) )( token.strip() ) - quantities[ names[i] ].append( value ) + value = cases.get(names[i], lambda sv: int(sv))(token.strip()) + quantities[names[i]].append(value) return quantities - def number_of_lattice_sites(self): """ Returns the number of lattice sites from the 'general_output.txt' file. """ zversion = self.get_zacros_version() - if( self.job.restart is not None ): + if self.job.restart is not None: nsites = self.job.restart.results.number_of_lattice_sites() else: - if( zversion >= 2.0 and zversion < 3.0 ): - lines = self.grep_file(self._filenames['general'], pattern='Number of lattice sites:') - nsites = lines[0][ lines[0].find('Number of lattice sites:')+len("Number of lattice sites:"): ] - elif( zversion >= 3.0 ): - lines = self.grep_file(self._filenames['general'], pattern='Total number of lattice sites:') - nsites = lines[0][ lines[0].find('Total number of lattice sites:')+len("Total number of lattice sites:"): ] + if zversion >= 2.0 and zversion < 3.0: + lines = self.grep_file(self._filenames["general"], pattern="Number of lattice sites:") + nsites = lines[0][lines[0].find("Number of lattice sites:") + len("Number of lattice sites:") :] + elif zversion >= 3.0: + lines = self.grep_file(self._filenames["general"], pattern="Total number of lattice sites:") + nsites = lines[0][ + lines[0].find("Total number of lattice sites:") + len("Total number of lattice sites:") : + ] else: - raise Exception( "Error: Zacros version "+str(zversion)+" not supported!" ) + raise Exception("Error: Zacros version " + str(zversion) + " not supported!") nsites = int(nsites) return nsites - def gas_species_names(self): """ Returns the gas species names from the 'general_output.txt' file. 
""" output = [] - lines = self.grep_file(self._filenames['general'], pattern='Gas species names:') + lines = self.grep_file(self._filenames["general"], pattern="Gas species names:") - if( len(lines) != 0 ): - output = lines[0][ lines[0].find('Gas species names:')+len("Gas species names:"): ].split() + if len(lines) != 0: + output = lines[0][lines[0].find("Gas species names:") + len("Gas species names:") :].split() - if( self.job.restart is not None ): - output.extend( self.job.restart.results.gas_species_names() ) + if self.job.restart is not None: + output.extend(self.job.restart.results.gas_species_names()) return output - def surface_species_names(self): """ Returns the surface species names from the 'general_output.txt' file. """ output = [] - lines = self.grep_file(self._filenames['general'], pattern='Surface species names:') + lines = self.grep_file(self._filenames["general"], pattern="Surface species names:") - if( len(lines) != 0 ): - return lines[0][ lines[0].find('Surface species names:')+len("Surface species names:"): ].split() + if len(lines) != 0: + return lines[0][lines[0].find("Surface species names:") + len("Surface species names:") :].split() - if( self.job.restart is not None ): - output.extend( self.job.restart.results.surface_species_names() ) + if self.job.restart is not None: + output.extend(self.job.restart.results.surface_species_names()) return output - def site_type_names(self): """ Returns the site types from the 'general_output.txt' file. """ zversion = self.get_zacros_version() - if( zversion >= 2.0 and zversion < 3.0 ): - lines = self.get_file_chunk(self._filenames['general'], begin="Site type names and number of sites of that type:", - end='Maximum coordination number:') - elif( zversion >= 3.0 ): - lines = self.get_file_chunk(self._filenames['general'], begin="Site type names and total number of sites of that type:", - end='Maximum coordination number:') + if zversion >= 2.0 and zversion < 3.0: + lines = self.get_file_chunk( + self._filenames["general"], + begin="Site type names and number of sites of that type:", + end="Maximum coordination number:", + ) + elif zversion >= 3.0: + lines = self.get_file_chunk( + self._filenames["general"], + begin="Site type names and total number of sites of that type:", + end="Maximum coordination number:", + ) else: - raise Exception( "Error: Zacros version "+str(zversion)+" not supported!" ) + raise Exception("Error: Zacros version " + str(zversion) + " not supported!") site_types = [] for line in lines: - if( not line.strip() ): continue - site_types.append( line.split()[0] ) + if not line.strip(): + continue + site_types.append(line.split()[0]) return site_types - def number_of_snapshots(self): """ Returns the number of configurations from the 'history_output.txt' file. """ - lines = self.grep_file(self._filenames['history'], pattern='configuration') + lines = self.grep_file(self._filenames["history"], pattern="configuration") nconf = len(lines) - if( self.job.restart is not None ): + if self.job.restart is not None: nconf = self.job.restart.results.number_of_snapshots() + nconf return nconf - def number_of_process_statistics(self): """ Returns the number of process statistics from the 'procstat_output.txt' file. 
""" - lines = self.grep_file(self._filenames['procstat'], pattern='configuration') + lines = self.grep_file(self._filenames["procstat"], pattern="configuration") nconf = len(lines) - if( self.job.restart is not None ): + if self.job.restart is not None: nconf = self.job.restart.results.number_of_process_statistics() + nconf return nconf - def elementary_steps_names(self): """ Returns the names of elementary steps from the 'procstat_output.txt' file. """ - if( self.job.restart is None ): - lines = self.grep_file(self._filenames['procstat'], pattern='Overall') - names = lines[0][ lines[0].find('Overall')+len("Overall"): ].split() + if self.job.restart is None: + lines = self.grep_file(self._filenames["procstat"], pattern="Overall") + names = lines[0][lines[0].find("Overall") + len("Overall") :].split() else: names = self.job.restart.results.elementary_steps_names() return names - def lattice_states(self, last=None): """ Returns the configurations from the 'history_output.txt' file. @@ -293,43 +297,52 @@ def lattice_states(self, last=None): number_of_snapshots_to_load = total_number_of_snapshots llast = number_of_snapshots_to_load - if( last is not None ): llast = last + if last is not None: + llast = last - if( self.job.restart is not None ): + if self.job.restart is not None: prev_total_number_of_snapshots = self.job.restart.results.number_of_snapshots() - number_of_snapshots_to_load = total_number_of_snapshots-prev_total_number_of_snapshots + number_of_snapshots_to_load = total_number_of_snapshots - prev_total_number_of_snapshots - if( number_of_snapshots_to_load-llast < 0 ): - if( self.job.restart is not None ): - output = self.job.restart.results.lattice_states( last=abs(number_of_snapshots_to_load-llast) ) + if number_of_snapshots_to_load - llast < 0: + if self.job.restart is not None: + output = self.job.restart.results.lattice_states(last=abs(number_of_snapshots_to_load - llast)) else: - raise Exception("\n### ERROR ### Trying to load more snapshots ("+str(llast)+") than available ("+str(total_number_of_snapshots)+")") - - surface_species = len(surface_species_names)*[None] - for i,sname in enumerate(surface_species_names): + raise Exception( + "\n### ERROR ### Trying to load more snapshots (" + + str(llast) + + ") than available (" + + str(total_number_of_snapshots) + + ")" + ) + + surface_species = len(surface_species_names) * [None] + for i, sname in enumerate(surface_species_names): for sp in self.job.mechanism.surface_species(): - if( sname == sp.symbol ): + if sname == sp.symbol: surface_species[i] = sp - if( surface_species[i] is None ): + if surface_species[i] is None: for sp in self.job.cluster_expansion.surface_species(): - if( sname == sp.symbol ): + if sname == sp.symbol: surface_species[i] = sp - surface_species = SpeciesList( surface_species ) + surface_species = SpeciesList(surface_species) - lines = self.grep_file(self._filenames['history'], pattern='configuration', options="-A"+str(number_of_lattice_sites)) + lines = self.grep_file( + self._filenames["history"], pattern="configuration", options="-A" + str(number_of_lattice_sites) + ) lines = [line for line in lines if line != "--"] - for nconf in range(max(0,number_of_snapshots_to_load-llast),number_of_snapshots_to_load): - start = nconf*(number_of_lattice_sites+1) - end = (nconf+1)*(number_of_lattice_sites+1) + for nconf in range(max(0, number_of_snapshots_to_load - llast), number_of_snapshots_to_load): + start = nconf * (number_of_lattice_sites + 1) + end = (nconf + 1) * (number_of_lattice_sites + 1) conf_lines = 
lines[start:end] lattice_state = None - lattice_state_buffer = {} # key=adsorbate_number - for nline,line in enumerate(conf_lines): + lattice_state_buffer = {} # key=adsorbate_number + for nline, line in enumerate(conf_lines): tokens = line.split() - if( nline==0 ): + if nline == 0: assert tokens[0] == "configuration" configuration_number = int(tokens[1]) @@ -338,46 +351,48 @@ def lattice_states(self, last=None): temperature = float(tokens[4]) energy = float(tokens[5]) - add_info = {"number_of_events":number_of_events, "time":time, - "temperature":temperature, "energy":energy} - lattice_state = LatticeState( self.job.lattice, surface_species, add_info=add_info ) + add_info = { + "number_of_events": number_of_events, + "time": time, + "temperature": temperature, + "energy": energy, + } + lattice_state = LatticeState(self.job.lattice, surface_species, add_info=add_info) else: - site_number = int(tokens[0])-1 # Zacros uses arrays indexed from 1 + site_number = int(tokens[0]) - 1 # Zacros uses arrays indexed from 1 adsorbate_number = int(tokens[1]) - species_number = int(tokens[2])-1 # Zacros uses arrays indexed from 1 + species_number = int(tokens[2]) - 1 # Zacros uses arrays indexed from 1 dentation = int(tokens[3]) - if( species_number > -1 ): # In pyzacros -1 means empty site (0 for Zacros) - if( adsorbate_number not in lattice_state_buffer ): - lattice_state_buffer[adsorbate_number] = [ [site_number], species_number, dentation ] + if species_number > -1: # In pyzacros -1 means empty site (0 for Zacros) + if adsorbate_number not in lattice_state_buffer: + lattice_state_buffer[adsorbate_number] = [[site_number], species_number, dentation] else: - lattice_state_buffer[adsorbate_number][0].append( site_number ) - if( dentation > lattice_state_buffer[adsorbate_number][2] ): + lattice_state_buffer[adsorbate_number][0].append(site_number) + if dentation > lattice_state_buffer[adsorbate_number][2]: lattice_state_buffer[adsorbate_number][2] = dentation - for key,item in lattice_state_buffer.items(): - if( len(item[0]) != item[2] ): - msg = "Format error reading lattice state. Species' dentation is not compatible with the number of associated binding sites.\n" - msg += ">> adsorbate_number="+str(key)+", site_number="+str(site_number)+"\n" - msg += ">> species="+str(surface_species[item[1]])+", dentation="+str(dentation)+"\n" - raise Exception( msg ) - lattice_state.fill_site( item[0], surface_species[item[1]], update_species_numbers=False ) + for key, item in lattice_state_buffer.items(): + if len(item[0]) != item[2]: + msg = "Format error reading lattice state. Species' dentation is not compatible with the number of associated binding sites.\n" + msg += ">> adsorbate_number=" + str(key) + ", site_number=" + str(site_number) + "\n" + msg += ">> species=" + str(surface_species[item[1]]) + ", dentation=" + str(dentation) + "\n" + raise Exception(msg) + lattice_state.fill_site(item[0], surface_species[item[1]], update_species_numbers=False) - if( lattice_state is not None ): + if lattice_state is not None: lattice_state._updateSpeciesNumbers() - output.append( lattice_state ) + output.append(lattice_state) return output - def last_lattice_state(self): """ Returns the last configuration from the 'history_output.txt' file. 
""" return self.lattice_states(last=1)[0] - def average_coverage(self, last=5): """ Returns a dictionary with the average coverage fractions using the last ``last`` lattice states, e.g., ``{ "CO*":0.32, "O*":0.45 }`` @@ -389,28 +404,28 @@ def average_coverage(self, last=5): for sspecies in surface_species_names: acf[sspecies] = 0.0 - #for lattice_state in self.lattice_states(last=last): - #fractions = lattice_state.coverage_fractions() + # for lattice_state in self.lattice_states(last=last): + # fractions = lattice_state.coverage_fractions() - #for sspecies in surface_species_names: - #acf[sspecies] += fractions[sspecies]/last + # for sspecies in surface_species_names: + # acf[sspecies] += fractions[sspecies]/last provided_quantities = self.provided_quantities() - n_items = len(provided_quantities['Entry']) - nmol_total = n_items*[0] + n_items = len(provided_quantities["Entry"]) + nmol_total = n_items * [0] for i in reversed(range(n_items)): - if i==n_items-last-1: break + if i == n_items - last - 1: + break for sspecies in surface_species_names: acf[sspecies] += provided_quantities[sspecies][i] for sspecies in surface_species_names: - acf[sspecies] /= self.job.lattice.number_of_sites()*last + acf[sspecies] /= self.job.lattice.number_of_sites() * last return acf - def molecule_numbers(self, species_name, normalize_per_site=False): """ The key 'Time' is included by default and includes the time in seconds. @@ -427,17 +442,16 @@ def molecule_numbers(self, species_name, normalize_per_site=False): provided_quantities = self.provided_quantities() - data['Time'] = numpy.array(provided_quantities['Time']) + data["Time"] = numpy.array(provided_quantities["Time"]) for spn in species_name: - if( normalize_per_site ): - data[spn] = numpy.array(provided_quantities[spn])/self.number_of_lattice_sites() + if normalize_per_site: + data[spn] = numpy.array(provided_quantities[spn]) / self.number_of_lattice_sites() else: data[spn] = numpy.array(provided_quantities[spn]) return data - def plot_lattice_states(self, data, pause=-1, show=True, ax=None, close=False, time_perframe=0.5, file_name=None): """ Uses Matplotlib to create an animation of the lattice states. @@ -450,39 +464,47 @@ def plot_lattice_states(self, data, pause=-1, show=True, ax=None, close=False, t * ``time_perframe`` -- Sets the time interval between frames in seconds. * ``file_name`` -- Saves the figures to the file ``file_name-`` (the corresponding id on the list replaces the ````). The format is inferred from the extension, and by default, ``.png`` is used. """ - if( type(data) == LatticeState ): - data.plot( show=show, pause=pause, ax=ax, close=close, file_name=file_name ) - if( type(data) == list ): + if type(data) == LatticeState: + data.plot(show=show, pause=pause, ax=ax, close=close, file_name=file_name) + if type(data) == list: try: import matplotlib.pyplot as plt except ImportError as e: - return # module doesn't exist, deal with it. + return # module doesn't exist, deal with it. 
- if( ax is None ): - fig,ax = plt.subplots() + if ax is None: + fig, ax = plt.subplots() plt.rcParams["figure.autolayout"] = True - for i,ls in enumerate(data): + for i, ls in enumerate(data): ifile_name = None - if( file_name is not None ): - prefix,ext = os.path.splitext(file_name) - ifile_name = prefix+"-"+"%05d"%i+ext + if file_name is not None: + prefix, ext = os.path.splitext(file_name) + ifile_name = prefix + "-" + "%05d" % i + ext ax.cla() - ls.plot( show=show, pause=time_perframe, ax=ax, close=False, file_name=ifile_name ) + ls.plot(show=show, pause=time_perframe, ax=ax, close=False, file_name=ifile_name) - if( show ): - if( pause == -1 ): + if show: + if pause == -1: plt.show() else: - plt.pause( pause ) + plt.pause(pause) - if( close ): + if close: plt.close("all") - - def plot_molecule_numbers(self, species_name, pause=-1, show=True, ax=None, close=False, - file_name=None, normalize_per_site=False, derivative=False): + def plot_molecule_numbers( + self, + species_name, + pause=-1, + show=True, + ax=None, + close=False, + file_name=None, + normalize_per_site=False, + derivative=False, + ): """ uses Matplotlib to create an animation of the Molecule Numbers. @@ -498,56 +520,55 @@ def plot_molecule_numbers(self, species_name, pause=-1, show=True, ax=None, clos try: import matplotlib.pyplot as plt except ImportError as e: - return # module doesn't exist, deal with it. + return # module doesn't exist, deal with it. - if( ax is None ): - fig,ax = plt.subplots() + if ax is None: + fig, ax = plt.subplots() plt.rcParams["figure.autolayout"] = True provided_quantities = self.provided_quantities() - COLORS = ['r', 'g', 'b', 'm'] + COLORS = ["r", "g", "b", "m"] - ax.set_xlabel('t (s)') + ax.set_xlabel("t (s)") - if( normalize_per_site ): - if( derivative ): - ax.set_ylabel('Derivative of the Molecule Numbers per Site') + if normalize_per_site: + if derivative: + ax.set_ylabel("Derivative of the Molecule Numbers per Site") else: - ax.set_ylabel('Molecule Numbers per Site') + ax.set_ylabel("Molecule Numbers per Site") else: - if( derivative ): - ax.set_ylabel('Derivative of the Molecule Numbers') + if derivative: + ax.set_ylabel("Derivative of the Molecule Numbers") else: - ax.set_ylabel('Molecule Numbers') + ax.set_ylabel("Molecule Numbers") x = provided_quantities["Time"] - for i,spn in enumerate(species_name): - if( normalize_per_site ): - y = numpy.array(provided_quantities[spn])/self.number_of_lattice_sites() + for i, spn in enumerate(species_name): + if normalize_per_site: + y = numpy.array(provided_quantities[spn]) / self.number_of_lattice_sites() else: y = numpy.array(provided_quantities[spn]) - if( derivative ): + if derivative: y = numpy.gradient(y, x) - ax.step( x, y, where='post', color=COLORS[i], label=spn) + ax.step(x, y, where="post", color=COLORS[i], label=spn) - ax.legend(loc='best') + ax.legend(loc="best") - if( file_name is not None ): - plt.savefig( file_name ) + if file_name is not None: + plt.savefig(file_name) - if( show ): - if( pause == -1 ): + if show: + if pause == -1: plt.show() else: - plt.pause( pause ) + plt.pause(pause) - if( close ): + if close: plt.close("all") - def get_process_statistics(self): """ Returns the statistics from the 'procstat_output.txt' file in a form of a list of dictionaries. 
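The plotting helpers above are thin Matplotlib wrappers that simply return when Matplotlib is unavailable; a short sketch, continuing from the results object of the previous example:

    # CO2 production per site and its time derivative (a rough production rate).
    results.plot_molecule_numbers(["CO2"], normalize_per_site=True)
    results.plot_molecule_numbers(["CO2"], normalize_per_site=True, derivative=True)

    # Animate the lattice snapshots stored in history_output.txt, one frame per 0.25 s.
    results.plot_lattice_states(results.lattice_states(), time_perframe=0.25)
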
@@ -617,25 +638,25 @@ def get_process_statistics(self): prev_number_of_process_statistics = 0 number_of_process_statistics = self.number_of_process_statistics() - if( self.job.restart is None ): - lines = self.grep_file(self._filenames['procstat'], pattern='Overall') - elementary_steps_names = lines[0][ lines[0].find('Overall')+len("Overall"): ].split() + if self.job.restart is None: + lines = self.grep_file(self._filenames["procstat"], pattern="Overall") + elementary_steps_names = lines[0][lines[0].find("Overall") + len("Overall") :].split() else: prev_number_of_process_statistics = self.job.restart.results.number_of_process_statistics() elementary_steps_names = self.job.restart.results.elementary_steps_names() output = self.job.restart.results.get_process_statistics() - all_lines = self.grep_file(self._filenames['procstat'], pattern='configuration', options="-A2") + all_lines = self.grep_file(self._filenames["procstat"], pattern="configuration", options="-A2") all_lines = [line for line in all_lines if line != "--"] - for nconf in range(number_of_process_statistics-prev_number_of_process_statistics): - lines = all_lines[nconf*3:(nconf+1)*3] + for nconf in range(number_of_process_statistics - prev_number_of_process_statistics): + lines = all_lines[nconf * 3 : (nconf + 1) * 3] procstat_state = {} pos = 0 - for nline,line in enumerate(lines): + for nline, line in enumerate(lines): tokens = line.split() - if( nline==0 ): + if nline == 0: assert tokens[0] == "configuration" configuration_number = int(tokens[1]) @@ -646,25 +667,25 @@ def get_process_statistics(self): procstat_state["total_number_of_events"] = total_number_of_events procstat_state["time"] = time - elif( nline==1 ): - assert len(tokens)-1 == len(elementary_steps_names) + elif nline == 1: + assert len(tokens) - 1 == len(elementary_steps_names) average_waiting_time = {} - for i,k in enumerate(elementary_steps_names): - average_waiting_time[k] = float(tokens[i+1]) + for i, k in enumerate(elementary_steps_names): + average_waiting_time[k] = float(tokens[i + 1]) procstat_state["average_waiting_time"] = average_waiting_time - elif( nline==2 ): - assert len(tokens)-1 == len(elementary_steps_names) + elif nline == 2: + assert len(tokens) - 1 == len(elementary_steps_names) number_of_events = {} occurence_frequency = {} - for i,k in enumerate(elementary_steps_names): - number_of_events[k] = int(tokens[i+1]) + for i, k in enumerate(elementary_steps_names): + number_of_events[k] = int(tokens[i + 1]) - if( procstat_state["time"] > 0.0 ): - occurence_frequency[k] = number_of_events[k]/procstat_state["time"] + if procstat_state["time"] > 0.0: + occurence_frequency[k] = number_of_events[k] / procstat_state["time"] else: occurence_frequency[k] = 0.0 @@ -672,94 +693,97 @@ def get_process_statistics(self): procstat_state["occurence_frequency"] = occurence_frequency else: - raise Exception( "Error: Wrong format in file specnum_output.txt" ) + raise Exception("Error: Wrong format in file specnum_output.txt") pos += 1 - output.append( procstat_state ) - + output.append(procstat_state) return output - - def __plot_process_statistics(self, data, key, log_scale=False, pause=-1, show=True, ax=None, close=False, xmax=None, file_name=None): + def __plot_process_statistics( + self, data, key, log_scale=False, pause=-1, show=True, ax=None, close=False, xmax=None, file_name=None + ): """ Plots data as a histogram """ try: import matplotlib.pyplot as plt except ImportError as e: - return # module doesn't exist, deal with it. 
+ return # module doesn't exist, deal with it. - if( ax is None ): - fig,ax = plt.subplots() + if ax is None: + fig, ax = plt.subplots() plt.rcParams["figure.autolayout"] = True provided_quantities = self.provided_quantities() - ax.set_title(r't $\in$ [0.0,{:.3g}] s'.format(data["time"])) + ax.set_title(r"t $\in$ [0.0,{:.3g}] s".format(data["time"])) ax.set_xlabel(key) keys = list(data[key].keys()) idkeys_sorted = sorted(enumerate(keys), key=lambda x: x[1]) - keys = [ k for i,k in idkeys_sorted ] - data_sorted = [ list(data[key].values())[i] for i,k in idkeys_sorted ] + keys = [k for i, k in idkeys_sorted] + data_sorted = [list(data[key].values())[i] for i, k in idkeys_sorted] - y_pos = len(keys)*[None] + y_pos = len(keys) * [None] j = 0 for i in range(len(keys)): - if( i==0 ): + if i == 0: y_pos[i] = j else: - if( keys[i].replace('_fwd','').replace('_rev','') != keys[i-1].replace('_fwd','').replace('_rev','') ): + if keys[i].replace("_fwd", "").replace("_rev", "") != keys[i - 1].replace("_fwd", "").replace( + "_rev", "" + ): j += 1 y_pos[i] = j - COLORS = ['r', 'b', 'g', 'm'] - color = len(y_pos)*[COLORS[0]] + COLORS = ["r", "b", "g", "m"] + color = len(y_pos) * [COLORS[0]] j = 0 - for i in range(1,len(y_pos)): - if( y_pos[i] == y_pos[i-1] ): + for i in range(1, len(y_pos)): + if y_pos[i] == y_pos[i - 1]: y_pos[i] += 0.15 - y_pos[i-1] -= 0.15 + y_pos[i - 1] -= 0.15 color[i] = COLORS[j] - color[i-1] = COLORS[j+1] + color[i - 1] = COLORS[j + 1] - ax.barh(y_pos, data_sorted, align='center', height=0.25, color=color) + ax.barh(y_pos, data_sorted, align="center", height=0.25, color=color) maxval = max(data_sorted) - if( xmax is not None ): + if xmax is not None: maxval = xmax - if( log_scale ): - ax.set_xlim((1e0,1.2*maxval)) + if log_scale: + ax.set_xlim((1e0, 1.2 * maxval)) plt.xscale("log") else: - ax.set_xlim((0,1.05*maxval)) + ax.set_xlim((0, 1.05 * maxval)) ax.invert_yaxis() # labels read top-to-bottom ax.set_yticks(y_pos) ax.set_yticklabels(keys) plt.tight_layout() - if( file_name is not None ): - plt.savefig( file_name ) + if file_name is not None: + plt.savefig(file_name) - if( show ): - if( pause == -1 ): + if show: + if pause == -1: plt.show() else: - plt.pause( pause ) + plt.pause(pause) - if( close ): + if close: plt.close("all") - - def plot_process_statistics(self, data, key, log_scale=False, pause=-1, show=True, ax=None, close=False, file_name=None): + def plot_process_statistics( + self, data, key, log_scale=False, pause=-1, show=True, ax=None, close=False, file_name=None + ): """ Uses Matplotlib to create an animation of the process statistics. @@ -772,44 +796,53 @@ def plot_process_statistics(self, data, key, log_scale=False, pause=-1, show=Tru * ``close`` -- Closes the figure window after pause time. * ``file_name`` -- Saves the figures to the file ``file_name-`` (the corresponding id on the list replaces the ````). The format is inferred from the extension, and by default, ``.png`` is used. """ - if( type(data) == dict ): - self.__plot_process_statistics( data, key, log_scale=log_scale, pause=pause, show=show, - close=close, file_name=file_name ) - if( type(data) == list ): + if type(data) == dict: + self.__plot_process_statistics( + data, key, log_scale=log_scale, pause=pause, show=show, close=close, file_name=file_name + ) + if type(data) == list: try: import matplotlib.pyplot as plt except ImportError as e: - return # module doesn't exist, deal with it. + return # module doesn't exist, deal with it. 
- if( ax is None ): - fig,ax = plt.subplots() + if ax is None: + fig, ax = plt.subplots() maxval = -1e8 for idata in data: - if( max(idata[key].values()) > maxval ): + if max(idata[key].values()) > maxval: maxval = max(idata[key].values()) - for i,idata in enumerate(data): + for i, idata in enumerate(data): ifile_name = None - if( file_name is not None ): - prefix,ext = os.path.splitext(file_name) - ifile_name = prefix+"-"+"%05d"%i+ext + if file_name is not None: + prefix, ext = os.path.splitext(file_name) + ifile_name = prefix + "-" + "%05d" % i + ext ax.cla() - self.__plot_process_statistics( idata, key, log_scale=log_scale, pause=0.5, show=show, ax=ax, - close=False, xmax=maxval, file_name=ifile_name ) - - if( show ): - if( pause == -1 ): + self.__plot_process_statistics( + idata, + key, + log_scale=log_scale, + pause=0.5, + show=show, + ax=ax, + close=False, + xmax=maxval, + file_name=ifile_name, + ) + + if show: + if pause == -1: plt.show() else: - plt.pause( pause ) + plt.pause(pause) - if( close ): + if close: plt.close("all") - - #--------------------------------------------------------------------- + # --------------------------------------------------------------------- # Function to compute the rate of production using the # Batch-Means-Stopping method. # Original author: Mauro Bracconi (mauro.bracconi@polimi.it) @@ -819,43 +852,44 @@ def plot_process_statistics(self, data, key, log_scale=False, pause=-1, show=Tru # reaction networks # J.Chem. Phys. 144, 074104 (2016) # https://doi.org/10.1063/1.4942008 - #--------------------------------------------------------------------- + # --------------------------------------------------------------------- @staticmethod - def __compute_rate( t_vect, spec, n_sites, n_batch=20, confidence=0.99, ignore_nbatch=1 ): + def __compute_rate(t_vect, spec, n_sites, n_batch=20, confidence=0.99, ignore_nbatch=1): # Batch means stopping implementation t_vect = numpy.array(t_vect) - prod_mol = numpy.array(spec)/n_sites + prod_mol = numpy.array(spec) / n_sites # Define batch length - lt = int(len(t_vect)/n_batch) + lt = int(len(t_vect) / n_batch) # Compute TOF in each batch ratet = numpy.empty(n_batch) for i in range(n_batch): - if ( i != n_batch-1 ) : - ratet[i] = numpy.polyfit(t_vect[lt*i:lt*(i+1)],prod_mol[lt*i:lt*(i+1)],1)[0] + if i != n_batch - 1: + ratet[i] = numpy.polyfit(t_vect[lt * i : lt * (i + 1)], prod_mol[lt * i : lt * (i + 1)], 1)[0] else: - ratet[i] = numpy.polyfit(t_vect[lt*i:-1],prod_mol[lt*i:-1],1)[0] + ratet[i] = numpy.polyfit(t_vect[lt * i : -1], prod_mol[lt * i : -1], 1)[0] # Exclude first ``ignore_nbatch`` elements rate = ratet[ignore_nbatch:] # Compute average and CI rate_av, se = numpy.mean(rate), scipy.stats.sem(rate) - rate_CI = se * scipy.stats.t._ppf( (1.0+confidence)/2.0, len(rate) - 1.0 ) - ratio = numpy.abs(rate_CI)/(numpy.abs(rate_av)+1e-8) + rate_CI = se * scipy.stats.t._ppf((1.0 + confidence) / 2.0, len(rate) - 1.0) + ratio = numpy.abs(rate_CI) / (numpy.abs(rate_av) + 1e-8) - if ratio<1.0-confidence: - return ( rate_av,rate_CI,ratio, True ) + if ratio < 1.0 - confidence: + return (rate_av, rate_CI, ratio, True) else: - if abs(rate_av) < 1.0/n_sites: - return ( rate[-1],rate_CI,0.0, True ) + if abs(rate_av) < 1.0 / n_sites: + return (rate[-1], rate_CI, 0.0, True) else: - return ( rate_av,rate_CI,ratio, False ) + return (rate_av, rate_CI, ratio, False) - - def turnover_frequency(self, nbatch=20, confidence=0.99, ignore_nbatch=1, species_name=None, provided_quantities=None): + def turnover_frequency( + self, nbatch=20, 
confidence=0.99, ignore_nbatch=1, species_name=None, provided_quantities=None + ): """ Returns the TOF (mol/sec/site) calculated by the batch-means stopping method. See Hashemi et al., J.Chem. Phys. 144, 074104 (2016) @@ -903,26 +937,31 @@ def turnover_frequency(self, nbatch=20, confidence=0.99, ignore_nbatch=1, specie converged[sn] = True if sum(numpy.abs(lprovided_quantities[sn])) > 0: - aver,ci,ratio,conv = ZacrosResults.__compute_rate( lprovided_quantities["Time"], lprovided_quantities[sn], - self.number_of_lattice_sites(), nbatch, confidence, ignore_nbatch ) + aver, ci, ratio, conv = ZacrosResults.__compute_rate( + lprovided_quantities["Time"], + lprovided_quantities[sn], + self.number_of_lattice_sites(), + nbatch, + confidence, + ignore_nbatch, + ) values[sn] = aver errors[sn] = ci ratios[sn] = ratio converged[sn] = conv if species_name is None: - return values,errors,ratios,converged + return values, errors, ratios, converged else: - return values[species_name],errors[species_name],ratios[species_name],converged[species_name] - + return values[species_name], errors[species_name], ratios[species_name], converged[species_name] @staticmethod - def _average_provided_quantities( provided_quantities_list, key_column_name, columns_name=None ): + def _average_provided_quantities(provided_quantities_list, key_column_name, columns_name=None): if len(provided_quantities_list) == 0: - msg = "### ERROR ### ZacrosResults._average_provided_quantities\n" + msg = "### ERROR ### ZacrosResults._average_provided_quantities\n" msg += ">> provided_quantities_list parameter should eb a list with at least one item\n" - raise Exception( msg ) + raise Exception(msg) nexp = len(provided_quantities_list) npoints = len(provided_quantities_list[0][key_column_name]) @@ -932,21 +971,26 @@ def _average_provided_quantities( provided_quantities_list, key_column_name, col average = {} - average[key_column_name] = npoints*[0.0] + average[key_column_name] = npoints * [0.0] for name in columns_name: - average[name] = npoints*[0.0] + average[name] = npoints * [0.0] for i in range(npoints): average[key_column_name][i] = provided_quantities_list[0][key_column_name][i] for k in range(nexp): - if k>0 and provided_quantities_list[k][key_column_name][i] != provided_quantities_list[0][key_column_name][i]: - msg = "### ERROR ### ZacrosResults._average_provided_quantities\n" + if ( + k > 0 + and provided_quantities_list[k][key_column_name][i] + != provided_quantities_list[0][key_column_name][i] + ): + msg = "### ERROR ### ZacrosResults._average_provided_quantities\n" msg += ">> Reference column has different values for each item\n" - raise Exception( msg ) + raise Exception(msg) for name in columns_name: - if name == key_column_name: continue + if name == key_column_name: + continue eff_nexp = 0 for k in range(nexp): @@ -962,4 +1006,3 @@ def _average_provided_quantities( provided_quantities_list, key_column_name, col average[name][i] /= float(eff_nexp) return average - diff --git a/core/ZacrosSteadyStateJob.py b/core/ZacrosSteadyStateJob.py index f7a62e6..356435a 100644 --- a/core/ZacrosSteadyStateJob.py +++ b/core/ZacrosSteadyStateJob.py @@ -13,10 +13,10 @@ from .ZacrosResults import * from .ParametersBase import * -__all__ = ['ZacrosSteadyStateJob', 'ZacrosSteadyStateResults'] +__all__ = ["ZacrosSteadyStateJob", "ZacrosSteadyStateResults"] -class ZacrosSteadyStateResults( scm.plams.Results ): +class ZacrosSteadyStateResults(scm.plams.Results): """ A Class for handling ZacrosSteadyStateJob Results. 
""" @@ -44,21 +44,18 @@ def history(self, pos=None): else: return self.job._history - def niterations(self): """ Returns the current number of iterations executed """ return self.job.niterations - def nreplicas(self): """ Returns the number of replicas used """ return self.job.nreplicas - def children_results(self, iteration=None, replica=None): """ Returns a list of the children's results or the results for a specific iteration or replica if requested. @@ -67,108 +64,95 @@ def children_results(self, iteration=None, replica=None): output = [] for i in range(len(self.job.children)): - output.append( self.job.children[i].results ) + output.append(self.job.children[i].results) return output elif iteration is not None and replica is None: output = [] for j in range(len(self.job.nreplicas)): - output.append( self.job.children[iteration*self.job.nreplicas+j].results ) + output.append(self.job.children[iteration * self.job.nreplicas + j].results) return output elif iteration is not None and replica is not None: - return self.job.children[iteration*self.job.nreplicas+replica].results + return self.job.children[iteration * self.job.nreplicas + replica].results else: - msg = "\n### ERROR ### ZacrosSteadyStateResults.children_results().\n" + msg = "\n### ERROR ### ZacrosSteadyStateResults.children_results().\n" msg += " Wrong parameters combination.\n" raise Exception(msg) - def get_zacros_version(self): """ Returns the zacros's version from the 'general_output.txt' file. """ return self.job.children[-1].results.get_zacros_version() - def get_reaction_network(self): """ Returns the reactions from the 'general_output.txt' file associated to the last children. """ return self.job.children[-1].results.get_reaction_network() - def provided_quantities(self): """ Returns the provided quantities headers from the ``specnum_output.txt`` file in a list associated to the last children. """ return self.job.children[-1].results.provided_quantities() - def number_of_lattice_sites(self): """ Returns the number of lattice sites from the 'general_output.txt' file associated to the last children. """ return self.job.children[-1].results.number_of_lattice_sites() - def gas_species_names(self): """ Returns the gas species names from the 'general_output.txt' file associated to the last children. """ return self.job.children[-1].results.gas_species_names() - def surface_species_names(self): """ Returns the surface species names from the 'general_output.txt' file associated to the last children. """ return self.job.children[-1].results.surface_species_names() - def site_type_names(self): """ Returns the site types from the 'general_output.txt' file associated to the last children. """ return self.job.children[-1].results.site_type_names() - def number_of_snapshots(self): """ Returns the number of configurations from the 'history_output.txt' file associated to the last children. """ return self.job.children[-1].results.number_of_snapshots() - def number_of_process_statistics(self): """ Returns the number of process statistics from the 'procstat_output.txt' file associated to the last children. """ return self.job.children[-1].results.number_of_process_statistics() - def elementary_steps_names(self): """ Returns the names of elementary steps from the 'procstat_output.txt' file associated to the last children. """ return self.job.children[-1].results.elementary_steps_names() - def lattice_states(self, last=None): """ Returns the configurations from the 'history_output.txt' file associated to the last children. 
""" return self.job.children[-1].results.lattice_states(last=last) - def last_lattice_state(self): """ Returns the last configuration from the 'history_output.txt' file associated to the last children. """ return self.job.children[-1].results.last_lattice_state() - def average_coverage(self, last=5): """ Returns a dictionary with the average coverage fractions using the last ``last`` lattice states, e.g., ``{ "CO*":0.32, "O*":0.45 }``. @@ -178,19 +162,18 @@ def average_coverage(self, last=5): acf = {} for i in range(self.job.nreplicas): - prev = self.job.children[i-self.job.nreplicas] + prev = self.job.children[i - self.job.nreplicas] lacf = prev.results.average_coverage(last=last) - for k,v in lacf.items(): + for k, v in lacf.items(): if k not in acf: - acf[k] = v/self.job.nreplicas + acf[k] = v / self.job.nreplicas else: - acf[k] += v/self.job.nreplicas + acf[k] += v / self.job.nreplicas return acf - def plot_lattice_states(self, data, pause=-1, show=True, ax=None, close=False, time_perframe=0.5, file_name=None): """ Uses Matplotlib to create an animation of the lattice states associated to the last children. @@ -203,12 +186,21 @@ def plot_lattice_states(self, data, pause=-1, show=True, ax=None, close=False, t * ``time_perframe`` -- Sets the time interval between frames in seconds. * ``file_name`` -- Saves the figures to the file ``file_name-`` (the corresponding id on the list replaces the ````). The format is inferred from the extension, and by default, ``.png`` is used. """ - self.job.children[-1].results.plot_lattice_states(data=data, pause=pause, show=show, ax=ax, close=close, - time_perframe=time_perframe, file_name=file_name) - - - def plot_molecule_numbers(self, species_name, pause=-1, show=True, ax=None, close=False, - file_name=None, normalize_per_site=False, derivative=False): + self.job.children[-1].results.plot_lattice_states( + data=data, pause=pause, show=show, ax=ax, close=close, time_perframe=time_perframe, file_name=file_name + ) + + def plot_molecule_numbers( + self, + species_name, + pause=-1, + show=True, + ax=None, + close=False, + file_name=None, + normalize_per_site=False, + derivative=False, + ): """ uses Matplotlib to create an animation of the Molecule Numbers associated to the last children. @@ -221,9 +213,16 @@ def plot_molecule_numbers(self, species_name, pause=-1, show=True, ax=None, clos * ``normalize_per_site`` -- Divides the molecule numbers by the total number of sites in the lattice. * ``derivative`` -- Plots the first derivative. """ - self.job.children[-1].results.plot_molecule_numbers(species_name=species_name, pause=pause, show=show, ax=ax, close=close, - file_name=file_name, normalize_per_site=normalize_per_site, derivative=derivative) - + self.job.children[-1].results.plot_molecule_numbers( + species_name=species_name, + pause=pause, + show=show, + ax=ax, + close=close, + file_name=file_name, + normalize_per_site=normalize_per_site, + derivative=derivative, + ) def get_process_statistics(self): """ @@ -231,8 +230,9 @@ def get_process_statistics(self): """ return self.job.children[-1].results.get_process_statistics() - - def plot_process_statistics(self, data, key, log_scale=False, pause=-1, show=True, ax=None, close=False, file_name=None): + def plot_process_statistics( + self, data, key, log_scale=False, pause=-1, show=True, ax=None, close=False, file_name=None + ): """ Uses Matplotlib to create an animation of the process statistics associated to the last children. 
@@ -245,41 +245,47 @@ def plot_process_statistics(self, data, key, log_scale=False, pause=-1, show=Tru * ``close`` -- Closes the figure window after pause time. * ``file_name`` -- Saves the figures to the file ``file_name-`` (the corresponding id on the list replaces the ````). The format is inferred from the extension, and by default, ``.png`` is used. """ - self.job.children[-1].results.plot_process_statistics(data=data, key=key, log_scale=log_scale, pause=pause, show=show, ax=ax, - close=close, file_name=file_name) - + self.job.children[-1].results.plot_process_statistics( + data=data, key=key, log_scale=log_scale, pause=pause, show=show, ax=ax, close=close, file_name=file_name + ) def turnover_frequency(self, nbatch=None, confidence=None, ignore_nbatch=None, species_name=None): - if nbatch is None: nbatch = self.job.nbatch - if confidence is None: confidence = self.job.confidence - if ignore_nbatch is None: ignore_nbatch = self.job.ignore_nbatch + if nbatch is None: + nbatch = self.job.nbatch + if confidence is None: + confidence = self.job.confidence + if ignore_nbatch is None: + ignore_nbatch = self.job.ignore_nbatch provided_quantities_list = [] for i in range(self.job.nreplicas): - prev = self.job.children[i-self.job.nreplicas] - provided_quantities_list.append( prev.results.provided_quantities() ) + prev = self.job.children[i - self.job.nreplicas] + provided_quantities_list.append(prev.results.provided_quantities()) - aver_provided_quantities = ZacrosResults._average_provided_quantities( provided_quantities_list, 'Time' ) + aver_provided_quantities = ZacrosResults._average_provided_quantities(provided_quantities_list, "Time") # This case happens only when the surface gets quickly poisoned; in less than one iteration. # In that case we use only the last values to estimate the TOF # We need at least 3 points to make an standard deviation if self.job.niterations == 1: - ignore_nbatch = nbatch-3 + ignore_nbatch = nbatch - 3 - TOF,error,ratio,conv = prev.results.turnover_frequency( nbatch=nbatch, confidence=confidence, - ignore_nbatch=ignore_nbatch, - provided_quantities=aver_provided_quantities ) + TOF, error, ratio, conv = prev.results.turnover_frequency( + nbatch=nbatch, + confidence=confidence, + ignore_nbatch=ignore_nbatch, + provided_quantities=aver_provided_quantities, + ) if species_name is None: - return TOF,error,ratio,conv + return TOF, error, ratio, conv else: - return TOF[species_name],error[species_name],ratio[species_name],conv[species_name] + return TOF[species_name], error[species_name], ratio[species_name], conv[species_name] -class ZacrosSteadyStateJob( scm.plams.MultiJob ): +class ZacrosSteadyStateJob(scm.plams.MultiJob): """ Create a new ZacrosSteadyStateJob object. ``ZacrosSteadyStateJob`` class represents a job that is a container for other jobs, called children jobs, which must be :ref:`ZacrosJobs ` or :ref:`ZacrosSteadyStateJob ` kind objects. This class is an extension of the PLAMS MultiJob class. So it inherits all its powerful features, e.g., being executed locally or submitted to some external queueing system transparently or executing jobs in parallel with a predefined dependency structure. See all configure possibilities on the PLAMS MultiJob class documentation in this link: `PLAMS.MultiJob <../../plams/components/jobs.html#multijobs>`_. 
@@ -304,7 +310,6 @@ class ZacrosSteadyStateJob( scm.plams.MultiJob ): _result_type = ZacrosSteadyStateResults - class Parameter(ParameterBase): """ Creates a new Parameter object specifically tailored for ZacrosSteadyStateJob @@ -313,7 +318,6 @@ class Parameter(ParameterBase): def __init__(self, name_in_settings, kind, values): super().__init__(self, name_in_settings, kind, values) - class Parameters(ParametersBase): """ Creates a new Parameters object specifically tailored for ZacrosSteadyStateJob @@ -322,7 +326,6 @@ class Parameters(ParametersBase): def __init__(self, *args, **kwargs): super().__init__(self, *args, **kwargs) - def __init__(self, reference, parameters, settings=Settings(), **kwargs): scm.plams.MultiJob.__init__(self, settings=settings, **kwargs) @@ -331,35 +334,35 @@ def __init__(self, reference, parameters, settings=Settings(), **kwargs): size = None if parameters._generator.__name__ != "zipGenerator": - msg = "\n### ERROR ### ZacrosSteadyStateJob.__init__().\n" + msg = "\n### ERROR ### ZacrosSteadyStateJob.__init__().\n" msg += " The only generator allowed is the zipGenerator.\n" raise Exception(msg) - for name,item in parameters.items(): + for name, item in parameters.items(): if size is None: size = len(item.values) elif size != len(item.values): - msg = "\n### ERROR ### ZacrosSteadyStateJob.__init__().\n" + msg = "\n### ERROR ### ZacrosSteadyStateJob.__init__().\n" msg += " All parameter in 'parameters' should be lists of the same size.\n" raise Exception(msg) if size == 0: - msg = "\n### ERROR ### ZacrosSteadyStateJob.__init__().\n" + msg = "\n### ERROR ### ZacrosSteadyStateJob.__init__().\n" msg += " All parameter in 'parameters' should be lists with at least one element.\n" raise Exception(msg) self._parameters = parameters - if not isinstance(reference,ZacrosJob): - msg = "\n### ERROR ### ZacrosSteadyStateJob.__init__.\n" + if not isinstance(reference, ZacrosJob): + msg = "\n### ERROR ### ZacrosSteadyStateJob.__init__.\n" msg += " Parameter 'reference' must be a ZacrosJob object.\n" raise Exception(msg) # We don't need the indices because we are sure that the generator is the zipGenerator - _,self._parameters_values,self._parameters_settings = parameters._generator( reference.settings, parameters ) + _, self._parameters_values, self._parameters_settings = parameters._generator(reference.settings, parameters) self._scaling = False - self._scaling_status = 'not_requested' + self._scaling_status = "not_requested" self._scaling_factors = None self.max_iterations = len(parameters[list(parameters.keys())[0]].values) @@ -379,32 +382,51 @@ def __init__(self, reference, parameters, settings=Settings(), **kwargs): self.scaling_nevents_per_timestep = None self._new_timestep = None - self.nreplicas = self.settings.turnover_frequency.get('nreplicas', default=self.nreplicas) + self.nreplicas = self.settings.turnover_frequency.get("nreplicas", default=self.nreplicas) - if 'turnover_frequency' in self.settings: - self.nbatch = self.settings.turnover_frequency.get('nbatch', default=self.nbatch) - self.confidence = self.settings.turnover_frequency.get('confidence', default=self.confidence) - self.ignore_nbatch = self.settings.turnover_frequency.get('ignore_nbatch', default=self.ignore_nbatch) + if "turnover_frequency" in self.settings: + self.nbatch = self.settings.turnover_frequency.get("nbatch", default=self.nbatch) + self.confidence = self.settings.turnover_frequency.get("confidence", default=self.confidence) + self.ignore_nbatch = 
self.settings.turnover_frequency.get("ignore_nbatch", default=self.ignore_nbatch) # Scaling pre-exponential terms parameters - if 'scaling' in self.settings: - self._scaling = self.settings.scaling.get('enabled', default=self._scaling) - if type(self._scaling) == str \ - and ( self._scaling.upper() == 'T' or self._scaling.upper() == 'TRUE' - or self._scaling.upper() == 'Y' or self._scaling.upper() == 'Yes' ): - self._scaling_status = 'requested' - if type(self._scaling) == bool and self._scaling : - self._scaling_status = 'requested' - - self.scaling_partial_equilibrium_index_threshold = self.settings.scaling.get('partial_equilibrium_index_threshold', default=self.scaling_partial_equilibrium_index_threshold) - self.scaling_upper_bound = self.settings.scaling.get('upper_bound', default=self.scaling_upper_bound) - self.scaling_max_steps = self.settings.scaling.get('max_steps', default=self.scaling_max_steps) - self.scaling_max_time = self.settings.scaling.get('max_time', default=self.scaling_max_time) - self.scaling_species_numbers = self.settings.scaling.get('species_numbers', default=self.scaling_species_numbers) - self.scaling_nevents_per_timestep = self.settings.scaling.get('nevents_per_timestep', default=self.scaling_nevents_per_timestep) - - scm.plams.log("JOB "+self._full_name()+" Steady State Convergence: Using nbatch="+str(self.nbatch)+ - ",confidence="+str(self.confidence)+",ignore_nbatch="+str(self.ignore_nbatch)+",nreplicas="+str(self.nreplicas)) + if "scaling" in self.settings: + self._scaling = self.settings.scaling.get("enabled", default=self._scaling) + if type(self._scaling) == str and ( + self._scaling.upper() == "T" + or self._scaling.upper() == "TRUE" + or self._scaling.upper() == "Y" + or self._scaling.upper() == "Yes" + ): + self._scaling_status = "requested" + if type(self._scaling) == bool and self._scaling: + self._scaling_status = "requested" + + self.scaling_partial_equilibrium_index_threshold = self.settings.scaling.get( + "partial_equilibrium_index_threshold", default=self.scaling_partial_equilibrium_index_threshold + ) + self.scaling_upper_bound = self.settings.scaling.get("upper_bound", default=self.scaling_upper_bound) + self.scaling_max_steps = self.settings.scaling.get("max_steps", default=self.scaling_max_steps) + self.scaling_max_time = self.settings.scaling.get("max_time", default=self.scaling_max_time) + self.scaling_species_numbers = self.settings.scaling.get( + "species_numbers", default=self.scaling_species_numbers + ) + self.scaling_nevents_per_timestep = self.settings.scaling.get( + "nevents_per_timestep", default=self.scaling_nevents_per_timestep + ) + + scm.plams.log( + "JOB " + + self._full_name() + + " Steady State Convergence: Using nbatch=" + + str(self.nbatch) + + ",confidence=" + + str(self.confidence) + + ",ignore_nbatch=" + + str(self.ignore_nbatch) + + ",nreplicas=" + + str(self.nreplicas) + ) # These parameters a needed to make ZacrosSteadyStateJob compatible with ZacrosJob self.lattice = reference.lattice @@ -412,11 +434,10 @@ def __init__(self, reference, parameters, settings=Settings(), **kwargs): self.cluster_expansion = reference.cluster_expansion self.initial_state = reference.initial_state - def __steady_state_step(self): if self.niterations >= self.max_iterations: - scm.plams.log("JOB "+self._full_name()+" Steady State Convergence: MAX ITERATIONS REACHED") + scm.plams.log("JOB " + self._full_name() + " Steady State Convergence: MAX ITERATIONS REACHED") return None if len(self.children) > 0: @@ -425,34 +446,34 @@ def 
__steady_state_step(self): # We wait for threads to finish for i in range(self.nreplicas): - prev = self.children[i-self.nreplicas] + prev = self.children[i - self.nreplicas] prev.ok() # We check for failures. If one thread fails, we stop the whole set of replicas. # Otherwise, we cannot make an average because we have different number of points. for i in range(self.nreplicas): - prev = self.children[i-self.nreplicas] + prev = self.children[i - self.nreplicas] if not prev.ok(): if len(self.children) > self.nreplicas: if prev.restart_aborted(): - scm.plams.log("JOB "+prev._full_name()+" Steady State Convergence: RESTART ABORTED") + scm.plams.log("JOB " + prev._full_name() + " Steady State Convergence: RESTART ABORTED") self._surface_poisoned = True - elif len(self.children) > 2*self.nreplicas: - prevprev = self.children[i-2*self.nreplicas] + elif len(self.children) > 2 * self.nreplicas: + prevprev = self.children[i - 2 * self.nreplicas] if prevprev.surface_poisoned(): - scm.plams.log("JOB "+prev._full_name()+" Steady State Convergence: SURFACE POISONED") + scm.plams.log("JOB " + prev._full_name() + " Steady State Convergence: SURFACE POISONED") self._surface_poisoned = True else: - scm.plams.log("JOB "+prev._full_name()+" Steady State Convergence: FAILED") + scm.plams.log("JOB " + prev._full_name() + " Steady State Convergence: FAILED") # If failures we clean previous results if needed and stops # the creation of new children if self._surface_poisoned: for i in range(self.nreplicas): - poisoned_job = self.children.pop(i-self.nreplicas) - scm.plams.delete_job( poisoned_job ) - scm.plams.log("JOB "+poisoned_job._full_name()+" Steady State Convergence: JOB REMOVED") + poisoned_job = self.children.pop(i - self.nreplicas) + scm.plams.delete_job(poisoned_job) + scm.plams.log("JOB " + poisoned_job._full_name() + " Steady State Convergence: JOB REMOVED") self.niterations -= 1 return None @@ -461,90 +482,116 @@ def __steady_state_step(self): # In that case we use only the last values to estimate the TOF # We need at least 3 points to make an standard deviation ignore_nbatch = self.ignore_nbatch - if len(self.children)==self.nreplicas: - ignore_nbatch = self.nbatch-3 + if len(self.children) == self.nreplicas: + ignore_nbatch = self.nbatch - 3 # If no failures we continue extracting the properties to make the average for i in range(self.nreplicas): - prev = self.children[i-self.nreplicas] + prev = self.children[i - self.nreplicas] - TOF,error,ratio,conv = prev.results.turnover_frequency( nbatch=self.nbatch, confidence=self.confidence, - ignore_nbatch=ignore_nbatch ) + TOF, error, ratio, conv = prev.results.turnover_frequency( + nbatch=self.nbatch, confidence=self.confidence, ignore_nbatch=ignore_nbatch + ) if self.nreplicas > 1: - scm.plams.log(" Replica #%d"%i ) - scm.plams.log(" %10s"%"species"+"%15s"%"TOF"+"%15s"%"error"+"%15s"%"ratio"+"%10s"%"conv?") + scm.plams.log(" Replica #%d" % i) + scm.plams.log( + " %10s" % "species" + "%15s" % "TOF" + "%15s" % "error" + "%15s" % "ratio" + "%10s" % "conv?" 
+ ) for s in prev.results.gas_species_names(): - scm.plams.log(" %10s"%s+"%15.5f"%TOF[s]+"%15.5f"%error[s]+"%15.5f"%ratio[s]+"%10s"%conv[s]) + scm.plams.log( + " %10s" % s + + "%15.5f" % TOF[s] + + "%15.5f" % error[s] + + "%15.5f" % ratio[s] + + "%10s" % conv[s] + ) - provided_quantities_list.append( prev.results.provided_quantities() ) + provided_quantities_list.append(prev.results.provided_quantities()) + aver_provided_quantities = ZacrosResults._average_provided_quantities(provided_quantities_list, "Time") - aver_provided_quantities = ZacrosResults._average_provided_quantities( provided_quantities_list, 'Time' ) + TOF, error, ratio, conv = prev.results.turnover_frequency( + nbatch=self.nbatch, + confidence=self.confidence, + ignore_nbatch=ignore_nbatch, + provided_quantities=aver_provided_quantities, + ) - TOF,error,ratio,conv = prev.results.turnover_frequency( nbatch=self.nbatch, confidence=self.confidence, - ignore_nbatch=ignore_nbatch, - provided_quantities=aver_provided_quantities ) - - if self.nreplicas > 1: scm.plams.log(" Average" ) - scm.plams.log(" %10s"%"species"+"%15s"%"TOF"+"%15s"%"error"+"%15s"%"ratio"+"%10s"%"conv?") + if self.nreplicas > 1: + scm.plams.log(" Average") + scm.plams.log( + " %10s" % "species" + "%15s" % "TOF" + "%15s" % "error" + "%15s" % "ratio" + "%10s" % "conv?" + ) for s in prev.results.gas_species_names(): - scm.plams.log(" %10s"%s+"%15.5f"%TOF[s]+"%15.5f"%error[s]+"%15.5f"%ratio[s]+"%10s"%conv[s]) + scm.plams.log( + " %10s" % s + "%15.5f" % TOF[s] + "%15.5f" % error[s] + "%15.5f" % ratio[s] + "%10s" % conv[s] + ) - history_i = { 'turnover_frequency':TOF, - 'turnover_frequency_error':error, - 'converged':conv } + history_i = {"turnover_frequency": TOF, "turnover_frequency_error": error, "converged": conv} - for i,(name,item) in enumerate(self._parameters.items()): - history_i[name] = self._parameters_values[self.niterations-1][name] + for i, (name, item) in enumerate(self._parameters.items()): + history_i[name] = self._parameters_values[self.niterations - 1][name] - self._history.append( history_i ) + self._history.append(history_i) if all(conv.values()): - scm.plams.log("JOB "+self._full_name()+" Steady State Convergence: CONVERGENCE REACHED. DONE!") + scm.plams.log("JOB " + self._full_name() + " Steady State Convergence: CONVERGENCE REACHED. 
DONE!") return None else: - scm.plams.log("JOB "+self._full_name()+" Steady State Convergence: NO CONVERGENCE REACHED YET") + scm.plams.log("JOB " + self._full_name() + " Steady State Convergence: NO CONVERGENCE REACHED YET") # Here we apply the scaling factors mechanism = copy.deepcopy(self._reference.mechanism) if self._scaling_factors is not None: - for i,rxn in enumerate(mechanism): + for i, rxn in enumerate(mechanism): old = rxn.pre_expon rxn.pre_expon *= self._scaling_factors[i] lparallel = [] for i in range(self.nreplicas): - prev = None if len(self.children)==0 else self.children[i-self.nreplicas] + prev = None if len(self.children) == 0 else self.children[i - self.nreplicas] lsettings = self._parameters_settings[self.niterations].copy() - lsettings.random_seed = lsettings.get('random_seed',default=0) + i + lsettings.random_seed = lsettings.get("random_seed", default=0) + i if self.nreplicas > 1: - name = "ss_iter"+"%03d"%self.niterations+"_rep"+"%03d"%i + name = "ss_iter" + "%03d" % self.niterations + "_rep" + "%03d" % i else: - name = "ss_iter"+"%03d"%self.niterations + name = "ss_iter" + "%03d" % self.niterations - new_child = ZacrosJob( settings=lsettings, - lattice=self._reference.lattice, - mechanism=mechanism, - cluster_expansion=self._reference.cluster_expansion, - name=name, - restart=prev ) + new_child = ZacrosJob( + settings=lsettings, + lattice=self._reference.lattice, + mechanism=mechanism, + cluster_expansion=self._reference.cluster_expansion, + name=name, + restart=prev, + ) if prev is None: - scm.plams.log("JOB "+self.name+"/"+name+" Steady State: NEW") + scm.plams.log("JOB " + self.name + "/" + name + " Steady State: NEW") else: - scm.plams.log("JOB "+self.name+"/"+name+" Steady State: NEW"+" (dep="+self.name+"/"+prev.name+")") - - lparallel.append( new_child ) + scm.plams.log( + "JOB " + + self.name + + "/" + + name + + " Steady State: NEW" + + " (dep=" + + self.name + + "/" + + prev.name + + ")" + ) + + lparallel.append(new_child) self.niterations += 1 return lparallel - - #-------------------------------------------------------------- + # -------------------------------------------------------------- # Function to compute the scaling factors of the mechanisms # pre-exponential factors. # Original author: Mauro Bracconi (mauro.bracconi@polimi.it) @@ -555,24 +602,24 @@ def __steady_state_step(self): # constant rescaling # J. Chem. Phys. 
147, 164103 (2017) # https://doi.org/10.1063/1.4998926 - #-------------------------------------------------------------- + # -------------------------------------------------------------- @staticmethod - def __scaling_factors( mechanism, process_statistics, quasieq_th=0.1, delta=100 ) : + def __scaling_factors(mechanism, process_statistics, quasieq_th=0.1, delta=100): # kMC rate scaling - freq = numpy.zeros(len(mechanism)*2) - value = list(process_statistics[-1]['number_of_events'].values()) + freq = numpy.zeros(len(mechanism) * 2) + value = list(process_statistics[-1]["number_of_events"].values()) cont = 0 - for i,step in enumerate(mechanism): - if(step.reversible): - freq[2*i] = value[cont] - freq[2*i+1] = value[cont+1] - cont = cont+2 + for i, step in enumerate(mechanism): + if step.reversible: + freq[2 * i] = value[cont] + freq[2 * i + 1] = value[cont + 1] + cont = cont + 2 else: - freq[2*i] = value[cont] - freq[2*i+1] = 0 - cont = cont+1 + freq[2 * i] = value[cont] + freq[2 * i + 1] = 0 + cont = cont + 1 # Forward & backward fwq = freq[0::2] @@ -589,19 +636,19 @@ def __scaling_factors( mechanism, process_statistics, quasieq_th=0.1, delta=100 for i in range(len(tot)): if tot[i] == 0: slow_rxn.append(i) - PE_vec.append( 0.0 ) # TODO Check this case - kind_vec.append( 'slow' ) + PE_vec.append(0.0) # TODO Check this case + kind_vec.append("slow") else: PE = float(net[i]) / tot[i] - PE_vec.append( PE ) + PE_vec.append(PE) if numpy.abs(PE) < quasieq_th: fast_rxn.append(i) - kind_vec.append( 'fast' ) + kind_vec.append("fast") else: slow_rxn.append(i) - kind_vec.append( 'slow' ) + kind_vec.append("slow") - slow_f = [1.] + slow_f = [1.0] for i in slow_rxn: slow_f.append(tot[i]) @@ -611,20 +658,29 @@ def __scaling_factors( mechanism, process_statistics, quasieq_th=0.1, delta=100 for i in fast_rxn: Nf = tot[i] / float(slow_scale) - delta_sdf[i] = numpy.min([1.0, delta / Nf ]) - - return delta_sdf,PE_vec,kind_vec + delta_sdf[i] = numpy.min([1.0, delta / Nf]) + return delta_sdf, PE_vec, kind_vec def __scaling_factors_step_start(self): sufix = "" - if self.scaling_max_steps is not None: sufix += ",max_steps="+str(self.scaling_max_steps) - if self.scaling_max_time is not None: sufix += ",max_time="+str(self.scaling_max_time) - if self.scaling_species_numbers is not None: sufix += ",species_numbers="+str(self.scaling_species_numbers) - - scm.plams.log("JOB "+self._full_name()+" Scaling: Using partial_equilibrium_index_threshold="+str(self.scaling_partial_equilibrium_index_threshold)+ - ",upper_bound="+str(self.scaling_upper_bound)+sufix) + if self.scaling_max_steps is not None: + sufix += ",max_steps=" + str(self.scaling_max_steps) + if self.scaling_max_time is not None: + sufix += ",max_time=" + str(self.scaling_max_time) + if self.scaling_species_numbers is not None: + sufix += ",species_numbers=" + str(self.scaling_species_numbers) + + scm.plams.log( + "JOB " + + self._full_name() + + " Scaling: Using partial_equilibrium_index_threshold=" + + str(self.scaling_partial_equilibrium_index_threshold) + + ",upper_bound=" + + str(self.scaling_upper_bound) + + sufix + ) lsettings = self._reference.settings.copy() @@ -634,46 +690,47 @@ def __scaling_factors_step_start(self): ok = False if self.scaling_max_steps is not None: - lsettings['process_statistics'] = ('event', self.scaling_max_steps) - lsettings['max_steps'] = self.scaling_max_steps + lsettings["process_statistics"] = ("event", self.scaling_max_steps) + lsettings["max_steps"] = self.scaling_max_steps ok = True if self.scaling_max_time is not None: 
- lsettings['process_statistics'] = ('time', self.scaling_max_time) - lsettings['max_time'] = self.scaling_max_time + lsettings["process_statistics"] = ("time", self.scaling_max_time) + lsettings["max_time"] = self.scaling_max_time ok = True if self.scaling_species_numbers is not None: - lsettings['species_numbers'] = self.scaling_species_numbers + lsettings["species_numbers"] = self.scaling_species_numbers if not ok: - if 'max_steps' in lsettings and lsettings.max_steps != 'infinity': - lsettings['process_statistics'] = ('event', lsettings.max_steps) + if "max_steps" in lsettings and lsettings.max_steps != "infinity": + lsettings["process_statistics"] = ("event", lsettings.max_steps) ok = True - if 'max_time' in lsettings: - lsettings['process_statistics'] = ('time', lsettings.max_time) + if "max_time" in lsettings: + lsettings["process_statistics"] = ("time", lsettings.max_time) ok = True if not ok: - msg = "\n### ERROR ### ZacrosSteadyStateJob.__scaling_factors_step_start.\n" + msg = "\n### ERROR ### ZacrosSteadyStateJob.__scaling_factors_step_start.\n" msg += " process_statistics section is needed in settings object.\n" raise Exception(msg) - for name,item in self._parameters.items(): + for name, item in self._parameters.items(): value = item.values[0] - eval('lsettings'+item.name2setitem().replace('$var_value',str(value))) + eval("lsettings" + item.name2setitem().replace("$var_value", str(value))) - new_child = ZacrosJob( settings=lsettings, - lattice=self._reference.lattice, - mechanism=self._reference.mechanism, - cluster_expansion=self._reference.cluster_expansion, - name="ss_scaling" ) + new_child = ZacrosJob( + settings=lsettings, + lattice=self._reference.lattice, + mechanism=self._reference.mechanism, + cluster_expansion=self._reference.cluster_expansion, + name="ss_scaling", + ) - self._scaling_status = 'started' - - return [ new_child ] + self._scaling_status = "started" + return [new_child] def __scaling_factors_step_end(self): @@ -683,45 +740,63 @@ def __scaling_factors_step_end(self): process_statistics = prev.results.get_process_statistics() if self.scaling_nevents_per_timestep is not None: - time = process_statistics[-1]['time'] - nevents = process_statistics[-1]['total_number_of_events'] - - self._new_timestep = (time/nevents)*self.scaling_nevents_per_timestep - - sf,PE,kind = self.__scaling_factors( self._reference.mechanism, - process_statistics, - quasieq_th=self.scaling_partial_equilibrium_index_threshold, - delta=self.scaling_upper_bound ) - - scm.plams.log(" "+" %5s"%"id"+" %10s"%"PE"+" %8s"%"kind"+" %15s"%"orig_pexp"+" %15s"%"sf"+" %15s"%"new_pexp"+" label") + time = process_statistics[-1]["time"] + nevents = process_statistics[-1]["total_number_of_events"] + + self._new_timestep = (time / nevents) * self.scaling_nevents_per_timestep + + sf, PE, kind = self.__scaling_factors( + self._reference.mechanism, + process_statistics, + quasieq_th=self.scaling_partial_equilibrium_index_threshold, + delta=self.scaling_upper_bound, + ) + + scm.plams.log( + " " + + " %5s" % "id" + + " %10s" % "PE" + + " %8s" % "kind" + + " %15s" % "orig_pexp" + + " %15s" % "sf" + + " %15s" % "new_pexp" + + " label" + ) self._scaling_factors = [] - for i,rxn in enumerate(self._reference.mechanism): + for i, rxn in enumerate(self._reference.mechanism): old = rxn.pre_expon - new = old*sf[i] - self._scaling_factors.append( sf[i] ) - - scm.plams.log(" "+" %5d"%i+" %10.5f"%PE[i]+" %8s"%kind[i]+" %15.5e"%old+" %15.5e"%sf[i]+" %15.5e"%new+" "+rxn.label()) - - self._scaling_status = 'finished' + new 
= old * sf[i] + self._scaling_factors.append(sf[i]) + + scm.plams.log( + " " + + " %5d" % i + + " %10.5f" % PE[i] + + " %8s" % kind[i] + + " %15.5e" % old + + " %15.5e" % sf[i] + + " %15.5e" % new + + " " + + rxn.label() + ) + + self._scaling_status = "finished" scaling_job = self.children.pop() - #scm.plams.delete_job( scaling_job ) - + # scm.plams.delete_job( scaling_job ) def new_children(self): - """ - """ + """ """ - if self._scaling and self._scaling_status != 'finished': - if self._scaling_status == 'requested': + if self._scaling and self._scaling_status != "finished": + if self._scaling_status == "requested": return self.__scaling_factors_step_start() - elif self._scaling_status == 'started': + elif self._scaling_status == "started": self.__scaling_factors_step_end() return self.__steady_state_step() - def check(self): """ Look for the normal termination signal in the output. Note, that it does not mean your calculation was successful! @@ -732,5 +807,4 @@ def check(self): if len(self._history) == 0: return False else: - return all(self._history[-1]['converged'].values()) - + return all(self._history[-1]["converged"].values()) diff --git a/doc/source/conf.py b/doc/source/conf.py index 30c0002..e37c26c 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -6,33 +6,37 @@ from pathlib import Path import sys + def setup(app): # For sphinx autodoc, we need the pyzacros module in the path. # So, here we add it to the path (in a quite hackish way...) sys.path.insert(0, str(Path(__file__).parent.parent.parent.resolve())) -if not tags.has('scm_theme'): - sys.exit("This conf.py file is for building the documentation within SCM's setups. You need the scm_theme and the SCM global_conf." ) + +if not tags.has("scm_theme"): + sys.exit( + "This conf.py file is for building the documentation within SCM's setups. You need the scm_theme and the SCM global_conf." + ) from global_conf import * -project, htmlhelp_basename, latex_documents = set_project_specific_var ('pyZacros') +project, htmlhelp_basename, latex_documents = set_project_specific_var("pyZacros") # html_logo = '_static/pyZacros_logo_compact.svg' extensions += [ - 'sphinx.ext.autodoc', + "sphinx.ext.autodoc", ] # Avoid duplicate names by prefixing the document's name. -#autosectionlabel_prefix_document = True +# autosectionlabel_prefix_document = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = False # The name of the Pygments (syntax highlighting) style to use. 
-pygments_style = 'sphinx' +pygments_style = "sphinx" -autodoc_default_options = {'members':True, 'private-members':True, 'special-members':True} -autodoc_member_order = 'bysource' +autodoc_default_options = {"members": True, "private-members": True, "special-members": True} +autodoc_member_order = "bysource" diff --git a/examples/CO+Pt111/CO+Pt111.py b/examples/CO+Pt111/CO+Pt111.py index f3abb83..c699ce0 100644 --- a/examples/CO+Pt111/CO+Pt111.py +++ b/examples/CO+Pt111/CO+Pt111.py @@ -1,59 +1,59 @@ import scm.plams import scm.pyzacros -mol = scm.plams.Molecule( 'CO_ads+Pt111.xyz' ) +mol = scm.plams.Molecule("CO_ads+Pt111.xyz") scm.plams.init() engine_sett = scm.plams.Settings() -engine_sett.input.ReaxFF.ForceField = 'CHONSFPtClNi.ff' -engine_sett.input.ReaxFF.Charges.Solver = 'Direct' +engine_sett.input.ReaxFF.ForceField = "CHONSFPtClNi.ff" +engine_sett.input.ReaxFF.Charges.Solver = "Direct" sett_ads = scm.plams.Settings() -sett_ads.input.ams.Constraints.FixedRegion = 'surface' +sett_ads.input.ams.Constraints.FixedRegion = "surface" sett_ads.input.ams.Task = "PESExploration" -sett_ads.input.ams.PESExploration.Job = 'ProcessSearch' +sett_ads.input.ams.PESExploration.Job = "ProcessSearch" sett_ads.input.ams.PESExploration.RandomSeed = 100 sett_ads.input.ams.PESExploration.NumExpeditions = 30 sett_ads.input.ams.PESExploration.NumExplorers = 4 sett_ads.input.ams.PESExploration.SaddleSearch.MaxEnergy = 2.0 -sett_ads.input.ams.PESExploration.DynamicSeedStates = 'T' -sett_ads.input.ams.PESExploration.CalculateFragments = 'T' -sett_ads.input.ams.PESExploration.StatesAlignment.ReferenceRegion = 'surface' +sett_ads.input.ams.PESExploration.DynamicSeedStates = "T" +sett_ads.input.ams.PESExploration.CalculateFragments = "T" +sett_ads.input.ams.PESExploration.StatesAlignment.ReferenceRegion = "surface" sett_ads.input.ams.PESExploration.StructureComparison.DistanceDifference = 0.2 sett_ads.input.ams.PESExploration.StructureComparison.NeighborCutoff = 2.5 sett_ads.input.ams.PESExploration.StructureComparison.EnergyDifference = 0.05 -sett_ads.input.ams.PESExploration.StructureComparison.CheckSymmetry = 'T' -sett_ads.input.ams.PESExploration.BindingSites.Calculate = 'T' +sett_ads.input.ams.PESExploration.StructureComparison.CheckSymmetry = "T" +sett_ads.input.ams.PESExploration.BindingSites.Calculate = "T" sett_ads.input.ams.PESExploration.BindingSites.DistanceDifference = 0.1 -job = scm.plams.AMSJob(name='pes_exploration', molecule=mol, settings=sett_ads+engine_sett) +job = scm.plams.AMSJob(name="pes_exploration", molecule=mol, settings=sett_ads + engine_sett) results_ads = job.run() energy_landscape = results_ads.get_energy_landscape() print(energy_landscape) sett_bs = sett_ads.copy() -sett_ads.input.ams.PESExploration.Job = 'BindingSites' -sett_bs.input.ams.PESExploration.LoadEnergyLandscape.Path= '../pes_exploration' -sett_bs.input.ams.PESExploration.LoadEnergyLandscape.GenerateSymmetryImages = 'T' -sett_bs.input.ams.PESExploration.CalculateFragments = 'F' -sett_bs.input.ams.PESExploration.StructureComparison.CheckSymmetry = 'F' +sett_ads.input.ams.PESExploration.Job = "BindingSites" +sett_bs.input.ams.PESExploration.LoadEnergyLandscape.Path = "../pes_exploration" +sett_bs.input.ams.PESExploration.LoadEnergyLandscape.GenerateSymmetryImages = "T" +sett_bs.input.ams.PESExploration.CalculateFragments = "F" +sett_bs.input.ams.PESExploration.StructureComparison.CheckSymmetry = "F" -job = scm.plams.AMSJob(name='binding_sites', molecule=mol, settings=sett_bs+engine_sett) +job = 
scm.plams.AMSJob(name="binding_sites", molecule=mol, settings=sett_bs + engine_sett) results_bs = job.run() -loader_ads = scm.pyzacros.RKFLoader( results_ads ) -loader_ads.replace_site_types( ['N33','N221','N331'], ['fcc','br','hcp'] ) -loader_bs = scm.pyzacros.RKFLoader( results_bs ) -loader_bs.replace_site_types( ['N33','N221','N331'], ['fcc','br','hcp'] ) +loader_ads = scm.pyzacros.RKFLoader(results_ads) +loader_ads.replace_site_types(["N33", "N221", "N331"], ["fcc", "br", "hcp"]) +loader_bs = scm.pyzacros.RKFLoader(results_bs) +loader_bs.replace_site_types(["N33", "N221", "N331"], ["fcc", "br", "hcp"]) print(loader_ads.clusterExpansion) print(loader_ads.mechanism) print(loader_bs.lattice) loader_bs.lattice.plot() -loader_bs.lattice.set_repeat_cell( (10,10) ) +loader_bs.lattice.set_repeat_cell((10, 10)) loader_bs.lattice.plot() settings = scm.pyzacros.Settings() @@ -63,18 +63,21 @@ settings.molar_fraction.CO = 0.1 dt = 1e-8 -settings.max_time = 1000*dt -settings.snapshots = ('logtime', dt, 3.5) -settings.species_numbers = ('time', dt) - -job = scm.pyzacros.ZacrosJob( name='zacros_job', lattice=loader_bs.lattice, - mechanism=loader_ads.mechanism, - cluster_expansion=loader_ads.clusterExpansion, - settings=settings ) +settings.max_time = 1000 * dt +settings.snapshots = ("logtime", dt, 3.5) +settings.species_numbers = ("time", dt) + +job = scm.pyzacros.ZacrosJob( + name="zacros_job", + lattice=loader_bs.lattice, + mechanism=loader_ads.mechanism, + cluster_expansion=loader_ads.clusterExpansion, + settings=settings, +) results_pz = job.run() if job.ok(): - results_pz.plot_lattice_states( results_pz.lattice_states() ) - results_pz.plot_molecule_numbers( ["CO*"] ) + results_pz.plot_lattice_states(results_pz.lattice_states()) + results_pz.plot_molecule_numbers(["CO*"]) scm.plams.finish() diff --git a/examples/LangmuirHinshelwood/CoveragesAndReactionRate.ipynb b/examples/LangmuirHinshelwood/CoveragesAndReactionRate.ipynb index c7e107b..359dea3 100644 --- a/examples/LangmuirHinshelwood/CoveragesAndReactionRate.ipynb +++ b/examples/LangmuirHinshelwood/CoveragesAndReactionRate.ipynb @@ -122,7 +122,7 @@ "source": [ "maxjobs = multiprocessing.cpu_count()\n", "scm.plams.config.default_jobrunner = scm.plams.JobRunner(parallel=True, maxjobs=maxjobs)\n", - "print('Running up to {} jobs in parallel simultaneously'.format(maxjobs))" + "print(\"Running up to {} jobs in parallel simultaneously\".format(maxjobs))" ] }, { @@ -186,13 +186,13 @@ "z_sett = pz.Settings()\n", "z_sett.temperature = 500.0\n", "z_sett.pressure = 1.000\n", - "z_sett.species_numbers = ('time', 1.0e-5)\n", - "z_sett.max_time = 100*1.0e-5\n", + "z_sett.species_numbers = (\"time\", 1.0e-5)\n", + "z_sett.max_time = 100 * 1.0e-5\n", "z_sett.random_seed = 1609\n", "\n", - "z_job = pz.ZacrosJob( settings=z_sett, lattice=lh.lattice,\n", - " mechanism=lh.mechanism,\n", - " cluster_expansion=lh.cluster_expansion )" + "z_job = pz.ZacrosJob(\n", + " settings=z_sett, lattice=lh.lattice, mechanism=lh.mechanism, cluster_expansion=lh.cluster_expansion\n", + ")" ] }, { @@ -237,15 +237,13 @@ "ss_sett = pz.Settings()\n", "ss_sett.turnover_frequency.confidence = 0.95\n", "ss_sett.turnover_frequency.nreplicas = 4\n", - "ss_sett.scaling.enabled = 'T'\n", - "ss_sett.scaling.max_time = 60*1e-5\n", + "ss_sett.scaling.enabled = \"T\"\n", + "ss_sett.scaling.max_time = 60 * 1e-5\n", "\n", "ss_params = pz.ZacrosSteadyStateJob.Parameters()\n", - "ss_params.add( 'max_time', 'restart.max_time',\n", - " 2*z_sett.max_time*( numpy.arange(10)+1 )**2 )\n", + 
"ss_params.add(\"max_time\", \"restart.max_time\", 2 * z_sett.max_time * (numpy.arange(10) + 1) ** 2)\n", "\n", - "ss_job = pz.ZacrosSteadyStateJob( settings=ss_sett, reference=z_job,\n", - " parameters=ss_params )" + "ss_job = pz.ZacrosSteadyStateJob(settings=ss_sett, reference=z_job, parameters=ss_params)" ] }, { @@ -295,10 +293,10 @@ ], "source": [ "ps_params = pz.ZacrosParametersScanJob.Parameters()\n", - "ps_params.add( 'x_CO', 'molar_fraction.CO', numpy.linspace(0.05, 0.95, 11) )\n", - "ps_params.add( 'x_O2', 'molar_fraction.O2', lambda params: 1.0-params['x_CO'] )\n", + "ps_params.add(\"x_CO\", \"molar_fraction.CO\", numpy.linspace(0.05, 0.95, 11))\n", + "ps_params.add(\"x_O2\", \"molar_fraction.O2\", lambda params: 1.0 - params[\"x_CO\"])\n", "\n", - "ps_job = pz.ZacrosParametersScanJob( reference=ss_job, parameters=ps_params )" + "ps_job = pz.ZacrosParametersScanJob(reference=ss_job, parameters=ps_params)" ] }, { @@ -1838,7 +1836,7 @@ "results = ps_job.run()\n", "\n", "if not ps_job.ok():\n", - " print('Something went wrong!')" + " print(\"Something went wrong!\")" ] }, { @@ -1892,19 +1890,19 @@ "TOF_CO2 = []\n", "\n", "results_dict = results.turnover_frequency()\n", - "results_dict = results.average_coverage( last=10, update=results_dict )\n", + "results_dict = results.average_coverage(last=10, update=results_dict)\n", "\n", "for i in range(len(results_dict)):\n", - " x_CO.append( results_dict[i]['x_CO'] )\n", - " ac_O.append( results_dict[i]['average_coverage']['O*'] )\n", - " ac_CO.append( results_dict[i]['average_coverage']['CO*'] )\n", - " TOF_CO2.append( results_dict[i]['turnover_frequency']['CO2'] )\n", + " x_CO.append(results_dict[i][\"x_CO\"])\n", + " ac_O.append(results_dict[i][\"average_coverage\"][\"O*\"])\n", + " ac_CO.append(results_dict[i][\"average_coverage\"][\"CO*\"])\n", + " TOF_CO2.append(results_dict[i][\"turnover_frequency\"][\"CO2\"])\n", "\n", - "print( '------------------------------------------------' )\n", - "print( '%4s'%'cond', '%8s'%'x_CO', '%10s'%'ac_O', '%10s'%'ac_CO', '%12s'%'TOF_CO2' )\n", - "print( '------------------------------------------------' )\n", + "print(\"------------------------------------------------\")\n", + "print(\"%4s\" % \"cond\", \"%8s\" % \"x_CO\", \"%10s\" % \"ac_O\", \"%10s\" % \"ac_CO\", \"%12s\" % \"TOF_CO2\")\n", + "print(\"------------------------------------------------\")\n", "for i in range(len(x_CO)):\n", - " print( '%4d'%i, '%8.2f'%x_CO[i], '%10.6f'%ac_O[i], '%10.6f'%ac_CO[i], '%12.6f'%TOF_CO2[i] )" + " print(\"%4d\" % i, \"%8.2f\" % x_CO[i], \"%10.6f\" % ac_O[i], \"%10.6f\" % ac_CO[i], \"%12.6f\" % TOF_CO2[i])" ] }, { diff --git a/examples/LangmuirHinshelwood/CoveragesAndReactionRate.py b/examples/LangmuirHinshelwood/CoveragesAndReactionRate.py index c9b5583..57926ea 100644 --- a/examples/LangmuirHinshelwood/CoveragesAndReactionRate.py +++ b/examples/LangmuirHinshelwood/CoveragesAndReactionRate.py @@ -14,7 +14,7 @@ # considerably increasing the computational cost. This tutorial shows how to speed up # the calculation by several orders of magnitude without sacrificing precision by # automatically detecting and scaling the rate constants of fast reactions. -# +# # We will focus on the net reaction $\text{CO}+\frac{1}{2}\text{O}_2\longrightarrow \text{CO}_2$ # that takes place at a catalyst's surface and whose reaction mechanism is described by # the Langmuir-Hinshelwood model. 
Because this model has four very fast processes @@ -35,7 +35,7 @@ import scm.pyzacros.models -# Then, we initialize the **pyZacros** environment. +# Then, we initialize the **pyZacros** environment. scm.pyzacros.init() @@ -48,11 +48,11 @@ # it should take around 20 min to complete. So, in order to speed things up, we'll # use the ``plams.JobRunner`` class to run as many parallel instances as possible. In this # case, we choose to use the maximum number of simultaneous processes (``maxjobs``) equal -# to the number of processors in the machine. +# to the number of processors in the machine. maxjobs = multiprocessing.cpu_count() scm.plams.config.default_jobrunner = scm.plams.JobRunner(parallel=True, maxjobs=maxjobs) -print('Running up to {} jobs in parallel simultaneously'.format(maxjobs)) +print("Running up to {} jobs in parallel simultaneously".format(maxjobs)) # First, we initialize our Langmuir-Hinshelwood model, which by luck is available as a @@ -67,7 +67,7 @@ # ``ZacrosJob``. So, We will go through them one at a time: # **1. Setting up the ZacrosJob** -# +# # For ``ZacrosJob``, all parameters are set using a ``Setting`` object. To begin, # we define the physical parameters: ``temperature`` (in K), and ``pressure`` # (in bar). The calculation parameters are then set: ``species numbers`` (in s) @@ -84,17 +84,17 @@ z_sett = pz.Settings() z_sett.temperature = 500.0 z_sett.pressure = 1.000 -z_sett.species_numbers = ('time', 1.0e-5) -z_sett.max_time = 100*1.0e-5 +z_sett.species_numbers = ("time", 1.0e-5) +z_sett.max_time = 100 * 1.0e-5 z_sett.random_seed = 1609 -z_job = pz.ZacrosJob( settings=z_sett, lattice=lh.lattice, - mechanism=lh.mechanism, - cluster_expansion=lh.cluster_expansion ) +z_job = pz.ZacrosJob( + settings=z_sett, lattice=lh.lattice, mechanism=lh.mechanism, cluster_expansion=lh.cluster_expansion +) # **2. Setting up the ZacrosSteadyStateJob** -# +# # We also need to create a ``Setting`` object for ``ZacrosJob`` There, we ask for a # steady-state configuration using a TOFs calculation with a 95% confidence level # (``turnover frequency.confidence``), using four replicas to speed up the calculation @@ -114,19 +114,17 @@ ss_sett = pz.Settings() ss_sett.turnover_frequency.confidence = 0.95 ss_sett.turnover_frequency.nreplicas = 4 -ss_sett.scaling.enabled = 'T' -ss_sett.scaling.max_time = 60*1e-5 +ss_sett.scaling.enabled = "T" +ss_sett.scaling.max_time = 60 * 1e-5 ss_params = pz.ZacrosSteadyStateJob.Parameters() -ss_params.add( 'max_time', 'restart.max_time', - 2*z_sett.max_time*( numpy.arange(10)+1 )**2 ) +ss_params.add("max_time", "restart.max_time", 2 * z_sett.max_time * (numpy.arange(10) + 1) ** 2) -ss_job = pz.ZacrosSteadyStateJob( settings=ss_sett, reference=z_job, - parameters=ss_params ) +ss_job = pz.ZacrosSteadyStateJob(settings=ss_sett, reference=z_job, parameters=ss_params) # **3. Setting up the ZacrosParametersScanJob** -# +# # Although the ``ZacrosParametersScanJob`` does not require a ``Setting`` object, # it does require a ``ZacrosSteadyStateJob.Parameters`` object to specify which # parameters must be modified systematically. In this instance, all we need is a @@ -136,13 +134,13 @@ # will be used internally to replace ``molar fraction.CO`` and ``molar fraction.O2`` # in the Zacros input files. 
Then, using the ``ZacrosSteadyStateJob`` defined # earlier (``ss job``) and the parameters we just defined (``ps params``), we -# create the ``ZacrosParametersScanJob``: +# create the ``ZacrosParametersScanJob``: ps_params = pz.ZacrosParametersScanJob.Parameters() -ps_params.add( 'x_CO', 'molar_fraction.CO', numpy.linspace(0.05, 0.95, 11) ) -ps_params.add( 'x_O2', 'molar_fraction.O2', lambda params: 1.0-params['x_CO'] ) +ps_params.add("x_CO", "molar_fraction.CO", numpy.linspace(0.05, 0.95, 11)) +ps_params.add("x_O2", "molar_fraction.O2", lambda params: 1.0 - params["x_CO"]) -ps_job = pz.ZacrosParametersScanJob( reference=ss_job, parameters=ps_params ) +ps_job = pz.ZacrosParametersScanJob(reference=ss_job, parameters=ps_params) # The parameters scan calculation setup is ready. Therefore, we can start it @@ -155,11 +153,11 @@ results = ps_job.run() if not ps_job.ok(): - print('Something went wrong!') + print("Something went wrong!") # If the execution got up to this point, everything worked as expected. Hooray! -# +# # Finally, in the following lines, we just nicely print the results in a table. See # the API documentation to learn more about how the ``results`` object is structured, # and the available methods. In this case, we use the ``turnover_frequency()`` and @@ -173,19 +171,19 @@ TOF_CO2 = [] results_dict = results.turnover_frequency() -results_dict = results.average_coverage( last=10, update=results_dict ) +results_dict = results.average_coverage(last=10, update=results_dict) for i in range(len(results_dict)): - x_CO.append( results_dict[i]['x_CO'] ) - ac_O.append( results_dict[i]['average_coverage']['O*'] ) - ac_CO.append( results_dict[i]['average_coverage']['CO*'] ) - TOF_CO2.append( results_dict[i]['turnover_frequency']['CO2'] ) - -print( '------------------------------------------------' ) -print( '%4s'%'cond', '%8s'%'x_CO', '%10s'%'ac_O', '%10s'%'ac_CO', '%12s'%'TOF_CO2' ) -print( '------------------------------------------------' ) + x_CO.append(results_dict[i]["x_CO"]) + ac_O.append(results_dict[i]["average_coverage"]["O*"]) + ac_CO.append(results_dict[i]["average_coverage"]["CO*"]) + TOF_CO2.append(results_dict[i]["turnover_frequency"]["CO2"]) + +print("------------------------------------------------") +print("%4s" % "cond", "%8s" % "x_CO", "%10s" % "ac_O", "%10s" % "ac_CO", "%12s" % "TOF_CO2") +print("------------------------------------------------") for i in range(len(x_CO)): - print( '%4d'%i, '%8.2f'%x_CO[i], '%10.6f'%ac_O[i], '%10.6f'%ac_CO[i], '%12.6f'%TOF_CO2[i] ) + print("%4d" % i, "%8.2f" % x_CO[i], "%10.6f" % ac_O[i], "%10.6f" % ac_CO[i], "%12.6f" % TOF_CO2[i]) # The results table above demonstrates that when $CO$ coverage rises, the net $CO$ @@ -201,4 +199,3 @@ # Now, we can close the pyZacros environment: scm.pyzacros.finish() - diff --git a/examples/LangmuirHinshelwood/CoveragesAndReactionRate_ViewResults.ipynb b/examples/LangmuirHinshelwood/CoveragesAndReactionRate_ViewResults.ipynb index ce28306..007617c 100644 --- a/examples/LangmuirHinshelwood/CoveragesAndReactionRate_ViewResults.ipynb +++ b/examples/LangmuirHinshelwood/CoveragesAndReactionRate_ViewResults.ipynb @@ -44,7 +44,7 @@ "\n", "scm.pyzacros.init()\n", "\n", - "job = scm.pyzacros.load( 'plams_workdir/plamsjob/plamsjob.dill' )\n", + "job = scm.pyzacros.load(\"plams_workdir/plamsjob/plamsjob.dill\")\n", "results = job.results\n", "\n", "scm.pyzacros.finish()" @@ -93,19 +93,19 @@ "TOF_CO2 = []\n", "\n", "results_dict = results.turnover_frequency()\n", - "results_dict = results.average_coverage( last=10, 
update=results_dict )\n", + "results_dict = results.average_coverage(last=10, update=results_dict)\n", "\n", "for i in range(len(results_dict)):\n", - " x_CO.append( results_dict[i]['x_CO'] )\n", - " ac_O.append( results_dict[i]['average_coverage']['O*'] )\n", - " ac_CO.append( results_dict[i]['average_coverage']['CO*'] )\n", - " TOF_CO2.append( results_dict[i]['turnover_frequency']['CO2'] )\n", + " x_CO.append(results_dict[i][\"x_CO\"])\n", + " ac_O.append(results_dict[i][\"average_coverage\"][\"O*\"])\n", + " ac_CO.append(results_dict[i][\"average_coverage\"][\"CO*\"])\n", + " TOF_CO2.append(results_dict[i][\"turnover_frequency\"][\"CO2\"])\n", "\n", - "print( '------------------------------------------------' )\n", - "print( '%4s'%'cond', '%8s'%'x_CO', '%10s'%'ac_O', '%10s'%'ac_CO', '%12s'%'TOF_CO2' )\n", - "print( '------------------------------------------------' )\n", + "print(\"------------------------------------------------\")\n", + "print(\"%4s\" % \"cond\", \"%8s\" % \"x_CO\", \"%10s\" % \"ac_O\", \"%10s\" % \"ac_CO\", \"%12s\" % \"TOF_CO2\")\n", + "print(\"------------------------------------------------\")\n", "for i in range(len(x_CO)):\n", - " print( '%4d'%i, '%8.2f'%x_CO[i], '%10.6f'%ac_O[i], '%10.6f'%ac_CO[i], '%12.6f'%TOF_CO2[i] )" + " print(\"%4d\" % i, \"%8.2f\" % x_CO[i], \"%10.6f\" % ac_O[i], \"%10.6f\" % ac_CO[i], \"%12.6f\" % TOF_CO2[i])" ] }, { @@ -142,17 +142,17 @@ "\n", "ax = plt.axes()\n", "ax.set_xlim([0.0, 1.0])\n", - "ax.set_xlabel('Molar Fraction CO', fontsize=14)\n", - "ax.set_ylabel('Coverage Fraction (%)', color='blue', fontsize=14)\n", - "ax.plot(x_CO, ac_O, marker='$\\u25CF$', color='blue', linestyle='-.', markersize=4, zorder=2)\n", - "ax.plot(x_CO, ac_CO, marker='$\\u25EF$', color='blue', markersize=4, zorder=4)\n", - "plt.text(0.3, 0.60, 'O*', fontsize=18, color='blue')\n", - "plt.text(0.7, 0.45, 'CO*', fontsize=18, color='blue')\n", + "ax.set_xlabel(\"Molar Fraction CO\", fontsize=14)\n", + "ax.set_ylabel(\"Coverage Fraction (%)\", color=\"blue\", fontsize=14)\n", + "ax.plot(x_CO, ac_O, marker=\"$\\u25CF$\", color=\"blue\", linestyle=\"-.\", markersize=4, zorder=2)\n", + "ax.plot(x_CO, ac_CO, marker=\"$\\u25EF$\", color=\"blue\", markersize=4, zorder=4)\n", + "plt.text(0.3, 0.60, \"O*\", fontsize=18, color=\"blue\")\n", + "plt.text(0.7, 0.45, \"CO*\", fontsize=18, color=\"blue\")\n", "\n", "ax2 = ax.twinx()\n", - "ax2.set_ylabel('TOF (mol/s/site)',color='red', fontsize=14)\n", - "ax2.plot(x_CO, TOF_CO2, marker='$\\u25EF$', color='red', markersize=4, zorder=6)\n", - "plt.text(0.3, 200.0, 'CO$_2$', fontsize=18, color='red')\n", + "ax2.set_ylabel(\"TOF (mol/s/site)\", color=\"red\", fontsize=14)\n", + "ax2.plot(x_CO, TOF_CO2, marker=\"$\\u25EF$\", color=\"red\", markersize=4, zorder=6)\n", + "plt.text(0.3, 200.0, \"CO$_2$\", fontsize=18, color=\"red\")\n", "\n", "plt.show()" ] @@ -241,11 +241,11 @@ "\n", "lh = pz.models.LangmuirHinshelwood()\n", "\n", - "B_CO = lh.mechanism.find_one( 'CO_adsorption' ).pe_ratio\n", - "B_O2 = lh.mechanism.find_one( 'O2_adsorption' ).pe_ratio\n", - "A_oxi = lh.mechanism.find_one( 'CO_oxidation' ).pre_expon\n", + "B_CO = lh.mechanism.find_one(\"CO_adsorption\").pe_ratio\n", + "B_O2 = lh.mechanism.find_one(\"O2_adsorption\").pe_ratio\n", + "A_oxi = lh.mechanism.find_one(\"CO_oxidation\").pre_expon\n", "\n", - "x_CO_model = numpy.linspace(0.0,1.0,201)\n", + "x_CO_model = numpy.linspace(0.0, 1.0, 201)\n", "\n", "ac_O_model = []\n", "ac_CO_model = []\n", @@ -253,9 +253,9 @@ "\n", "for i in range(len(x_CO_model)):\n", " x_O2 = 
1 - x_CO_model[i]\n", - " ac_O_model.append( numpy.sqrt(B_O2*x_O2)/( 1 + B_CO*x_CO_model[i] + numpy.sqrt(B_O2*x_O2) ) )\n", - " ac_CO_model.append( B_CO*x_CO_model[i]/( 1 + B_CO*x_CO_model[i] + numpy.sqrt(B_O2*x_O2) ) )\n", - " TOF_CO2_model.append( 6*A_oxi*ac_CO_model[i]*ac_O_model[i] )" + " ac_O_model.append(numpy.sqrt(B_O2 * x_O2) / (1 + B_CO * x_CO_model[i] + numpy.sqrt(B_O2 * x_O2)))\n", + " ac_CO_model.append(B_CO * x_CO_model[i] / (1 + B_CO * x_CO_model[i] + numpy.sqrt(B_O2 * x_O2)))\n", + " TOF_CO2_model.append(6 * A_oxi * ac_CO_model[i] * ac_O_model[i])" ] }, { @@ -290,27 +290,33 @@ "source": [ "import matplotlib.pyplot as plt\n", "\n", - "x_CO_max = (B_O2/B_CO**2/2.0)*(numpy.sqrt(1.0+4.0*B_CO**2/B_O2)-1.0)\n", + "x_CO_max = (B_O2 / B_CO**2 / 2.0) * (numpy.sqrt(1.0 + 4.0 * B_CO**2 / B_O2) - 1.0)\n", "\n", "fig = plt.figure()\n", "\n", "ax = plt.axes()\n", "ax.set_xlim([0.0, 1.0])\n", - "ax.set_xlabel('Molar Fraction CO', fontsize=14)\n", - "ax.set_ylabel('Coverage Fraction (%)', color='blue', fontsize=14)\n", - "ax.vlines( x_CO_max, 0, max(ac_O_model), colors='0.8', linestyles='--',)\n", - "ax.plot(x_CO_model, ac_O_model, color='blue', linestyle='-.', lw=2, zorder=1)\n", - "ax.plot(x_CO, ac_O, marker='$\\u25CF$', color='blue', lw=0, markersize=4, zorder=2)\n", - "ax.plot(x_CO_model, ac_CO_model, color='blue', linestyle='-', lw=2, zorder=3)\n", - "ax.plot(x_CO, ac_CO, marker='$\\u25EF$', color='blue', markersize=4, lw=0, zorder=4)\n", - "plt.text(0.3, 0.60, 'O*', fontsize=18, color='blue')\n", - "plt.text(0.7, 0.45, 'CO*', fontsize=18, color='blue')\n", + "ax.set_xlabel(\"Molar Fraction CO\", fontsize=14)\n", + "ax.set_ylabel(\"Coverage Fraction (%)\", color=\"blue\", fontsize=14)\n", + "ax.vlines(\n", + " x_CO_max,\n", + " 0,\n", + " max(ac_O_model),\n", + " colors=\"0.8\",\n", + " linestyles=\"--\",\n", + ")\n", + "ax.plot(x_CO_model, ac_O_model, color=\"blue\", linestyle=\"-.\", lw=2, zorder=1)\n", + "ax.plot(x_CO, ac_O, marker=\"$\\u25CF$\", color=\"blue\", lw=0, markersize=4, zorder=2)\n", + "ax.plot(x_CO_model, ac_CO_model, color=\"blue\", linestyle=\"-\", lw=2, zorder=3)\n", + "ax.plot(x_CO, ac_CO, marker=\"$\\u25EF$\", color=\"blue\", markersize=4, lw=0, zorder=4)\n", + "plt.text(0.3, 0.60, \"O*\", fontsize=18, color=\"blue\")\n", + "plt.text(0.7, 0.45, \"CO*\", fontsize=18, color=\"blue\")\n", "\n", "ax2 = ax.twinx()\n", - "ax2.set_ylabel('TOF (mol/s/site)',color='red', fontsize=14)\n", - "ax2.plot(x_CO_model, TOF_CO2_model, color='red', linestyle='-', lw=2, zorder=5)\n", - "ax2.plot(x_CO, TOF_CO2, marker='$\\u25EF$', color='red', markersize=4, lw=0, zorder=6)\n", - "plt.text(0.3, 200.0, 'CO$_2$', fontsize=18, color='red')\n", + "ax2.set_ylabel(\"TOF (mol/s/site)\", color=\"red\", fontsize=14)\n", + "ax2.plot(x_CO_model, TOF_CO2_model, color=\"red\", linestyle=\"-\", lw=2, zorder=5)\n", + "ax2.plot(x_CO, TOF_CO2, marker=\"$\\u25EF$\", color=\"red\", markersize=4, lw=0, zorder=6)\n", + "plt.text(0.3, 200.0, \"CO$_2$\", fontsize=18, color=\"red\")\n", "\n", "plt.show()" ] diff --git a/examples/LangmuirHinshelwood/CoveragesAndReactionRate_ViewResults.py b/examples/LangmuirHinshelwood/CoveragesAndReactionRate_ViewResults.py index 00013c6..53864c7 100644 --- a/examples/LangmuirHinshelwood/CoveragesAndReactionRate_ViewResults.py +++ b/examples/LangmuirHinshelwood/CoveragesAndReactionRate_ViewResults.py @@ -11,26 +11,26 @@ # create a new working directory by appending a sequential number to its name, # for example, ``plams_workdir.001``. 
If this is the case, simply replace # ``plams_workdir`` with the appropriate value. -# +# # So, let's get started! -# +# # First, we load the required packages and retrieve the ``ZacrosParametersScanJob`` # (``job``) and corresponding results object (``results``) from the working -# directory, as shown below: +# directory, as shown below: import scm.pyzacros as pz import scm.pyzacros.models scm.pyzacros.init() -job = scm.pyzacros.load( 'plams_workdir/plamsjob/plamsjob.dill' ) +job = scm.pyzacros.load("plams_workdir/plamsjob/plamsjob.dill") results = job.results scm.pyzacros.finish() # To be certain, we generate and print the same summary table from the end of -# the first part of the tutorial. They must be exactly the same: +# the first part of the tutorial. They must be exactly the same: x_CO = [] ac_O = [] @@ -38,19 +38,19 @@ TOF_CO2 = [] results_dict = results.turnover_frequency() -results_dict = results.average_coverage( last=10, update=results_dict ) +results_dict = results.average_coverage(last=10, update=results_dict) for i in range(len(results_dict)): - x_CO.append( results_dict[i]['x_CO'] ) - ac_O.append( results_dict[i]['average_coverage']['O*'] ) - ac_CO.append( results_dict[i]['average_coverage']['CO*'] ) - TOF_CO2.append( results_dict[i]['turnover_frequency']['CO2'] ) - -print( '------------------------------------------------' ) -print( '%4s'%'cond', '%8s'%'x_CO', '%10s'%'ac_O', '%10s'%'ac_CO', '%12s'%'TOF_CO2' ) -print( '------------------------------------------------' ) + x_CO.append(results_dict[i]["x_CO"]) + ac_O.append(results_dict[i]["average_coverage"]["O*"]) + ac_CO.append(results_dict[i]["average_coverage"]["CO*"]) + TOF_CO2.append(results_dict[i]["turnover_frequency"]["CO2"]) + +print("------------------------------------------------") +print("%4s" % "cond", "%8s" % "x_CO", "%10s" % "ac_O", "%10s" % "ac_CO", "%12s" % "TOF_CO2") +print("------------------------------------------------") for i in range(len(x_CO)): - print( '%4d'%i, '%8.2f'%x_CO[i], '%10.6f'%ac_O[i], '%10.6f'%ac_CO[i], '%12.6f'%TOF_CO2[i] ) + print("%4d" % i, "%8.2f" % x_CO[i], "%10.6f" % ac_O[i], "%10.6f" % ac_CO[i], "%12.6f" % TOF_CO2[i]) # Additionally, you can see the aforementioned results visually if you have @@ -63,17 +63,17 @@ ax = plt.axes() ax.set_xlim([0.0, 1.0]) -ax.set_xlabel('Molar Fraction CO', fontsize=14) -ax.set_ylabel('Coverage Fraction (%)', color='blue', fontsize=14) -ax.plot(x_CO, ac_O, marker='$\u25CF$', color='blue', linestyle='-.', markersize=4, zorder=2) -ax.plot(x_CO, ac_CO, marker='$\u25EF$', color='blue', markersize=4, zorder=4) -plt.text(0.3, 0.60, 'O*', fontsize=18, color='blue') -plt.text(0.7, 0.45, 'CO*', fontsize=18, color='blue') +ax.set_xlabel("Molar Fraction CO", fontsize=14) +ax.set_ylabel("Coverage Fraction (%)", color="blue", fontsize=14) +ax.plot(x_CO, ac_O, marker="$\u25CF$", color="blue", linestyle="-.", markersize=4, zorder=2) +ax.plot(x_CO, ac_CO, marker="$\u25EF$", color="blue", markersize=4, zorder=4) +plt.text(0.3, 0.60, "O*", fontsize=18, color="blue") +plt.text(0.7, 0.45, "CO*", fontsize=18, color="blue") ax2 = ax.twinx() -ax2.set_ylabel('TOF (mol/s/site)',color='red', fontsize=14) -ax2.plot(x_CO, TOF_CO2, marker='$\u25EF$', color='red', markersize=4, zorder=6) -plt.text(0.3, 200.0, 'CO$_2$', fontsize=18, color='red') +ax2.set_ylabel("TOF (mol/s/site)", color="red", fontsize=14) +ax2.plot(x_CO, TOF_CO2, marker="$\u25EF$", color="red", markersize=4, zorder=6) +plt.text(0.3, 200.0, "CO$_2$", fontsize=18, color="red") plt.show() @@ -104,7 +104,7 @@ # 
overall quality of our results. Our results should be indistinguishable # from the Langmuir-Hinshelwood deterministic equations, which are shown # below: -# +# # $$ # \begin{gather} # \theta_\text{O} = \frac{ \sqrt{B_{\text{O}_2}x_{\text{O}_2}} }{ 1 + B_\text{CO}x_\text{CO} + \sqrt{B_{\text{O}_2}x_{\text{O}_2}} } \\ @@ -112,7 +112,7 @@ # \text{TOF}_{\text{CO}_2} = 6 \, A_\text{oxi}\theta_\text{CO}\theta_\text{O} # \end{gather} # $$ -# +# # Here $B_\text{CO}$/$B_{\text{O}_2}$ represent the ratio of the adsorption-desorption # pre-exponential terms of $CO$/$O_2$ (``pe_ratio`` in Zacros), $x_\text{CO}$/$x_{\text{O}_2}$ # the molar fractions of $CO$/$O_2$; $\theta_\text{CO}$/$\theta_\text{O}$ @@ -121,10 +121,10 @@ # frequency or production rate of $CO_2$. The number 6 is because, in our lattice, # each site has 6 neighbors, so the oxidation event is "replicated" across # each neighboring site. -# +# # Notice that to get the above expressions based on the ones shown in the Zacros # tutorial, you need the following equalities: -# +# # $$ # \begin{gather} # K_s P_s = \left( \frac{A^\text{ads}_s P^{-1}}{A^\text{des}_s} \right) x_s P = B_s x_s \qquad \therefore\qquad s=\text{CO},\text{O}_2 @@ -132,22 +132,22 @@ # k_\text{oxi} = A_\text{oxi} # \end{gather} # $$ -# +# # The final equality follows from the fact that in pyZacros, the activation energy # for all elementary reactions is equal to zero. -# +# # The code below simply computes the coverages and TOF of $CO_2$ using the analytical -# expression described above: +# expression described above: import numpy lh = pz.models.LangmuirHinshelwood() -B_CO = lh.mechanism.find_one( 'CO_adsorption' ).pe_ratio -B_O2 = lh.mechanism.find_one( 'O2_adsorption' ).pe_ratio -A_oxi = lh.mechanism.find_one( 'CO_oxidation' ).pre_expon +B_CO = lh.mechanism.find_one("CO_adsorption").pe_ratio +B_O2 = lh.mechanism.find_one("O2_adsorption").pe_ratio +A_oxi = lh.mechanism.find_one("CO_oxidation").pre_expon -x_CO_model = numpy.linspace(0.0,1.0,201) +x_CO_model = numpy.linspace(0.0, 1.0, 201) ac_O_model = [] ac_CO_model = [] @@ -155,40 +155,46 @@ for i in range(len(x_CO_model)): x_O2 = 1 - x_CO_model[i] - ac_O_model.append( numpy.sqrt(B_O2*x_O2)/( 1 + B_CO*x_CO_model[i] + numpy.sqrt(B_O2*x_O2) ) ) - ac_CO_model.append( B_CO*x_CO_model[i]/( 1 + B_CO*x_CO_model[i] + numpy.sqrt(B_O2*x_O2) ) ) - TOF_CO2_model.append( 6*A_oxi*ac_CO_model[i]*ac_O_model[i] ) + ac_O_model.append(numpy.sqrt(B_O2 * x_O2) / (1 + B_CO * x_CO_model[i] + numpy.sqrt(B_O2 * x_O2))) + ac_CO_model.append(B_CO * x_CO_model[i] / (1 + B_CO * x_CO_model[i] + numpy.sqrt(B_O2 * x_O2))) + TOF_CO2_model.append(6 * A_oxi * ac_CO_model[i] * ac_O_model[i]) # Additionally, if you have installed the package [matplotlib](https://matplotlib.org/), # you can see the aforementioned results visually. Please look over the code below, and # notice we plot the analytical and simulation results together. The points in the # figure represent simulation results, while the lines represent analytical model results. -# They are nearly identical. +# They are nearly identical. 
import matplotlib.pyplot as plt -x_CO_max = (B_O2/B_CO**2/2.0)*(numpy.sqrt(1.0+4.0*B_CO**2/B_O2)-1.0) +x_CO_max = (B_O2 / B_CO**2 / 2.0) * (numpy.sqrt(1.0 + 4.0 * B_CO**2 / B_O2) - 1.0) fig = plt.figure() ax = plt.axes() ax.set_xlim([0.0, 1.0]) -ax.set_xlabel('Molar Fraction CO', fontsize=14) -ax.set_ylabel('Coverage Fraction (%)', color='blue', fontsize=14) -ax.vlines( x_CO_max, 0, max(ac_O_model), colors='0.8', linestyles='--',) -ax.plot(x_CO_model, ac_O_model, color='blue', linestyle='-.', lw=2, zorder=1) -ax.plot(x_CO, ac_O, marker='$\u25CF$', color='blue', lw=0, markersize=4, zorder=2) -ax.plot(x_CO_model, ac_CO_model, color='blue', linestyle='-', lw=2, zorder=3) -ax.plot(x_CO, ac_CO, marker='$\u25EF$', color='blue', markersize=4, lw=0, zorder=4) -plt.text(0.3, 0.60, 'O*', fontsize=18, color='blue') -plt.text(0.7, 0.45, 'CO*', fontsize=18, color='blue') +ax.set_xlabel("Molar Fraction CO", fontsize=14) +ax.set_ylabel("Coverage Fraction (%)", color="blue", fontsize=14) +ax.vlines( + x_CO_max, + 0, + max(ac_O_model), + colors="0.8", + linestyles="--", +) +ax.plot(x_CO_model, ac_O_model, color="blue", linestyle="-.", lw=2, zorder=1) +ax.plot(x_CO, ac_O, marker="$\u25CF$", color="blue", lw=0, markersize=4, zorder=2) +ax.plot(x_CO_model, ac_CO_model, color="blue", linestyle="-", lw=2, zorder=3) +ax.plot(x_CO, ac_CO, marker="$\u25EF$", color="blue", markersize=4, lw=0, zorder=4) +plt.text(0.3, 0.60, "O*", fontsize=18, color="blue") +plt.text(0.7, 0.45, "CO*", fontsize=18, color="blue") ax2 = ax.twinx() -ax2.set_ylabel('TOF (mol/s/site)',color='red', fontsize=14) -ax2.plot(x_CO_model, TOF_CO2_model, color='red', linestyle='-', lw=2, zorder=5) -ax2.plot(x_CO, TOF_CO2, marker='$\u25EF$', color='red', markersize=4, lw=0, zorder=6) -plt.text(0.3, 200.0, 'CO$_2$', fontsize=18, color='red') +ax2.set_ylabel("TOF (mol/s/site)", color="red", fontsize=14) +ax2.plot(x_CO_model, TOF_CO2_model, color="red", linestyle="-", lw=2, zorder=5) +ax2.plot(x_CO, TOF_CO2, marker="$\u25EF$", color="red", markersize=4, lw=0, zorder=6) +plt.text(0.3, 200.0, "CO$_2$", fontsize=18, color="red") plt.show() @@ -196,11 +202,11 @@ # As a final note, we included in the code above the value of the $CO$ molar fraction # ($x_\text{CO}^*$) on which we get the maximum $CO_2$ production rate. The figure shows # this value as a vertical gray dashed line. It is simple to deduce it from the -# preceding analytical expressions: -# +# preceding analytical expressions: +# # $$ # x_{CO}^* = \frac{B_{\text{O}_2}}{2 B_\text{CO}^2}\left( \sqrt{1+\frac{4 B_\text{CO}^2}{B_{\text{O}_2}}} - 1 \right)\approx 0.656 # $$ -# +# # Notice that the position of this maximum $x_\text{CO}*$ depends exclusively on the ratio # of the ``pe_ratio`` parameters for $CO$ and $O_2$ in the Langmuir-Hinshelwood model. 
diff --git a/examples/LangmuirHinshelwood/LangmuirHinshelwood.py b/examples/LangmuirHinshelwood/LangmuirHinshelwood.py index c25807a..b99f900 100644 --- a/examples/LangmuirHinshelwood/LangmuirHinshelwood.py +++ b/examples/LangmuirHinshelwood/LangmuirHinshelwood.py @@ -12,25 +12,27 @@ sett.random_seed = 1609 sett.temperature = 300.0 sett.pressure = 1.000 -sett.snapshots = ('time', 10*dt) -sett.process_statistics = ('time', dt) -sett.species_numbers = ('time', dt) -sett.max_time = 100*dt +sett.snapshots = ("time", 10 * dt) +sett.process_statistics = ("time", dt) +sett.species_numbers = ("time", dt) +sett.max_time = 100 * dt sett.molar_fraction.O2 = 0.500 sett.molar_fraction.CO = 0.500 # Adsorption and diffusion scaling factors for rxn in lh.mechanism: - if 'adsorption' in rxn.label(): rxn.pre_expon *= 1e-2 - if 'diffusion' in rxn.label(): rxn.pre_expon *= 1e-2 + if "adsorption" in rxn.label(): + rxn.pre_expon *= 1e-2 + if "diffusion" in rxn.label(): + rxn.pre_expon *= 1e-2 -job = pz.ZacrosJob( settings=sett, lattice=lh.lattice, mechanism=lh.mechanism, cluster_expansion=lh.cluster_expansion ) +job = pz.ZacrosJob(settings=sett, lattice=lh.lattice, mechanism=lh.mechanism, cluster_expansion=lh.cluster_expansion) results = job.run() -if( job.ok() ): - results.plot_molecule_numbers( ['CO2'] ) - print("turnover_frequency = ", results.turnover_frequency(species_name='CO2')[0]) +if job.ok(): + results.plot_molecule_numbers(["CO2"]) + print("turnover_frequency = ", results.turnover_frequency(species_name="CO2")[0]) scm.pyzacros.finish() diff --git a/examples/LangmuirHinshelwood/SteadyState.py b/examples/LangmuirHinshelwood/SteadyState.py index b0b93c9..3d14213 100644 --- a/examples/LangmuirHinshelwood/SteadyState.py +++ b/examples/LangmuirHinshelwood/SteadyState.py @@ -5,6 +5,7 @@ # Execution time: aprox. 1 min with manual scaling (TOF_CO2 = 295.40334 +/- 3.33394; ratio=0.04000) # Execution time: aprox. 
1.5 min with auto scaling (TOF_CO2 = 296.68140 +/- 3.59279; ratio=0.04000) """ + import multiprocessing import numpy @@ -14,16 +15,16 @@ lh = pz.models.LangmuirHinshelwood() -#--------------------------------------------- +# --------------------------------------------- # Calculation Settings -#--------------------------------------------- +# --------------------------------------------- scm.pyzacros.init() # Run as many job simultaneously as there are cpu on the system maxjobs = multiprocessing.cpu_count() scm.plams.config.default_jobrunner = scm.plams.JobRunner(parallel=True, maxjobs=maxjobs) scm.plams.config.job.runscript.nproc = 1 -print('Running up to {} jobs in parallel simultaneously'.format(maxjobs)) +print("Running up to {} jobs in parallel simultaneously".format(maxjobs)) # Settings: @@ -32,45 +33,49 @@ z_sett.random_seed = 1609 z_sett.temperature = 500.0 z_sett.pressure = 1.000 -#z_sett.snapshots = ('time', 10*dt) -#z_sett.process_statistics = ('time', dt) -z_sett.species_numbers = ('time', dt) -z_sett.max_time = 100*dt +# z_sett.snapshots = ('time', 10*dt) +# z_sett.process_statistics = ('time', dt) +z_sett.species_numbers = ("time", dt) +z_sett.max_time = 100 * dt -#z_sett.molar_fraction.CO = 0.4 -#z_sett.molar_fraction.O2 = 1.0 - z_sett.molar_fraction.CO +# z_sett.molar_fraction.CO = 0.4 +# z_sett.molar_fraction.O2 = 1.0 - z_sett.molar_fraction.CO z_sett.molar_fraction.CO = 0.9 z_sett.molar_fraction.O2 = 1.0 - z_sett.molar_fraction.CO ## Adsorption and diffusion scaling factors -#for rxn in lh.mechanism: - #if 'adsorption' in rxn.label(): rxn.pre_expon *= 1e-2 - #if 'diffusion' in rxn.label(): rxn.pre_expon *= 1e-2 +# for rxn in lh.mechanism: +# if 'adsorption' in rxn.label(): rxn.pre_expon *= 1e-2 +# if 'diffusion' in rxn.label(): rxn.pre_expon *= 1e-2 -job = pz.ZacrosJob( settings=z_sett, lattice=lh.lattice, mechanism=lh.mechanism, cluster_expansion=lh.cluster_expansion ) +job = pz.ZacrosJob(settings=z_sett, lattice=lh.lattice, mechanism=lh.mechanism, cluster_expansion=lh.cluster_expansion) dt = 5.0e-5 ss_sett = pz.Settings() ss_sett.turnover_frequency.nbatch = 20 ss_sett.turnover_frequency.confidence = 0.96 ss_sett.nreplicas = 4 -ss_sett.scaling.enabled = 'T' +ss_sett.scaling.enabled = "T" ss_sett.scaling.partial_equilibrium_index_threshold = 0.1 ss_sett.scaling.upper_bound = 100 -ss_sett.scaling.max_time = 10*dt -ss_sett.scaling.species_numbers = ('time', dt) +ss_sett.scaling.max_time = 10 * dt +ss_sett.scaling.species_numbers = ("time", dt) ss_params = pz.ZacrosSteadyStateJob.Parameters() -ss_params.add( 'max_time', 'restart.max_time', - 2*z_sett.max_time*( numpy.arange(50)+1 )**2 ) +ss_params.add("max_time", "restart.max_time", 2 * z_sett.max_time * (numpy.arange(50) + 1) ** 2) -cjob = pz.ZacrosSteadyStateJob( settings=ss_sett, reference=job, parameters=ss_params ) +cjob = pz.ZacrosSteadyStateJob(settings=ss_sett, reference=job, parameters=ss_params) results = cjob.run() if cjob.ok(): - for i,step in enumerate(results.history()): - print("%8d"%i, "%10.5f"%step['turnover_frequency']['CO2'], "%10.5f"%step['max_time'], - "%10.5f"%step['turnover_frequency_error']['CO2'], "%15s"%step['converged']['CO2']) + for i, step in enumerate(results.history()): + print( + "%8d" % i, + "%10.5f" % step["turnover_frequency"]["CO2"], + "%10.5f" % step["max_time"], + "%10.5f" % step["turnover_frequency_error"]["CO2"], + "%15s" % step["converged"]["CO2"], + ) scm.pyzacros.finish() diff --git a/examples/ReuterScheffler/PhaseTransition-SteadyState.py 
b/examples/ReuterScheffler/PhaseTransition-SteadyState.py index ba3ef4e..94d590d 100644 --- a/examples/ReuterScheffler/PhaseTransition-SteadyState.py +++ b/examples/ReuterScheffler/PhaseTransition-SteadyState.py @@ -13,21 +13,21 @@ maxjobs = multiprocessing.cpu_count() scm.plams.config.default_jobrunner = scm.plams.JobRunner(parallel=True, maxjobs=maxjobs) scm.plams.config.job.runscript.nproc = 1 -print('Running up to {} jobs in parallel simultaneously'.format(maxjobs)) +print("Running up to {} jobs in parallel simultaneously".format(maxjobs)) dt = 1e-6 sett = pz.Settings() sett.random_seed = 14390 sett.temperature = 600.0 sett.pressure = 1.0 -#sett.snapshots = ('time', 100*dt) -#sett.process_statistics = ('time', 10*dt) -sett.species_numbers = ('time', dt) -sett.event_report = 'off' -sett.max_steps = 'infinity' -sett.max_time = 1000*dt +# sett.snapshots = ('time', 100*dt) +# sett.process_statistics = ('time', 10*dt) +sett.species_numbers = ("time", dt) +sett.event_report = "off" +sett.max_steps = "infinity" +sett.max_time = 1000 * dt -job = pz.ZacrosJob( settings=sett, lattice=rs.lattice, mechanism=rs.mechanism, cluster_expansion=rs.cluster_expansion ) +job = pz.ZacrosJob(settings=sett, lattice=rs.lattice, mechanism=rs.mechanism, cluster_expansion=rs.cluster_expansion) ss_sett = pz.Settings() ss_sett.steady_state_job.turnover_frequency.nbatch = 20 @@ -35,64 +35,64 @@ ss_sett.steady_state_job.nreplicas = 4 parametersA = pz.ZacrosSteadyStateJob.Parameters() -parametersA.add( 'max_time', 'restart.max_time', 2*sett.max_time*( numpy.arange(20)+1 )**2 ) +parametersA.add("max_time", "restart.max_time", 2 * sett.max_time * (numpy.arange(20) + 1) ** 2) -ssjob = pz.ZacrosSteadyStateJob( settings=ss_sett, reference=job, parameters=parametersA ) +ssjob = pz.ZacrosSteadyStateJob(settings=ss_sett, reference=job, parameters=parametersA) parametersB = pz.ZacrosParametersScanJob.Parameters() -parametersB.add( 'x_CO', 'molar_fraction.CO', numpy.arange(0.900, 0.999, 0.001) ) -parametersB.add( 'x_O2', 'molar_fraction.O2', lambda params: 1.0-params['x_CO'] ) +parametersB.add("x_CO", "molar_fraction.CO", numpy.arange(0.900, 0.999, 0.001)) +parametersB.add("x_O2", "molar_fraction.O2", lambda params: 1.0 - params["x_CO"]) -mjob = pz.ZacrosParametersScanJob( reference=ssjob, parameters=parametersB, name='mesh' ) +mjob = pz.ZacrosParametersScanJob(reference=ssjob, parameters=parametersB, name="mesh") results = mjob.run() -if( results.job.ok() ): +if results.job.ok(): x_CO = [] ac_O = [] ac_CO = [] TOF_CO2 = [] results_dict = results.turnover_frequency() - results_dict = results.average_coverage( last=20, update=results_dict ) + results_dict = results.average_coverage(last=20, update=results_dict) for i in range(len(results_dict)): - x_CO.append( results_dict[i]['x_CO'] ) - ac_O.append( results_dict[i]['average_coverage']['O*'] ) - ac_CO.append( results_dict[i]['average_coverage']['CO*'] ) - TOF_CO2.append( results_dict[i]['turnover_frequency']['CO2']/1e4 ) - - print( "---------------------------------------------------" ) - print( "%4s"%"cond", "%8s"%"x_CO", "%10s"%"ac_O", "%10s"%"ac_CO", "%10s"%"TOF_CO2.10^4" ) - print( "---------------------------------------------------" ) + x_CO.append(results_dict[i]["x_CO"]) + ac_O.append(results_dict[i]["average_coverage"]["O*"]) + ac_CO.append(results_dict[i]["average_coverage"]["CO*"]) + TOF_CO2.append(results_dict[i]["turnover_frequency"]["CO2"] / 1e4) + + print("---------------------------------------------------") + print("%4s" % "cond", "%8s" % "x_CO", "%10s" % 
"ac_O", "%10s" % "ac_CO", "%10s" % "TOF_CO2.10^4") + print("---------------------------------------------------") for i in range(len(x_CO)): - print( "%4d"%i, "%8.2f"%x_CO[i], "%10.6f"%ac_O[i], "%10.6f"%ac_CO[i] , "%10.6f"%TOF_CO2[i]) + print("%4d" % i, "%8.2f" % x_CO[i], "%10.6f" % ac_O[i], "%10.6f" % ac_CO[i], "%10.6f" % TOF_CO2[i]) scm.pyzacros.finish() -#--------------------------------------------- +# --------------------------------------------- # Plotting the results -#--------------------------------------------- +# --------------------------------------------- try: import matplotlib.pyplot as plt except ImportError as e: - print('Consider to install matplotlib to visualize the results!') + print("Consider to install matplotlib to visualize the results!") exit(0) # Coverage and TOF plot fig = plt.figure() ax = plt.axes() -ax.set_xlabel('Partial Pressure CO', fontsize=14) +ax.set_xlabel("Partial Pressure CO", fontsize=14) ax.set_ylabel("Coverage Fraction (%)", color="blue", fontsize=14) ax.plot(x_CO, ac_O, color="blue", linestyle="-.", lw=2, zorder=1) ax.plot(x_CO, ac_CO, color="blue", linestyle="-", lw=2, zorder=2) -plt.text(0.3, 0.9, 'O', fontsize=18, color="blue") -plt.text(0.7, 0.9, 'CO', fontsize=18, color="blue") +plt.text(0.3, 0.9, "O", fontsize=18, color="blue") +plt.text(0.7, 0.9, "CO", fontsize=18, color="blue") ax2 = ax.twinx() -ax2.set_ylabel("TOF x 10$^4$ (mol/s/site)",color="red", fontsize=14) +ax2.set_ylabel("TOF x 10$^4$ (mol/s/site)", color="red", fontsize=14) ax2.plot(x_CO, TOF_CO2, color="red", lw=2, zorder=5) -plt.text(0.37, 1.5, 'CO$_2$', fontsize=18, color="red") +plt.text(0.37, 1.5, "CO$_2$", fontsize=18, color="red") plt.show() diff --git a/examples/ReuterScheffler/PhaseTransition.py b/examples/ReuterScheffler/PhaseTransition.py index e174f06..b2c4791 100644 --- a/examples/ReuterScheffler/PhaseTransition.py +++ b/examples/ReuterScheffler/PhaseTransition.py @@ -13,89 +13,88 @@ maxjobs = multiprocessing.cpu_count() scm.plams.config.default_jobrunner = scm.plams.JobRunner(parallel=True, maxjobs=maxjobs) scm.plams.config.job.runscript.nproc = 1 -print('Running up to {} jobs in parallel simultaneously'.format(maxjobs)) +print("Running up to {} jobs in parallel simultaneously".format(maxjobs)) dt = 1e-6 sett = pz.Settings() sett.random_seed = 14390 sett.temperature = 600.0 sett.pressure = 1.0 -#sett.snapshots = ('time', 100*dt) -#sett.process_statistics = ('time', 10*dt) -sett.species_numbers = ('time', dt) -sett.event_report = 'off' -sett.max_steps = 'infinity' -sett.max_time = 1000*dt +# sett.snapshots = ('time', 100*dt) +# sett.process_statistics = ('time', 10*dt) +sett.species_numbers = ("time", dt) +sett.event_report = "off" +sett.max_steps = "infinity" +sett.max_time = 1000 * dt parameters = pz.ZacrosParametersScanJob.Parameters() -parameters.add( 'x_CO', 'molar_fraction.CO', numpy.arange(0.900, 0.999, 0.001) ) -parameters.add( 'x_O2', 'molar_fraction.O2', lambda params: 1.0-params['x_CO'] ) +parameters.add("x_CO", "molar_fraction.CO", numpy.arange(0.900, 0.999, 0.001)) +parameters.add("x_O2", "molar_fraction.O2", lambda params: 1.0 - params["x_CO"]) -job = pz.ZacrosJob( settings=sett, lattice=rs.lattice, mechanism=rs.mechanism, cluster_expansion=rs.cluster_expansion ) +job = pz.ZacrosJob(settings=sett, lattice=rs.lattice, mechanism=rs.mechanism, cluster_expansion=rs.cluster_expansion) -mjob = pz.ZacrosParametersScanJob( reference=job, parameters=parameters ) +mjob = pz.ZacrosParametersScanJob(reference=job, parameters=parameters) results = mjob.run() -if( 
results.job.ok() ): +if results.job.ok(): x_CO = [] ac_O = [] ac_CO = [] TOF_CO2 = [] results_dict = results.turnover_frequency() - results_dict = results.average_coverage( last=20, update=results_dict ) + results_dict = results.average_coverage(last=20, update=results_dict) for i in range(len(results_dict)): - x_CO.append( results_dict[i]['x_CO'] ) - ac_O.append( results_dict[i]['average_coverage']['O*'] ) - ac_CO.append( results_dict[i]['average_coverage']['CO*'] ) - TOF_CO2.append( results_dict[i]['turnover_frequency']['CO2']/1e4 ) + x_CO.append(results_dict[i]["x_CO"]) + ac_O.append(results_dict[i]["average_coverage"]["O*"]) + ac_CO.append(results_dict[i]["average_coverage"]["CO*"]) + TOF_CO2.append(results_dict[i]["turnover_frequency"]["CO2"] / 1e4) print("----------------------------------------------") - print("%4s"%"cond", "%8s"%"x_CO", "%10s"%"ac_O", "%10s"%"ac_CO", "%10s"%"TOF_CO2.10^4") + print("%4s" % "cond", "%8s" % "x_CO", "%10s" % "ac_O", "%10s" % "ac_CO", "%10s" % "TOF_CO2.10^4") print("----------------------------------------------") for i in range(len(x_CO)): - print("%4d"%i, "%8.2f"%x_CO[i], "%10.6f"%ac_O[i], "%10.6f"%ac_CO[i], "%10.6f"%TOF_CO2[i]) + print("%4d" % i, "%8.2f" % x_CO[i], "%10.6f" % ac_O[i], "%10.6f" % ac_CO[i], "%10.6f" % TOF_CO2[i]) scm.pyzacros.finish() -#--------------------------------------------- +# --------------------------------------------- # Plotting the results -#--------------------------------------------- +# --------------------------------------------- try: import matplotlib.pyplot as plt except ImportError as e: - print('Consider to install matplotlib to visualize the results!') + print("Consider to install matplotlib to visualize the results!") exit(0) # Coverage and TOF plot fig = plt.figure() ax = plt.axes() -ax.set_xlabel('Partial Pressure CO', fontsize=14) +ax.set_xlabel("Partial Pressure CO", fontsize=14) ax.set_ylabel("Coverage Fraction (%)", color="blue", fontsize=14) ax.plot(x_CO, ac_O, color="blue", linestyle="-.", lw=2, zorder=1) ax.plot(x_CO, ac_CO, color="blue", linestyle="-", lw=2, zorder=2) -plt.text(0.3, 0.9, 'O', fontsize=18, color="blue") -plt.text(0.7, 0.9, 'CO', fontsize=18, color="blue") +plt.text(0.3, 0.9, "O", fontsize=18, color="blue") +plt.text(0.7, 0.9, "CO", fontsize=18, color="blue") ax2 = ax.twinx() -ax2.set_ylabel("TOF x 10$^4$ (mol/s/site)",color="red", fontsize=14) +ax2.set_ylabel("TOF x 10$^4$ (mol/s/site)", color="red", fontsize=14) ax2.plot(x_CO, TOF_CO2, color="red", lw=2, zorder=5) -plt.text(0.37, 1.5, 'CO$_2$', fontsize=18, color="red") +plt.text(0.37, 1.5, "CO$_2$", fontsize=18, color="red") plt.show() ## Lattice states for x_CO=0.54 and CO=0.55 -#results[33].last_lattice_state().plot() -#results[34].last_lattice_state().plot() +# results[33].last_lattice_state().plot() +# results[34].last_lattice_state().plot() ## Molecule numbers for x_CO=0.54 and CO=0.55 -#results[33].plot_molecule_numbers( ["CO2"], normalize_per_site=True ) -#results[34].plot_molecule_numbers( ["CO2"], normalize_per_site=True ) +# results[33].plot_molecule_numbers( ["CO2"], normalize_per_site=True ) +# results[34].plot_molecule_numbers( ["CO2"], normalize_per_site=True ) ## Molecule numbers for x_CO=0.54 and CO=0.55. 
First Derivative -#results[33].plot_molecule_numbers( ["CO2"], normalize_per_site=True, derivative=True ) -#results[34].plot_molecule_numbers( ["CO2"], normalize_per_site=True, derivative=True ) - +# results[33].plot_molecule_numbers( ["CO2"], normalize_per_site=True, derivative=True ) +# results[34].plot_molecule_numbers( ["CO2"], normalize_per_site=True, derivative=True ) diff --git a/examples/ReuterScheffler/ReuterScheffler.py b/examples/ReuterScheffler/ReuterScheffler.py index 0795896..a6360b7 100644 --- a/examples/ReuterScheffler/ReuterScheffler.py +++ b/examples/ReuterScheffler/ReuterScheffler.py @@ -10,23 +10,23 @@ sett.random_seed = 14390 sett.temperature = 600.0 sett.pressure = 1.0 -sett.snapshots = ('time', 100*dt) -sett.process_statistics = ('time', 10*dt) -sett.species_numbers = ('time', dt) -sett.event_report = 'off' -sett.max_steps = 'infinity' -sett.max_time = 1000*dt +sett.snapshots = ("time", 100 * dt) +sett.process_statistics = ("time", 10 * dt) +sett.species_numbers = ("time", dt) +sett.event_report = "off" +sett.max_steps = "infinity" +sett.max_time = 1000 * dt sett.wall_time = 600 sett.molar_fraction.CO = 0.995 -sett.molar_fraction.O2 = 1.0-sett.molar_fraction.CO +sett.molar_fraction.O2 = 1.0 - sett.molar_fraction.CO -job = pz.ZacrosJob( settings=sett, lattice=rs.lattice, mechanism=rs.mechanism, cluster_expansion=rs.cluster_expansion ) +job = pz.ZacrosJob(settings=sett, lattice=rs.lattice, mechanism=rs.mechanism, cluster_expansion=rs.cluster_expansion) results = job.run() -if( job.ok() ): - results.plot_molecule_numbers( ['CO2'] ) - print("turnover_frequency = ", results.turnover_frequency(species_name='CO2')[0]) +if job.ok(): + results.plot_molecule_numbers(["CO2"]) + print("turnover_frequency = ", results.turnover_frequency(species_name="CO2")[0]) scm.pyzacros.finish() diff --git a/examples/ReuterScheffler/SteadyState.py b/examples/ReuterScheffler/SteadyState.py index 813ee73..fbfa11f 100644 --- a/examples/ReuterScheffler/SteadyState.py +++ b/examples/ReuterScheffler/SteadyState.py @@ -14,19 +14,19 @@ maxjobs = multiprocessing.cpu_count() scm.plams.config.default_jobrunner = scm.plams.JobRunner(parallel=True, maxjobs=maxjobs) scm.plams.config.job.runscript.nproc = 1 -print('Running up to {} jobs in parallel simultaneously'.format(maxjobs)) +print("Running up to {} jobs in parallel simultaneously".format(maxjobs)) dt = 1e-6 sett = pz.Settings() sett.random_seed = 14390 sett.temperature = 600.0 sett.pressure = 1.0 -#sett.snapshots = ('time', 100*dt) -#sett.process_statistics = ('time', 10*dt) -sett.species_numbers = ('time', dt) -sett.event_report = 'off' -sett.max_steps = 'infinity' -sett.max_time = 1000*dt +# sett.snapshots = ('time', 100*dt) +# sett.process_statistics = ('time', 10*dt) +sett.species_numbers = ("time", dt) +sett.event_report = "off" +sett.max_steps = "infinity" +sett.max_time = 1000 * dt sett.steady_state_job.turnover_frequency.nbatch = 20 sett.steady_state_job.turnover_frequency.confidence = 0.90 @@ -35,22 +35,27 @@ sett.steady_state_job.scaling.partial_equilibrium_index_threshold = 0.1 sett.steady_state_job.scaling.scaling_upper_bound = 100 -sett.molar_fraction.CO = 0.900 # OK -#sett.molar_fraction.CO = 0.950 # ??? -sett.molar_fraction.O2 = 1.0-sett.molar_fraction.CO +sett.molar_fraction.CO = 0.900 # OK +# sett.molar_fraction.CO = 0.950 # ??? 
+sett.molar_fraction.O2 = 1.0 - sett.molar_fraction.CO -job = pz.ZacrosJob( settings=sett, lattice=rs.lattice, mechanism=rs.mechanism, cluster_expansion=rs.cluster_expansion ) +job = pz.ZacrosJob(settings=sett, lattice=rs.lattice, mechanism=rs.mechanism, cluster_expansion=rs.cluster_expansion) parameters = pz.ZacrosSteadyStateJob.Parameters() -parameters.add( 'max_time', 'restart.max_time', 2*sett.max_time*( numpy.arange(20)+1 )**2 ) +parameters.add("max_time", "restart.max_time", 2 * sett.max_time * (numpy.arange(20) + 1) ** 2) -cjob = pz.ZacrosSteadyStateJob( reference=job, parameters=parameters ) +cjob = pz.ZacrosSteadyStateJob(reference=job, parameters=parameters) results = cjob.run() if cjob.ok(): - for i,step in enumerate(results.history()): - print("%8d"%i, "%10.5f"%step['turnover_frequency']['CO2'], "%10.5f"%step['max_time'], - "%10.5f"%step['turnover_frequency_error']['CO2'], "%15s"%step['converged']['CO2']) + for i, step in enumerate(results.history()): + print( + "%8d" % i, + "%10.5f" % step["turnover_frequency"]["CO2"], + "%10.5f" % step["max_time"], + "%10.5f" % step["turnover_frequency_error"]["CO2"], + "%15s" % step["converged"]["CO2"], + ) scm.pyzacros.finish() diff --git a/examples/ReuterScheffler/check.py b/examples/ReuterScheffler/check.py index 1f73dc0..b2b95cd 100644 --- a/examples/ReuterScheffler/check.py +++ b/examples/ReuterScheffler/check.py @@ -3,8 +3,8 @@ scm.pyzacros.init() -job = scm.plams.load('plams_workdir-ok/plamsjob/plamsjob_ss_iter000.004/plamsjob_ss_iter000.004.dill') -print( job.results.provided_quantities_names() ) -print( len(job.results.provided_quantities()['CO2']) ) +job = scm.plams.load("plams_workdir-ok/plamsjob/plamsjob_ss_iter000.004/plamsjob_ss_iter000.004.dill") +print(job.results.provided_quantities_names()) +print(len(job.results.provided_quantities()["CO2"])) scm.pyzacros.finish() diff --git a/examples/WaterGasShiftOnPt111/WaterGasShiftOnPt111.py b/examples/WaterGasShiftOnPt111/WaterGasShiftOnPt111.py index d38a060..f3abefc 100644 --- a/examples/WaterGasShiftOnPt111/WaterGasShiftOnPt111.py +++ b/examples/WaterGasShiftOnPt111/WaterGasShiftOnPt111.py @@ -3,9 +3,9 @@ import scm import scm.pyzacros as pz -#--------------------------------------------- +# --------------------------------------------- # Species: -#--------------------------------------------- +# --------------------------------------------- # - Gas-species: CO_gas = pz.Species("CO") H2O_gas = pz.Species("H2O") @@ -14,7 +14,7 @@ O2_gas = pz.Species("O2", gas_energy=4.913) # - Surface species: -s0 = pz.Species("*", 1) # Empty adsorption site +s0 = pz.Species("*", 1) # Empty adsorption site CO_adsorbed = pz.Species("CO*", 1) H2O_adsorbed = pz.Species("H2O*", 1) OH_adsorbed = pz.Species("OH*", 1) @@ -22,177 +22,175 @@ H_adsorbed = pz.Species("H*", 1) COOH_adsorbed = pz.Species("COOH*", 1) -#--------------------------------------------- +# --------------------------------------------- # Lattice setup: -#--------------------------------------------- -latt = pz.Lattice( lattice_type=pz.Lattice.HEXAGONAL, lattice_constant=1.0, repeat_cell=[8,10] ) +# --------------------------------------------- +latt = pz.Lattice(lattice_type=pz.Lattice.HEXAGONAL, lattice_constant=1.0, repeat_cell=[8, 10]) -#--------------------------------------------- +# --------------------------------------------- # Clusters: -#--------------------------------------------- -CO_point = pz.Cluster( species=[CO_adsorbed], - energy=-2.077, - label="CO_point") - -H2O_point = pz.Cluster(species=[H2O_adsorbed], - 
energy=-0.362, - label="H2O_point") - -OH_point = pz.Cluster(species=[OH_adsorbed], - energy=0.830, - label="OH_point") - -O_point = pz.Cluster(species=[O_adsorbed], - energy=1.298, - label="O_point") - -H_point = pz.Cluster(species=[H_adsorbed], - energy=-0.619, - label="H_point") - -COOH_point = pz.Cluster(species=[COOH_adsorbed], - energy=-1.487, - label="COOH_point") - -CO_pair_1NN = pz.Cluster(species=[CO_adsorbed, CO_adsorbed], - neighboring=[(0, 1)], - energy=0.560, - label="CO_pair_1NN") - -OH_H_1NN = pz.Cluster(species=[OH_adsorbed, H_adsorbed], - neighboring=[(0, 1)], - energy=0.021, - label="OH_H_1NN") - -O_H_1NN = pz.Cluster(species=[O_adsorbed, H_adsorbed], - neighboring=[(0, 1)], - energy=0.198, - label="O_H_1NN") - -CO_OH_1NN = pz.Cluster(species=[CO_adsorbed, OH_adsorbed], - neighboring=[(0, 1)], - energy=0.066, - label="CO_OH_1NN") - -CO_O_1NN = pz.Cluster(species=[CO_adsorbed, O_adsorbed], - neighboring=[(0, 1)], - energy=0.423, - label="CO_O_1NN") - -#--------------------------------------------- +# --------------------------------------------- +CO_point = pz.Cluster(species=[CO_adsorbed], energy=-2.077, label="CO_point") + +H2O_point = pz.Cluster(species=[H2O_adsorbed], energy=-0.362, label="H2O_point") + +OH_point = pz.Cluster(species=[OH_adsorbed], energy=0.830, label="OH_point") + +O_point = pz.Cluster(species=[O_adsorbed], energy=1.298, label="O_point") + +H_point = pz.Cluster(species=[H_adsorbed], energy=-0.619, label="H_point") + +COOH_point = pz.Cluster(species=[COOH_adsorbed], energy=-1.487, label="COOH_point") + +CO_pair_1NN = pz.Cluster(species=[CO_adsorbed, CO_adsorbed], neighboring=[(0, 1)], energy=0.560, label="CO_pair_1NN") + +OH_H_1NN = pz.Cluster(species=[OH_adsorbed, H_adsorbed], neighboring=[(0, 1)], energy=0.021, label="OH_H_1NN") + +O_H_1NN = pz.Cluster(species=[O_adsorbed, H_adsorbed], neighboring=[(0, 1)], energy=0.198, label="O_H_1NN") + +CO_OH_1NN = pz.Cluster(species=[CO_adsorbed, OH_adsorbed], neighboring=[(0, 1)], energy=0.066, label="CO_OH_1NN") + +CO_O_1NN = pz.Cluster(species=[CO_adsorbed, O_adsorbed], neighboring=[(0, 1)], energy=0.423, label="CO_O_1NN") + +# --------------------------------------------- # Cluster expansion: -#--------------------------------------------- +# --------------------------------------------- myClusterExpansion = pz.ClusterExpansion() -myClusterExpansion.extend( [CO_point, H2O_point] ) -myClusterExpansion.append( OH_point ) -myClusterExpansion.extend( [O_point, H_point, COOH_point] ) -myClusterExpansion.extend( [CO_pair_1NN, OH_H_1NN, O_H_1NN, CO_OH_1NN, CO_O_1NN] ) +myClusterExpansion.extend([CO_point, H2O_point]) +myClusterExpansion.append(OH_point) +myClusterExpansion.extend([O_point, H_point, COOH_point]) +myClusterExpansion.extend([CO_pair_1NN, OH_H_1NN, O_H_1NN, CO_OH_1NN, CO_O_1NN]) -#--------------------------------------------- +# --------------------------------------------- # Elementary Reactions -#--------------------------------------------- - -CO_adsorption = pz.ElementaryReaction(initial=[s0,CO_gas], - final=[CO_adsorbed], - reversible=True, - pre_expon=2.226e+007, - pe_ratio=2.137e-006, - activation_energy=0.0, - label="CO_adsorption") - -H2_dissoc_adsorp = pz.ElementaryReaction(initial=[s0, s0, H2_gas], - final=[H_adsorbed, H_adsorbed], - neighboring=[(0, 1)], - reversible=True, - pre_expon=8.299e+007, - pe_ratio=7.966e-006, - activation_energy=0.0, - label="H2_dissoc_adsorp") - -H2O_adsorption = pz.ElementaryReaction(initial=[s0, H2O_gas], - final=[H2O_adsorbed], - reversible=True, - 
pre_expon=2.776e+002, # Scaled-down 1e+5 times for efficiency - pe_ratio=2.665e-006, - activation_energy=0.0, - label="H2O_adsorption") - -H2O_dissoc_adsorp = pz.ElementaryReaction(initial=[H2O_adsorbed, s0], - final=[OH_adsorbed, H_adsorbed], - neighboring=[(0, 1)], - reversible=True, - pre_expon=1.042e+13, - pe_ratio=1.000e+00, - activation_energy=0.777, - label="H2O_dissoc_adsorp") - -OH_decomposition = pz.ElementaryReaction(initial=[s0, OH_adsorbed], - final=[O_adsorbed, H_adsorbed], - neighboring=[(0, 1)], - reversible=True, - pre_expon=1.042e+13, - pe_ratio=1.000e+00, - activation_energy=0.940, - label="OH_decomposition") - -COOH_formation = pz.ElementaryReaction(initial=[CO_adsorbed, OH_adsorbed], - final=[s0, COOH_adsorbed], - neighboring=[(0, 1)], - reversible=True, - pre_expon=1.042e+13, - pe_ratio=1.000e+00, - activation_energy=0.405, - label="COOH_formation") - -COOH_decomposition = pz.ElementaryReaction(initial=[COOH_adsorbed, s0], - final=[s0, H_adsorbed, CO2_gas], - neighboring=[(0, 1)], - reversible=False, - pre_expon=1.042e+13, - activation_energy=0.852, - label="COOH_decomposition") - -CO_oxidation = pz.ElementaryReaction(initial=[CO_adsorbed, O_adsorbed], - final=[s0, s0, CO2_gas], - neighboring=[(0, 1)], - reversible=False, - pre_expon=1.042e+13, - activation_energy=0.988, - label="CO_oxidation") - -#--------------------------------------------- +# --------------------------------------------- + +CO_adsorption = pz.ElementaryReaction( + initial=[s0, CO_gas], + final=[CO_adsorbed], + reversible=True, + pre_expon=2.226e007, + pe_ratio=2.137e-006, + activation_energy=0.0, + label="CO_adsorption", +) + +H2_dissoc_adsorp = pz.ElementaryReaction( + initial=[s0, s0, H2_gas], + final=[H_adsorbed, H_adsorbed], + neighboring=[(0, 1)], + reversible=True, + pre_expon=8.299e007, + pe_ratio=7.966e-006, + activation_energy=0.0, + label="H2_dissoc_adsorp", +) + +H2O_adsorption = pz.ElementaryReaction( + initial=[s0, H2O_gas], + final=[H2O_adsorbed], + reversible=True, + pre_expon=2.776e002, # Scaled-down 1e+5 times for efficiency + pe_ratio=2.665e-006, + activation_energy=0.0, + label="H2O_adsorption", +) + +H2O_dissoc_adsorp = pz.ElementaryReaction( + initial=[H2O_adsorbed, s0], + final=[OH_adsorbed, H_adsorbed], + neighboring=[(0, 1)], + reversible=True, + pre_expon=1.042e13, + pe_ratio=1.000e00, + activation_energy=0.777, + label="H2O_dissoc_adsorp", +) + +OH_decomposition = pz.ElementaryReaction( + initial=[s0, OH_adsorbed], + final=[O_adsorbed, H_adsorbed], + neighboring=[(0, 1)], + reversible=True, + pre_expon=1.042e13, + pe_ratio=1.000e00, + activation_energy=0.940, + label="OH_decomposition", +) + +COOH_formation = pz.ElementaryReaction( + initial=[CO_adsorbed, OH_adsorbed], + final=[s0, COOH_adsorbed], + neighboring=[(0, 1)], + reversible=True, + pre_expon=1.042e13, + pe_ratio=1.000e00, + activation_energy=0.405, + label="COOH_formation", +) + +COOH_decomposition = pz.ElementaryReaction( + initial=[COOH_adsorbed, s0], + final=[s0, H_adsorbed, CO2_gas], + neighboring=[(0, 1)], + reversible=False, + pre_expon=1.042e13, + activation_energy=0.852, + label="COOH_decomposition", +) + +CO_oxidation = pz.ElementaryReaction( + initial=[CO_adsorbed, O_adsorbed], + final=[s0, s0, CO2_gas], + neighboring=[(0, 1)], + reversible=False, + pre_expon=1.042e13, + activation_energy=0.988, + label="CO_oxidation", +) + +# --------------------------------------------- # Build-up mechanism: -#--------------------------------------------- -mech = pz.Mechanism([CO_adsorption, H2_dissoc_adsorp, 
H2O_adsorption, - H2O_dissoc_adsorp, OH_decomposition, COOH_formation, - COOH_decomposition, CO_oxidation]) - -#--------------------------------------------- +# --------------------------------------------- +mech = pz.Mechanism( + [ + CO_adsorption, + H2_dissoc_adsorp, + H2O_adsorption, + H2O_dissoc_adsorp, + OH_decomposition, + COOH_formation, + COOH_decomposition, + CO_oxidation, + ] +) + +# --------------------------------------------- # Settings: -#--------------------------------------------- +# --------------------------------------------- scm.pyzacros.init() sett = pz.Settings() -sett.molar_fraction.CO = 1.e-5 +sett.molar_fraction.CO = 1.0e-5 sett.molar_fraction.H2O = 0.950 sett.random_seed = 123278 sett.temperature = 500.0 sett.pressure = 10.0 -sett.snapshots = ('time', 5.e-4) -sett.process_statistics = ('time', 5.e-4) -sett.species_numbers = ('time', 5.e-4) -sett.event_report = 'off' -sett.max_steps = 'infinity' +sett.snapshots = ("time", 5.0e-4) +sett.process_statistics = ("time", 5.0e-4) +sett.species_numbers = ("time", 5.0e-4) +sett.event_report = "off" +sett.max_steps = "infinity" sett.max_time = 0.25 -job = pz.ZacrosJob( settings=sett, lattice=latt, mechanism=mech, cluster_expansion=myClusterExpansion ) +job = pz.ZacrosJob(settings=sett, lattice=latt, mechanism=mech, cluster_expansion=myClusterExpansion) print(job) results = job.run() -if( job.ok() ): - results.plot_molecule_numbers( ["CO*", "H*", "H2O*", "COOH*"] ) +if job.ok(): + results.plot_molecule_numbers(["CO*", "H*", "H2O*", "COOH*"]) scm.pyzacros.finish() diff --git a/examples/ZiffGulariBarshad/PhaseTransitions-ADP+cover.py b/examples/ZiffGulariBarshad/PhaseTransitions-ADP+cover.py index 040061b..0ea0422 100644 --- a/examples/ZiffGulariBarshad/PhaseTransitions-ADP+cover.py +++ b/examples/ZiffGulariBarshad/PhaseTransitions-ADP+cover.py @@ -7,10 +7,11 @@ import adaptiveDesignProcedure as adp -#------------------------------------- + +# ------------------------------------- # Calculating the rates with pyZacros -#------------------------------------- -def getRate( conditions ): +# ------------------------------------- +def getRate(conditions): print("") print(" Requesting:") @@ -18,115 +19,117 @@ def getRate( conditions ): print(" xCO = ", cond[0]) print("") - #--------------------------------------- + # --------------------------------------- # Zacros calculation - #--------------------------------------- + # --------------------------------------- zgb = pz.models.ZiffGulariBarshad() z_sett = pz.Settings() z_sett.random_seed = 953129 z_sett.temperature = 500.0 z_sett.pressure = 1.0 - z_sett.species_numbers = ('time', 0.1) + z_sett.species_numbers = ("time", 0.1) z_sett.max_time = 10.0 - z_job = pz.ZacrosJob( settings=z_sett, lattice=zgb.lattice, - mechanism=zgb.mechanism, - cluster_expansion=zgb.cluster_expansion ) + z_job = pz.ZacrosJob( + settings=z_sett, lattice=zgb.lattice, mechanism=zgb.mechanism, cluster_expansion=zgb.cluster_expansion + ) - #--------------------------------------- + # --------------------------------------- # Parameters scan calculation - #--------------------------------------- + # --------------------------------------- ps_params = pz.ZacrosParametersScanJob.Parameters() - ps_params.add( 'x_CO', 'molar_fraction.CO', [ cond[0] for cond in conditions ] ) - ps_params.add( 'x_O2', 'molar_fraction.O2', lambda params: 1.0-params['x_CO'] ) + ps_params.add("x_CO", "molar_fraction.CO", [cond[0] for cond in conditions]) + ps_params.add("x_O2", "molar_fraction.O2", lambda params: 1.0 - params["x_CO"]) - 
ps_job = pz.ZacrosParametersScanJob( reference=z_job, parameters=ps_params ) + ps_job = pz.ZacrosParametersScanJob(reference=z_job, parameters=ps_params) results = ps_job.run() - tof = numpy.nan*numpy.empty((len(conditions),3)) - if( results.job.ok() ): + tof = numpy.nan * numpy.empty((len(conditions), 3)) + if results.job.ok(): results_dict = results.turnover_frequency() - results_dict = results.average_coverage( last=20, update=results_dict ) + results_dict = results.average_coverage(last=20, update=results_dict) for i in range(len(results_dict)): - tof[i,0] = results_dict[i]['average_coverage']['O*'] - tof[i,1] = results_dict[i]['average_coverage']['CO*'] - tof[i,2] = results_dict[i]['turnover_frequency']['CO2'] + tof[i, 0] = results_dict[i]["average_coverage"]["O*"] + tof[i, 1] = results_dict[i]["average_coverage"]["CO*"] + tof[i, 2] = results_dict[i]["turnover_frequency"]["CO2"] return tof -scm.pyzacros.init( folder="PhaseTransitions-ADP+cover" ) +scm.pyzacros.init(folder="PhaseTransitions-ADP+cover") # Run as many job simultaneously as there are cpu on the system maxjobs = multiprocessing.cpu_count() scm.plams.config.default_jobrunner = scm.plams.JobRunner(parallel=True, maxjobs=maxjobs) scm.plams.config.job.runscript.nproc = 1 -print('Running up to {} jobs in parallel simultaneously'.format(maxjobs)) +print("Running up to {} jobs in parallel simultaneously".format(maxjobs)) -#----------------- +# ----------------- # Surrogate model -#----------------- -input_var = ( { 'name' : 'CO', - 'min' : 0.2, - 'max' : 0.8, - 'num' : 5, - 'typevar' : 'lin' }, ) - -tab_var = ( {'name': 'ac_O', 'typevar':'lin'}, - {'name': 'ac_CO', 'typevar':'lin'}, - {'name':'TOF_CO2', 'typevar':'lin'} ) - -outputDir = scm.pyzacros.workdir()+'/adp.results' - -adpML = adp.adaptiveDesignProcedure( input_var, tab_var, getRate, - algorithmParams={'dth':0.01,'d2th':0.10}, # Quality Very Good - outputDir=outputDir, - randomState=10 ) +# ----------------- +input_var = ({"name": "CO", "min": 0.2, "max": 0.8, "num": 5, "typevar": "lin"},) + +tab_var = ( + {"name": "ac_O", "typevar": "lin"}, + {"name": "ac_CO", "typevar": "lin"}, + {"name": "TOF_CO2", "typevar": "lin"}, +) + +outputDir = scm.pyzacros.workdir() + "/adp.results" + +adpML = adp.adaptiveDesignProcedure( + input_var, + tab_var, + getRate, + algorithmParams={"dth": 0.01, "d2th": 0.10}, # Quality Very Good + outputDir=outputDir, + randomState=10, +) adpML.createTrainingDataAndML() -x_CO,ac_O,ac_CO,TOF_CO2 = adpML.trainingData.T +x_CO, ac_O, ac_CO, TOF_CO2 = adpML.trainingData.T -print( "-------------------------------------------------" ) -print( "%4s"%"cond", " %8s"%"x_CO", " %10s"%"ac_O", "%10s"%"ac_CO", "%12s"%"TOF_CO2" ) -print( "-------------------------------------------------" ) +print("-------------------------------------------------") +print("%4s" % "cond", " %8s" % "x_CO", " %10s" % "ac_O", "%10s" % "ac_CO", "%12s" % "TOF_CO2") +print("-------------------------------------------------") for i in range(len(x_CO)): - print( "%4d"%i, "%8.3f"%x_CO[i], "%10.6f"%ac_O[i], "%10.6f"%ac_CO[i], "%12.6f"%TOF_CO2[i] ) + print("%4d" % i, "%8.3f" % x_CO[i], "%10.6f" % ac_O[i], "%10.6f" % ac_CO[i], "%12.6f" % TOF_CO2[i]) scm.pyzacros.finish() -#--------------------------------------------- +# --------------------------------------------- # Plotting the results -#--------------------------------------------- +# --------------------------------------------- try: import matplotlib.pyplot as plt except ImportError as e: - print('Consider to install matplotlib to 
visualize the results!') + print("Consider to install matplotlib to visualize the results!") exit(0) fig = plt.figure() -x_CO_model = numpy.linspace(0.2,0.8,201) -ac_O_model,ac_CO_model,TOF_CO2_model = adpML.predict( x_CO_model ).T +x_CO_model = numpy.linspace(0.2, 0.8, 201) +ac_O_model, ac_CO_model, TOF_CO2_model = adpML.predict(x_CO_model).T ax = plt.axes() -ax.set_xlabel('Molar Fraction CO', fontsize=14) +ax.set_xlabel("Molar Fraction CO", fontsize=14) ax.set_ylabel("Coverage Fraction (%)", color="blue", fontsize=14) ax.plot(x_CO_model, ac_O_model, color="blue", linestyle="-.", lw=2, zorder=1) -ax.plot(x_CO, ac_O, marker='$\u25EF$', color='blue', markersize=4, lw=0, zorder=1) +ax.plot(x_CO, ac_O, marker="$\u25EF$", color="blue", markersize=4, lw=0, zorder=1) ax.plot(x_CO_model, ac_CO_model, color="blue", linestyle="-", lw=2, zorder=2) -ax.plot(x_CO, ac_CO, marker='$\u25EF$', color='blue', markersize=4, lw=0, zorder=1) -plt.text(0.3, 0.9, 'O', fontsize=18, color="blue") -plt.text(0.7, 0.9, 'CO', fontsize=18, color="blue") +ax.plot(x_CO, ac_CO, marker="$\u25EF$", color="blue", markersize=4, lw=0, zorder=1) +plt.text(0.3, 0.9, "O", fontsize=18, color="blue") +plt.text(0.7, 0.9, "CO", fontsize=18, color="blue") ax2 = ax.twinx() -ax2.set_ylabel("TOF (mol/s/site)",color="red", fontsize=14) -ax2.plot(x_CO_model, TOF_CO2_model, color='red', linestyle='-', lw=2, zorder=0) -ax2.plot(x_CO, TOF_CO2, marker='$\u25EF$', color='red', markersize=4, lw=0, zorder=1) -plt.text(0.37, 1.5, 'CO$_2$', fontsize=18, color="red") +ax2.set_ylabel("TOF (mol/s/site)", color="red", fontsize=14) +ax2.plot(x_CO_model, TOF_CO2_model, color="red", linestyle="-", lw=2, zorder=0) +ax2.plot(x_CO, TOF_CO2, marker="$\u25EF$", color="red", markersize=4, lw=0, zorder=1) +plt.text(0.37, 1.5, "CO$_2$", fontsize=18, color="red") plt.show() diff --git a/examples/ZiffGulariBarshad/PhaseTransitions-ADP.ipynb b/examples/ZiffGulariBarshad/PhaseTransitions-ADP.ipynb index fe696db..1d03604 100644 --- a/examples/ZiffGulariBarshad/PhaseTransitions-ADP.ipynb +++ b/examples/ZiffGulariBarshad/PhaseTransitions-ADP.ipynb @@ -80,7 +80,9 @@ "import scm.pyzacros.models\n", "\n", "import adaptiveDesignProcedure as adp\n", - "import warnings; warnings.simplefilter('ignore', UserWarning)" + "import warnings\n", + "\n", + "warnings.simplefilter(\"ignore\", UserWarning)" ] }, { @@ -127,59 +129,59 @@ "metadata": {}, "outputs": [], "source": [ - "def get_rate( conditions ):\n", - " \n", + "def get_rate(conditions):\n", + "\n", " print(\"\")\n", " print(\" Requesting:\")\n", " for cond in conditions:\n", " print(\" x_CO = \", cond[0])\n", " print(\"\")\n", - " \n", - " #---------------------------------------\n", + "\n", + " # ---------------------------------------\n", " # Zacros calculation\n", - " #---------------------------------------\n", + " # ---------------------------------------\n", " zgb = pz.models.ZiffGulariBarshad()\n", "\n", " z_sett = pz.Settings()\n", " z_sett.random_seed = 953129\n", " z_sett.temperature = 500.0\n", " z_sett.pressure = 1.0\n", - " z_sett.species_numbers = ('time', 0.1)\n", + " z_sett.species_numbers = (\"time\", 0.1)\n", " z_sett.max_time = 10.0\n", "\n", - " z_job = pz.ZacrosJob( settings=z_sett, lattice=zgb.lattice,\n", - " mechanism=zgb.mechanism,\n", - " cluster_expansion=zgb.cluster_expansion )\n", + " z_job = pz.ZacrosJob(\n", + " settings=z_sett, lattice=zgb.lattice, mechanism=zgb.mechanism, cluster_expansion=zgb.cluster_expansion\n", + " )\n", "\n", - " #---------------------------------------\n", + " # 
---------------------------------------\n", " # Parameters scan calculation\n", - " #---------------------------------------\n", + " # ---------------------------------------\n", " ps_params = pz.ZacrosParametersScanJob.Parameters()\n", - " ps_params.add( 'x_CO', 'molar_fraction.CO', [ cond[0] for cond in conditions ] )\n", - " ps_params.add( 'x_O2', 'molar_fraction.O2', lambda params: 1.0-params['x_CO'] )\n", + " ps_params.add(\"x_CO\", \"molar_fraction.CO\", [cond[0] for cond in conditions])\n", + " ps_params.add(\"x_O2\", \"molar_fraction.O2\", lambda params: 1.0 - params[\"x_CO\"])\n", "\n", - " ps_job = pz.ZacrosParametersScanJob( reference=z_job, parameters=ps_params )\n", + " ps_job = pz.ZacrosParametersScanJob(reference=z_job, parameters=ps_params)\n", "\n", - " #---------------------------------------\n", + " # ---------------------------------------\n", " # Running the calculations\n", - " #---------------------------------------\n", + " # ---------------------------------------\n", " results = ps_job.run()\n", - " \n", + "\n", " if not results.job.ok():\n", - " print('Something went wrong!')\n", + " print(\"Something went wrong!\")\n", "\n", - " #---------------------------------------\n", + " # ---------------------------------------\n", " # Collecting the results\n", - " #---------------------------------------\n", - " data = numpy.nan*numpy.empty((len(conditions),3))\n", - " if( results.job.ok() ):\n", + " # ---------------------------------------\n", + " data = numpy.nan * numpy.empty((len(conditions), 3))\n", + " if results.job.ok():\n", " results_dict = results.turnover_frequency()\n", - " results_dict = results.average_coverage( last=20, update=results_dict )\n", + " results_dict = results.average_coverage(last=20, update=results_dict)\n", "\n", " for i in range(len(results_dict)):\n", - " data[i,0] = results_dict[i]['average_coverage']['O*']\n", - " data[i,1] = results_dict[i]['average_coverage']['CO*']\n", - " data[i,2] = results_dict[i]['turnover_frequency']['CO2']\n", + " data[i, 0] = results_dict[i][\"average_coverage\"][\"O*\"]\n", + " data[i, 1] = results_dict[i][\"average_coverage\"][\"CO*\"]\n", + " data[i, 2] = results_dict[i][\"turnover_frequency\"][\"CO2\"]\n", "\n", " return data" ] @@ -248,7 +250,7 @@ "maxjobs = multiprocessing.cpu_count()\n", "scm.plams.config.default_jobrunner = scm.plams.JobRunner(parallel=True, maxjobs=maxjobs)\n", "scm.plams.config.job.runscript.nproc = 1\n", - "print('Running up to {} jobs in parallel simultaneously'.format(maxjobs))" + "print(\"Running up to {} jobs in parallel simultaneously\".format(maxjobs))" ] }, { @@ -275,11 +277,13 @@ "metadata": {}, "outputs": [], "source": [ - "input_var = ( { 'name':'x_CO', 'min':0.2, 'max':0.8, 'num':5 }, )\n", + "input_var = ({\"name\": \"x_CO\", \"min\": 0.2, \"max\": 0.8, \"num\": 5},)\n", "\n", - "output_var = ( {'name':'ac_O'},\n", - " {'name':'ac_CO'},\n", - " {'name':'TOF_CO2'}, )" + "output_var = (\n", + " {\"name\": \"ac_O\"},\n", + " {\"name\": \"ac_CO\"},\n", + " {\"name\": \"TOF_CO2\"},\n", + ")" ] }, { @@ -365,9 +369,9 @@ } ], "source": [ - "adpML = adp.adaptiveDesignProcedure( input_var, output_var, get_rate,\n", - " outputDir=scm.pyzacros.workdir()+'/adp.results',\n", - " randomState=10 )" + "adpML = adp.adaptiveDesignProcedure(\n", + " input_var, output_var, get_rate, outputDir=scm.pyzacros.workdir() + \"/adp.results\", randomState=10\n", + ")" ] }, { @@ -976,13 +980,13 @@ } ], "source": [ - "x_CO,ac_O,ac_CO,TOF_CO2 = adpML.trainingData.T\n", + "x_CO, ac_O, ac_CO, TOF_CO2 = 
adpML.trainingData.T\n", "\n", - "print( \"-------------------------------------------------\" )\n", - "print( \"%4s\"%\"cond\", \"%8s\"%\"x_CO\", \"%10s\"%\"ac_O\", \"%10s\"%\"ac_CO\", \"%12s\"%\"TOF_CO2\" )\n", - "print( \"-------------------------------------------------\" )\n", + "print(\"-------------------------------------------------\")\n", + "print(\"%4s\" % \"cond\", \"%8s\" % \"x_CO\", \"%10s\" % \"ac_O\", \"%10s\" % \"ac_CO\", \"%12s\" % \"TOF_CO2\")\n", + "print(\"-------------------------------------------------\")\n", "for i in range(len(x_CO)):\n", - " print( \"%4d\"%i, \"%8.3f\"%x_CO[i], \"%10.6f\"%ac_O[i], \"%10.6f\"%ac_CO[i], \"%12.6f\"%TOF_CO2[i] )" + " print(\"%4d\" % i, \"%8.3f\" % x_CO[i], \"%10.6f\" % ac_O[i], \"%10.6f\" % ac_CO[i], \"%12.6f\" % TOF_CO2[i])" ] }, { @@ -1022,25 +1026,25 @@ "\n", "fig = plt.figure()\n", "\n", - "x_CO_model = numpy.linspace(0.2,0.8,201)\n", - "ac_O_model,ac_CO_model,TOF_CO2_model = adpML.predict( x_CO_model ).T\n", + "x_CO_model = numpy.linspace(0.2, 0.8, 201)\n", + "ac_O_model, ac_CO_model, TOF_CO2_model = adpML.predict(x_CO_model).T\n", "\n", "ax = plt.axes()\n", - "ax.set_xlabel('Molar Fraction CO', fontsize=14)\n", + "ax.set_xlabel(\"Molar Fraction CO\", fontsize=14)\n", "\n", "ax.set_ylabel(\"Coverage Fraction (%)\", color=\"blue\", fontsize=14)\n", "ax.plot(x_CO_model, ac_O_model, color=\"blue\", linestyle=\"-.\", lw=2, zorder=1)\n", - "ax.plot(x_CO, ac_O, marker='$\\u25EF$', color='blue', markersize=4, lw=0, zorder=1)\n", + "ax.plot(x_CO, ac_O, marker=\"$\\u25EF$\", color=\"blue\", markersize=4, lw=0, zorder=1)\n", "ax.plot(x_CO_model, ac_CO_model, color=\"blue\", linestyle=\"-\", lw=2, zorder=2)\n", - "ax.plot(x_CO, ac_CO, marker='$\\u25EF$', color='blue', markersize=4, lw=0, zorder=1)\n", - "plt.text(0.3, 0.9, 'O', fontsize=18, color=\"blue\")\n", - "plt.text(0.7, 0.9, 'CO', fontsize=18, color=\"blue\")\n", + "ax.plot(x_CO, ac_CO, marker=\"$\\u25EF$\", color=\"blue\", markersize=4, lw=0, zorder=1)\n", + "plt.text(0.3, 0.9, \"O\", fontsize=18, color=\"blue\")\n", + "plt.text(0.7, 0.9, \"CO\", fontsize=18, color=\"blue\")\n", "\n", "ax2 = ax.twinx()\n", - "ax2.set_ylabel(\"TOF (mol/s/site)\",color=\"red\", fontsize=14)\n", - "ax2.plot(x_CO_model, TOF_CO2_model, color='red', linestyle='-', lw=2, zorder=0)\n", - "ax2.plot(x_CO, TOF_CO2, marker='$\\u25EF$', color='red', markersize=4, lw=0, zorder=1)\n", - "plt.text(0.37, 1.5, 'CO$_2$', fontsize=18, color=\"red\")\n", + "ax2.set_ylabel(\"TOF (mol/s/site)\", color=\"red\", fontsize=14)\n", + "ax2.plot(x_CO_model, TOF_CO2_model, color=\"red\", linestyle=\"-\", lw=2, zorder=0)\n", + "ax2.plot(x_CO, TOF_CO2, marker=\"$\\u25EF$\", color=\"red\", markersize=4, lw=0, zorder=1)\n", + "plt.text(0.37, 1.5, \"CO$_2$\", fontsize=18, color=\"red\")\n", "\n", "plt.show()" ] @@ -1129,22 +1133,22 @@ "\n", "path = \"plams_workdir/adp.results/\"\n", "\n", - "x_CO_model = np.linspace(0.2,0.8,201)\n", - "ac_O_model,ac_CO_model,TOF_CO2_model = adp.predict( x_CO_model, path ).T\n", + "x_CO_model = np.linspace(0.2, 0.8, 201)\n", + "ac_O_model, ac_CO_model, TOF_CO2_model = adp.predict(x_CO_model, path).T\n", "\n", "ax = plt.axes()\n", - "ax.set_xlabel('Molar Fraction CO', fontsize=14)\n", + "ax.set_xlabel(\"Molar Fraction CO\", fontsize=14)\n", "\n", "ax.set_ylabel(\"Coverage Fraction (%)\", color=\"blue\", fontsize=14)\n", "ax.plot(x_CO_model, ac_O_model, color=\"blue\", linestyle=\"-.\", lw=2, zorder=1)\n", "ax.plot(x_CO_model, ac_CO_model, color=\"blue\", linestyle=\"-\", lw=2, zorder=2)\n", - 
"plt.text(0.3, 0.9, 'O', fontsize=18, color=\"blue\")\n", - "plt.text(0.7, 0.9, 'CO', fontsize=18, color=\"blue\")\n", + "plt.text(0.3, 0.9, \"O\", fontsize=18, color=\"blue\")\n", + "plt.text(0.7, 0.9, \"CO\", fontsize=18, color=\"blue\")\n", "\n", "ax2 = ax.twinx()\n", - "ax2.set_ylabel(\"TOF (mol/s/site)\",color=\"red\", fontsize=14)\n", - "ax2.plot(x_CO_model, TOF_CO2_model, color='red', linestyle='-', lw=2, zorder=0)\n", - "plt.text(0.37, 1.5, 'CO$_2$', fontsize=18, color=\"red\")\n", + "ax2.set_ylabel(\"TOF (mol/s/site)\", color=\"red\", fontsize=14)\n", + "ax2.plot(x_CO_model, TOF_CO2_model, color=\"red\", linestyle=\"-\", lw=2, zorder=0)\n", + "plt.text(0.37, 1.5, \"CO$_2$\", fontsize=18, color=\"red\")\n", "\n", "plt.show()" ] diff --git a/examples/ZiffGulariBarshad/PhaseTransitions-ADP.py b/examples/ZiffGulariBarshad/PhaseTransitions-ADP.py index 6ec85c8..2e5d6fb 100644 --- a/examples/ZiffGulariBarshad/PhaseTransitions-ADP.py +++ b/examples/ZiffGulariBarshad/PhaseTransitions-ADP.py @@ -16,7 +16,7 @@ # reduce the overall computational cost, it would be ideal to generate more # points in the more interesting areas automatically. This is the main goal # of the **Adaptive Design Procedure (ADP)**. -# +# # The ADP was created to generate training data for Machine Learning (ML) algorithms, # with a particular emphasis on approximating computationally-intensive # first-principles kinetic models in catalysis. The procedure is based on @@ -29,7 +29,7 @@ # **Surrogate Model** of the data based on ML techniques, # allowing interpolation of points not included in the original data set, # which is critical for multiscale simulations of complex chemical reactors. -# +# # In this tutorial, we will likewise examine the effects of altering the gas # phase's composition in the $CO_2$ Turnover frequency (TOF) in the ZGB model, # but we will do so while utilizing the ADP to both suggest the values of the @@ -56,7 +56,9 @@ import scm.pyzacros.models import adaptiveDesignProcedure as adp -import warnings; warnings.simplefilter('ignore', UserWarning) +import warnings + +warnings.simplefilter("ignore", UserWarning) # The ``import warning`` line is just needed to get clean output messages further down. @@ -72,7 +74,7 @@ # example, we have ``one input variable``, the molar fraction of CO, and # ``three output variables``, the average coverage for $O*$ and $CO*$ # and the $CO_2$ TOF. -# +# # This ``get_rate()`` function performs a ``ZacrosParametersScanJob`` # calculation. To follow the details, please refer to the example # **Phase Transitions in the ZGB model**. In a nutshell, it configures @@ -84,59 +86,60 @@ # ``turnover frequency()`` and ``average coverage()`` functions and # storing them in the output array in the correct order. 
-def get_rate( conditions ): - + +def get_rate(conditions): + print("") print(" Requesting:") for cond in conditions: print(" x_CO = ", cond[0]) print("") - - #--------------------------------------- + + # --------------------------------------- # Zacros calculation - #--------------------------------------- + # --------------------------------------- zgb = pz.models.ZiffGulariBarshad() z_sett = pz.Settings() z_sett.random_seed = 953129 z_sett.temperature = 500.0 z_sett.pressure = 1.0 - z_sett.species_numbers = ('time', 0.1) + z_sett.species_numbers = ("time", 0.1) z_sett.max_time = 10.0 - z_job = pz.ZacrosJob( settings=z_sett, lattice=zgb.lattice, - mechanism=zgb.mechanism, - cluster_expansion=zgb.cluster_expansion ) + z_job = pz.ZacrosJob( + settings=z_sett, lattice=zgb.lattice, mechanism=zgb.mechanism, cluster_expansion=zgb.cluster_expansion + ) - #--------------------------------------- + # --------------------------------------- # Parameters scan calculation - #--------------------------------------- + # --------------------------------------- ps_params = pz.ZacrosParametersScanJob.Parameters() - ps_params.add( 'x_CO', 'molar_fraction.CO', [ cond[0] for cond in conditions ] ) - ps_params.add( 'x_O2', 'molar_fraction.O2', lambda params: 1.0-params['x_CO'] ) + ps_params.add("x_CO", "molar_fraction.CO", [cond[0] for cond in conditions]) + ps_params.add("x_O2", "molar_fraction.O2", lambda params: 1.0 - params["x_CO"]) - ps_job = pz.ZacrosParametersScanJob( reference=z_job, parameters=ps_params ) + ps_job = pz.ZacrosParametersScanJob(reference=z_job, parameters=ps_params) - #--------------------------------------- + # --------------------------------------- # Running the calculations - #--------------------------------------- + # --------------------------------------- results = ps_job.run() - + if not results.job.ok(): - print('Something went wrong!') + print("Something went wrong!") - #--------------------------------------- + # --------------------------------------- # Collecting the results - #--------------------------------------- - data = numpy.nan*numpy.empty((len(conditions),3)) - if( results.job.ok() ): + # --------------------------------------- + data = numpy.nan * numpy.empty((len(conditions), 3)) + if results.job.ok(): results_dict = results.turnover_frequency() - results_dict = results.average_coverage( last=20, update=results_dict ) + results_dict = results.average_coverage(last=20, update=results_dict) for i in range(len(results_dict)): - data[i,0] = results_dict[i]['average_coverage']['O*'] - data[i,1] = results_dict[i]['average_coverage']['CO*'] - data[i,2] = results_dict[i]['turnover_frequency']['CO2'] + data[i, 0] = results_dict[i]["average_coverage"]["O*"] + data[i, 1] = results_dict[i]["average_coverage"]["CO*"] + data[i, 2] = results_dict[i]["turnover_frequency"]["CO2"] return data @@ -152,12 +155,12 @@ def get_rate( conditions ): # instances as we request. In this case, we choose to use the maximum number of # simultaneous processes (``maxjobs``) equal to the number of processors in the # machine. Additionally, by setting ``nproc = 1`` we establish that only one -# processor will be used for each zacros instance. +# processor will be used for each zacros instance. 
maxjobs = multiprocessing.cpu_count() scm.plams.config.default_jobrunner = scm.plams.JobRunner(parallel=True, maxjobs=maxjobs) scm.plams.config.job.runscript.nproc = 1 -print('Running up to {} jobs in parallel simultaneously'.format(maxjobs)) +print("Running up to {} jobs in parallel simultaneously".format(maxjobs)) # Firstly, we must define the input and output variables. As previously stated, for @@ -171,11 +174,13 @@ def get_rate( conditions ): # and output variables should be in correspondence with the array sizes used in the # ``get_rate()`` function. -input_var = ( { 'name':'x_CO', 'min':0.2, 'max':0.8, 'num':5 }, ) +input_var = ({"name": "x_CO", "min": 0.2, "max": 0.8, "num": 5},) -output_var = ( {'name':'ac_O'}, - {'name':'ac_CO'}, - {'name':'TOF_CO2'}, ) +output_var = ( + {"name": "ac_O"}, + {"name": "ac_CO"}, + {"name": "TOF_CO2"}, +) # Then, we create an ``adaptativeDesignProcedure`` object by calling its constructor, @@ -186,9 +191,9 @@ def get_rate( conditions ): # It is also possible to provide several parameters to control the algorithm using # the keyword ``algorithmParams''. But we will get back to that later. -adpML = adp.adaptiveDesignProcedure( input_var, output_var, get_rate, - outputDir=scm.pyzacros.workdir()+'/adp.results', - randomState=10 ) +adpML = adp.adaptiveDesignProcedure( + input_var, output_var, get_rate, outputDir=scm.pyzacros.workdir() + "/adp.results", randomState=10 +) # Now, we begin the calculation by invoking the method ``createTrainingDataAndML()``, @@ -204,17 +209,17 @@ def get_rate( conditions ): # If the execution got up to this point, everything worked as expected. Hooray! -# +# # The results are then collected by accessing the ``trainingData`` attribute of the # ``adpML`` object, and they are presented nicely in a table in the lines that follow. -x_CO,ac_O,ac_CO,TOF_CO2 = adpML.trainingData.T +x_CO, ac_O, ac_CO, TOF_CO2 = adpML.trainingData.T -print( "-------------------------------------------------" ) -print( "%4s"%"cond", "%8s"%"x_CO", "%10s"%"ac_O", "%10s"%"ac_CO", "%12s"%"TOF_CO2" ) -print( "-------------------------------------------------" ) +print("-------------------------------------------------") +print("%4s" % "cond", "%8s" % "x_CO", "%10s" % "ac_O", "%10s" % "ac_CO", "%12s" % "TOF_CO2") +print("-------------------------------------------------") for i in range(len(x_CO)): - print( "%4d"%i, "%8.3f"%x_CO[i], "%10.6f"%ac_O[i], "%10.6f"%ac_CO[i], "%12.6f"%TOF_CO2[i] ) + print("%4d" % i, "%8.3f" % x_CO[i], "%10.6f" % ac_O[i], "%10.6f" % ac_CO[i], "%12.6f" % TOF_CO2[i]) # The above results are the final aim of the calculation. 
However, we @@ -230,25 +235,25 @@ def get_rate( conditions ): fig = plt.figure() -x_CO_model = numpy.linspace(0.2,0.8,201) -ac_O_model,ac_CO_model,TOF_CO2_model = adpML.predict( x_CO_model ).T +x_CO_model = numpy.linspace(0.2, 0.8, 201) +ac_O_model, ac_CO_model, TOF_CO2_model = adpML.predict(x_CO_model).T ax = plt.axes() -ax.set_xlabel('Molar Fraction CO', fontsize=14) +ax.set_xlabel("Molar Fraction CO", fontsize=14) ax.set_ylabel("Coverage Fraction (%)", color="blue", fontsize=14) ax.plot(x_CO_model, ac_O_model, color="blue", linestyle="-.", lw=2, zorder=1) -ax.plot(x_CO, ac_O, marker='$\u25EF$', color='blue', markersize=4, lw=0, zorder=1) +ax.plot(x_CO, ac_O, marker="$\u25EF$", color="blue", markersize=4, lw=0, zorder=1) ax.plot(x_CO_model, ac_CO_model, color="blue", linestyle="-", lw=2, zorder=2) -ax.plot(x_CO, ac_CO, marker='$\u25EF$', color='blue', markersize=4, lw=0, zorder=1) -plt.text(0.3, 0.9, 'O', fontsize=18, color="blue") -plt.text(0.7, 0.9, 'CO', fontsize=18, color="blue") +ax.plot(x_CO, ac_CO, marker="$\u25EF$", color="blue", markersize=4, lw=0, zorder=1) +plt.text(0.3, 0.9, "O", fontsize=18, color="blue") +plt.text(0.7, 0.9, "CO", fontsize=18, color="blue") ax2 = ax.twinx() -ax2.set_ylabel("TOF (mol/s/site)",color="red", fontsize=14) -ax2.plot(x_CO_model, TOF_CO2_model, color='red', linestyle='-', lw=2, zorder=0) -ax2.plot(x_CO, TOF_CO2, marker='$\u25EF$', color='red', markersize=4, lw=0, zorder=1) -plt.text(0.37, 1.5, 'CO$_2$', fontsize=18, color="red") +ax2.set_ylabel("TOF (mol/s/site)", color="red", fontsize=14) +ax2.plot(x_CO_model, TOF_CO2_model, color="red", linestyle="-", lw=2, zorder=0) +ax2.plot(x_CO, TOF_CO2, marker="$\u25EF$", color="red", markersize=4, lw=0, zorder=1) +plt.text(0.37, 1.5, "CO$_2$", fontsize=18, color="red") plt.show() @@ -264,7 +269,7 @@ def get_rate( conditions ): # the only way to do so is by increasing the number of points in the training # set by tuning the ADP parameters' (``algorithmParams``). However, now that # we have the surrogate model, we can quickly obtain the average coverage for -# $O*$ and $CO*$ and the $CO_2$ TOF for any $CO$ molar fraction. +# $O*$ and $CO*$ and the $CO_2$ TOF for any $CO$ molar fraction. 
# Now, we can close the pyZacros environment: @@ -284,22 +289,22 @@ def get_rate( conditions ): path = "plams_workdir/adp.results/" -x_CO_model = np.linspace(0.2,0.8,201) -ac_O_model,ac_CO_model,TOF_CO2_model = adp.predict( x_CO_model, path ).T +x_CO_model = np.linspace(0.2, 0.8, 201) +ac_O_model, ac_CO_model, TOF_CO2_model = adp.predict(x_CO_model, path).T ax = plt.axes() -ax.set_xlabel('Molar Fraction CO', fontsize=14) +ax.set_xlabel("Molar Fraction CO", fontsize=14) ax.set_ylabel("Coverage Fraction (%)", color="blue", fontsize=14) ax.plot(x_CO_model, ac_O_model, color="blue", linestyle="-.", lw=2, zorder=1) ax.plot(x_CO_model, ac_CO_model, color="blue", linestyle="-", lw=2, zorder=2) -plt.text(0.3, 0.9, 'O', fontsize=18, color="blue") -plt.text(0.7, 0.9, 'CO', fontsize=18, color="blue") +plt.text(0.3, 0.9, "O", fontsize=18, color="blue") +plt.text(0.7, 0.9, "CO", fontsize=18, color="blue") ax2 = ax.twinx() -ax2.set_ylabel("TOF (mol/s/site)",color="red", fontsize=14) -ax2.plot(x_CO_model, TOF_CO2_model, color='red', linestyle='-', lw=2, zorder=0) -plt.text(0.37, 1.5, 'CO$_2$', fontsize=18, color="red") +ax2.set_ylabel("TOF (mol/s/site)", color="red", fontsize=14) +ax2.plot(x_CO_model, TOF_CO2_model, color="red", linestyle="-", lw=2, zorder=0) +plt.text(0.37, 1.5, "CO$_2$", fontsize=18, color="red") plt.show() @@ -308,4 +313,4 @@ def get_rate( conditions ): # ``forestFile`` in the ADP constructor allows you to alter the prefix ``ml_ExtraTrees``. # In the ``adp.predict()`` method, you can provide the complete path to this file, but if # a directory is supplied instead, it will try to discover the proper file inside, -# as shown in the lines of code above. +# as shown in the lines of code above. diff --git a/examples/ZiffGulariBarshad/PhaseTransitions-ADP_ViewResults.py b/examples/ZiffGulariBarshad/PhaseTransitions-ADP_ViewResults.py index 1bbb5c7..c437dcf 100644 --- a/examples/ZiffGulariBarshad/PhaseTransitions-ADP_ViewResults.py +++ b/examples/ZiffGulariBarshad/PhaseTransitions-ADP_ViewResults.py @@ -4,21 +4,21 @@ path = "plams_workdir.007/adp.results/" -x_CO_model = np.linspace(0.2,0.8,201) -ac_O_model,ac_CO_model,TOF_CO2_model = adp.predict( x_CO_model.reshape(-1,1), path ).T +x_CO_model = np.linspace(0.2, 0.8, 201) +ac_O_model, ac_CO_model, TOF_CO2_model = adp.predict(x_CO_model.reshape(-1, 1), path).T ax = plt.axes() -ax.set_xlabel('Molar Fraction CO', fontsize=14) +ax.set_xlabel("Molar Fraction CO", fontsize=14) ax.set_ylabel("Coverage Fraction (%)", color="blue", fontsize=14) ax.plot(x_CO_model, ac_O_model, color="blue", linestyle="-.", lw=2, zorder=1) ax.plot(x_CO_model, ac_CO_model, color="blue", linestyle="-", lw=2, zorder=2) -plt.text(0.3, 0.9, 'O', fontsize=18, color="blue") -plt.text(0.7, 0.9, 'CO', fontsize=18, color="blue") +plt.text(0.3, 0.9, "O", fontsize=18, color="blue") +plt.text(0.7, 0.9, "CO", fontsize=18, color="blue") ax2 = ax.twinx() -ax2.set_ylabel("TOF (mol/s/site)",color="red", fontsize=14) -ax2.plot(x_CO_model, TOF_CO2_model, color='red', linestyle='-', lw=2, zorder=0) -plt.text(0.37, 1.5, 'CO$_2$', fontsize=18, color="red") +ax2.set_ylabel("TOF (mol/s/site)", color="red", fontsize=14) +ax2.plot(x_CO_model, TOF_CO2_model, color="red", linestyle="-", lw=2, zorder=0) +plt.text(0.37, 1.5, "CO$_2$", fontsize=18, color="red") plt.show() diff --git a/examples/ZiffGulariBarshad/PhaseTransitions-SteadyState-ADP.py b/examples/ZiffGulariBarshad/PhaseTransitions-SteadyState-ADP.py index 84146cf..fe82bc1 100644 --- 
a/examples/ZiffGulariBarshad/PhaseTransitions-SteadyState-ADP.py +++ b/examples/ZiffGulariBarshad/PhaseTransitions-SteadyState-ADP.py @@ -6,9 +6,12 @@ import scm.pyzacros.models import adaptiveDesignProcedure as adp -import warnings; warnings.simplefilter('ignore', UserWarning) +import warnings -def get_rate( conditions ): +warnings.simplefilter("ignore", UserWarning) + + +def get_rate(conditions): print("") print(" Requesting:") @@ -16,66 +19,64 @@ def get_rate( conditions ): print(" x_CO = ", cond[0]) print("") - #--------------------------------------- + # --------------------------------------- # Zacros calculation - #--------------------------------------- + # --------------------------------------- zgb = pz.models.ZiffGulariBarshad() z_sett = pz.Settings() z_sett.random_seed = 953129 z_sett.temperature = 500.0 z_sett.pressure = 1.0 - z_sett.species_numbers = ('time', 0.1) + z_sett.species_numbers = ("time", 0.1) z_sett.max_time = 10.0 - z_job = pz.ZacrosJob( settings=z_sett, lattice=zgb.lattice, - mechanism=zgb.mechanism, - cluster_expansion=zgb.cluster_expansion ) + z_job = pz.ZacrosJob( + settings=z_sett, lattice=zgb.lattice, mechanism=zgb.mechanism, cluster_expansion=zgb.cluster_expansion + ) - #--------------------------------------- + # --------------------------------------- # Steady-State calculation - #--------------------------------------- + # --------------------------------------- ss_sett = pz.Settings() ss_sett.turnover_frequency.nbatch = 20 ss_sett.turnover_frequency.confidence = 0.96 ss_sett.turnover_frequency.nreplicas = 4 ss_params = pz.ZacrosSteadyStateJob.Parameters() - ss_params.add( 'max_time', 'restart.max_time', - 2*z_sett.max_time*( numpy.arange(10)+1 )**3 ) + ss_params.add("max_time", "restart.max_time", 2 * z_sett.max_time * (numpy.arange(10) + 1) ** 3) - ss_job = pz.ZacrosSteadyStateJob( settings=ss_sett, reference=z_job, - parameters=ss_params ) + ss_job = pz.ZacrosSteadyStateJob(settings=ss_sett, reference=z_job, parameters=ss_params) - #--------------------------------------- + # --------------------------------------- # Parameters scan calculation - #--------------------------------------- + # --------------------------------------- ps_params = pz.ZacrosParametersScanJob.Parameters() - ps_params.add( 'x_CO', 'molar_fraction.CO', [ cond[0] for cond in conditions ] ) - ps_params.add( 'x_O2', 'molar_fraction.O2', lambda params: 1.0-params['x_CO'] ) + ps_params.add("x_CO", "molar_fraction.CO", [cond[0] for cond in conditions]) + ps_params.add("x_O2", "molar_fraction.O2", lambda params: 1.0 - params["x_CO"]) - ps_job = pz.ZacrosParametersScanJob( reference=ss_job, parameters=ps_params ) + ps_job = pz.ZacrosParametersScanJob(reference=ss_job, parameters=ps_params) - #--------------------------------------- + # --------------------------------------- # Running the calculations - #--------------------------------------- + # --------------------------------------- results = ps_job.run() if not results.job.ok(): - print('Something went wrong!') + print("Something went wrong!") - #--------------------------------------- + # --------------------------------------- # Collecting the results - #--------------------------------------- - data = numpy.nan*numpy.empty((len(conditions),3)) - if( results.job.ok() ): + # --------------------------------------- + data = numpy.nan * numpy.empty((len(conditions), 3)) + if results.job.ok(): results_dict = results.turnover_frequency() - results_dict = results.average_coverage( last=20, update=results_dict ) + results_dict = 
results.average_coverage(last=20, update=results_dict) for i in range(len(results_dict)): - data[i,0] = results_dict[i]['average_coverage']['O*'] - data[i,1] = results_dict[i]['average_coverage']['CO*'] - data[i,2] = results_dict[i]['turnover_frequency']['CO2'] + data[i, 0] = results_dict[i]["average_coverage"]["O*"] + data[i, 1] = results_dict[i]["average_coverage"]["CO*"] + data[i, 2] = results_dict[i]["turnover_frequency"]["CO2"] return data @@ -85,53 +86,59 @@ def get_rate( conditions ): maxjobs = multiprocessing.cpu_count() scm.plams.config.default_jobrunner = scm.plams.JobRunner(parallel=True, maxjobs=maxjobs) scm.plams.config.job.runscript.nproc = 1 -print('Running up to {} jobs in parallel simultaneously'.format(maxjobs)) +print("Running up to {} jobs in parallel simultaneously".format(maxjobs)) -input_var = ( { 'name':'x_CO', 'min':0.2, 'max':0.8, 'num':5 }, ) +input_var = ({"name": "x_CO", "min": 0.2, "max": 0.8, "num": 5},) -output_var = ( {'name':'ac_O'}, - {'name':'ac_CO'}, - {'name':'TOF_CO2'}, ) +output_var = ( + {"name": "ac_O"}, + {"name": "ac_CO"}, + {"name": "TOF_CO2"}, +) -adpML = adp.adaptiveDesignProcedure( input_var, output_var, get_rate, - algorithmParams={'dth':0.01,'d2th':0.10}, - outputDir=scm.pyzacros.workdir()+'/adp.results', - randomState=10 ) +adpML = adp.adaptiveDesignProcedure( + input_var, + output_var, + get_rate, + algorithmParams={"dth": 0.01, "d2th": 0.10}, + outputDir=scm.pyzacros.workdir() + "/adp.results", + randomState=10, +) adpML.createTrainingDataAndML() -x_CO,ac_O,ac_CO,TOF_CO2 = adpML.trainingData.T +x_CO, ac_O, ac_CO, TOF_CO2 = adpML.trainingData.T -print( "-------------------------------------------------" ) -print( "%4s"%"cond", "%8s"%"x_CO", "%10s"%"ac_O", "%10s"%"ac_CO", "%12s"%"TOF_CO2" ) -print( "-------------------------------------------------" ) +print("-------------------------------------------------") +print("%4s" % "cond", "%8s" % "x_CO", "%10s" % "ac_O", "%10s" % "ac_CO", "%12s" % "TOF_CO2") +print("-------------------------------------------------") for i in range(len(x_CO)): - print( "%4d"%i, "%8.3f"%x_CO[i], "%10.6f"%ac_O[i], "%10.6f"%ac_CO[i], "%12.6f"%TOF_CO2[i] ) + print("%4d" % i, "%8.3f" % x_CO[i], "%10.6f" % ac_O[i], "%10.6f" % ac_CO[i], "%12.6f" % TOF_CO2[i]) import matplotlib.pyplot as plt fig = plt.figure() -x_CO_model = numpy.linspace(0.2,0.8,201) -ac_O_model,ac_CO_model,TOF_CO2_model = adpML.predict( x_CO_model ).T +x_CO_model = numpy.linspace(0.2, 0.8, 201) +ac_O_model, ac_CO_model, TOF_CO2_model = adpML.predict(x_CO_model).T ax = plt.axes() -ax.set_xlabel('Molar Fraction CO', fontsize=14) +ax.set_xlabel("Molar Fraction CO", fontsize=14) ax.set_ylabel("Coverage Fraction (%)", color="blue", fontsize=14) ax.plot(x_CO_model, ac_O_model, color="blue", linestyle="-.", lw=2, zorder=1) -ax.plot(x_CO, ac_O, marker='$\u25EF$', color='blue', markersize=4, lw=0, zorder=1) +ax.plot(x_CO, ac_O, marker="$\u25EF$", color="blue", markersize=4, lw=0, zorder=1) ax.plot(x_CO_model, ac_CO_model, color="blue", linestyle="-", lw=2, zorder=2) -ax.plot(x_CO, ac_CO, marker='$\u25EF$', color='blue', markersize=4, lw=0, zorder=1) -plt.text(0.3, 0.9, 'O', fontsize=18, color="blue") -plt.text(0.7, 0.9, 'CO', fontsize=18, color="blue") +ax.plot(x_CO, ac_CO, marker="$\u25EF$", color="blue", markersize=4, lw=0, zorder=1) +plt.text(0.3, 0.9, "O", fontsize=18, color="blue") +plt.text(0.7, 0.9, "CO", fontsize=18, color="blue") ax2 = ax.twinx() -ax2.set_ylabel("TOF (mol/s/site)",color="red", fontsize=14) -ax2.plot(x_CO_model, TOF_CO2_model, 
color='red', linestyle='-', lw=2, zorder=0) -ax2.plot(x_CO, TOF_CO2, marker='$\u25EF$', color='red', markersize=4, lw=0, zorder=1) -plt.text(0.37, 1.5, 'CO$_2$', fontsize=18, color="red") +ax2.set_ylabel("TOF (mol/s/site)", color="red", fontsize=14) +ax2.plot(x_CO_model, TOF_CO2_model, color="red", linestyle="-", lw=2, zorder=0) +ax2.plot(x_CO, TOF_CO2, marker="$\u25EF$", color="red", markersize=4, lw=0, zorder=1) +plt.text(0.37, 1.5, "CO$_2$", fontsize=18, color="red") plt.show() diff --git a/examples/ZiffGulariBarshad/PhaseTransitions-SteadyState.ipynb b/examples/ZiffGulariBarshad/PhaseTransitions-SteadyState.ipynb index 867b0e6..ef03926 100644 --- a/examples/ZiffGulariBarshad/PhaseTransitions-SteadyState.ipynb +++ b/examples/ZiffGulariBarshad/PhaseTransitions-SteadyState.ipynb @@ -104,7 +104,7 @@ "maxjobs = multiprocessing.cpu_count()\n", "scm.plams.config.default_jobrunner = scm.plams.JobRunner(parallel=True, maxjobs=maxjobs)\n", "scm.plams.config.job.runscript.nproc = 1\n", - "print('Running up to {} jobs in parallel simultaneously'.format(maxjobs))" + "print(\"Running up to {} jobs in parallel simultaneously\".format(maxjobs))" ] }, { @@ -169,12 +169,12 @@ "z_sett.temperature = 500.0\n", "z_sett.pressure = 1.0\n", "z_sett.max_time = 10.0\n", - "z_sett.species_numbers = ('time', 0.1)\n", + "z_sett.species_numbers = (\"time\", 0.1)\n", "z_sett.random_seed = 953129\n", "\n", - "z_job = pz.ZacrosJob( settings=z_sett, lattice=zgb.lattice,\n", - " mechanism=zgb.mechanism,\n", - " cluster_expansion=zgb.cluster_expansion )" + "z_job = pz.ZacrosJob(\n", + " settings=z_sett, lattice=zgb.lattice, mechanism=zgb.mechanism, cluster_expansion=zgb.cluster_expansion\n", + ")" ] }, { @@ -216,11 +216,9 @@ "ss_sett.turnover_frequency.nreplicas = 4\n", "\n", "ss_params = pz.ZacrosSteadyStateJob.Parameters()\n", - "ss_params.add( 'max_time', 'restart.max_time',\n", - " 2*z_sett.max_time*( numpy.arange(10)+1 )**2 )\n", + "ss_params.add(\"max_time\", \"restart.max_time\", 2 * z_sett.max_time * (numpy.arange(10) + 1) ** 2)\n", "\n", - "ss_job = pz.ZacrosSteadyStateJob( settings=ss_sett, reference=z_job,\n", - " parameters=ss_params )" + "ss_job = pz.ZacrosSteadyStateJob(settings=ss_sett, reference=z_job, parameters=ss_params)" ] }, { @@ -323,10 +321,10 @@ ], "source": [ "ps_params = pz.ZacrosParametersScanJob.Parameters()\n", - "ps_params.add( 'x_CO', 'molar_fraction.CO', numpy.arange(0.2, 0.8, 0.01) )\n", - "ps_params.add( 'x_O2', 'molar_fraction.O2', lambda params: 1.0-params['x_CO'] )\n", + "ps_params.add(\"x_CO\", \"molar_fraction.CO\", numpy.arange(0.2, 0.8, 0.01))\n", + "ps_params.add(\"x_O2\", \"molar_fraction.O2\", lambda params: 1.0 - params[\"x_CO\"])\n", "\n", - "ps_job = pz.ZacrosParametersScanJob( reference=ss_job, parameters=ps_params )" + "ps_job = pz.ZacrosParametersScanJob(reference=ss_job, parameters=ps_params)" ] }, { @@ -5923,7 +5921,7 @@ "results = ps_job.run()\n", "\n", "if not results.job.ok():\n", - " print('Something went wrong!') " + " print(\"Something went wrong!\")" ] }, { @@ -6033,20 +6031,27 @@ "max_time = []\n", "\n", "results_dict = results.turnover_frequency()\n", - "results_dict = results.average_coverage( last=10, update=results_dict )\n", + "results_dict = results.average_coverage(last=10, update=results_dict)\n", "\n", - "for i,idx in enumerate(results.indices()):\n", - " x_CO.append( results_dict[i]['x_CO'] )\n", - " ac_O.append( results_dict[i]['average_coverage']['O*'] )\n", - " ac_CO.append( results_dict[i]['average_coverage']['CO*'] )\n", - " TOF_CO2.append( 
results_dict[i]['turnover_frequency']['CO2'] )\n", - " max_time.append( results.children_results( child_id=idx ).history( pos=-1 )['max_time'] )\n", + "for i, idx in enumerate(results.indices()):\n", + " x_CO.append(results_dict[i][\"x_CO\"])\n", + " ac_O.append(results_dict[i][\"average_coverage\"][\"O*\"])\n", + " ac_CO.append(results_dict[i][\"average_coverage\"][\"CO*\"])\n", + " TOF_CO2.append(results_dict[i][\"turnover_frequency\"][\"CO2\"])\n", + " max_time.append(results.children_results(child_id=idx).history(pos=-1)[\"max_time\"])\n", "\n", - "print( \"-----------------------------------------------------------\" )\n", - "print( \"%4s\"%\"cond\", \"%8s\"%\"x_CO\", \"%10s\"%\"ac_O\", \"%10s\"%\"ac_CO\", \"%12s\"%\"TOF_CO2\", \"%10s\"%\"max_time\" )\n", - "print( \"-----------------------------------------------------------\" )\n", + "print(\"-----------------------------------------------------------\")\n", + "print(\"%4s\" % \"cond\", \"%8s\" % \"x_CO\", \"%10s\" % \"ac_O\", \"%10s\" % \"ac_CO\", \"%12s\" % \"TOF_CO2\", \"%10s\" % \"max_time\")\n", + "print(\"-----------------------------------------------------------\")\n", "for i in range(len(x_CO)):\n", - " print( \"%4d\"%i, \"%8.2f\"%x_CO[i], \"%10.6f\"%ac_O[i], \"%10.6f\"%ac_CO[i], \"%12.6f\"%TOF_CO2[i], \"%10.3f\"%max_time[i] )" + " print(\n", + " \"%4d\" % i,\n", + " \"%8.2f\" % x_CO[i],\n", + " \"%10.6f\" % ac_O[i],\n", + " \"%10.6f\" % ac_CO[i],\n", + " \"%12.6f\" % TOF_CO2[i],\n", + " \"%10.3f\" % max_time[i],\n", + " )" ] }, { @@ -6086,17 +6091,17 @@ "fig = plt.figure()\n", "\n", "ax = plt.axes()\n", - "ax.set_xlabel('Molar fraction CO', fontsize=14)\n", + "ax.set_xlabel(\"Molar fraction CO\", fontsize=14)\n", "ax.set_ylabel(\"Coverage Fraction (%)\", color=\"blue\", fontsize=14)\n", "ax.plot(x_CO, ac_O, color=\"blue\", linestyle=\"-.\", lw=2, zorder=1)\n", "ax.plot(x_CO, ac_CO, color=\"blue\", linestyle=\"-\", lw=2, zorder=2)\n", - "plt.text(0.3, 0.9, 'O', fontsize=18, color=\"blue\")\n", - "plt.text(0.7, 0.9, 'CO', fontsize=18, color=\"blue\")\n", + "plt.text(0.3, 0.9, \"O\", fontsize=18, color=\"blue\")\n", + "plt.text(0.7, 0.9, \"CO\", fontsize=18, color=\"blue\")\n", "\n", "ax2 = ax.twinx()\n", - "ax2.set_ylabel(\"TOF (mol/s/site)\",color=\"red\", fontsize=14)\n", + "ax2.set_ylabel(\"TOF (mol/s/site)\", color=\"red\", fontsize=14)\n", "ax2.plot(x_CO, TOF_CO2, color=\"red\", lw=2, zorder=5)\n", - "plt.text(0.37, 1.5, 'CO$_2$', fontsize=18, color=\"red\")\n", + "plt.text(0.37, 1.5, \"CO$_2$\", fontsize=18, color=\"red\")\n", "\n", "plt.show()" ] diff --git a/examples/ZiffGulariBarshad/PhaseTransitions-SteadyState.py b/examples/ZiffGulariBarshad/PhaseTransitions-SteadyState.py index 6dd0229..4c3d338 100644 --- a/examples/ZiffGulariBarshad/PhaseTransitions-SteadyState.py +++ b/examples/ZiffGulariBarshad/PhaseTransitions-SteadyState.py @@ -39,12 +39,12 @@ # instances as we request. In this case, we choose to use the maximum number of # simultaneous processes (``maxjobs``) equal to the number of processors in the # machine. Additionally, by setting ``nproc = 1`` we establish that only one -# processor will be used for each zacros instance. +# processor will be used for each zacros instance. 
maxjobs = multiprocessing.cpu_count() scm.plams.config.default_jobrunner = scm.plams.JobRunner(parallel=True, maxjobs=maxjobs) scm.plams.config.job.runscript.nproc = 1 -print('Running up to {} jobs in parallel simultaneously'.format(maxjobs)) +print("Running up to {} jobs in parallel simultaneously".format(maxjobs)) # Now, we initialize our Ziff-Gulari-Barshad model, which by luck is available as a @@ -59,7 +59,7 @@ # ``ZacrosJob``. So, We will go through them one at a time: # **1. Setting up the ZacrosJob** -# +# # For ``ZacrosJob``, all parameters are set using a ``Setting`` object. To begin, # we define the physical parameters: ``temperature`` (in K), and ``pressure`` # (in bar). The calculation parameters are then set: ``species numbers`` (in s) @@ -77,16 +77,16 @@ z_sett.temperature = 500.0 z_sett.pressure = 1.0 z_sett.max_time = 10.0 -z_sett.species_numbers = ('time', 0.1) +z_sett.species_numbers = ("time", 0.1) z_sett.random_seed = 953129 -z_job = pz.ZacrosJob( settings=z_sett, lattice=zgb.lattice, - mechanism=zgb.mechanism, - cluster_expansion=zgb.cluster_expansion ) +z_job = pz.ZacrosJob( + settings=z_sett, lattice=zgb.lattice, mechanism=zgb.mechanism, cluster_expansion=zgb.cluster_expansion +) # **2. Setting up the ZacrosSteadyStateJob** -# +# # We also need to create a ``Setting`` object for ``ZacrosJob`` There, we ask for a # steady-state configuration using a TOFs calculation with a 96% confidence level # (``turnover frequency.confidence``), using four replicas to speed up the calculation @@ -103,15 +103,13 @@ ss_sett.turnover_frequency.nreplicas = 4 ss_params = pz.ZacrosSteadyStateJob.Parameters() -ss_params.add( 'max_time', 'restart.max_time', - 2*z_sett.max_time*( numpy.arange(10)+1 )**2 ) +ss_params.add("max_time", "restart.max_time", 2 * z_sett.max_time * (numpy.arange(10) + 1) ** 2) -ss_job = pz.ZacrosSteadyStateJob( settings=ss_sett, reference=z_job, - parameters=ss_params ) +ss_job = pz.ZacrosSteadyStateJob(settings=ss_sett, reference=z_job, parameters=ss_params) # **3. Setting up the ZacrosParametersScanJob** -# +# # Although the ``ZacrosParametersScanJob`` does not require a ``Setting`` object, # it does require a ``ZacrosSteadyStateJob.Parameters`` object to specify which # parameters must be modified systematically. In this instance, all we need is a @@ -121,13 +119,13 @@ # fractions will be used internally to replace ``molar fraction.CO`` and # ``molar fraction.O2`` in the Zacros input files. Then, using the # ``ZacrosSteadyStateJob`` defined earlier (``ss job``) and the parameters we just -# defined (``ps params``), we create the ``ZacrosParametersScanJob``: +# defined (``ps params``), we create the ``ZacrosParametersScanJob``: ps_params = pz.ZacrosParametersScanJob.Parameters() -ps_params.add( 'x_CO', 'molar_fraction.CO', numpy.arange(0.2, 0.8, 0.01) ) -ps_params.add( 'x_O2', 'molar_fraction.O2', lambda params: 1.0-params['x_CO'] ) +ps_params.add("x_CO", "molar_fraction.CO", numpy.arange(0.2, 0.8, 0.01)) +ps_params.add("x_O2", "molar_fraction.O2", lambda params: 1.0 - params["x_CO"]) -ps_job = pz.ZacrosParametersScanJob( reference=ss_job, parameters=ps_params ) +ps_job = pz.ZacrosParametersScanJob(reference=ss_job, parameters=ps_params) # The parameters scan calculation setup is ready. Therefore, we can start it @@ -139,11 +137,11 @@ results = ps_job.run() if not results.job.ok(): - print('Something went wrong!') + print("Something went wrong!") # If the execution got up to this point, everything worked as expected. Hooray! 
-# +# # Finally, in the following lines, we just nicely print the results in a table. See # the API documentation to learn more about how the ``results`` object is structured, # and the available methods. In this case, we use the ``turnover_frequency()`` and @@ -156,7 +154,7 @@ # parameters, so if you want to access the properties of one of the child jobs, we # recommend using a loop like the one we use here. In the lines that follow, we use this # ``idx`` to get the maximum time that the simulation required to achieve the steady -# state for that specific composition ("max time"). +# state for that specific composition ("max time"). x_CO = [] ac_O = [] @@ -165,20 +163,27 @@ max_time = [] results_dict = results.turnover_frequency() -results_dict = results.average_coverage( last=10, update=results_dict ) - -for i,idx in enumerate(results.indices()): - x_CO.append( results_dict[i]['x_CO'] ) - ac_O.append( results_dict[i]['average_coverage']['O*'] ) - ac_CO.append( results_dict[i]['average_coverage']['CO*'] ) - TOF_CO2.append( results_dict[i]['turnover_frequency']['CO2'] ) - max_time.append( results.children_results( child_id=idx ).history( pos=-1 )['max_time'] ) - -print( "-----------------------------------------------------------" ) -print( "%4s"%"cond", "%8s"%"x_CO", "%10s"%"ac_O", "%10s"%"ac_CO", "%12s"%"TOF_CO2", "%10s"%"max_time" ) -print( "-----------------------------------------------------------" ) +results_dict = results.average_coverage(last=10, update=results_dict) + +for i, idx in enumerate(results.indices()): + x_CO.append(results_dict[i]["x_CO"]) + ac_O.append(results_dict[i]["average_coverage"]["O*"]) + ac_CO.append(results_dict[i]["average_coverage"]["CO*"]) + TOF_CO2.append(results_dict[i]["turnover_frequency"]["CO2"]) + max_time.append(results.children_results(child_id=idx).history(pos=-1)["max_time"]) + +print("-----------------------------------------------------------") +print("%4s" % "cond", "%8s" % "x_CO", "%10s" % "ac_O", "%10s" % "ac_CO", "%12s" % "TOF_CO2", "%10s" % "max_time") +print("-----------------------------------------------------------") for i in range(len(x_CO)): - print( "%4d"%i, "%8.2f"%x_CO[i], "%10.6f"%ac_O[i], "%10.6f"%ac_CO[i], "%12.6f"%TOF_CO2[i], "%10.3f"%max_time[i] ) + print( + "%4d" % i, + "%8.2f" % x_CO[i], + "%10.6f" % ac_O[i], + "%10.6f" % ac_CO[i], + "%12.6f" % TOF_CO2[i], + "%10.3f" % max_time[i], + ) # The above results are the final aim of the calculation. 
However, we @@ -194,17 +199,17 @@ fig = plt.figure() ax = plt.axes() -ax.set_xlabel('Molar fraction CO', fontsize=14) +ax.set_xlabel("Molar fraction CO", fontsize=14) ax.set_ylabel("Coverage Fraction (%)", color="blue", fontsize=14) ax.plot(x_CO, ac_O, color="blue", linestyle="-.", lw=2, zorder=1) ax.plot(x_CO, ac_CO, color="blue", linestyle="-", lw=2, zorder=2) -plt.text(0.3, 0.9, 'O', fontsize=18, color="blue") -plt.text(0.7, 0.9, 'CO', fontsize=18, color="blue") +plt.text(0.3, 0.9, "O", fontsize=18, color="blue") +plt.text(0.7, 0.9, "CO", fontsize=18, color="blue") ax2 = ax.twinx() -ax2.set_ylabel("TOF (mol/s/site)",color="red", fontsize=14) +ax2.set_ylabel("TOF (mol/s/site)", color="red", fontsize=14) ax2.plot(x_CO, TOF_CO2, color="red", lw=2, zorder=5) -plt.text(0.37, 1.5, 'CO$_2$', fontsize=18, color="red") +plt.text(0.37, 1.5, "CO$_2$", fontsize=18, color="red") plt.show() @@ -221,4 +226,3 @@ # Now, we can close the pyZacros environment: scm.pyzacros.finish() - diff --git a/examples/ZiffGulariBarshad/PhaseTransitions-v2.py b/examples/ZiffGulariBarshad/PhaseTransitions-v2.py index 6cb7c35..7bd2355 100644 --- a/examples/ZiffGulariBarshad/PhaseTransitions-v2.py +++ b/examples/ZiffGulariBarshad/PhaseTransitions-v2.py @@ -7,61 +7,57 @@ scm.pyzacros.init() -#============================================== +# ============================================== # Initializing the ZGB model -#============================================== +# ============================================== zgb = pz.models.ZiffGulariBarshad() -#============================================== +# ============================================== # Calculation Settings and Execution -#============================================== +# ============================================== # Configuring parallel execution -#-------------------------------- +# -------------------------------- maxjobs = multiprocessing.cpu_count() scm.plams.config.default_jobrunner = scm.plams.JobRunner(parallel=True, maxjobs=maxjobs) scm.plams.config.job.runscript.nproc = 1 -print('Running up to {} jobs in parallel simultaneously'.format(maxjobs)) +print("Running up to {} jobs in parallel simultaneously".format(maxjobs)) # Settings for ZacrosJob -#------------------------ +# ------------------------ sett = pz.Settings() sett.molar_fraction.CO = 0.45 sett.molar_fraction.O2 = 0.55 sett.temperature = 500.0 sett.pressure = 1.0 sett.max_time = 10.0 -sett.snapshots = ('time', 0.5) -sett.species_numbers = ('time', 0.1) +sett.snapshots = ("time", 0.5) +sett.species_numbers = ("time", 0.1) sett.random_seed = 953129 -job = pz.ZacrosJob( settings=sett, - lattice=zgb.lattice, - mechanism=zgb.mechanism, - cluster_expansion=zgb.cluster_expansion ) +job = pz.ZacrosJob(settings=sett, lattice=zgb.lattice, mechanism=zgb.mechanism, cluster_expansion=zgb.cluster_expansion) # Settings for ZacrosParametersScanJob -#-------------------------------------- +# -------------------------------------- parameters = pz.ZacrosParametersScanJob.Parameters() -parameters.add( 'x_CO', 'molar_fraction.CO', numpy.arange(0.2, 0.8, 0.01) ) -parameters.add( 'x_O2', 'molar_fraction.O2', lambda params: 1.0-params['x_CO'] ) +parameters.add("x_CO", "molar_fraction.CO", numpy.arange(0.2, 0.8, 0.01)) +parameters.add("x_O2", "molar_fraction.O2", lambda params: 1.0 - params["x_CO"]) -mjob = pz.ZacrosParametersScanJob( reference=job, - parameters=parameters ) +mjob = pz.ZacrosParametersScanJob(reference=job, parameters=parameters) # Running the calculations -#-------------------------- +# 
-------------------------- results = mjob.run() if not results.job.ok(): - print('Something went wrong!') + print("Something went wrong!") -#============================================== +# ============================================== # Getting the Results -#============================================== +# ============================================== x_CO = [] ac_O = [] @@ -69,63 +65,63 @@ TOF_CO2 = [] results_dict = results.turnover_frequency() -results_dict = results.average_coverage( update=results_dict ) +results_dict = results.average_coverage(update=results_dict) -for i,idx in enumerate(results.indices()): - x_CO.append( results_dict[i]['x_CO'] ) - ac_O.append( results_dict[i]['average_coverage']['O*'] ) - ac_CO.append( results_dict[i]['average_coverage']['CO*'] ) - TOF_CO2.append( results_dict[i]['turnover_frequency']['CO2'] ) +for i, idx in enumerate(results.indices()): + x_CO.append(results_dict[i]["x_CO"]) + ac_O.append(results_dict[i]["average_coverage"]["O*"]) + ac_CO.append(results_dict[i]["average_coverage"]["CO*"]) + TOF_CO2.append(results_dict[i]["turnover_frequency"]["CO2"]) print("----------------------------------------------") -print("%4s"%"cond", "%8s"%"x_CO", "%10s"%"ac_O", "%10s"%"ac_CO", "%10s"%"TOF_CO2") +print("%4s" % "cond", "%8s" % "x_CO", "%10s" % "ac_O", "%10s" % "ac_CO", "%10s" % "TOF_CO2") print("----------------------------------------------") for i in range(len(x_CO)): - print("%4d"%i, "%8.2f"%x_CO[i], "%10.6f"%ac_O[i], "%10.6f"%ac_CO[i], "%10.6f"%TOF_CO2[i]) + print("%4d" % i, "%8.2f" % x_CO[i], "%10.6f" % ac_O[i], "%10.6f" % ac_CO[i], "%10.6f" % TOF_CO2[i]) -#============================================== +# ============================================== # Visualizing the Results -#============================================== +# ============================================== try: import matplotlib.pyplot as plt except ImportError as e: - print('Consider to install matplotlib to visualize the results!') + print("Consider to install matplotlib to visualize the results!") exit(0) # Coverage and TOF plot -#----------------------- +# ----------------------- fig = plt.figure() ax = plt.axes() -ax.set_xlabel('Molar Fraction CO', fontsize=14) +ax.set_xlabel("Molar Fraction CO", fontsize=14) ax.set_ylabel("Coverage Fraction (%)", color="blue", fontsize=14) ax.plot(x_CO, ac_O, color="blue", linestyle="-.", lw=2, zorder=1) ax.plot(x_CO, ac_CO, color="blue", linestyle="-", lw=2, zorder=2) -plt.text(0.3, 0.9, 'O', fontsize=18, color="blue") -plt.text(0.7, 0.9, 'CO', fontsize=18, color="blue") +plt.text(0.3, 0.9, "O", fontsize=18, color="blue") +plt.text(0.7, 0.9, "CO", fontsize=18, color="blue") ax2 = ax.twinx() -ax2.set_ylabel("TOF (mol/s/site)",color="red", fontsize=14) +ax2.set_ylabel("TOF (mol/s/site)", color="red", fontsize=14) ax2.plot(x_CO, TOF_CO2, color="red", lw=2, zorder=5) -plt.text(0.37, 1.5, 'CO$_2$', fontsize=18, color="red") +plt.text(0.37, 1.5, "CO$_2$", fontsize=18, color="red") plt.show() cresults = results.children_results() # Lattice states for x_CO=0.54 and CO=0.55 -#------------------------------------------ +# ------------------------------------------ cresults[33].last_lattice_state().plot() cresults[34].last_lattice_state().plot() # Molecule numbers for x_CO=0.54 and CO=0.55 -#-------------------------------------------- -cresults[33].plot_molecule_numbers( ["CO2"], normalize_per_site=True ) -cresults[34].plot_molecule_numbers( ["CO2"], normalize_per_site=True ) +# -------------------------------------------- 
+cresults[33].plot_molecule_numbers(["CO2"], normalize_per_site=True) +cresults[34].plot_molecule_numbers(["CO2"], normalize_per_site=True) # Molecule numbers for x_CO=0.54 and CO=0.55. First Derivative -#------------------------------------------------------------- -cresults[33].plot_molecule_numbers( ["CO2"], normalize_per_site=True, derivative=True ) -cresults[34].plot_molecule_numbers( ["CO2"], normalize_per_site=True, derivative=True ) +# ------------------------------------------------------------- +cresults[33].plot_molecule_numbers(["CO2"], normalize_per_site=True, derivative=True) +cresults[34].plot_molecule_numbers(["CO2"], normalize_per_site=True, derivative=True) scm.pyzacros.finish() diff --git a/examples/ZiffGulariBarshad/PhaseTransitions.ipynb b/examples/ZiffGulariBarshad/PhaseTransitions.ipynb index 978e9ef..bc21ade 100644 --- a/examples/ZiffGulariBarshad/PhaseTransitions.ipynb +++ b/examples/ZiffGulariBarshad/PhaseTransitions.ipynb @@ -109,7 +109,7 @@ "metadata": {}, "outputs": [], "source": [ - "lattice = pz.Lattice( lattice_type=pz.Lattice.RECTANGULAR, lattice_constant=1.0, repeat_cell=[50,50] )" + "lattice = pz.Lattice(lattice_type=pz.Lattice.RECTANGULAR, lattice_constant=1.0, repeat_cell=[50, 50])" ] }, { @@ -151,27 +151,29 @@ "outputs": [], "source": [ "# CO_adsorption:\n", - "CO_adsorption = pz.ElementaryReaction(initial=[s0,CO_gas],\n", - " final=[CO_ads],\n", - " reversible=False,\n", - " pre_expon=10.0,\n", - " activation_energy=0.0)\n", + "CO_adsorption = pz.ElementaryReaction(\n", + " initial=[s0, CO_gas], final=[CO_ads], reversible=False, pre_expon=10.0, activation_energy=0.0\n", + ")\n", "\n", "# O2_adsorption:\n", - "O2_adsorption = pz.ElementaryReaction(initial=[s0,s0,O2_gas],\n", - " final=[O_ads,O_ads],\n", - " neighboring=[(0, 1)],\n", - " reversible=False,\n", - " pre_expon=2.5,\n", - " activation_energy=0.0)\n", + "O2_adsorption = pz.ElementaryReaction(\n", + " initial=[s0, s0, O2_gas],\n", + " final=[O_ads, O_ads],\n", + " neighboring=[(0, 1)],\n", + " reversible=False,\n", + " pre_expon=2.5,\n", + " activation_energy=0.0,\n", + ")\n", "\n", "# CO_oxidation:\n", - "CO_oxidation = pz.ElementaryReaction(initial=[CO_ads, O_ads],\n", - " final=[s0, s0, CO2_gas],\n", - " neighboring=[(0, 1)],\n", - " reversible=False,\n", - " pre_expon=1.0e+20,\n", - " activation_energy=0.0)\n", + "CO_oxidation = pz.ElementaryReaction(\n", + " initial=[CO_ads, O_ads],\n", + " final=[s0, s0, CO2_gas],\n", + " neighboring=[(0, 1)],\n", + " reversible=False,\n", + " pre_expon=1.0e20,\n", + " activation_energy=0.0,\n", + ")\n", "\n", "mechanism = [CO_adsorption, O2_adsorption, CO_oxidation]" ] @@ -235,7 +237,7 @@ "maxjobs = multiprocessing.cpu_count()\n", "scm.plams.config.default_jobrunner = scm.plams.JobRunner(parallel=True, maxjobs=maxjobs)\n", "scm.plams.config.job.runscript.nproc = 1\n", - "print('Running up to {} jobs in parallel simultaneously'.format(maxjobs))" + "print(\"Running up to {} jobs in parallel simultaneously\".format(maxjobs))" ] }, { @@ -269,8 +271,8 @@ "sett.temperature = 500.0\n", "sett.pressure = 1.0\n", "sett.max_time = 10.0\n", - "sett.snapshots = ('time', 0.5)\n", - "sett.species_numbers = ('time', 0.1)\n", + "sett.snapshots = (\"time\", 0.5)\n", + "sett.species_numbers = (\"time\", 0.1)\n", "sett.random_seed = 953129" ] }, @@ -631,23 +633,20 @@ } ], "source": [ - "x_CO = numpy.arange(0.2,0.8,0.01)\n", + "x_CO = numpy.arange(0.2, 0.8, 0.01)\n", "\n", "results = []\n", "for x in x_CO:\n", - " sett.molar_fraction.CO = x\n", - " sett.molar_fraction.O2 = 
1.0-x\n", + " sett.molar_fraction.CO = x\n", + " sett.molar_fraction.O2 = 1.0 - x\n", "\n", - " job = pz.ZacrosJob( settings=sett,\n", - " lattice=lattice,\n", - " mechanism=mechanism,\n", - " cluster_expansion=cluster_expansion )\n", + " job = pz.ZacrosJob(settings=sett, lattice=lattice, mechanism=mechanism, cluster_expansion=cluster_expansion)\n", "\n", - " results.append(job.run())\n", - " \n", - "for i,x in enumerate(x_CO):\n", - " if not results[i].job.ok():\n", - " print('Something went wrong with condition xCO={}!'.format(x))" + " results.append(job.run())\n", + "\n", + "for i, x in enumerate(x_CO):\n", + " if not results[i].job.ok():\n", + " print(\"Something went wrong with condition xCO={}!\".format(x))" ] }, { @@ -686,13 +685,13 @@ "ac_CO = []\n", "TOF_CO2 = []\n", "\n", - "for i,x in enumerate(x_CO):\n", - " ac = results[i].average_coverage( last=5 )\n", - " TOFs,_,_,_ = results[i].turnover_frequency()\n", + "for i, x in enumerate(x_CO):\n", + " ac = results[i].average_coverage(last=5)\n", + " TOFs, _, _, _ = results[i].turnover_frequency()\n", "\n", - " ac_O.append( ac[\"O*\"] )\n", - " ac_CO.append( ac[\"CO*\"] )\n", - " TOF_CO2.append( TOFs[\"CO2\"] )" + " ac_O.append(ac[\"O*\"])\n", + " ac_CO.append(ac[\"CO*\"])\n", + " TOF_CO2.append(TOFs[\"CO2\"])" ] }, { @@ -784,11 +783,11 @@ ], "source": [ "print(\"----------------------------------------------\")\n", - "print(\"%4s\"%\"cond\", \"%8s\"%\"x_CO\", \"%10s\"%\"ac_O\", \"%10s\"%\"ac_CO\", \"%10s\"%\"TOF_CO2\")\n", + "print(\"%4s\" % \"cond\", \"%8s\" % \"x_CO\", \"%10s\" % \"ac_O\", \"%10s\" % \"ac_CO\", \"%10s\" % \"TOF_CO2\")\n", "print(\"----------------------------------------------\")\n", "\n", - "for i,x in enumerate(x_CO):\n", - " print(\"%4d\"%i, \"%8.2f\"%x_CO[i], \"%10.6f\"%ac_O[i], \"%10.6f\"%ac_CO[i], \"%10.6f\"%TOF_CO2[i])" + "for i, x in enumerate(x_CO):\n", + " print(\"%4d\" % i, \"%8.2f\" % x_CO[i], \"%10.6f\" % ac_O[i], \"%10.6f\" % ac_CO[i], \"%10.6f\" % TOF_CO2[i])" ] }, { @@ -828,17 +827,17 @@ "fig = plt.figure()\n", "\n", "ax = plt.axes()\n", - "ax.set_xlabel('Molar Fraction CO', fontsize=14)\n", + "ax.set_xlabel(\"Molar Fraction CO\", fontsize=14)\n", "ax.set_ylabel(\"Coverage Fraction (%)\", color=\"blue\", fontsize=14)\n", "ax.plot(x_CO, ac_O, color=\"blue\", linestyle=\"-.\", lw=2, zorder=1)\n", "ax.plot(x_CO, ac_CO, color=\"blue\", linestyle=\"-\", lw=2, zorder=2)\n", - "plt.text(0.3, 0.9, 'O', fontsize=18, color=\"blue\")\n", - "plt.text(0.7, 0.9, 'CO', fontsize=18, color=\"blue\")\n", + "plt.text(0.3, 0.9, \"O\", fontsize=18, color=\"blue\")\n", + "plt.text(0.7, 0.9, \"CO\", fontsize=18, color=\"blue\")\n", "\n", "ax2 = ax.twinx()\n", - "ax2.set_ylabel(\"TOF (mol/s/site)\",color=\"red\", fontsize=14)\n", + "ax2.set_ylabel(\"TOF (mol/s/site)\", color=\"red\", fontsize=14)\n", "ax2.plot(x_CO, TOF_CO2, color=\"red\", lw=2, zorder=5)\n", - "plt.text(0.37, 1.5, 'CO$_2$', fontsize=18, color=\"red\")\n", + "plt.text(0.37, 1.5, \"CO$_2$\", fontsize=18, color=\"red\")\n", "\n", "plt.show()" ] @@ -987,8 +986,8 @@ } ], "source": [ - "results[33].plot_molecule_numbers( [\"CO2\"], normalize_per_site=True )\n", - "results[34].plot_molecule_numbers( [\"CO2\"], normalize_per_site=True )" + "results[33].plot_molecule_numbers([\"CO2\"], normalize_per_site=True)\n", + "results[34].plot_molecule_numbers([\"CO2\"], normalize_per_site=True)" ] }, { @@ -1019,8 +1018,8 @@ } ], "source": [ - "results[33].plot_molecule_numbers( [\"CO2\"], normalize_per_site=True, derivative=True )\n", - "results[34].plot_molecule_numbers( 
[\"CO2\"], normalize_per_site=True, derivative=True )" + "results[33].plot_molecule_numbers([\"CO2\"], normalize_per_site=True, derivative=True)\n", + "results[34].plot_molecule_numbers([\"CO2\"], normalize_per_site=True, derivative=True)" ] }, { diff --git a/examples/ZiffGulariBarshad/PhaseTransitions.py b/examples/ZiffGulariBarshad/PhaseTransitions.py index d714b2c..49c985f 100644 --- a/examples/ZiffGulariBarshad/PhaseTransitions.py +++ b/examples/ZiffGulariBarshad/PhaseTransitions.py @@ -46,7 +46,7 @@ # **3. A rectangular lattice with a single site type**. -lattice = pz.Lattice( lattice_type=pz.Lattice.RECTANGULAR, lattice_constant=1.0, repeat_cell=[50,50] ) +lattice = pz.Lattice(lattice_type=pz.Lattice.RECTANGULAR, lattice_constant=1.0, repeat_cell=[50, 50]) # **4. Two clusters in the cluster-expansion Hamiltonian:** $CO^*$-bs and $O^*$-bs. @@ -62,27 +62,29 @@ # of $O_2$, and $CO$ oxidation. # CO_adsorption: -CO_adsorption = pz.ElementaryReaction(initial=[s0,CO_gas], - final=[CO_ads], - reversible=False, - pre_expon=10.0, - activation_energy=0.0) +CO_adsorption = pz.ElementaryReaction( + initial=[s0, CO_gas], final=[CO_ads], reversible=False, pre_expon=10.0, activation_energy=0.0 +) # O2_adsorption: -O2_adsorption = pz.ElementaryReaction(initial=[s0,s0,O2_gas], - final=[O_ads,O_ads], - neighboring=[(0, 1)], - reversible=False, - pre_expon=2.5, - activation_energy=0.0) +O2_adsorption = pz.ElementaryReaction( + initial=[s0, s0, O2_gas], + final=[O_ads, O_ads], + neighboring=[(0, 1)], + reversible=False, + pre_expon=2.5, + activation_energy=0.0, +) # CO_oxidation: -CO_oxidation = pz.ElementaryReaction(initial=[CO_ads, O_ads], - final=[s0, s0, CO2_gas], - neighboring=[(0, 1)], - reversible=False, - pre_expon=1.0e+20, - activation_energy=0.0) +CO_oxidation = pz.ElementaryReaction( + initial=[CO_ads, O_ads], + final=[s0, s0, CO2_gas], + neighboring=[(0, 1)], + reversible=False, + pre_expon=1.0e20, + activation_energy=0.0, +) mechanism = [CO_adsorption, O2_adsorption, CO_oxidation] @@ -99,12 +101,12 @@ # In this case, we choose to use the maximum number of simultaneous processes # (``maxjobs``) equal to the number of processors in the machine. Additionally, # by setting ``nproc = 1`` we establish that only one processor will be used -# for each zacros instance. +# for each zacros instance. maxjobs = multiprocessing.cpu_count() scm.plams.config.default_jobrunner = scm.plams.JobRunner(parallel=True, maxjobs=maxjobs) scm.plams.config.job.runscript.nproc = 1 -print('Running up to {} jobs in parallel simultaneously'.format(maxjobs)) +print("Running up to {} jobs in parallel simultaneously".format(maxjobs)) # Now we have to set up the calculation using a ``Settings`` object. Firstly, @@ -125,8 +127,8 @@ sett.temperature = 500.0 sett.pressure = 1.0 sett.max_time = 10.0 -sett.snapshots = ('time', 0.5) -sett.species_numbers = ('time', 0.1) +sett.snapshots = ("time", 0.5) +sett.species_numbers = ("time", 0.1) sett.random_seed = 953129 @@ -145,23 +147,20 @@ # ensure that every calculation was completed successfully and wait for all # parallel processes to complete before proceeding to access the results. 
-x_CO = numpy.arange(0.2,0.8,0.01)
+x_CO = numpy.arange(0.2, 0.8, 0.01)
 
 results = []
 for x in x_CO:
-    sett.molar_fraction.CO = x
-    sett.molar_fraction.O2 = 1.0-x
+    sett.molar_fraction.CO = x
+    sett.molar_fraction.O2 = 1.0 - x
 
-    job = pz.ZacrosJob( settings=sett,
-                        lattice=lattice,
-                        mechanism=mechanism,
-                        cluster_expansion=cluster_expansion )
+    job = pz.ZacrosJob(settings=sett, lattice=lattice, mechanism=mechanism, cluster_expansion=cluster_expansion)
 
-    results.append(job.run())
-    
-for i,x in enumerate(x_CO):
-    if not results[i].job.ok():
-        print('Something went wrong with condition xCO={}!'.format(x))
+    results.append(job.run())
+
+for i, x in enumerate(x_CO):
+    if not results[i].job.ok():
+        print("Something went wrong with condition xCO={}!".format(x))
 
 
 # If the script worked successfully, you should have seen several
@@ -177,23 +176,23 @@
 ac_CO = []
 TOF_CO2 = []
 
-for i,x in enumerate(x_CO):
-    ac = results[i].average_coverage( last=5 )
-    TOFs,_,_,_ = results[i].turnover_frequency()
+for i, x in enumerate(x_CO):
+    ac = results[i].average_coverage(last=5)
+    TOFs, _, _, _ = results[i].turnover_frequency()
 
-    ac_O.append( ac["O*"] )
-    ac_CO.append( ac["CO*"] )
-    TOF_CO2.append( TOFs["CO2"] )
+    ac_O.append(ac["O*"])
+    ac_CO.append(ac["CO*"])
+    TOF_CO2.append(TOFs["CO2"])
 
 
 # Finally, we just nicely print the results in a table.
 
 print("----------------------------------------------")
-print("%4s"%"cond", "%8s"%"x_CO", "%10s"%"ac_O", "%10s"%"ac_CO", "%10s"%"TOF_CO2")
+print("%4s" % "cond", "%8s" % "x_CO", "%10s" % "ac_O", "%10s" % "ac_CO", "%10s" % "TOF_CO2")
 print("----------------------------------------------")
 
-for i,x in enumerate(x_CO):
-    print("%4d"%i, "%8.2f"%x_CO[i], "%10.6f"%ac_O[i], "%10.6f"%ac_CO[i], "%10.6f"%TOF_CO2[i])
+for i, x in enumerate(x_CO):
+    print("%4d" % i, "%8.2f" % x_CO[i], "%10.6f" % ac_O[i], "%10.6f" % ac_CO[i], "%10.6f" % TOF_CO2[i])
 
 
 # The above results are the final aim of the calculation. However, we
@@ -209,17 +208,17 @@
 fig = plt.figure()
 
 ax = plt.axes()
-ax.set_xlabel('Molar Fraction CO', fontsize=14)
+ax.set_xlabel("Molar Fraction CO", fontsize=14)
 ax.set_ylabel("Coverage Fraction (%)", color="blue", fontsize=14)
 ax.plot(x_CO, ac_O, color="blue", linestyle="-.", lw=2, zorder=1)
 ax.plot(x_CO, ac_CO, color="blue", linestyle="-", lw=2, zorder=2)
-plt.text(0.3, 0.9, 'O', fontsize=18, color="blue")
-plt.text(0.7, 0.9, 'CO', fontsize=18, color="blue")
+plt.text(0.3, 0.9, "O", fontsize=18, color="blue")
+plt.text(0.7, 0.9, "CO", fontsize=18, color="blue")
 
 ax2 = ax.twinx()
-ax2.set_ylabel("TOF (mol/s/site)",color="red", fontsize=14)
+ax2.set_ylabel("TOF (mol/s/site)", color="red", fontsize=14)
 ax2.plot(x_CO, TOF_CO2, color="red", lw=2, zorder=5)
-plt.text(0.37, 1.5, 'CO$_2$', fontsize=18, color="red")
+plt.text(0.37, 1.5, "CO$_2$", fontsize=18, color="red")
 
 plt.show()
 
@@ -230,11 +229,11 @@
 # irreversible because the molecules are sticky to their original sites
 # and remain stationary until they are removed by a reaction. This leads
 # to the figure above having three regions:
-# 
+#
 # 1. Oxygen poisoned state, $x_\text{CO}<0.32$.
 # 2. Reactive state $0.32<x_\text{CO}<0.55$.
 # 3. CO poisoned state, $x_\text{CO}>0.55$.
-# 
+#
 # The first transition at $x_\text{CO}=0.32$ is continuous, and therefore
 # it is of the second order. The second transition at $x_\text{CO}=0.55$
 # occurs abruptly, implying that this is of a first-order transition.
@@ -265,21 +264,21 @@ # the steady-state for a given composition is characterized when the # derivative of the $CO_2$ production (TOF) with respect to time is zero # and remains so: -# +# # $$ # \frac{d}{dt}TOF_{\text{CO}_2} = 0, \,\,\text{for all present and future}\,\, t # $$ -# +# # **pyZacros** also offers the function ``plot_molecule_numbers()`` to # visualize the molecule numbers and its first derivative as a function # of time. See code and figures below: -results[33].plot_molecule_numbers( ["CO2"], normalize_per_site=True ) -results[34].plot_molecule_numbers( ["CO2"], normalize_per_site=True ) +results[33].plot_molecule_numbers(["CO2"], normalize_per_site=True) +results[34].plot_molecule_numbers(["CO2"], normalize_per_site=True) -results[33].plot_molecule_numbers( ["CO2"], normalize_per_site=True, derivative=True ) -results[34].plot_molecule_numbers( ["CO2"], normalize_per_site=True, derivative=True ) +results[33].plot_molecule_numbers(["CO2"], normalize_per_site=True, derivative=True) +results[34].plot_molecule_numbers(["CO2"], normalize_per_site=True, derivative=True) # From the figures above, it is clear that we have reached a steady-state for @@ -290,4 +289,3 @@ # Now, we can close the pyZacros environment: scm.pyzacros.finish() - diff --git a/examples/ZiffGulariBarshad/SteadyState.ipynb b/examples/ZiffGulariBarshad/SteadyState.ipynb index 03478be..dd76b62 100644 --- a/examples/ZiffGulariBarshad/SteadyState.ipynb +++ b/examples/ZiffGulariBarshad/SteadyState.ipynb @@ -133,14 +133,13 @@ "z_sett.molar_fraction.O2 = 1.0 - z_sett.molar_fraction.CO\n", "z_sett.temperature = 500.0\n", "z_sett.pressure = 1.0\n", - "z_sett.species_numbers = ('time', 0.1)\n", - "z_sett.max_time = 100.0*0.1\n", + "z_sett.species_numbers = (\"time\", 0.1)\n", + "z_sett.max_time = 100.0 * 0.1\n", "z_sett.random_seed = 953129\n", "\n", - "job = pz.ZacrosJob( settings=z_sett,\n", - " lattice=zgb.lattice,\n", - " mechanism=zgb.mechanism,\n", - " cluster_expansion=zgb.cluster_expansion )" + "job = pz.ZacrosJob(\n", + " settings=z_sett, lattice=zgb.lattice, mechanism=zgb.mechanism, cluster_expansion=zgb.cluster_expansion\n", + ")" ] }, { @@ -195,9 +194,9 @@ "ss_sett.turnover_frequency.nreplicas = 1\n", "\n", "parameters = pz.ZacrosSteadyStateJob.Parameters()\n", - "parameters.add( 'max_time', 'restart.max_time', numpy.arange(20.0, 1000.0, 100) )\n", + "parameters.add(\"max_time\", \"restart.max_time\", numpy.arange(20.0, 1000.0, 100))\n", "\n", - "ss_job = pz.ZacrosSteadyStateJob( settings=ss_sett, reference=job, parameters=parameters )" + "ss_job = pz.ZacrosSteadyStateJob(settings=ss_sett, reference=job, parameters=parameters)" ] }, { @@ -317,7 +316,7 @@ "results = ss_job.run()\n", "\n", "if not ss_job.ok():\n", - " print('Something went wrong!')" + " print(\"Something went wrong!\")" ] }, { @@ -360,18 +359,22 @@ } ], "source": [ - "print(60*'-')\n", + "print(60 * \"-\")\n", "fline = \"{0:>8s}{1:>10s}{2:>15s}{3:>12s}{4:>10s}\"\n", - "print( fline.format('iter', 'max_time', 'TOF_CO2', 'error', 'conv?') )\n", - "print(60*'-')\n", + "print(fline.format(\"iter\", \"max_time\", \"TOF_CO2\", \"error\", \"conv?\"))\n", + "print(60 * \"-\")\n", "\n", - "for i,step in enumerate(results.history()):\n", + "for i, step in enumerate(results.history()):\n", " fline = \"{0:8d}{1:>10.2f}{2:15.5f}{3:>12.5f}{4:>10s}\"\n", - " print( fline.format(i,\n", - " step['max_time'],\n", - " step['turnover_frequency']['CO2'],\n", - " step['turnover_frequency_error']['CO2'],\n", - " str(all(step['converged'].values()))) )" + " print(\n", + 
" fline.format(\n", + " i,\n", + " step[\"max_time\"],\n", + " step[\"turnover_frequency\"][\"CO2\"],\n", + " step[\"turnover_frequency_error\"][\"CO2\"],\n", + " str(all(step[\"converged\"].values())),\n", + " )\n", + " )" ] }, { @@ -436,16 +439,22 @@ "\n", "fig = plt.figure()\n", "ax = plt.axes()\n", - "ax.set_xlabel('Time (s)', fontsize=14)\n", + "ax.set_xlabel(\"Time (s)\", fontsize=14)\n", "ax.set_ylabel(\"CO$_2$ Production (mol/site)\", fontsize=14)\n", "\n", - "colors = 'bgrcmykb'\n", + "colors = \"bgrcmykb\"\n", "for i in range(results.niterations()):\n", " for j in range(results.nreplicas()):\n", - " molecule_numbers = results.children_results(i,j).molecule_numbers(['CO2'], normalize_per_site=True)\n", - " \n", - " ax.plot( molecule_numbers['Time'], molecule_numbers['CO2'], lw=3, color=colors[i], zorder=-i )\n", - " ax.vlines( max(molecule_numbers['Time']) , 0, max(molecule_numbers['CO2']), colors='0.8', linestyles='--',)\n", + " molecule_numbers = results.children_results(i, j).molecule_numbers([\"CO2\"], normalize_per_site=True)\n", + "\n", + " ax.plot(molecule_numbers[\"Time\"], molecule_numbers[\"CO2\"], lw=3, color=colors[i], zorder=-i)\n", + " ax.vlines(\n", + " max(molecule_numbers[\"Time\"]),\n", + " 0,\n", + " max(molecule_numbers[\"CO2\"]),\n", + " colors=\"0.8\",\n", + " linestyles=\"--\",\n", + " )\n", "\n", "plt.show()" ] diff --git a/examples/ZiffGulariBarshad/SteadyState.py b/examples/ZiffGulariBarshad/SteadyState.py index 7bd6577..b0518a1 100644 --- a/examples/ZiffGulariBarshad/SteadyState.py +++ b/examples/ZiffGulariBarshad/SteadyState.py @@ -22,7 +22,7 @@ import scm.pyzacros.models -# Then, we initialize the **pyZacros** environment. +# Then, we initialize the **pyZacros** environment. scm.pyzacros.init() @@ -60,14 +60,13 @@ z_sett.molar_fraction.O2 = 1.0 - z_sett.molar_fraction.CO z_sett.temperature = 500.0 z_sett.pressure = 1.0 -z_sett.species_numbers = ('time', 0.1) -z_sett.max_time = 100.0*0.1 +z_sett.species_numbers = ("time", 0.1) +z_sett.max_time = 100.0 * 0.1 z_sett.random_seed = 953129 -job = pz.ZacrosJob( settings=z_sett, - lattice=zgb.lattice, - mechanism=zgb.mechanism, - cluster_expansion=zgb.cluster_expansion ) +job = pz.ZacrosJob( + settings=z_sett, lattice=zgb.lattice, mechanism=zgb.mechanism, cluster_expansion=zgb.cluster_expansion +) # It is now time to set up the steady state calculation. It also needs a ``Settings`` @@ -83,13 +82,13 @@ # ``turnover frequency.nreplicas`` parameter allows several simulations to run in # parallel to speed up the calculation at the expense of more computational power. # For the time being, we will leave it at 1, but we will return to it later. -# +# # In the second block of code, the ``ZacrosSteadyStateJob.Parameters()`` class allows # us to specify the grid in ``max time``, which in this case ranges from 20 to 1000 # every 100 seconds. Take note that the convergence is verified for each point on # this grid, and if it has not converged, the calculation is resumed up to the next # point in ``max time``. 
-# +# # Finally, we create ``ZacrosSteadyStateJob``, which references the ``ZacrosJob`` # defined above as well as the ``Settings`` object and parameters we just defined: @@ -99,9 +98,9 @@ ss_sett.turnover_frequency.nreplicas = 1 parameters = pz.ZacrosSteadyStateJob.Parameters() -parameters.add( 'max_time', 'restart.max_time', numpy.arange(20.0, 1000.0, 100) ) +parameters.add("max_time", "restart.max_time", numpy.arange(20.0, 1000.0, 100)) -ss_job = pz.ZacrosSteadyStateJob( settings=ss_sett, reference=job, parameters=parameters ) +ss_job = pz.ZacrosSteadyStateJob(settings=ss_sett, reference=job, parameters=parameters) # The steady-state calculation setup is ready. Therefore, we can start it @@ -113,11 +112,11 @@ results = ss_job.run() if not ss_job.ok(): - print('Something went wrong!') + print("Something went wrong!") # If the execution got up to this point, everything worked as expected. Hooray! -# +# # Now, in the following lines, we just nicely print the results in a table. See # the API documentation to learn more about how the ``results`` object is structured. # Here we show the history of the simulation and see how it progresses as the @@ -125,18 +124,22 @@ # and whether the calculation converged. Notice that the calculation should have # been converged at 720 s of ``max_time``. -print(60*'-') +print(60 * "-") fline = "{0:>8s}{1:>10s}{2:>15s}{3:>12s}{4:>10s}" -print( fline.format('iter', 'max_time', 'TOF_CO2', 'error', 'conv?') ) -print(60*'-') +print(fline.format("iter", "max_time", "TOF_CO2", "error", "conv?")) +print(60 * "-") -for i,step in enumerate(results.history()): +for i, step in enumerate(results.history()): fline = "{0:8d}{1:>10.2f}{2:15.5f}{3:>12.5f}{4:>10s}" - print( fline.format(i, - step['max_time'], - step['turnover_frequency']['CO2'], - step['turnover_frequency_error']['CO2'], - str(all(step['converged'].values()))) ) + print( + fline.format( + i, + step["max_time"], + step["turnover_frequency"]["CO2"], + step["turnover_frequency_error"]["CO2"], + str(all(step["converged"].values())), + ) + ) # Now that all calculations are done, we can close the pyZacros environment: @@ -156,16 +159,21 @@ fig = plt.figure() ax = plt.axes() -ax.set_xlabel('Time (s)', fontsize=14) +ax.set_xlabel("Time (s)", fontsize=14) ax.set_ylabel("CO$_2$ Production (mol/site)", fontsize=14) -colors = 'bgrcmykb' +colors = "bgrcmykb" for i in range(results.niterations()): for j in range(results.nreplicas()): - molecule_numbers = results.children_results(i,j).molecule_numbers(['CO2'], normalize_per_site=True) - - ax.plot( molecule_numbers['Time'], molecule_numbers['CO2'], lw=3, color=colors[i], zorder=-i ) - ax.vlines( max(molecule_numbers['Time']) , 0, max(molecule_numbers['CO2']), colors='0.8', linestyles='--',) + molecule_numbers = results.children_results(i, j).molecule_numbers(["CO2"], normalize_per_site=True) -plt.show() + ax.plot(molecule_numbers["Time"], molecule_numbers["CO2"], lw=3, color=colors[i], zorder=-i) + ax.vlines( + max(molecule_numbers["Time"]), + 0, + max(molecule_numbers["CO2"]), + colors="0.8", + linestyles="--", + ) +plt.show() diff --git a/examples/ZiffGulariBarshad/SteadyState_ViewResults.py b/examples/ZiffGulariBarshad/SteadyState_ViewResults.py index b14ec76..c685d45 100644 --- a/examples/ZiffGulariBarshad/SteadyState_ViewResults.py +++ b/examples/ZiffGulariBarshad/SteadyState_ViewResults.py @@ -3,32 +3,43 @@ scm.pyzacros.init() -job = scm.pyzacros.load( "plams_workdir/plamsjob/plamsjob.dill" ) +job = scm.pyzacros.load("plams_workdir/plamsjob/plamsjob.dill") results = 
job.results scm.pyzacros.finish() -print((8+10+15+15+10+ 5)*"-") -print("%8s"%"iter", "%10s"%"TOF_CO2", "%15s"%"max_time","%15s"%"TOF_CO2_error", "%10s"%"conv?") -print("%8s"%"", "%10s"%"mol/s/site", "%15s"%"s", "%15s"%"mol/s/site", "%10s"%"") -print((8+10+15+15+10+ 5)*"-") +print((8 + 10 + 15 + 15 + 10 + 5) * "-") +print("%8s" % "iter", "%10s" % "TOF_CO2", "%15s" % "max_time", "%15s" % "TOF_CO2_error", "%10s" % "conv?") +print("%8s" % "", "%10s" % "mol/s/site", "%15s" % "s", "%15s" % "mol/s/site", "%10s" % "") +print((8 + 10 + 15 + 15 + 10 + 5) * "-") -for i,step in enumerate(results.history()): - print("%8d"%i, "%10.5f"%step['turnover_frequency']['CO2'], "%15d"%step['max_time'], - "%15.5f"%step['turnover_frequency_error']['CO2'], "%10s"%step['converged']['CO2']) +for i, step in enumerate(results.history()): + print( + "%8d" % i, + "%10.5f" % step["turnover_frequency"]["CO2"], + "%15d" % step["max_time"], + "%15.5f" % step["turnover_frequency_error"]["CO2"], + "%10s" % step["converged"]["CO2"], + ) import matplotlib.pyplot as plt fig = plt.figure() ax = plt.axes() -ax.set_xlabel('Time (s)', fontsize=14) +ax.set_xlabel("Time (s)", fontsize=14) ax.set_ylabel("CO$_2$ Production (mol/site)", fontsize=14) -colors = 'bgrcmykb' +colors = "bgrcmykb" for i in range(results.niterations()): for j in range(results.nreplicas()): - molecule_numbers = results.children_results(i,j).molecule_numbers(['CO2'], normalize_per_site=True) - ax.plot( molecule_numbers['Time'], molecule_numbers['CO2'], lw=3, color=colors[i], zorder=-i ) - ax.vlines( max(molecule_numbers['Time']) , 0, max(molecule_numbers['CO2']), colors='0.8', linestyles='--',) + molecule_numbers = results.children_results(i, j).molecule_numbers(["CO2"], normalize_per_site=True) + ax.plot(molecule_numbers["Time"], molecule_numbers["CO2"], lw=3, color=colors[i], zorder=-i) + ax.vlines( + max(molecule_numbers["Time"]), + 0, + max(molecule_numbers["CO2"]), + colors="0.8", + linestyles="--", + ) plt.show() diff --git a/examples/ZiffGulariBarshad/ZiffGulariBarshad.py b/examples/ZiffGulariBarshad/ZiffGulariBarshad.py index 2b31503..025e7a9 100644 --- a/examples/ZiffGulariBarshad/ZiffGulariBarshad.py +++ b/examples/ZiffGulariBarshad/ZiffGulariBarshad.py @@ -2,66 +2,69 @@ This example reproduces the Zacros example described in: https://zacros.org/tutorials/4-tutorial-1-ziff-gulari-barshad-model-in-zacros """ + import scm.plams import scm.pyzacros as pz -#--------------------------------------------- +# --------------------------------------------- # Species: -#--------------------------------------------- +# --------------------------------------------- # Gas-species: CO_gas = pz.Species("CO") O2_gas = pz.Species("O2") CO2_gas = pz.Species("CO2", gas_energy=-2.337) # Surface species: -s0 = pz.Species("*", 1) # Empty adsorption site +s0 = pz.Species("*", 1) # Empty adsorption site CO_ads = pz.Species("CO*", 1) O_ads = pz.Species("O*", 1) -#--------------------------------------------- +# --------------------------------------------- # Lattice setup: -#--------------------------------------------- -lattice = pz.Lattice( lattice_type=pz.Lattice.RECTANGULAR, lattice_constant=1.0, repeat_cell=[50,50] ) +# --------------------------------------------- +lattice = pz.Lattice(lattice_type=pz.Lattice.RECTANGULAR, lattice_constant=1.0, repeat_cell=[50, 50]) -#--------------------------------------------- +# --------------------------------------------- # Clusters: -#--------------------------------------------- +# --------------------------------------------- 
CO_point = pz.Cluster(species=[CO_ads], energy=-1.3) O_point = pz.Cluster(species=[O_ads], energy=-2.3) cluster_expansion = [CO_point, O_point] -#--------------------------------------------- +# --------------------------------------------- # Elementary Reactions -#--------------------------------------------- +# --------------------------------------------- # CO_adsorption: -CO_adsorption = pz.ElementaryReaction(initial=[s0,CO_gas], - final=[CO_ads], - reversible=False, - pre_expon=10.0, - activation_energy=0.0) +CO_adsorption = pz.ElementaryReaction( + initial=[s0, CO_gas], final=[CO_ads], reversible=False, pre_expon=10.0, activation_energy=0.0 +) # O2_adsorption: -O2_adsorption = pz.ElementaryReaction(initial=[s0,s0,O2_gas], - final=[O_ads,O_ads], - neighboring=[(0, 1)], - reversible=False, - pre_expon=2.5, - activation_energy=0.0) +O2_adsorption = pz.ElementaryReaction( + initial=[s0, s0, O2_gas], + final=[O_ads, O_ads], + neighboring=[(0, 1)], + reversible=False, + pre_expon=2.5, + activation_energy=0.0, +) # CO_oxidation: -CO_oxidation = pz.ElementaryReaction(initial=[CO_ads, O_ads], - final=[s0, s0, CO2_gas], - neighboring=[(0, 1)], - reversible=False, - pre_expon=1.0e+20, - activation_energy=0.0) +CO_oxidation = pz.ElementaryReaction( + initial=[CO_ads, O_ads], + final=[s0, s0, CO2_gas], + neighboring=[(0, 1)], + reversible=False, + pre_expon=1.0e20, + activation_energy=0.0, +) mechanism = [CO_adsorption, O2_adsorption, CO_oxidation] -#--------------------------------------------- +# --------------------------------------------- # Calculation Settings -#--------------------------------------------- +# --------------------------------------------- scm.pyzacros.init() # Settings: @@ -71,20 +74,22 @@ sett.random_seed = 953129 sett.temperature = 500.0 sett.pressure = 1.0 -sett.snapshots = ('time', 5.e-1) -sett.process_statistics = ('time', 1.e-2) -sett.species_numbers = ('time', 1.e-2) +sett.snapshots = ("time", 5.0e-1) +sett.process_statistics = ("time", 1.0e-2) +sett.species_numbers = ("time", 1.0e-2) sett.max_time = 25.0 -job = pz.ZacrosJob( settings=sett, - lattice=lattice, - mechanism=[CO_adsorption, O2_adsorption, CO_oxidation], - cluster_expansion=[CO_point, O_point] ) +job = pz.ZacrosJob( + settings=sett, + lattice=lattice, + mechanism=[CO_adsorption, O2_adsorption, CO_oxidation], + cluster_expansion=[CO_point, O_point], +) print(job) results = job.run() -if( job.ok() ): - results.plot_lattice_states( results.lattice_states() ) +if job.ok(): + results.plot_lattice_states(results.lattice_states()) scm.pyzacros.finish() diff --git a/examples/intro/intro.py b/examples/intro/intro.py index 0a98f95..ff1fa72 100644 --- a/examples/intro/intro.py +++ b/examples/intro/intro.py @@ -7,26 +7,25 @@ CO2_g = pz.Species("CO2", gas_energy=-2.337) # Surface species -s0 = pz.Species("*") # Empty adsorption site +s0 = pz.Species("*") # Empty adsorption site CO_s = pz.Species("CO*") O_s = pz.Species("O*") # Species List -spl = pz.SpeciesList([CO_g,O2_g,CO2_g,s0,CO_s]) -spl.append( O_s ) +spl = pz.SpeciesList([CO_g, O2_g, CO2_g, s0, CO_s]) +spl.append(O_s) print(spl) # Lattice setup -lat = pz.Lattice( lattice_type=pz.Lattice.TRIANGULAR, - lattice_constant=1.0, repeat_cell=[10,3] ) +lat = pz.Lattice(lattice_type=pz.Lattice.TRIANGULAR, lattice_constant=1.0, repeat_cell=[10, 3]) print(lat) lat.plot() # Clusters -CO_p = pz.Cluster( species=[CO_s], energy=-1.3 ) -O_p = pz.Cluster( species=[O_s], energy=-2.3 ) +CO_p = pz.Cluster(species=[CO_s], energy=-1.3) +O_p = pz.Cluster(species=[O_s], energy=-2.3) 
print(CO_p) # Cluster Expansion @@ -34,18 +33,27 @@ print(ce) # Elementary Reactions -CO_ads = pz.ElementaryReaction( initial=[s0, CO_g], final=[CO_s], - reversible=False, pre_expon=10.0, - label="CO_adsorption" ) - -O2_ads = pz.ElementaryReaction( initial=[s0, s0, O2_g], final=[O_s, O_s], neighboring=[(0,1)], - reversible=False, pre_expon=2.5, - label="O2_adsorption" ) - -CO_oxi = pz.ElementaryReaction( initial=[CO_s, O_s], final=[s0, s0, CO2_g], - neighboring=[(0,1)], - reversible=False, pre_expon=1.0e+20, - label="CO_oxidation") +CO_ads = pz.ElementaryReaction( + initial=[s0, CO_g], final=[CO_s], reversible=False, pre_expon=10.0, label="CO_adsorption" +) + +O2_ads = pz.ElementaryReaction( + initial=[s0, s0, O2_g], + final=[O_s, O_s], + neighboring=[(0, 1)], + reversible=False, + pre_expon=2.5, + label="O2_adsorption", +) + +CO_oxi = pz.ElementaryReaction( + initial=[CO_s, O_s], + final=[s0, s0, CO2_g], + neighboring=[(0, 1)], + reversible=False, + pre_expon=1.0e20, + label="CO_oxidation", +) mech = pz.Mechanism([O2_ads, CO_ads, CO_oxi]) @@ -53,8 +61,8 @@ # LatticeState setup (initial state) ist = pz.LatticeState(lat, surface_species=spl.surface_species()) -ist.fill_sites_random(site_name='StTp1', species='CO*', coverage=0.1) -ist.fill_sites_random(site_name='StTp1', species='O*', coverage=0.1) +ist.fill_sites_random(site_name="StTp1", species="CO*", coverage=0.1) +ist.fill_sites_random(site_name="StTp1", species="O*", coverage=0.1) print(ist) @@ -67,11 +75,11 @@ sett.random_seed = 953129 sett.temperature = 500.0 sett.pressure = 1.0 -sett.snapshots = ('time', 0.1) -sett.process_statistics = ('time', 0.1) -sett.species_numbers = ('time', 0.1) -sett.event_report = 'off' -sett.max_steps = 'infinity' +sett.snapshots = ("time", 0.1) +sett.process_statistics = ("time", 0.1) +sett.species_numbers = ("time", 0.1) +sett.event_report = "off" +sett.max_steps = "infinity" sett.max_time = 1.0 sett.molar_fraction.CO = 0.45 @@ -79,22 +87,20 @@ print(sett) -job = pz.ZacrosJob( settings=sett, lattice=lat, - mechanism=[CO_ads, O2_ads, CO_oxi], - cluster_expansion=[CO_p, O_p] ) +job = pz.ZacrosJob(settings=sett, lattice=lat, mechanism=[CO_ads, O2_ads, CO_oxi], cluster_expansion=[CO_p, O_p]) print(job) results = job.run() -if( job.ok() ): - provided_quantities = results.provided_quantities() - print("nCO2 =", provided_quantities['CO2']) +if job.ok(): + provided_quantities = results.provided_quantities() + print("nCO2 =", provided_quantities["CO2"]) - results.plot_molecule_numbers( results.gas_species_names() ) - results.plot_lattice_states( results.lattice_states() ) + results.plot_molecule_numbers(results.gas_species_names()) + results.plot_lattice_states(results.lattice_states()) - pstat = results.get_process_statistics() - results.plot_process_statistics( pstat, key="number_of_events" ) + pstat = results.get_process_statistics() + results.plot_process_statistics(pstat, key="number_of_events") scm.pyzacros.finish() diff --git a/examples/intro/intro0.py b/examples/intro/intro0.py index 3611d64..196fd1f 100644 --- a/examples/intro/intro0.py +++ b/examples/intro/intro0.py @@ -7,28 +7,40 @@ CO2_g = pz.Species("CO2", gas_energy=-2.337) # Surface species: -s0 = pz.Species("*") # Empty adsorption site +s0 = pz.Species("*") # Empty adsorption site CO_s = pz.Species("CO*") O_s = pz.Species("O*") # Lattice setup: -lattice = pz.Lattice( lattice_type=pz.Lattice.RECTANGULAR, - lattice_constant=1.0, repeat_cell=[10,10] ) +lattice = pz.Lattice(lattice_type=pz.Lattice.RECTANGULAR, lattice_constant=1.0, repeat_cell=[10, 
10]) lattice.plot() # Clusters: -CO_p = pz.Cluster( species=[CO_s], energy=-1.3 ) -O_p = pz.Cluster( species=[O_s], energy=-2.3 ) +CO_p = pz.Cluster(species=[CO_s], energy=-1.3) +O_p = pz.Cluster(species=[O_s], energy=-2.3) # Elementary Reactions -CO_ads = pz.ElementaryReaction( initial=[s0, CO_g], final=[CO_s], - reversible=False, pre_expon=10.0, activation_energy=0.0 ) +CO_ads = pz.ElementaryReaction( + initial=[s0, CO_g], final=[CO_s], reversible=False, pre_expon=10.0, activation_energy=0.0 +) -O2_ads = pz.ElementaryReaction( initial=[s0, s0, O2_g], final=[O_s, O_s], neighboring=[(0, 1)], - reversible=False, pre_expon=2.5, activation_energy=0.0 ) +O2_ads = pz.ElementaryReaction( + initial=[s0, s0, O2_g], + final=[O_s, O_s], + neighboring=[(0, 1)], + reversible=False, + pre_expon=2.5, + activation_energy=0.0, +) -CO_oxi = pz.ElementaryReaction( initial=[CO_s, O_s], final=[s0, s0, CO2_g], neighboring=[(0, 1)], - reversible=False, pre_expon=1.0e+20, activation_energy=0.0) +CO_oxi = pz.ElementaryReaction( + initial=[CO_s, O_s], + final=[s0, s0, CO2_g], + neighboring=[(0, 1)], + reversible=False, + pre_expon=1.0e20, + activation_energy=0.0, +) scm.pyzacros.init() @@ -36,23 +48,21 @@ sett = pz.Settings() sett.temperature = 500.0 sett.pressure = 1.0 -sett.snapshots = ('time', 5.e-1) -sett.process_statistics = ('time', 1.e-2) -sett.species_numbers = ('time', 1.e-2) +sett.snapshots = ("time", 5.0e-1) +sett.process_statistics = ("time", 1.0e-2) +sett.species_numbers = ("time", 1.0e-2) sett.max_time = 25.0 sett.random_seed = 953129 sett.molar_fraction.CO = 0.45 sett.molar_fraction.O2 = 0.55 -myJob = pz.ZacrosJob( settings=sett, lattice=lattice, - mechanism=[CO_ads, O2_ads, CO_oxi], - cluster_expansion=[CO_p, O_p] ) +myJob = pz.ZacrosJob(settings=sett, lattice=lattice, mechanism=[CO_ads, O2_ads, CO_oxi], cluster_expansion=[CO_p, O_p]) results = myJob.run() -print( "nCO2 = ", results.provided_quantities()["CO2"][-10:] ) -results.plot_molecule_numbers( results.gas_species_names() ) -results.plot_molecule_numbers( results.surface_species_names() ) +print("nCO2 = ", results.provided_quantities()["CO2"][-10:]) +results.plot_molecule_numbers(results.gas_species_names()) +results.plot_molecule_numbers(results.surface_species_names()) scm.pyzacros.finish() diff --git a/models/LangmuirHinshelwood.py b/models/LangmuirHinshelwood.py index 37caedb..8e6db92 100644 --- a/models/LangmuirHinshelwood.py +++ b/models/LangmuirHinshelwood.py @@ -2,80 +2,92 @@ import scm.pyzacros as pz + class LangmuirHinshelwood: - def __init__(self, lattice_constant=1.0, repeat_cell=[20,20]): + def __init__(self, lattice_constant=1.0, repeat_cell=[20, 20]): - #--------------------------------------------- + # --------------------------------------------- # Species: - #--------------------------------------------- + # --------------------------------------------- # Gas-species: O2_gas = pz.Species("O2") CO_gas = pz.Species("CO") CO2_gas = pz.Species("CO2", gas_energy=-3.1800) # Surface species: - s0 = pz.Species("*", 1) # Empty adsorption site + s0 = pz.Species("*", 1) # Empty adsorption site O_adsorbed = pz.Species("O*", 1) CO_adsorbed = pz.Species("CO*", 1) - #--------------------------------------------- + # --------------------------------------------- # Lattice setup: - #--------------------------------------------- - self.lattice = pz.Lattice( lattice_type=pz.Lattice.HEXAGONAL, lattice_constant=lattice_constant, repeat_cell=repeat_cell ) + # --------------------------------------------- + self.lattice = pz.Lattice( + 
lattice_type=pz.Lattice.HEXAGONAL, lattice_constant=lattice_constant, repeat_cell=repeat_cell + ) - #--------------------------------------------- + # --------------------------------------------- # Clusters & Cluster expansion - #--------------------------------------------- - empty_point = pz.Cluster( species=[pz.Species.UNSPECIFIED], energy=0.0, label="empty_point") - self.cluster_expansion = pz.ClusterExpansion( [empty_point] ) + # --------------------------------------------- + empty_point = pz.Cluster(species=[pz.Species.UNSPECIFIED], energy=0.0, label="empty_point") + self.cluster_expansion = pz.ClusterExpansion([empty_point]) - #--------------------------------------------- + # --------------------------------------------- # Elementary Reactions & Mechanism - #--------------------------------------------- + # --------------------------------------------- - CO_adsorption = pz.ElementaryReaction(initial=[s0,CO_gas], - final=[CO_adsorbed], - reversible=True, - pre_expon=1.000e+7, - pe_ratio=2.000, - activation_energy=0.000, - label="CO_adsorption") + CO_adsorption = pz.ElementaryReaction( + initial=[s0, CO_gas], + final=[CO_adsorbed], + reversible=True, + pre_expon=1.000e7, + pe_ratio=2.000, + activation_energy=0.000, + label="CO_adsorption", + ) - O2_adsorption = pz.ElementaryReaction(initial=[s0,s0,O2_gas], - final=[O_adsorbed,O_adsorbed], - neighboring=[(0, 1)], - reversible=True, - pre_expon=1.000e+7, - pe_ratio=5.000, - activation_energy=0.000, - label="O2_adsorption") + O2_adsorption = pz.ElementaryReaction( + initial=[s0, s0, O2_gas], + final=[O_adsorbed, O_adsorbed], + neighboring=[(0, 1)], + reversible=True, + pre_expon=1.000e7, + pe_ratio=5.000, + activation_energy=0.000, + label="O2_adsorption", + ) - O_diffusion = pz.ElementaryReaction(initial=[O_adsorbed,s0], - final=[s0,O_adsorbed], - neighboring=[(0, 1)], - reversible=True, - pre_expon=1.000e+6, - pe_ratio=1.000, - activation_energy=0.000, - label="O_diffusion") + O_diffusion = pz.ElementaryReaction( + initial=[O_adsorbed, s0], + final=[s0, O_adsorbed], + neighboring=[(0, 1)], + reversible=True, + pre_expon=1.000e6, + pe_ratio=1.000, + activation_energy=0.000, + label="O_diffusion", + ) - CO_diffusion = pz.ElementaryReaction(initial=[CO_adsorbed,s0], - final=[s0,CO_adsorbed], - neighboring=[(0, 1)], - reversible=True, - pre_expon=1.000e+6, - pe_ratio=1.000, - activation_energy=0.000, - label="CO_diffusion") + CO_diffusion = pz.ElementaryReaction( + initial=[CO_adsorbed, s0], + final=[s0, CO_adsorbed], + neighboring=[(0, 1)], + reversible=True, + pre_expon=1.000e6, + pe_ratio=1.000, + activation_energy=0.000, + label="CO_diffusion", + ) - CO_oxidation = pz.ElementaryReaction(initial=[CO_adsorbed, O_adsorbed], - final=[s0, s0, CO2_gas], - neighboring=[(0, 1)], - reversible=False, - pre_expon=4.500e+2, - activation_energy=0.000, - label="CO_oxidation") + CO_oxidation = pz.ElementaryReaction( + initial=[CO_adsorbed, O_adsorbed], + final=[s0, s0, CO2_gas], + neighboring=[(0, 1)], + reversible=False, + pre_expon=4.500e2, + activation_energy=0.000, + label="CO_oxidation", + ) self.mechanism = pz.Mechanism([CO_adsorption, O2_adsorption, O_diffusion, CO_diffusion, CO_oxidation]) - diff --git a/models/ReuterScheffler.py b/models/ReuterScheffler.py index c1f0796..a2fed81 100644 --- a/models/ReuterScheffler.py +++ b/models/ReuterScheffler.py @@ -10,175 +10,284 @@ import scm.pyzacros as pz + class ReuterScheffler: - def __init__(self, repeat_cell=[10,20]): + def __init__(self, repeat_cell=[10, 20]): - 
#--------------------------------------------- + # --------------------------------------------- # Species: - #--------------------------------------------- + # --------------------------------------------- # Gas-species: O2_gas = pz.Species("O2") CO_gas = pz.Species("CO") CO2_gas = pz.Species("CO2", gas_energy=-3.072) # Surface species: - s0 = pz.Species("*", 1) # Empty adsorption site + s0 = pz.Species("*", 1) # Empty adsorption site O_ads = pz.Species("O*", 1) CO_ads = pz.Species("CO*", 1) - #--------------------------------------------- + # --------------------------------------------- # Lattice setup: - #--------------------------------------------- - self.lattice = pz.Lattice( cell_vectors=[[6.43, 0.00],[0.00, 3.12]], - repeat_cell=repeat_cell, - site_types=['cus', 'brg'], - site_coordinates=[[0.25, 0.50], - [0.75, 0.50]], - neighboring_structure=[ [(0,1), pz.Lattice.SELF], - [(0,0), pz.Lattice.NORTH], - [(0,1), pz.Lattice.NORTH], - [(1,1), pz.Lattice.NORTH], - [(1,0), pz.Lattice.NORTH], - [(1,0), pz.Lattice.NORTHEAST], - [(1,0), pz.Lattice.EAST], - [(1,0), pz.Lattice.SOUTHEAST] ] ) - - #--------------------------------------------- + # --------------------------------------------- + self.lattice = pz.Lattice( + cell_vectors=[[6.43, 0.00], [0.00, 3.12]], + repeat_cell=repeat_cell, + site_types=["cus", "brg"], + site_coordinates=[[0.25, 0.50], [0.75, 0.50]], + neighboring_structure=[ + [(0, 1), pz.Lattice.SELF], + [(0, 0), pz.Lattice.NORTH], + [(0, 1), pz.Lattice.NORTH], + [(1, 1), pz.Lattice.NORTH], + [(1, 0), pz.Lattice.NORTH], + [(1, 0), pz.Lattice.NORTHEAST], + [(1, 0), pz.Lattice.EAST], + [(1, 0), pz.Lattice.SOUTHEAST], + ], + ) + + # --------------------------------------------- # Clusters & Cluster expansion - #--------------------------------------------- - O_point_brg = pz.Cluster(species=[O_ads], site_types=['brg'], multiplicity=1, energy=-2.3, label='O_point_brg') - O_point_cus = pz.Cluster(species=[O_ads], site_types=['cus'], multiplicity=1, energy=-1.0, label='O_point_cus') + # --------------------------------------------- + O_point_brg = pz.Cluster(species=[O_ads], site_types=["brg"], multiplicity=1, energy=-2.3, label="O_point_brg") + O_point_cus = pz.Cluster(species=[O_ads], site_types=["cus"], multiplicity=1, energy=-1.0, label="O_point_cus") - CO_point_brg = pz.Cluster(species=[CO_ads], site_types=['brg'], multiplicity=1, energy=-1.6, label='CO_point_brg') - CO_point_cus = pz.Cluster(species=[CO_ads], site_types=['cus'], multiplicity=1, energy=-1.3, label='CO_point_cus') + CO_point_brg = pz.Cluster( + species=[CO_ads], site_types=["brg"], multiplicity=1, energy=-1.6, label="CO_point_brg" + ) + CO_point_cus = pz.Cluster( + species=[CO_ads], site_types=["cus"], multiplicity=1, energy=-1.3, label="CO_point_cus" + ) - O_brg_O_brg_1NN = pz.Cluster(species=[O_ads,O_ads], site_types=['brg','brg'], neighboring=[(0,1)], multiplicity=2, energy=0.0, label='O_brg_O_brg_1NN') - O_cus_O_cus_1NN = pz.Cluster(species=[O_ads,O_ads], site_types=['cus','cus'], neighboring=[(0,1)], multiplicity=2, energy=0.0, label='O_cus_O_cus_1NN') - O_cus_O_brg_1NN = pz.Cluster(species=[O_ads,O_ads], site_types=['cus','brg'], neighboring=[(0,1)], multiplicity=2, energy=0.0, label='O_cus_O_brg_1NN') + O_brg_O_brg_1NN = pz.Cluster( + species=[O_ads, O_ads], + site_types=["brg", "brg"], + neighboring=[(0, 1)], + multiplicity=2, + energy=0.0, + label="O_brg_O_brg_1NN", + ) + O_cus_O_cus_1NN = pz.Cluster( + species=[O_ads, O_ads], + site_types=["cus", "cus"], + neighboring=[(0, 1)], + multiplicity=2, + 
energy=0.0, + label="O_cus_O_cus_1NN", + ) + O_cus_O_brg_1NN = pz.Cluster( + species=[O_ads, O_ads], + site_types=["cus", "brg"], + neighboring=[(0, 1)], + multiplicity=2, + energy=0.0, + label="O_cus_O_brg_1NN", + ) - CO_brg_CO_brg_1NN = pz.Cluster(species=[CO_ads,CO_ads], site_types=['brg','brg'], neighboring=[(0,1)], multiplicity=2, energy=0.0, label='CO_brg_CO_brg_1NN') - CO_cus_CO_cus_1NN = pz.Cluster(species=[CO_ads,CO_ads], site_types=['cus','cus'], neighboring=[(0,1)], multiplicity=2, energy=0.0, label='CO_cus_CO_cus_1NN') - CO_cus_CO_brg_1NN = pz.Cluster(species=[CO_ads,CO_ads], site_types=['cus','brg'], neighboring=[(0,1)], multiplicity=2, energy=0.0, label='CO_cus_CO_brg_1NN') + CO_brg_CO_brg_1NN = pz.Cluster( + species=[CO_ads, CO_ads], + site_types=["brg", "brg"], + neighboring=[(0, 1)], + multiplicity=2, + energy=0.0, + label="CO_brg_CO_brg_1NN", + ) + CO_cus_CO_cus_1NN = pz.Cluster( + species=[CO_ads, CO_ads], + site_types=["cus", "cus"], + neighboring=[(0, 1)], + multiplicity=2, + energy=0.0, + label="CO_cus_CO_cus_1NN", + ) + CO_cus_CO_brg_1NN = pz.Cluster( + species=[CO_ads, CO_ads], + site_types=["cus", "brg"], + neighboring=[(0, 1)], + multiplicity=2, + energy=0.0, + label="CO_cus_CO_brg_1NN", + ) - CO_brg_O_brg_1NN = pz.Cluster(species=[CO_ads,O_ads], site_types=['brg','brg'], neighboring=[(0,1)], multiplicity=2, energy=0.0, label='CO_brg_O_brg_1NN') - CO_cus_O_cus_1NN = pz.Cluster(species=[CO_ads,O_ads], site_types=['cus','cus'], neighboring=[(0,1)], multiplicity=2, energy=0.0, label='CO_cus_O_cus_1NN') - CO_cus_O_brg_1NN = pz.Cluster(species=[CO_ads,O_ads], site_types=['cus','brg'], neighboring=[(0,1)], multiplicity=2, energy=0.0, label='CO_cus_O_brg_1NN') - CO_brg_O_cus_1NN = pz.Cluster(species=[CO_ads,O_ads], site_types=['brg','cus'], neighboring=[(0,1)], multiplicity=2, energy=0.0, label='CO_brg_O_cus_1NN') + CO_brg_O_brg_1NN = pz.Cluster( + species=[CO_ads, O_ads], + site_types=["brg", "brg"], + neighboring=[(0, 1)], + multiplicity=2, + energy=0.0, + label="CO_brg_O_brg_1NN", + ) + CO_cus_O_cus_1NN = pz.Cluster( + species=[CO_ads, O_ads], + site_types=["cus", "cus"], + neighboring=[(0, 1)], + multiplicity=2, + energy=0.0, + label="CO_cus_O_cus_1NN", + ) + CO_cus_O_brg_1NN = pz.Cluster( + species=[CO_ads, O_ads], + site_types=["cus", "brg"], + neighboring=[(0, 1)], + multiplicity=2, + energy=0.0, + label="CO_cus_O_brg_1NN", + ) + CO_brg_O_cus_1NN = pz.Cluster( + species=[CO_ads, O_ads], + site_types=["brg", "cus"], + neighboring=[(0, 1)], + multiplicity=2, + energy=0.0, + label="CO_brg_O_cus_1NN", + ) - self.cluster_expansion = pz.ClusterExpansion( [O_point_brg, O_point_cus, - CO_point_brg, CO_point_cus, - O_brg_O_brg_1NN, O_cus_O_cus_1NN, O_cus_O_brg_1NN, - CO_brg_CO_brg_1NN, CO_cus_CO_cus_1NN, CO_cus_CO_brg_1NN, - CO_brg_O_brg_1NN, CO_cus_O_cus_1NN, CO_cus_O_brg_1NN, CO_brg_O_cus_1NN] ) + self.cluster_expansion = pz.ClusterExpansion( + [ + O_point_brg, + O_point_cus, + CO_point_brg, + CO_point_cus, + O_brg_O_brg_1NN, + O_cus_O_cus_1NN, + O_cus_O_brg_1NN, + CO_brg_CO_brg_1NN, + CO_cus_CO_cus_1NN, + CO_cus_CO_brg_1NN, + CO_brg_O_brg_1NN, + CO_cus_O_cus_1NN, + CO_cus_O_brg_1NN, + CO_brg_O_cus_1NN, + ] + ) - #--------------------------------------------- + # --------------------------------------------- # Elementary Reactions & Mechanism - #--------------------------------------------- + # --------------------------------------------- CO_brg_adsorption = pz.ElementaryReaction( - initial=[s0,CO_gas], - final=[CO_ads], - site_types=['brg'], - reversible=True, - 
pre_expon=2.04e+08, - pe_ratio=1.33e-10, - activation_energy=0.0, - prox_factor=0.0, - label="CO_brg_adsorption") + initial=[s0, CO_gas], + final=[CO_ads], + site_types=["brg"], + reversible=True, + pre_expon=2.04e08, + pe_ratio=1.33e-10, + activation_energy=0.0, + prox_factor=0.0, + label="CO_brg_adsorption", + ) CO_cus_adsorption = pz.ElementaryReaction( - initial=[s0,CO_gas], - final=[CO_ads], - site_types=['cus'], - reversible=True, - pre_expon=2.04e+08, - pe_ratio=1.53e-10, - activation_energy=0.0, - prox_factor=0.0, - label="CO_cus_adsorption") + initial=[s0, CO_gas], + final=[CO_ads], + site_types=["cus"], + reversible=True, + pre_expon=2.04e08, + pe_ratio=1.53e-10, + activation_energy=0.0, + prox_factor=0.0, + label="CO_cus_adsorption", + ) O_brg_adsorption = pz.ElementaryReaction( - initial=[s0,s0,O2_gas], - final=[O_ads,O_ads], - site_types=['brg','brg'], - neighboring=[(0, 1)], - reversible=True, - pre_expon=1.91e+08, - pe_ratio=5.33e-11, - activation_energy=0.000, - prox_factor=0.0, - label="O_brg_adsorption") + initial=[s0, s0, O2_gas], + final=[O_ads, O_ads], + site_types=["brg", "brg"], + neighboring=[(0, 1)], + reversible=True, + pre_expon=1.91e08, + pe_ratio=5.33e-11, + activation_energy=0.000, + prox_factor=0.0, + label="O_brg_adsorption", + ) O_cus_adsorption = pz.ElementaryReaction( - initial=[s0,s0,O2_gas], - final=[O_ads,O_ads], - site_types=['cus','cus'], - neighboring=[(0, 1)], - reversible=True, - pre_expon=1.91e+08, - pe_ratio=5.33e-11, - activation_energy=0.000, - prox_factor=0.0, - label="O_cus_adsorption") + initial=[s0, s0, O2_gas], + final=[O_ads, O_ads], + site_types=["cus", "cus"], + neighboring=[(0, 1)], + reversible=True, + pre_expon=1.91e08, + pe_ratio=5.33e-11, + activation_energy=0.000, + prox_factor=0.0, + label="O_cus_adsorption", + ) O_cus_O_brg_adsorption = pz.ElementaryReaction( - initial=[s0,s0,O2_gas], - final=[O_ads,O_ads], - site_types=['cus','brg'], - neighboring=[(0, 1)], - reversible=True, - pre_expon=1.91e+08, - pe_ratio=5.33e-11, - activation_energy=0.000, - prox_factor=0.0, - label="O_cus_O_brg_adsorption") + initial=[s0, s0, O2_gas], + final=[O_ads, O_ads], + site_types=["cus", "brg"], + neighboring=[(0, 1)], + reversible=True, + pre_expon=1.91e08, + pe_ratio=5.33e-11, + activation_energy=0.000, + prox_factor=0.0, + label="O_cus_O_brg_adsorption", + ) CO_brg_O_cus_oxidation = pz.ElementaryReaction( - initial=[CO_ads,O_ads], - final=[s0,s0,CO2_gas], - site_types=['brg','cus'], - neighboring=[(0, 1)], - reversible=False, - pre_expon=6.25e+12, - activation_energy=0.76, - prox_factor=0.0, - label="CO_brg_O_cus_oxidation") + initial=[CO_ads, O_ads], + final=[s0, s0, CO2_gas], + site_types=["brg", "cus"], + neighboring=[(0, 1)], + reversible=False, + pre_expon=6.25e12, + activation_energy=0.76, + prox_factor=0.0, + label="CO_brg_O_cus_oxidation", + ) CO_brg_O_brg_oxidation = pz.ElementaryReaction( - initial=[CO_ads,O_ads], - final=[s0,s0,CO2_gas], - site_types=['brg','brg'], - neighboring=[(0, 1)], - reversible=False, - pre_expon=6.25e+12, - activation_energy=1.54, - prox_factor=0.0, - label="CO_brg_O_brg_oxidation") + initial=[CO_ads, O_ads], + final=[s0, s0, CO2_gas], + site_types=["brg", "brg"], + neighboring=[(0, 1)], + reversible=False, + pre_expon=6.25e12, + activation_energy=1.54, + prox_factor=0.0, + label="CO_brg_O_brg_oxidation", + ) CO_cus_O_cus_oxidation = pz.ElementaryReaction( - initial=[CO_ads,O_ads], - final=[s0,s0,CO2_gas], - site_types=['cus','cus'], - neighboring=[(0, 1)], - reversible=False, - pre_expon=6.25e+12, - 
activation_energy=0.89, - prox_factor=0.0, - label="CO_cus_O_cus_oxidation") + initial=[CO_ads, O_ads], + final=[s0, s0, CO2_gas], + site_types=["cus", "cus"], + neighboring=[(0, 1)], + reversible=False, + pre_expon=6.25e12, + activation_energy=0.89, + prox_factor=0.0, + label="CO_cus_O_cus_oxidation", + ) CO_cus_O_brg_oxidation = pz.ElementaryReaction( - initial=[CO_ads,O_ads], - final=[s0,s0,CO2_gas], - site_types=['cus','brg'], - neighboring=[(0, 1)], - reversible=False, - pre_expon=6.25e+12, - activation_energy=1.25, - prox_factor=0.0, - label="CO_cus_O_brg_oxidation") - - self.mechanism = pz.Mechanism([CO_brg_adsorption, CO_cus_adsorption, - O_brg_adsorption, O_cus_adsorption, O_cus_O_brg_adsorption, - CO_brg_O_cus_oxidation, CO_brg_O_brg_oxidation, CO_cus_O_cus_oxidation, CO_cus_O_brg_oxidation]) + initial=[CO_ads, O_ads], + final=[s0, s0, CO2_gas], + site_types=["cus", "brg"], + neighboring=[(0, 1)], + reversible=False, + pre_expon=6.25e12, + activation_energy=1.25, + prox_factor=0.0, + label="CO_cus_O_brg_oxidation", + ) + self.mechanism = pz.Mechanism( + [ + CO_brg_adsorption, + CO_cus_adsorption, + O_brg_adsorption, + O_cus_adsorption, + O_cus_O_brg_adsorption, + CO_brg_O_cus_oxidation, + CO_brg_O_brg_oxidation, + CO_cus_O_cus_oxidation, + CO_cus_O_brg_oxidation, + ] + ) diff --git a/models/ZiffGulariBarshad.py b/models/ZiffGulariBarshad.py index b61d9f6..cb2b602 100644 --- a/models/ZiffGulariBarshad.py +++ b/models/ZiffGulariBarshad.py @@ -2,64 +2,72 @@ import scm.pyzacros as pz + class ZiffGulariBarshad: - def __init__(self, lattice_constant=1.0, repeat_cell=[50,50]): + def __init__(self, lattice_constant=1.0, repeat_cell=[50, 50]): - #--------------------------------------------- + # --------------------------------------------- # Species: - #--------------------------------------------- + # --------------------------------------------- # Gas-species: CO_gas = pz.Species("CO") O2_gas = pz.Species("O2") CO2_gas = pz.Species("CO2", gas_energy=-2.337) # Surface species: - s0 = pz.Species("*", 1) # Empty adsorption site + s0 = pz.Species("*", 1) # Empty adsorption site CO_ads = pz.Species("CO*", 1) O_ads = pz.Species("O*", 1) - #--------------------------------------------- + # --------------------------------------------- # Lattice setup: - #--------------------------------------------- - self.lattice = pz.Lattice( lattice_type=pz.Lattice.RECTANGULAR, - lattice_constant=lattice_constant, repeat_cell=repeat_cell ) + # --------------------------------------------- + self.lattice = pz.Lattice( + lattice_type=pz.Lattice.RECTANGULAR, lattice_constant=lattice_constant, repeat_cell=repeat_cell + ) - #--------------------------------------------- + # --------------------------------------------- # Clusters: - #--------------------------------------------- + # --------------------------------------------- CO_point = pz.Cluster(species=[CO_ads], energy=-1.3, label="CO_point") O_point = pz.Cluster(species=[O_ads], energy=-2.3, label="O_point") - self.cluster_expansion = pz.ClusterExpansion( [CO_point, O_point] ) + self.cluster_expansion = pz.ClusterExpansion([CO_point, O_point]) - #--------------------------------------------- + # --------------------------------------------- # Elementary Reactions - #--------------------------------------------- + # --------------------------------------------- # CO_adsorption: - CO_adsorption = pz.ElementaryReaction(initial=[s0,CO_gas], - final=[CO_ads], - reversible=False, - pre_expon=10.0, - activation_energy=0.0, - label="CO_adsorption") + 
CO_adsorption = pz.ElementaryReaction( + initial=[s0, CO_gas], + final=[CO_ads], + reversible=False, + pre_expon=10.0, + activation_energy=0.0, + label="CO_adsorption", + ) # O2_adsorption: - O2_adsorption = pz.ElementaryReaction(initial=[s0,s0,O2_gas], - final=[O_ads,O_ads], - neighboring=[(0, 1)], - reversible=False, - pre_expon=2.5, - activation_energy=0.0, - label="O2_adsorption") + O2_adsorption = pz.ElementaryReaction( + initial=[s0, s0, O2_gas], + final=[O_ads, O_ads], + neighboring=[(0, 1)], + reversible=False, + pre_expon=2.5, + activation_energy=0.0, + label="O2_adsorption", + ) # CO_oxidation: - CO_oxidation = pz.ElementaryReaction(initial=[CO_ads, O_ads], - final=[s0, s0, CO2_gas], - neighboring=[(0, 1)], - reversible=False, - pre_expon=1.0e+20, - activation_energy=0.0, - label="CO_oxidation") + CO_oxidation = pz.ElementaryReaction( + initial=[CO_ads, O_ads], + final=[s0, s0, CO2_gas], + neighboring=[(0, 1)], + reversible=False, + pre_expon=1.0e20, + activation_energy=0.0, + label="CO_oxidation", + ) self.mechanism = [CO_adsorption, O2_adsorption, CO_oxidation] diff --git a/setup.py b/setup.py index d9948e0..d0f0308 100644 --- a/setup.py +++ b/setup.py @@ -7,58 +7,58 @@ here = os.path.abspath(os.path.dirname(__file__)) -packages = ['scm.pyzacros'] + ['scm.pyzacros.'+i for i in find_packages('.')] +packages = ["scm.pyzacros"] + ["scm.pyzacros." + i for i in find_packages(".")] # To update the package version number, edit pyZacros/__version__.py version = {} -with open(os.path.join(here, '__version__.py')) as f: +with open(os.path.join(here, "__version__.py")) as f: exec(f.read(), version) -with open('README.md') as readme_file: +with open("README.md") as readme_file: readme = readme_file.read() -def package_files( directory ): + +def package_files(directory): paths = [] - for (path, directories, filenames) in os.walk(directory): + for path, directories, filenames in os.walk(directory): for filename in filenames: - paths.append(os.path.join('..', path, filename)) + paths.append(os.path.join("..", path, filename)) return paths + extra_data_files = [] -extra_data_files.extend( package_files('examples') ) -extra_data_files.extend( package_files('tests') ) +extra_data_files.extend(package_files("examples")) +extra_data_files.extend(package_files("tests")) setup( - name='pyzacros', - version=version['__version__'], + name="pyzacros", + version=version["__version__"], description="Python Library for Automating Zacros Simulations", long_description=description, author="Nestor F. 
Aguirre & Pablo Lopez-Tarifa", - author_email='aguirre@scm.com', - url='https://github.com/SCM-NV/pyZacros', + author_email="aguirre@scm.com", + url="https://github.com/SCM-NV/pyZacros", packages=packages, - package_dir = {'scm.pyzacros': '.'}, - package_data={'': extra_data_files}, + package_dir={"scm.pyzacros": "."}, + package_data={"": extra_data_files}, include_package_data=True, license="LGPLv3", zip_safe=False, - keywords = ['molecular modeling', 'computational chemistry', 'workflow', 'python interface'], + keywords=["molecular modeling", "computational chemistry", "workflow", "python interface"], classifiers=[ - 'Development Status :: 5 - Production/Stable', - 'Intended Audience :: Science/Research', - 'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)', - 'Natural Language :: English', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.8', - 'Topic :: Scientific/Engineering :: Chemistry', - 'Topic :: Scientific/Engineering :: Physics', - 'Topic :: Software Development :: Libraries :: Python Modules', + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)", + "Natural Language :: English", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.8", + "Topic :: Scientific/Engineering :: Chemistry", + "Topic :: Scientific/Engineering :: Physics", + "Topic :: Software Development :: Libraries :: Python Modules", ], - install_requires=[ - 'chemparse', 'scipy', 'numpy', 'networkx', - 'plams@git+https://github.com/SCM-NV/PLAMS@master'], + install_requires=["chemparse", "scipy", "numpy", "networkx", "plams@git+https://github.com/SCM-NV/PLAMS@master"], extras_require={ - 'test': ['coverage', 'pytest>=3.9', 'pytest-cov'], - 'doc': ['sphinx', 'sphinx_rtd_theme', 'nbsphinx'] - } + "test": ["coverage", "pytest>=3.9", "pytest-cov"], + "doc": ["sphinx", "sphinx_rtd_theme", "nbsphinx"], + }, ) diff --git a/tests/test_Cluster.py b/tests/test_Cluster.py index 8c40770..abd164b 100644 --- a/tests/test_Cluster.py +++ b/tests/test_Cluster.py @@ -3,16 +3,18 @@ def test_Cluster(): - print( "---------------------------------------------------" ) - print( ">>> Testing Cluster class" ) - print( "---------------------------------------------------" ) - cluster = pz.Cluster( site_types=( "f", "f" ), - neighboring=[ (0,1) ], - species=pz.SpeciesList( [ pz.Species("H*",1), pz.Species("H*",1) ] ), - multiplicity=2, - energy=0.1 ) + print("---------------------------------------------------") + print(">>> Testing Cluster class") + print("---------------------------------------------------") + cluster = pz.Cluster( + site_types=("f", "f"), + neighboring=[(0, 1)], + species=pz.SpeciesList([pz.Species("H*", 1), pz.Species("H*", 1)]), + multiplicity=2, + energy=0.1, + ) - print( cluster ) + print(cluster) output = str(cluster) expectedOutput = """\ @@ -27,15 +29,17 @@ def test_Cluster(): cluster_eng 0.100 end_cluster\ """ - assert( pz.utils.compare( output, expectedOutput, 1e-3 ) ) + assert pz.utils.compare(output, expectedOutput, 1e-3) - cluster = pz.Cluster( site_types=( "f", "f" ), - neighboring=[ (0,1) ], - species=[ pz.Species("H2**"), pz.Species("H2**") ], - multiplicity=2, - energy=0.1 ) + cluster = pz.Cluster( + site_types=("f", "f"), + neighboring=[(0, 1)], + species=[pz.Species("H2**"), pz.Species("H2**")], + multiplicity=2, + energy=0.1, + ) - print( cluster ) + print(cluster) output = str(cluster) expectedOutput = """\ 
@@ -50,17 +54,19 @@ def test_Cluster(): cluster_eng 0.100 end_cluster\ """ - assert( pz.utils.compare( output, expectedOutput, 1e-3 ) ) + assert pz.utils.compare(output, expectedOutput, 1e-3) - cluster = pz.Cluster( site_types=( "f", "g", "h", "i", "j" ), - neighboring=[ (0,1), (1,2), (2,3), (3,0), (2,4) ], - species=[ pz.Species("CO2**"), pz.Species("*"), pz.Species("CO2**"), pz.Species("H*",1), pz.Species("*") ], - entity_number=[ 0, 1, 0, 2, 3 ], - multiplicity=1, - energy=0.1, - label="my_weird_cluster" ) + cluster = pz.Cluster( + site_types=("f", "g", "h", "i", "j"), + neighboring=[(0, 1), (1, 2), (2, 3), (3, 0), (2, 4)], + species=[pz.Species("CO2**"), pz.Species("*"), pz.Species("CO2**"), pz.Species("H*", 1), pz.Species("*")], + entity_number=[0, 1, 0, 2, 3], + multiplicity=1, + energy=0.1, + label="my_weird_cluster", + ) - print( cluster ) + print(cluster) output = str(cluster) expectedOutput = """\ @@ -78,16 +84,18 @@ def test_Cluster(): cluster_eng 0.100 end_cluster\ """ - assert( pz.utils.compare( output, expectedOutput, 1e-3 ) ) + assert pz.utils.compare(output, expectedOutput, 1e-3) - cluster = pz.Cluster( site_types=( "f", "g", "h", "i", "j" ), - neighboring=[ (0,1), (1,2), (2,3), (3,0), (2,4) ], - species=[ pz.Species("CO2**"), pz.Species("*"), pz.Species("CO2**"), pz.Species("H*",1), pz.Species("*") ], - multiplicity=1, - energy=0.1, - label="my_weird_cluster" ) + cluster = pz.Cluster( + site_types=("f", "g", "h", "i", "j"), + neighboring=[(0, 1), (1, 2), (2, 3), (3, 0), (2, 4)], + species=[pz.Species("CO2**"), pz.Species("*"), pz.Species("CO2**"), pz.Species("H*", 1), pz.Species("*")], + multiplicity=1, + energy=0.1, + label="my_weird_cluster", + ) - print( cluster ) + print(cluster) output = str(cluster) expectedOutput = """\ @@ -105,4 +113,4 @@ def test_Cluster(): cluster_eng 0.100 end_cluster\ """ - assert( pz.utils.compare( output, expectedOutput, 1e-3 ) ) + assert pz.utils.compare(output, expectedOutput, 1e-3) diff --git a/tests/test_ElementaryReaction.py b/tests/test_ElementaryReaction.py index 6545ba0..7ce026f 100644 --- a/tests/test_ElementaryReaction.py +++ b/tests/test_ElementaryReaction.py @@ -3,24 +3,26 @@ def test_ElementaryReaction(): - print( "---------------------------------------------------" ) - print( ">>> Testing ElementaryReaction class" ) - print( "---------------------------------------------------" ) + print("---------------------------------------------------") + print(">>> Testing ElementaryReaction class") + print("---------------------------------------------------") - s0 = pz.Species( "*", 1 ) # Empty adsorption site - s1 = pz.Species( "H*", 1 ) # H adsorbed with dentation 1 - s2 = pz.Species( "H2*", 1 ) # H2 adsorbed with dentation 1 + s0 = pz.Species("*", 1) # Empty adsorption site + s1 = pz.Species("H*", 1) # H adsorbed with dentation 1 + s2 = pz.Species("H2*", 1) # H2 adsorbed with dentation 1 - myReaction1 = pz.ElementaryReaction( site_types=( "f", "f" ), - neighboring=[ (0,1) ], - initial=[s1, s1], - final=[s2, s0], - reversible=True, - pre_expon=1e+13, - pe_ratio=0.676, - activation_energy=0.2 ) + myReaction1 = pz.ElementaryReaction( + site_types=("f", "f"), + neighboring=[(0, 1)], + initial=[s1, s1], + final=[s2, s0], + reversible=True, + pre_expon=1e13, + pe_ratio=0.676, + activation_energy=0.2, + ) - print( myReaction1 ) + print(myReaction1) output = str(myReaction1) expectedOutput = """\ @@ -39,20 +41,22 @@ def test_ElementaryReaction(): activ_eng 0.2 end_reversible_step\ """ - assert( pz.utils.compare( output, expectedOutput, 1e-3 
) ) + assert pz.utils.compare(output, expectedOutput, 1e-3) - s3 = pz.Species( "H2", gas_energy=0.0 ) # H2(gas) + s3 = pz.Species("H2", gas_energy=0.0) # H2(gas) - myReaction2 = pz.ElementaryReaction( site_types=( "f", "f" ), - neighboring=[ (0,1) ], - initial=[ s1, s1 ], - final=[ s0, s0, s3 ], - reversible=False, - pre_expon=1e+13, - pe_ratio=0.676, - activation_energy=0.2 ) + myReaction2 = pz.ElementaryReaction( + site_types=("f", "f"), + neighboring=[(0, 1)], + initial=[s1, s1], + final=[s0, s0, s3], + reversible=False, + pre_expon=1e13, + pe_ratio=0.676, + activation_energy=0.2, + ) - print( myReaction2 ) + print(myReaction2) output = str(myReaction2) expectedOutput = """\ @@ -71,4 +75,4 @@ def test_ElementaryReaction(): activ_eng 0.200 end_step\ """ - assert( pz.utils.compare( output, expectedOutput, 1e-3 ) ) + assert pz.utils.compare(output, expectedOutput, 1e-3) diff --git a/tests/test_Lattice.py b/tests/test_Lattice.py index 2fe7186..10374c3 100644 --- a/tests/test_Lattice.py +++ b/tests/test_Lattice.py @@ -10,10 +10,10 @@ def test_Lattice(): print("") print("From default lattices") print("---------------------") - myLattice = pz.Lattice( lattice_type=pz.Lattice.HEXAGONAL, lattice_constant=1.0, repeat_cell=[8,10] ) + myLattice = pz.Lattice(lattice_type=pz.Lattice.HEXAGONAL, lattice_constant=1.0, repeat_cell=[8, 10]) print(myLattice) - myLattice.plot( pause=2, close=True ) + myLattice.plot(pause=2, close=True) output = str(myLattice) expectedOutput = """\ @@ -21,38 +21,44 @@ def test_Lattice(): hexagonal_periodic 1.0 8 10 end_lattice\ """ - assert( pz.utils.compare( output, expectedOutput, 1e-3 ) ) + assert pz.utils.compare(output, expectedOutput, 1e-3) print("") print("From unit-cell") print("--------------") - myLattice = pz.Lattice( cell_vectors=[[2.77185866, 0.00000000],[1.38592933, 2.40050002]], - repeat_cell=[4, 4], - site_types=["b", "h", "b", "b", "f", "t"], - site_coordinates=[[0.00001, 0.49999], - [0.33333, 0.33333], - [0.49999, 0.00001], - [0.49999, 0.49999], - [0.66667, 0.66667], - [0.99999, 0.00001]], - neighboring_structure=[ [(0,1), pz.Lattice.SELF], - [(1,2), pz.Lattice.SELF], - [(1,3), pz.Lattice.SELF], - [(3,4), pz.Lattice.SELF], - [(4,2), pz.Lattice.NORTH], - [(4,0), pz.Lattice.EAST], - [(5,5), pz.Lattice.NORTH], - [(5,5), pz.Lattice.EAST], - [(5,4), pz.Lattice.SELF], - [(5,1), pz.Lattice.SELF], - [(5,1), pz.Lattice.EAST], - [(5,4), pz.Lattice.SOUTHEAST], - [(5,1), pz.Lattice.SOUTHEAST], - [(4,5), pz.Lattice.NORTH], - [(5,5), pz.Lattice.SOUTHEAST] ] ) + myLattice = pz.Lattice( + cell_vectors=[[2.77185866, 0.00000000], [1.38592933, 2.40050002]], + repeat_cell=[4, 4], + site_types=["b", "h", "b", "b", "f", "t"], + site_coordinates=[ + [0.00001, 0.49999], + [0.33333, 0.33333], + [0.49999, 0.00001], + [0.49999, 0.49999], + [0.66667, 0.66667], + [0.99999, 0.00001], + ], + neighboring_structure=[ + [(0, 1), pz.Lattice.SELF], + [(1, 2), pz.Lattice.SELF], + [(1, 3), pz.Lattice.SELF], + [(3, 4), pz.Lattice.SELF], + [(4, 2), pz.Lattice.NORTH], + [(4, 0), pz.Lattice.EAST], + [(5, 5), pz.Lattice.NORTH], + [(5, 5), pz.Lattice.EAST], + [(5, 4), pz.Lattice.SELF], + [(5, 1), pz.Lattice.SELF], + [(5, 1), pz.Lattice.EAST], + [(5, 4), pz.Lattice.SOUTHEAST], + [(5, 1), pz.Lattice.SOUTHEAST], + [(4, 5), pz.Lattice.NORTH], + [(5, 5), pz.Lattice.SOUTHEAST], + ], + ) print(myLattice) - myLattice.plot( pause=2, close=True ) + myLattice.plot(pause=2, close=True) output = str(myLattice) expectedOutput = """\ @@ -91,47 +97,68 @@ def test_Lattice(): end_neighboring_structure end_lattice\ """ 
- assert( pz.utils.compare( output, expectedOutput, 1e-3 ) ) + assert pz.utils.compare(output, expectedOutput, 1e-3) print("") print("From explicitly defined lattices") print("--------------------------------") - myLattice = pz.Lattice( site_types=["cn2", "br42", "cn4", "br42", "cn2", "br42", "br44", "br44", - "br42", "cn4", "br44", "cn4", "br42", "br42", "cn2"], - site_coordinates=[[0.0000e+0, 0.0000e+0], - [1.4425e+0, 0.0000e+0], - [2.8850e+0, 0.0000e+0], - [4.3275e+0, 0.0000e+0], - [5.7700e+0, 0.0000e+0], - [7.2125e-1, 1.2492e+0], - [2.1637e+0, 1.2492e+0], - [3.6062e+0, 1.2492e+0], - [5.0487e+0, 1.2492e+0], - [1.4425e+0, 2.4985e+0], - [2.8850e+0, 2.4985e+0], - [4.3275e+0, 2.4985e+0], - [2.1637e+0, 3.7477e+0], - [3.6062e+0, 3.7477e+0], - [2.8850e+0, 4.9970e+0]], - nearest_neighbors=[[ 1, 5], - [ 0, 2], - [ 1, 3, 6, 7], - [ 2, 4], - [ 3, 8], - [ 0, 9], - [ 2, 9], - [ 2, 11], - [ 4, 11], - [ 5, 6, 10, 12], - [ 9, 11], - [ 7, 8, 10, 13], - [ 9, 14], - [11, 14], - [12, 13]] ) + myLattice = pz.Lattice( + site_types=[ + "cn2", + "br42", + "cn4", + "br42", + "cn2", + "br42", + "br44", + "br44", + "br42", + "cn4", + "br44", + "cn4", + "br42", + "br42", + "cn2", + ], + site_coordinates=[ + [0.0000e0, 0.0000e0], + [1.4425e0, 0.0000e0], + [2.8850e0, 0.0000e0], + [4.3275e0, 0.0000e0], + [5.7700e0, 0.0000e0], + [7.2125e-1, 1.2492e0], + [2.1637e0, 1.2492e0], + [3.6062e0, 1.2492e0], + [5.0487e0, 1.2492e0], + [1.4425e0, 2.4985e0], + [2.8850e0, 2.4985e0], + [4.3275e0, 2.4985e0], + [2.1637e0, 3.7477e0], + [3.6062e0, 3.7477e0], + [2.8850e0, 4.9970e0], + ], + nearest_neighbors=[ + [1, 5], + [0, 2], + [1, 3, 6, 7], + [2, 4], + [3, 8], + [0, 9], + [2, 9], + [2, 11], + [4, 11], + [5, 6, 10, 12], + [9, 11], + [7, 8, 10, 13], + [9, 14], + [11, 14], + [12, 13], + ], + ) print(myLattice) - myLattice.plot( pause=2, close=True ) + myLattice.plot(pause=2, close=True) output = str(myLattice) expectedOutput = """\ @@ -159,9 +186,9 @@ def test_Lattice(): end_lattice_structure end_lattice\ """ - assert( pz.utils.compare( output, expectedOutput, 1e-3 ) ) + assert pz.utils.compare(output, expectedOutput, 1e-3) ## reading from yaml - #myLattice = pz.Lattice(path_to_slab_yaml="./pyzacros/slabs/pd111.yaml") - #output2 = str(myLattice) - #assert( pz.utils.compare( output2, expectedOutput, 1e-3 ) ) + # myLattice = pz.Lattice(path_to_slab_yaml="./pyzacros/slabs/pd111.yaml") + # output2 = str(myLattice) + # assert( pz.utils.compare( output2, expectedOutput, 1e-3 ) ) diff --git a/tests/test_LatticeInitialState.py b/tests/test_LatticeInitialState.py index 937eee6..3dcadc6 100644 --- a/tests/test_LatticeInitialState.py +++ b/tests/test_LatticeInitialState.py @@ -8,36 +8,40 @@ def test_InitialState(): print(">>> Testing InitialState class") print("---------------------------------------------------") - s0 = pz.Species( "*", 1 ) # Empty adsorption site - s1 = pz.Species( "H*", 1 ) # H adsorbed with dentation 1 - s2 = pz.Species( "H2**", 2 ) # H2 adsorbed with dentation 2 - s3 = pz.Species( "CO3***", 3) # CO3 adsorbed with dentation 3 + s0 = pz.Species("*", 1) # Empty adsorption site + s1 = pz.Species("H*", 1) # H adsorbed with dentation 1 + s2 = pz.Species("H2**", 2) # H2 adsorbed with dentation 2 + s3 = pz.Species("CO3***", 3) # CO3 adsorbed with dentation 3 - lattice = pz.Lattice(cell_vectors=[[2.814, 0.000],[1.407, 2.437]], - repeat_cell=[3, 3], - site_types=["fcc", "hcp"], - site_coordinates=[[0.33333,0.33333],[0.66667,0.66667]], - neighboring_structure=[[(0,0), pz.Lattice.NORTH], - [(0,0), pz.Lattice.EAST], - [(0,0), 
pz.Lattice.SOUTHEAST], - [(1,0), pz.Lattice.SELF], - [(1,0), pz.Lattice.EAST], - [(1,0), pz.Lattice.NORTH], - [(1,1), pz.Lattice.NORTH], - [(1,1), pz.Lattice.EAST], - [(1,1), pz.Lattice.SOUTHEAST]]) + lattice = pz.Lattice( + cell_vectors=[[2.814, 0.000], [1.407, 2.437]], + repeat_cell=[3, 3], + site_types=["fcc", "hcp"], + site_coordinates=[[0.33333, 0.33333], [0.66667, 0.66667]], + neighboring_structure=[ + [(0, 0), pz.Lattice.NORTH], + [(0, 0), pz.Lattice.EAST], + [(0, 0), pz.Lattice.SOUTHEAST], + [(1, 0), pz.Lattice.SELF], + [(1, 0), pz.Lattice.EAST], + [(1, 0), pz.Lattice.NORTH], + [(1, 1), pz.Lattice.NORTH], + [(1, 1), pz.Lattice.EAST], + [(1, 1), pz.Lattice.SOUTHEAST], + ], + ) - lattice.plot( show_sites_ids=True, pause=2, close=True ) - initialState = pz.LatticeState( lattice, [s0,s1,s2] ) + lattice.plot(show_sites_ids=True, pause=2, close=True) + initialState = pz.LatticeState(lattice, [s0, s1, s2]) random.seed(10) - initialState.fill_sites_random( site_name="fcc", species="H*", coverage=0.5 ) - initialState.fill_sites_random( site_name=("fcc","hcp"), species=s2, coverage=0.5 ) - initialState.fill_site( (0,1), s2 ) - initialState.fill_sites_random( site_name="hcp", species="H*", coverage=1.0 ) - initialState.plot( pause=2, show_sites_ids=True, close=True ) + initialState.fill_sites_random(site_name="fcc", species="H*", coverage=0.5) + initialState.fill_sites_random(site_name=("fcc", "hcp"), species=s2, coverage=0.5) + initialState.fill_site((0, 1), s2) + initialState.fill_sites_random(site_name="hcp", species="H*", coverage=1.0) + initialState.plot(pause=2, show_sites_ids=True, close=True) - print( initialState ) + print(initialState) output = str(initialState) @@ -63,15 +67,17 @@ def test_InitialState(): seed_on_sites H* 18 end_initial_state\ """ - assert(output == expectedOutput) + assert output == expectedOutput - initialState = pz.LatticeState( lattice, [s3] ) - initialState.fill_sites_random( site_name=("fcc","fcc","fcc"), species=s3, coverage=0.1, neighboring=[[0,1],[1,2],[0,2]]) - initialState.fill_sites_random( site_name=("fcc","fcc","fcc"), species=s3, coverage=0.3 ) - initialState.fill_site( (12,13,14), s3 ) - initialState.plot( pause=2, show_sites_ids=True, close=True ) + initialState = pz.LatticeState(lattice, [s3]) + initialState.fill_sites_random( + site_name=("fcc", "fcc", "fcc"), species=s3, coverage=0.1, neighboring=[[0, 1], [1, 2], [0, 2]] + ) + initialState.fill_sites_random(site_name=("fcc", "fcc", "fcc"), species=s3, coverage=0.3) + initialState.fill_site((12, 13, 14), s3) + initialState.plot(pause=2, show_sites_ids=True, close=True) - print( initialState ) + print(initialState) output = str(initialState) @@ -85,4 +91,4 @@ def test_InitialState(): seed_on_sites CO3*** 13 14 15 end_initial_state\ """ - assert(output == expectedOutput) + assert output == expectedOutput diff --git a/tests/test_Mechanism.py b/tests/test_Mechanism.py index 9b94a01..5e3771d 100644 --- a/tests/test_Mechanism.py +++ b/tests/test_Mechanism.py @@ -3,38 +3,42 @@ def test_Mechanism(): - print( "---------------------------------------------------" ) - print( ">>> Testing Mechanism class" ) - print( "---------------------------------------------------" ) + print("---------------------------------------------------") + print(">>> Testing Mechanism class") + print("---------------------------------------------------") - s0 = pz.Species( "*", 1 ) # Empty adsorption site - s1 = pz.Species( "H*", 1 ) # H adsorbed with dentation 1 - s2 = pz.Species( "H2*", 1 ) # H2 adsorbed with dentation 1 - s3 = 
pz.Species( "H2*", 2 ) # H2 adsorbed with dentation 2 + s0 = pz.Species("*", 1) # Empty adsorption site + s1 = pz.Species("H*", 1) # H adsorbed with dentation 1 + s2 = pz.Species("H2*", 1) # H2 adsorbed with dentation 1 + s3 = pz.Species("H2*", 2) # H2 adsorbed with dentation 2 - myReaction1 = pz.ElementaryReaction( site_types=( "f", "f" ), - neighboring=[ (0,1) ], - initial=[ s1, s1 ], - final=[ s2, s0 ], - reversible=True, - pre_expon=1e+13, - pe_ratio=0.676, - activation_energy = 0.2 ) + myReaction1 = pz.ElementaryReaction( + site_types=("f", "f"), + neighboring=[(0, 1)], + initial=[s1, s1], + final=[s2, s0], + reversible=True, + pre_expon=1e13, + pe_ratio=0.676, + activation_energy=0.2, + ) - myReaction2 = pz.ElementaryReaction( site_types=( "f", "f" ), - neighboring=[ (0,1) ], - initial=[ s3, s3 ], - final=[ s2, s0 ], - reversible=True, - pre_expon=1e+13, - pe_ratio=0.676, - activation_energy = 0.2 ) + myReaction2 = pz.ElementaryReaction( + site_types=("f", "f"), + neighboring=[(0, 1)], + initial=[s3, s3], + final=[s2, s0], + reversible=True, + pre_expon=1e13, + pe_ratio=0.676, + activation_energy=0.2, + ) myMechanism = pz.Mechanism() - myMechanism.append( myReaction1 ) - myMechanism.append( myReaction2 ) + myMechanism.append(myReaction1) + myMechanism.append(myReaction2) - print( myMechanism ) + print(myMechanism) output = str(myMechanism) @@ -73,4 +77,4 @@ def test_Mechanism(): end_mechanism\ """ - assert( pz.utils.compare( output, expectedOutput, 1e-3 ) ) + assert pz.utils.compare(output, expectedOutput, 1e-3) diff --git a/tests/test_Parameters.py b/tests/test_Parameters.py index e7054d5..e0be987 100644 --- a/tests/test_Parameters.py +++ b/tests/test_Parameters.py @@ -3,27 +3,28 @@ import scm.pyzacros as pz import scm.pyzacros.utils + def test_Parameters(): - print( "---------------------------------------------------" ) - print( ">>> Testing Parameters class" ) - print( "---------------------------------------------------" ) + print("---------------------------------------------------") + print(">>> Testing Parameters class") + print("---------------------------------------------------") output = "" params = pz.ZacrosParametersScanJob.Parameters() - params.add( 'x_CO', 'molar_fraction.CO', numpy.arange(0.0, 1.0, 0.25) ) - params.add( 'x_O2', 'molar_fraction.O2', lambda p: 1.0-p['x_CO'] ) - params.set_generator( pz.ZacrosParametersScanJob.zipGenerator ) + params.add("x_CO", "molar_fraction.CO", numpy.arange(0.0, 1.0, 0.25)) + params.add("x_O2", "molar_fraction.O2", lambda p: 1.0 - p["x_CO"]) + params.set_generator(pz.ZacrosParametersScanJob.zipGenerator) output += "zipGenerator:\n" output += str(params) output += "\n" params = pz.ZacrosParametersScanJob.Parameters() - params.add( 'x_CO', 'molar_fraction.CO', numpy.arange(0.0, 1.0, 0.4) ) - params.add( 'x_O2', 'molar_fraction.O2', numpy.arange(0.0, 1.0, 0.4) ) - params.add( 'x_N2', 'molar_fraction.N2', lambda p: 0.11+p['x_CO']+p['x_O2'] ) - params.set_generator( pz.ZacrosParametersScanJob.meshgridGenerator ) + params.add("x_CO", "molar_fraction.CO", numpy.arange(0.0, 1.0, 0.4)) + params.add("x_O2", "molar_fraction.O2", numpy.arange(0.0, 1.0, 0.4)) + params.add("x_N2", "molar_fraction.N2", lambda p: 0.11 + p["x_CO"] + p["x_O2"]) + params.set_generator(pz.ZacrosParametersScanJob.meshgridGenerator) output += "meshgridGenerator:\n" output += str(params) @@ -48,4 +49,4 @@ def test_Parameters(): (2, 1): {'x_CO': 0.4, 'x_O2': 0.8, 'x_N2': 1.31} (2, 2): {'x_CO': 0.8, 'x_O2': 0.8, 'x_N2': 1.71}\ """ - assert( pz.utils.compare( output, 
expectedOutput, 1e-3 ) ) + assert pz.utils.compare(output, expectedOutput, 1e-3) diff --git a/tests/test_RKFLoader.py b/tests/test_RKFLoader.py index 96f9e40..fa25278 100644 --- a/tests/test_RKFLoader.py +++ b/tests/test_RKFLoader.py @@ -13,44 +13,44 @@ def generateAMSResults(test_folder): sett_ads = scm.plams.Settings() sett_ads.input.ams.Task = "PESExploration" - sett_ads.input.ams.PESExploration.Job = 'ProcessSearch' + sett_ads.input.ams.PESExploration.Job = "ProcessSearch" sett_ads.input.ams.PESExploration.RandomSeed = 100 sett_ads.input.ams.PESExploration.NumExpeditions = 10 sett_ads.input.ams.PESExploration.NumExplorers = 4 sett_ads.input.ams.PESExploration.Optimizer.ConvergedForce = 0.005 sett_ads.input.ams.PESExploration.SaddleSearch.MaxEnergy = 4.0 - sett_ads.input.ams.PESExploration.DynamicSeedStates = 'T' + sett_ads.input.ams.PESExploration.DynamicSeedStates = "T" sett_ads.input.ams.PESExploration.StructureComparison.DistanceDifference = 0.1 sett_ads.input.ams.PESExploration.StructureComparison.NeighborCutoff = 3.8 sett_ads.input.ams.PESExploration.StructureComparison.EnergyDifference = 0.05 - sett_ads.input.ams.PESExploration.StructureComparison.CheckSymmetry = 'T' - sett_ads.input.ams.PESExploration.CalculateFragments = 'T' - sett_ads.input.ams.PESExploration.StatesAlignment.ReferenceRegion = 'surface' - sett_ads.input.ReaxFF.ForceField = 'CHONSFPtClNi.ff' - sett_ads.input.ReaxFF.Charges.Solver = 'Direct' - sett_ads.input.ams.Constraints.FixedRegion = 'surface' + sett_ads.input.ams.PESExploration.StructureComparison.CheckSymmetry = "T" + sett_ads.input.ams.PESExploration.CalculateFragments = "T" + sett_ads.input.ams.PESExploration.StatesAlignment.ReferenceRegion = "surface" + sett_ads.input.ReaxFF.ForceField = "CHONSFPtClNi.ff" + sett_ads.input.ReaxFF.Charges.Solver = "Direct" + sett_ads.input.ams.Constraints.FixedRegion = "surface" sett_lat = sett_ads.copy() - sett_lat.input.ams.PESExploration.Job = 'BindingSites' - sett_lat.input.ams.PESExploration.LoadEnergyLandscape.GenerateSymmetryImages = 'T' - sett_lat.input.ams.PESExploration.CalculateFragments = 'F' + sett_lat.input.ams.PESExploration.Job = "BindingSites" + sett_lat.input.ams.PESExploration.LoadEnergyLandscape.GenerateSymmetryImages = "T" + sett_lat.input.ams.PESExploration.CalculateFragments = "F" sett_lat.input.ams.PESExploration.BindingSites.NeighborCutoff = 2.4 sett_lat.input.ams.PESExploration.BindingSites.MaxCoordinationShellsForLabels = 3 - sett_lat.input.ams.PESExploration.StructureComparison.CheckSymmetry = 'F' + sett_lat.input.ams.PESExploration.StructureComparison.CheckSymmetry = "F" - molO = scm.plams.Molecule( test_folder / "O-Pt111.xyz" ) - molCO = scm.plams.Molecule( test_folder / "CO-Pt111.xyz" ) + molO = scm.plams.Molecule(test_folder / "O-Pt111.xyz") + molCO = scm.plams.Molecule(test_folder / "CO-Pt111.xyz") jobO_ads = scm.plams.AMSJob(molecule=molO, settings=sett_ads, name="O_ads-Pt111") jobCO_ads = scm.plams.AMSJob(molecule=molCO, settings=sett_ads, name="CO_ads-Pt111") - sett_lat.input.ams.PESExploration.LoadEnergyLandscape.Path= '../O_ads-Pt111' + sett_lat.input.ams.PESExploration.LoadEnergyLandscape.Path = "../O_ads-Pt111" jobO_lat = scm.plams.AMSJob(molecule=molO, settings=sett_lat, name="O-Pt111", depend=[jobO_ads]) - sett_lat.input.ams.PESExploration.LoadEnergyLandscape.Path= '../CO_ads-Pt111' + sett_lat.input.ams.PESExploration.LoadEnergyLandscape.Path = "../CO_ads-Pt111" jobCO_lat = scm.plams.AMSJob(molecule=molCO, settings=sett_lat, name="CO-Pt111", depend=[jobCO_ads]) - jobs = [ jobO_ads, 
jobCO_ads, jobO_lat, jobCO_lat ] + jobs = [jobO_ads, jobCO_ads, jobO_lat, jobCO_lat] for job in jobs: job.run() @@ -58,45 +58,45 @@ def generateAMSResults(test_folder): success = True for job in jobs: if not job.ok() and "AMSBIN" not in os.environ: - print( "Warning: The calculation FAILED likely because AMS executable is not available!" ) - print( " For testing purposes, now we load precalculated results.") + print("Warning: The calculation FAILED likely because AMS executable is not available!") + print(" For testing purposes, now we load precalculated results.") success = False if success: - scm.plams.delete_job( jobO_ads ) - scm.plams.delete_job( jobCO_ads ) + scm.plams.delete_job(jobO_ads) + scm.plams.delete_job(jobCO_ads) else: jobO_lat = scm.plams.load(test_folder / "test_RKFLoader.data/O-Pt111/O-Pt111.dill") jobCO_lat = scm.plams.load(test_folder / "test_RKFLoader.data/CO-Pt111/CO-Pt111.dill") - return jobO_lat.results,jobCO_lat.results + return jobO_lat.results, jobCO_lat.results def test_RKFLoader(test_folder, tmp_path): - print( "---------------------------------------------------" ) - print( ">>> Testing RKFLoader class" ) - print( "---------------------------------------------------" ) + print("---------------------------------------------------") + print(">>> Testing RKFLoader class") + print("---------------------------------------------------") - workdir = tmp_path / 'test_RKFLoader' + workdir = tmp_path / "test_RKFLoader" scm.plams.init(folder=str(workdir)) - resultsO,resultsCO = generateAMSResults(test_folder=test_folder) + resultsO, resultsCO = generateAMSResults(test_folder=test_folder) scm.plams.finish() - loaderO = pz.RKFLoader( resultsO ) - loaderCO = pz.RKFLoader( resultsCO ) + loaderO = pz.RKFLoader(resultsO) + loaderCO = pz.RKFLoader(resultsCO) - loader = pz.RKFLoader.merge( [loaderO, loaderCO] ) - loader.replace_site_types( ['N33', 'N331', 'N221'], ['fcc', 'hcp', 'br'] ) + loader = pz.RKFLoader.merge([loaderO, loaderCO]) + loader.replace_site_types(["N33", "N331", "N221"], ["fcc", "hcp", "br"]) - output = str( loader.clusterExpansion )+"\n\n" - output += str( loader.mechanism )+"\n\n" + output = str(loader.clusterExpansion) + "\n\n" + output += str(loader.mechanism) + "\n\n" - loader.lattice.set_repeat_cell( [2,2] ) - loader.lattice.plot( pause=2 ) + loader.lattice.set_repeat_cell([2, 2]) + loader.lattice.plot(pause=2) - output += str( loader.lattice ) + output += str(loader.lattice) print(output) @@ -336,4 +336,4 @@ def test_RKFLoader(test_folder, tmp_path): end_lattice_structure end_lattice\ """ - assert( pz.utils.compare( output, expectedOutput, abs_error=1e-12, rel_error=0.1 ) ) + assert pz.utils.compare(output, expectedOutput, abs_error=1e-12, rel_error=0.1) diff --git a/tests/test_Settings.py b/tests/test_Settings.py index a4c87a4..99d9404 100644 --- a/tests/test_Settings.py +++ b/tests/test_Settings.py @@ -11,12 +11,12 @@ def test_Settings(): sett.random_seed = 71543 sett.temperature = 380.0 sett.pressure = 2.00 - sett.snapshots= ('time', 1e-5) - sett.process_statistics = ('time', 1e-5) - sett.species_numbers = ('time', 1e-5) - sett.event_report = 'off' - sett.max_steps = 'infinity' - sett.max_time = 1.0e+50 + sett.snapshots = ("time", 1e-5) + sett.process_statistics = ("time", 1e-5) + sett.species_numbers = ("time", 1e-5) + sett.event_report = "off" + sett.max_steps = "infinity" + sett.max_time = 1.0e50 sett.wall_time = 5000 output = str(sett) @@ -35,4 +35,4 @@ def test_Settings(): max_time 1e+50 wall_time 5000\ """ - assert( pz.utils.compare( output, 
expectedOutput, 1e-3 ) ) + assert pz.utils.compare(output, expectedOutput, 1e-3) diff --git a/tests/test_Species.py b/tests/test_Species.py index 58f0808..3e5d840 100644 --- a/tests/test_Species.py +++ b/tests/test_Species.py @@ -12,7 +12,7 @@ def test_Species(): output = str(myAdsorbedSpecies) expectedOutput = "H2*" - assert( output == expectedOutput ) + assert output == expectedOutput # Gas specie myGasSpecies = pz.Species("H2", gas_energy=0.0) @@ -20,7 +20,7 @@ def test_Species(): output = str(myGasSpecies) expectedOutput = "H2" - assert( output == expectedOutput ) + assert output == expectedOutput # Free adsorption site myAdsorptionFreeSite = pz.Species("*") @@ -28,4 +28,4 @@ def test_Species(): output = str(myAdsorptionFreeSite) expectedOutput = "*" - assert( output == expectedOutput ) + assert output == expectedOutput diff --git a/tests/test_SpeciesList.py b/tests/test_SpeciesList.py index 71a3381..4be67f1 100644 --- a/tests/test_SpeciesList.py +++ b/tests/test_SpeciesList.py @@ -3,27 +3,27 @@ def test_SpeciesList(): - print( "---------------------------------------------------" ) - print( ">>> Testing SpeciesList class" ) - print( "---------------------------------------------------" ) + print("---------------------------------------------------") + print(">>> Testing SpeciesList class") + print("---------------------------------------------------") # Adsorbed species - asp1 = pz.Species( "H2*", denticity=1 ) - asp2 = pz.Species( "O2*", denticity=1 ) - fas = pz.Species( "*" ) # Free adsorption site + asp1 = pz.Species("H2*", denticity=1) + asp2 = pz.Species("O2*", denticity=1) + fas = pz.Species("*") # Free adsorption site # Gas species - gs1 = pz.Species( "H2", gas_energy=0.0 ) - gs2 = pz.Species( "O2", gas_energy=0.0 ) + gs1 = pz.Species("H2", gas_energy=0.0) + gs2 = pz.Species("O2", gas_energy=0.0) - #mySpeciesList = pz.SpeciesList() - #mySpeciesList.append( asp1 ) - #mySpeciesList.append( asp2 ) - #mySpeciesList.append( gs1 ) - #mySpeciesList.append( gs2 ) - #mySpeciesList.append( fas ) + # mySpeciesList = pz.SpeciesList() + # mySpeciesList.append( asp1 ) + # mySpeciesList.append( asp2 ) + # mySpeciesList.append( gs1 ) + # mySpeciesList.append( gs2 ) + # mySpeciesList.append( fas ) - mySpeciesList = pz.SpeciesList( [asp1, asp2, gs1, gs2, fas] ) + mySpeciesList = pz.SpeciesList([asp1, asp2, gs1, gs2, fas]) print(mySpeciesList) @@ -37,4 +37,4 @@ def test_SpeciesList(): surf_specs_names H2* O2* surf_specs_dent 1 1\ """ - assert( pz.utils.compare( output, expectedOutput, 1e-3 ) ) + assert pz.utils.compare(output, expectedOutput, 1e-3) diff --git a/tests/test_ZacrosJob.py b/tests/test_ZacrosJob.py index 6f3dc4a..db1a698 100644 --- a/tests/test_ZacrosJob.py +++ b/tests/test_ZacrosJob.py @@ -5,53 +5,48 @@ def test_ZacrosJob(test_folder, tmp_path): - print( "---------------------------------------------------" ) - print( ">>> Testing ZacrosJob class" ) - print( "---------------------------------------------------" ) - - s0 = pz.Species( "*", 1 ) # Empty adsorption site - H_ads = pz.Species( "H*", 1 ) # H adsorbed with dentation 1 - H2_ads = pz.Species( "H2*", 1 ) # H2 adsorbed with dentation 1 - H2_gas = pz.Species( "H2", gas_energy=0.0 ) # H2(gas) - - myLattice = pz.Lattice(lattice_type=pz.Lattice.HEXAGONAL, lattice_constant=1.0, repeat_cell=[8,10]) - - myCluster1 = pz.Cluster( neighboring=[ (0,1) ], - species=[ H_ads, H_ads ], - multiplicity=2, - energy=0.1 ) - - myCluster2 = pz.Cluster( neighboring=[ (0,1) ], - species=[ H2_ads, s0 ], - multiplicity=2, - energy=0.1 ) - - myCluster3 = 
pz.Cluster( neighboring=[ (0,1) ], - species=[ s0, s0 ], - multiplicity=2, - energy=0.1 ) - - myClusterExpansion = pz.ClusterExpansion( [myCluster1, myCluster2, myCluster3] ) - - reaction1 = pz.ElementaryReaction( neighboring=[ (0,1) ], - initial=[ H_ads, H_ads ], - final=[ H2_ads, s0 ], - pre_expon=2.5, - pe_ratio=1.0, - activation_energy=0.2 ) - - reaction2 = pz.ElementaryReaction( neighboring=[ (0,1) ], - initial=[ H2_ads, s0 ], - final=[ s0, s0, H2_gas ], - pre_expon=10.0, - pe_ratio=0.7, - activation_energy=0.2 ) + print("---------------------------------------------------") + print(">>> Testing ZacrosJob class") + print("---------------------------------------------------") + + s0 = pz.Species("*", 1) # Empty adsorption site + H_ads = pz.Species("H*", 1) # H adsorbed with dentation 1 + H2_ads = pz.Species("H2*", 1) # H2 adsorbed with dentation 1 + H2_gas = pz.Species("H2", gas_energy=0.0) # H2(gas) + + myLattice = pz.Lattice(lattice_type=pz.Lattice.HEXAGONAL, lattice_constant=1.0, repeat_cell=[8, 10]) + + myCluster1 = pz.Cluster(neighboring=[(0, 1)], species=[H_ads, H_ads], multiplicity=2, energy=0.1) + + myCluster2 = pz.Cluster(neighboring=[(0, 1)], species=[H2_ads, s0], multiplicity=2, energy=0.1) + + myCluster3 = pz.Cluster(neighboring=[(0, 1)], species=[s0, s0], multiplicity=2, energy=0.1) + + myClusterExpansion = pz.ClusterExpansion([myCluster1, myCluster2, myCluster3]) + + reaction1 = pz.ElementaryReaction( + neighboring=[(0, 1)], + initial=[H_ads, H_ads], + final=[H2_ads, s0], + pre_expon=2.5, + pe_ratio=1.0, + activation_energy=0.2, + ) + + reaction2 = pz.ElementaryReaction( + neighboring=[(0, 1)], + initial=[H2_ads, s0], + final=[s0, s0, H2_gas], + pre_expon=10.0, + pe_ratio=0.7, + activation_energy=0.2, + ) myMechanism = pz.Mechanism() - myMechanism.append( reaction1 ) - myMechanism.append( reaction2 ) + myMechanism.append(reaction1) + myMechanism.append(reaction2) - scm.plams.init(folder=str(tmp_path / 'test_ZacrosJob')) + scm.plams.init(folder=str(tmp_path / "test_ZacrosJob")) sett = pz.Settings() sett.random_seed = 10 @@ -60,42 +55,42 @@ def test_ZacrosJob(test_folder, tmp_path): sett.max_steps = 1 sett.molar_fraction.H2 = 1.0 - myJob = pz.ZacrosJob( myLattice, myMechanism, myClusterExpansion, settings=sett ) + myJob = pz.ZacrosJob(myLattice, myMechanism, myClusterExpansion, settings=sett) print(myJob) output = str(myJob) - with open( test_folder / "test_ZacrosJob_expected_input.txt", "r" ) as inp: + with open(test_folder / "test_ZacrosJob_expected_input.txt", "r") as inp: expectedOutput = inp.read() - assert( pz.utils.compare( output, expectedOutput, 1e-3 ) ) + assert pz.utils.compare(output, expectedOutput, 1e-3) try: myJob.run() - if( not myJob.ok() ): + if not myJob.ok(): raise scm.plams.JobError("Error: The Zacros calculation FAILED!") except pz.ZacrosExecutableNotFoundError: - print( "Warning: The calculation FAILED because the zacros executable is not available!" 
) - print( " For testing purposes, we just omit this step.") + print("Warning: The calculation FAILED because the zacros executable is not available!") + print(" For testing purposes, we just omit this step.") - myJob = pz.ZacrosJob.load_external( path=test_folder / 'test_ZacrosJob.idata/default' ) + myJob = pz.ZacrosJob.load_external(path=test_folder / "test_ZacrosJob.idata/default") print(myJob) output = str(myJob) - with open( test_folder / "test_ZacrosJob_expected_input_default.txt", "r" ) as inp: + with open(test_folder / "test_ZacrosJob_expected_input_default.txt", "r") as inp: expectedOutput = inp.read() - assert( pz.utils.compare( output, expectedOutput, 1e-3 ) ) + assert pz.utils.compare(output, expectedOutput, 1e-3) - myJob = pz.ZacrosJob.load_external( path=test_folder / 'test_ZacrosJob.idata/periodic_cell' ) + myJob = pz.ZacrosJob.load_external(path=test_folder / "test_ZacrosJob.idata/periodic_cell") print(myJob) output = str(myJob) - with open( test_folder / "test_ZacrosJob_expected_input_periodic_cell.txt", "r" ) as inp: + with open(test_folder / "test_ZacrosJob_expected_input_periodic_cell.txt", "r") as inp: expectedOutput = inp.read() - assert( pz.utils.compare( output, expectedOutput, 1e-3 ) ) + assert pz.utils.compare(output, expectedOutput, 1e-3) - myJob = pz.ZacrosJob.load_external( path=test_folder / 'test_ZacrosJob.idata/explicit' ) + myJob = pz.ZacrosJob.load_external(path=test_folder / "test_ZacrosJob.idata/explicit") print(myJob) output = str(myJob) - with open( test_folder / "test_ZacrosJob_expected_input_explicit.txt", "r" ) as inp: + with open(test_folder / "test_ZacrosJob_expected_input_explicit.txt", "r") as inp: expectedOutput = inp.read() - assert( pz.utils.compare( output, expectedOutput, 1e-3 ) ) + assert pz.utils.compare(output, expectedOutput, 1e-3) scm.plams.finish() diff --git a/tests/test_ZacrosJob_restart.py b/tests/test_ZacrosJob_restart.py index fb3ac2b..1d06889 100644 --- a/tests/test_ZacrosJob_restart.py +++ b/tests/test_ZacrosJob_restart.py @@ -8,16 +8,16 @@ def test_ZacrosJob_restart(tmp_path): - print( "---------------------------------------------------" ) - print( ">>> Testing ZacrosJob_restart mechanism" ) - print( "---------------------------------------------------" ) + print("---------------------------------------------------") + print(">>> Testing ZacrosJob_restart mechanism") + print("---------------------------------------------------") - zgb = pz.models.ZiffGulariBarshad( repeat_cell=[20,20] ) + zgb = pz.models.ZiffGulariBarshad(repeat_cell=[20, 20]) - #--------------------------------------------- + # --------------------------------------------- # Calculation Settings - #--------------------------------------------- - scm.plams.init(folder=str(tmp_path / 'test_ZacrosJob_restart')) + # --------------------------------------------- + scm.plams.init(folder=str(tmp_path / "test_ZacrosJob_restart")) # Settings: sett = pz.Settings() @@ -26,66 +26,66 @@ def test_ZacrosJob_restart(tmp_path): sett.random_seed = 953129 sett.temperature = 500.0 sett.pressure = 1.0 - sett.snapshots = ('time', 0.1) - sett.process_statistics = ('time', 0.1) - sett.species_numbers = ('time', 0.1) - sett.max_steps = 'infinity' + sett.snapshots = ("time", 0.1) + sett.process_statistics = ("time", 0.1) + sett.species_numbers = ("time", 0.1) + sett.max_steps = "infinity" - #--------------------------------------------- + # --------------------------------------------- # Running the calculations - #--------------------------------------------- + # 
--------------------------------------------- output = "" # Running the full simulation for 2s sett.max_steps = 3225 - job0 = pz.ZacrosJob( settings=sett, - lattice=zgb.lattice, - mechanism=zgb.mechanism, - cluster_expansion=zgb.cluster_expansion ) + job0 = pz.ZacrosJob( + settings=sett, lattice=zgb.lattice, mechanism=zgb.mechanism, cluster_expansion=zgb.cluster_expansion + ) try: job0.run() - if( not job0.ok() ): + if not job0.ok(): raise scm.plams.JobError("Error: The Zacros calculation FAILED!") except pz.ZacrosExecutableNotFoundError: - print( "Warning: The calculation FAILED because the zacros executable is not available!" ) - print( " So let's skip this test." ) + print("Warning: The calculation FAILED because the zacros executable is not available!") + print(" So let's skip this test.") return data = job0.results.provided_quantities() - for time,nCO2 in zip(data['Time'],data['CO2']): - output += "%5.1f"%time + "%8d"%nCO2 + "\n" - output += "--"+"\n" + for time, nCO2 in zip(data["Time"], data["CO2"]): + output += "%5.1f" % time + "%8d" % nCO2 + "\n" + output += "--" + "\n" # Running the only the first 1s sett.max_steps = 2222 - job1 = pz.ZacrosJob( settings=sett, - lattice=zgb.lattice, - mechanism=zgb.mechanism, - cluster_expansion=zgb.cluster_expansion ) + job1 = pz.ZacrosJob( + settings=sett, lattice=zgb.lattice, mechanism=zgb.mechanism, cluster_expansion=zgb.cluster_expansion + ) job1.run() data = job1.results.provided_quantities() - for time,nCO2 in zip(data['Time'],data['CO2']): - output += "%5.1f"%time + "%8d"%nCO2 + "\n" - output += "--"+"\n" + for time, nCO2 in zip(data["Time"], data["CO2"]): + output += "%5.1f" % time + "%8d" % nCO2 + "\n" + output += "--" + "\n" # Resuming the simulation, starting at 1s and finishing at 2s sett.restart.max_steps = 3225 - job2 = pz.ZacrosJob( settings=sett, - lattice=zgb.lattice, - mechanism=zgb.mechanism, - cluster_expansion=zgb.cluster_expansion, - restart=job1 ) + job2 = pz.ZacrosJob( + settings=sett, + lattice=zgb.lattice, + mechanism=zgb.mechanism, + cluster_expansion=zgb.cluster_expansion, + restart=job1, + ) job2.run() data = job2.results.provided_quantities() - for time,nCO2 in zip(data['Time'],data['CO2']): - output += "%5.1f"%time + "%8d"%nCO2 + "\n" + for time, nCO2 in zip(data["Time"], data["CO2"]): + output += "%5.1f" % time + "%8d" % nCO2 + "\n" - print( output ) + print(output) expectedOutput = """\ 0.0 0 @@ -140,7 +140,7 @@ def test_ZacrosJob_restart(tmp_path): 1.8 1199\ """ - assert( pz.utils.compare( output, expectedOutput, 1e-3 ) ) + assert pz.utils.compare(output, expectedOutput, 1e-3) lattice_states0 = job0.results.lattice_states() lattice_states2 = job2.results.lattice_states() @@ -149,8 +149,8 @@ def test_ZacrosJob_restart(tmp_path): n = len(lattice_states0) - job0.results.plot_lattice_states( lattice_states0[n-1], pause=2, close=True ) - job2.results.plot_lattice_states( lattice_states2[n-1], pause=2, close=True ) + job0.results.plot_lattice_states(lattice_states0[n - 1], pause=2, close=True) + job2.results.plot_lattice_states(lattice_states2[n - 1], pause=2, close=True) process_statistics0 = job0.results.get_process_statistics() process_statistics1 = job1.results.get_process_statistics() @@ -158,8 +158,11 @@ def test_ZacrosJob_restart(tmp_path): assert len(process_statistics0) == len(process_statistics2) - job0.results.plot_process_statistics( process_statistics0[n-1], key="occurence_frequency", log_scale=True, pause=2, close=True ) - job2.results.plot_process_statistics( process_statistics2[n-1], 
key="occurence_frequency", log_scale=True, pause=2, close=True ) + job0.results.plot_process_statistics( + process_statistics0[n - 1], key="occurence_frequency", log_scale=True, pause=2, close=True + ) + job2.results.plot_process_statistics( + process_statistics2[n - 1], key="occurence_frequency", log_scale=True, pause=2, close=True + ) scm.plams.finish() - diff --git a/tests/test_ZacrosParametersScanJob.py b/tests/test_ZacrosParametersScanJob.py index 764caf3..6c44053 100644 --- a/tests/test_ZacrosParametersScanJob.py +++ b/tests/test_ZacrosParametersScanJob.py @@ -10,74 +10,82 @@ def test_ZacrosParametersScanJob(test_folder, tmp_path): - print( "---------------------------------------------------" ) - print( ">>> Testing ZacrosParametersScanJob class" ) - print( "---------------------------------------------------" ) + print("---------------------------------------------------") + print(">>> Testing ZacrosParametersScanJob class") + print("---------------------------------------------------") zgb = pz.models.ZiffGulariBarshad() - #--------------------------------------------- + # --------------------------------------------- # Calculation Settings - #--------------------------------------------- - scm.plams.init(folder=tmp_path / 'test_ZacrosParametersScanJob') + # --------------------------------------------- + scm.plams.init(folder=tmp_path / "test_ZacrosParametersScanJob") ## Run as many job simultaneously as there are cpu on the system - #maxjobs = multiprocessing.cpu_count() - #scm.plams.config.default_jobrunner = scm.plams.JobRunner(parallel=True, maxjobs=maxjobs) - #scm.plams.config.job.runscript.nproc = 1 - #print('Running up to {} jobs in parallel simultaneously'.format(maxjobs)) + # maxjobs = multiprocessing.cpu_count() + # scm.plams.config.default_jobrunner = scm.plams.JobRunner(parallel=True, maxjobs=maxjobs) + # scm.plams.config.job.runscript.nproc = 1 + # print('Running up to {} jobs in parallel simultaneously'.format(maxjobs)) # Settings: sett = pz.Settings() sett.random_seed = 953129 sett.temperature = 500.0 sett.pressure = 1.0 - sett.snapshots = ('time', 2.0) - sett.species_numbers = ('time', 0.1) + sett.snapshots = ("time", 2.0) + sett.species_numbers = ("time", 0.1) sett.max_time = 10.0 parameters = pz.ZacrosParametersScanJob.Parameters() - parameters.add('x_CO', 'molar_fraction.CO', numpy.arange(0.2, 0.8, 0.1) ) - parameters.add('x_O2', 'molar_fraction.O2', lambda params: 1.0-params['x_CO']) - parameters.set_generator( pz.ZacrosParametersScanJob.meshgridGenerator ) + parameters.add("x_CO", "molar_fraction.CO", numpy.arange(0.2, 0.8, 0.1)) + parameters.add("x_O2", "molar_fraction.O2", lambda params: 1.0 - params["x_CO"]) + parameters.set_generator(pz.ZacrosParametersScanJob.meshgridGenerator) try: - job = pz.ZacrosJob( settings=sett, lattice=zgb.lattice, mechanism=zgb.mechanism, - cluster_expansion=zgb.cluster_expansion ) + job = pz.ZacrosJob( + settings=sett, lattice=zgb.lattice, mechanism=zgb.mechanism, cluster_expansion=zgb.cluster_expansion + ) - mjob = pz.ZacrosParametersScanJob( reference=job, parameters=parameters ) + mjob = pz.ZacrosParametersScanJob(reference=job, parameters=parameters) results = mjob.run() except pz.ZacrosExecutableNotFoundError: - print( "Warning: The calculation FAILED because the zacros executable is not available!" 
) - print( " For testing purposes, now we load precalculated results.") + print("Warning: The calculation FAILED because the zacros executable is not available!") + print(" For testing purposes, now we load precalculated results.") - mjob = scm.plams.load( test_folder / 'test_ZacrosParametersScanJob.data/plamsjob/plamsjob.dill' ) + mjob = scm.plams.load(test_folder / "test_ZacrosParametersScanJob.data/plamsjob/plamsjob.dill") results = mjob.results output = "" - if( results.job.ok() ): + if results.job.ok(): x_CO = [] ac_O = [] ac_CO = [] TOF_CO2 = [] results_dict = results.turnover_frequency() - results_dict = results.average_coverage( last=3, update=results_dict ) + results_dict = results.average_coverage(last=3, update=results_dict) for i in range(len(results_dict)): - x_CO.append( results_dict[i]['x_CO'] ) - ac_O.append( results_dict[i]['average_coverage']['O*'] ) - ac_CO.append( results_dict[i]['average_coverage']['CO*'] ) - TOF_CO2.append( results_dict[i]['turnover_frequency']['CO2'] ) + x_CO.append(results_dict[i]["x_CO"]) + ac_O.append(results_dict[i]["average_coverage"]["O*"]) + ac_CO.append(results_dict[i]["average_coverage"]["CO*"]) + TOF_CO2.append(results_dict[i]["turnover_frequency"]["CO2"]) output += "----------------------------------------------\n" - output += "%4s"%"cond"+" %8s"%"x_CO"+" %10s"%"ac_O"+" %10s"%"ac_CO"+" %10s"%"TOF_CO2\n" + output += "%4s" % "cond" + " %8s" % "x_CO" + " %10s" % "ac_O" + " %10s" % "ac_CO" + " %10s" % "TOF_CO2\n" output += "----------------------------------------------\n" for i in range(len(x_CO)): - output += "%4d"%i+" %8.2f"%x_CO[i]+" %10.6f"%ac_O[i]+" %10.6f"%ac_CO[i]+" %10.6f"%TOF_CO2[i]+"\n" + output += ( + "%4d" % i + + " %8.2f" % x_CO[i] + + " %10.6f" % ac_O[i] + + " %10.6f" % ac_CO[i] + + " %10.6f" % TOF_CO2[i] + + "\n" + ) scm.plams.finish() @@ -96,5 +104,4 @@ def test_ZacrosParametersScanJob(test_folder, tmp_path): 6 0.80 0.000000 1.000000 0.000589\ """ - assert( pz.utils.compare( output, expectedOutput, rel_error=0.1 ) ) - + assert pz.utils.compare(output, expectedOutput, rel_error=0.1) diff --git a/tests/test_ZacrosParametersScanSteadyStateJob.py b/tests/test_ZacrosParametersScanSteadyStateJob.py index 8c3fdae..ee18d42 100644 --- a/tests/test_ZacrosParametersScanSteadyStateJob.py +++ b/tests/test_ZacrosParametersScanSteadyStateJob.py @@ -8,22 +8,22 @@ def test_ZacrosParametersScanSteadyStateJob(test_folder, tmp_path): - print( "----------------------------------------------------------------" ) - print( ">>> Testing ZacrosParametersScanJob(+ZacrosSteadyStateJob) class" ) - print( "----------------------------------------------------------------" ) + print("----------------------------------------------------------------") + print(">>> Testing ZacrosParametersScanJob(+ZacrosSteadyStateJob) class") + print("----------------------------------------------------------------") lh = pz.models.LangmuirHinshelwood() - #--------------------------------------------- + # --------------------------------------------- # Calculation Settings - #--------------------------------------------- - scm.plams.init(folder=tmp_path / 'test_ZacrosParametersScanSteadyStateJob') + # --------------------------------------------- + scm.plams.init(folder=tmp_path / "test_ZacrosParametersScanSteadyStateJob") ## Run as many job simultaneously as there are cpu on the system - #maxjobs = multiprocessing.cpu_count() - #scm.plams.config.default_jobrunner = scm.plams.JobRunner(parallel=True, maxjobs=maxjobs) - #scm.plams.config.job.runscript.nproc = 1 - 
#print('Running up to {} jobs in parallel simultaneously'.format(maxjobs)) + # maxjobs = multiprocessing.cpu_count() + # scm.plams.config.default_jobrunner = scm.plams.JobRunner(parallel=True, maxjobs=maxjobs) + # scm.plams.config.job.runscript.nproc = 1 + # print('Running up to {} jobs in parallel simultaneously'.format(maxjobs)) try: dt = 1.0e-5 @@ -32,64 +32,73 @@ def test_ZacrosParametersScanSteadyStateJob(test_folder, tmp_path): sett.random_seed = 1609 sett.temperature = 500.0 sett.pressure = 1.000 - sett.species_numbers = ('time', dt) - sett.max_time = 100*dt + sett.species_numbers = ("time", dt) + sett.max_time = 100 * dt - job = pz.ZacrosJob( settings=sett, lattice=lh.lattice, mechanism=lh.mechanism, cluster_expansion=lh.cluster_expansion ) + job = pz.ZacrosJob( + settings=sett, lattice=lh.lattice, mechanism=lh.mechanism, cluster_expansion=lh.cluster_expansion + ) ss_sett = pz.Settings() ss_sett.turnover_frequency.nbatch = 20 ss_sett.turnover_frequency.confidence = 0.96 ss_sett.turnover_frequency.nreplicas = 2 - ss_sett.scaling.enabled = 'T' + ss_sett.scaling.enabled = "T" ss_sett.scaling.partial_equilibrium_index_threshold = 0.1 ss_sett.scaling.upper_bound = 10 - ss_sett.scaling.max_time = 10*dt - ss_sett.scaling.species_numbers = ('time', dt) + ss_sett.scaling.max_time = 10 * dt + ss_sett.scaling.species_numbers = ("time", dt) ss_parameters = pz.ZacrosSteadyStateJob.Parameters() - ss_parameters.add( 'max_time', 'restart.max_time', 2*sett.max_time*( numpy.arange(20)+1 )**2 ) + ss_parameters.add("max_time", "restart.max_time", 2 * sett.max_time * (numpy.arange(20) + 1) ** 2) - ss_job = pz.ZacrosSteadyStateJob( settings=ss_sett, reference=job, parameters=ss_parameters ) + ss_job = pz.ZacrosSteadyStateJob(settings=ss_sett, reference=job, parameters=ss_parameters) ps_parameters = pz.ZacrosParametersScanJob.Parameters() - ps_parameters.add( 'x_CO', 'molar_fraction.CO', [0.40, 0.50] ) - ps_parameters.add( 'x_O2', 'molar_fraction.O2', lambda params: 1.0-params['x_CO'] ) - ps_parameters.set_generator( pz.ZacrosParametersScanJob.meshgridGenerator ) + ps_parameters.add("x_CO", "molar_fraction.CO", [0.40, 0.50]) + ps_parameters.add("x_O2", "molar_fraction.O2", lambda params: 1.0 - params["x_CO"]) + ps_parameters.set_generator(pz.ZacrosParametersScanJob.meshgridGenerator) - ps_job = pz.ZacrosParametersScanJob( reference=ss_job, parameters=ps_parameters ) + ps_job = pz.ZacrosParametersScanJob(reference=ss_job, parameters=ps_parameters) results = ps_job.run() except pz.ZacrosExecutableNotFoundError: - print( "Warning: The calculation FAILED because the zacros executable is not available!" 
) - print( " For testing purposes, now we load precalculated results.") + print("Warning: The calculation FAILED because the zacros executable is not available!") + print(" For testing purposes, now we load precalculated results.") - ps_job = scm.plams.load( test_folder / 'test_ZacrosParametersScanSteadyStateJob.data/plamsjob/plamsjob.dill' ) + ps_job = scm.plams.load(test_folder / "test_ZacrosParametersScanSteadyStateJob.data/plamsjob/plamsjob.dill") results = ps_job.results output = "" - if( results.job.ok() ): + if results.job.ok(): x_CO = [] ac_O = [] ac_CO = [] TOF_CO2 = [] results_dict = results.turnover_frequency() - results_dict = results.average_coverage( last=3, update=results_dict ) + results_dict = results.average_coverage(last=3, update=results_dict) for i in range(len(results_dict)): - x_CO.append( results_dict[i]['x_CO'] ) - ac_O.append( results_dict[i]['average_coverage']['O*'] ) - ac_CO.append( results_dict[i]['average_coverage']['CO*'] ) - TOF_CO2.append( results_dict[i]['turnover_frequency']['CO2'] ) + x_CO.append(results_dict[i]["x_CO"]) + ac_O.append(results_dict[i]["average_coverage"]["O*"]) + ac_CO.append(results_dict[i]["average_coverage"]["CO*"]) + TOF_CO2.append(results_dict[i]["turnover_frequency"]["CO2"]) output += "------------------------------------------------\n" - output += "%4s"%"cond"+" %8s"%"x_CO"+" %10s"%"ac_O"+" %10s"%"ac_CO"+" %12s"%"TOF_CO2\n" + output += "%4s" % "cond" + " %8s" % "x_CO" + " %10s" % "ac_O" + " %10s" % "ac_CO" + " %12s" % "TOF_CO2\n" output += "------------------------------------------------\n" for i in range(len(x_CO)): - output += "%4d"%i+" %8.2f"%x_CO[i]+" %10.6f"%ac_O[i]+" %10.6f"%ac_CO[i]+" %12.6f"%TOF_CO2[i]+"\n" + output += ( + "%4d" % i + + " %8.2f" % x_CO[i] + + " %10.6f" % ac_O[i] + + " %10.6f" % ac_CO[i] + + " %12.6f" % TOF_CO2[i] + + "\n" + ) scm.plams.finish() @@ -103,4 +112,4 @@ def test_ZacrosParametersScanSteadyStateJob(test_folder, tmp_path): 1 0.50 0.446042 0.253333 301.875030\ """ - assert( pz.utils.compare( output, expectedOutput, rel_error=0.1 ) ) + assert pz.utils.compare(output, expectedOutput, rel_error=0.1) diff --git a/tests/test_ZacrosResults.py b/tests/test_ZacrosResults.py index c6eab11..119edbc 100644 --- a/tests/test_ZacrosResults.py +++ b/tests/test_ZacrosResults.py @@ -3,61 +3,63 @@ def test_ZacrosResults(test_folder, tmp_path): - print( "---------------------------------------------------" ) - print( ">>> Testing ZacrosResults class" ) - print( "---------------------------------------------------" ) + print("---------------------------------------------------") + print(">>> Testing ZacrosResults class") + print("---------------------------------------------------") - #--------------------------------------------- + # --------------------------------------------- # Species: - #--------------------------------------------- + # --------------------------------------------- # - Gas-species: CO_gas = pz.Species("CO") O2_gas = pz.Species("O2") CO2_gas = pz.Species("CO2", gas_energy=-2.337) # -) Surface species: - s0 = pz.Species("*", 1) # Empty adsorption site + s0 = pz.Species("*", 1) # Empty adsorption site CO_ads = pz.Species("CO*", 1) O_ads = pz.Species("O*", 1) - #--------------------------------------------- + # --------------------------------------------- # Lattice setup: - #--------------------------------------------- - myLattice = pz.Lattice(lattice_type=pz.Lattice.RECTANGULAR, lattice_constant=1.0, repeat_cell=[20,20]) + # --------------------------------------------- + myLattice = 
pz.Lattice(lattice_type=pz.Lattice.RECTANGULAR, lattice_constant=1.0, repeat_cell=[20, 20]) - #--------------------------------------------- + # --------------------------------------------- # Clusters: - #--------------------------------------------- + # --------------------------------------------- CO_point = pz.Cluster(species=[CO_ads], energy=-1.3) O_point = pz.Cluster(species=[O_ads], energy=-2.3) - #--------------------------------------------- + # --------------------------------------------- # Elementary Reactions - #--------------------------------------------- + # --------------------------------------------- # CO_adsorption: - CO_adsorption = pz.ElementaryReaction( initial=[s0,CO_gas], - final=[CO_ads], - reversible=False, - pre_expon=10.0, - label="CO_adsorption") + CO_adsorption = pz.ElementaryReaction( + initial=[s0, CO_gas], final=[CO_ads], reversible=False, pre_expon=10.0, label="CO_adsorption" + ) # O2_adsorption: - O2_adsorption = pz.ElementaryReaction( initial=[s0,s0,O2_gas], - final=[O_ads,O_ads], - neighboring=[(0, 1)], - reversible=False, - pre_expon=2.5, - label="O2_adsorption") + O2_adsorption = pz.ElementaryReaction( + initial=[s0, s0, O2_gas], + final=[O_ads, O_ads], + neighboring=[(0, 1)], + reversible=False, + pre_expon=2.5, + label="O2_adsorption", + ) # CO_oxidation: - CO_oxidation = pz.ElementaryReaction( initial=[CO_ads, O_ads], - final=[s0, s0, CO2_gas], - neighboring=[(0, 1)], - reversible=False, - pre_expon=1.0e+20, - label="CO_oxidation") + CO_oxidation = pz.ElementaryReaction( + initial=[CO_ads, O_ads], + final=[s0, s0, CO2_gas], + neighboring=[(0, 1)], + reversible=False, + pre_expon=1.0e20, + label="CO_oxidation", + ) - scm.plams.init(folder=tmp_path / 'test_ZacrosResults') + scm.plams.init(folder=tmp_path / "test_ZacrosResults") # Settings: sett = pz.Settings() @@ -66,83 +68,109 @@ def test_ZacrosResults(test_folder, tmp_path): sett.random_seed = 953129 sett.temperature = 500.0 sett.pressure = 1.0 - sett.snapshots = ('time', 0.1) - sett.process_statistics = ('time', 0.1) - sett.species_numbers = ('time', 0.1) - sett.event_report = 'off' - sett.max_steps = 'infinity' + sett.snapshots = ("time", 0.1) + sett.process_statistics = ("time", 0.1) + sett.species_numbers = ("time", 0.1) + sett.event_report = "off" + sett.max_steps = "infinity" sett.max_time = 1.0 sett.wall_time = 3600 - job = pz.ZacrosJob( settings=sett, - lattice=myLattice, - mechanism=[CO_adsorption, O2_adsorption, CO_oxidation], - cluster_expansion=[CO_point, O_point] ) + job = pz.ZacrosJob( + settings=sett, + lattice=myLattice, + mechanism=[CO_adsorption, O2_adsorption, CO_oxidation], + cluster_expansion=[CO_point, O_point], + ) - #----------------------- + # ----------------------- # Running the job - #----------------------- + # ----------------------- try: results = job.run() - if( not job.ok() ): + if not job.ok(): raise scm.plams.JobError("Error: The Zacros calculation FAILED!") except pz.ZacrosExecutableNotFoundError: - print( "Warning: The calculation FAILED because the zacros executable is not available!" 
) - print( " For testing purposes, now we load precalculated results.") + print("Warning: The calculation FAILED because the zacros executable is not available!") + print(" For testing purposes, now we load precalculated results.") - job = scm.plams.load( test_folder / "test_ZacrosResults.data/plamsjob/plamsjob.dill" ) + job = scm.plams.load(test_folder / "test_ZacrosResults.data/plamsjob/plamsjob.dill") results = job.results - #----------------------- + # ----------------------- # Analyzing the results - #----------------------- + # ----------------------- reactions = results.get_reaction_network() - assert( list(reactions.keys()) == ['CO_adsorption', 'O2_adsorption', 'CO_oxidation'] ) + assert list(reactions.keys()) == ["CO_adsorption", "O2_adsorption", "CO_oxidation"] - assert( list(reactions.values()) == \ - ['CO + *(StTp1) -> CO*(StTp1)', - 'O2 + *(StTp1) + *(StTp1) -> O*(StTp1) + O*(StTp1)', - 'CO*(StTp1) + O*(StTp1) -> CO2 + *(StTp1) + *(StTp1)'] ) + assert list(reactions.values()) == [ + "CO + *(StTp1) -> CO*(StTp1)", + "O2 + *(StTp1) + *(StTp1) -> O*(StTp1) + O*(StTp1)", + "CO*(StTp1) + O*(StTp1) -> CO2 + *(StTp1) + *(StTp1)", + ] provided_quantities = results.provided_quantities() - assert( list(provided_quantities.keys()) == \ - ['Entry', 'Nevents', 'Time', 'Temperature', 'Energy', 'CO*', 'O*', 'CO', 'O2', 'CO2'] ) + assert list(provided_quantities.keys()) == [ + "Entry", + "Nevents", + "Time", + "Temperature", + "Energy", + "CO*", + "O*", + "CO", + "O2", + "CO2", + ] - assert( provided_quantities["Time"][0:5] == [0.0, 0.1, 0.2, 0.30000000000000004, 0.4] ) + assert provided_quantities["Time"][0:5] == [0.0, 0.1, 0.2, 0.30000000000000004, 0.4] - assert( provided_quantities["Energy"][0:5] == \ - [0.0, -362.40000000000106, -435.4000000000014, -481.8000000000016, -531.5000000000013] ) + assert provided_quantities["Energy"][0:5] == [ + 0.0, + -362.40000000000106, + -435.4000000000014, + -481.8000000000016, + -531.5000000000013, + ] - assert( provided_quantities["CO*"][0:5] == [0, 24, 20, 15, 9] ) + assert provided_quantities["CO*"][0:5] == [0, 24, 20, 15, 9] - assert( provided_quantities["CO2"][0:5] == [0, 100, 202, 309, 398] ) + assert provided_quantities["CO2"][0:5] == [0, 100, 202, 309, 398] - assert( results.gas_species_names() == [ "CO", "O2", "CO2" ] ) + assert results.gas_species_names() == ["CO", "O2", "CO2"] - assert( results.surface_species_names() == [ "CO*", "O*" ] ) + assert results.surface_species_names() == ["CO*", "O*"] - assert( results.site_type_names() == [ "StTp1" ] ) + assert results.site_type_names() == ["StTp1"] lattice_states = results.lattice_states() - lattice_states[3].plot( pause=2, close=True ) + lattice_states[3].plot(pause=2, close=True) - results.plot_lattice_states( lattice_states, pause=2, close=True ) + results.plot_lattice_states(lattice_states, pause=2, close=True) - results.plot_molecule_numbers( results.gas_species_names(), pause=2, close=True ) + results.plot_molecule_numbers(results.gas_species_names(), pause=2, close=True) process_statistics = results.get_process_statistics() - assert( process_statistics[1]["time"] == 0.1 ) - assert( process_statistics[10]["occurence_frequency"] == - {'CO_adsorption': 837.0000000000001, 'O2_adsorption': 550.0000000000001, 'CO_oxidation': 835.0000000000001} ) - assert( process_statistics[10]["number_of_events"] == - {'CO_adsorption': 837, 'O2_adsorption': 550, 'CO_oxidation': 835} ) - - results.plot_process_statistics( process_statistics[10], key="occurence_frequency", log_scale=True, pause=2, close=True ) - 
results.plot_process_statistics( process_statistics[10], key="number_of_events", pause=2, close=True ) + assert process_statistics[1]["time"] == 0.1 + assert process_statistics[10]["occurence_frequency"] == { + "CO_adsorption": 837.0000000000001, + "O2_adsorption": 550.0000000000001, + "CO_oxidation": 835.0000000000001, + } + assert process_statistics[10]["number_of_events"] == { + "CO_adsorption": 837, + "O2_adsorption": 550, + "CO_oxidation": 835, + } + + results.plot_process_statistics( + process_statistics[10], key="occurence_frequency", log_scale=True, pause=2, close=True + ) + results.plot_process_statistics(process_statistics[10], key="number_of_events", pause=2, close=True) scm.plams.finish() diff --git a/tests/test_ZacrosSteadyStateJob.py b/tests/test_ZacrosSteadyStateJob.py index 192e623..e32c7d1 100644 --- a/tests/test_ZacrosSteadyStateJob.py +++ b/tests/test_ZacrosSteadyStateJob.py @@ -8,47 +8,48 @@ def test_ZacrosSteadyStateJob(test_folder, tmp_path): - print( "---------------------------------------------------" ) - print( ">>> Testing ZacrosSteadyStateJob class" ) - print( "---------------------------------------------------" ) + print("---------------------------------------------------") + print(">>> Testing ZacrosSteadyStateJob class") + print("---------------------------------------------------") zgb = pz.models.ZiffGulariBarshad() - #--------------------------------------------- + # --------------------------------------------- # Calculation Settings - #--------------------------------------------- - scm.plams.init(folder=tmp_path / 'test_ZacrosSteadyStateJob') + # --------------------------------------------- + scm.plams.init(folder=tmp_path / "test_ZacrosSteadyStateJob") try: sett = pz.Settings() sett.random_seed = 953129 sett.temperature = 500.0 sett.pressure = 1.0 - sett.species_numbers = ('time', 0.1) + sett.species_numbers = ("time", 0.1) sett.max_time = 10.0 sett.molar_fraction.CO = 0.42 sett.molar_fraction.O2 = 1.0 - sett.molar_fraction.CO parameters = pz.ZacrosSteadyStateJob.Parameters() - parameters.add( 'max_time', 'restart.max_time', 2*sett.max_time*( numpy.arange(10)+1 )**3 ) + parameters.add("max_time", "restart.max_time", 2 * sett.max_time * (numpy.arange(10) + 1) ** 3) - job = pz.ZacrosJob( settings=sett, lattice=zgb.lattice, mechanism=zgb.mechanism, - cluster_expansion=zgb.cluster_expansion ) + job = pz.ZacrosJob( + settings=sett, lattice=zgb.lattice, mechanism=zgb.mechanism, cluster_expansion=zgb.cluster_expansion + ) sett = pz.Settings() sett.turnover_frequency.nbatch = 20 sett.turnover_frequency.confidence = 0.97 sett.turnover_frequency.nreplicas = 2 - mjob = pz.ZacrosSteadyStateJob( settings=sett, reference=job, parameters=parameters ) + mjob = pz.ZacrosSteadyStateJob(settings=sett, reference=job, parameters=parameters) results = mjob.run() except pz.ZacrosExecutableNotFoundError: - print( "Warning: The calculation FAILED because the zacros executable is not available!" 
) - print( " For testing purposes, now we load precalculated results.") + print("Warning: The calculation FAILED because the zacros executable is not available!") + print(" For testing purposes, now we load precalculated results.") - mjob = scm.plams.load( test_folder / 'test_ZacrosSteadyStateJob.data/plamsjob/plamsjob.dill' ) + mjob = scm.plams.load(test_folder / "test_ZacrosSteadyStateJob.data/plamsjob/plamsjob.dill") results = mjob.results scm.plams.finish() @@ -57,12 +58,18 @@ def test_ZacrosSteadyStateJob(test_folder, tmp_path): if mjob.ok(): output += "------------------------------------------------\n" - output += "%4s"%"iter"+" %10s"%"TOF_CO2"+" %10s"%"error"+" %10s"%"max_time"+" %10s"%"conv?\n" + output += "%4s" % "iter" + " %10s" % "TOF_CO2" + " %10s" % "error" + " %10s" % "max_time" + " %10s" % "conv?\n" output += "------------------------------------------------\n" - for i,step in enumerate(results.history()): - output += "%4d"%i+" %10.5f"%step['turnover_frequency']['CO2']+" %10.5f"%step['turnover_frequency_error']['CO2'] \ - +" %10d"%step['max_time']+" %10s"%step['converged']['CO2']+"\n" + for i, step in enumerate(results.history()): + output += ( + "%4d" % i + + " %10.5f" % step["turnover_frequency"]["CO2"] + + " %10.5f" % step["turnover_frequency_error"]["CO2"] + + " %10d" % step["max_time"] + + " %10s" % step["converged"]["CO2"] + + "\n" + ) print(output) @@ -75,5 +82,4 @@ def test_ZacrosSteadyStateJob(test_folder, tmp_path): 2 0.60063 0.01698 540 True\ """ - assert( pz.utils.compare( output, expectedOutput, rel_error=0.1 ) ) - + assert pz.utils.compare(output, expectedOutput, rel_error=0.1) diff --git a/tests/test_post_process.py b/tests/test_post_process.py index fa961f1..f200ab3 100644 --- a/tests/test_post_process.py +++ b/tests/test_post_process.py @@ -8,12 +8,12 @@ def test_post_process(tmp_path): - print( "---------------------------------------------------" ) - print( ">>> Testing Zacros post_process methods" ) - print( "---------------------------------------------------" ) + print("---------------------------------------------------") + print(">>> Testing Zacros post_process methods") + print("---------------------------------------------------") - zgb = pz.models.ZiffGulariBarshad( repeat_cell=[20,20] ) - workdir = tmp_path / 'test_post_process' + zgb = pz.models.ZiffGulariBarshad(repeat_cell=[20, 20]) + workdir = tmp_path / "test_post_process" scm.plams.init(folder=str(workdir)) # Settings: @@ -23,35 +23,36 @@ def test_post_process(tmp_path): sett.random_seed = 953129 sett.temperature = 500.0 sett.pressure = 1.0 - sett.snapshots = ('time', 0.1) - sett.process_statistics = ('time', 0.1) - sett.species_numbers = ('time', 0.1) - sett.event_report = 'off' - sett.max_steps = 'infinity' + sett.snapshots = ("time", 0.1) + sett.process_statistics = ("time", 0.1) + sett.species_numbers = ("time", 0.1) + sett.event_report = "off" + sett.max_steps = "infinity" sett.max_time = 1.0 sett.wall_time = 3600 - job = pz.ZacrosJob( settings=sett, lattice=zgb.lattice, mechanism=zgb.mechanism, - cluster_expansion=zgb.cluster_expansion ) + job = pz.ZacrosJob( + settings=sett, lattice=zgb.lattice, mechanism=zgb.mechanism, cluster_expansion=zgb.cluster_expansion + ) - #----------------------- + # ----------------------- # Running the job - #----------------------- + # ----------------------- load_precalculated = False try: results = job.run() except pz.ZacrosExecutableNotFoundError: - print( "Warning: The calculation FAILED because the zacros executable is not available!" 
) - print( " For testing purposes, now we load precalculated results.") + print("Warning: The calculation FAILED because the zacros executable is not available!") + print(" For testing purposes, now we load precalculated results.") load_precalculated = True scm.plams.finish() - if( load_precalculated ): - job = pz.ZacrosJob.load_external( path="tests/test_ZacrosResults.data/plamsjob" ) + if load_precalculated: + job = pz.ZacrosJob.load_external(path="tests/test_ZacrosResults.data/plamsjob") else: - job = pz.ZacrosJob.load_external( path=workdir / 'plamsjob') + job = pz.ZacrosJob.load_external(path=workdir / "plamsjob") data = job.results.provided_quantities() output = str(data) @@ -61,4 +62,4 @@ def test_post_process(tmp_path): {'Entry': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], 'Nevents': [0, 346, 614, 888, 1117, 1314, 1535, 1726, 1920, 2056, 2222], 'Time': [0.0, 0.1, 0.2, 0.30000000000000004, 0.4, 0.5, 0.6, 0.7, 0.7999999999999999, 0.8999999999999999, 0.9999999999999999], 'Temperature': [500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0], 'Energy': [0.0, -362.40000000000106, -435.4000000000014, -481.8000000000016, -531.5000000000013, -514.4000000000016, -528.9000000000013, -530.2000000000013, -614.3999999999997, -653.499999999999, -612.0999999999998], 'CO*': [0, 24, 20, 15, 9, 10, 7, 8, 2, 2, 2], 'O*': [0, 144, 178, 201, 226, 218, 226, 226, 266, 283, 265], 'CO': [0, -124, -222, -324, -407, -488, -573, -650, -716, -767, -837], 'O2': [0, -122, -190, -255, -312, -348, -396, -434, -490, -524, -550], 'CO2': [0, 100, 202, 309, 398, 478, 566, 642, 714, 765, 835]}\ """ - assert( pz.utils.compare( output, expectedOutput, 1e-3 ) ) + assert pz.utils.compare(output, expectedOutput, 1e-3) diff --git a/utils/compareReports.py b/utils/compareReports.py index d16df74..03a6ee1 100644 --- a/utils/compareReports.py +++ b/utils/compareReports.py @@ -1,6 +1,7 @@ import re -def compare( report1, report2, error=None, abs_error=None, rel_error=None ): + +def compare(report1, report2, error=None, abs_error=None, rel_error=None): """ Compare reports word by word. Float numbers are compared by using a given error. 
""" @@ -28,30 +29,41 @@ def compare( report1, report2, error=None, abs_error=None, rel_error=None ): words2 = lines2[i].split() if len(words1) != len(words2): - print("Mismatch located in number of words (line="+str(i+1)+")") + print("Mismatch located in number of words (line=" + str(i + 1) + ")") return False for j in range(len(words1)): try: float1 = float(words1[j]) float2 = float(words2[j]) - delta = abs(float1-float2) + delta = abs(float1 - float2) - thr1 = abs_error + rel_error*abs(float1) - thr2 = abs_error + rel_error*abs(float2) + thr1 = abs_error + rel_error * abs(float1) + thr2 = abs_error + rel_error * abs(float2) if delta > thr1 or delta > thr2: - print("Mismatch located in comparing report (line="+str(i+1)+")") - print("> "+str(float1)+" ~ "+str(float2)+"; delta="+str(delta)+", thr1="+str(thr1)+", thr2="+str(thr2)) + print("Mismatch located in comparing report (line=" + str(i + 1) + ")") + print( + "> " + + str(float1) + + " ~ " + + str(float2) + + "; delta=" + + str(delta) + + ", thr1=" + + str(thr1) + + ", thr2=" + + str(thr2) + ) print("Lines:") - print("report1> "+lines1[i]) - print("report2> "+lines2[i]) + print("report1> " + lines1[i]) + print("report2> " + lines2[i]) return False except ValueError: if words1[j] != words2[j]: - print("Mismatch located in comparing report (line="+str(i+1)+")") - print("> "+words1[j]+" ~ "+words2[j]) + print("Mismatch located in comparing report (line=" + str(i + 1) + ")") + print("> " + words1[j] + " ~ " + words2[j]) return False return True