remove all redundant annotations

HydrogenSulfate committed Dec 21, 2023
1 parent eecad82 commit 6cb8685
Showing 18 changed files with 95 additions and 1,945 deletions.
271 changes: 47 additions & 224 deletions deepmd/descriptor/se_a.py

Large diffs are not rendered by default.

103 changes: 1 addition & 102 deletions deepmd/entrypoints/freeze.py
@@ -354,23 +354,7 @@ def freeze_graph(
InputSpec(shape=[None], dtype="float64"), # box
InputSpec(shape=[6], dtype="int32"), # mesh
{
# "coord": InputSpec(
# shape=[2880],
# dtype="float64"
# ),
# "type": InputSpec(
# shape=[960],
# dtype="int32"
# ),
# "natoms_vec": InputSpec(
# shape=[4],
# dtype="int32"
# ),
"box": InputSpec(shape=[None], dtype="float64"),
# "default_mesh": InputSpec(
# shape=[6],
# dtype="int32"
# ),
},
"",
False,
@@ -380,17 +364,7 @@ def freeze_graph(
print(
f"[{name}, {param.shape}] generated name in static_model is: {param.name}"
)
# print(f"st_model.descrpt.buffer_rcut.name = {st_model.descrpt.buffer_rcut.name}")
# print(
# f"st_model.descrpt.buffer_ntypes.name = {st_model.descrpt.buffer_ntypes.name}"
# )
# print(
# f"st_model.fitting.buffer_dfparam.name = {st_model.fitting.buffer_dfparam.name}"
# )
# print(
# f"st_model.fitting.buffer_daparam.name = {st_model.fitting.buffer_daparam.name}"
# )
# skip pruning of the program so that parameters not used in forward (e.g. rcut, ntypes) are kept and can be retrieved on the C++ side
# skip pruning for program so as to keep buffers into files
skip_prune_program = True
print(f"==>> Set skip_prune_program = {skip_prune_program}")
paddle.jit.save(st_model, output, skip_prune_program=skip_prune_program)
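
The point of the surviving `skip_prune_program` call above is that buffers such as `rcut` and `ntypes` never appear in `forward()`, so the default pruning would drop them from the exported program and the C++ inference side could no longer read them. A minimal sketch of the pattern, assuming a toy layer and made-up buffer names rather than the actual DeePMD model (`skip_prune_program` itself is taken from the diff; it may require a Paddle build that supports this save config):

```python
import paddle
from paddle.static import InputSpec


class TinyModel(paddle.nn.Layer):
    def __init__(self, rcut: float, ntypes: int):
        super().__init__()
        self.linear = paddle.nn.Linear(3, 1)
        # buffers that are never touched in forward(), but should survive export
        self.register_buffer("buffer_rcut", paddle.to_tensor(rcut, dtype="float64"))
        self.register_buffer("buffer_ntypes", paddle.to_tensor(ntypes, dtype="int32"))

    def forward(self, coord):
        return self.linear(coord)


model = TinyModel(rcut=6.0, ntypes=2)
static_model = paddle.jit.to_static(
    model, input_spec=[InputSpec(shape=[None, 3], dtype="float32")]
)
# With skip_prune_program=True (as in the diff above), the unused buffers are kept
# in the saved program instead of being pruned away, so a C++ consumer can read them.
paddle.jit.save(static_model, "./tiny_model", skip_prune_program=True)
```
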
@@ -475,12 +449,8 @@ def freeze_graph_multi(

def freeze(
*,
# checkpoint_folder: str,
input_file: str,
output: str,
# node_names: Optional[str] = None,
# nvnmd_weight: Optional[str] = None,
# united_model: bool = False,
**kwargs,
):
"""Freeze the graph in supplied folder.
@@ -494,78 +464,7 @@ def freeze(
**kwargs
other arguments
"""
# We retrieve our checkpoint fullpath
# checkpoint = tf.train.get_checkpoint_state(checkpoint_folder)
# input_checkpoint = checkpoint.model_checkpoint_path

# # expand the output file to full path
# output_graph = abspath(output)

# # Before exporting our graph, we need to precise what is our output node
# # This is how TF decides what part of the Graph he has to keep
# # and what part it can dump
# # NOTE: this variable is plural, because you can have multiple output nodes
# # node_names = "energy_test,force_test,virial_test,t_rcut"

# # We clear devices to allow TensorFlow to control
# # on which device it will load operations
# clear_devices = True

# # We import the meta graph and retrieve a Saver
# try:
# # In case of parallel training
# import horovod.tensorflow as _ # noqa: F401
# except ImportError:
# pass
# saver = tf.train.import_meta_graph(
# f"{input_checkpoint}.meta", clear_devices=clear_devices
# )

# # We retrieve the protobuf graph definition
# graph = tf.get_default_graph()
# try:
# input_graph_def = graph.as_graph_def()
# except google.protobuf.message.DecodeError as e:
# raise GraphTooLargeError(
# "The graph size exceeds 2 GB, the hard limitation of protobuf."
# " Then a DecodeError was raised by protobuf. You should "
# "reduce the size of your model."
# ) from e
# nodes = [n.name for n in input_graph_def.node]

# # We start a session and restore the graph weights
# with tf.Session() as sess:
# saver.restore(sess, input_checkpoint)
# model_type = run_sess(sess, "model_attr/model_type:0", feed_dict={}).decode(
# "utf-8"
# )
# if "modifier_attr/type" in nodes:
# modifier_type = run_sess(sess, "modifier_attr/type:0", feed_dict={}).decode(
# "utf-8"
# )
# else:
# modifier_type = None
# if nvnmd_weight is not None:
# save_weight(sess, nvnmd_weight) # nvnmd
# if model_type != "multi_task":
freeze_graph(
input_file,
output,
# sess,
# input_graph_def,
# nodes,
# model_type,
# modifier_type,
# output_graph,
# node_names,
)
# else:
# freeze_graph_multi(
# sess,
# input_graph_def,
# nodes,
# modifier_type,
# output_graph,
# node_names,
# united_model=united_model,
# )
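
With the TensorFlow checkpoint handling stripped out, the entry point reduces to a keyword-only wrapper around `freeze_graph`. A hedged usage sketch (the file names are placeholders, not taken from the repo):

```python
from deepmd.entrypoints.freeze import freeze

# keyword-only arguments, per the trimmed signature above
freeze(input_file="model.ckpt", output="model_frozen")
```
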
3 changes: 1 addition & 2 deletions deepmd/entrypoints/test.py
@@ -260,7 +260,7 @@ def test_ener(
data.add("energy", 1, atomic=False, must=False, high_prec=True)
data.add("force", 3, atomic=True, must=False, high_prec=False)
data.add("virial", 9, atomic=False, must=False, high_prec=False)
if dp.has_efield: # False
if dp.has_efield:
data.add("efield", 3, atomic=True, must=True, high_prec=False)
if has_atom_ener:
data.add("atom_ener", 1, atomic=True, must=True, high_prec=False)
@@ -278,7 +278,6 @@ def test_ener(
numb_test = min(nframes, numb_test)

coord = test_data["coord"][:numb_test].reshape([numb_test, -1])

box = test_data["box"][:numb_test]
if dp.has_efield:
efield = test_data["efield"][:numb_test].reshape([numb_test, -1])
20 changes: 4 additions & 16 deletions deepmd/entrypoints/train.py
@@ -207,7 +207,7 @@ def _do_work(jdata: Dict[str, Any], run_opt: RunOptions, is_compress: bool = Fal
dp_random.seed(seed)

# setup data modifier
modifier = get_modifier(jdata["model"].get("modifier", None)) # None
modifier = get_modifier(jdata["model"].get("modifier", None))

# check the multi-task mode
multi_task_mode = "fitting_net_dict" in jdata["model"]
@@ -275,7 +275,6 @@ def _do_work(jdata: Dict[str, Any], run_opt: RunOptions, is_compress: bool = Fal
origin_type_map = get_data(
jdata["training"]["training_data"], rcut, None, modifier
).get_type_map()
print("model.build")
model.build(train_data, stop_batch, origin_type_map=origin_type_map)

if not is_compress:
@@ -377,7 +376,7 @@ def get_nbor_stat(jdata, rcut, one_type: bool = False):
if type_map and len(type_map) == 0:
type_map = None
multi_task_mode = "data_dict" in jdata["training"]
if not multi_task_mode: # here
if not multi_task_mode:
train_data = get_data(
jdata["training"]["training_data"], max_rcut, type_map, None
)
@@ -419,15 +418,6 @@ def get_nbor_stat(jdata, rcut, one_type: bool = False):

min_nbor_dist, max_nbor_size = neistat.get_stat(train_data)

# moved from traier.py as duplicated
# TODO: this is a simple fix but we should have a clear
# architecture to call neighbor stat
# tf.constant(
# min_nbor_dist,
# name="train_attr/min_nbor_dist",
# dtype=GLOBAL_ENER_FLOAT_PRECISION,
# )
# tf.constant(max_nbor_size, name="train_attr/max_nbor_size", dtype=tf.int32)
return min_nbor_dist, max_nbor_size


@@ -473,9 +463,7 @@ def update_one_sel(jdata, descriptor):
if descriptor["type"] == "loc_frame":
return descriptor
rcut = descriptor["rcut"]
tmp_sel = get_sel(
jdata, rcut, one_type=descriptor["type"] in ("se_atten",)
) # [38 72], the maximum number of neighbor atoms within each atom's cutoff radius
tmp_sel = get_sel(jdata, rcut, one_type=descriptor["type"] in ("se_atten",))
sel = descriptor["sel"] # [46, 92]
if isinstance(sel, int):
# convert to list and finally convert back to int
@@ -507,7 +495,7 @@ def update_sel(jdata):
if descrpt_data["type"] == "hybrid":
for ii in range(len(descrpt_data["list"])):
descrpt_data["list"][ii] = update_one_sel(jdata, descrpt_data["list"][ii])
else: # here
else:
descrpt_data = update_one_sel(jdata, descrpt_data)
jdata["model"]["descriptor"] = descrpt_data
return jdata
5 changes: 1 addition & 4 deletions deepmd/env.py
@@ -372,10 +372,7 @@ def get_module(module_name: str) -> "ModuleType":
raise FileNotFoundError(f"module {module_name} does not exist")
else:
try:
# module = tf.load_op_library(str(module_file))
import paddle_deepmd_lib

module = paddle_deepmd_lib
import paddle_deepmd_lib as module

except tf.errors.NotFoundError as e:
# check CXX11_ABI_FLAG compatibility
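
The change above replaces the two-step import-and-assign with a single aliased import: instead of loading a compiled op library at runtime (`tf.load_op_library` in the TF version), the Paddle port imports a prebuilt custom-op package and exposes it under the local name `module`. A hedged sketch of that pattern, with the error handling reduced to a plain ImportError and a made-up message:

```python
try:
    import paddle_deepmd_lib as module  # package name taken from the diff
except ImportError as err:
    # assumed failure handling, not the repo's actual wording
    raise FileNotFoundError(
        "paddle_deepmd_lib is not installed; build and install the Paddle custom ops first"
    ) from err
```
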
61 changes: 6 additions & 55 deletions deepmd/fit/ener.py
@@ -431,7 +431,6 @@ def _build_lower(
bias_atom_e=0.0,
type_suffix="",
suffix="",
# reuse=None,
type_i=None,
):
# cut-out inputs
@@ -462,51 +461,19 @@ def _build_lower(
ext_aparam = paddle.cast(ext_aparam, self.fitting_precision)
layer = paddle.concat([layer, ext_aparam], axis=1)

# if nvnmd_cfg.enable:
# one_layer = one_layer_nvnmd
# else:
# one_layer = one_layer_deepmd
for ii in range(0, len(self.n_neuron)):
# if self.layer_name is not None and self.layer_name[ii] is not None:
# layer_suffix = "share_" + self.layer_name[ii] + type_suffix
# layer_reuse = tf.AUTO_REUSE
# else:
# layer_suffix = "layer_" + str(ii) + type_suffix + suffix
# layer_reuse = reuse
if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii - 1]:
layer += self.one_layers[type_i][ii](layer)
else:
layer = self.one_layers[type_i][ii](layer)
# print(f"use {ii} of {len(self.one_layers)}_{type_i}")
# if (not self.uniform_seed) and (self.seed is not None):
# self.seed += self.seed_shift
# if self.layer_name is not None and self.layer_name[-1] is not None:
# layer_suffix = "share_" + self.layer_name[-1] + type_suffix
# layer_reuse = tf.AUTO_REUSE
# else:
# layer_suffix = "final_layer" + type_suffix + suffix
# layer_reuse = reuse
if (not self.uniform_seed) and (self.seed is not None):
self.seed += self.seed_shift
final_layer = self.final_layers[type_i](
layer,
# 1,
# activation_fn=None,
# bavg=bias_atom_e,
# name=layer_suffix,
# reuse=layer_reuse,
# seed=self.seed,
# precision=self.fitting_precision,
# trainable=self.trainable[-1],
# uniform_seed=self.uniform_seed,
# initial_variables=self.fitting_net_variables,
# mixed_prec=self.mixed_prec,
# final_layer=True,
)

final_layer = self.final_layers[type_i](layer)
if (not self.uniform_seed) and (self.seed is not None):
self.seed += self.seed_shift

return final_layer # [natoms, 1]
return final_layer

def forward(
self,
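
The loop retained in `_build_lower` above adds a residual (ResNet-style) connection only when two consecutive hidden layers have the same width; otherwise the activation is simply replaced. A self-contained sketch of that rule, with made-up layer widths and a plain tanh MLP standing in for DeePMD's `one_layers`:

```python
import paddle

descrpt_dim = 16                    # assumed input width
n_neuron = [240, 240, 240]          # assumed hidden widths
in_dims = [descrpt_dim] + n_neuron[:-1]
layers = paddle.nn.LayerList(
    [
        paddle.nn.Sequential(paddle.nn.Linear(d_in, d_out), paddle.nn.Tanh())
        for d_in, d_out in zip(in_dims, n_neuron)
    ]
)


def fitting_trunk(x):
    for ii in range(len(n_neuron)):
        if ii >= 1 and n_neuron[ii] == n_neuron[ii - 1]:
            x = x + layers[ii](x)   # skip connection when widths match
        else:
            x = layers[ii](x)       # plain layer when the width changes
    return x


out = fitting_trunk(paddle.randn([4, descrpt_dim]))  # -> shape [4, 240]
```
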
@@ -577,9 +544,7 @@ def forward(
self.bias_atom_e[type_i] = self.bias_atom_e[type_i]
self.bias_atom_e = self.bias_atom_e[:ntypes_atom]

inputs = paddle.reshape(
inputs, [-1, natoms[0], self.dim_descrpt]
) # [1, all_atoms, M1*M2]
inputs = paddle.reshape(inputs, [-1, natoms[0], self.dim_descrpt])
if len(self.atom_ener):
# only for atom_ener
nframes = input_dict.get("nframes")
@@ -643,18 +608,6 @@ def forward(
start_index = 0
outs_list = []
for type_i in range(ntypes_atom):
# final_layer = inputs
# for layer_j in range(type_i * ntypes_atom, (type_i + 1) * ntypes_atom):
# final_layer = self.one_layers[layer_j](final_layer)
# final_layer = self.final_layers[type_i](final_layer)
# print(final_layer.shape)

# # concat the results
# if type_i < len(self.atom_ener) and self.atom_ener[type_i] is not None:
# zero_layer = inputs_zero
# for layer_j in range(type_i * ntypes_atom, (type_i + 1) * ntypes_atom):
# zero_layer = self.one_layers[layer_j](zero_layer)
# zero_layer = self.final_layers[type_i](zero_layer)
final_layer = self._build_lower(
start_index,
natoms[2 + type_i],
@@ -664,7 +617,6 @@ def forward(
bias_atom_e=0.0,
type_suffix="_type_" + str(type_i),
suffix=suffix,
# reuse=reuse,
type_i=type_i,
)
# concat the results
@@ -678,13 +630,12 @@ def forward(
bias_atom_e=0.0,
type_suffix="_type_" + str(type_i),
suffix=suffix,
# reuse=True,
type_i=type_i,
)
final_layer -= zero_layer
final_layer = paddle.reshape(
final_layer, [paddle.shape(inputs)[0], natoms[2 + type_i]]
) # [1, natoms]
)
outs_list.append(final_layer)
start_index += natoms[2 + type_i]
# concat the results
@@ -731,7 +682,7 @@ def forward(
),
[paddle.shape(inputs)[0], paddle.sum(natoms[2 : 2 + ntypes_atom]).item()],
)
outs = outs + self.add_type # type encoding (analogous to a transformer's positional encoding: each type has its own learned feature that is added to the original features)
outs = outs + self.add_type
outs *= atype_filter
self.atom_ener_after = outs
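
For context on `self.add_type` above (this is what the removed comment explained): each atom type owns one learnable feature, which is gathered by atom type and added onto the per-atom outputs, much like a positional encoding keyed on type rather than position. A minimal sketch with made-up sizes, not the actual fitting-net tensors:

```python
import paddle

nframes, natoms, ntypes = 1, 5, 2
per_type_feature = paddle.create_parameter([ntypes], dtype="float32")  # one learnable value per type
atype = paddle.to_tensor([[0, 0, 1, 1, 1]])   # atom types, shape [nframes, natoms]
outs = paddle.randn([nframes, natoms])        # per-atom outputs

# gather the per-type feature for every atom and add it to the outputs
add_type = paddle.reshape(
    paddle.gather(per_type_feature, paddle.reshape(atype, [-1])),
    [nframes, natoms],
)
outs = outs + add_type
```
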
