【PPSCI Export&Infer No.7】 volterra_ide (#807)
* 【PPSCI Export&Infer No.7】 volterra_ide

* fix docstyle
GreatV authored Mar 15, 2024
1 parent bee3a10 commit 14430f1
Showing 3 changed files with 96 additions and 1 deletion.
docs/zh/examples/volterra_ide.md (12 additions, 0 deletions)
@@ -14,6 +14,18 @@
    python volterra_ide.py mode=eval EVAL.pretrained_model_path=https://paddle-org.bj.bcebos.com/paddlescience/models/volterra_ide/volterra_ide_pretrained.pdparams
    ```

=== "模型导出命令"

``` sh
python volterra_ide.py mode=export
```

=== "模型推理命令"

``` sh
python volterra_ide.py mode=infer
```
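Both commands read the `INFER` section of the config; individual keys can also be overridden on the Hydra command line, for example (illustrative values):

``` sh
python volterra_ide.py mode=infer INFER.batch_size=32
```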

| Pretrained model | Metrics |
|:--| :--|
| [volterra_ide_pretrained.pdparams](https://paddle-org.bj.bcebos.com/paddlescience/models/volterra_ide/volterra_ide_pretrained.pdparams) | loss(L2Rel_Validator): 0.00023 <br> L2Rel.u(L2Rel_Validator): 0.00023 |
examples/ide/conf/volterra_ide.yaml (17 additions, 0 deletions)
@@ -61,3 +61,20 @@ EVAL:
  pretrained_model_path: null
  eval_with_no_grad: true
  npoint_eval: 100

INFER:
  pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/volterra_ide/volterra_ide_pretrained.pdparams
  export_path: ./inference/volterra_ide
  pdmodel_path: ${INFER.export_path}.pdmodel
  pdpiparams_path: ${INFER.export_path}.pdiparams
  device: gpu
  engine: native
  precision: fp32
  onnx_path: ${INFER.export_path}.onnx
  ir_optim: true
  min_subgraph_size: 10
  gpu_mem: 4000
  gpu_id: 0
  max_batch_size: 64
  num_cpu_threads: 4
  batch_size: 16
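The `${INFER.export_path}` entries are OmegaConf interpolations, so the model and parameter paths follow `export_path` automatically. A minimal sketch of how they resolve, assuming only the `omegaconf` package (which Hydra uses internally):

```python
# Minimal sketch: how the ${INFER.export_path} interpolations above resolve.
from omegaconf import OmegaConf

cfg = OmegaConf.create(
    {
        "INFER": {
            "export_path": "./inference/volterra_ide",
            "pdmodel_path": "${INFER.export_path}.pdmodel",
            "pdpiparams_path": "${INFER.export_path}.pdiparams",
        }
    }
)
print(cfg.INFER.pdmodel_path)     # ./inference/volterra_ide.pdmodel
print(cfg.INFER.pdpiparams_path)  # ./inference/volterra_ide.pdiparams
```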
examples/ide/volterra_ide.py (67 additions, 1 deletion)
@@ -254,14 +254,80 @@ def u_solution_func(in_) -> np.ndarray:
    plt.savefig(osp.join(cfg.output_dir, "./Volterra_IDE.png"), dpi=200)


def export(cfg: DictConfig):
    # set model
    model = ppsci.arch.MLP(**cfg.MODEL)

    # initialize solver
    solver = ppsci.solver.Solver(
        model,
        pretrained_model_path=cfg.INFER.pretrained_model_path,
    )
    # export model
    from paddle.static import InputSpec

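    # each input key gets a [None, 1] spec: dynamic batch size, one scalar feature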
    input_spec = [
        {
            key: InputSpec([None, 1], "float32", name=key)
            for key in cfg.MODEL.input_keys
        },
    ]
    solver.export(input_spec, cfg.INFER.export_path)


def inference(cfg: DictConfig):
    from deploy.python_infer import pinn_predictor

    predictor = pinn_predictor.PINNPredictor(cfg)

    # set geometry
    geom = {"timedomain": ppsci.geometry.TimeDomain(*cfg.BOUNDS)}

    input_data = geom["timedomain"].uniform_points(cfg.EVAL.npoint_eval)
    input_dict = {"x": input_data}

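    # run prediction, batched by cfg.INFER.batch_size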
    output_dict = predictor.predict(
        {key: input_dict[key] for key in cfg.MODEL.input_keys}, cfg.INFER.batch_size
    )

    # map raw predictor output keys to cfg.MODEL.output_keys
    output_dict = {
        store_key: output_dict[infer_key]
        for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys())
    }

    def u_solution_func(in_) -> np.ndarray:
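        # analytic solution of the Volterra IDE: u(t) = exp(-t) * cosh(t)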
        if isinstance(in_["x"], paddle.Tensor):
            return paddle.exp(-in_["x"]) * paddle.cosh(in_["x"])
        return np.exp(-in_["x"]) * np.cosh(in_["x"])

    label_data = u_solution_func({"x": input_data})
    output_data = output_dict["u"]

    # save result
    plt.plot(input_data, label_data, "-", label=r"$u(t)$")
    plt.plot(input_data, output_data, "o", label=r"$\hat{u}(t)$", markersize=4.0)
    plt.legend()
    plt.xlabel(r"$t$")
    plt.ylabel(r"$u$")
    plt.title(r"$u-t$")
    plt.savefig("./Volterra_IDE_pred.png", dpi=200)


@hydra.main(version_base=None, config_path="./conf", config_name="volterra_ide.yaml")
def main(cfg: DictConfig):
    if cfg.mode == "train":
        train(cfg)
    elif cfg.mode == "eval":
        evaluate(cfg)
    elif cfg.mode == "export":
        export(cfg)
    elif cfg.mode == "infer":
        inference(cfg)
    else:
raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'")
raise ValueError(
f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'"
)


if __name__ == "__main__":
    main()
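As a usage note, the exported `.pdmodel`/`.pdiparams` pair can also be queried directly with the Paddle Inference API, independent of `PINNPredictor`. A minimal sketch, assuming export has already been run with the default `INFER.export_path`, and assuming (for illustration only) a `[0, 5]` time domain for `BOUNDS`:

```python
# Minimal sketch (not the repository's deploy path): query the exported model
# with the raw Paddle Inference API.
import numpy as np
from paddle.inference import Config, create_predictor

config = Config(
    "./inference/volterra_ide.pdmodel", "./inference/volterra_ide.pdiparams"
)
predictor = create_predictor(config)

# single input "x": 100 time points in the (assumed) domain [0, 5]
x = np.linspace(0.0, 5.0, 100, dtype="float32").reshape(-1, 1)
input_handle = predictor.get_input_handle(predictor.get_input_names()[0])
input_handle.copy_from_cpu(x)

predictor.run()
u = predictor.get_output_handle(predictor.get_output_names()[0]).copy_to_cpu()
print(u.shape)  # (100, 1)
```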
