diff --git a/docs/zh/examples/darcy2d.md b/docs/zh/examples/darcy2d.md index eb473281ab..3c4e5b089b 100644 --- a/docs/zh/examples/darcy2d.md +++ b/docs/zh/examples/darcy2d.md @@ -14,6 +14,18 @@ python darcy2d.py mode=eval EVAL.pretrained_model_path=https://paddle-org.bj.bcebos.com/paddlescience/models/darcy2d/darcy2d_pretrained.pdparams ``` +=== "模型导出命令" + + ``` sh + python darcy2d.py mode=export + ``` + +=== "模型推理命令" + + ``` sh + python darcy2d.py mode=infer + ``` + | 预训练模型 | 指标 | |:--| :--| | [darcy2d_pretrained.pdparams](https://paddle-org.bj.bcebos.com/paddlescience/models/darcy2d/darcy2d_pretrained.pdparams) | loss(Residual): 0.36500
MSE.poisson(Residual): 0.00006 | diff --git a/examples/darcy/conf/darcy2d.yaml b/examples/darcy/conf/darcy2d.yaml index f65a782fdf..ec114d1c36 100644 --- a/examples/darcy/conf/darcy2d.yaml +++ b/examples/darcy/conf/darcy2d.yaml @@ -23,6 +23,7 @@ hydra: mode: train # running mode: train/eval seed: 42 output_dir: ${hydra:run.dir} +log_freq: 20 # set working condition NPOINT_PDE: 9801 # 99 ** 2 @@ -62,3 +63,20 @@ EVAL: batch_size: residual_validator: 8192 pretrained_model_path: null + +INFER: + pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/darcy2d/darcy2d_pretrained.pdparams + export_path: ./inference/darcy2d + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + onnx_path: ${INFER.export_path}.onnx + device: gpu + engine: native + precision: fp32 + ir_optim: true + min_subgraph_size: 5 + gpu_mem: 2000 + gpu_id: 0 + max_batch_size: 8192 + num_cpu_threads: 10 + batch_size: 8192 diff --git a/examples/darcy/darcy2d.py b/examples/darcy/darcy2d.py index 8937235dd7..12e32af173 100644 --- a/examples/darcy/darcy2d.py +++ b/examples/darcy/darcy2d.py @@ -296,14 +296,66 @@ def poisson_ref_compute_func(_in): solver.visualize() +def export(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # initialize solver + solver = ppsci.solver.Solver( + model, + pretrained_model_path=cfg.INFER.pretrained_model_path, + ) + # export model + from paddle.static import InputSpec + + input_spec = [ + {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, + ] + + solver.export(input_spec, cfg.INFER.export_path) + + +def inference(cfg: DictConfig): + from deploy.python_infer import pinn_predictor + + predictor = pinn_predictor.PINNPredictor(cfg) + + # set geometry + geom = {"rect": ppsci.geometry.Rectangle((0.0, 0.0), (1.0, 1.0))} + # manually collate input data for visualization, + input_dict = geom["rect"].sample_interior( + cfg.NPOINT_PDE + cfg.NPOINT_BC, evenly=True + ) + 
output_dict = predictor.predict( + {key: input_dict[key] for key in cfg.MODEL.input_keys}, cfg.INFER.batch_size + ) + # mapping data to cfg.MODEL.output_keys + output_dict = { + store_key: output_dict[infer_key] + for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) + } + ppsci.visualize.save_vtu_from_dict( + "./visual/darcy2d.vtu", + {**input_dict, **output_dict}, + input_dict.keys(), + cfg.MODEL.output_keys, + ) + + + @hydra.main(version_base=None, config_path="./conf", config_name="darcy2d.yaml") def main(cfg: DictConfig): if cfg.mode == "train": train(cfg) elif cfg.mode == "eval": evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) else: - raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") + raise ValueError( + f"cfg.mode should be in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) if __name__ == "__main__":