@@ -30,6 +30,57 @@ class PINNPredictor(base.Predictor):
3030
3131 Args:
3232 cfg (DictConfig): Running configuration.
33+
34+ Examples:
35+ >>> import numpy as np
36+ >>> import paddle
37+ >>> from omegaconf import DictConfig
38+ >>> from paddle.static import InputSpec
39+ >>> import ppsci
40+ >>> from deploy.python_infer import pinn_predictor
41+ >>> model = ppsci.arch.MLP(("x", "y"), ("u", "v", "p"), 3, 16)
42+ >>> static_model = paddle.jit.to_static(
43+ ... model,
44+ ... input_spec=[
45+ ... {
46+ ... key: InputSpec([None, 1], "float32", name=key)
47+ ... for key in model.input_keys
48+ ... },
49+ ... ],
50+ ... )
51+ >>> paddle.jit.save(static_model, "./inference")
52+ >>> cfg = DictConfig(
53+ ... {
54+ ... "log_freq": 10,
55+ ... "INFER": {
56+ ... "pdmodel_path": "./inference.pdmodel",
57+ ... "pdiparams_path": "./inference.pdiparams",
58+ ... "device": "cpu",
59+ ... "engine": "native",
60+ ... "precision": "fp32",
61+ ... "onnx_path": None,
62+ ... "ir_optim": True,
63+ ... "min_subgraph_size": 15,
64+ ... "gpu_mem": 500,
65+ ... "gpu_id": 0,
66+ ... "max_batch_size": 10,
67+ ... "num_cpu_threads": 10,
68+ ... }
69+ ... }
70+ ... )
71+ >>> predictor = pinn_predictor.PINNPredictor(cfg) # doctest: +SKIP
72+ >>> pred = predictor.predict(
73+ ... {
74+ ... "x": np.random.randn(4, 1).astype("float32"),
75+ ... "y": np.random.randn(4, 1).astype("float32"),
76+ ... },
77+ ... batch_size=2,
78+ ... ) # doctest: +SKIP
79+ >>> for k, v in pred.items(): # doctest: +SKIP
80+ ... print(k, v.shape) # doctest: +SKIP
81+ save_infer_model/scale_0.tmp_0 (4, 1)
82+ save_infer_model/scale_1.tmp_0 (4, 1)
83+ save_infer_model/scale_2.tmp_0 (4, 1)
3384 """
3485
3586 def __init__ (