
Commit

[PIR] Replace `*ir` names of files and directories on the Python side with `*pir` (#57209)
DrRyanHuang authored Sep 22, 2023
1 parent a83cfb6 commit 4a15a2e
Showing 49 changed files with 186 additions and 186 deletions.
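The user-visible effect of the rename is that the PIR bindings now live under `paddle.pir` rather than `paddle.ir`. A minimal before/after sketch (assuming a Paddle build that includes this commit; `pir.Program` is taken from the `executor.py` hunk below, and its direct constructibility is an assumption):

```python
import paddle

paddle.enable_static()

# Before this commit the PIR bindings were imported as:
#     from paddle import ir
#     program = ir.Program()
# After it, only the module name changes:
from paddle import pir

program = pir.Program()  # assumption: Program is directly constructible here,
                         # as suggested by `Program = paddle.pir.Program` in executor.py
print(program)
```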
2 changes: 1 addition & 1 deletion paddle/fluid/pybind/CMakeLists.txt
@@ -116,7 +116,7 @@ set(PYBIND_SRCS
inference_api.cc
ops_api.cc
static_op_function.cc
- ir.cc
+ pir.cc
graph.cc
bind_fleet_executor.cc
reader_py.cc
8 changes: 4 additions & 4 deletions paddle/fluid/pybind/ir.cc → paddle/fluid/pybind/pir.cc
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/pybind/ir.h"
#include "paddle/fluid/pybind/pir.h"
#include <Python.h>
#include <algorithm>
#include <memory>
@@ -967,7 +967,7 @@ void BindUtils(pybind11::module *m) {
.. code-block:: python
import paddle
- from paddle import ir
+ from paddle import pir
paddle.enable_static()
x = paddle.randn([4, 4])
@@ -1049,8 +1049,8 @@ void BindPassManager(pybind11::module *m) {
.def("empty", &PassManager::Empty);
}

- void BindNewIR(pybind11::module *module) {
-   auto ir_module = module->def_submodule("ir");
+ void BindPIR(pybind11::module *module) {
+   auto ir_module = module->def_submodule("pir");
BindProgram(&ir_module);
BindBlock(&ir_module);
BindOperation(&ir_module);
2 changes: 1 addition & 1 deletion paddle/fluid/pybind/ir.h → paddle/fluid/pybind/pir.h
@@ -18,6 +18,6 @@

namespace paddle {
namespace pybind {
- void BindNewIR(pybind11::module *m);
+ void BindPIR(pybind11::module *m);
} // namespace pybind
} // namespace paddle
4 changes: 2 additions & 2 deletions paddle/fluid/pybind/pybind.cc
@@ -122,9 +122,9 @@ limitations under the License. */
#include "paddle/fluid/pybind/imperative.h"
#include "paddle/fluid/pybind/inference_api.h"
#include "paddle/fluid/pybind/io.h"
#include "paddle/fluid/pybind/ir.h"
#include "paddle/fluid/pybind/jit.h"
#include "paddle/fluid/pybind/metrics_py.h"
#include "paddle/fluid/pybind/pir.h"
#include "paddle/fluid/pybind/ps_gpu_wrapper_py.h"
#include "paddle/fluid/pybind/pybind_variant_caster.h"
#include "paddle/fluid/pybind/xpu_streams_py.h"
@@ -2939,7 +2939,7 @@ All parameter, weight, gradient are variables in Paddle.
GetAllWorkerInfos(&m);
#endif

BindNewIR(&m);
BindPIR(&m);
BindVjp(&m);
}
} // namespace pybind
4 changes: 2 additions & 2 deletions python/paddle/_C_ops.py
@@ -21,7 +21,7 @@
globals()[name] = getattr(core.eager.ops, name)
__all__.append(name)

- for name in dir(core.ir.ops):
-     globals()[name] = getattr(core.ir.ops, name)
+ for name in dir(core.pir.ops):
+     globals()[name] = getattr(core.pir.ops, name)
if name not in __all__:
__all__.append(name)
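The loop above is a dynamic re-export: every op compiled into `core.pir.ops` is copied into this module's namespace and appended to `__all__`. A self-contained sketch of the same pattern, with a plain object standing in for the compiled submodule (the stand-in names are hypothetical):

```python
import types

# Stand-in for the compiled core.pir.ops submodule (hypothetical contents).
ops = types.SimpleNamespace(
    relu=lambda x: max(x, 0.0),
    scale=lambda x, s: x * s,
)

__all__ = []
for name in dir(ops):
    if name.startswith("_"):
        continue  # dir() also yields dunder attributes; the real loop binds them too
    globals()[name] = getattr(ops, name)  # re-export as a module-level function
    if name not in __all__:
        __all__.append(name)

print(__all__)     # ['relu', 'scale']
print(relu(-2.0))  # 0.0 -- callable as if it were defined in this module
```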
2 changes: 1 addition & 1 deletion python/paddle/__init__.py
@@ -464,7 +464,7 @@
from . import linalg # noqa: F401
from . import fft # noqa: F401
from . import signal # noqa: F401
- from . import _ir_ops  # noqa: F401
+ from . import _pir_ops  # noqa: F401

import paddle.text # noqa: F401
import paddle.vision # noqa: F401
4 changes: 2 additions & 2 deletions python/paddle/_ir_ops.py → python/paddle/_pir_ops.py
@@ -16,6 +16,6 @@

__all__ = []

- for name in dir(core.ir.ops):
-     globals()[name] = getattr(core.ir.ops, name)
+ for name in dir(core.pir.ops):
+     globals()[name] = getattr(core.pir.ops, name)
__all__.append(name)
12 changes: 6 additions & 6 deletions python/paddle/autograd/ir_backward.py
@@ -15,7 +15,7 @@
import collections
from collections.abc import Sequence

- import paddle.ir
+ import paddle.pir
from paddle.autograd.backward_utils import State

"""
@@ -158,7 +158,7 @@ def some_in_set(value_list, value_set):
def operand2value(values):
value_set = set()
for item in values:
- if isinstance(item, paddle.ir.OpOperand):
+ if isinstance(item, paddle.pir.OpOperand):
value_set.add(item.source())
else:
value_set.add(item)
@@ -747,26 +747,26 @@ def grad(
check_type(
outputs,
'outputs',
- ((paddle.ir.Value, paddle.ir.OpResult), list, tuple),
+ ((paddle.pir.Value, paddle.pir.OpResult), list, tuple),
'paddle.autograd.ir_backward.grad',
)
check_type(
inputs,
'inputs',
- ((paddle.ir.Value, paddle.ir.OpResult), list, tuple),
+ ((paddle.pir.Value, paddle.pir.OpResult), list, tuple),
'paddle.autograd.ir_backward.grad',
)
check_type(
grad_outputs,
'grad_outputs',
- ((paddle.ir.Value, paddle.ir.OpResult), list, tuple, type(None)),
+ ((paddle.pir.Value, paddle.pir.OpResult), list, tuple, type(None)),
'paddle.autograd.ir_backward.grad',
)

check_type(
no_grad_vars,
'no_grad_vars',
- ((paddle.ir.Value, paddle.ir.OpResult), list, tuple, set, type(None)),
+ ((paddle.pir.Value, paddle.pir.OpResult), list, tuple, set, type(None)),
'paddle.autograd.ir_backward.grad',
)
outputs = _as_list(outputs)
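The `check_type` calls above pass a nested tuple such as `((paddle.pir.Value, paddle.pir.OpResult), list, tuple)`; this works because the check accepts the same nested-tuple form that `isinstance` does, and `isinstance` flattens arbitrarily nested tuples of types. A quick pure-Python illustration with stand-in classes:

```python
# Stand-ins for paddle.pir.Value / paddle.pir.OpResult.
class Value: ...
class OpResult: ...

# isinstance flattens nested tuples of types, so this behaves as a flat union.
accepted = ((Value, OpResult), list, tuple)

print(isinstance(OpResult(), accepted))     # True
print(isinstance([1, 2, 3], accepted))      # True
print(isinstance("not allowed", accepted))  # False
```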
2 changes: 1 addition & 1 deletion python/paddle/base/data_feeder.py
@@ -17,7 +17,7 @@

import numpy as np

- from ..ir import OpResult
+ from ..pir import OpResult
from . import core
from .framework import (
Variable,
10 changes: 5 additions & 5 deletions python/paddle/base/executor.py
@@ -21,7 +21,7 @@

import numpy as np

- from ..ir import OpResult
+ from ..pir import OpResult
from . import compiler, core, framework, get_flags, set_flags, unique_name
from .data_feeder import convert_dtype
from .framework import (
@@ -511,7 +511,7 @@ def _add_pir_fetch_ops(program, fetch_list, fetch_var_name):
assert isinstance(
fetch_input, OpResult
), "Wrong type for fetch_list[%s]: %s" % (i, type(fetch_input))
- paddle._ir_ops.fetch(fetch_input, fetch_var_name + str(i), i)
+ paddle._pir_ops.fetch(fetch_input, fetch_var_name + str(i), i)


def _merge_tensors(tensor, micro_batch_num):
@@ -1246,7 +1246,7 @@ def _pir_feed_data(self, program, feed, scope):
pir_check_feed_shape_type(
cur_feed, feed_target_name, var_shape, var_type
)
- # the last arg of set_feed_variable has no effect in new ir, we pass 0 by default.
+ # the last arg of set_feed_variable has no effect in pir, we pass 0 by default.
core.set_feed_variable(scope, cur_feed, feed_target_name, 0)
else:
break
@@ -1869,8 +1869,8 @@ def _run_pir_impl(
):
import paddle

- Program = paddle.ir.Program
- default_main_program = paddle.ir.core.default_main_program
+ Program = paddle.pir.Program
+ default_main_program = paddle.pir.core.default_main_program

if self._closed:
raise RuntimeError("Attempted to use a closed Executor")
12 changes: 6 additions & 6 deletions python/paddle/base/framework.py
@@ -32,7 +32,7 @@

from . import core
from . import unique_name
- from .. import ir
+ from .. import pir
from paddle.base.libpaddle import DataType
import paddle.version as fluid_version
import warnings
@@ -294,10 +294,10 @@ def in_dygraph_mode():
def in_pir_mode():
"""
- This API checks whether paddle runs in static graph mode and use new ir api.
+ This API checks whether paddle runs in static graph mode and use pir api.
Returns:
- bool: Whether paddle runs in static graph mode and use new ir api.
+ bool: Whether paddle runs in static graph mode and use pir api.
Examples:
.. code-block:: python
@@ -323,10 +323,10 @@ def use_pir_api():
def in_dynamic_or_pir_mode():
"""
- This API checks whether paddle runs in dynamic graph or new ir mode.
+ This API checks whether paddle runs in dynamic graph or pir mode.
Returns:
- bool: Whether paddle runs in static graph mode and use new ir api.
+ bool: Whether paddle runs in static graph mode and use pir api.
Examples:
.. code-block:: python
@@ -1162,7 +1162,7 @@ def convert_np_dtype_to_dtype_(np_dtype):
"""
if in_pir_mode():
- return ir.core.convert_np_dtype_to_dtype_(np_dtype)
+ return pir.core.convert_np_dtype_to_dtype_(np_dtype)

# Convert the data type string to numpy data type.
if isinstance(np_dtype, str) and np_dtype == "bfloat16":
2 changes: 1 addition & 1 deletion python/paddle/base/layer_helper_base.py
@@ -434,7 +434,7 @@ def create_parameter(
)
else:
if in_pir_mode():
- return paddle.ir.core.create_parameter(
+ return paddle.pir.core.create_parameter(
dtype=dtype,
shape=shape,
**attr._to_kwargs(with_initializer=True),
14 changes: 7 additions & 7 deletions python/paddle/decomposition/decomp.py
@@ -15,15 +15,15 @@
import logging
import typing

- from paddle import ir
- from paddle.base.libpaddle.ir import Block, Program
+ from paddle import pir
+ from paddle.base.libpaddle.pir import Block, Program
from paddle.framework import core

from . import register


def _build_tensor_tuple(xs):
- if isinstance(xs, ir.OpResult):
+ if isinstance(xs, pir.OpResult):
return (xs,)
elif isinstance(xs, typing.Sequence):
return tuple(xs)
@@ -157,20 +157,20 @@ def decompose(
dst_vars = [None] * len(src_vars)
dst_vars_dct = {}
for idx, item in enumerate(src_vars):
- if not isinstance(item, ir.OpResult):
+ if not isinstance(item, pir.OpResult):
raise TypeError(
f"Each var in dst_vars should map corresponding var in src_vars, but got type {type(item)} in {src_vars}."
)
dst_vars_dct[item] = idx
- with ir.core.program_guard(program):
+ with pir.core.program_guard(program):
_decompose_subgraph(
block,
dst_vars_dct,
dst_vars,
op_filter,
)
for idx, item in enumerate(dst_vars):
- if not isinstance(item, ir.OpResult):
+ if not isinstance(item, pir.OpResult):
if item is None:
dst_vars[idx] = src_vars[idx]
else:
@@ -206,7 +206,7 @@ def _decompose_subgraph(block, orig_vars, dst_vars, op_filter):
if lower:
core.prim_config["composite_ops_record"].add(op_name)
input_args = _prepare_python_api_arguments(op)
- ir.set_insertion_point(op)
+ pir.set_insertion_point(op)
orig_outs = op.results()
new_outs = _build_tensor_tuple(decom_rule(*input_args))

4 changes: 2 additions & 2 deletions python/paddle/decomposition/rules.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

- from paddle import _ir_ops
+ from paddle import _pir_ops

from .primitives import * # noqa: F403
from .register import register_decomp
@@ -60,7 +60,7 @@ def gelu_composite(x, approximate):
else:
# gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2)))

- cdf = half * (one + _ir_ops.erf(x * full(x.shape, M_SQRT1_2, x.dtype)))
+ cdf = half * (one + _pir_ops.erf(x * full(x.shape, M_SQRT1_2, x.dtype)))
out = x * cdf
return out

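The `rules.py` hunk implements the exact-form decomposition gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))) quoted in the comment above. A stdlib-only sketch of the same formula, independent of Paddle (scalar rather than tensor, so `math.erf` stands in for `_pir_ops.erf`):

```python
import math

M_SQRT1_2 = 1.0 / math.sqrt(2.0)  # the same constant used by gelu_composite

def gelu(x: float) -> float:
    # gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2)))
    cdf = 0.5 * (1.0 + math.erf(x * M_SQRT1_2))
    return x * cdf

for v in (-3.0, -1.0, 0.0, 1.0, 3.0):
    print(f"gelu({v:+.1f}) = {gelu(v):+.6f}")
# gelu(0) == 0; gelu(x) approaches x for large positive x and 0 for large negative x.
```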
2 changes: 1 addition & 1 deletion python/paddle/incubate/passes/ir.py
@@ -459,7 +459,7 @@ def RegisterPass(function=None, input_specs={}):
.. code-block:: python
>>> import paddle
>>> from paddle.base.ir import RegisterPass
>>> from paddle.incubate.passes.ir import RegisterPass
>>> @RegisterPass
>>> def multi_add_to_addn():
2 changes: 1 addition & 1 deletion python/paddle/jit/dy2static/function_spec.py
@@ -18,7 +18,7 @@
import numpy as np

import paddle
- import paddle.ir.core as ir_static
+ import paddle.pir.core as ir_static
from paddle.base import core
from paddle.base.data_feeder import convert_dtype
from paddle.base.dygraph.base import switch_to_static_graph
8 changes: 4 additions & 4 deletions python/paddle/jit/dy2static/newir_partial_program.py
@@ -18,7 +18,7 @@
import numpy as np

import paddle
- import paddle.ir.core as ir_static
+ import paddle.pir.core as ir_static
from paddle import _legacy_C_ops
from paddle.amp.auto_cast import _in_amp_guard, _in_pure_fp16_guard
from paddle.autograd.ir_backward import grad
@@ -27,7 +27,7 @@
from paddle.base.data_feeder import check_type, convert_dtype
from paddle.base.dygraph.base import switch_to_static_graph
from paddle.base.framework import _apply_pass
- from paddle.base.libpaddle.ir import OpResult, fake_op_result
+ from paddle.base.libpaddle.pir import OpResult, fake_op_result
from paddle.framework import use_pir_api
from paddle.optimizer.lr import LRScheduler

@@ -823,7 +823,7 @@ def _get_forward_backward_program_form(
(
forward_program,
backward_program,
- ), program_attr = paddle.base.libpaddle.ir.program_split(
+ ), program_attr = paddle.base.libpaddle.pir.program_split(
whole_program,
forward_inputs,
forward_outputs,
@@ -1140,7 +1140,7 @@ def partial_program_from(concrete_program, from_method=False):
def add_build_strategy_for(
program, start_op_index, end_op_index, build_strategy=None, skip_vars=None
):
- paddle.base.libpaddle.ir.program_split(
+ paddle.base.libpaddle.pir.program_split(
program,
)
if start_op_index < end_op_index:
2 changes: 1 addition & 1 deletion python/paddle/jit/dy2static/program_translator.py
@@ -20,7 +20,7 @@
import warnings
import weakref

- import paddle.ir.core as ir_static
+ import paddle.pir.core as ir_static
from paddle import decomposition
from paddle.base import core, framework
from paddle.base.data_feeder import check_type
2 changes: 1 addition & 1 deletion python/paddle/nn/functional/activation.py
@@ -759,7 +759,7 @@ def relu(x, name=None):
else:
if paddle.framework.in_dynamic_or_pir_mode():
# Below code will be removed after we can generate IR api automatically
- return paddle._ir_ops.relu(x)
+ return paddle._pir_ops.relu(x)

check_variable_and_dtype(
x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'relu'
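The `relu` hunk shows the dispatch shape used throughout the functional API: in dynamic or PIR mode the compiled op is called directly, otherwise execution falls through to the legacy static-graph path. A stripped-down sketch of that control flow (all names here are stand-ins, not the real Paddle internals):

```python
IN_DYNAMIC_OR_PIR_MODE = True  # stand-in for paddle.framework.in_dynamic_or_pir_mode()

class _pir_ops:  # stand-in for the compiled paddle._pir_ops namespace
    @staticmethod
    def relu(xs):
        return [max(v, 0.0) for v in xs]

def relu(xs):
    if IN_DYNAMIC_OR_PIR_MODE:
        # Fast path: hand the call straight to the compiled PIR kernel.
        return _pir_ops.relu(xs)
    # Legacy static-graph path: dtype checks plus appending an op to the
    # program (elided here; see the original function body above).
    raise NotImplementedError

print(relu([-1.5, 0.0, 2.5]))  # [0.0, 0.0, 2.5]
```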
