Unify the English translation of static graph mode and dynamic graph mode in the docs #49170

Merged · 15 commits · Dec 30, 2022
2 changes: 1 addition & 1 deletion paddle/fluid/eager/autograd_meta.h
@@ -23,7 +23,7 @@ using AbstractAutogradMeta = paddle::experimental::AbstractAutogradMeta;
*
* AutogradMeta is what record the backward info for tensor. When we run
* computation graph eagerly, we can not build a static paddle program like
* static mode do, so we need a new method to record forward info to trace
* static graph mode do, so we need a new method to record forward info to trace
* backward when we finish all forward computation. This require our
* AutogradMeta class record following main members
*
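The comment above explains why eager (dynamic graph) execution cannot rely on a static program and instead records backward information on each tensor through AutogradMeta. As a hedged illustration of that behavior from the Python side (ordinary Paddle dygraph usage, not code from this PR):

```python
import paddle

# In dynamic graph mode no static program is built; each Tensor carries its own
# autograd record, which is what gets traced when backward runs.
x = paddle.to_tensor([1.0, 2.0, 3.0], stop_gradient=False)
y = (x * x).sum()   # forward info is recorded on the tensors themselves
y.backward()        # the recorded info is used to trace backward
print(x.grad)       # [2., 4., 6.]
```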
@@ -657,7 +657,7 @@ void BuildOpFuncList(const platform::Place& place,
new phi::Kernel(phi::KernelFactory::Instance().SelectKernel(
phi_kernel_name, phi_cpu_kernel_key)));
if (op_with_kernel->PhiKernel()->IsValid()) {
VLOG(6) << "Static mode PrepareImpl - kernel name: "
VLOG(6) << "Static graph mode PrepareImpl - kernel name: "
<< phi_kernel_name
<< " | kernel key: " << phi_cpu_kernel_key
<< " | kernel: " << *(op_with_kernel->PhiKernel());
10 changes: 5 additions & 5 deletions paddle/fluid/framework/operator.cc
@@ -1679,11 +1679,11 @@ void OperatorWithKernel::RunImpl(const Scope& scope,
phi_kernel_name, phi_kernel_key)));

if (phi_kernel_->IsValid()) {
VLOG(6) << "Static mode ChoosePhiKernel - kernel name: "
VLOG(6) << "Static graph mode ChoosePhiKernel - kernel name: "
<< phi_kernel_name << " | kernel key: " << phi_kernel_key
<< " | kernel: " << *phi_kernel_;
} else {
VLOG(6) << "Static mode ChoosePhiKernel - kernel `" << phi_kernel_name
VLOG(6) << "Static graph mode ChoosePhiKernel - kernel `" << phi_kernel_name
<< "` not found.";
}
} else {
@@ -1816,7 +1816,7 @@ void OperatorWithKernel::RunImpl(const Scope& scope,

dev_ctx = pool.Get(platform::CPUPlace());
if (phi_kernel_->IsValid()) {
VLOG(6) << "Static mode PrepareImpl - kernel name: "
VLOG(6) << "Static graph mode PrepareImpl - kernel name: "
<< phi_kernel_name << " | kernel key: " << phi_cpu_kernel_key
<< " | kernel: " << *phi_kernel_;
run_phi_kernel_ = true;
@@ -2084,11 +2084,11 @@ phi::KernelKey OperatorWithKernel::ChoosePhiKernel(
phi_kernel_name, phi_kernel_key)));

if (phi_kernel_->IsValid()) {
VLOG(6) << "Static mode ChoosePhiKernel - kernel name: " << phi_kernel_name
VLOG(6) << "Static graph mode ChoosePhiKernel - kernel name: " << phi_kernel_name
<< " | kernel key: " << phi_kernel_key
<< " | kernel: " << *phi_kernel_;
} else {
VLOG(6) << "Static mode ChoosePhiKernel - kernel `" << phi_kernel_name
VLOG(6) << "Static graph mode ChoosePhiKernel - kernel `" << phi_kernel_name
<< "` not found.";
}
return phi_kernel_key;
2 changes: 1 addition & 1 deletion paddle/fluid/imperative/tracer.h
@@ -136,7 +136,7 @@ class Tracer {
}

// Note(Aurelius84): The `tmp` is used as prefix key while naming a temporary
// intermediate var both in imperative and static mode. But the
// intermediate var both in imperative and static graph mode. But the
// `UniqueNameGenerator` in C++ and `unique_name.py` in Python doesn't share
// the same auto-increment id. It will create a variable repeatedly with same
// name like `tmp_0` in some cases when transform dygraph into static layers.
10 changes: 9 additions & 1 deletion paddle/fluid/inference/tensorrt/convert/c_allreduce_op.cc
@@ -1,3 +1,11 @@
/*
* @Author: physico
* @Date: 2022-12-19 16:59:04
* @LastEditTime: 2022-12-19 17:02:33
* @FilePath: /Paddle/paddle/fluid/inference/tensorrt/convert/c_allreduce_op.cc
* @Description:
* @Function List:
*/
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
@@ -33,7 +41,7 @@ class CAllReduceOpConverter : public OpConverter {
VLOG(4) << "convert fluid callreduce op to tensorrt layer";
if (!engine_->with_dynamic_shape()) {
PADDLE_THROW(platform::errors::Fatal(
"Unsupported static mode. Please set dynamic shape of inputs."));
"Unsupported static graph mode. Please set dynamic shape of inputs."));
}
ReduceType red_type = op_to_reduce_type[op.type()];
std::string name = op.type();
@@ -29,7 +29,7 @@ class PrelnResidualBiasOpConverter : public OpConverter {
VLOG(4) << "convert fused preln_residual_bias op to tensorrt layer";
if (!engine_->with_dynamic_shape()) {
PADDLE_THROW(platform::errors::Fatal(
"Unsupported static mode. Please set dynamic shape of inputs."));
"Unsupported static graph mode. Please set dynamic shape of inputs."));
}
framework::OpDesc op_desc(op, nullptr);
// Declare inputs
4 changes: 2 additions & 2 deletions paddle/fluid/operators/run_program_op.h
@@ -288,8 +288,8 @@ class RunProgramOpKernel : public framework::OpKernel<T> {
auto *out_scope_vec = ctx.Output<StepScopeVar>("OutScope");
std::unique_ptr<framework::Scope> inner_scope{nullptr};
if (out_scope_vec->size() == 0) {
// For cuda graph under static mode usage.
// For static mode, we cannot set value of a tensor before any run,
// For cuda graph under static graph mode usage.
// For static graph mode, we cannot set value of a tensor before any run,
// the OutScope variable passed to the op actually contains nothing.
// Just create a tmp scope to run the program.
PADDLE_ENFORCE_EQ(
2 changes: 1 addition & 1 deletion paddle/fluid/operators/set_value_op.cc
@@ -145,7 +145,7 @@ class SetValueMaker : public framework::OpProtoAndCheckerMaker {
AddAttr<std::vector<int64_t>>("shape", "(vector<int64_t>) Shape of values.")
.SetDefault({});
AddComment(R"DOC(SetValue operator.
Assignment to a phi::DenseTensor in static mode.
Assignment to a phi::DenseTensor in static graph mode.
)DOC");
}
};
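For context, the SetValue operator documented above is what slice assignment on a Variable lowers to under static graph mode. A hedged sketch with standard paddle.static APIs (not part of this diff):

```python
import paddle

paddle.enable_static()

main_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog):
    x = paddle.zeros([3, 4], dtype="float32")
    x[0, :] = 1.0  # slice assignment is lowered to a set_value op

exe = paddle.static.Executor(paddle.CPUPlace())
(out,) = exe.run(main_prog, fetch_list=[x])
print(out)  # first row is 1.0, the other rows stay 0.0
```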
4 changes: 2 additions & 2 deletions paddle/fluid/pybind/eager_legacy_op_function_generator.cc
@@ -443,8 +443,8 @@ GenerateOpFunctions() {
// In this case, output will reuse input varbase.
// Dygraph mode needs to be aligned with the in-place strategy in static
// mode, and the mapping relationships between output and input that have
// been defined in static mode should be used in dygraph mode.
// Find which ops need to use Inplace strategy in static mode, and get the
// been defined in static graph mode should be used in dygraph mode.
// Find which ops need to use Inplace strategy in static graph mode, and get the
// mapping relationship between Inplace output and input.
auto& infer_inplace =
paddle::framework::OpInfoMap::Instance().Get(op_type).infer_inplace_;
2 changes: 1 addition & 1 deletion paddle/fluid/pybind/eager_properties.cc
@@ -39,7 +39,7 @@ PyObject* tensor_properties_get_name(TensorObject* self, void* closure) {
EAGER_TRY
// NOTE(dev): [why not use egr::Controller::Instance::GernerateUniqueName()?]
// Beacause Controller must holder a tracer, but 'tensor.name' maybe called
// everywhere such as static mode in @to_static, which means tracer is None.
// everywhere such as static graph mode in @to_static, which means tracer is None.
static egr::UniqueNameGenerator name_generator;
if (self->tensor.name().empty()) {
self->tensor.set_name(name_generator.Generate());
4 changes: 2 additions & 2 deletions paddle/fluid/pybind/op_function_generator.cc
@@ -473,8 +473,8 @@ GenerateOpFunctions(int split_count) {
// In this case, output will reuse input varbase.
// Dygraph mode needs to be aligned with the in-place strategy in static
// mode, and the mapping relationships between output and input that have
// been defined in static mode should be used in dygraph mode.
// Find which ops need to use Inplace strategy in static mode, and get the
// been defined in static graph mode should be used in dygraph mode.
// Find which ops need to use Inplace strategy in static graph mode, and get the
// mapping relationship between Inplace output and input.
auto& infer_inplace =
paddle::framework::OpInfoMap::Instance().Get(op_type).infer_inplace_;
2 changes: 1 addition & 1 deletion paddle/phi/kernels/fusion/README.md
@@ -2,7 +2,7 @@

1. We don't recommend to implement Python API for fusion kernel

- We don't recommend to implement Python API for fusion kernel, because it contains many inputs or outputs arguments generally, it is difficult to use and understand as an Python API, we recommend to call fusion kernel by pass optimization in dy2static mode or static mode.
- We don't recommend to implement Python API for fusion kernel, because it contains many inputs or outputs arguments generally, it is difficult to use and understand as an Python API, we recommend to call fusion kernel by pass optimization in dy2static mode or static graph mode.
- We also don't recommend to reuse fusion kernel in other kernel implementation, but recommended that the fusion kernel be implemented by reusing other kernels.

2. We don't require fusion kernel to have implementations for all devices
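To make the first recommendation concrete, below is a hedged sketch of "calling a fusion kernel by pass optimization" in static graph mode. The BuildStrategy flag is only an illustrative example of such a pass; which flags exist depends on the Paddle version:

```python
import paddle

paddle.enable_static()

main_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog):
    x = paddle.static.data(name="x", shape=[None, 16], dtype="float32")
    hidden = paddle.static.nn.fc(x, size=16)
    out = paddle.nn.functional.relu(hidden + 1.0)

# The fused kernel is selected by a graph pass, not by a dedicated Python API.
build_strategy = paddle.static.BuildStrategy()
build_strategy.fuse_elewise_add_act_ops = True  # fuse elementwise_add + relu
compiled_prog = paddle.static.CompiledProgram(main_prog, build_strategy=build_strategy)
```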
4 changes: 2 additions & 2 deletions python/paddle/device/cuda/graphs.py
@@ -82,7 +82,7 @@ def print_to_dot_files(self, dirname, flags=None):
def wrap_cuda_graph(function, mode="thread_local", memory_pool="default"):
assert mode in ALL_MODES
if not paddle.in_dynamic_mode():
# static mode
# static graph mode
from paddle.fluid.framework import _cuda_graph_guard

global cuda_graph_id
@@ -94,7 +94,7 @@ def wrap_cuda_graph(function, mode="thread_local", memory_pool="default"):
memory_pool_id = CoreCUDAGraph.gen_new_memory_pool_id()
else:
raise ValueError(
"memory_pool should be one of default or new under static mode, but got",
"memory_pool should be one of default or new under static graph mode, but got",
memory_pool,
)
return _cuda_graph_guard(
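The branch above handles static graph mode; for dynamic graph usage, here is a heavily hedged sketch modeled on how this API is exercised in Paddle's own tests (requires a CUDA build and a GPU; TinyNet is a made-up layer, and the exact call pattern may differ between versions):

```python
import paddle
from paddle.device.cuda.graphs import wrap_cuda_graph

class TinyNet(paddle.nn.Layer):
    def __init__(self):
        super().__init__()
        self.fc = paddle.nn.Linear(16, 16)

    def forward(self, x):
        return self.fc(x)

# Wrap the layer so its forward pass can be captured/replayed as a CUDA graph,
# then convert with to_static so the capture actually takes place.
net = wrap_cuda_graph(TinyNet(), mode="thread_local", memory_pool="default")
net = paddle.jit.to_static(net)
y = net(paddle.randn([4, 16]))
```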
2 changes: 1 addition & 1 deletion python/paddle/distributed/auto_parallel/engine.py
@@ -539,7 +539,7 @@ def _build(self, mode):

paddle.enable_static()
else:
# build program in static mode
# build program in static graph mode
serial_main_prog = self._serial_main_progs.get(mode, None)
if serial_main_prog is not None:
return
2 changes: 1 addition & 1 deletion python/paddle/distributed/collective.py
@@ -162,7 +162,7 @@ def _new_process_group_impl(

# _custom_gid provides a way for users to
# set the group id, which is usually useful
# to be compatible with the static mode.
# to be compatible with the static graph mode.
_custom_gid = None


4 changes: 2 additions & 2 deletions python/paddle/distributed/communication/stream/all_gather.py
@@ -178,10 +178,10 @@ def all_gather(
tensor_or_tensor_list, tensor, group, sync_op, use_calc_stream
)
else:
assert group is None, "Group can not be used in static mode for now."
assert group is None, "Group can not be used in static graph mode for now."
if paddle.is_tensor(tensor_or_tensor_list):
raise RuntimeError(
"Only support passing a tensor list to `all_gather` in static mode now."
"Only support passing a tensor list to `all_gather` in static graph mode now."
)
else:
return _all_gather_in_static_mode(
4 changes: 2 additions & 2 deletions python/paddle/distributed/communication/stream/all_reduce.py
@@ -58,7 +58,7 @@ def _all_reduce_in_static_mode(tensor, op, group, sync_op, use_calc_stream):
if not isinstance(ring_id, int):
raise ValueError("The type of 'ring_id' for all_reduce should be int.")

# TODO: Support task and use task.wait in static mode
# TODO: Support task and use task.wait in static graph mode
# Use use_calc_stream rather than sync_op
helper = layer_helper.LayerHelper(op_type, **locals())
helper.append_op(
@@ -123,7 +123,7 @@ def all_reduce(
tensor, op, group, sync_op, use_calc_stream
)
else:
assert group is None, "Group can not be used in static mode for now."
assert group is None, "Group can not be used in static graph mode for now."
return _all_reduce_in_static_mode(
tensor, op, group, sync_op, use_calc_stream
)
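Since the stream collectives below all repeat the same constraint, one hedged usage sketch may help: in dynamic graph mode a custom Group can be passed, while in static graph mode `group` must stay None for now. The launch command and tensor values are illustrative only:

```python
# Launch with: python -m paddle.distributed.launch --gpus "0,1" demo.py
import paddle
import paddle.distributed as dist

dist.init_parallel_env()
x = paddle.to_tensor([float(dist.get_rank() + 1)])
# group defaults to None, which is also the only value allowed in static graph mode.
dist.stream.all_reduce(x, sync_op=True)
print(x)  # every rank now holds the sum across all ranks
```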
@@ -200,7 +200,7 @@ def alltoall(
"The output and input should be both tensor or tensor list."
)
else:
assert group is None, "Group can not be used in static mode for now."
assert group is None, "Group can not be used in static graph mode for now."
return _all_to_all_in_static_mode(
out_tensor_or_tensor_list,
in_tensor_or_tensor_list,
@@ -126,7 +126,7 @@ def broadcast(tensor, src, group=None, sync_op=True, use_calc_stream=False):
tensor, src_rank_in_group, group, sync_op, use_calc_stream
)
else:
assert group is None, "Group can not be used in static mode for now."
assert group is None, "Group can not be used in static graph mode for now."
return _broadcast_in_static_mode(
tensor, src, group, sync_op, use_calc_stream
)
2 changes: 1 addition & 1 deletion python/paddle/distributed/communication/stream/recv.py
@@ -114,7 +114,7 @@ def recv(tensor, src=0, group=None, sync_op=True, use_calc_stream=False):
tensor, src_rank_in_group, group, sync_op, use_calc_stream
)
else:
assert group is None, "Group can not be used in static mode for now."
assert group is None, "Group can not be used in static graph mode for now."
return _recv_in_static_mode(
tensor, src, group, sync_op, use_calc_stream
)
2 changes: 1 addition & 1 deletion python/paddle/distributed/communication/stream/reduce.py
@@ -139,7 +139,7 @@ def reduce(
tensor, dst_rank_in_group, op, group, sync_op, use_calc_stream
)
else:
assert group is None, "Group can not be used in static mode for now."
assert group is None, "Group can not be used in static graph mode for now."
return _reduce_in_static_mode(
tensor, dst, op, group, sync_op, use_calc_stream
)
2 changes: 1 addition & 1 deletion python/paddle/distributed/communication/stream/scatter.py
@@ -220,7 +220,7 @@ def scatter(
use_calc_stream,
)
else:
assert group is None, "Group can not be used in static mode for now."
assert group is None, "Group can not be used in static graph mode for now."

return _scatter_in_static_mode(
tensor,
2 changes: 1 addition & 1 deletion python/paddle/distributed/communication/stream/send.py
@@ -113,7 +113,7 @@ def send(tensor, dst=0, group=None, sync_op=True, use_calc_stream=False):
tensor, dst_rank_in_group, group, sync_op, use_calc_stream
)
else:
assert group is None, "Group can not be used in static mode for now."
assert group is None, "Group can not be used in static graph mode for now."
return _send_in_static_mode(
tensor, dst, group, sync_op, use_calc_stream
)
@@ -205,7 +205,7 @@ def __init__(
elif core.is_compiled_with_cuda():
self._device = "gpu"
assert self._device, "Only gpu and npu are supported."
assert not _non_static_mode(), "Only static mode is supported."
assert not _non_static_mode(), "Only static graph mode is supported."

op_maker = core.op_proto_and_checker_maker
self._op_role = op_maker.OpRole
2 changes: 1 addition & 1 deletion python/paddle/distributed/models/moe/utils.py
@@ -132,7 +132,7 @@ def _random_routing(topk_idx, topk_value, prob, topk=2):
elif _in_legacy_dygraph():
return core.ops.random_routing(prob, topk_value, topk_idx)
else:
raise RuntimeError("Not supporting static mode now")
raise RuntimeError("Not supporting static graph mode now")
else:
raise RuntimeError("only topk=2 is supported now")

@@ -279,7 +279,7 @@ def _could_be_overlap(self):
# NOTE current different nccl comm will use different cuda stream
# so if there too many dp group there will be too many stream need to be
# created and sync.
# revise here when framework support custom stream in static mode.
# revise here when framework support custom stream in static graph mode.
num_dp_comm_stream = len(set(self._group_to_grad_name_map.keys()))
if num_dp_comm_stream > __max_stream_num_allow__:
return False
2 changes: 1 addition & 1 deletion python/paddle/fluid/compiler.py
@@ -751,7 +751,7 @@ def patch_getter(self, item):
def patch_lr_scheduler(ipu_strategy):
from paddle.optimizer.lr import LRScheduler

# For IPU dynamic graph usage, lr_var is not synced in executor as static mode do.
# For IPU dynamic graph usage, lr_var is not synced in executor as static graph mode do.
# Manually set lr to ipu_strategy to update the lr.
old_step = LRScheduler.step

2 changes: 1 addition & 1 deletion python/paddle/fluid/contrib/optimizer.py
@@ -53,7 +53,7 @@ class Momentum(Optimizer):
momentum (float): Momentum factor
parameter_list (Iterable, optional): Iterable of ``Variable`` names to update to minimize ``loss``. \
This parameter is required in dygraph mode. \
The default value is None in static mode, at this time all parameters will be updated.
The default value is None in static graph mode, at this time all parameters will be updated.
use_nesterov (bool, optional): Enables Nesterov momentum, default is false.
regularization (WeightDecayRegularizer, optional): The strategy of regularization. There are two method: \
:ref:`api_fluid_regularizer_L1Decay` , :ref:`api_fluid_regularizer_L2Decay` . If a parameter has set \
2 changes: 1 addition & 1 deletion python/paddle/fluid/dataloader/dataloader_iter.py
@@ -307,7 +307,7 @@ def __next__(self):
if _in_legacy_dygraph():
data = self._reader.read_next_var_list()
data = _restore_batch(data, self._structure_infos.pop(0))
else: # in static mode
else: # in static graph mode
if self._return_list:
data = self._reader.read_next_list()
for i in range(len(data)):
4 changes: 2 additions & 2 deletions python/paddle/fluid/dygraph/base.py
@@ -215,7 +215,7 @@ def enable_dygraph(place=None):
print(paddle.in_dynamic_mode()) # True, dynamic mode is turn ON by default since paddle 2.0.0

paddle.enable_static()
print(paddle.in_dynamic_mode()) # False, Now we are in static mode
print(paddle.in_dynamic_mode()) # False, Now we are in static graph mode

paddle.disable_static()
print(paddle.in_dynamic_mode()) # True, Now we are in dynamic mode
@@ -250,7 +250,7 @@ def disable_dygraph():
print(paddle.in_dynamic_mode()) # True, dynamic mode is turn ON by default since paddle 2.0.0

paddle.enable_static()
print(paddle.in_dynamic_mode()) # False, Now we are in static mode
print(paddle.in_dynamic_mode()) # False, Now we are in static graph mode

paddle.disable_static()
print(paddle.in_dynamic_mode()) # True, Now we are in dynamic mode
8 changes: 4 additions & 4 deletions python/paddle/fluid/dygraph/layers.py
@@ -209,8 +209,8 @@ def forward(self, input):

"""
# global setting in dygraph
# NOTE(chenweihang): nn.Layer also can be used in static mode,
# but _dygraph_tracer() can not be called in static mode
# NOTE(chenweihang): nn.Layer also can be used in static graph mode,
# but _dygraph_tracer() can not be called in static graph mode
if _non_static_mode():
framework._dygraph_tracer().train_mode()
# Layer-level setting
@@ -250,8 +250,8 @@ def forward(self, input):

"""
# global setting in dygraph
# NOTE(chenweihang): nn.Layer also can be used in static mode,
# but _dygraph_tracer() can not be called in static mode
# NOTE(chenweihang): nn.Layer also can be used in static graph mode,
# but _dygraph_tracer() can not be called in static graph mode
if _non_static_mode():
framework._dygraph_tracer().eval_mode()
# Layer-level setting
2 changes: 1 addition & 1 deletion python/paddle/fluid/dygraph/parallel.py
@@ -628,7 +628,7 @@ def __init__(

assert (
_non_static_mode()
), "It's not supported to construct DataParallel in static mode."
), "It's not supported to construct DataParallel in static graph mode."

self._layers = layers
self.find_unused_parameters = find_unused_parameters