Unify the English translations of static graph mode and dynamic graph mode in the documentation (#49170)
* 1219

* temporarily change the num_diff_files limit, test=document_fix

* Revert "temporarily change the num_diff_files limit, test=document_fix"

This reverts commit 8e70f00.

* for codestyle

* remove duplicate license

* `static mode` -> `static graph mode`

* Update hybrid_parallel_inference.py

* Update layer_function_generator.py

* Update manipulation.py

* reset

Co-authored-by: Ligoml <39876205+Ligoml@users.noreply.github.com>
Co-authored-by: SigureMo <sigure.qaq@gmail.com>
3 people authored Dec 30, 2022
1 parent 162f8fe commit a186e60
Showing 106 changed files with 207 additions and 184 deletions.
2 changes: 1 addition & 1 deletion paddle/fluid/eager/autograd_meta.h
@@ -23,7 +23,7 @@ using AbstractAutogradMeta = paddle::experimental::AbstractAutogradMeta;
*
* AutogradMeta is what record the backward info for tensor. When we run
* computation graph eagerly, we can not build a static paddle program like
* static mode do, so we need a new method to record forward info to trace
* static graph mode do, so we need a new method to record forward info to trace
* backward when we finish all forward computation. This require our
* AutogradMeta class record following main members
*
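For context (not part of this commit), a minimal dynamic graph (eager) autograd sketch in Python illustrating what the comment above describes: without a static Program, backward information is recorded per tensor as the forward ops run. The values and shapes are illustrative.

import paddle

# Minimal eager-mode sketch: forward ops are traced as they execute, and the
# recorded per-tensor autograd info is what backward() later walks.
x = paddle.to_tensor([1.0, 2.0, 3.0], stop_gradient=False)
y = (x * x).sum()   # forward runs eagerly; no static program is built
y.backward()        # backward is assembled from the recorded autograd meta
print(x.grad)       # [2., 4., 6.]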
@@ -760,7 +760,7 @@ bool BuildOpFuncList(const platform::Place& place,
new phi::Kernel(phi::KernelFactory::Instance().SelectKernel(
phi_kernel_name, phi_cpu_kernel_key)));
if (op_with_kernel->PhiKernel()->IsValid()) {
VLOG(6) << "Static mode PrepareImpl - kernel name: "
VLOG(6) << "Static graph mode PrepareImpl - kernel name: "
<< phi_kernel_name
<< " | kernel key: " << phi_cpu_kernel_key
<< " | kernel: " << *(op_with_kernel->PhiKernel());
14 changes: 7 additions & 7 deletions paddle/fluid/framework/operator.cc
@@ -1679,12 +1679,12 @@ void OperatorWithKernel::RunImpl(const Scope& scope,
phi_kernel_name, phi_kernel_key)));

if (phi_kernel_->IsValid()) {
VLOG(6) << "Static mode ChoosePhiKernel - kernel name: "
VLOG(6) << "Static graph mode ChoosePhiKernel - kernel name: "
<< phi_kernel_name << " | kernel key: " << phi_kernel_key
<< " | kernel: " << *phi_kernel_;
} else {
VLOG(6) << "Static mode ChoosePhiKernel - kernel `" << phi_kernel_name
<< "` not found.";
VLOG(6) << "Static graph mode ChoosePhiKernel - kernel `"
<< phi_kernel_name << "` not found.";
}
} else {
phi_kernel_name = kernel_signature_->name;
@@ -1815,7 +1815,7 @@ void OperatorWithKernel::RunImpl(const Scope& scope,

dev_ctx = pool.Get(platform::CPUPlace());
if (phi_kernel_->IsValid()) {
VLOG(6) << "Static mode PrepareImpl - kernel name: "
VLOG(6) << "Static graph mode PrepareImpl - kernel name: "
<< phi_kernel_name << " | kernel key: " << phi_cpu_kernel_key
<< " | kernel: " << *phi_kernel_;
run_phi_kernel_ = true;
@@ -2083,11 +2083,11 @@ phi::KernelKey OperatorWithKernel::ChoosePhiKernel(
phi_kernel_name, phi_kernel_key)));

if (phi_kernel_->IsValid()) {
VLOG(6) << "Static mode ChoosePhiKernel - kernel name: " << phi_kernel_name
<< " | kernel key: " << phi_kernel_key
VLOG(6) << "Static graph mode ChoosePhiKernel - kernel name: "
<< phi_kernel_name << " | kernel key: " << phi_kernel_key
<< " | kernel: " << *phi_kernel_;
} else {
VLOG(6) << "Static mode ChoosePhiKernel - kernel `" << phi_kernel_name
VLOG(6) << "Static graph mode ChoosePhiKernel - kernel `" << phi_kernel_name
<< "` not found.";
}
return phi_kernel_key;
2 changes: 1 addition & 1 deletion paddle/fluid/imperative/tracer.h
@@ -136,7 +136,7 @@ class Tracer {
}

// Note(Aurelius84): The `tmp` is used as prefix key while naming a temporary
// intermediate var both in imperative and static mode. But the
// intermediate var both in imperative and static graph mode. But the
// `UniqueNameGenerator` in C++ and `unique_name.py` in Python doesn't share
// the same auto-increment id. It will create a variable repeatedly with same
// name like `tmp_0` in some cases when transform dygraph into static layers.
31 changes: 16 additions & 15 deletions paddle/fluid/inference/tensorrt/convert/c_allreduce_op.cc
@@ -1,16 +1,16 @@
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/plugin/c_allreduce_op_plugin.h"
@@ -32,8 +32,9 @@ class CAllReduceOpConverter : public OpConverter {
bool test_mode) override {
VLOG(4) << "convert fluid callreduce op to tensorrt layer";
if (!engine_->with_dynamic_shape()) {
PADDLE_THROW(platform::errors::Fatal(
"Unsupported static mode. Please set dynamic shape of inputs."));
PADDLE_THROW(
platform::errors::Fatal("Unsupported static graph mode. Please set "
"dynamic shape of inputs."));
}
ReduceType red_type = op_to_reduce_type[op.type()];
std::string name = op.type();
@@ -28,8 +28,9 @@ class PrelnResidualBiasOpConverter : public OpConverter {
bool test_mode) override {
VLOG(4) << "convert fused preln_residual_bias op to tensorrt layer";
if (!engine_->with_dynamic_shape()) {
PADDLE_THROW(platform::errors::Fatal(
"Unsupported static mode. Please set dynamic shape of inputs."));
PADDLE_THROW(
platform::errors::Fatal("Unsupported static graph mode. Please set "
"dynamic shape of inputs."));
}
framework::OpDesc op_desc(op, nullptr);
// Declare inputs
4 changes: 2 additions & 2 deletions paddle/fluid/operators/run_program_op.h
@@ -288,8 +288,8 @@ class RunProgramOpKernel : public framework::OpKernel<T> {
auto *out_scope_vec = ctx.Output<StepScopeVar>("OutScope");
std::unique_ptr<framework::Scope> inner_scope{nullptr};
if (out_scope_vec->size() == 0) {
// For cuda graph under static mode usage.
// For static mode, we cannot set value of a tensor before any run,
// For cuda graph under static graph mode usage.
// For static graph mode, we cannot set value of a tensor before any run,
// the OutScope variable passed to the op actually contains nothing.
// Just create a tmp scope to run the program.
PADDLE_ENFORCE_EQ(
2 changes: 1 addition & 1 deletion paddle/fluid/operators/set_value_op.cc
@@ -145,7 +145,7 @@ class SetValueMaker : public framework::OpProtoAndCheckerMaker {
AddAttr<std::vector<int64_t>>("shape", "(vector<int64_t>) Shape of values.")
.SetDefault({});
AddComment(R"DOC(SetValue operator.
Assignment to a phi::DenseTensor in static mode.
Assignment to a phi::DenseTensor in static graph mode.
)DOC");
}
};
6 changes: 3 additions & 3 deletions paddle/fluid/pybind/eager_legacy_op_function_generator.cc
@@ -443,9 +443,9 @@ GenerateOpFunctions() {
// In this case, output will reuse input varbase.
// Dygraph mode needs to be aligned with the in-place strategy in static
// mode, and the mapping relationships between output and input that have
// been defined in static mode should be used in dygraph mode.
// Find which ops need to use Inplace strategy in static mode, and get the
// mapping relationship between Inplace output and input.
// been defined in static graph mode should be used in dygraph mode.
// Find which ops need to use Inplace strategy in static graph mode, and get
// the mapping relationship between Inplace output and input.
auto& infer_inplace =
paddle::framework::OpInfoMap::Instance().Get(op_type).infer_inplace_;
std::map<std::string, std::string> inplace_map;
3 changes: 2 additions & 1 deletion paddle/fluid/pybind/eager_properties.cc
@@ -39,7 +39,8 @@ PyObject* tensor_properties_get_name(TensorObject* self, void* closure) {
EAGER_TRY
// NOTE(dev): [why not use egr::Controller::Instance::GernerateUniqueName()?]
// Beacause Controller must holder a tracer, but 'tensor.name' maybe called
// everywhere such as static mode in @to_static, which means tracer is None.
// everywhere such as static graph mode in @to_static, which means tracer is
// None.
static egr::UniqueNameGenerator name_generator;
if (self->tensor.name().empty()) {
self->tensor.set_name(name_generator.Generate());
6 changes: 3 additions & 3 deletions paddle/fluid/pybind/op_function_generator.cc
@@ -473,9 +473,9 @@ GenerateOpFunctions(int split_count) {
// In this case, output will reuse input varbase.
// Dygraph mode needs to be aligned with the in-place strategy in static
// mode, and the mapping relationships between output and input that have
// been defined in static mode should be used in dygraph mode.
// Find which ops need to use Inplace strategy in static mode, and get the
// mapping relationship between Inplace output and input.
// been defined in static graph mode should be used in dygraph mode.
// Find which ops need to use Inplace strategy in static graph mode, and get
// the mapping relationship between Inplace output and input.
auto& infer_inplace =
paddle::framework::OpInfoMap::Instance().Get(op_type).infer_inplace_;
std::map<std::string, std::string> inplace_map;
2 changes: 1 addition & 1 deletion paddle/phi/kernels/fusion/README.md
@@ -2,7 +2,7 @@

1. We don't recommend to implement Python API for fusion kernel

- We don't recommend to implement Python API for fusion kernel, because it contains many inputs or outputs arguments generally, it is difficult to use and understand as an Python API, we recommend to call fusion kernel by pass optimization in dy2static mode or static mode.
- We don't recommend to implement Python API for fusion kernel, because it contains many inputs or outputs arguments generally, it is difficult to use and understand as an Python API, we recommend to call fusion kernel by pass optimization in dy2static mode or static graph mode.
- We also don't recommend to reuse fusion kernel in other kernel implementation, but recommended that the fusion kernel be implemented by reusing other kernels.

2. We don't require fusion kernel to have implementations for all devices
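As an aside (not part of this commit), a minimal Python sketch of the route to fusion kernels that the README recommends: keep user code at the plain-API level and switch to static graph mode via dy2static, letting graph passes decide whether a fused kernel applies. The layer, sizes, and pass behaviour here are illustrative assumptions.

import paddle

class TinyNet(paddle.nn.Layer):
    def __init__(self):
        super().__init__()
        self.fc = paddle.nn.Linear(8, 8)

    @paddle.jit.to_static
    def forward(self, x):
        # Ordinary ops only; a fusion pass may rewrite the captured static
        # graph to use a fused kernel if one matches, with no dedicated API.
        return paddle.nn.functional.relu(self.fc(x))

net = TinyNet()
out = net(paddle.randn([2, 8]))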
4 changes: 2 additions & 2 deletions python/paddle/device/cuda/graphs.py
@@ -82,7 +82,7 @@ def print_to_dot_files(self, dirname, flags=None):
def wrap_cuda_graph(function, mode="thread_local", memory_pool="default"):
assert mode in ALL_MODES
if not paddle.in_dynamic_mode():
# static mode
# static graph mode
from paddle.fluid.framework import _cuda_graph_guard

global cuda_graph_id
@@ -94,7 +94,7 @@ def wrap_cuda_graph(function, mode="thread_local", memory_pool="default"):
memory_pool_id = CoreCUDAGraph.gen_new_memory_pool_id()
else:
raise ValueError(
"memory_pool should be one of default or new under static mode, but got",
"memory_pool should be one of default or new under static graph mode, but got",
memory_pool,
)
return _cuda_graph_guard(
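For context (not part of this commit), a minimal usage sketch of `wrap_cuda_graph`, assuming a CUDA-enabled build of Paddle; the wrapped layer and shapes are made up for illustration.

import paddle
from paddle.device.cuda.graphs import wrap_cuda_graph

# Sketch only: wrap a callable so its computation can be captured and
# replayed as a CUDA graph. Under static graph mode, memory_pool must be
# "default" or "new", matching the error message in the diff above.
layer = paddle.nn.Linear(16, 16)
graphed = wrap_cuda_graph(layer, mode="thread_local", memory_pool="default")

x = paddle.randn([4, 16])
out = graphed(x)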
2 changes: 1 addition & 1 deletion python/paddle/distributed/auto_parallel/engine.py
@@ -539,7 +539,7 @@ def _build(self, mode):

paddle.enable_static()
else:
# build program in static mode
# build program in static graph mode
serial_main_prog = self._serial_main_progs.get(mode, None)
if serial_main_prog is not None:
return
2 changes: 1 addition & 1 deletion python/paddle/distributed/collective.py
@@ -162,7 +162,7 @@ def _new_process_group_impl(

# _custom_gid provides a way for users to
# set the group id, which is usually useful
# to be compatible with the static mode.
# to be compatible with the static graph mode.
_custom_gid = None


6 changes: 4 additions & 2 deletions python/paddle/distributed/communication/stream/all_gather.py
@@ -178,10 +178,12 @@ def all_gather(
tensor_or_tensor_list, tensor, group, sync_op, use_calc_stream
)
else:
assert group is None, "Group can not be used in static mode for now."
assert (
group is None
), "Group can not be used in static graph mode for now."
if paddle.is_tensor(tensor_or_tensor_list):
raise RuntimeError(
"Only support passing a tensor list to `all_gather` in static mode now."
"Only support passing a tensor list to `all_gather` in static graph mode now."
)
else:
return _all_gather_in_static_mode(
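For context (not part of this commit), a minimal dynamic graph mode usage sketch of `paddle.distributed.all_gather`; in static graph mode, as the assertion above shows, only the default group (group=None) and a tensor-list output are currently supported. The launch command and tensor values are illustrative.

# Illustrative sketch; run with something like:
#   python -m paddle.distributed.launch --gpus "0,1" this_script.py
import paddle
import paddle.distributed as dist

dist.init_parallel_env()
tensor_list = []
data = paddle.to_tensor([dist.get_rank()])
dist.all_gather(tensor_list, data)  # each rank receives every rank's tensor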
6 changes: 4 additions & 2 deletions python/paddle/distributed/communication/stream/all_reduce.py
@@ -58,7 +58,7 @@ def _all_reduce_in_static_mode(tensor, op, group, sync_op, use_calc_stream):
if not isinstance(ring_id, int):
raise ValueError("The type of 'ring_id' for all_reduce should be int.")

# TODO: Support task and use task.wait in static mode
# TODO: Support task and use task.wait in static graph mode
# Use use_calc_stream rather than sync_op
helper = layer_helper.LayerHelper(op_type, **locals())
helper.append_op(
@@ -123,7 +123,9 @@ def all_reduce(
tensor, op, group, sync_op, use_calc_stream
)
else:
assert group is None, "Group can not be used in static mode for now."
assert (
group is None
), "Group can not be used in static graph mode for now."
return _all_reduce_in_static_mode(
tensor, op, group, sync_op, use_calc_stream
)
4 changes: 3 additions & 1 deletion python/paddle/distributed/communication/stream/all_to_all.py
@@ -200,7 +200,9 @@ def alltoall(
"The output and input should be both tensor or tensor list."
)
else:
assert group is None, "Group can not be used in static mode for now."
assert (
group is None
), "Group can not be used in static graph mode for now."
return _all_to_all_in_static_mode(
out_tensor_or_tensor_list,
in_tensor_or_tensor_list,
4 changes: 3 additions & 1 deletion python/paddle/distributed/communication/stream/broadcast.py
@@ -126,7 +126,9 @@ def broadcast(tensor, src, group=None, sync_op=True, use_calc_stream=False):
tensor, src_rank_in_group, group, sync_op, use_calc_stream
)
else:
assert group is None, "Group can not be used in static mode for now."
assert (
group is None
), "Group can not be used in static graph mode for now."
return _broadcast_in_static_mode(
tensor, src, group, sync_op, use_calc_stream
)
4 changes: 3 additions & 1 deletion python/paddle/distributed/communication/stream/recv.py
@@ -114,7 +114,9 @@ def recv(tensor, src=0, group=None, sync_op=True, use_calc_stream=False):
tensor, src_rank_in_group, group, sync_op, use_calc_stream
)
else:
assert group is None, "Group can not be used in static mode for now."
assert (
group is None
), "Group can not be used in static graph mode for now."
return _recv_in_static_mode(
tensor, src, group, sync_op, use_calc_stream
)
4 changes: 3 additions & 1 deletion python/paddle/distributed/communication/stream/reduce.py
@@ -139,7 +139,9 @@ def reduce(
tensor, dst_rank_in_group, op, group, sync_op, use_calc_stream
)
else:
assert group is None, "Group can not be used in static mode for now."
assert (
group is None
), "Group can not be used in static graph mode for now."
return _reduce_in_static_mode(
tensor, dst, op, group, sync_op, use_calc_stream
)
4 changes: 3 additions & 1 deletion python/paddle/distributed/communication/stream/scatter.py
@@ -220,7 +220,9 @@ def scatter(
use_calc_stream,
)
else:
assert group is None, "Group can not be used in static mode for now."
assert (
group is None
), "Group can not be used in static graph mode for now."

return _scatter_in_static_mode(
tensor,
4 changes: 3 additions & 1 deletion python/paddle/distributed/communication/stream/send.py
@@ -113,7 +113,9 @@ def send(tensor, dst=0, group=None, sync_op=True, use_calc_stream=False):
tensor, dst_rank_in_group, group, sync_op, use_calc_stream
)
else:
assert group is None, "Group can not be used in static mode for now."
assert (
group is None
), "Group can not be used in static graph mode for now."
return _send_in_static_mode(
tensor, dst, group, sync_op, use_calc_stream
)
@@ -206,7 +206,8 @@ def __init__(
elif core.is_compiled_with_cuda():
self._device = "gpu"
assert self._device, "Only gpu and npu are supported."
assert not in_dygraph_mode(), "Only static mode is supported."

assert not in_dygraph_mode(), "Only static graph mode is supported."

op_maker = core.op_proto_and_checker_maker
self._op_role = op_maker.OpRole
2 changes: 1 addition & 1 deletion python/paddle/distributed/models/moe/utils.py
@@ -125,7 +125,7 @@ def _random_routing(topk_idx, topk_value, prob, topk=2):
if in_dygraph_mode():
return _legacy_C_ops.random_routing(prob, topk_value, topk_idx)
else:
raise RuntimeError("Not supporting static mode now")
raise RuntimeError("Not supporting static graph mode now")
else:
raise RuntimeError("only topk=2 is supported now")

@@ -279,7 +279,7 @@ def _could_be_overlap(self):
# NOTE current different nccl comm will use different cuda stream
# so if there too many dp group there will be too many stream need to be
# created and sync.
# revise here when framework support custom stream in static mode.
# revise here when framework support custom stream in static graph mode.
num_dp_comm_stream = len(set(self._group_to_grad_name_map.keys()))
if num_dp_comm_stream > __max_stream_num_allow__:
return False
2 changes: 1 addition & 1 deletion python/paddle/fluid/compiler.py
@@ -751,7 +751,7 @@ def patch_getter(self, item):
def patch_lr_scheduler(ipu_strategy):
from paddle.optimizer.lr import LRScheduler

# For IPU dynamic graph usage, lr_var is not synced in executor as static mode do.
# For IPU dynamic graph usage, lr_var is not synced in executor as static graph mode do.
# Manually set lr to ipu_strategy to update the lr.
old_step = LRScheduler.step

2 changes: 1 addition & 1 deletion python/paddle/fluid/contrib/optimizer.py
@@ -53,7 +53,7 @@ class Momentum(Optimizer):
momentum (float): Momentum factor
parameter_list (Iterable, optional): Iterable of ``Variable`` names to update to minimize ``loss``. \
This parameter is required in dygraph mode. \
The default value is None in static mode, at this time all parameters will be updated.
The default value is None in static graph mode, at this time all parameters will be updated.
use_nesterov (bool, optional): Enables Nesterov momentum, default is false.
regularization (WeightDecayRegularizer, optional): The strategy of regularization. There are two method: \
:ref:`api_fluid_regularizer_L1Decay` , :ref:`api_fluid_regularizer_L2Decay` . If a parameter has set \
2 changes: 1 addition & 1 deletion python/paddle/fluid/dataloader/dataloader_iter.py
@@ -303,7 +303,7 @@ def __next__(self):
)
data = _restore_batch(data, self._structure_infos.pop(0))
else:
# in static mode
# in static graph mode
if self._return_list:
data = self._reader.read_next_list()
for i in range(len(data)):
4 changes: 2 additions & 2 deletions python/paddle/fluid/dygraph/base.py
@@ -210,7 +210,7 @@ def enable_dygraph(place=None):
print(paddle.in_dynamic_mode()) # True, dynamic mode is turn ON by default since paddle 2.0.0
paddle.enable_static()
print(paddle.in_dynamic_mode()) # False, Now we are in static mode
print(paddle.in_dynamic_mode()) # False, Now we are in static graph mode
paddle.disable_static()
print(paddle.in_dynamic_mode()) # True, Now we are in dynamic mode
@@ -245,7 +245,7 @@ def disable_dygraph():
print(paddle.in_dynamic_mode()) # True, dynamic mode is turn ON by default since paddle 2.0.0
paddle.enable_static()
print(paddle.in_dynamic_mode()) # False, Now we are in static mode
print(paddle.in_dynamic_mode()) # False, Now we are in static graph mode
paddle.disable_static()
print(paddle.in_dynamic_mode()) # True, Now we are in dynamic mode